KVM: x86/mmu: fast_page_fault support for the TDP MMU
Make fast_page_fault interoperate with the TDP MMU by leveraging
walk_shadow_page_lockless_{begin,end} to acquire the RCU read lock and
introducing a new helper function kvm_tdp_mmu_get_last_sptep_lockless to
grab the lowest level sptep.

Suggested-by: Ben Gardon <bgardon@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
dmatlack authored and intel-lab-lkp committed Jun 30, 2021
1 parent d0699d7 commit 7709823
Showing 3 changed files with 78 additions and 15 deletions.
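For orientation, here is a condensed sketch (not part of the patch) of the call pattern this change establishes in fast_page_fault(). The name fast_page_fault_sketch and the simplifications are illustrative assumptions only: the retry loop, the spte fixup via fast_pf_fix_direct_spte, and tracing are omitted; the authoritative flow is in the arch/x86/kvm/mmu/mmu.c hunks below.

static int fast_page_fault_sketch(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
{
        int ret = RET_PF_INVALID;
        u64 spte = 0ull;
        u64 *sptep;

        if (!page_fault_can_be_fast(error_code))
                return ret;

        /* On the TDP MMU this takes the RCU read lock, so sptes stay valid. */
        walk_shadow_page_lockless_begin(vcpu);

        /*
         * get_last_sptep_lockless() dispatches to
         * kvm_tdp_mmu_get_last_sptep_lockless() when the TDP MMU is in use,
         * otherwise it walks the shadow page tables with
         * for_each_shadow_entry_lockless().
         */
        sptep = get_last_sptep_lockless(vcpu, gpa, &spte);
        if (sptep && is_shadow_present_pte(spte)) {
                /* ... examine spte and, if safe, fix it in place ... */
        }

        /* Contract: sptep must not be used after the lockless walk ends. */
        walk_shadow_page_lockless_end(vcpu);

        return ret;
}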
55 changes: 40 additions & 15 deletions arch/x86/kvm/mmu/mmu.c
@@ -3105,15 +3105,45 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
return spte & PT_PRESENT_MASK;
}

/*
* Returns the last level spte pointer of the shadow page walk for the given
* gpa, and sets *spte to the spte value. This spte may be non-present.
*
* If no walk could be performed, returns NULL and *spte does not contain valid
* data.
*
* Constraints:
* - Must be called between walk_shadow_page_lockless_{begin,end}.
* - The returned sptep must not be used after walk_shadow_page_lockless_end.
*/
u64 *get_last_sptep_lockless(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{
struct kvm_shadow_walk_iterator iterator;
u64 old_spte;
u64 *sptep = NULL;

if (is_tdp_mmu(vcpu->arch.mmu))
return kvm_tdp_mmu_get_last_sptep_lockless(vcpu, gpa, spte);

for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
sptep = iterator.sptep;
*spte = old_spte;

if (!is_shadow_present_pte(old_spte))
break;
}

return sptep;
}

/*
* Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
*/
static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
int ret = RET_PF_INVALID;
u64 spte = 0ull;
u64 *sptep = NULL;
uint retry_count = 0;

if (!page_fault_can_be_fast(error_code))
@@ -3122,16 +3152,14 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
walk_shadow_page_lockless_begin(vcpu);

do {
struct kvm_mmu_page *sp;
u64 new_spte;

for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
if (!is_shadow_present_pte(spte))
break;

sptep = get_last_sptep_lockless(vcpu, gpa, &spte);
if (!is_shadow_present_pte(spte))
break;

sp = sptep_to_sp(iterator.sptep);
sp = sptep_to_sp(sptep);
if (!is_last_spte(spte, sp->role.level))
break;

@@ -3189,8 +3217,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
* since the gfn is not stable for indirect shadow page. See
* Documentation/virt/kvm/locking.rst to get more detail.
*/
if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
new_spte)) {
if (fast_pf_fix_direct_spte(vcpu, sp, sptep, spte, new_spte)) {
ret = RET_PF_FIXED;
break;
}
@@ -3203,7 +3230,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)

} while (true);

trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret);
walk_shadow_page_lockless_end(vcpu);

return ret;
@@ -3838,11 +3865,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;

if (!is_tdp_mmu_fault) {
r = fast_page_fault(vcpu, gpa, error_code);
if (r != RET_PF_INVALID)
return r;
}
r = fast_page_fault(vcpu, gpa, error_code);
if (r != RET_PF_INVALID)
return r;

r = mmu_topup_memory_caches(vcpu, false);
if (r)
36 changes: 36 additions & 0 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -527,6 +527,10 @@ static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
if (is_removed_spte(iter->old_spte))
return false;

/*
* TDP MMU sptes can also be concurrently cmpxchg'd in
* fast_pf_fix_direct_spte as part of fast_page_fault.
*/
if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
new_spte) != iter->old_spte)
return false;
@@ -1546,3 +1550,35 @@ int kvm_tdp_mmu_get_walk_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,

return leaf;
}

/*
* Must be called between kvm_tdp_mmu_walk_shadow_page_lockless_{begin,end}.
*
* The returned sptep must not be used after
* kvm_tdp_mmu_walk_shadow_page_lockless_end.
*/
u64 *kvm_tdp_mmu_get_last_sptep_lockless(struct kvm_vcpu *vcpu, u64 addr,
u64 *spte)
{
struct tdp_iter iter;
struct kvm_mmu *mmu = vcpu->arch.mmu;
gfn_t gfn = addr >> PAGE_SHIFT;
tdp_ptep_t sptep = NULL;

tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
*spte = iter.old_spte;
sptep = iter.sptep;
}

if (sptep)
/*
* Perform the rcu dereference here since we are passing the
* sptep up to the generic MMU code which does not know the
* synchronization details of the TDP MMU. This is safe as long
* as the caller obeys the contract that the sptep is not used
* after kvm_tdp_mmu_walk_shadow_page_lockless_end.
*/
return rcu_dereference(sptep);

return NULL;
}
2 changes: 2 additions & 0 deletions arch/x86/kvm/mmu/tdp_mmu.h
@@ -81,6 +81,8 @@ void kvm_tdp_mmu_walk_lockless_begin(void);
void kvm_tdp_mmu_walk_lockless_end(void);
int kvm_tdp_mmu_get_walk_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
u64 *kvm_tdp_mmu_get_last_sptep_lockless(struct kvm_vcpu *vcpu, u64 addr,
u64 *spte);

#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);