Skip to content

Commit

Permalink
KVM: x86/mmu: Refactor yield safe root iterator
Browse files Browse the repository at this point in the history
Refactor the yield safe TDP MMU root iterator to be more amenable to
changes in future commits which will allow it to be used under the MMU
lock in read mode. Currently the iterator requires a complicated dance
between the helper functions and different parts of the for loop, which
makes it hard to reason about. Moving all the logic into a single
function simplifies the iterator substantially.

Signed-off-by: Ben Gardon <bgardon@google.com>
  • Loading branch information
Ben Gardon authored and intel-lab-lkp committed Mar 31, 2021
1 parent 69d1447 commit 124c869
Showing 1 changed file with 25 additions and 18 deletions.
43 changes: 25 additions & 18 deletions arch/x86/kvm/mmu/tdp_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,26 +68,34 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
tdp_mmu_free_sp(root);
}

static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
struct kvm_mmu_page *root)
/*
* Finds the next valid root after root (or the first valid root if root
* is NULL), takes a reference on it, and returns that next root. If root
* is not NULL, this thread should have already taken a reference on it, and
* that reference will be dropped. If no valid root is found, this
* function will return NULL.
*/
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *prev_root)
{
lockdep_assert_held_write(&kvm->mmu_lock);
struct kvm_mmu_page *next_root;

if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
return false;
lockdep_assert_held_write(&kvm->mmu_lock);

kvm_tdp_mmu_get_root(kvm, root);
return true;
if (prev_root)
next_root = list_next_entry(prev_root, link);
else
next_root = list_first_entry(&kvm->arch.tdp_mmu_roots,
typeof(*next_root), link);

}
if (list_entry_is_head(next_root, &kvm->arch.tdp_mmu_roots, link))
next_root = NULL;
else
kvm_tdp_mmu_get_root(kvm, next_root);

static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *root)
{
struct kvm_mmu_page *next_root;
if (prev_root)
kvm_tdp_mmu_put_root(kvm, prev_root);

next_root = list_next_entry(root, link);
kvm_tdp_mmu_put_root(kvm, root);
return next_root;
}

Expand All @@ -97,10 +105,9 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* if exiting the loop early, the caller must drop the reference to the most
* recent root. (Unless keeping a live reference is desirable.)
*/
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \
typeof(*_root), link); \
tdp_mmu_next_root_valid(_kvm, _root); \
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
for (_root = tdp_mmu_next_root(_kvm, NULL); \
_root; \
_root = tdp_mmu_next_root(_kvm, _root))

/* Only safe under the MMU lock in write mode, without yielding. */
Expand Down

0 comments on commit 124c869

Please sign in to comment.