KVM: x86/MMU: Clean up naming of exported Shadow MMU functions
Change the naming scheme of several functions exported from the shadow
MMU to match the naming scheme used by the TDP MMU: a kvm_shadow_mmu_
prefix. More cleanups will follow to convert the remaining functions to
a similar scheme; for now, start with the trivial renames.

No functional change intended.
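
For reference, the renames in this patch (as taken from the diff below) are:

  make_mmu_pages_available()       -> kvm_shadow_mmu_make_pages_available()
  kvm_mmu_unprotect_page_virt()    -> kvm_shadow_mmu_unprotect_page_virt()
  direct_map()                     -> kvm_shadow_mmu_direct_map()
  get_walk()                       -> kvm_shadow_mmu_get_walk()
  kvm_zap_obsolete_pages()         -> kvm_shadow_mmu_zap_obsolete_pages()
  kvm_rmap_zap_gfn_range()         -> kvm_shadow_mmu_zap_gfn_range()
  kvm_rmap_zap_collapsible_sptes() -> kvm_shadow_mmu_zap_collapsible_sptes()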

Signed-off-by: Ben Gardon <bgardon@google.com>
Link: https://lore.kernel.org/r/20230202182809.1929122-13-bgardon@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Ben Gardon authored and sean-jc committed Mar 17, 2023
1 parent 65d06e1 commit 6872b75
Showing 4 changed files with 31 additions and 27 deletions.
19 changes: 10 additions & 9 deletions arch/x86/kvm/mmu/mmu.c
@@ -1100,7 +1100,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
int r;

write_lock(&vcpu->kvm->mmu_lock);
- r = make_mmu_pages_available(vcpu);
+ r = kvm_shadow_mmu_make_pages_available(vcpu);
if (r < 0)
goto out_unlock;

@@ -1175,7 +1175,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
if (is_tdp_mmu_active(vcpu))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
- leaf = get_walk(vcpu, addr, sptes, &root);
+ leaf = kvm_shadow_mmu_get_walk(vcpu, addr, sptes, &root);

walk_shadow_page_lockless_end(vcpu);

@@ -1443,11 +1443,11 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (is_page_fault_stale(vcpu, fault))
goto out_unlock;

- r = make_mmu_pages_available(vcpu);
+ r = kvm_shadow_mmu_make_pages_available(vcpu);
if (r)
goto out_unlock;

- r = direct_map(vcpu, fault);
+ r = kvm_shadow_mmu_direct_map(vcpu, fault);

out_unlock:
write_unlock(&vcpu->kvm->mmu_lock);
@@ -1482,7 +1482,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
trace_kvm_page_fault(vcpu, fault_address, error_code);

if (kvm_event_needs_reinjection(vcpu))
- kvm_mmu_unprotect_page_virt(vcpu, fault_address);
+ kvm_shadow_mmu_unprotect_page_virt(vcpu, fault_address);
r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
insn_len);
} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
@@ -2818,7 +2818,8 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
* In order to ensure all vCPUs drop their soon-to-be invalid roots,
* invalidating TDP MMU roots must be done while holding mmu_lock for
* write and in the same critical section as making the reload request,
- * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
+ * e.g. before kvm_shadow_mmu_zap_obsolete_pages() could drop mmu_lock
+ * and yield.
*/
if (tdp_mmu_enabled)
kvm_tdp_mmu_invalidate_all_roots(kvm);
@@ -2833,7 +2834,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
*/
kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);

- kvm_zap_obsolete_pages(kvm);
+ kvm_shadow_mmu_zap_obsolete_pages(kvm);

write_unlock(&kvm->mmu_lock);

@@ -2922,7 +2923,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)

kvm_mmu_invalidate_begin(kvm, 0, -1ul);

- flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+ flush = kvm_shadow_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);

if (tdp_mmu_enabled) {
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
@@ -3065,7 +3066,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
{
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
- kvm_rmap_zap_collapsible_sptes(kvm, slot);
+ kvm_shadow_mmu_zap_collapsible_sptes(kvm, slot);
write_unlock(&kvm->mmu_lock);
}

2 changes: 1 addition & 1 deletion arch/x86/kvm/mmu/paging_tmpl.h
@@ -822,7 +822,7 @@ int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (is_page_fault_stale(vcpu, fault))
goto out_unlock;

- r = make_mmu_pages_available(vcpu);
+ r = kvm_shadow_mmu_make_pages_available(vcpu);
if (r)
goto out_unlock;
r = FNAME(fetch)(vcpu, fault, &walker);
20 changes: 11 additions & 9 deletions arch/x86/kvm/mmu/shadow_mmu.c
@@ -2089,7 +2089,7 @@ static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
return 0;
}

- static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
+ int kvm_shadow_mmu_make_pages_available(struct kvm_vcpu *vcpu)
{
unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

@@ -2153,7 +2153,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
return r;
}

- static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+ int kvm_shadow_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa;
int r;
@@ -2442,7 +2442,8 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep);
}

- static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+ int kvm_shadow_mmu_direct_map(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault)
{
struct kvm_shadow_walk_iterator it;
struct kvm_mmu_page *sp;
@@ -2660,7 +2661,7 @@ int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
return r;

write_lock(&vcpu->kvm->mmu_lock);
- r = make_mmu_pages_available(vcpu);
+ r = kvm_shadow_mmu_make_pages_available(vcpu);
if (r < 0)
goto out_unlock;

@@ -2908,7 +2909,8 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
*
* Must be called between walk_shadow_page_lockless_{begin,end}.
*/
- int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
+ int kvm_shadow_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level)
{
struct kvm_shadow_walk_iterator iterator;
int leaf = -1;
@@ -3219,7 +3221,7 @@ static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
}

#define BATCH_ZAP_PAGES 10
- void kvm_zap_obsolete_pages(struct kvm *kvm)
+ void kvm_shadow_mmu_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
int nr_zapped, batch = 0;
@@ -3280,7 +3282,7 @@ bool kvm_shadow_mmu_has_zapped_obsolete_pages(struct kvm *kvm)
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

- bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ bool kvm_shadow_mmu_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
const struct kvm_memory_slot *memslot;
struct kvm_memslots *slots;
@@ -3531,8 +3533,8 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
return need_tlb_flush;
}

- static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
+ void kvm_shadow_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot)
{
/*
* Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
17 changes: 9 additions & 8 deletions arch/x86/kvm/mmu/shadow_mmu.h
@@ -72,18 +72,19 @@ bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list);
void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list);

- int make_mmu_pages_available(struct kvm_vcpu *vcpu);
+ int kvm_shadow_mmu_make_pages_available(struct kvm_vcpu *vcpu);

- int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+ int kvm_shadow_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);

- int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+ int kvm_shadow_mmu_direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte);

hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, u8 level);
int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu);
int mmu_alloc_special_roots(struct kvm_vcpu *vcpu);

- int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level);
+ int kvm_shadow_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+ int *root_level);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes, struct kvm_page_track_notifier_node *node);
@@ -98,8 +99,8 @@ bool walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot,
bool walk_slot_rmaps_4k(struct kvm *kvm, const struct kvm_memory_slot *slot,
slot_rmaps_handler fn, bool flush_on_yield);

- void kvm_zap_obsolete_pages(struct kvm *kvm);
- bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
+ void kvm_shadow_mmu_zap_obsolete_pages(struct kvm *kvm);
+ bool kvm_shadow_mmu_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

bool slot_rmap_write_protect(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot);
@@ -108,8 +109,8 @@ void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
const struct kvm_memory_slot *slot,
gfn_t start, gfn_t end,
int target_level);
- void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot);
+ void kvm_shadow_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot);

bool kvm_shadow_mmu_has_zapped_obsolete_pages(struct kvm *kvm);
unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free);
