Skip to content

Commit 1488199

Browse files
Ben Gardon authored and Paolo Bonzini committed
kvm: x86/mmu: Support disabling dirty logging for the tdp MMU
Dirty logging ultimately breaks down MMU mappings to 4k granularity. When dirty logging is no longer needed, these granular mappings represent a useless performance penalty. When dirty logging is disabled, search the paging structure for mappings that could be re-constituted into a large page mapping. Zap those mappings so that they can be faulted in again at a higher mapping level. Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell machine. This series introduced no new failures. This series can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538 Signed-off-by: Ben Gardon <bgardon@google.com> Message-Id: <20201014182700.2888246-17-bgardon@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent a6a0b05 commit 1488199

File tree

3 files changed

+63
-0
lines changed

3 files changed

+63
-0
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5544,6 +5544,9 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
55445544
spin_lock(&kvm->mmu_lock);
55455545
slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
55465546
kvm_mmu_zap_collapsible_spte, true);
5547+
5548+
if (kvm->arch.tdp_mmu_enabled)
5549+
kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
55475550
spin_unlock(&kvm->mmu_lock);
55485551
}
55495552

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1020,3 +1020,61 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
10201020
return spte_set;
10211021
}
10221022

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 *
 * Must be called with kvm->mmu_lock held (the caller in
 * kvm_mmu_zap_collapsible_sptes() takes it); the iterator may drop and
 * re-take the lock via tdp_mmu_iter_flush_cond_resched().
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	kvm_pfn_t pfn;
	bool spte_set = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		/* Only present, non-leaf (page-table) entries are candidates. */
		if (!is_shadow_present_pte(iter.old_spte) ||
		    is_last_spte(iter.old_spte, iter.level))
			continue;

		/*
		 * Skip entries whose backing page could not be re-mapped
		 * large anyway: reserved pfns, and pages that are not part
		 * of a transparent-huge-page compound mapping.
		 */
		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    !PageTransCompoundMap(pfn_to_page(pfn)))
			continue;

		/* Zap the non-leaf SPTE so the range can be re-faulted at a
		 * higher (large-page) mapping level. */
		tdp_mmu_set_spte(kvm, &iter, 0);

		/*
		 * NOTE(review): spte_set is overwritten, not accumulated,
		 * on every iteration, so whether the final flush below runs
		 * depends only on the LAST iteration's return value.
		 * Presumably tdp_mmu_iter_flush_cond_resched() flushes TLBs
		 * itself whenever it yields — verify its return semantics
		 * guarantee no zap is left unflushed.
		 */
		spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
	}

	/* Flush remote TLBs if the loop may have left a zap unflushed. */
	if (spte_set)
		kvm_flush_remote_tlbs(kvm);
}
1053+
1054+
/*
1055+
* Clear non-leaf entries (and free associated page tables) which could
1056+
* be replaced by large mappings, for GFNs within the slot.
1057+
*/
1058+
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1059+
const struct kvm_memory_slot *slot)
1060+
{
1061+
struct kvm_mmu_page *root;
1062+
int root_as_id;
1063+
1064+
for_each_tdp_mmu_root(kvm, root) {
1065+
root_as_id = kvm_mmu_page_as_id(root);
1066+
if (root_as_id != slot->as_id)
1067+
continue;
1068+
1069+
/*
1070+
* Take a reference on the root so that it cannot be freed if
1071+
* this thread releases the MMU lock and yields in this loop.
1072+
*/
1073+
kvm_mmu_get_root(kvm, root);
1074+
1075+
zap_collapsible_spte_range(kvm, root, slot->base_gfn,
1076+
slot->base_gfn + slot->npages);
1077+
1078+
kvm_mmu_put_root(kvm, root);
1079+
}
1080+
}

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
3838
gfn_t gfn, unsigned long mask,
3939
bool wrprot);
4040
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
41+
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
42+
const struct kvm_memory_slot *slot);
4143
#endif /* __KVM_X86_MMU_TDP_MMU_H */

0 commit comments

Comments
 (0)