Skip to content

Commit 472ba32

Browse files
mzhang3579 and sean-jc
authored and committed
KVM: x86/mmu: Plumb "struct kvm" all the way to pte_list_remove()
Plumb "struct kvm" all the way to pte_list_remove() to allow the usage of KVM_BUG() and/or KVM_BUG_ON(). This will allow killing only the offending VM instead of doing BUG() if the kernel is built with CONFIG_BUG_ON_DATA_CORRUPTION=n, i.e. does NOT want to BUG() if KVM's data structures (rmaps) appear to be corrupted. Signed-off-by: Mingwei Zhang <mizhang@google.com> [sean: tweak changelog] Link: https://lore.kernel.org/r/20230729004722.1056172-12-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent bc90c97 commit 472ba32

File tree

1 file changed

+18
-15
lines changed

1 file changed

+18
-15
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 18 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -965,7 +965,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
965965
return count;
966966
}
967967

968-
static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
968+
static void pte_list_desc_remove_entry(struct kvm *kvm,
969+
struct kvm_rmap_head *rmap_head,
969970
struct pte_list_desc *desc, int i)
970971
{
971972
struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
@@ -1001,7 +1002,8 @@ static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
10011002
mmu_free_pte_list_desc(head_desc);
10021003
}
10031004

1004-
static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
1005+
static void pte_list_remove(struct kvm *kvm, u64 *spte,
1006+
struct kvm_rmap_head *rmap_head)
10051007
{
10061008
struct pte_list_desc *desc;
10071009
int i;
@@ -1020,7 +1022,8 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
10201022
while (desc) {
10211023
for (i = 0; i < desc->spte_count; ++i) {
10221024
if (desc->sptes[i] == spte) {
1023-
pte_list_desc_remove_entry(rmap_head, desc, i);
1025+
pte_list_desc_remove_entry(kvm, rmap_head,
1026+
desc, i);
10241027
return;
10251028
}
10261029
}
@@ -1035,7 +1038,7 @@ static void kvm_zap_one_rmap_spte(struct kvm *kvm,
10351038
struct kvm_rmap_head *rmap_head, u64 *sptep)
10361039
{
10371040
mmu_spte_clear_track_bits(kvm, sptep);
1038-
pte_list_remove(sptep, rmap_head);
1041+
pte_list_remove(kvm, sptep, rmap_head);
10391042
}
10401043

10411044
/* Return true if at least one SPTE was zapped, false otherwise */
@@ -1110,7 +1113,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
11101113
slot = __gfn_to_memslot(slots, gfn);
11111114
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
11121115

1113-
pte_list_remove(spte, rmap_head);
1116+
pte_list_remove(kvm, spte, rmap_head);
11141117
}
11151118

11161119
/*
@@ -1757,16 +1760,16 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
17571760
pte_list_add(cache, parent_pte, &sp->parent_ptes);
17581761
}
17591762

1760-
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1763+
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
17611764
u64 *parent_pte)
17621765
{
1763-
pte_list_remove(parent_pte, &sp->parent_ptes);
1766+
pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
17641767
}
17651768

1766-
static void drop_parent_pte(struct kvm_mmu_page *sp,
1769+
static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
17671770
u64 *parent_pte)
17681771
{
1769-
mmu_page_remove_parent_pte(sp, parent_pte);
1772+
mmu_page_remove_parent_pte(kvm, sp, parent_pte);
17701773
mmu_spte_clear_no_track(parent_pte);
17711774
}
17721775

@@ -2481,7 +2484,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
24812484
if (child->role.access == direct_access)
24822485
return;
24832486

2484-
drop_parent_pte(child, sptep);
2487+
drop_parent_pte(vcpu->kvm, child, sptep);
24852488
kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
24862489
}
24872490
}
@@ -2499,7 +2502,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
24992502
drop_spte(kvm, spte);
25002503
} else {
25012504
child = spte_to_child_sp(pte);
2502-
drop_parent_pte(child, spte);
2505+
drop_parent_pte(kvm, child, spte);
25032506

25042507
/*
25052508
* Recursively zap nested TDP SPs, parentless SPs are
@@ -2530,13 +2533,13 @@ static int kvm_mmu_page_unlink_children(struct kvm *kvm,
25302533
return zapped;
25312534
}
25322535

2533-
static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp)
2536+
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
25342537
{
25352538
u64 *sptep;
25362539
struct rmap_iterator iter;
25372540

25382541
while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2539-
drop_parent_pte(sp, sptep);
2542+
drop_parent_pte(kvm, sp, sptep);
25402543
}
25412544

25422545
static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -2575,7 +2578,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
25752578
++kvm->stat.mmu_shadow_zapped;
25762579
*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
25772580
*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2578-
kvm_mmu_unlink_parents(sp);
2581+
kvm_mmu_unlink_parents(kvm, sp);
25792582

25802583
/* Zapping children means active_mmu_pages has become unstable. */
25812584
list_unstable = *nr_zapped;
@@ -2933,7 +2936,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
29332936
u64 pte = *sptep;
29342937

29352938
child = spte_to_child_sp(pte);
2936-
drop_parent_pte(child, sptep);
2939+
drop_parent_pte(vcpu->kvm, child, sptep);
29372940
flush = true;
29382941
} else if (pfn != spte_to_pfn(*sptep)) {
29392942
drop_spte(vcpu->kvm, sptep);

0 commit comments

Comments
 (0)