@@ -965,7 +965,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 	return count;
 }
 
-static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+static void pte_list_desc_remove_entry(struct kvm *kvm,
+				       struct kvm_rmap_head *rmap_head,
 				       struct pte_list_desc *desc, int i)
 {
 	struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
@@ -1001,7 +1002,8 @@ static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
 	mmu_free_pte_list_desc(head_desc);
 }
 
-static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
+static void pte_list_remove(struct kvm *kvm, u64 *spte,
+			    struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
 	int i;
@@ -1020,7 +1022,8 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
 	while (desc) {
 		for (i = 0; i < desc->spte_count; ++i) {
 			if (desc->sptes[i] == spte) {
-				pte_list_desc_remove_entry(rmap_head, desc, i);
+				pte_list_desc_remove_entry(kvm, rmap_head,
+							   desc, i);
 				return;
 			}
 		}
@@ -1035,7 +1038,7 @@ static void kvm_zap_one_rmap_spte(struct kvm *kvm,
 				  struct kvm_rmap_head *rmap_head, u64 *sptep)
 {
 	mmu_spte_clear_track_bits(kvm, sptep);
-	pte_list_remove(sptep, rmap_head);
+	pte_list_remove(kvm, sptep, rmap_head);
 }
 
 /* Return true if at least one SPTE was zapped, false otherwise */
@@ -1110,7 +1113,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	slot = __gfn_to_memslot(slots, gfn);
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
-	pte_list_remove(spte, rmap_head);
+	pte_list_remove(kvm, spte, rmap_head);
 }
 
 /*
@@ -1757,16 +1760,16 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
 	pte_list_add(cache, parent_pte, &sp->parent_ptes);
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
+static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 				       u64 *parent_pte)
 {
-	pte_list_remove(parent_pte, &sp->parent_ptes);
+	pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
 }
 
-static void drop_parent_pte(struct kvm_mmu_page *sp,
+static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			    u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(sp, parent_pte);
+	mmu_page_remove_parent_pte(kvm, sp, parent_pte);
 	mmu_spte_clear_no_track(parent_pte);
 }
 
@@ -2481,7 +2484,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (child->role.access == direct_access)
 			return;
 
-		drop_parent_pte(child, sptep);
+		drop_parent_pte(vcpu->kvm, child, sptep);
 		kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
 	}
 }
@@ -2499,7 +2502,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			drop_spte(kvm, spte);
 		} else {
 			child = spte_to_child_sp(pte);
-			drop_parent_pte(child, spte);
+			drop_parent_pte(kvm, child, spte);
 
 			/*
 			 * Recursively zap nested TDP SPs, parentless SPs are
@@ -2530,13 +2533,13 @@ static int kvm_mmu_page_unlink_children(struct kvm *kvm,
 	return zapped;
 }
 
-static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp)
+static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 
 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
-		drop_parent_pte(sp, sptep);
+		drop_parent_pte(kvm, sp, sptep);
 }
 
 static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -2575,7 +2578,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	++kvm->stat.mmu_shadow_zapped;
 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
-	kvm_mmu_unlink_parents(sp);
+	kvm_mmu_unlink_parents(kvm, sp);
 
 	/* Zapping children means active_mmu_pages has become unstable. */
 	list_unstable = *nr_zapped;
@@ -2933,7 +2936,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 			u64 pte = *sptep;
 
 			child = spte_to_child_sp(pte);
-			drop_parent_pte(child, sptep);
+			drop_parent_pte(vcpu->kvm, child, sptep);
 			flush = true;
 		} else if (pfn != spte_to_pfn(*sptep)) {
 			drop_spte(vcpu->kvm, sptep);
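
For quick reference, a summary sketch of the helper prototypes as they stand after this change, assembled from the hunks above (not an additional hunk of the patch); callers that run in vCPU context, such as validate_direct_spte() and mmu_set_spte(), pass vcpu->kvm:

/* Prototypes after plumbing 'struct kvm' through the rmap removal path. */
static void pte_list_desc_remove_entry(struct kvm *kvm,
				       struct kvm_rmap_head *rmap_head,
				       struct pte_list_desc *desc, int i);
static void pte_list_remove(struct kvm *kvm, u64 *spte,
			    struct kvm_rmap_head *rmap_head);
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
				       u64 *parent_pte);
static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *parent_pte);
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp);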