@@ -178,7 +178,6 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mt_mask;
 
 static inline u64 rsvd_bits(int s, int e)
 {
@@ -199,14 +198,13 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
-	shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1608,7 +1606,7 @@ static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
 	return mtrr_state->def_type;
 }
 
-static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	u8 mtrr;
 
@@ -1618,6 +1616,7 @@ static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 		mtrr = MTRR_TYPE_WRBACK;
 	return mtrr;
 }
+EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1670,7 +1669,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	u64 spte;
 	int ret = 0;
-	u64 mt_mask = shadow_mt_mask;
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1690,16 +1688,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
-	if (mt_mask) {
-		if (!kvm_is_mmio_pfn(pfn)) {
-			mt_mask = get_memory_type(vcpu, gfn) <<
-				  kvm_x86_ops->get_mt_mask_shift();
-			mt_mask |= VMX_EPT_IGMT_BIT;
-		} else
-			mt_mask = MTRR_TYPE_UNCACHABLE <<
-				  kvm_x86_ops->get_mt_mask_shift();
-		spte |= mt_mask;
-	}
+	if (tdp_enabled)
+		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+			kvm_is_mmio_pfn(pfn));
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
0 commit comments