Commit ffe9d79

KVM: x86/mmu: Locally cache whether a PFN is host MMIO when making a SPTE
When making a SPTE, cache whether or not the target PFN is host MMIO in
order to avoid multiple rounds of the slow path of kvm_is_mmio_pfn(),
e.g. hitting pat_pfn_immune_to_uc_mtrr() in particular can be problematic.
KVM currently avoids multiple calls by virtue of the two users being
mutually exclusive (.get_mt_mask() is Intel-only, shadow_me_value is
AMD-only), but that won't hold true if/when KVM needs to detect host MMIO
mappings for other reasons, e.g. for mitigating the MMIO Stale Data
vulnerability.

No functional change intended.

Tested-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Link: https://lore.kernel.org/r/20250523011756.3243624-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent: c126b46

1 file changed: 18 additions, 4 deletions

arch/x86/kvm/mmu/spte.c

@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 	return spte;
 }
 
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,19 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
+static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
+{
+	/*
+	 * Determining if a PFN is host MMIO is relatively expensive. Cache the
+	 * result locally (in the sole caller) to avoid doing the full query
+	 * multiple times when creating a single SPTE.
+	 */
+	if (*is_host_mmio < 0)
+		*is_host_mmio = __kvm_is_mmio_pfn(pfn);
+
+	return *is_host_mmio;
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +175,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
+	int is_host_mmio = -1;
 	bool wrprot = false;
 
 	/*
@@ -210,14 +224,14 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= PT_PAGE_SIZE_MASK;
 
 	if (kvm_x86_ops.get_mt_mask)
-		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
-
+		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+						  kvm_is_mmio_pfn(pfn, &is_host_mmio));
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
-	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
+	if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
 		spte |= shadow_me_value;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
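
For illustration, the tri-state caching idiom the patch introduces can be
sketched in isolation. The following is a minimal userspace C sketch, not
KVM code: slow_is_mmio() and its PFN threshold are hypothetical stand-ins
for __kvm_is_mmio_pfn().

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the expensive query (__kvm_is_mmio_pfn() in
 * the patch); the threshold here is an arbitrary placeholder.
 */
static bool slow_is_mmio(unsigned long pfn)
{
	printf("slow path queried for pfn 0x%lx\n", pfn);
	return pfn >= 0x100000;
}

/*
 * The patch's idiom: a caller-owned int starts at -1 ("not yet queried");
 * the first call latches the boolean answer, and later calls return it
 * without re-running the slow path.
 */
static bool cached_is_mmio(unsigned long pfn, int *is_mmio)
{
	if (*is_mmio < 0)
		*is_mmio = slow_is_mmio(pfn);

	return *is_mmio;
}

int main(void)
{
	unsigned long pfn = 0x123456;
	int is_mmio = -1;	/* one cache per SPTE-creation operation */

	/* The first consumer triggers the slow path... */
	printf("first check:  %d\n", cached_is_mmio(pfn, &is_mmio));
	/* ...the second is answered from the local cache. */
	printf("second check: %d\n", cached_is_mmio(pfn, &is_mmio));
	return 0;
}

Using an int rather than a bool is what lets -1 encode "unknown": if
neither consumer runs (no .get_mt_mask() hook and no shadow_me_value),
the slow query is skipped entirely.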
