Commit 0387d79

KVM: x86/mmu: Fold all of make_spte()'s writable handling into one if-else
Now that make_spte() no longer uses a funky goto to bail out for a special case of its unsync handling, combine all of the unsync vs. writable logic into a single if-else statement.

No functional change intended.

Link: https://lore.kernel.org/r/20241011021051.1557902-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent cc7ed33 commit 0387d79

1 file changed: +4 −9 lines changed

arch/x86/kvm/mmu/spte.c

@@ -217,8 +217,6 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
         spte |= (u64)pfn << PAGE_SHIFT;
 
         if (pte_access & ACC_WRITE_MASK) {
-                spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;
-
                 /*
                  * Unsync shadow pages that are reachable by the new, writable
                  * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
@@ -233,16 +231,13 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                  * guaranteed by both the shadow MMU and the TDP MMU.
                  */
                 if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
-                    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch)) {
+                    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
                         wrprot = true;
-                        pte_access &= ~ACC_WRITE_MASK;
-                        spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
-                }
+                else
+                        spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
+                                spte_shadow_dirty_mask(spte);
         }
 
-        if (pte_access & ACC_WRITE_MASK)
-                spte |= spte_shadow_dirty_mask(spte);
-
         if (prefetch && !synchronizing)
                 spte = mark_spte_for_access_track(spte);
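For reference, here is the resulting writable-handling block in make_spte() after this commit, reconstructed from the diff above (the full rationale comment and the surrounding function body are elided):

        if (pte_access & ACC_WRITE_MASK) {
                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE.  Write-protect the SPTE if the page can't be unsync'd
                 * (see the full comment in the source).
                 */
                if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
                    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
                        wrprot = true;  /* write-protect: SPTE stays read-only */
                else
                        spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
                                spte_shadow_dirty_mask(spte);
        }

Setting spte_shadow_dirty_mask(spte) in the same branch that sets the writable bits is what allows the old standalone "if (pte_access & ACC_WRITE_MASK)" check, and the clearing of ACC_WRITE_MASK on the write-protect path, to be dropped: the dirty bit is applied exactly when the SPTE actually becomes writable, so the net behavior is unchanged.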