Skip to content

Commit 67c9380

Browse files
committed
KVM: x86/mmu: Fold mmu_spte_update_no_track() into mmu_spte_update()
Fold the guts of mmu_spte_update_no_track() into mmu_spte_update() now that the latter doesn't flush when clearing A/D bits, i.e. now that there is no need to explicitly avoid TLB flushes when aging SPTEs.

Opportunistically WARN if mmu_spte_update() requests a TLB flush when aging SPTEs, as aging should never modify a SPTE in such a way that KVM thinks a TLB flush is needed.

Link: https://lore.kernel.org/r/20241011021051.1557902-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 0103441 commit 67c9380

File tree

1 file changed

+21
-29
lines changed

1 file changed

+21
-29
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 21 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -485,32 +485,6 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
485485
__set_spte(sptep, new_spte);
486486
}
487487

488-
/*
489-
* Update the SPTE (excluding the PFN), but do not track changes in its
490-
* accessed/dirty status.
491-
*/
492-
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
493-
{
494-
u64 old_spte = *sptep;
495-
496-
WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
497-
check_spte_writable_invariants(new_spte);
498-
499-
if (!is_shadow_present_pte(old_spte)) {
500-
mmu_spte_set(sptep, new_spte);
501-
return old_spte;
502-
}
503-
504-
if (!spte_has_volatile_bits(old_spte))
505-
__update_clear_spte_fast(sptep, new_spte);
506-
else
507-
old_spte = __update_clear_spte_slow(sptep, new_spte);
508-
509-
WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
510-
511-
return old_spte;
512-
}
513-
514488
/* Rules for using mmu_spte_update:
515489
* Update the state bits, it means the mapped pfn is not changed.
516490
*
@@ -535,10 +509,23 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
535509
*/
536510
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
537511
{
538-
u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
512+
u64 old_spte = *sptep;
539513

540-
if (!is_shadow_present_pte(old_spte))
514+
WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
515+
check_spte_writable_invariants(new_spte);
516+
517+
if (!is_shadow_present_pte(old_spte)) {
518+
mmu_spte_set(sptep, new_spte);
541519
return false;
520+
}
521+
522+
if (!spte_has_volatile_bits(old_spte))
523+
__update_clear_spte_fast(sptep, new_spte);
524+
else
525+
old_spte = __update_clear_spte_slow(sptep, new_spte);
526+
527+
WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
528+
spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
542529

543530
return is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte);
544531
}
@@ -1598,8 +1585,13 @@ static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
15981585
clear_bit((ffs(shadow_accessed_mask) - 1),
15991586
(unsigned long *)sptep);
16001587
} else {
1588+
/*
1589+
* WARN if mmu_spte_update() signals the need
1590+
* for a TLB flush, as Access tracking a SPTE
1591+
* should never trigger an _immediate_ flush.
1592+
*/
16011593
spte = mark_spte_for_access_track(spte);
1602-
mmu_spte_update_no_track(sptep, spte);
1594+
WARN_ON_ONCE(mmu_spte_update(sptep, spte));
16031595
}
16041596
young = true;
16051597
}

0 commit comments

Comments (0)