Commit a5da5dd
KVM: x86/mmu: Add a dedicated flag to track if A/D bits are globally enabled
Add a dedicated flag to track if KVM has enabled A/D bits at the module
level, instead of inferring the state based on whether or not the MMU's
shadow_accessed_mask is non-zero. This will allow defining and using
shadow_accessed_mask even when A/D bits aren't used by hardware.

Link: https://lore.kernel.org/r/20241011021051.1557902-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 1a17508 commit a5da5dd
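The crux of the change, as a minimal standalone C sketch (toy code, not KVM's actual implementation; the bit value is made up for illustration): the old helper inferred the A/D state from shadow_accessed_mask, so the mask could not carry a software-defined value while A/D bits were off; a dedicated flag decouples the two.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t shadow_accessed_mask;  /* may now be non-zero even when A/D is off */
    static bool kvm_ad_enabled;            /* the new dedicated module-level flag */

    /* Old approach: infer "A/D enabled" from the mask being non-zero. */
    static bool ad_enabled_old(void)
    {
            return !!shadow_accessed_mask;
    }

    int main(void)
    {
            /* Hardware A/D bits off, but a software-defined Accessed bit is
             * still wanted: expressible with the flag, not with the helper. */
            kvm_ad_enabled = false;
            shadow_accessed_mask = 1ull << 8;  /* hypothetical bit, illustration only */
            printf("old helper says %d, new flag says %d\n",
                   ad_enabled_old(), kvm_ad_enabled);
            return 0;
    }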

4 files changed, 20 insertions(+), 16 deletions(-)

arch/x86/kvm/mmu/mmu.c

Lines changed: 3 additions & 3 deletions

@@ -3357,7 +3357,7 @@ static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault
 	 * by setting the Writable bit, which can be done out of mmu_lock.
 	 */
 	if (!fault->present)
-		return !kvm_ad_enabled();
+		return !kvm_ad_enabled;
 
 	/*
 	 * Note, instruction fetches and writes are mutually exclusive, ignore
@@ -3492,7 +3492,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	 * uses A/D bits for non-nested MMUs.  Thus, if A/D bits are
 	 * enabled, the SPTE can't be an access-tracked SPTE.
 	 */
-	if (unlikely(!kvm_ad_enabled()) && is_access_track_spte(spte))
+	if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
 		new_spte = restore_acc_track_spte(new_spte);
 
 	/*
@@ -5469,7 +5469,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	role.efer_nx = true;
 	role.smm = cpu_role.base.smm;
 	role.guest_mode = cpu_role.base.guest_mode;
-	role.ad_disabled = !kvm_ad_enabled();
+	role.ad_disabled = !kvm_ad_enabled;
 	role.level = kvm_mmu_get_tdp_level(vcpu);
 	role.direct = true;
 	role.has_4_byte_gpte = false;
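A standalone sketch of the first hunk's logic (toy code, heavily simplified from the real page_fault_can_be_fast(); the struct below is a stand-in, not kvm_page_fault): a fault on a non-present SPTE can only be handled by the fast path when A/D bits are disabled, because only then can the SPTE be an access-tracked, deliberately non-present SPTE that the fast path knows how to restore.

    #include <stdbool.h>
    #include <stdio.h>

    static bool kvm_ad_enabled;

    struct page_fault {
            bool present;
            bool write;
    };

    /* Models the hunk: a non-present fault is fast-path material only
     * when KVM uses access tracking, i.e. only when A/D bits are off. */
    static bool page_fault_can_be_fast(const struct page_fault *fault)
    {
            if (!fault->present)
                    return !kvm_ad_enabled;
            return fault->write;  /* simplified stand-in for the real checks */
    }

    int main(void)
    {
            struct page_fault fault = { .present = false, .write = false };

            kvm_ad_enabled = true;
            printf("A/D on:  fast = %d\n", page_fault_can_be_fast(&fault));
            kvm_ad_enabled = false;
            printf("A/D off: fast = %d\n", page_fault_can_be_fast(&fault));
            return 0;
    }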

arch/x86/kvm/mmu/spte.c

Lines changed: 6 additions & 0 deletions

@@ -24,6 +24,8 @@ static bool __ro_after_init allow_mmio_caching;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
 EXPORT_SYMBOL_GPL(enable_mmio_caching);
 
+bool __read_mostly kvm_ad_enabled;
+
 u64 __read_mostly shadow_host_writable_mask;
 u64 __read_mostly shadow_mmu_writable_mask;
 u64 __read_mostly shadow_nx_mask;
@@ -414,6 +416,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
 
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 {
+	kvm_ad_enabled = has_ad_bits;
+
 	shadow_user_mask = VMX_EPT_READABLE_MASK;
 	shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
 	shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
@@ -447,6 +451,8 @@ void kvm_mmu_reset_all_pte_masks(void)
 	u8 low_phys_bits;
 	u64 mask;
 
+	kvm_ad_enabled = true;
+
 	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
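A minimal standalone sketch of the initialization order introduced here (toy code mirroring kvm_mmu_reset_all_pte_masks() and kvm_mmu_set_ept_masks(); the mask handling is simplified and the Accessed-bit values should be treated as assumptions): the flag defaults to true at module init, since legacy paging formats always have A/D bits, and is then overridden with the hardware capability when the EPT masks are configured.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool kvm_ad_enabled;
    static uint64_t shadow_accessed_mask;

    #define VMX_EPT_ACCESS_BIT (1ull << 8)  /* EPT Accessed bit */
    #define PT_ACCESSED_MASK   (1ull << 5)  /* x86 page-table Accessed bit */

    /* Mirrors kvm_mmu_reset_all_pte_masks(): default to enabled. */
    static void reset_all_pte_masks(void)
    {
            kvm_ad_enabled = true;
            shadow_accessed_mask = PT_ACCESSED_MASK;
    }

    /* Mirrors kvm_mmu_set_ept_masks(): EPT may lack A/D support, so the
     * flag is overridden with the hardware capability. */
    static void set_ept_masks(bool has_ad_bits)
    {
            kvm_ad_enabled = has_ad_bits;
            shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0;
    }

    int main(void)
    {
            reset_all_pte_masks();  /* module init: default to enabled */
            set_ept_masks(false);   /* EPT without A/D: flag flipped off */
            printf("A/D enabled: %d\n", kvm_ad_enabled);
            return 0;
    }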

arch/x86/kvm/mmu/spte.h

Lines changed: 9 additions & 11 deletions

@@ -167,6 +167,15 @@ static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));
 #define SHADOW_NONPRESENT_VALUE 0ULL
 #endif
 
+
+/*
+ * True if A/D bits are supported in hardware and are enabled by KVM.  When
+ * enabled, KVM uses A/D bits for all non-nested MMUs.  Because L1 can disable
+ * A/D bits in EPTP12, SP and SPTE variants are needed to handle the scenario
+ * where KVM is using A/D bits for L1, but not L2.
+ */
+extern bool __read_mostly kvm_ad_enabled;
+
 extern u64 __read_mostly shadow_host_writable_mask;
 extern u64 __read_mostly shadow_mmu_writable_mask;
 extern u64 __read_mostly shadow_nx_mask;
@@ -285,17 +294,6 @@ static inline bool is_ept_ve_possible(u64 spte)
 	       (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
 }
 
-/*
- * Returns true if A/D bits are supported in hardware and are enabled by KVM.
- * When enabled, KVM uses A/D bits for all non-nested MMUs.  Because L1 can
- * disable A/D bits in EPTP12, SP and SPTE variants are needed to handle the
- * scenario where KVM is using A/D bits for L1, but not L2.
- */
-static inline bool kvm_ad_enabled(void)
-{
-	return !!shadow_accessed_mask;
-}
-
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
 {
 	return sp->role.ad_disabled;
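To illustrate the comment's point about nesting, a hedged toy model (not kernel code; the struct and field are simplified stand-ins for kvm_mmu_page and role.ad_disabled): even with kvm_ad_enabled true globally, a shadow page built for an L2 whose EPTP12 disables A/D bits carries per-page state saying A/D is off, which is why the per-SP query survives the removal of the global helper.

    #include <stdbool.h>
    #include <stdio.h>

    static bool kvm_ad_enabled = true;  /* global: L1 uses A/D bits */

    struct shadow_page {
            bool ad_disabled;  /* models sp->role.ad_disabled */
    };

    /* Models sp_ad_disabled(): per-page state, not the global flag. */
    static bool sp_ad_disabled(const struct shadow_page *sp)
    {
            return sp->ad_disabled;
    }

    int main(void)
    {
            struct shadow_page l1_sp = { .ad_disabled = !kvm_ad_enabled };
            struct shadow_page l2_sp = { .ad_disabled = true };  /* EPTP12 has A/D off */

            printf("L1 A/D disabled: %d, L2 A/D disabled: %d\n",
                   sp_ad_disabled(&l1_sp), sp_ad_disabled(&l2_sp));
            return 0;
    }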

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 2 additions & 2 deletions

@@ -1075,7 +1075,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
 			   struct kvm_mmu_page *sp, bool shared)
 {
-	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
+	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled);
 	int ret = 0;
 
 	if (shared) {
@@ -1491,7 +1491,7 @@ static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
	 * from level, so it is valid to key off any shadow page to determine if
	 * write protection is needed for an entire tree.
	 */
-	return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+	return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled;
 }
 
 static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
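A standalone sketch of the second hunk's decision (toy code; the helper mirrors kvm_mmu_page_ad_need_write_protect() in spirit, but the types are simplified): dirty logging can rely on the hardware Dirty bit only when A/D bits are in use, so with A/D disabled, either globally or for this page, SPTEs must be write-protected so that guest writes fault and can be logged.

    #include <stdbool.h>
    #include <stdio.h>

    static bool kvm_ad_enabled;

    struct shadow_page {
            bool ad_need_write_protect;  /* per-page override, e.g. for nested L2 */
    };

    /* Models tdp_mmu_need_write_protect(): write protection is required
     * whenever the Dirty bit cannot be used for this page's tree. */
    static bool need_write_protect(const struct shadow_page *sp)
    {
            return sp->ad_need_write_protect || !kvm_ad_enabled;
    }

    int main(void)
    {
            struct shadow_page sp = { .ad_need_write_protect = false };

            kvm_ad_enabled = true;
            printf("A/D on:  write-protect = %d\n", need_write_protect(&sp));
            kvm_ad_enabled = false;
            printf("A/D off: write-protect = %d\n", need_write_protect(&sp));
            return 0;
    }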
