
Commit e54f1ff

kaihuang authored and bonzini committed
KVM: x86/mmu: Add shadow_me_value and repurpose shadow_me_mask
Intel Multi-Key Total Memory Encryption (MKTME) repurposes a couple of high physical address bits as 'KeyID' bits. Intel Trust Domain Extensions (TDX) further steals part of the MKTME KeyID bits as TDX private KeyID bits. TDX private KeyID bits cannot be set in any mapping in the host kernel, since they can only be accessed by software running inside a new CPU isolated mode. And unlike AMD's SME, the host kernel doesn't set any legacy MKTME KeyID bits in any mapping either. Therefore, it is not legitimate for KVM to set any KeyID bits in an SPTE which maps guest memory.

KVM maintains shadow_zero_check bits to represent which bits must be zero for an SPTE which maps guest memory. The MKTME KeyID bits should be added to shadow_zero_check. Currently, shadow_me_mask is used by AMD to set sme_me_mask in the SPTE, and shadow_me_mask is excluded from shadow_zero_check. So initializing shadow_me_mask to represent all MKTME KeyID bits doesn't work for VMX (where, on the contrary, those bits must be set in shadow_zero_check).

Introduce a new 'shadow_me_value' to replace the existing shadow_me_mask, and repurpose shadow_me_mask as 'all possible memory encryption bits'. The new scheme is:

- shadow_me_value: the memory encryption bit(s) that will be set in the SPTE (the original shadow_me_mask).

- shadow_me_mask: all possible memory encryption bits (a superset of shadow_me_value).

- For now, shadow_me_value is supposed to be set by SVM and VMX respectively, and it is a constant during KVM's lifetime. This perhaps doesn't fit MKTME, but for now the host kernel doesn't support it (and perhaps never will).

- Bits in shadow_me_mask are added to shadow_zero_check, except the bits in shadow_me_value.

Introduce a new helper kvm_mmu_set_me_spte_mask() to initialize them. Replace shadow_me_mask with shadow_me_value in almost all code paths, except the one in PT64_PERM_MASK, which is used by need_remote_flush() to determine whether a remote TLB flush is needed. That one should still use shadow_me_mask, as any encryption bit change should require a TLB flush. For AMD, move the initialization of shadow_me_value/shadow_me_mask from kvm_mmu_reset_all_pte_masks() to svm_hardware_setup().

Signed-off-by: Kai Huang <kai.huang@intel.com>
Message-Id: <f90964b93a3398b1cf1c56f510f3281e0709e2ab.1650363789.git.kai.huang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
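As a minimal sketch of the intended VMX-side usage (not part of this commit, and with a made-up KeyID range purely for illustration), such a caller would pass me_value = 0 so KVM never sets a KeyID bit in an SPTE, and put every KeyID bit in me_mask so they are all treated as reserved:

	/*
	 * Hypothetical VMX-side setup, assuming (for illustration only) that
	 * physical address bits 51:46 are repurposed as MKTME KeyID bits.
	 * me_value == 0: KVM never sets a KeyID bit in an SPTE.
	 * me_mask covers every KeyID bit, so they all end up in
	 * shadow_zero_check and are rejected if ever seen in an SPTE.
	 */
	kvm_mmu_set_me_spte_mask(0, GENMASK_ULL(51, 46));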
1 parent c919e88 commit e54f1ff

File tree

5 files changed: +34 −10 lines changed


arch/x86/kvm/mmu.h

Lines changed: 1 addition & 0 deletions
@@ -90,6 +90,7 @@ static inline gfn_t kvm_mmu_max_gfn(void)
 }
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
+void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu);

arch/x86/kvm/mmu/mmu.c

Lines changed: 12 additions & 4 deletions
@@ -3546,7 +3546,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * or a PAE 3-level page table. In either case we need to be aware that
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
-	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
+	pm_mask = PT_PRESENT_MASK | shadow_me_value;
 	if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
@@ -4531,8 +4531,16 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 		return;
 
 	for (i = context->root_role.level; --i >= 0;) {
-		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
-		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
+		/*
+		 * So far shadow_me_value is a constant during KVM's life
+		 * time.  Bits in shadow_me_value are allowed to be set.
+		 * Bits in shadow_me_mask but not in shadow_me_value are
+		 * not allowed to be set.
+		 */
+		shadow_zero_check->rsvd_bits_mask[0][i] |= shadow_me_mask;
+		shadow_zero_check->rsvd_bits_mask[1][i] |= shadow_me_mask;
+		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_value;
+		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_value;
 	}
 
 }
@@ -5624,7 +5632,7 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 	if (!tdp_enabled)
 		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
 	else
-		WARN_ON_ONCE(shadow_me_mask);
+		WARN_ON_ONCE(shadow_me_value);
 
 	for (i = 0; i < 4; ++i)
 		mmu->pae_root[i] = INVALID_PAE_ROOT;
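A worked example of the new reserved-bit logic in reset_shadow_zero_bits_mask(), with illustrative mask values that are not taken from real hardware:

	/*
	 * Illustrative values only.
	 *
	 * AMD SME-like: me_value == me_mask == BIT_ULL(47) (the C-bit).
	 *   rsvd |= BIT_ULL(47);  rsvd &= ~BIT_ULL(47);
	 *   => net effect is zero: the C-bit stays legal in SPTEs, as before.
	 *
	 * MKTME-like:   me_value == 0, me_mask == GENMASK_ULL(51, 46).
	 *   rsvd |= GENMASK_ULL(51, 46);  rsvd &= ~0;
	 *   => every KeyID bit becomes a reserved bit KVM must never set.
	 */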

arch/x86/kvm/mmu/spte.c

Lines changed: 17 additions & 6 deletions
@@ -33,6 +33,7 @@ u64 __read_mostly shadow_mmio_value;
 u64 __read_mostly shadow_mmio_mask;
 u64 __read_mostly shadow_mmio_access_mask;
 u64 __read_mostly shadow_present_mask;
+u64 __read_mostly shadow_me_value;
 u64 __read_mostly shadow_me_mask;
 u64 __read_mostly shadow_acc_track_mask;
 
@@ -167,8 +168,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
-	if (shadow_me_mask && !kvm_is_mmio_pfn(pfn))
-		spte |= shadow_me_mask;
+	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
+		spte |= shadow_me_value;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -284,7 +285,7 @@ u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
 	u64 spte = SPTE_MMU_PRESENT_MASK;
 
 	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
-		shadow_user_mask | shadow_x_mask | shadow_me_mask;
+		shadow_user_mask | shadow_x_mask | shadow_me_value;
 
 	if (ad_disabled)
 		spte |= SPTE_TDP_AD_DISABLED_MASK;
@@ -388,6 +389,17 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
+void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
+{
+	/* shadow_me_value must be a subset of shadow_me_mask */
+	if (WARN_ON(me_value & ~me_mask))
+		me_value = me_mask = 0;
+
+	shadow_me_value = me_value;
+	shadow_me_mask = me_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
+
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 {
 	shadow_user_mask = VMX_EPT_READABLE_MASK;
@@ -397,8 +409,6 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
 	shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
 	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
-	shadow_me_mask = 0ull;
-
 	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
 	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;
 
@@ -449,7 +459,8 @@ void kvm_mmu_reset_all_pte_masks(void)
 	shadow_x_mask = 0;
 	shadow_present_mask = PT_PRESENT_MASK;
 	shadow_acc_track_mask = 0;
-	shadow_me_mask = sme_me_mask;
+	shadow_me_mask = 0;
+	shadow_me_value = 0;
 
 	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
 	shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;
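The two call patterns the new helper is meant to support, as a sketch: the SVM one appears in the svm.c hunk below, while the MKTME-style one is hypothetical here (mktme_keyid_mask is a placeholder name, not an existing kernel symbol):

	/* AMD: the C-bit is both set in SPTEs and the only "possible" bit. */
	kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);

	/* Intel MKTME (hypothetical): never set KeyID bits, reserve them all. */
	kvm_mmu_set_me_spte_mask(0, mktme_keyid_mask);

	/*
	 * me_value must be a subset of me_mask; otherwise both are cleared
	 * with a WARN and memory encryption bits are ignored entirely.
	 */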

arch/x86/kvm/mmu/spte.h

Lines changed: 1 addition & 0 deletions
@@ -151,6 +151,7 @@ extern u64 __read_mostly shadow_mmio_value;
 extern u64 __read_mostly shadow_mmio_mask;
 extern u64 __read_mostly shadow_mmio_access_mask;
 extern u64 __read_mostly shadow_present_mask;
+extern u64 __read_mostly shadow_me_value;
 extern u64 __read_mostly shadow_me_mask;
 
 /*

arch/x86/kvm/svm/svm.c

Lines changed: 3 additions & 0 deletions
@@ -4892,6 +4892,9 @@ static __init int svm_hardware_setup(void)
 			  get_npt_level(), PG_LEVEL_1G);
 	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
 
+	/* Setup shadow_me_value and shadow_me_mask */
+	kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
+
 	/* Note, SEV setup consumes npt_enabled. */
 	sev_hardware_setup();
 