KVM: x86/tdp_mmu: implement MapGPA hypercall for TDX
The TDX Guest-Hypervisor Communication Interface (GHCI) specification
defines the MapGPA hypercall, which a guest TD uses to request that the host
VMM map a given GPA range as private or shared.

The hypercall declares that the guest TD will use the given GPA range as
shared (or private) and will no longer use it as private (or shared).  The
VMM should enforce that usage, but it does not have to actually map the GPA
range when handling the hypercall.  This patch handles the request as
follows (a rough caller-side sketch appears after the list):

- Zap the aliased region.
  If a shared (or private) GPA is requested, zap the corresponding private
  (or shared) GPA (modulo the shared bit).
- Record that the requested GPA is shared (or private) in
  kvm.mem_attr_array.
- Don't map the GPA.  The GPA is mapped on the next EPT violation.
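
For illustration only (not part of this patch), a minimal sketch of how a
MapGPA vmcall handler might drive kvm_mmu_map_gpa() is shown below.  The
helper name tdx_handle_map_gpa(), deriving map_private from the shared bit
of the guest-supplied GPA, and the simple host-side retry on -EAGAIN are
assumptions; an actual handler might instead return the updated GPA to the
guest so that the guest retries the hypercall.

	/* Hypothetical caller sketch; only kvm_mmu_map_gpa() comes from this patch. */
	static int tdx_handle_map_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 size)
	{
		struct kvm *kvm = vcpu->kvm;
		/* The shared bit in the guest-supplied GPA selects shared vs. private. */
		bool map_private = !(gpa_to_gfn(gpa) & kvm_gfn_shared_mask(kvm));
		gfn_t start = gpa_to_gfn(gpa);
		gfn_t end = gpa_to_gfn(gpa + size);
		int ret;

		do {
			/* On -EAGAIN, start has been advanced past converted GFNs. */
			ret = kvm_mmu_map_gpa(vcpu, &start, end, map_private);
		} while (ret == -EAGAIN);

		return ret;
	}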

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
yamahata committed Aug 7, 2022
1 parent b77b8c1 commit 1293aaf
Showing 4 changed files with 96 additions and 0 deletions.
3 changes: 3 additions & 0 deletions arch/x86/kvm/mmu.h
@@ -217,6 +217,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t *startp, gfn_t end,
		    bool map_private);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

50 changes: 50 additions & 0 deletions arch/x86/kvm/mmu/mmu.c
@@ -6761,6 +6761,56 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
	}
}

int kvm_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t *startp, gfn_t end,
		    bool map_private)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t start = *startp;
	int attr;
	int ret;

	if (!kvm_gfn_shared_mask(kvm))
		return -EOPNOTSUPP;

	attr = map_private ? KVM_MEM_ATTR_PRIVATE : KVM_MEM_ATTR_SHARED;
	/* Work on GFNs with the shared bit stripped. */
	start = start & ~kvm_gfn_shared_mask(kvm);
	end = end & ~kvm_gfn_shared_mask(kvm);

	/*
	 * Reserve the entries in advance so that the kvm_vm_set_mem_attr()
	 * calls below can succeed under the spinlock without allocating
	 * memory.
	 */
	ret = kvm_vm_reserve_mem_attr(kvm, start, end);
	if (ret)
		return ret;

	write_lock(&kvm->mmu_lock);
	if (is_tdp_mmu_enabled(kvm)) {
		gfn_t s = start;

		ret = kvm_tdp_mmu_map_gpa(vcpu, &s, end, map_private);
		if (!ret) {
			WARN_ON(kvm_vm_set_mem_attr(kvm, attr, start, end));
		} else if (ret == -EAGAIN) {
			/* Record only the range that was actually converted. */
			WARN_ON(kvm_vm_set_mem_attr(kvm, attr, start, s));
			start = s;
		}
	} else {
		ret = -EOPNOTSUPP;
	}
	write_unlock(&kvm->mmu_lock);

	if (ret == -EAGAIN) {
		/*
		 * Tell the caller where to resume, with the shared bit set or
		 * cleared to match the request.
		 */
		if (map_private)
			*startp = kvm_gfn_private(kvm, start);
		else
			*startp = kvm_gfn_shared(kvm, start);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mmu_map_gpa);

static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
40 changes: 40 additions & 0 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -2086,6 +2086,46 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
	return spte_set;
}

int kvm_tdp_mmu_map_gpa(struct kvm_vcpu *vcpu,
			gfn_t *startp, gfn_t end, bool map_private)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;
	gfn_t start = *startp;
	bool flush = false;
	int i;

	lockdep_assert_held_write(&kvm->mmu_lock);
	WARN_ON(start & kvm_gfn_shared_mask(kvm));
	WARN_ON(end & kvm_gfn_shared_mask(kvm));

	if (!VALID_PAGE(mmu->root.hpa) || !VALID_PAGE(mmu->private_root_hpa))
		return -EINVAL;

	kvm_inc_notifier_count(kvm, start, end);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for_each_tdp_mmu_root_yield_safe(kvm, root, i) {
			/* Zap only the aliased roots, i.e. those of the other type. */
			if (is_private_sp(root) == map_private)
				continue;

			/*
			 * TODO: If necessary, return to the caller with -EAGAIN
			 * instead of yield-and-resume within
			 * tdp_mmu_zap_leafs().
			 */
			flush = tdp_mmu_zap_leafs(kvm, root, start, end,
						  /*can_yield=*/false, flush,
						  /*zap_private=*/is_private_sp(root));
		}
	}
	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
	kvm_dec_notifier_count(kvm, start, end);

	return 0;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
3 changes: 3 additions & 0 deletions arch/x86/kvm/mmu/tdp_mmu.h
@@ -51,6 +51,9 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				       gfn_t start, gfn_t end,
				       int target_level, bool shared);

int kvm_tdp_mmu_map_gpa(struct kvm_vcpu *vcpu,
			gfn_t *startp, gfn_t end, bool map_private);

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
