Skip to content
Permalink
Browse files
KVM: TDX: Implement TLB flush operation for TDX
Before removing guest pages from the EPT, the TLB must be flushed.  Implement
the TLB flush operation for TDX.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
  • Loading branch information
yamahata committed Dec 15, 2021
1 parent e0ed0e2 commit dbf2e415a636bba18795eea694e38ea0f1d595a6
Show file tree
Hide file tree
Showing 4 changed files with 55 additions and 4 deletions.
@@ -110,6 +110,38 @@ static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
return vmx_vcpu_reset(vcpu, init_event);
}

/* Full TLB flush: route TD vCPUs to the TDX flush path, legacy VMs to VMX. */
static void vt_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		tdx_flush_tlb(vcpu);
		return;
	}

	vmx_flush_tlb_all(vcpu);
}

/*
 * Current-context TLB flush: TDX has no finer-grained flush, so a TD vCPU
 * takes the same tdx_flush_tlb() path as a full flush.
 */
static void vt_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	if (is_td_vcpu(vcpu)) {
		tdx_flush_tlb(vcpu);
		return;
	}

	vmx_flush_tlb_current(vcpu);
}

/*
 * Per-GVA flush.  This must never be requested for a TD vCPU; flag that as
 * a KVM bug and do nothing, otherwise hand off to VMX.
 */
static void vt_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
	if (!KVM_BUG_ON(is_td_vcpu(vcpu), vcpu->kvm))
		vmx_flush_tlb_gva(vcpu, addr);
}

/* Guest TLB flush: a no-op for TD vCPUs, delegated to VMX otherwise. */
static void vt_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu))
		vmx_flush_tlb_guest(vcpu);
}

static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int pgd_level)
{
@@ -190,10 +222,10 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,

.tlb_flush_all = vmx_flush_tlb_all,
.tlb_flush_current = vmx_flush_tlb_current,
.tlb_flush_gva = vmx_flush_tlb_gva,
.tlb_flush_guest = vmx_flush_tlb_guest,
.tlb_flush_all = vt_flush_tlb_all,
.tlb_flush_current = vt_flush_tlb_current,
.tlb_flush_gva = vt_flush_tlb_gva,
.tlb_flush_guest = vt_flush_tlb_guest,

.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
@@ -5,11 +5,13 @@
#include "capabilities.h"
#include "tdx_errno.h"
#include "tdx_ops.h"
#include "vmx_ops.h"
#include "x86_ops.h"
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "tdx.h"
#include "vmx.h"

#include <trace/events/kvm.h>
#include "trace.h"
@@ -967,6 +969,21 @@ static int tdx_td_init(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
return ret;
}

/*
 * Flush TLB entries for a TD vCPU.
 *
 * KVM can only directly flush the shared EPT; the private half is managed
 * by the TDX module.  After flushing the shared EPTP (if one has been
 * loaded), spin until kvm_tdx->tdh_mem_track is cleared.
 *
 * NOTE(review): the writer of tdh_mem_track is not visible in this hunk —
 * presumably another context sets it while issuing a TDH.MEM.TRACK epoch
 * advance, and waiting here keeps this vCPU from re-entering the TD before
 * that tracking cycle completes.  Confirm against the tdh_mem_track writer.
 */
void tdx_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u64 root_hpa = mmu->root_hpa;

	/* Flush the shared EPTP, if it's valid. */
	if (VALID_PAGE(root_hpa))
		ept_sync_context(construct_eptp(vcpu, root_hpa,
						mmu->shadow_root_level));

	/* Busy-wait (with PAUSE) until the in-flight tracking cycle ends. */
	while (READ_ONCE(kvm_tdx->tdh_mem_track))
		cpu_relax();
}

/* A GPA is private iff its shared bit (per-VM gfn_shared_mask) is clear. */
static inline bool tdx_is_private_gpa(struct kvm *kvm, gpa_t gpa)
{
	return ((gpa >> PAGE_SHIFT) & kvm->arch.gfn_shared_mask) == 0;
}
@@ -15,4 +15,5 @@ int tdx_dev_ioctl(void __user *argp) { return -EOPNOTSUPP; }
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -ENOPNOTSUPP; }

/* No-op stubs — presumably built when TDX support is compiled out; confirm. */
void tdx_flush_tlb(struct kvm_vcpu *vcpu) {}
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
@@ -140,6 +140,7 @@ int tdx_dev_ioctl(void __user *argp);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);

void tdx_flush_tlb(struct kvm_vcpu *vcpu);
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

#endif /* __KVM_X86_VMX_X86_OPS_H */

0 comments on commit dbf2e41

Please sign in to comment.