Skip to content
Permalink
Browse files
KVM: TDX: Add a placeholder to handle TDX VM exit
Wire up the handle_exit and handle_exit_irqoff methods and add a placeholder
to handle VM exits.  Add helper functions to get the exit info, exit
qualification, etc.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
  • Loading branch information
yamahata committed Dec 16, 2021
1 parent d89c79e commit 23fe81f995e342b4152daa56cad4105a72277d4f
Show file tree
Hide file tree
Showing 4 changed files with 125 additions and 3 deletions.
@@ -150,6 +150,23 @@ static void vt_vcpu_put(struct kvm_vcpu *vcpu)
return vmx_vcpu_put(vcpu);
}

/* Dispatch exit handling to the TDX or VMX implementation for this vCPU. */
static int vt_handle_exit(struct kvm_vcpu *vcpu,
			  enum exit_fastpath_completion fastpath)
{
	if (!is_td_vcpu(vcpu))
		return vmx_handle_exit(vcpu, fastpath);

	return tdx_handle_exit(vcpu, fastpath);
}

/* Run the irqs-off stage of exit handling for the vCPU's flavor (TD or VMX). */
static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu)) {
		vmx_handle_exit_irqoff(vcpu);
		return;
	}

	tdx_handle_exit_irqoff(vcpu);
}

static void vt_apicv_post_state_restore(struct kvm_vcpu *vcpu)
{
if (is_td_vcpu(vcpu))
@@ -278,6 +295,18 @@ static void vt_request_immediate_exit(struct kvm_vcpu *vcpu)
vmx_request_immediate_exit(vcpu);
}

/*
 * Fetch exit information from whichever backend (TDX or VMX) produced
 * the last VM exit for @vcpu.
 */
static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
			     u64 *info1, u64 *info2, u32 *intr_info,
			     u32 *error_code)
{
	if (!is_td_vcpu(vcpu)) {
		vmx_get_exit_info(vcpu, reason, info1, info2, intr_info,
				  error_code);
		return;
	}

	tdx_get_exit_info(vcpu, reason, info1, info2, intr_info, error_code);
}

static int vt_pre_block(struct kvm_vcpu *vcpu)
{
if (pi_pre_block(vcpu))
@@ -374,7 +403,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.tlb_flush_guest = vt_flush_tlb_guest,

.run = vt_vcpu_run,
.handle_exit = vmx_handle_exit,
.handle_exit = vt_handle_exit,
.skip_emulated_instruction = vmx_skip_emulated_instruction,
.update_emulated_instruction = vmx_update_emulated_instruction,
.set_interrupt_shadow = vt_set_interrupt_shadow,
@@ -408,7 +437,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.set_identity_map_addr = vmx_set_identity_map_addr,
.get_mt_mask = vmx_get_mt_mask,

.get_exit_info = vmx_get_exit_info,
.get_exit_info = vt_get_exit_info,

.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

@@ -422,7 +451,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.load_mmu_pgd = vt_load_mmu_pgd,

.check_intercept = vmx_check_intercept,
.handle_exit_irqoff = vmx_handle_exit_irqoff,
.handle_exit_irqoff = vt_handle_exit_irqoff,

.request_immediate_exit = vt_request_immediate_exit,

@@ -119,6 +119,26 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
{.msr = MSR_TSC_AUX,},
};

/*
 * Exit qualification for the last TD exit, read from the guest RCX slot
 * (presumably where the TDX module reports it on TDH.VP.ENTER return --
 * confirm against the TDX module ABI).
 */
static __always_inline unsigned long tdexit_exit_qual(struct kvm_vcpu *vcpu)
{
	return kvm_rcx_read(vcpu);
}

/* Extended exit qualification for the last TD exit, from the RDX slot. */
static __always_inline unsigned long tdexit_ext_exit_qual(struct kvm_vcpu *vcpu)
{
	return kvm_rdx_read(vcpu);
}

/* Guest physical address associated with the last TD exit, from the R8 slot. */
static __always_inline unsigned long tdexit_gpa(struct kvm_vcpu *vcpu)
{
	return kvm_r8_read(vcpu);
}

/* Interrupt/exception information for the last TD exit, from the R9 slot. */
static __always_inline unsigned long tdexit_intr_info(struct kvm_vcpu *vcpu)
{
	return kvm_r9_read(vcpu);
}

static inline bool is_td_vcpu_created(struct vcpu_tdx *tdx)
{
return tdx->tdvpr.added;
@@ -798,6 +818,25 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE;
}

void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_tdx *tdx = to_tdx(vcpu);
u16 exit_reason = tdx->exit_reason.basic;

if (exit_reason == EXIT_REASON_EXCEPTION_NMI)
vmx_handle_exception_nmi_irqoff(vcpu, tdexit_intr_info(vcpu));
else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
vmx_handle_external_interrupt_irqoff(vcpu,
tdexit_intr_info(vcpu));
}

/*
 * Report a guest triple fault to userspace as a shutdown event.
 * Returns 0 so the caller exits to userspace.
 */
static int tdx_handle_triple_fault(struct kvm_vcpu *vcpu)
{
	vcpu->mmio_needed = 0;
	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
{
td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa & PAGE_MASK);
@@ -999,6 +1038,50 @@ int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
return 0;
}

/*
 * Placeholder TD-exit handler, reached via vt_handle_exit().  For now
 * only triple fault gets dedicated handling; every other exit is
 * forwarded to userspace as KVM_EXIT_UNKNOWN (all paths return 0,
 * i.e. exit to userspace).
 */
int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
{
	union tdx_exit_reason exit_reason = to_tdx(vcpu)->exit_reason;

	/*
	 * Exits flagged erroneous or non-recoverable cannot be resumed;
	 * dump the raw exit state for debugging before punting.
	 */
	if (unlikely(exit_reason.non_recoverable || exit_reason.error)) {
		kvm_pr_unimpl("TD exit 0x%llx, %d qual 0x%lx ext 0x%lx gpa 0x%lx intr 0x%lx\n",
			      exit_reason.full, exit_reason.basic,
			      tdexit_exit_qual(vcpu),
			      tdexit_ext_exit_qual(vcpu),
			      tdexit_gpa(vcpu),
			      tdexit_intr_info(vcpu));
		if (exit_reason.basic == EXIT_REASON_TRIPLE_FAULT)
			return tdx_handle_triple_fault(vcpu);

		goto unhandled_exit;
	}

	/* No fastpath handling exists for TDX exits yet. */
	WARN_ON_ONCE(fastpath != EXIT_FASTPATH_NONE);

	/* Placeholder dispatch; per-reason handlers are added by later patches. */
	switch (exit_reason.basic) {
	default:
		break;
	}
	/* Falls through: anything not handled above is reported as unknown. */

unhandled_exit:
	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
	vcpu->run->hw.hardware_exit_reason = exit_reason.full;
	return 0;
}

/*
 * Provide exit details for the common get_exit_info hook (tracing/debug).
 * TDX never exposes a VM-exit error code, so *error_code is always 0.
 *
 * NOTE(review): @reason is u32 while exit_reason.full is u64, so the
 * upper 32 bits (which carry flags such as error/non_recoverable) are
 * truncated here -- confirm this is acceptable for consumers.
 */
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
{
	*reason = to_tdx(vcpu)->exit_reason.full;
	*info1 = tdexit_exit_qual(vcpu);
	*info2 = tdexit_ext_exit_qual(vcpu);
	*intr_info = tdexit_intr_info(vcpu);
	*error_code = 0;
}

int tdx_dev_ioctl(void __user *argp)
{
struct kvm_tdx_capabilities __user *user_caps;
@@ -14,9 +14,14 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu) { return EXIT_FASTPATH_NONE; }
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
/* Stubs used when TDX support is compiled out: exit handling is a no-op. */
void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu) {}
int tdx_handle_exit(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion fastpath) { return 0; }

void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu) {}
int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { return 0; }
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code) {}

int tdx_dev_ioctl(void __user *argp) { return -EOPNOTSUPP; }
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
@@ -138,9 +138,14 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu);
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
int tdx_handle_exit(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion fastpath);

void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu);
int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector);
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);

int tdx_dev_ioctl(void __user *argp);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);

0 comments on commit 23fe81f

Please sign in to comment.