Skip to content
Permalink
Browse files
KVM: TDX: Implement methods to inject NMI
The TDX vcpu control structure defines a pending-NMI bit that the VMM sets to
inject an NMI, without needing to know the TDX vcpu's NMI state.  Because the
vcpu state is protected, the VMM cannot observe the NMI state of a TDX vcpu.
The TDX module handles the actual injection and the NMI state transitions.

Add methods for NMI handling and treat NMIs as always injectable.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
  • Loading branch information
yamahata committed Dec 16, 2021
1 parent e57ab20 commit e5add375b2478be4f63bfc75497bfaae0d514fa0
Show file tree
Hide file tree
Showing 4 changed files with 64 additions and 5 deletions.
@@ -251,6 +251,58 @@ static void vt_flush_tlb_guest(struct kvm_vcpu *vcpu)
vmx_flush_tlb_guest(vcpu);
}

/*
 * Inject an NMI into the guest.  For a TD vcpu, hand the request to the TDX
 * module (which owns the real NMI state); otherwise use the VMX path.
 */
static void vt_inject_nmi(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu)) {
		vmx_inject_nmi(vcpu);
		return;
	}

	tdx_inject_nmi(vcpu);
}

/*
 * Report whether an NMI can be injected right now.
 */
static int vt_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	if (!is_td_vcpu(vcpu))
		return vmx_nmi_allowed(vcpu, for_injection);

	/*
	 * TDX-SEAM manages NMI windows and NMI reinjection, and hides NMI
	 * blocking, all KVM can do is throw an NMI over the wall.
	 */
	return true;
}

/*
 * Return whether NMIs are currently masked for the vcpu.
 */
static bool vt_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu))
		return vmx_get_nmi_mask(vcpu);

	/*
	 * Assume NMIs are always unmasked.  KVM could query PEND_NMI and treat
	 * NMIs as masked if a previous NMI is still pending, but SEAMCALLs are
	 * expensive and the end result is unchanged as the only relevant usage
	 * of get_nmi_mask() is to limit the number of pending NMIs, i.e. it
	 * only changes whether KVM or TDX-SEAM drops an NMI.
	 */
	return false;
}

/*
 * Set the NMI-masked state.  A no-op for TD vcpus: the TDX module owns the
 * NMI state and KVM cannot influence masking.
 */
static void vt_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	if (!is_td_vcpu(vcpu))
		vmx_set_nmi_mask(vcpu, masked);
}

/*
 * Request an NMI-window exit so a pending NMI can be delivered later.
 */
static void vt_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	/* TDX-SEAM handles NMI windows, KVM always reports NMIs as unblocked. */
	if (!is_td_vcpu(vcpu))
		vmx_enable_nmi_window(vcpu);
}

static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int pgd_level)
{
@@ -438,14 +490,14 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.get_interrupt_shadow = vt_get_interrupt_shadow,
.patch_hypercall = vmx_patch_hypercall,
.set_irq = vt_inject_irq,
.set_nmi = vmx_inject_nmi,
.set_nmi = vt_inject_nmi,
.queue_exception = vmx_queue_exception,
.cancel_injection = vt_cancel_injection,
.interrupt_allowed = vt_interrupt_allowed,
.nmi_allowed = vmx_nmi_allowed,
.get_nmi_mask = vmx_get_nmi_mask,
.set_nmi_mask = vmx_set_nmi_mask,
.enable_nmi_window = vmx_enable_nmi_window,
.nmi_allowed = vt_nmi_allowed,
.get_nmi_mask = vt_get_nmi_mask,
.set_nmi_mask = vt_set_nmi_mask,
.enable_nmi_window = vt_enable_nmi_window,
.enable_irq_window = vt_enable_irq_window,
.update_cr8_intercept = vmx_update_cr8_intercept,
.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
@@ -1200,6 +1200,11 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
return 1;
}

/*
 * Request NMI injection for a TD vcpu.  The vcpu's NMI state is opaque to
 * KVM, so all KVM does is set the PEND_NMI bit in the vcpu management
 * structure; the TDX module performs the actual injection and the state
 * transitions.
 */
void tdx_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_tdx *tdx = to_tdx(vcpu);

	td_management_write8(tdx, TD_VCPU_PEND_NMI, 1);
}

/*
 * Load the guest's MMU root by writing the page-aligned root HPA into the
 * shared EPT pointer field of the TD VMCS.  pgd_level is unused here; the
 * EPT level is presumably fixed by the TDX configuration — TODO confirm.
 */
void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
{
	hpa_t root = root_hpa & PAGE_MASK;

	td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root);
}
@@ -10,6 +10,7 @@ void tdx_hardware_disable(void) {}
/*
 * No-op stubs used when TDX support is compiled out (NOTE(review): the guard
 * macro is not visible in this chunk — confirm against the full file).
 * vcpu creation fails with -EOPNOTSUPP; everything else silently does nothing.
 */
int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu) { return EXIT_FASTPATH_NONE; }
void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
@@ -144,6 +144,7 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu,

void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu);
int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector);
/* Set PEND_NMI so the TDX module injects an NMI into the TD vcpu. */
void tdx_inject_nmi(struct kvm_vcpu *vcpu);
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
bool tdx_is_emulated_msr(u32 index, bool write);

0 comments on commit e5add37

Please sign in to comment.