Skip to content
Permalink
Browse files
KVM: TDX: Implement callbacks for MSR operations for TDX
Implement the set_msr/get_msr/has_emulated_msr callbacks for TDX.  Some
MSRs are handled by the TDX module and others by the VMM; the split is
defined by the TDX module specification.  Unless an MSR is explicitly
allowed, KVM does not handle it.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
  • Loading branch information
yamahata committed Dec 16, 2021
1 parent 3b9bc97 commit 99eb8f4e0badbe0b353a53c07bbb43879cbd10a9
Show file tree
Hide file tree
Showing 4 changed files with 93 additions and 3 deletions.
@@ -167,6 +167,34 @@ static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
vmx_handle_exit_irqoff(vcpu);
}

/* MSR-write dispatch: TD vCPUs go to the TDX handler, everything else to VMX. */
static int vt_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	if (likely(!is_td_vcpu(vcpu)))
		return vmx_set_msr(vcpu, msr_info);

	return tdx_set_msr(vcpu, msr_info);
}

/*
 * The kvm parameter can be NULL (module initialization, or invocation before
 * VM creation).  Be sure to check the kvm parameter before using it.
 */
static bool vt_has_emulated_msr(struct kvm *kvm, u32 index)
{
	/* Non-TD (or not-yet-created) VMs fall through to the VMX check. */
	if (!kvm || !is_td(kvm))
		return vmx_has_emulated_msr(kvm, index);

	return tdx_is_emulated_msr(index, true);
}

/* MSR-read dispatch: TD vCPUs go to the TDX handler, everything else to VMX. */
static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	if (likely(!is_td_vcpu(vcpu)))
		return vmx_get_msr(vcpu, msr_info);

	return tdx_get_msr(vcpu, msr_info);
}

static void vt_apicv_post_state_restore(struct kvm_vcpu *vcpu)
{
if (is_td_vcpu(vcpu))
@@ -358,7 +386,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.hardware_enable = vt_hardware_enable,
.hardware_disable = vt_hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority,
.has_emulated_msr = vmx_has_emulated_msr,
.has_emulated_msr = vt_has_emulated_msr,

.is_vm_type_supported = vt_is_vm_type_supported,
.vm_size = sizeof(struct kvm_vmx),
@@ -376,8 +404,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {

.update_exception_bitmap = vmx_update_exception_bitmap,
.get_msr_feature = vmx_get_msr_feature,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_msr = vt_get_msr,
.set_msr = vt_set_msr,
.get_segment_base = vmx_get_segment_base,
.get_segment = vmx_get_segment,
.set_segment = vmx_set_segment,
@@ -1210,6 +1210,62 @@ void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
*error_code = 0;
}

/*
 * Decide whether KVM emulates the given MSR for a TDX guest.
 *
 * The TDX module handles many MSRs itself; KVM may only emulate the MSRs
 * that the TDX module specification explicitly leaves to the VMM.
 * @write selects the access direction being checked: a few MSRs are
 * emulated for reads but not for writes (see the !write cases below).
 */
bool tdx_is_emulated_msr(u32 index, bool write)
{
	switch (index) {
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_ARCH_CAPABILITIES:
	case MSR_IA32_POWER_CTL:
	case MSR_MTRRcap:
	/* 0x200-0x2ff: MTRR MSR range — TODO(review): use named constants */
	case 0x200 ... 0x2ff:
	case MSR_IA32_TSC_DEADLINE:
	case MSR_IA32_MISC_ENABLE:
	case MSR_KVM_STEAL_TIME:
	case MSR_KVM_POLL_CONTROL:
	case MSR_PLATFORM_INFO:
	case MSR_MISC_FEATURES_ENABLES:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	/* All machine-check bank control/status/addr/misc registers. */
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(32) - 1:
		return true;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
		/*
		 * x2APIC registers that are virtualized by the CPU can't be
		 * emulated, KVM doesn't have access to the virtual APIC page.
		 */
		switch (index) {
		case X2APIC_MSR(APIC_TASKPRI):
		case X2APIC_MSR(APIC_PROCPRI):
		case X2APIC_MSR(APIC_EOI):
		case X2APIC_MSR(APIC_ISR) ... X2APIC_MSR(APIC_ISR + APIC_ISR_NR):
		case X2APIC_MSR(APIC_TMR) ... X2APIC_MSR(APIC_TMR + APIC_ISR_NR):
		case X2APIC_MSR(APIC_IRR) ... X2APIC_MSR(APIC_IRR + APIC_ISR_NR):
			return false;
		default:
			/* Remaining x2APIC MSRs are emulated by KVM. */
			return true;
		}
	case MSR_IA32_APICBASE:
	case MSR_EFER:
		/* Emulated for reads only; writes are not allowed. */
		return !write;
	default:
		/* Without explicit allowance, KVM doesn't handle the MSR. */
		return false;
	}
}

/*
 * Read an MSR for a TDX guest.  Only MSRs the TDX module leaves to the
 * VMM are serviced; everything else fails (returns 1).
 */
int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	if (!tdx_is_emulated_msr(msr->index, false))
		return 1;

	return kvm_get_msr_common(vcpu, msr);
}

/*
 * Write an MSR for a TDX guest.  Only MSRs the TDX module leaves to the
 * VMM are serviced; everything else fails (returns 1).
 */
int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	if (!tdx_is_emulated_msr(msr->index, true))
		return 1;

	return kvm_set_msr_common(vcpu, msr);
}

int tdx_dev_ioctl(void __user *argp)
{
struct kvm_tdx_capabilities __user *user_caps;
@@ -17,6 +17,9 @@ void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu) {}
int tdx_handle_exit(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion fastpath) { return 0; }
/*
 * Stubs for builds without TDX support (NOTE(review): presumably guarded by
 * an enclosing #ifdef — confirm): no MSR is TDX-emulated, all accesses fail.
 */
bool tdx_is_emulated_msr(u32 index, bool write) { return false; }
int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }

void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu) {}
int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { return 0; }
@@ -146,6 +146,9 @@ void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu);
int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector);
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
bool tdx_is_emulated_msr(u32 index, bool write);
int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

int tdx_dev_ioctl(void __user *argp);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);

0 comments on commit 99eb8f4

Please sign in to comment.