Skip to content
Permalink
Browse files
KVM: TDX: allocate/free TDX vcpu structure
The next step of TDX guest creation is to create the vcpu.  Allocate the TDX
vcpu structures and initialize them.  Allocate pages of the TDX vcpu for the
TDX module.

In the conventional case, cpuid is empty at initialization and is configured
after vcpu initialization.  Because TDX supports only X2APIC mode, cpuid is
forcibly initialized to advertise X2APIC support during vcpu initialization.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
  • Loading branch information
yamahata committed Dec 15, 2021
1 parent aa43996 commit bd5ba6a8b26525834dfa3bc5ade2dc2726b73468
Show file tree
Hide file tree
Showing 6 changed files with 175 additions and 3 deletions.
@@ -77,6 +77,30 @@ static void vt_vm_free(struct kvm *kvm)
}
}

/*
 * Dispatch vcpu creation: TD vcpus take the TDX path, all other vcpus
 * use the regular VMX implementation.
 */
static int vt_vcpu_create(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu))
		return vmx_create_vcpu(vcpu);

	return tdx_vcpu_create(vcpu);
}

/*
 * Dispatch vcpu teardown: TD vcpus are freed via the TDX path, all other
 * vcpus via the regular VMX implementation.
 */
static void vt_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (!is_td_vcpu(vcpu)) {
		vmx_free_vcpu(vcpu);
		return;
	}

	tdx_vcpu_free(vcpu);
}

/*
 * Dispatch vcpu reset: TD vcpus take the TDX reset path, all other vcpus
 * the regular VMX one.  @init_event is forwarded unchanged.
 */
static void vt_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	if (!is_td_vcpu(vcpu)) {
		vmx_vcpu_reset(vcpu, init_event);
		return;
	}

	tdx_vcpu_reset(vcpu, init_event);
}

static int vt_mem_enc_op_dev(void __user *argp)
{
if (!enable_tdx)
@@ -109,9 +133,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.vm_destroy = vt_vm_destroy,
.vm_free = vt_vm_free,

.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
.vcpu_reset = vmx_vcpu_reset,
.vcpu_create = vt_vcpu_create,
.vcpu_free = vt_vcpu_free,
.vcpu_reset = vt_vcpu_reset,

.prepare_guest_switch = vmx_prepare_switch_to_guest,
.vcpu_load = vmx_vcpu_load,
@@ -91,6 +91,11 @@ static __always_inline hpa_t set_hkid_to_hpa(hpa_t pa, u16 hkid)
return pa;
}

/*
 * True once the vcpu's TDVPR page has been handed to the TDX module
 * (i.e. TDH.VP.CREATE succeeded and tdx_add_td_page() marked it added).
 */
static inline bool is_td_vcpu_created(struct vcpu_tdx *tdx)
{
	return tdx->tdvpr.added;
}

static inline bool is_td_created(struct kvm_tdx *kvm_tdx)
{
return kvm_tdx->tdr.added;
@@ -358,6 +363,136 @@ int tdx_vm_init(struct kvm *kvm)
return ret;
}

/*
 * Allocate the per-vcpu TDX pages (one TDVPR page plus an array of TDVPX
 * pages sized by the TDX module's capabilities) and set the initial
 * architectural state KVM tracks for a TD vcpu.
 *
 * The pages are only allocated here; they are handed to the TDX module
 * later (see tdx_vcpu_reset()).  Returns 0 on success or a negative
 * errno, with all partially-allocated pages freed.
 */
int tdx_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct vcpu_tdx *tdx = to_tdx(vcpu);
	int ret, i;

	ret = tdx_alloc_td_page(&tdx->tdvpr);
	if (ret)
		return ret;

	tdx->tdvpx = kcalloc(tdx_caps.tdvpx_nr_pages, sizeof(*tdx->tdvpx),
			GFP_KERNEL_ACCOUNT);
	if (!tdx->tdvpx) {
		ret = -ENOMEM;
		goto free_tdvpr;
	}
	for (i = 0; i < tdx_caps.tdvpx_nr_pages; i++) {
		ret = tdx_alloc_td_page(&tdx->tdvpx[i]);
		if (ret)
			goto free_tdvpx;
	}

	/* Fixed EFER: TDX guests always run in 64-bit mode with NX. */
	vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;

	/* All CR0/CR4 bits are guest-owned; KVM doesn't intercept them. */
	vcpu->arch.cr0_guest_owned_bits = -1ul;
	vcpu->arch.cr4_guest_owned_bits = -1ul;

	vcpu->arch.tsc_offset = to_kvm_tdx(vcpu->kvm)->tsc_offset;
	vcpu->arch.l1_tsc_offset = vcpu->arch.tsc_offset;
	/* Guest state is inaccessible to KVM unless the TD is debuggable. */
	vcpu->arch.guest_state_protected =
		!(to_kvm_tdx(vcpu->kvm)->attributes & TDX_TD_ATTRIBUTE_DEBUG);

	vcpu->arch.mcg_cap = 0;

	/* Posted-interrupt descriptor: notification vector set, SN=1 (suppressed). */
	tdx->pi_desc.nv = POSTED_INTR_VECTOR;
	tdx->pi_desc.sn = 1;

	return 0;

free_tdvpx:
	/* @i points at the TDVPX page that failed allocation. */
	for (--i; i >= 0; i--)
		free_page(tdx->tdvpx[i].va);
	kfree(tdx->tdvpx);
free_tdvpr:
	free_page(tdx->tdvpr.va);

	return ret;
}

/*
 * Release the vcpu's TDVPR/TDVPX pages back to the kernel.
 *
 * If the TD's HKID is still assigned, VM teardown failed and the pages
 * may still be owned by the TDX module; reclaiming them would be unsafe,
 * so they are intentionally leaked.
 * NOTE(review): tdx->tdvpx is dereferenced unconditionally — assumes
 * this is only reached after a successful tdx_vcpu_create(); confirm
 * against the KVM core vcpu lifecycle.
 */
void tdx_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct vcpu_tdx *tdx = to_tdx(vcpu);
	int i;

	/* Can't reclaim or free pages if teardown failed. */
	if (is_hkid_assigned(to_kvm_tdx(vcpu->kvm)))
		return;

	for (i = 0; i < tdx_caps.tdvpx_nr_pages; i++)
		tdx_reclaim_td_page(&tdx->tdvpx[i]);
	kfree(tdx->tdvpx);
	tdx_reclaim_td_page(&tdx->tdvpr);
}

/*
 * Reset a TD vcpu: hand the previously allocated TDVPR/TDVPX pages to the
 * TDX module (TDH.VP.CREATE / TDH.VP.ADDCX) and force the local APIC into
 * X2APIC mode, the only APIC mode TDX supports.
 *
 * TDX has no INIT event, so @init_event must be false and this runs only
 * once per vcpu.  Failures here leave the TD in an indeterminate state;
 * there is no way to unwind, so errors mark the whole VM as bugged.
 */
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
	struct vcpu_tdx *tdx = to_tdx(vcpu);
	struct msr_data apic_base_msr;
	u64 err;
	int i;

	/* TDX doesn't support INIT event. */
	if (WARN_ON(init_event))
		goto td_bugged;
	/* TDX supports only X2APIC enabled. */
	if (WARN_ON(!vcpu->arch.apic))
		goto td_bugged;
	/* Reset must not run twice; TDVPR can only be created once. */
	if (WARN_ON(is_td_vcpu_created(tdx)))
		goto td_bugged;

	err = tdh_vp_create(kvm_tdx->tdr.pa, tdx->tdvpr.pa);
	if (WARN_ON_ONCE(err)) {
		pr_tdx_error(TDH_VP_CREATE, err, NULL);
		goto td_bugged;
	}
	tdx_add_td_page(&tdx->tdvpr);

	for (i = 0; i < tdx_caps.tdvpx_nr_pages; i++) {
		err = tdh_vp_addcx(tdx->tdvpr.pa, tdx->tdvpx[i].pa);
		if (WARN_ON_ONCE(err)) {
			pr_tdx_error(TDH_VP_ADDCX, err, NULL);
			goto td_bugged;
		}
		tdx_add_td_page(&tdx->tdvpx[i]);
	}

	if (!vcpu->arch.cpuid_entries) {
		/*
		 * On cpu creation, cpuid entry is blank.  Forcibly enable
		 * X2APIC feature to allow X2APIC.
		 */
		struct kvm_cpuid_entry2 *e;

		e = kvmalloc_array(1, sizeof(*e), GFP_KERNEL_ACCOUNT);
		/*
		 * Check the allocation: the original code dereferenced @e
		 * unconditionally, a NULL pointer dereference on OOM.
		 */
		if (!e)
			goto td_bugged;
		*e = (struct kvm_cpuid_entry2) {
			.function = 1,	/* Features for X2APIC */
			.index = 0,
			.eax = 0,
			.ebx = 0,
			.ecx = 1ULL << 21,	/* X2APIC */
			.edx = 0,
		};
		vcpu->arch.cpuid_entries = e;
		vcpu->arch.cpuid_nent = 1;
	}

	/* Hardwire X2APIC mode; the reset BSP additionally gets the BSP bit. */
	apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | LAPIC_MODE_X2APIC;
	if (kvm_vcpu_is_reset_bsp(vcpu))
		apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
	apic_base_msr.host_initiated = true;
	if (WARN_ON(kvm_set_apic_base(vcpu, &apic_base_msr)))
		goto td_bugged;

	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	return;

td_bugged:
	vcpu->kvm->vm_bugged = true;
}

int tdx_dev_ioctl(void __user *argp)
{
struct kvm_tdx_capabilities __user *user_caps;
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/kvm_host.h>

#include "posted_intr.h"
#include "tdx_errno.h"
#include "tdx_arch.h"
#include "tdx_ops.h"
@@ -38,6 +39,9 @@ struct vcpu_tdx {

struct tdx_td_page tdvpr;
struct tdx_td_page *tdvpx;

/* Posted interrupt descriptor */
struct pi_desc pi_desc;
};

#define TDX_MAX_NR_CPUID_CONFIGS \
@@ -41,6 +41,7 @@ static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,

/*
 * SEAMCALL TDH.VP.ADDCX: add a TDVPX control page at @addr to the vcpu
 * identified by @tdvpr.  The page's cache lines are flushed first —
 * presumably because the TDX module will access it with a different
 * HKID; confirm against the TDX module spec.
 */
static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
{
	tdx_clflush_page(addr);
	return seamcall(TDH_VP_ADDCX, addr, tdvpr, 0, 0, 0, NULL);
}

@@ -69,6 +70,7 @@ static inline u64 tdh_mng_create(hpa_t tdr, int hkid)

/*
 * SEAMCALL TDH.VP.CREATE: create a vcpu of the TD identified by @tdr,
 * using @tdvpr as its root control page.  The page's cache lines are
 * flushed before ownership transfers to the TDX module.
 */
static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
{
	tdx_clflush_page(tdvpr);
	return seamcall(TDH_VP_CREATE, tdvpr, tdr, 0, 0, 0, NULL);
}

@@ -7,5 +7,9 @@ int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops) { return -EOPNOTSUPP;
/* Stubs used when TDX support is compiled out: vcpu ops are no-ops and
 * ioctls report -EOPNOTSUPP so callers fail gracefully.
 */
void tdx_hardware_enable(void) {}
void tdx_hardware_disable(void) {}

int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}

int tdx_dev_ioctl(void __user *argp) { return -EOPNOTSUPP; }
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
@@ -132,6 +132,9 @@ void tdx_hardware_disable(void);
int tdx_vm_init(struct kvm *kvm);
void tdx_vm_teardown(struct kvm *kvm);
void tdx_vm_free(struct kvm *kvm);
int tdx_vcpu_create(struct kvm_vcpu *vcpu);
void tdx_vcpu_free(struct kvm_vcpu *vcpu);
void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);

int tdx_dev_ioctl(void __user *argp);
int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);

0 comments on commit bd5ba6a

Please sign in to comment.