|
31 | 31 | #include <hv_lib.h>
32 | 32 | #include <acrn_common.h>
33 | 33 | #include <hv_arch.h>
| 34 | +#include <acrn_hv_defs.h> |
| 35 | +#include <hv_debug.h> |
| 36 | + |
| 37 | +_Static_assert(NR_WORLD == 2, "Only 2 Worlds supported!"); |
| 38 | + |
| 39 | +/* Trusty EPT rebase gpa: 511G */ |
| 40 | +#define TRUSTY_EPT_REBASE_GPA (511ULL*1024ULL*1024ULL*1024ULL) |
| 41 | + |
| 42 | +#define save_segment(seg, SEG_NAME) \ |
| 43 | +{ \ |
| 44 | + seg.selector = exec_vmread(VMX_GUEST_##SEG_NAME##_SEL); \ |
| 45 | + seg.base = exec_vmread(VMX_GUEST_##SEG_NAME##_BASE); \ |
| 46 | + seg.limit = exec_vmread(VMX_GUEST_##SEG_NAME##_LIMIT); \ |
| 47 | + seg.attr = exec_vmread(VMX_GUEST_##SEG_NAME##_ATTR); \ |
| 48 | +} |
| 49 | + |
| 50 | +#define load_segment(seg, SEG_NAME) \ |
| 51 | +{ \ |
| 52 | + exec_vmwrite(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \ |
| 53 | + exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, seg.base); \ |
| 54 | + exec_vmwrite(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \ |
| 55 | + exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \ |
| 56 | +} |
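
The `##` token pasting in these macros builds the VMCS field names from the second argument, so one macro covers every segment register. For illustration, `save_segment(context->cs, CS)` expands to:

{
	context->cs.selector = exec_vmread(VMX_GUEST_CS_SEL);
	context->cs.base = exec_vmread(VMX_GUEST_CS_BASE);
	context->cs.limit = exec_vmread(VMX_GUEST_CS_LIMIT);
	context->cs.attr = exec_vmread(VMX_GUEST_CS_ATTR);
}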
34 | 57 |
35 | 58 | void create_secure_world_ept(struct vm *vm, uint64_t gpa,
36 | 59 | uint64_t size, uint64_t rebased_gpa)
@@ -97,3 +120,122 @@ void create_secure_world_ept(struct vm *vm, uint64_t gpa,
97 | 120 |
98 | 121 | }
99 | 122 |
| 123 | +static void save_world_ctx(struct run_context *context) |
| 124 | +{ |
| 125 | + /* VMCS Execution field */ |
| 126 | + context->tsc_offset = exec_vmread64(VMX_TSC_OFFSET_FULL); |
| 127 | + |
| 128 | + /* VMCS GUEST field */ |
| 129 | + /* CR3, RIP, RSP, RFLAGS already saved on VMEXIT */ |
| 130 | + context->cr0 = exec_vmread(VMX_GUEST_CR0); |
| 131 | + context->cr4 = exec_vmread(VMX_GUEST_CR4); |
| 132 | + context->dr7 = exec_vmread(VMX_GUEST_DR7); |
| 133 | + context->ia32_debugctl = exec_vmread64(VMX_GUEST_IA32_DEBUGCTL_FULL); |
| 134 | + context->ia32_pat = exec_vmread64(VMX_GUEST_IA32_PAT_FULL); |
| 135 | + context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL); |
| 136 | + context->ia32_sysenter_cs = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS); |
| 137 | + context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP); |
| 138 | + context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP); |
| 139 | + save_segment(context->cs, CS); |
| 140 | + save_segment(context->ss, SS); |
| 141 | + save_segment(context->ds, DS); |
| 142 | + save_segment(context->es, ES); |
| 143 | + save_segment(context->fs, FS); |
| 144 | + save_segment(context->gs, GS); |
| 145 | + save_segment(context->tr, TR); |
| 146 | + save_segment(context->ldtr, LDTR); |
| 147 | + /* Only base and limit for IDTR and GDTR */ |
| 148 | + context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE); |
| 149 | + context->idtr.limit = exec_vmread(VMX_GUEST_IDTR_LIMIT); |
| 150 | + context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE); |
| 151 | + context->gdtr.limit = exec_vmread(VMX_GUEST_GDTR_LIMIT); |
| 152 | + |
| 153 | + /* MSRs that are not in the VMCS */ |
| 154 | + context->ia32_star = msr_read(MSR_IA32_STAR); |
| 155 | + context->ia32_lstar = msr_read(MSR_IA32_LSTAR); |
| 156 | + context->ia32_fmask = msr_read(MSR_IA32_FMASK); |
| 157 | + context->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE); |
| 158 | + |
| 159 | + /* FX area */ |
| 160 | + asm volatile("fxsave (%0)" |
| 161 | + : : "r" (context->fxstore_guest_area) : "memory"); |
| 162 | +} |
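
One caveat worth noting: `fxsave` and `fxrstor` require a 512-byte save area aligned on a 16-byte boundary, or they raise #GP. This code therefore assumes `fxstore_guest_area` is declared with that alignment in `struct run_context`; a minimal sketch of such a declaration (the actual ACRN definition may differ):

/* Sketch only: the FXSAVE area must be 512 bytes, 16-byte aligned */
uint8_t fxstore_guest_area[512] __attribute__((aligned(16)));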
| 163 | + |
| 164 | +static void load_world_ctx(struct run_context *context) |
| 165 | +{ |
| 166 | + /* VMCS Execution field */ |
| 167 | + exec_vmwrite64(VMX_TSC_OFFSET_FULL, context->tsc_offset); |
| 168 | + |
| 169 | + /* VMCS GUEST field */ |
| 170 | + exec_vmwrite(VMX_GUEST_CR0, context->cr0); |
| 171 | + exec_vmwrite(VMX_GUEST_CR3, context->cr3); |
| 172 | + exec_vmwrite(VMX_GUEST_CR4, context->cr4); |
| 173 | + exec_vmwrite(VMX_GUEST_RIP, context->rip); |
| 174 | + exec_vmwrite(VMX_GUEST_RSP, context->rsp); |
| 175 | + exec_vmwrite(VMX_GUEST_RFLAGS, context->rflags); |
| 176 | + exec_vmwrite(VMX_GUEST_DR7, context->dr7); |
| 177 | + exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl); |
| 178 | + exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->ia32_pat); |
| 179 | + exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer); |
| 180 | + exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs); |
| 181 | + exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp); |
| 182 | + exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip); |
| 183 | + load_segment(context->cs, CS); |
| 184 | + load_segment(context->ss, SS); |
| 185 | + load_segment(context->ds, DS); |
| 186 | + load_segment(context->es, ES); |
| 187 | + load_segment(context->fs, FS); |
| 188 | + load_segment(context->gs, GS); |
| 189 | + load_segment(context->tr, TR); |
| 190 | + load_segment(context->ldtr, LDTR); |
| 191 | + /* Only base and limit for IDTR and GDTR */ |
| 192 | + exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base); |
| 193 | + exec_vmwrite(VMX_GUEST_IDTR_LIMIT, context->idtr.limit); |
| 194 | + exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base); |
| 195 | + exec_vmwrite(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit); |
| 196 | + |
| 197 | + /* MSRs that are not in the VMCS */ |
| 198 | + msr_write(MSR_IA32_STAR, context->ia32_star); |
| 199 | + msr_write(MSR_IA32_LSTAR, context->ia32_lstar); |
| 200 | + msr_write(MSR_IA32_FMASK, context->ia32_fmask); |
| 201 | + msr_write(MSR_IA32_KERNEL_GS_BASE, context->ia32_kernel_gs_base); |
| 202 | + |
| 203 | + /* FX area */ |
| 204 | + asm volatile("fxrstor (%0)" : : "r" (context->fxstore_guest_area)); |
| 205 | +} |
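
Note the asymmetry with save_world_ctx(): CR3, RIP, RSP and RFLAGS are written back here but never read in the save path, because (per the comment there) they are already captured on VM exit. A hypothetical sketch of that capture step in the exit path, using only VMCS field names already seen above (the function name and placement are assumed, not ACRN's actual code):

/* Hypothetical: cache the fields save_world_ctx() skips, so the
 * context is complete before any world switch */
static void cache_exit_context(struct run_context *ctx)
{
	ctx->cr3 = exec_vmread(VMX_GUEST_CR3);
	ctx->rip = exec_vmread(VMX_GUEST_RIP);
	ctx->rsp = exec_vmread(VMX_GUEST_RSP);
	ctx->rflags = exec_vmread(VMX_GUEST_RFLAGS);
}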
| 206 | + |
| 207 | +static void copy_smc_param(struct run_context *prev_ctx, |
| 208 | + struct run_context *next_ctx) |
| 209 | +{ |
| 210 | + next_ctx->guest_cpu_regs.regs.rdi = prev_ctx->guest_cpu_regs.regs.rdi; |
| 211 | + next_ctx->guest_cpu_regs.regs.rsi = prev_ctx->guest_cpu_regs.regs.rsi; |
| 212 | + next_ctx->guest_cpu_regs.regs.rdx = prev_ctx->guest_cpu_regs.regs.rdx; |
| 213 | + next_ctx->guest_cpu_regs.regs.rbx = prev_ctx->guest_cpu_regs.regs.rbx; |
| 214 | +} |
| 215 | + |
| 216 | +void switch_world(struct vcpu *vcpu, int next_world) |
| 217 | +{ |
| 218 | + struct vcpu_arch *arch_vcpu = &vcpu->arch_vcpu; |
| 219 | + |
| 220 | + /* save previous world context */ |
| 221 | + save_world_ctx(&arch_vcpu->contexts[!next_world]); |
| 222 | + |
| 223 | + /* load next world context */ |
| 224 | + load_world_ctx(&arch_vcpu->contexts[next_world]); |
| 225 | + |
| 226 | + /* Copy SMC parameters: RDI, RSI, RDX, RBX */ |
| 227 | + copy_smc_param(&arch_vcpu->contexts[!next_world], |
| 228 | + &arch_vcpu->contexts[next_world]); |
| 229 | + |
| 230 | + /* load EPTP for next world */ |
| 231 | + if (next_world == NORMAL_WORLD) { |
| 232 | + exec_vmwrite64(VMX_EPT_POINTER_FULL, |
| 233 | + ((uint64_t)vcpu->vm->arch_vm.nworld_eptp) | (3<<3) | 6); |
| 234 | + } else { |
| 235 | + exec_vmwrite64(VMX_EPT_POINTER_FULL, |
| 236 | + ((uint64_t)vcpu->vm->arch_vm.sworld_eptp) | (3<<3) | 6); |
| 237 | + } |
| 238 | + |
| 239 | + /* Update world index */ |
| 240 | + arch_vcpu->cur_context = next_world; |
| 241 | +} |
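
The literal `(3<<3) | 6` OR'ed into both EPT pointers encodes two architectural fields of the EPTP: bits 5:3 hold the page-walk length minus one (3, i.e. 4-level EPT) and bits 2:0 hold the EPT paging-structure memory type (6 = write-back). A self-documenting helper, sketched here for illustration only (these names are not from the ACRN source):

/* Sketch: compose an EPTP value from a PML4 table address.
 * Bits 5:3 = walk length - 1 (4-level), bits 2:0 = memory type (WB). */
#define EPTP_MT_WB		6UL
#define EPTP_WALK_4_LEVEL	(3UL << 3)

static inline uint64_t make_eptp(uint64_t pml4_addr)
{
	return pml4_addr | EPTP_WALK_4_LEVEL | EPTP_MT_WB;
}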