Skip to content

Commit 0d309e2

Browse files
binbinwu1 and lijinxia
authored and committed
hv: add support to start a vcpu from protected mode
In the current implementation, a vcpu can only start from real mode or 64-bit mode. This patch adds support to start a vcpu from protected mode.

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Xu, Anthony <anthony.xu@intel.com>
1 parent 881eaa6 commit 0d309e2

File tree

1 file changed

+73
-61
lines changed

1 file changed

+73
-61
lines changed

hypervisor/arch/x86/vmx.c

Lines changed: 73 additions & 61 deletions
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,12 @@ extern struct efi_ctx* efi_ctx;
1919
((uint64_t)PAT_MEM_TYPE_UCM << 48) + \
2020
((uint64_t)PAT_MEM_TYPE_UC << 56))
2121

22+
#define REAL_MODE_BSP_INIT_CODE_SEL (0xf000)
23+
#define REAL_MODE_DATA_SEG_AR (0x0093)
24+
#define REAL_MODE_CODE_SEG_AR (0x009f)
25+
#define PROTECTED_MODE_DATA_SEG_AR (0xc093)
26+
#define PROTECTED_MODE_CODE_SEG_AR (0xc09b)
27+
2228
static uint32_t cr0_host_mask;
2329
static uint32_t cr0_always_on_mask;
2430
static uint32_t cr0_always_off_mask;
@@ -438,65 +444,75 @@ static void init_guest_state(struct vcpu *vcpu)
438444
struct vm *vm = vcpu->vm;
439445
struct run_context *cur_context =
440446
&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
447+
enum vm_cpu_mode vcpu_mode = get_vcpu_mode(vcpu);
441448

442449
pr_dbg("*********************");
443450
pr_dbg("Initialize guest state");
444451
pr_dbg("*********************");
445452

453+
454+
/* Will not init vcpu mode to compatibility mode */
455+
ASSERT(vcpu_mode != CPU_MODE_COMPATIBILITY,
456+
"don't support start vcpu from compatibility mode");
457+
446458
/*************************************************/
447459
/* Set up CRx */
448460
/*************************************************/
449461
pr_dbg("Natural-width********");
450462

463+
if (vcpu_mode == CPU_MODE_64BIT)
464+
cur_context->ia32_efer = MSR_IA32_EFER_LME_BIT;
465+
451466
/* Setup guest control register values */
452467
/* Set up guest CRO field */
453-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
454-
/*cur_context->cr0 = (CR0_CD | CR0_NW | CR0_ET | CR0_NE);*/
455-
cur_context->cr0 = CR0_ET | CR0_NE;
456-
cur_context->cr3 = 0;
457-
cur_context->cr4 = CR4_VMXE;
458-
} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
459-
cur_context->cr0 = ((uint64_t)CR0_PG | CR0_PE | CR0_NE);
460-
cur_context->cr4 = ((uint64_t)CR4_PSE | CR4_PAE | CR4_MCE | CR4_VMXE);
461-
cur_context->cr3 = vm->arch_vm.guest_init_pml4 | CR3_PWT;
468+
if (vcpu_mode == CPU_MODE_REAL) {
469+
vmx_write_cr4(vcpu, 0);
470+
vmx_write_cr0(vcpu, CR0_ET | CR0_NE);
471+
vmx_write_cr3(vcpu, 0);
472+
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
473+
vmx_write_cr4(vcpu, 0);
474+
vmx_write_cr0(vcpu, CR0_ET | CR0_NE | CR0_PE);
475+
vmx_write_cr3(vcpu, 0);
476+
} else if (vcpu_mode == CPU_MODE_64BIT) {
477+
vmx_write_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
478+
vmx_write_cr0(vcpu, CR0_PG | CR0_PE | CR0_NE);
479+
vmx_write_cr3(vcpu, vm->arch_vm.guest_init_pml4 | CR3_PWT);
462480
}
463481

464-
value = cur_context->cr0;
465-
field = VMX_GUEST_CR0;
466-
exec_vmwrite(field, value & 0xFFFFFFFF);
467-
pr_dbg("VMX_GUEST_CR0: 0x%016llx ", value);
468-
469-
/* Set up guest CR3 field */
470-
value = cur_context->cr3;
471-
field = VMX_GUEST_CR3;
472-
exec_vmwrite(field, value & 0xFFFFFFFF);
473-
pr_dbg("VMX_GUEST_CR3: 0x%016llx ", value);
474-
475-
/* Set up guest CR4 field */
476-
value = cur_context->cr4;
477-
field = VMX_GUEST_CR4;
478-
exec_vmwrite(field, value & 0xFFFFFFFF);
479-
pr_dbg("VMX_GUEST_CR4: 0x%016llx ", value);
480-
481482
/***************************************************/
482483
/* Set up Flags - the value of RFLAGS on VM entry */
483484
/***************************************************/
484485
field = VMX_GUEST_RFLAGS;
485486
cur_context->rflags = 0x2; /* Bit 1 is a active high reserved bit */
486487
exec_vmwrite(field, cur_context->rflags);
487-
pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", value);
488+
pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", cur_context->rflags);
488489

489490
/***************************************************/
490491
/* Set Code Segment - CS */
491492
/***************************************************/
492-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
493-
/* AP is initialized with real mode
494-
* and CS value is left shift 8 bits from sipi vector;
495-
*/
496-
sel = vcpu->arch_vcpu.sipi_vector << 8;
493+
if (vcpu_mode == CPU_MODE_REAL) {
494+
if (is_vcpu_bsp(vcpu)) {
495+
ASSERT(!is_vm0(vcpu->vm),
496+
"VM0 bsp should not be inited as realmode");
497+
/* BP is initialized with real mode */
498+
sel = REAL_MODE_BSP_INIT_CODE_SEL;
499+
/* For unrestricted guest, it is able to set a
500+
* high base address */
501+
base = (uint64_t)vcpu->entry_addr & 0xFFFF0000UL;
502+
} else {
503+
/* AP is initialized with real mode
504+
* and CS value is left shift 8 bits from sipi vector;
505+
*/
506+
sel = vcpu->arch_vcpu.sipi_vector << 8;
507+
base = sel << 4;
508+
}
497509
limit = 0xffff;
498-
access = 0x9F;
499-
base = sel << 4;
510+
access = REAL_MODE_CODE_SEG_AR;
511+
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
512+
limit = 0xffffffff;
513+
base = 0;
514+
access = PROTECTED_MODE_CODE_SEG_AR;
515+
sel = 0x10; /* Linear CS selector in guest init gdt */
500516
} else {
501517
HV_ARCH_VMX_GET_CS(sel);
502518
access = get_cs_access_rights();
@@ -529,15 +545,18 @@ static void init_guest_state(struct vcpu *vcpu)
529545
/***************************************************/
530546
/* Set up guest instruction pointer */
531547
field = VMX_GUEST_RIP;
532-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
533-
value32 = 0;
548+
if (vcpu_mode == CPU_MODE_REAL)
549+
if (is_vcpu_bsp(vcpu))
550+
value32 = 0x0000FFF0;
551+
else
552+
value32 = 0;
534553
else
535-
value32 = (uint32_t) ((uint64_t) vcpu->entry_addr & 0xFFFFFFFF);
554+
value32 = (uint32_t)((uint64_t)vcpu->entry_addr);
536555

537556
pr_dbg("GUEST RIP on VMEntry %x ", value32);
538557
exec_vmwrite(field, value32);
539558

540-
if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
559+
if (vcpu_mode == CPU_MODE_64BIT) {
541560
/* Set up guest stack pointer to 0 */
542561
field = VMX_GUEST_RSP;
543562
value32 = 0;
@@ -551,13 +570,15 @@ static void init_guest_state(struct vcpu *vcpu)
551570
/***************************************************/
552571

553572
/* GDTR - Global Descriptor Table */
554-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
573+
if (vcpu_mode == CPU_MODE_REAL) {
555574
/* Base */
556575
base = 0;
557576

558577
/* Limit */
559578
limit = 0xFFFF;
560-
} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
579+
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
580+
base = create_guest_init_gdt(vcpu->vm, &limit);
581+
} else if (vcpu_mode == CPU_MODE_64BIT) {
561582
descriptor_table gdtb = {0, 0};
562583

563584
/* Base *//* TODO: Should guest GDTB point to host GDTB ? */
@@ -586,13 +607,14 @@ static void init_guest_state(struct vcpu *vcpu)
586607
pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", limit);
587608

588609
/* IDTR - Interrupt Descriptor Table */
589-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
610+
if ((vcpu_mode == CPU_MODE_REAL) ||
611+
(vcpu_mode == CPU_MODE_PROTECTED)) {
590612
/* Base */
591613
base = 0;
592614

593615
/* Limit */
594616
limit = 0xFFFF;
595-
} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
617+
} else if (vcpu_mode == CPU_MODE_64BIT) {
596618
descriptor_table idtb = {0, 0};
597619

598620
/* TODO: Should guest IDTR point to host IDTR ? */
@@ -630,11 +652,14 @@ static void init_guest_state(struct vcpu *vcpu)
630652
/* ES, CS, SS, DS, FS, GS */
631653
/***************************************************/
632654
data32_idx = 0x10;
633-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
655+
if (vcpu_mode == CPU_MODE_REAL) {
634656
es = ss = ds = fs = gs = data32_idx;
635657
limit = 0xffff;
636658

637-
} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
659+
} else if (vcpu_mode == CPU_MODE_PROTECTED) {
660+
es = ss = ds = fs = gs = 0x18;
661+
limit = 0xffffffff;
662+
} else if (vcpu_mode == CPU_MODE_64BIT) {
638663
asm volatile ("movw %%es, %%ax":"=a" (es));
639664
asm volatile ("movw %%ss, %%ax":"=a" (ss));
640665
asm volatile ("movw %%ds, %%ax":"=a" (ds));
@@ -682,10 +707,10 @@ static void init_guest_state(struct vcpu *vcpu)
682707
pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);
683708

684709
/* Access */
685-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
686-
value32 = 0x0093;
687-
else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
688-
value32 = 0xc093;
710+
if (vcpu_mode == CPU_MODE_REAL)
711+
value32 = REAL_MODE_DATA_SEG_AR;
712+
else /* same value for protected mode and long mode */
713+
value32 = PROTECTED_MODE_DATA_SEG_AR;
689714

690715
field = VMX_GUEST_ES_ATTR;
691716
exec_vmwrite(field, value32);
@@ -795,19 +820,6 @@ static void init_guest_state(struct vcpu *vcpu)
795820
pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ",
796821
value64);
797822

798-
if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
799-
/* Disable long mode (clear IA32_EFER.LME) in VMCS IA32_EFER
800-
* MSR
801-
*/
802-
value64 = msr_read(MSR_IA32_EFER);
803-
value64 &= ~(MSR_IA32_EFER_LME_BIT | MSR_IA32_EFER_LMA_BIT);
804-
} else {
805-
value64 = msr_read(MSR_IA32_EFER);
806-
}
807-
exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, value64);
808-
pr_dbg("VMX_GUEST_IA32_EFER: 0x%016llx ",
809-
value64);
810-
811823
value64 = 0;
812824
exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, value64);
813825
pr_dbg("VMX_GUEST_IA32_DEBUGCTL: 0x%016llx ",

0 commit comments

Comments (0)