@@ -19,6 +19,12 @@ extern struct efi_ctx* efi_ctx;
 			((uint64_t)PAT_MEM_TYPE_UCM << 48) + \
 			((uint64_t)PAT_MEM_TYPE_UC << 56))
 
+#define REAL_MODE_BSP_INIT_CODE_SEL	(0xf000)
+#define REAL_MODE_DATA_SEG_AR		(0x0093)
+#define REAL_MODE_CODE_SEG_AR		(0x009f)
+#define PROTECTED_MODE_DATA_SEG_AR	(0xc093)
+#define PROTECTED_MODE_CODE_SEG_AR	(0xc09b)
+
 static uint32_t cr0_host_mask;
 static uint32_t cr0_always_on_mask;
 static uint32_t cr0_always_off_mask;
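These four access-rights values follow the VMX guest-segment AR layout from the Intel SDM (type in bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7, L in bit 13, D/B in bit 14, G in bit 15). A small standalone C sketch, with illustrative names that are not part of this patch, decodes them:

#include <stdint.h>
#include <stdio.h>

/* Decode a VMX guest-segment access-rights value (Intel SDM layout). */
static void decode_seg_ar(const char *name, uint32_t ar)
{
	printf("%s = 0x%04x: type=%u s=%u dpl=%u p=%u l=%u db=%u g=%u\n",
		name, ar,
		ar & 0xfU,          /* 0x3 = data r/w accessed, 0xb/0xf = code */
		(ar >> 4) & 0x1U,   /* code/data (non-system) descriptor       */
		(ar >> 5) & 0x3U,   /* DPL                                     */
		(ar >> 7) & 0x1U,   /* present                                 */
		(ar >> 13) & 0x1U,  /* 64-bit code segment                     */
		(ar >> 14) & 0x1U,  /* default operand size (1 = 32-bit)       */
		(ar >> 15) & 0x1U); /* 4 KiB granularity                       */
}

int main(void)
{
	decode_seg_ar("REAL_MODE_DATA_SEG_AR", 0x0093);      /* 16-bit data      */
	decode_seg_ar("REAL_MODE_CODE_SEG_AR", 0x009f);      /* 16-bit code      */
	decode_seg_ar("PROTECTED_MODE_DATA_SEG_AR", 0xc093); /* 32-bit data, G=1 */
	decode_seg_ar("PROTECTED_MODE_CODE_SEG_AR", 0xc09b); /* 32-bit code, G=1 */
	return 0;
}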
@@ -438,65 +444,75 @@ static void init_guest_state(struct vcpu *vcpu)
 	struct vm *vm = vcpu->vm;
 	struct run_context *cur_context =
 		&vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+	enum vm_cpu_mode vcpu_mode = get_vcpu_mode(vcpu);
 
 	pr_dbg("*********************");
 	pr_dbg("Initialize guest state");
 	pr_dbg("*********************");
 
+
+	/* Will not init vcpu mode to compatibility mode */
+	ASSERT(vcpu_mode != CPU_MODE_COMPATIBILITY,
+		"don't support start vcpu from compatibility mode");
+
 	/*************************************************/
 	/* Set up CRx */
 	/*************************************************/
 	pr_dbg("Natural-width********");
 
+	if (vcpu_mode == CPU_MODE_64BIT)
+		cur_context->ia32_efer = MSR_IA32_EFER_LME_BIT;
+
 	/* Setup guest control register values */
 	/* Set up guest CRO field */
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
-		/*cur_context->cr0 = (CR0_CD | CR0_NW | CR0_ET | CR0_NE);*/
-		cur_context->cr0 = CR0_ET | CR0_NE;
-		cur_context->cr3 = 0;
-		cur_context->cr4 = CR4_VMXE;
-	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
-		cur_context->cr0 = ((uint64_t)CR0_PG | CR0_PE | CR0_NE);
-		cur_context->cr4 = ((uint64_t)CR4_PSE | CR4_PAE | CR4_MCE | CR4_VMXE);
-		cur_context->cr3 = vm->arch_vm.guest_init_pml4 | CR3_PWT;
+	if (vcpu_mode == CPU_MODE_REAL) {
+		vmx_write_cr4(vcpu, 0);
+		vmx_write_cr0(vcpu, CR0_ET | CR0_NE);
+		vmx_write_cr3(vcpu, 0);
+	} else if (vcpu_mode == CPU_MODE_PROTECTED) {
+		vmx_write_cr4(vcpu, 0);
+		vmx_write_cr0(vcpu, CR0_ET | CR0_NE | CR0_PE);
+		vmx_write_cr3(vcpu, 0);
+	} else if (vcpu_mode == CPU_MODE_64BIT) {
+		vmx_write_cr4(vcpu, CR4_PSE | CR4_PAE | CR4_MCE);
+		vmx_write_cr0(vcpu, CR0_PG | CR0_PE | CR0_NE);
+		vmx_write_cr3(vcpu, vm->arch_vm.guest_init_pml4 | CR3_PWT);
 	}
 
-	value = cur_context->cr0;
-	field = VMX_GUEST_CR0;
-	exec_vmwrite(field, value & 0xFFFFFFFF);
-	pr_dbg("VMX_GUEST_CR0: 0x%016llx ", value);
-
-	/* Set up guest CR3 field */
-	value = cur_context->cr3;
-	field = VMX_GUEST_CR3;
-	exec_vmwrite(field, value & 0xFFFFFFFF);
-	pr_dbg("VMX_GUEST_CR3: 0x%016llx ", value);
-
-	/* Set up guest CR4 field */
-	value = cur_context->cr4;
-	field = VMX_GUEST_CR4;
-	exec_vmwrite(field, value & 0xFFFFFFFF);
-	pr_dbg("VMX_GUEST_CR4: 0x%016llx ", value);
-
 	/***************************************************/
 	/* Set up Flags - the value of RFLAGS on VM entry */
 	/***************************************************/
 	field = VMX_GUEST_RFLAGS;
 	cur_context->rflags = 0x2; /* Bit 1 is a active high reserved bit */
 	exec_vmwrite(field, cur_context->rflags);
-	pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", value);
+	pr_dbg("VMX_GUEST_RFLAGS: 0x%016llx ", cur_context->rflags);
 
 	/***************************************************/
 	/* Set Code Segment - CS */
 	/***************************************************/
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
-		/* AP is initialized with real mode
-		 * and CS value is left shift 8 bits from sipi vector;
-		 */
-		sel = vcpu->arch_vcpu.sipi_vector << 8;
+	if (vcpu_mode == CPU_MODE_REAL) {
+		if (is_vcpu_bsp(vcpu)) {
+			ASSERT(!is_vm0(vcpu->vm),
+				"VM0 bsp should not be inited as realmode");
+			/* BP is initialized with real mode */
+			sel = REAL_MODE_BSP_INIT_CODE_SEL;
+			/* For unrestricted guest, it is able to set a
+			 * high base address */
+			base = (uint64_t)vcpu->entry_addr & 0xFFFF0000UL;
+		} else {
+			/* AP is initialized with real mode
+			 * and CS value is left shift 8 bits from sipi vector;
+			 */
+			sel = vcpu->arch_vcpu.sipi_vector << 8;
+			base = sel << 4;
+		}
 		limit = 0xffff;
-		access = 0x9F;
-		base = sel << 4;
+		access = REAL_MODE_CODE_SEG_AR;
+	} else if (vcpu_mode == CPU_MODE_PROTECTED) {
+		limit = 0xffffffff;
+		base = 0;
+		access = PROTECTED_MODE_CODE_SEG_AR;
+		sel = 0x10; /* Linear CS selector in guest init gdt */
 	} else {
 		HV_ARCH_VMX_GET_CS(sel);
 		access = get_cs_access_rights();
@@ -529,15 +545,18 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 	/* Set up guest instruction pointer */
 	field = VMX_GUEST_RIP;
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
-		value32 = 0;
+	if (vcpu_mode == CPU_MODE_REAL)
+		if (is_vcpu_bsp(vcpu))
+			value32 = 0x0000FFF0;
+		else
+			value32 = 0;
 	else
-		value32 = (uint32_t) ((uint64_t) vcpu->entry_addr & 0xFFFFFFFF);
+		value32 = (uint32_t)((uint64_t)vcpu->entry_addr);
 
 	pr_dbg("GUEST RIP on VMEntry %x ", value32);
 	exec_vmwrite(field, value32);
 
-	if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
+	if (vcpu_mode == CPU_MODE_64BIT) {
 		/* Set up guest stack pointer to 0 */
 		field = VMX_GUEST_RSP;
 		value32 = 0;
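With these settings the first real-mode instruction fetch happens at CS.base + RIP: the BSP starts at (entry_addr & 0xFFFF0000) + 0xFFF0, mirroring the reset-vector layout, while an AP woken by SIPI starts at (sipi_vector << 8) << 4. A minimal C sketch with made-up example numbers, not taken from this codebase:

#include <stdint.h>
#include <stdio.h>

/* In real mode the first fetch is from CS.base + RIP. */
static uint64_t real_mode_start(uint64_t cs_base, uint64_t rip)
{
	return cs_base + rip;
}

int main(void)
{
	uint64_t entry_addr = 0xFFFFFFF0ULL; /* example BSP entry address */
	uint8_t sipi_vector = 0x10;          /* example AP SIPI vector    */

	uint64_t bsp_base = entry_addr & 0xFFFF0000ULL;        /* as in the patch */
	uint64_t ap_base = ((uint64_t)sipi_vector << 8) << 4;  /* sel << 4        */

	printf("BSP starts at 0x%llx\n",
		(unsigned long long)real_mode_start(bsp_base, 0xFFF0));
	printf("AP  starts at 0x%llx\n",
		(unsigned long long)real_mode_start(ap_base, 0));
	return 0;
}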
@@ -551,13 +570,15 @@ static void init_guest_state(struct vcpu *vcpu)
 	/***************************************************/
 
 	/* GDTR - Global Descriptor Table */
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
+	if (vcpu_mode == CPU_MODE_REAL) {
 		/* Base */
 		base = 0;
 
 		/* Limit */
 		limit = 0xFFFF;
-	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
+	} else if (vcpu_mode == CPU_MODE_PROTECTED) {
+		base = create_guest_init_gdt(vcpu->vm, &limit);
+	} else if (vcpu_mode == CPU_MODE_64BIT) {
 		descriptor_table gdtb = {0, 0};
 
 		/* Base */ /* TODO: Should guest GDTB point to host GDTB ? */
@@ -586,13 +607,14 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_GDTR_LIMIT: 0x%x ", limit);
 
 	/* IDTR - Interrupt Descriptor Table */
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
+	if ((vcpu_mode == CPU_MODE_REAL) ||
+		(vcpu_mode == CPU_MODE_PROTECTED)) {
 		/* Base */
 		base = 0;
 
 		/* Limit */
 		limit = 0xFFFF;
-	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
+	} else if (vcpu_mode == CPU_MODE_64BIT) {
 		descriptor_table idtb = {0, 0};
 
 		/* TODO: Should guest IDTR point to host IDTR ? */
@@ -630,11 +652,14 @@ static void init_guest_state(struct vcpu *vcpu)
 	/* ES, CS, SS, DS, FS, GS */
 	/***************************************************/
 	data32_idx = 0x10;
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
+	if (vcpu_mode == CPU_MODE_REAL) {
 		es = ss = ds = fs = gs = data32_idx;
 		limit = 0xffff;
 
-	} else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT) {
+	} else if (vcpu_mode == CPU_MODE_PROTECTED) {
+		es = ss = ds = fs = gs = 0x18;
+		limit = 0xffffffff;
+	} else if (vcpu_mode == CPU_MODE_64BIT) {
 		asm volatile ("movw %%es, %%ax":"=a" (es));
 		asm volatile ("movw %%ss, %%ax":"=a" (ss));
 		asm volatile ("movw %%ds, %%ax":"=a" (ds));
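The guest init GDT itself comes from create_guest_init_gdt(), which is not shown in this diff; for the selectors used in this patch (CS = 0x10, data segments = 0x18) it has to provide flat 4 GiB descriptors at GDT indexes 2 and 3 whose access bytes correspond to PROTECTED_MODE_CODE_SEG_AR and PROTECTED_MODE_DATA_SEG_AR. An illustrative encoding, assuming the standard descriptor layout rather than the actual helper:

#include <stdint.h>

/* Illustrative only: the real table comes from create_guest_init_gdt(). */
static uint64_t make_descriptor(uint32_t base, uint32_t limit,
			uint8_t access, uint8_t flags)
{
	return ((uint64_t)(limit & 0xFFFFU)) |
		((uint64_t)(base & 0xFFFFFFU) << 16) |
		((uint64_t)access << 40) |
		((uint64_t)((limit >> 16) & 0xFU) << 48) |
		((uint64_t)(flags & 0xFU) << 52) |
		((uint64_t)(base >> 24) << 56);
}

/* Selector 0x10 indexes entry 2, selector 0x18 indexes entry 3. */
uint64_t guest_init_gdt[4];

void fill_guest_init_gdt(void)
{
	guest_init_gdt[2] = make_descriptor(0, 0xFFFFF, 0x9b, 0xc); /* flat code */
	guest_init_gdt[3] = make_descriptor(0, 0xFFFFF, 0x93, 0xc); /* flat data */
}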
@@ -682,10 +707,10 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_GS_LIMIT: 0x%x ", limit);
 
 	/* Access */
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL)
-		value32 = 0x0093;
-	else if (get_vcpu_mode(vcpu) == CPU_MODE_64BIT)
-		value32 = 0xc093;
+	if (vcpu_mode == CPU_MODE_REAL)
+		value32 = REAL_MODE_DATA_SEG_AR;
+	else /* same value for protected mode and long mode */
+		value32 = PROTECTED_MODE_DATA_SEG_AR;
 
 	field = VMX_GUEST_ES_ATTR;
 	exec_vmwrite(field, value32);
@@ -795,19 +820,6 @@ static void init_guest_state(struct vcpu *vcpu)
 	pr_dbg("VMX_GUEST_IA32_PAT: 0x%016llx ",
 			value64);
 
-	if (get_vcpu_mode(vcpu) == CPU_MODE_REAL) {
-		/* Disable long mode (clear IA32_EFER.LME) in VMCS IA32_EFER
-		 * MSR
-		 */
-		value64 = msr_read(MSR_IA32_EFER);
-		value64 &= ~(MSR_IA32_EFER_LME_BIT | MSR_IA32_EFER_LMA_BIT);
-	} else {
-		value64 = msr_read(MSR_IA32_EFER);
-	}
-	exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, value64);
-	pr_dbg("VMX_GUEST_IA32_EFER: 0x%016llx ",
-		value64);
-
 	value64 = 0;
 	exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, value64);
 	pr_dbg("VMX_GUEST_IA32_DEBUGCTL: 0x%016llx ",