
Commit 08c13a9

fyin1 authored and wenlingz committed
hv: Update SOS BSP to use new API to init BSP state
We move the SOS BSP state init to the vm loader and drop the function
init_guest_context_vm0_bsp. Update the definition of vm0_boot_context
to fix code violations.

Tracked-On: #1231
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 26627bd commit 08c13a9

3 files changed: +18 / -44 lines

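The gist of the change: instead of init_guest_context_vm0_bsp() hand-filling the SOS BSP segment, descriptor-table and RIP state inside vmx.c, each SW loader now seeds the whole BSP register set from vm0_boot_context through the new set_vcpu_regs() API and then only patches the guest entry point with vcpu_set_rip(). A minimal sketch of that calling pattern is below, assuming the declarations of vm0_boot_context, set_vcpu_regs(), vcpu_set_rip() and is_vcpu_bsp() from the ACRN headers touched by this series; the function and parameter names in the sketch are placeholders, not ACRN code.

/* Sketch of the new BSP init pattern used by the SW loaders
 * (illustration only, not verbatim ACRN code).
 */
static void example_init_bsp_state(struct vcpu *vcpu, uint64_t guest_entry)
{
    /* Seed the complete BSP register state captured when the HV started. */
    set_vcpu_regs(vcpu, (struct acrn_vcpu_regs *)&vm0_boot_context);

    /* The loader then overrides only the entry point for this guest image. */
    if (is_vcpu_bsp(vcpu)) {
        vcpu_set_rip(vcpu, guest_entry);
    }
}

With the state seeded this way, init_guest_state() in vmx.c no longer needs a dedicated vm0 BSP branch, which is why init_guest_context_vm0_bsp() can be dropped, as the diffs below show.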

hypervisor/arch/x86/vmx.c

Lines changed: 0 additions & 37 deletions
@@ -604,42 +604,6 @@ static void init_guest_context_real(struct vcpu *vcpu)
     ectx->idtr.limit = 0xFFFFU;
 }
 
-static void init_guest_context_vm0_bsp(struct vcpu *vcpu)
-{
-    struct ext_context *ectx =
-        &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].ext_ctx;
-    struct acrn_vcpu_regs* init_ctx =
-        (struct acrn_vcpu_regs*)(&vm0_boot_context);
-    uint16_t *sel = &(init_ctx->cs_sel);
-    struct segment_sel *seg;
-
-    for (seg = &(ectx->cs); seg <= &(ectx->gs); seg++) {
-        seg->base = 0UL;
-        seg->limit = 0xFFFFFFFFU;
-        seg->attr = PROTECTED_MODE_DATA_SEG_AR;
-        seg->selector = *sel;
-        sel++;
-    }
-    ectx->cs.attr = init_ctx->cs_ar; /* override cs attr */
-
-    vcpu_set_rip(vcpu, (uint64_t)vcpu->entry_addr);
-    vcpu_set_efer(vcpu, init_ctx->ia32_efer);
-
-    ectx->gdtr.base = init_ctx->gdt.base;
-    ectx->gdtr.limit = init_ctx->gdt.limit;
-
-    ectx->idtr.base = init_ctx->idt.base;
-    ectx->idtr.limit = init_ctx->idt.limit;
-
-    ectx->ldtr.selector = init_ctx->ldt_sel;
-    ectx->tr.selector = init_ctx->tr_sel;
-#ifdef CONFIG_EFI_STUB
-    vcpu_set_rsp(vcpu, efi_ctx->vcpu_regs.gprs.rsp);
-    /* clear flags for CF/PF/AF/ZF/SF/OF */
-    vcpu_set_rflags(vcpu, efi_ctx->vcpu_regs.rflags & ~(0x8d5UL));
-#endif
-}
-
 /* only be called for UOS when bsp start from protected mode */
 static void init_guest_context_protect(struct vcpu *vcpu)
 {
@@ -730,7 +694,6 @@ static void init_guest_state(struct vcpu *vcpu)
         init_guest_context_real(vcpu);
         init_guest_vmx(vcpu, CR0_ET | CR0_NE, 0UL, 0UL);
     } else if (is_vm0(vcpu->vm) && is_vcpu_bsp(vcpu)) {
-        init_guest_context_vm0_bsp(vcpu);
         init_guest_vmx(vcpu, init_ctx->cr0, init_ctx->cr3,
             init_ctx->cr4 & ~CR4_VMXE);
     } else {

hypervisor/bsp/uefi/uefi.c

Lines changed: 13 additions & 5 deletions
@@ -39,8 +39,8 @@ void efi_spurious_handler(int vector)
 int uefi_sw_loader(struct vm *vm, struct vcpu *vcpu)
 {
     int ret = 0;
-    struct run_context *cur_context =
-        &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].run_ctx;
+    struct acrn_vcpu_regs *vcpu_regs =
+        (struct acrn_vcpu_regs *)&vm0_boot_context;
 
     ASSERT(vm != NULL, "Incorrect argument");
 
@@ -51,9 +51,17 @@ int uefi_sw_loader(struct vm *vm, struct vcpu *vcpu)
 
     vlapic_restore(vcpu_vlapic(vcpu), &uefi_lapic_regs);
 
-    vcpu->entry_addr = (void *)efi_ctx->vcpu_regs.rip;
-    memcpy_s(&cur_context->guest_cpu_regs, sizeof(struct acrn_gp_regs),
-        &efi_ctx->vcpu_regs.gprs, sizeof(struct acrn_gp_regs));
+    /* For UEFI platform, the bsp init regs come from two places:
+     * 1. saved in efi_boot: gpregs, rip
+     * 2. saved when HV started: other registers
+     * We copy the info saved in efi_boot to vm0_boot_context and
+     * init bsp with vm0_boot_context.
+     */
+    memcpy_s(&(vcpu_regs->gprs), sizeof(struct acrn_gp_regs),
+        &(efi_ctx->vcpu_regs.gprs), sizeof(struct acrn_gp_regs));
+
+    vcpu_regs->rip = efi_ctx->vcpu_regs.rip;
+    set_vcpu_regs(vcpu, vcpu_regs);
 
     /* defer irq enabling till vlapic is ready */
     CPU_IRQ_ENABLE();

hypervisor/common/vm_load.c

Lines changed: 5 additions & 2 deletions
@@ -138,6 +138,8 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
     }
 #endif
 
+    set_vcpu_regs(vcpu, (struct acrn_vcpu_regs *)&vm0_boot_context);
+
     /* calculate the kernel entry point */
     zeropage = (struct zero_page *)sw_kernel->kernel_src_addr;
     kernel_entry_offset = (uint32_t)(zeropage->hdr.setup_sects + 1U) * 512U;
@@ -151,9 +153,10 @@ int general_sw_loader(struct vm *vm, struct vcpu *vcpu)
         + kernel_entry_offset);
     if (is_vcpu_bsp(vcpu)) {
         /* Set VCPU entry point to kernel entry */
-        vcpu->entry_addr = sw_kernel->kernel_entry_addr;
+        vcpu_set_rip(vcpu, (uint64_t)sw_kernel->kernel_entry_addr);
         pr_info("%s, VM %hu VCPU %hu Entry: 0x%016llx ",
-            __func__, vm->vm_id, vcpu->vcpu_id, vcpu->entry_addr);
+            __func__, vm->vm_id, vcpu->vcpu_id,
+            sw_kernel->kernel_entry_addr);
     }
 
     /* Calculate the host-physical address where the guest will be loaded */
