Skip to content

Commit 08139c3

Browse files
fyin1lijinxia
authored and committed
hv: add vmx_off and update exec_vmxon_instr
To handle cpu down/up dynamically, arcn needs to support vmx off/on dynamically. Following changes is introduced: vmx_off will be used when down AP. It does: - vmclear the mapped vcpu - off vmx. exec_vmxon_instr is updated to handle start and up AP both. It does - if vmx was on on AP, load the vmxon_region saved. Otherwise, allocate vmxon_region. - if there is mapped vcpu, vmptrld mapped vcpu. Signed-off-by: Zheng Gen <gen.zheng@intel.com> Signed-off-by: Yin Fegnwei <fengwei.yin@intel.com> Acked-by: Eddie Dong <Eddie.dong@intel.com>
1 parent fbeafd5 commit 08139c3

File tree

4 files changed

+43
-8
lines changed

4 files changed

+43
-8
lines changed

hypervisor/arch/x86/vmx.c

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -82,16 +82,23 @@ static inline int exec_vmxon(void *addr)
8282
return status;
8383
}
8484

85-
int exec_vmxon_instr(void)
85+
/* Per cpu data to hold the vmxon_region_pa for each pcpu.
86+
* It will be used again when we start a pcpu after the pcpu was down.
87+
* S3 enter/exit will use it.
88+
*/
89+
int exec_vmxon_instr(uint32_t pcpu_id)
8690
{
87-
uint64_t tmp64;
91+
uint64_t tmp64, vmcs_pa;
8892
uint32_t tmp32;
8993
int ret = -ENOMEM;
9094
void *vmxon_region_va;
91-
uint64_t vmxon_region_pa;
95+
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
9296

9397
/* Allocate page aligned memory for VMXON region */
94-
vmxon_region_va = alloc_page();
98+
if (per_cpu(vmxon_region_pa, pcpu_id) == 0)
99+
vmxon_region_va = alloc_page();
100+
else
101+
vmxon_region_va = HPA2HVA(per_cpu(vmxon_region_pa, pcpu_id));
95102

96103
if (vmxon_region_va != 0) {
97104
/* Initialize vmxon page with revision id from IA32 VMX BASIC
@@ -107,15 +114,39 @@ int exec_vmxon_instr(void)
107114
CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
108115

109116
/* Turn ON VMX */
110-
vmxon_region_pa = HVA2HPA(vmxon_region_va);
111-
ret = exec_vmxon(&vmxon_region_pa);
117+
per_cpu(vmxon_region_pa, pcpu_id) = HVA2HPA(vmxon_region_va);
118+
ret = exec_vmxon(&per_cpu(vmxon_region_pa, pcpu_id));
119+
120+
if (vcpu) {
121+
vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
122+
ret = exec_vmptrld(&vmcs_pa);
123+
}
112124
} else
113125
pr_err("%s, alloc memory for VMXON region failed\n",
114126
__func__);
115127

116128
return ret;
117129
}
118130

131+
int vmx_off(int pcpu_id)
132+
{
133+
int ret = 0;
134+
135+
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
136+
uint64_t vmcs_pa;
137+
138+
if (vcpu) {
139+
vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
140+
ret = exec_vmclear((void *)&vmcs_pa);
141+
if (ret)
142+
return ret;
143+
}
144+
145+
asm volatile ("vmxoff" : : : "memory");
146+
147+
return 0;
148+
}
149+
119150
int exec_vmclear(void *addr)
120151
{
121152
uint64_t rflags;

hypervisor/common/hv_main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ int hv_main(int cpu_id)
125125
}
126126

127127
/* Enable virtualization extensions */
128-
ret = exec_vmxon_instr();
128+
ret = exec_vmxon_instr(cpu_id);
129129
if (ret != 0)
130130
return ret;
131131

hypervisor/include/arch/x86/per_cpu.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ struct per_cpu_region {
1818
uint64_t vmexit_time[64];
1919
uint64_t softirq_pending;
2020
uint64_t spurious;
21+
uint64_t vmxon_region_pa;
2122
struct dev_handler_node *timer_node;
2223
struct shared_buf *earlylog_sbuf;
2324
void *vcpu;

hypervisor/include/arch/x86/vmx.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -401,13 +401,16 @@
401401
#define VMX_SUPPORT_UNRESTRICTED_GUEST (1<<5)
402402

403403
/* External Interfaces */
404-
int exec_vmxon_instr(void);
404+
int exec_vmxon_instr(uint32_t pcpu_id);
405405
uint64_t exec_vmread(uint32_t field);
406406
uint64_t exec_vmread64(uint32_t field_full);
407407
void exec_vmwrite(uint32_t field, uint64_t value);
408408
void exec_vmwrite64(uint32_t field_full, uint64_t value);
409409
int init_vmcs(struct vcpu *vcpu);
410410

411+
int vmx_off(int pcpu_id);
412+
int vmx_restart(int pcpu_id);
413+
411414
int exec_vmclear(void *addr);
412415
int exec_vmptrld(void *addr);
413416

0 commit comments

Comments
 (0)