Skip to content

Commit b627c2c

Browse files
ZideChen0 authored and lijinxia committed
hv: switch IA32_TSC_AUX between host/guest through VM Controls
Currently guest IA32_TSC_AUX MSR is loaded manually right before VM entry, and saved right after VM exit. This patch enables VM-Entry Control and VM-Exit Control to switch MSR IA32_TSC_AUX between host and guest automatically. This helps to keep vcpu_thread() function and struct acrn_vcpu cleaner. Also it removes the dead code of intercepting IA32_TSC_AUX. Tracked-On: #1867 Signed-off-by: Zide Chen <zide.chen@intel.com> Reviewed-by: Li, Fei1 <fei1.li@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent d0b37f8 commit b627c2c

File tree

5 files changed

+39
-42
lines changed

5 files changed

+39
-42
lines changed

hypervisor/arch/x86/guest/vcpu.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -611,10 +611,6 @@ int prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
611611
return ret;
612612
}
613613

614-
/* init_vmcs is delayed to vcpu vmcs launch first time */
615-
/* initialize the vcpu tsc aux */
616-
vcpu->msr_tsc_aux_guest = vcpu->vcpu_id;
617-
618614
set_pcpu_used(pcpu_id);
619615

620616
INIT_LIST_HEAD(&vcpu->run_list);

hypervisor/arch/x86/guest/vmsr.c

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,14 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, enum rw_mode mode)
132132
}
133133
}
134134

135+
static void init_msr_area(struct acrn_vcpu *vcpu)
136+
{
137+
vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
138+
vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
139+
vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_num = MSR_IA32_TSC_AUX;
140+
vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;
141+
}
142+
135143
void init_msr_emulation(struct acrn_vcpu *vcpu)
136144
{
137145
uint32_t i;
@@ -182,6 +190,9 @@ void init_msr_emulation(struct acrn_vcpu *vcpu)
182190
value64 = hva2hpa(vcpu->vm->arch_vm.msr_bitmap);
183191
exec_vmwrite64(VMX_MSR_BITMAP_FULL, value64);
184192
pr_dbg("VMX_MSR_BITMAP: 0x%016llx ", value64);
193+
194+
/* Initialize the MSR save/store area */
195+
init_msr_area(vcpu);
185196
}
186197

187198
int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
@@ -259,11 +270,6 @@ int rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
259270
v = vmx_rdmsr_pat(vcpu);
260271
break;
261272
}
262-
case MSR_IA32_TSC_AUX:
263-
{
264-
v = vcpu->arch.msr_tsc_aux;
265-
break;
266-
}
267273
case MSR_IA32_APIC_BASE:
268274
{
269275
/* Read APIC base */
@@ -396,11 +402,6 @@ int wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
396402
exec_vmwrite(VMX_GUEST_GS_BASE, v);
397403
break;
398404
}
399-
case MSR_IA32_TSC_AUX:
400-
{
401-
vcpu->arch.msr_tsc_aux = v;
402-
break;
403-
}
404405
case MSR_IA32_APIC_BASE:
405406
{
406407
err = vlapic_wrmsr(vcpu, msr, v);

hypervisor/arch/x86/vmx.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -956,7 +956,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
956956
exec_vmwrite(VMX_CR3_TARGET_3, 0UL);
957957
}
958958

959-
static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
959+
static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
960960
{
961961
uint32_t value32;
962962

@@ -985,7 +985,8 @@ static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
985985
* MSRs on load from memory on VM entry from mem address provided by
986986
* VM-entry MSR load address field
987987
*/
988-
exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, 0U);
988+
exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, MSR_AREA_COUNT);
989+
exec_vmwrite64(VMX_ENTRY_MSR_LOAD_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.guest);
989990

990991
/* Set up VM entry interrupt information field pg 2909 24.8.3 */
991992
exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0U);
@@ -997,7 +998,7 @@ static void init_entry_ctrl(__unused const struct acrn_vcpu *vcpu)
997998
exec_vmwrite32(VMX_ENTRY_INSTR_LENGTH, 0U);
998999
}
9991000

1000-
static void init_exit_ctrl(void)
1001+
static void init_exit_ctrl(struct acrn_vcpu *vcpu)
10011002
{
10021003
uint32_t value32;
10031004

@@ -1029,8 +1030,10 @@ static void init_exit_ctrl(void)
10291030
* The 64 bit VM-exit MSR store and load address fields provide the
10301031
* corresponding addresses
10311032
*/
1032-
exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, 0U);
1033-
exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, 0U);
1033+
exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, MSR_AREA_COUNT);
1034+
exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, MSR_AREA_COUNT);
1035+
exec_vmwrite64(VMX_EXIT_MSR_STORE_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.guest);
1036+
exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, (uint64_t)vcpu->arch.msr_area.host);
10341037
}
10351038

10361039
/**
@@ -1061,7 +1064,7 @@ void init_vmcs(struct acrn_vcpu *vcpu)
10611064
init_exec_ctrl(vcpu);
10621065
init_guest_state(vcpu);
10631066
init_entry_ctrl(vcpu);
1064-
init_exit_ctrl();
1067+
init_exit_ctrl(vcpu);
10651068
}
10661069

10671070
#ifndef CONFIG_PARTITION_MODE

hypervisor/common/hv_main.c

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ static void run_vcpu_pre_work(struct acrn_vcpu *vcpu)
2020
void vcpu_thread(struct acrn_vcpu *vcpu)
2121
{
2222
uint32_t basic_exit_reason = 0U;
23-
uint64_t tsc_aux_hyp_cpu = (uint64_t) vcpu->pcpu_id;
2423
int32_t ret = 0;
2524

2625
/* If vcpu is not launched, we need to do init_vmcs first */
@@ -62,12 +61,6 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
6261

6362
profiling_vmenter_handler(vcpu);
6463

65-
/* Restore guest TSC_AUX */
66-
if (vcpu->launched) {
67-
cpu_msr_write(MSR_IA32_TSC_AUX,
68-
vcpu->msr_tsc_aux_guest);
69-
}
70-
7164
ret = run_vcpu(vcpu);
7265
if (ret != 0) {
7366
pr_fatal("vcpu resume failed");
@@ -76,10 +69,6 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
7669
}
7770

7871
vcpu->arch.nrexits++;
79-
/* Save guest TSC_AUX */
80-
cpu_msr_read(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
81-
/* Restore native TSC_AUX */
82-
cpu_msr_write(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
8372

8473
CPU_IRQ_ENABLE();
8574
/* Dispatch handler */

hypervisor/include/arch/x86/guest/vcpu.h

Lines changed: 19 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,23 @@ struct cpu_context {
176176
struct ext_context ext_ctx;
177177
};
178178

179+
/* Intel SDM 24.8.2, the address must be 16-byte aligned */
180+
struct msr_store_entry {
181+
uint32_t msr_num;
182+
uint32_t reserved;
183+
uint64_t value;
184+
} __aligned(16);
185+
186+
enum {
187+
MSR_AREA_TSC_AUX = 0,
188+
MSR_AREA_COUNT,
189+
};
190+
191+
struct msr_store_area {
192+
struct msr_store_entry guest[MSR_AREA_COUNT];
193+
struct msr_store_entry host[MSR_AREA_COUNT];
194+
};
195+
179196
struct acrn_vcpu_arch {
180197
/* vmcs region for this vcpu, MUST be 4KB-aligned */
181198
uint8_t vmcs[CPU_PAGE_SIZE];
@@ -199,9 +216,6 @@ struct acrn_vcpu_arch {
199216
uint32_t irq_window_enabled;
200217
uint32_t nrexits;
201218

202-
/* Auxiliary TSC value */
203-
uint64_t msr_tsc_aux;
204-
205219
/* VCPU context state information */
206220
uint32_t exit_reason;
207221
uint32_t idt_vectoring_info;
@@ -217,6 +231,8 @@ struct acrn_vcpu_arch {
217231
bool inject_event_pending;
218232
struct event_injection_info inject_info;
219233

234+
/* List of MSRS to be stored and loaded on VM exits or VM entries */
235+
struct msr_store_area msr_area;
220236
} __aligned(CPU_PAGE_SIZE);
221237

222238
struct acrn_vm;
@@ -242,14 +258,6 @@ struct acrn_vcpu {
242258

243259
struct io_request req; /* used by io/ept emulation */
244260

245-
/* save guest msr tsc aux register.
246-
* Before VMENTRY, save guest MSR_TSC_AUX to this fields.
247-
* After VMEXIT, restore this fields to guest MSR_TSC_AUX.
248-
* This is only temperary workaround. Once MSR emulation
249-
* is enabled, we should remove this fields and related
250-
* code.
251-
*/
252-
uint64_t msr_tsc_aux_guest;
253261
uint64_t guest_msrs[IDX_MAX_MSR];
254262
#ifdef CONFIG_MTRR_ENABLED
255263
struct mtrr_state mtrr;

0 commit comments

Comments (0)