
Commit 612cdce

wuxyintellijinxia authored and committed
HV:treewide:Add exec_vmread32 and exec_vmwrite32 functions
In the hypervisor, VMCS fields include 16-bit fields, 32-bit fields, 64-bit fields and natural-width fields. The current implementation has no exec_vmread32/exec_vmwrite32 for accessing the 32-bit fields, so vmread/vmwrite operations need many type casts on their return values and parameters.

Create exec_vmread32 and exec_vmwrite32 functions to access 32-bit fields in the VMCS; update the types of the related variables used in vmread/vmwrite operations; update the related callers according to the VMCS field sizes.

V1-->V2: New part of this patch series that only updates 32-bit vmread/vmwrite operations and their callers.

V2-->V3: Update the types of the related variables in data structures for exec_vmread32/exec_vmwrite32. Rename the temporary variable 'low' to 'value' in exec_vmread32.

V3-->V4: Remove useless type conversions.

Signed-off-by: Xiangyang Wu <xiangyang.wu@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
1 parent 6543796 commit 612cdce
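
Note: the hunks below only touch call sites; the definitions of the new accessors live in the VMX helper sources, which are not part of this excerpt. As a rough sketch of the shape the commit message describes (an assumption based on the exec_vmread64/exec_vmwrite64 helpers already used in these diffs and the 'value' temporary mentioned above, not the literal patch contents), the 32-bit accessors can simply wrap the 64-bit helpers:

/* Hypothetical sketch of the new accessors; the real definitions are in
 * the VMX helper code, outside this excerpt.
 */
uint32_t exec_vmread32(uint32_t field)
{
	uint64_t value;

	/* VMREAD of a 32-bit field zero-extends the result, so the
	 * low 32 bits hold the field contents.
	 */
	value = exec_vmread64(field);

	return (uint32_t)value;
}

void exec_vmwrite32(uint32_t field, uint32_t value)
{
	/* Zero-extend to the 64-bit source operand expected by VMWRITE. */
	exec_vmwrite64(field, (uint64_t)value);
}

With accessors of this shape, a call site such as "desc->limit = (uint32_t)exec_vmread(limit);" becomes "desc->limit = exec_vmread32(limit);", as the per-file diffs below show.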

File tree

9 files changed, +113 -97 lines changed


hypervisor/arch/x86/guest/instr_emul_wrapper.c

Lines changed: 5 additions & 5 deletions
@@ -110,8 +110,8 @@ int vm_set_seg_desc(struct vcpu *vcpu, enum cpu_reg_name seg,
 	}
 
 	exec_vmwrite(base, ret_desc->base);
-	exec_vmwrite(limit, ret_desc->limit);
-	exec_vmwrite(access, ret_desc->access);
+	exec_vmwrite32(limit, ret_desc->limit);
+	exec_vmwrite32(access, ret_desc->access);
 
 	return 0;
 }
@@ -136,8 +136,8 @@ int vm_get_seg_desc(struct vcpu *vcpu, enum cpu_reg_name seg,
 	}
 
 	desc->base = exec_vmread(base);
-	desc->limit = (uint32_t)exec_vmread(limit);
-	desc->access = (uint32_t)exec_vmread(access);
+	desc->limit = exec_vmread32(limit);
+	desc->access = exec_vmread32(access);
 
 	return 0;
 }
@@ -351,7 +351,7 @@ int decode_instruction(struct vcpu *vcpu)
 		return retval;
 	}
 
-	csar = (uint32_t)exec_vmread(VMX_GUEST_CS_ATTR);
+	csar = exec_vmread32(VMX_GUEST_CS_ATTR);
 	get_guest_paging_info(vcpu, emul_ctxt, csar);
 	cpu_mode = get_vcpu_mode(vcpu);
 
hypervisor/arch/x86/guest/vcpu.c

Lines changed: 4 additions & 4 deletions
@@ -205,24 +205,24 @@ int start_vcpu(struct vcpu *vcpu)
 
 	/* Save guest IA32_EFER register */
 	cur_context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
-	set_vcpu_mode(vcpu, exec_vmread(VMX_GUEST_CS_ATTR));
+	set_vcpu_mode(vcpu, exec_vmread32(VMX_GUEST_CS_ATTR));
 
 	/* Obtain current VCPU instruction pointer and length */
 	cur_context->rip = exec_vmread(VMX_GUEST_RIP);
-	vcpu->arch_vcpu.inst_len = exec_vmread(VMX_EXIT_INSTR_LEN);
+	vcpu->arch_vcpu.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);
 
 	cur_context->rsp = exec_vmread(VMX_GUEST_RSP);
 	cur_context->rflags = exec_vmread(VMX_GUEST_RFLAGS);
 
 	/* Obtain VM exit reason */
-	vcpu->arch_vcpu.exit_reason = exec_vmread(VMX_EXIT_REASON);
+	vcpu->arch_vcpu.exit_reason = exec_vmread32(VMX_EXIT_REASON);
 
 	if (status != 0) {
 		/* refer to 64-ia32 spec section 24.9.1 volume#3 */
 		if (vcpu->arch_vcpu.exit_reason & VMX_VMENTRY_FAIL)
 			pr_fatal("vmentry fail reason=%lx", vcpu->arch_vcpu.exit_reason);
 		else
-			pr_fatal("vmexit fail err_inst=%lx", exec_vmread(VMX_INSTR_ERROR));
+			pr_fatal("vmexit fail err_inst=%x", exec_vmread32(VMX_INSTR_ERROR));
 
 		ASSERT(status == 0, "vm fail");
 	}

hypervisor/arch/x86/guest/vmsr.c

Lines changed: 2 additions & 2 deletions
@@ -198,7 +198,7 @@ int rdmsr_vmexit_handler(struct vcpu *vcpu)
 	/* following MSR not emulated now just left for future */
 	case MSR_IA32_SYSENTER_CS:
 	{
-		v = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS);
+		v = (uint64_t)exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
 		break;
 	}
 	case MSR_IA32_SYSENTER_ESP:
@@ -331,7 +331,7 @@ int wrmsr_vmexit_handler(struct vcpu *vcpu)
 	/* following MSR not emulated now just left for future */
 	case MSR_IA32_SYSENTER_CS:
 	{
-		exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, v);
+		exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, (uint32_t)v);
 		break;
 	}
 	case MSR_IA32_SYSENTER_ESP:

hypervisor/arch/x86/trusty.c

Lines changed: 10 additions & 10 deletions
@@ -38,16 +38,16 @@ static struct key_info g_key_info = {
 { \
 	seg.selector = exec_vmread16(VMX_GUEST_##SEG_NAME##_SEL); \
 	seg.base = exec_vmread(VMX_GUEST_##SEG_NAME##_BASE); \
-	seg.limit = exec_vmread(VMX_GUEST_##SEG_NAME##_LIMIT); \
-	seg.attr = exec_vmread(VMX_GUEST_##SEG_NAME##_ATTR); \
+	seg.limit = exec_vmread32(VMX_GUEST_##SEG_NAME##_LIMIT); \
+	seg.attr = exec_vmread32(VMX_GUEST_##SEG_NAME##_ATTR); \
 }
 
 #define load_segment(seg, SEG_NAME) \
 { \
 	exec_vmwrite16(VMX_GUEST_##SEG_NAME##_SEL, seg.selector); \
 	exec_vmwrite(VMX_GUEST_##SEG_NAME##_BASE, seg.base); \
-	exec_vmwrite(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \
-	exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
+	exec_vmwrite32(VMX_GUEST_##SEG_NAME##_LIMIT, seg.limit); \
+	exec_vmwrite32(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
 }
 
 #ifndef WORKAROUND_FOR_TRUSTY_4G_MEM
@@ -234,9 +234,9 @@ static void save_world_ctx(struct run_context *context)
 	 */
 	context->vmx_ia32_pat = exec_vmread(VMX_GUEST_IA32_PAT_FULL);
 	context->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
-	context->ia32_sysenter_cs = exec_vmread(VMX_GUEST_IA32_SYSENTER_CS);
 	context->ia32_sysenter_esp = exec_vmread(VMX_GUEST_IA32_SYSENTER_ESP);
 	context->ia32_sysenter_eip = exec_vmread(VMX_GUEST_IA32_SYSENTER_EIP);
+	context->ia32_sysenter_cs = exec_vmread32(VMX_GUEST_IA32_SYSENTER_CS);
 	save_segment(context->cs, CS);
 	save_segment(context->ss, SS);
 	save_segment(context->ds, DS);
@@ -247,9 +247,9 @@ static void save_world_ctx(struct run_context *context)
 	save_segment(context->ldtr, LDTR);
 	/* Only base and limit for IDTR and GDTR */
 	context->idtr.base = exec_vmread(VMX_GUEST_IDTR_BASE);
-	context->idtr.limit = exec_vmread(VMX_GUEST_IDTR_LIMIT);
 	context->gdtr.base = exec_vmread(VMX_GUEST_GDTR_BASE);
-	context->gdtr.limit = exec_vmread(VMX_GUEST_GDTR_LIMIT);
+	context->idtr.limit = exec_vmread32(VMX_GUEST_IDTR_LIMIT);
+	context->gdtr.limit = exec_vmread32(VMX_GUEST_GDTR_LIMIT);
 
 	/* MSRs which not in the VMCS */
 	context->ia32_star = msr_read(MSR_IA32_STAR);
@@ -280,7 +280,7 @@ static void load_world_ctx(struct run_context *context)
 	exec_vmwrite64(VMX_GUEST_IA32_DEBUGCTL_FULL, context->ia32_debugctl);
 	exec_vmwrite64(VMX_GUEST_IA32_PAT_FULL, context->vmx_ia32_pat);
 	exec_vmwrite64(VMX_GUEST_IA32_EFER_FULL, context->ia32_efer);
-	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
+	exec_vmwrite32(VMX_GUEST_IA32_SYSENTER_CS, context->ia32_sysenter_cs);
 	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_ESP, context->ia32_sysenter_esp);
 	exec_vmwrite(VMX_GUEST_IA32_SYSENTER_EIP, context->ia32_sysenter_eip);
 	load_segment(context->cs, CS);
@@ -293,9 +293,9 @@ static void load_world_ctx(struct run_context *context)
 	load_segment(context->ldtr, LDTR);
 	/* Only base and limit for IDTR and GDTR */
 	exec_vmwrite(VMX_GUEST_IDTR_BASE, context->idtr.base);
-	exec_vmwrite(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
 	exec_vmwrite(VMX_GUEST_GDTR_BASE, context->gdtr.base);
-	exec_vmwrite(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);
+	exec_vmwrite32(VMX_GUEST_IDTR_LIMIT, context->idtr.limit);
+	exec_vmwrite32(VMX_GUEST_GDTR_LIMIT, context->gdtr.limit);
 
 	/* MSRs which not in the VMCS */
 	msr_write(MSR_IA32_STAR, context->ia32_star);

hypervisor/arch/x86/virq.c

Lines changed: 24 additions & 24 deletions
@@ -63,7 +63,7 @@ static int is_guest_irq_enabled(struct vcpu *vcpu)
 	if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0UL) {
 		/* Interrupts are allowed */
 		/* Check for temporarily disabled interrupts */
-		guest_state = exec_vmread(VMX_GUEST_INTERRUPTIBILITY_INFO);
+		guest_state = exec_vmread32(VMX_GUEST_INTERRUPTIBILITY_INFO);
 
 		if ((guest_state & (HV_ARCH_VCPU_BLOCKED_BY_STI |
 				HV_ARCH_VCPU_BLOCKED_BY_MOVSS)) == 0UL) {
@@ -139,7 +139,7 @@ static int vcpu_do_pending_event(struct vcpu *vcpu)
 		return -1;
 	}
 
-	exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
+	exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
 		(vector & 0xFFU));
 
 	vlapic_intr_accepted(vlapic, vector);
@@ -163,7 +163,7 @@ static int vcpu_do_pending_extint(struct vcpu *vcpu)
 		if (vector <= NR_MAX_VECTOR) {
 			dev_dbg(ACRN_DBG_INTR, "VPIC: to inject PIC vector %d\n",
 					vector & 0xFFU);
-			exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
+			exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
 					VMX_INT_INFO_VALID |
 					(vector & 0xFFU));
 			vpic_intr_accepted(vcpu->vm, vector);
@@ -245,12 +245,12 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
 static void _vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
 {
 	if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
-		exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE,
+		exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
 				vcpu->arch_vcpu.exception_info.error);
 	}
 
-	exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
-			(exception_type[vector] << 8) | (vector & 0xFFU));
+	exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, VMX_INT_INFO_VALID |
+			(exception_type[vector] << 8U) | (vector & 0xFFU));
 
 	vcpu->arch_vcpu.exception_info.exception = VECTOR_INVALID;
 }
@@ -324,9 +324,9 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
 		 * Disable the interrupt window exiting
 		 */
 		vcpu->arch_vcpu.irq_window_enabled = 0U;
-		value32 = exec_vmread(VMX_PROC_VM_EXEC_CONTROLS);
+		value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
 		value32 &= ~(VMX_PROCBASED_CTLS_IRQ_WIN);
-		exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, value32);
+		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, value32);
 	}
 
 	vcpu_retain_rip(vcpu);
@@ -338,7 +338,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
 	uint32_t intr_info;
 	struct intr_excp_ctx ctx;
 
-	intr_info = exec_vmread(VMX_EXIT_INT_INFO);
+	intr_info = exec_vmread32(VMX_EXIT_INT_INFO);
 	if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
 		(((intr_info & VMX_INT_TYPE_MASK) >> 8)
 		!= VMX_INT_TYPE_EXT_INT)) {
@@ -383,10 +383,10 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
 	if (vcpu->arch_vcpu.inject_event_pending) {
 		if ((vcpu->arch_vcpu.inject_info.intr_info &
 			(EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
-			exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE,
+			exec_vmwrite32(VMX_ENTRY_EXCEPTION_ERROR_CODE,
 				vcpu->arch_vcpu.inject_info.error_code);
 
-		exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
+		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
 			vcpu->arch_vcpu.inject_info.intr_info);
 
 		vcpu->arch_vcpu.inject_event_pending = false;
@@ -401,8 +401,8 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
 	/* inject NMI before maskable hardware interrupt */
 	if (bitmap_test_and_clear(ACRN_REQUEST_NMI, pending_req_bits)) {
 		/* Inject NMI vector = 2 */
-		exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
-			VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8) | IDT_NMI);
+		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
+			VMX_INT_INFO_VALID | (VMX_INT_TYPE_NMI << 8U) | IDT_NMI);
 
 		goto INTR_WIN;
 	}
@@ -415,7 +415,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
 	 * at next vm exit?
 	 */
 	if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
-		exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
+		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD,
 			vcpu->arch_vcpu.idt_vectoring_info);
 		goto INTR_WIN;
 	}
@@ -450,9 +450,9 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
 	/* Enable interrupt window exiting if pending */
 	if (intr_pending && vcpu->arch_vcpu.irq_window_enabled == 0U) {
 		vcpu->arch_vcpu.irq_window_enabled = 1U;
-		tmp = exec_vmread(VMX_PROC_VM_EXEC_CONTROLS);
+		tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
 		tmp |= (VMX_PROCBASED_CTLS_IRQ_WIN);
-		exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, tmp);
+		exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp);
 	}
 
 	return ret;
@@ -462,7 +462,7 @@ void cancel_event_injection(struct vcpu *vcpu)
 {
 	uint32_t intinfo;
 
-	intinfo = exec_vmread(VMX_ENTRY_INT_INFO_FIELD);
+	intinfo = exec_vmread32(VMX_ENTRY_INT_INFO_FIELD);
 
 	/*
 	 * If event is injected, we clear VMX_ENTRY_INT_INFO_FIELD,
@@ -475,10 +475,10 @@ void cancel_event_injection(struct vcpu *vcpu)
 
 		if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
 			vcpu->arch_vcpu.inject_info.error_code =
-				exec_vmread(VMX_ENTRY_EXCEPTION_ERROR_CODE);
+				exec_vmread32(VMX_ENTRY_EXCEPTION_ERROR_CODE);
 
 		vcpu->arch_vcpu.inject_info.intr_info = intinfo;
-		exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD, 0UL);
+		exec_vmwrite32(VMX_ENTRY_INT_INFO_FIELD, 0UL);
 	}
 }
 
@@ -500,21 +500,21 @@ int exception_vmexit_handler(struct vcpu *vcpu)
 	pr_dbg(" Handling guest exception");
 
 	/* Obtain VM-Exit information field pg 2912 */
-	intinfo = exec_vmread(VMX_EXIT_INT_INFO);
+	intinfo = exec_vmread32(VMX_EXIT_INT_INFO);
 	if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
 		exception_vector = intinfo & 0xFFU;
 		/* Check if exception caused by the guest is a HW exception.
 		 * If the exit occurred due to a HW exception obtain the
 		 * error code to be conveyed to get via the stack
 		 */
 		if ((intinfo & VMX_INT_INFO_ERR_CODE_VALID) != 0U) {
-			int_err_code = exec_vmread(VMX_EXIT_INT_ERROR_CODE);
+			int_err_code = exec_vmread32(VMX_EXIT_INT_ERROR_CODE);
 
 			/* get current privilege level and fault address */
-			cpl = exec_vmread(VMX_GUEST_CS_ATTR);
-			cpl = (cpl >> 5) & 3U;
+			cpl = exec_vmread32(VMX_GUEST_CS_ATTR);
+			cpl = (cpl >> 5U) & 3U;
 
-			if (cpl < 3)
+			if (cpl < 3U)
 				int_err_code &= ~4U;
 			else
 				int_err_code |= 4U;

hypervisor/arch/x86/vmexit.c

Lines changed: 2 additions & 2 deletions
@@ -141,7 +141,7 @@ int vmexit_handler(struct vcpu *vcpu)
 
 	/* Obtain interrupt info */
 	vcpu->arch_vcpu.idt_vectoring_info =
-		exec_vmread(VMX_IDT_VEC_INFO_FIELD);
+		exec_vmread32(VMX_IDT_VEC_INFO_FIELD);
 	/* Filter out HW exception & NMI */
 	if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
 		uint32_t vector_info = vcpu->arch_vcpu.idt_vectoring_info;
@@ -151,7 +151,7 @@ int vmexit_handler(struct vcpu *vcpu)
 
 		if (type == VMX_INT_TYPE_HW_EXP) {
 			if ((vector_info & VMX_INT_INFO_ERR_CODE_VALID) != 0U)
-				err_code = exec_vmread(VMX_IDT_VEC_ERROR_CODE);
+				err_code = exec_vmread32(VMX_IDT_VEC_ERROR_CODE);
 			vcpu_queue_exception(vcpu, vector, err_code);
 			vcpu->arch_vcpu.idt_vectoring_info = 0U;
 		} else if (type == VMX_INT_TYPE_NMI) {

0 commit comments