Skip to content

Commit ce79d3a

Browse files
KaigeFulijinxia
authored and committed
HV: instr_emul: Handle error gracefully
ASSERT is too strict for the HV when an error happens while emulating an instruction. This patch removes all ASSERTs and returns a negative error code when instruction emulation fails. Originally, getcc would return -EINVAL when opsize was not one of (1, 2, 4, 8). But theoretically, opsize in the current implementation can only be one of (1, 2, 4, 8), so we will always get a valid "cc". This patch adds that pre-assumption and makes sure that getcc always returns a valid value. With the current code, a #GP will be injected into the guest if something goes wrong during instruction emulation. Signed-off-by: Kaige Fu <kaige.fu@intel.com> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
1 parent 8836abe commit ce79d3a

File tree

2 files changed

+50
-51
lines changed

2 files changed

+50
-51
lines changed

hypervisor/arch/x86/guest/instr_emul.c

Lines changed: 46 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -313,8 +313,6 @@ vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
313313
}
314314

315315
error = vm_set_register(vcpu, reg, val);
316-
ASSERT(error == 0, "%s: Error (%d) happens when update reg",
317-
__func__, error);
318316

319317
return error;
320318
}
@@ -358,21 +356,21 @@ build_getcc(getcc16, uint16_t, x, y)
358356
build_getcc(getcc32, uint32_t, x, y)
359357
build_getcc(getcc64, uint64_t, x, y)
360358

361-
/**
 * Dispatch to the width-specific condition-code helper for an operand size.
 *
 * Selects among the getcc8/getcc16/getcc32/getcc64 helpers (generated
 * elsewhere by the build_getcc() macro) based on opsize.  There is no
 * runtime validation of opsize: per the precondition below, callers are
 * trusted to pass only the four legal widths, so the 8-byte helper is
 * reached through the default label.
 *
 * @pre opsize == 1U, 2U, 4U or 8U
 *
 * @return the value produced by the width-matched getcc helper.
 */
static uint64_t getcc(uint8_t opsize, uint64_t x, uint64_t y)
{
	/* U-suffixed case labels keep the unsigned-literal convention used
	 * throughout this file for comparisons against opsize. */
	switch (opsize) {
	case 1U:
		return getcc8((uint8_t)x, (uint8_t)y);
	case 2U:
		return getcc16((uint16_t)x, (uint16_t)y);
	case 4U:
		return getcc32((uint32_t)x, (uint32_t)y);
	default:	/* opsize == 8U, guaranteed by the precondition */
		return getcc64(x, y);
	}
}
377375

378376
static int emulate_mov(struct vcpu *vcpu, struct instr_emul_vie *vie)
@@ -723,7 +721,6 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
723721
}
724722
}
725723
done:
726-
ASSERT(error == 0, "%s: unexpected error %d", __func__, error);
727724
return error;
728725
}
729726

@@ -1206,8 +1203,6 @@ static int emulate_stack_op(struct vcpu *vcpu, struct instr_emul_vie *vie,
12061203
* stack pointer.
12071204
*/
12081205
error = vm_get_seg_desc(vcpu, CPU_REG_SS, &ss_desc);
1209-
ASSERT(error == 0, "%s: error %d getting SS descriptor",
1210-
__func__, error);
12111206
if (SEG_DESC_DEF32(ss_desc.access)) {
12121207
stackaddrsize = 4U;
12131208
} else {
@@ -1428,15 +1423,12 @@ int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
14281423
return error;
14291424
}
14301425

1431-
int
1432-
vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0, uint64_t rflags,
1433-
uint64_t gla)
1426+
int vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0,
1427+
uint64_t rflags, uint64_t gla)
14341428
{
1435-
ASSERT(size == 1U || size == 2U || size == 4U || size == 8U,
1436-
"%s: invalid size %hhu", __func__, size);
1437-
ASSERT(cpl <= 3U, "%s: invalid cpl %d", __func__, cpl);
1429+
pr_dbg("Checking alignment with cpl: %hhu, addrsize: %hhu", cpl, size);
14381430

1439-
if (cpl != 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
1431+
if (cpl < 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
14401432
return 0;
14411433
}
14421434

@@ -1464,8 +1456,15 @@ vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
14641456
}
14651457
}
14661458

1467-
int
1468-
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
1459+
/*
1460+
*@pre seg must be segment register index
1461+
*@pre length_arg must be 1, 2, 4 or 8
1462+
*@pre prot must be PROT_READ or PROT_WRITE
1463+
*
1464+
*return 0 - on success
1465+
*return -1 - on failure
1466+
*/
1467+
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
14691468
struct seg_desc *desc, uint64_t offset_arg, uint8_t length_arg,
14701469
uint8_t addrsize, uint32_t prot, uint64_t *gla)
14711470
{
@@ -1475,23 +1474,20 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
14751474
uint8_t glasize;
14761475
uint32_t type;
14771476

1478-
ASSERT((seg >= CPU_REG_SEG_FIRST) && (seg <= CPU_REG_SEG_LAST),
1479-
"%s: invalid segment %d", __func__, seg);
1480-
ASSERT(length == 1U || length == 2U || length == 4U || length == 8U,
1481-
"%s: invalid operand size %hhu", __func__, length);
1482-
ASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0U,
1483-
"%s: invalid prot %#x", __func__, prot);
1484-
14851477
firstoff = offset;
14861478
if (cpu_mode == CPU_MODE_64BIT) {
1487-
ASSERT(addrsize == 4U || addrsize == 8U,
1488-
"%s: invalid address size %d for cpu_mode %d",
1489-
__func__, addrsize, cpu_mode);
1479+
if (addrsize != 4U && addrsize != 8U) {
1480+
pr_dbg("%s: invalid addr size %d for cpu mode %d",
1481+
__func__, addrsize, cpu_mode);
1482+
return -1;
1483+
}
14901484
glasize = 8U;
14911485
} else {
1492-
ASSERT(addrsize == 2U || addrsize == 4U,
1493-
"%s: invalid address size %d for cpu mode %d",
1494-
__func__, addrsize, cpu_mode);
1486+
if (addrsize != 2U && addrsize != 4U) {
1487+
pr_dbg("%s: invalid addr size %d for cpu mode %d",
1488+
__func__, addrsize, cpu_mode);
1489+
return -1;
1490+
}
14951491
glasize = 4U;
14961492
/*
14971493
* If the segment selector is loaded with a NULL selector
@@ -1508,16 +1504,17 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
15081504
* descriptor that is not present. If this was the case then
15091505
* it would have been checked before the VM-exit.
15101506
*/
1511-
ASSERT(SEG_DESC_PRESENT(desc->access),
1512-
"segment %d not present: %#x", seg, desc->access);
1507+
if (SEG_DESC_PRESENT(desc->access) != 0) {
1508+
/* TODO: Inject #NP */
1509+
return -1;
1510+
}
15131511

1514-
/*
1515-
* The descriptor type must indicate a code/data segment.
1516-
*/
1512+
/* The descriptor type must indicate a code/data segment. */
15171513
type = SEG_DESC_TYPE(desc->access);
1518-
ASSERT(type >= 16U && type <= 31U,
1519-
"segment %d has invalid descriptor type %#x",
1520-
seg, type);
1514+
if (type < 16 || type > 31) {
1515+
/*TODO: Inject #GP */
1516+
return -1;
1517+
}
15211518

15221519
if ((prot & PROT_READ) != 0U) {
15231520
/* #GP on a read access to a exec-only code segment */

hypervisor/arch/x86/guest/instr_emul_wrapper.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -296,8 +296,6 @@ static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emu
296296
{
297297
uint8_t cpl;
298298

299-
ASSERT(emul_ctxt != NULL && vcpu != NULL, "Error in input arguments");
300-
301299
cpl = (uint8_t)((csar >> 5) & 3U);
302300
emul_ctxt->paging.cr3 = exec_vmread(VMX_GUEST_CR3);
303301
emul_ctxt->paging.cpl = cpl;
@@ -313,6 +311,10 @@ int decode_instruction(struct vcpu *vcpu)
313311
enum vm_cpu_mode cpu_mode;
314312

315313
emul_ctxt = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);
314+
if (emul_ctxt == NULL) {
315+
pr_err("%s: Failed to get emul_ctxt", __func__);
316+
return -1;
317+
}
316318
emul_ctxt->vcpu = vcpu;
317319

318320
retval = vie_init(&emul_ctxt->vie, vcpu);

0 commit comments

Comments
 (0)