@@ -313,8 +313,6 @@ vie_update_register(struct vcpu *vcpu, enum cpu_reg_name reg,
 	}

 	error = vm_set_register(vcpu, reg, val);
-	ASSERT(error == 0, "%s: Error (%d) happens when update reg",
-		__func__, error);

 	return error;
 }
@@ -358,21 +356,21 @@ build_getcc(getcc16, uint16_t, x, y)
 build_getcc(getcc32, uint32_t, x, y)
 build_getcc(getcc64, uint64_t, x, y)

-static uint64_t
-getcc(uint8_t opsize, uint64_t x, uint64_t y)
+/**
+ * @pre opsize = 1, 2, 4 or 8
+ */
+static uint64_t getcc(uint8_t opsize, uint64_t x, uint64_t y)
 {
-	ASSERT(opsize == 1U || opsize == 2U || opsize == 4U || opsize == 8U,
-		"getcc: invalid operand size %hhu", opsize);
-
-	if (opsize == 1U) {
-		return getcc8((uint8_t)x, (uint8_t)y);
-	} else if (opsize == 2U) {
-		return getcc16((uint16_t)x, (uint16_t)y);
-	} else if (opsize == 4U) {
-		return getcc32((uint32_t)x, (uint32_t)y);
-	} else {
-		return getcc64(x, y);
-	}
+	switch (opsize) {
+	case 1:
+		return getcc8((uint8_t) x, (uint8_t) y);
+	case 2:
+		return getcc16((uint16_t) x, (uint16_t) y);
+	case 4:
+		return getcc32((uint32_t) x, (uint32_t) y);
+	default:	/* opsize == 8 */
+		return getcc64(x, y);
+	}
 }

 static int emulate_mov(struct vcpu *vcpu, struct instr_emul_vie *vie)
@@ -723,7 +721,6 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
 		}
 	}
 done:
-	ASSERT(error == 0, "%s: unexpected error %d", __func__, error);
 	return error;
 }

@@ -1206,8 +1203,6 @@ static int emulate_stack_op(struct vcpu *vcpu, struct instr_emul_vie *vie,
 	 * stack pointer.
 	 */
 	error = vm_get_seg_desc(vcpu, CPU_REG_SS, &ss_desc);
-	ASSERT(error == 0, "%s: error %d getting SS descriptor",
-		__func__, error);
 	if (SEG_DESC_DEF32(ss_desc.access)) {
 		stackaddrsize = 4U;
 	} else {
@@ -1428,15 +1423,12 @@ int vmm_emulate_instruction(struct instr_emul_ctxt *ctxt)
 	return error;
 }

-int
-vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0, uint64_t rflags,
-		uint64_t gla)
+int vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0,
+		uint64_t rflags, uint64_t gla)
 {
-	ASSERT(size == 1U || size == 2U || size == 4U || size == 8U,
-		"%s: invalid size %hhu", __func__, size);
-	ASSERT(cpl <= 3U, "%s: invalid cpl %d", __func__, cpl);
+	pr_dbg("Checking alignment with cpl: %hhu, addrsize: %hhu", cpl, size);

-	if (cpl != 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
+	if (cpl < 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
 		return 0;
 	}

@@ -1464,8 +1456,15 @@ vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
 	}
 }

-int
-vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
+/*
+ *@pre seg must be segment register index
+ *@pre length_arg must be 1, 2, 4 or 8
+ *@pre prot must be PROT_READ or PROT_WRITE
+ *
+ *return 0 - on success
+ *return -1 - on failure
+ */
+int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 		struct seg_desc *desc, uint64_t offset_arg, uint8_t length_arg,
 		uint8_t addrsize, uint32_t prot, uint64_t *gla)
 {
@@ -1475,23 +1474,20 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 	uint8_t glasize;
 	uint32_t type;

-	ASSERT((seg >= CPU_REG_SEG_FIRST) && (seg <= CPU_REG_SEG_LAST),
-		"%s: invalid segment %d", __func__, seg);
-	ASSERT(length == 1U || length == 2U || length == 4U || length == 8U,
-		"%s: invalid operand size %hhu", __func__, length);
-	ASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0U,
-		"%s: invalid prot %#x", __func__, prot);
-
 	firstoff = offset;
 	if (cpu_mode == CPU_MODE_64BIT) {
-		ASSERT(addrsize == 4U || addrsize == 8U,
-			"%s: invalid address size %d for cpu_mode %d",
-			__func__, addrsize, cpu_mode);
+		if (addrsize != 4U && addrsize != 8U) {
+			pr_dbg("%s: invalid addr size %d for cpu mode %d",
+				__func__, addrsize, cpu_mode);
+			return -1;
+		}
 		glasize = 8U;
 	} else {
-		ASSERT(addrsize == 2U || addrsize == 4U,
-			"%s: invalid address size %d for cpu mode %d",
-			__func__, addrsize, cpu_mode);
+		if (addrsize != 2U && addrsize != 4U) {
+			pr_dbg("%s: invalid addr size %d for cpu mode %d",
+				__func__, addrsize, cpu_mode);
+			return -1;
+		}
 		glasize = 4U;
 		/*
 		 * If the segment selector is loaded with a NULL selector
@@ -1508,16 +1504,17 @@ vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 	 * descriptor that is not present. If this was the case then
 	 * it would have been checked before the VM-exit.
 	 */
-	ASSERT(SEG_DESC_PRESENT(desc->access),
-		"segment %d not present: %#x", seg, desc->access);
+	if (SEG_DESC_PRESENT(desc->access) != 0) {
+		/* TODO: Inject #NP */
+		return -1;
+	}

-	/*
-	 * The descriptor type must indicate a code/data segment.
-	 */
+	/* The descriptor type must indicate a code/data segment. */
 	type = SEG_DESC_TYPE(desc->access);
-	ASSERT(type >= 16U && type <= 31U,
-		"segment %d has invalid descriptor type %#x",
-		seg, type);
+	if (type < 16 || type > 31) {
+		/*TODO: Inject #GP */
+		return -1;
+	}

 	if ((prot & PROT_READ) != 0U) {
 		/* #GP on a read access to a exec-only code segment */