
Commit 54c2541

fyin1 authored and lijinxia committed
hv: remove unnecessary check for gva
According to SDM vol3 25.1.1, with VMX enabled the following exceptions are handled by the guest without triggering a VM exit:

- faults based on privilege level
- general protection due to the relevant segment being unusable
- general protection due to the offset being beyond the limit of the relevant segment
- alignment-check exceptions

ACRN always assumes VMX is enabled, so we don't need these checks in instruction emulation. But we still need to do the page-fault related checks.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
1 parent 5663dd7 commit 54c2541
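For context, the deleted vie_alignment_check() (see the diff below) mirrored the architectural #AC condition that, per the commit message, the CPU now raises directly in the guest. A minimal self-contained sketch of that condition follows; CR0.AM and RFLAGS.AC are both architecturally bit 18, but the function and macro names here are illustrative, not ACRN's:

#include <stdbool.h>
#include <stdint.h>

#define CR0_AM		(1UL << 18)	/* CR0.AM: alignment-mask enable */
#define RFLAGS_AC	(1UL << 18)	/* RFLAGS.AC: alignment-check flag */

/*
 * True when the guest itself would take #AC for this access,
 * i.e. exactly the case the emulator no longer needs to re-derive.
 */
static bool guest_takes_ac(uint8_t cpl, uint8_t size, uint64_t cr0,
		uint64_t rflags, uint64_t gla)
{
	/* #AC is raised only at CPL 3 with CR0.AM and RFLAGS.AC both set. */
	if ((cpl < 3U) || ((cr0 & CR0_AM) == 0UL) ||
			((rflags & RFLAGS_AC) == 0UL)) {
		return false;
	}
	/* ...and only when the address is misaligned for the access size. */
	return (gla & ((uint64_t)size - 1UL)) != 0UL;
}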

File tree

1 file changed (+7 -72 lines changed)


hypervisor/arch/x86/guest/instr_emul.c

Lines changed: 7 additions & 72 deletions
@@ -385,18 +385,6 @@ static void get_guest_paging_info(struct vcpu *vcpu, struct instr_emul_ctxt *emu
 	emul_ctxt->paging.paging_mode = get_vcpu_paging_mode(vcpu);
 }
 
-static int vie_alignment_check(uint8_t cpl, uint8_t size, uint64_t cr0,
-		uint64_t rflags, uint64_t gla)
-{
-	pr_dbg("Checking alignment with cpl: %hhu, addrsize: %hhu", cpl, size);
-
-	if (cpl < 3U || (cr0 & CR0_AM) == 0UL || (rflags & PSL_AC) == 0UL) {
-		return 0;
-	}
-
-	return ((gla & (size - 1U)) != 0UL) ? 1 : 0;
-}
-
 static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
 {
 	uint64_t mask;
@@ -426,12 +414,11 @@ static int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
  *return -1 - on failure
  */
 static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
-		struct seg_desc *desc, uint64_t offset_arg, uint8_t length_arg,
-		uint8_t addrsize, uint32_t prot, uint64_t *gla)
+		struct seg_desc *desc, uint64_t offset_arg, uint8_t addrsize,
+		uint32_t prot, uint64_t *gla)
 {
-	uint64_t firstoff, low_limit, high_limit, segbase;
+	uint64_t firstoff, segbase;
 	uint64_t offset = offset_arg;
-	uint8_t length = length_arg;
 	uint8_t glasize;
 	uint32_t type;
 
@@ -450,25 +437,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 			return -1;
 		}
 		glasize = 4U;
-		/*
-		 * If the segment selector is loaded with a NULL selector
-		 * then the descriptor is unusable and attempting to use
-		 * it results in a #GP(0).
-		 */
-		if (SEG_DESC_UNUSABLE(desc->access)) {
-			return -1;
-		}
-
-		/*
-		 * The processor generates a #NP exception when a segment
-		 * register is loaded with a selector that points to a
-		 * descriptor that is not present. If this was the case then
-		 * it would have been checked before the VM-exit.
-		 */
-		if (SEG_DESC_PRESENT(desc->access) != 0) {
-			/* TODO: Inject #NP */
-			return -1;
-		}
 
 		/* The descriptor type must indicate a code/data segment. */
 		type = SEG_DESC_TYPE(desc->access);
@@ -497,30 +465,6 @@ static int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum cpu_reg_name seg,
 				return -1;
 			}
 		}
-
-		/*
-		 * 'desc->limit' is fully expanded taking granularity into
-		 * account.
-		 */
-		if ((type & 0xCU) == 0x4U) {
-			/* expand-down data segment */
-			low_limit = desc->limit + 1U;
-			high_limit = SEG_DESC_DEF32(desc->access) ?
-				0xffffffffU : 0xffffU;
-		} else {
-			/* code segment or expand-up data segment */
-			low_limit = 0U;
-			high_limit = desc->limit;
-		}
-
-		while (length > 0U) {
-			offset &= size2mask[addrsize];
-			if (offset < low_limit || offset > high_limit) {
-				return -1;
-			}
-			offset++;
-			length--;
-		}
 	}
 
 	/*
@@ -932,6 +876,7 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,
 		uint8_t opsize, uint8_t addrsize, uint32_t prot, enum cpu_reg_name seg,
 		enum cpu_reg_name gpr, uint64_t *gla, int *fault)
 {
+	int ret;
 	struct seg_desc desc;
 	uint64_t cr0, val, rflags;
 
@@ -940,41 +885,31 @@ static int get_gla(struct vcpu *vcpu, __unused struct instr_emul_vie *vie,
 	val = vm_get_register(vcpu, gpr);
 	vm_get_seg_desc(seg, &desc);
 
-	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
-			addrsize, prot, gla) != 0) {
+	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val,
+			addrsize, prot, gla) != 0) {
 		if (seg == CPU_REG_SS) {
-			/*vm_inject_ss(vcpu, 0);*/
 			pr_err("TODO: inject ss exception");
 		} else {
-			/*vm_inject_gp(vcpu);*/
 			pr_err("TODO: inject gp exception");
 		}
 		goto guest_fault;
 	}
 
 	if (vie_canonical_check(paging->cpu_mode, *gla) != 0) {
 		if (seg == CPU_REG_SS) {
-			/*vm_inject_ss(vcpu, 0);*/
 			pr_err("TODO: inject ss exception");
 		} else {
-			/*vm_inject_gp(vcpu);*/
 			pr_err("TODO: inject gp exception");
 		}
 		goto guest_fault;
	}
 
-	if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla) != 0) {
-		/*vm_inject_ac(vcpu, 0);*/
-		pr_err("TODO: inject ac exception");
-		goto guest_fault;
-	}
-
 	*fault = 0;
 	return 0;
 
 guest_fault:
 	*fault = 1;
-	return 0;
+	return ret;
 }
 
 static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie,
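Note that vie_canonical_check() is kept: the emulator computes guest linear addresses itself, so a non-canonical address must still be caught in software rather than by the hardware checks the commit relies on. A rough sketch of the 64-bit rule such a check implements, assuming 48-bit linear addresses (the function name and the hard-coded width are illustrative; real code would derive the width from the paging mode):

#include <stdbool.h>
#include <stdint.h>

/*
 * Canonical test for 48-bit linear addresses: bits 63:47 must all
 * equal bit 47, i.e. be a sign extension of it.
 */
static bool is_canonical_48(uint64_t gla)
{
	uint64_t mask = ~((1ULL << 47) - 1ULL);	/* selects bits 63:47 */

	return ((gla & mask) == 0ULL) || ((gla & mask) == mask);
}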
