28 changes: 24 additions & 4 deletions core/emulate.c
@@ -586,28 +586,48 @@ static void register_add(struct em_context_t *ctxt,
 
 static uint8_t insn_fetch_u8(struct em_context_t *ctxt)
 {
-    uint8_t result = *(uint8_t *)(&ctxt->insn[ctxt->len]);
+    uint8_t result;
+
+    if (ctxt->len >= INSTR_MAX_LEN)
+        return 0;
+
+    result = *(uint8_t *)(&ctxt->insn[ctxt->len]);
     ctxt->len += 1;
     return result;
 }
 
 static uint16_t insn_fetch_u16(struct em_context_t *ctxt)
 {
-    uint16_t result = *(uint16_t *)(&ctxt->insn[ctxt->len]);
+    uint16_t result;
+
+    if (ctxt->len >= INSTR_MAX_LEN)
+        return 0;
+
+    result = *(uint16_t *)(&ctxt->insn[ctxt->len]);
     ctxt->len += 2;
     return result;
 }
 
 static uint32_t insn_fetch_u32(struct em_context_t *ctxt)
 {
-    uint32_t result = *(uint32_t *)(&ctxt->insn[ctxt->len]);
+    uint32_t result;
+
+    if (ctxt->len >= INSTR_MAX_LEN)
+        return 0;
+
+    result = *(uint32_t *)(&ctxt->insn[ctxt->len]);
     ctxt->len += 4;
     return result;
 }
 
 static uint64_t insn_fetch_u64(struct em_context_t *ctxt)
 {
-    uint64_t result = *(uint64_t *)(&ctxt->insn[ctxt->len]);
+    uint64_t result;
+
+    if (ctxt->len >= INSTR_MAX_LEN)
+        return 0;
+
+    result = *(uint64_t *)(&ctxt->insn[ctxt->len]);
     ctxt->len += 8;
     return result;
 }
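The new INSTR_MAX_LEN guard turns every fetch helper into a no-op once the cursor reaches 15 bytes, instead of letting it read past ctxt->insn. Below is a minimal, standalone sketch of that behaviour; em_context_t is reduced to the two fields the helpers touch and only the u8 variant is reproduced, so this is an illustration rather than the emulator's real decode path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INSTR_MAX_LEN 15

/* Cut-down stand-in for em_context_t: just the instruction bytes and the
 * fetch cursor used by the insn_fetch_* helpers. */
struct em_context_t {
    uint8_t insn[16];
    uint32_t len;
};

static uint8_t insn_fetch_u8(struct em_context_t *ctxt)
{
    uint8_t result;

    if (ctxt->len >= INSTR_MAX_LEN)
        return 0;

    result = ctxt->insn[ctxt->len];
    ctxt->len += 1;
    return result;
}

int main(void)
{
    struct em_context_t ctxt = { .len = 0 };
    int i;

    memset(ctxt.insn, 0x90, sizeof(ctxt.insn));   /* fill with NOP bytes */

    /* Attempt 20 fetches: the first 15 succeed, the rest return 0 and leave
     * ctxt.len untouched. */
    for (i = 0; i < 20; i++) {
        uint8_t byte = insn_fetch_u8(&ctxt);
        printf("fetch %2d -> 0x%02x (len=%u)\n", i, (unsigned)byte,
               (unsigned)ctxt.len);
    }
    return 0;
}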
85 changes: 47 additions & 38 deletions core/hax.c
@@ -267,6 +267,44 @@ static int hax_vmx_enable_check(void)
     return 0;
 }
 
+/*
+ * Allows the guest to read from and/or write to the specified MSRs without
+ * causing a VM exit.
+ * |start| is the start MSR address, |count| the number of MSRs. Together they
+ * specify a range of consecutive MSR addresses.
+ * |read| and |write| determine if each MSR can be read or written freely by the
+ * guest, respectively.
+ */
+static void set_msr_access(uint32_t start, uint32_t count, bool read, bool write)
+{
+    uint32_t end = start + count - 1;
+    uint32_t read_base, write_base, bit;
+    uint8_t *msr_bitmap = hax_page_va(msr_bitmap_page);
+
+    hax_assert(((start ^ (start << 1)) & 0x80000000) == 0);
+    hax_assert((start & 0x3fffe000) == 0);
+    hax_assert(((start ^ end) & 0xffffe000) == 0);
+    hax_assert(msr_bitmap);
+
+    // See IA SDM Vol. 3C 24.6.9 for the layout of the MSR bitmaps page
+    read_base = start & 0x80000000 ? 1024 : 0;
+    write_base = read_base + 2048;
+    for (bit = (start & 0x1fff); bit <= (end & 0x1fff); bit++) {
+        // Bit clear means allowed
+        if (read) {
+            btr(msr_bitmap + read_base, bit);
+        } else {
+            bts(msr_bitmap + read_base, bit);
+        }
+
+        if (write) {
+            btr(msr_bitmap + write_base, bit);
+        } else {
+            bts(msr_bitmap + write_base, bit);
+        }
+    }
+}
+
 static int hax_vmx_init(void)
 {
     int ret = -ENOMEM;
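For reference, set_msr_access() follows the MSR-bitmap layout from SDM Vol. 3C 24.6.9: a 4-KByte page whose first two 1-KByte quarters are the read bitmaps for the low (0x00000000-0x00001FFF) and high (0xC0000000-0xC0001FFF) MSR ranges, and whose last two quarters are the matching write bitmaps. The hypothetical helper below only mirrors that offset arithmetic; the 0xC0000103 address in the demo is the architectural IA32_TSC_AUX value, assumed here rather than taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Locate one MSR's read and write control bits inside the 4-KByte MSR
 * bitmaps page (byte offset within the page plus bit within that byte). */
static void msr_bitmap_pos(uint32_t msr, uint32_t *read_byte,
                           uint32_t *write_byte, uint32_t *bit_in_byte)
{
    uint32_t read_base = (msr & 0x80000000) ? 1024 : 0;   /* high vs. low MSRs */
    uint32_t bit = msr & 0x1fff;

    *read_byte = read_base + bit / 8;
    *write_byte = read_base + 2048 + bit / 8;   /* write bitmaps follow at +2048 */
    *bit_in_byte = bit % 8;
}

int main(void)
{
    uint32_t r, w, b;

    msr_bitmap_pos(0xC0000103, &r, &w, &b);   /* IA32_TSC_AUX (assumed value) */
    printf("read: byte %u bit %u, write: byte %u bit %u\n", r, b, w, b);
    return 0;
}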
@@ -297,6 +335,15 @@ static int hax_vmx_init(void)
     if ((ret = hax_vmx_enable_check()) < 0)
         goto out_5;
 
+    // Set MSRs loaded on VM entries/exits to pass-through
+    // See Intel SDM Vol. 3C 24.6.9 (MSR-Bitmap Address)
+
+    // 4 consecutive MSRs starting from IA32_STAR:
+    // IA32_STAR, IA32_LSTAR, IA32_CSTAR and IA32_SF_MASK
+    set_msr_access(IA32_STAR, 4, true, true);
+    set_msr_access(IA32_KERNEL_GS_BASE, 1, true, true);
+    set_msr_access(IA32_TSC_AUX, 1, true, true);
+
     return 0;
 out_5:
     hax_disable_vmx();
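The MSR macros passed to set_msr_access() above are defined elsewhere in the tree. The values below are the standard architectural addresses from the Intel SDM, listed only for illustration; the compile-time checks restate the hax_assert() preconditions of set_msr_access(), confirming that the 4-MSR range starting at IA32_STAR and the two single MSRs each stay inside the high (0xC0000000-0xC0001FFF) half of the bitmaps.

/* Architectural MSR addresses (Intel SDM Vol. 4); the names follow this code
 * base, the values are assumed here rather than taken from this diff. */
#define IA32_STAR            0xC0000081u
#define IA32_LSTAR           0xC0000082u
#define IA32_CSTAR           0xC0000083u
#define IA32_SF_MASK         0xC0000084u
#define IA32_KERNEL_GS_BASE  0xC0000102u
#define IA32_TSC_AUX         0xC0000103u

/* Same conditions as the hax_assert() calls in set_msr_access(). */
_Static_assert(((IA32_STAR ^ (IA32_STAR << 1)) & 0x80000000u) == 0,
               "bits 31 and 30 of the start address must match");
_Static_assert((IA32_STAR & 0x3fffe000u) == 0,
               "start address must sit inside one 8K-MSR block");
_Static_assert(((IA32_STAR ^ (IA32_STAR + 4 - 1)) & 0xffffe000u) == 0,
               "IA32_STAR..IA32_SF_MASK span a single bitmap half");
_Static_assert(((IA32_KERNEL_GS_BASE ^ IA32_TSC_AUX) & 0xffffe000u) == 0,
               "IA32_KERNEL_GS_BASE and IA32_TSC_AUX share one bitmap half");

int main(void) { return 0; }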
@@ -393,44 +440,6 @@ int hax_get_capability(void *buf, int bufLeng, int *outLength)
     return 0;
 }
 
-/*
- * Allows the guest to read from and/or write to the specified MSRs without
- * causing a VM exit.
- * |start| is the start MSR address, |count| the number of MSRs. Together they
- * specify a range of consecutive MSR addresses.
- * |read| and |write| determine if each MSR can be read or written freely by the
- * guest, respectively.
- */
-static void set_msr_access(uint32_t start, uint32_t count, bool read, bool write)
-{
-    uint32_t end = start + count - 1;
-    uint32_t read_base, write_base, bit;
-    uint8_t *msr_bitmap = hax_page_va(msr_bitmap_page);
-
-    hax_assert(((start ^ (start << 1)) & 0x80000000) == 0);
-    hax_assert((start & 0x3fffe000) == 0);
-    hax_assert(((start ^ end) & 0xffffe000) == 0);
-    hax_assert(msr_bitmap);
-
-    // See IA SDM Vol. 3C 24.6.9 for the layout of the MSR bitmaps page
-    read_base = start & 0x80000000 ? 1024 : 0;
-    write_base = read_base + 2048;
-    for (bit = (start & 0x1fff); bit <= (end & 0x1fff); bit++) {
-        // Bit clear means allowed
-        if (read) {
-            btr(msr_bitmap + read_base, bit);
-        } else {
-            bts(msr_bitmap + read_base, bit);
-        }
-
-        if (write) {
-            btr(msr_bitmap + write_base, bit);
-        } else {
-            bts(msr_bitmap + write_base, bit);
-        }
-    }
-}
-
 /*
  * Probes the host CPU to determine its performance monitoring capabilities.
  */
7 changes: 7 additions & 0 deletions core/include/cpu.h
@@ -45,6 +45,12 @@ struct vcpu_t;
 struct vcpu_state_t;
 
 #define NR_HMSR 6
+// The number of MSRs to be loaded on VM exits
+// Currently the MSRs list only supports automatic loading of below MSRs, the
+// total count of which is 8.
+// * IA32_PMCx
+// * IA32_PERFEVTSELx
+#define NR_HMSR_AUTOLOAD 8
 
 struct hstate {
     /* ldt is not covered by host vmcs area */
@@ -65,6 +71,7 @@ struct hstate {
     uint64_t fs_base;
     uint64_t hcr2;
     struct vmx_msr hmsr[NR_HMSR];
+    vmx_msr_entry hmsr_autoload[NR_HMSR_AUTOLOAD];
     // IA32_PMCx, since APM v1
     uint64_t apm_pmc_msrs[APM_MAX_GENERAL_COUNT];
     // IA32_PERFEVTSELx, since APM v1
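The new hmsr_autoload array presumably backs the VM-exit MSR-load area, the list of host MSRs the processor reloads automatically on every VM exit. A hedged sketch of how such an array is typically hooked into the VMCS; the field encodings are the architectural ones from the SDM's VMCS-field appendix, while vmcs_write64() and hax_pa() are stand-ins rather than HAXM's actual helpers.

#include <stdint.h>
#include <stdio.h>

/* Architectural VMCS field encodings (Intel SDM, VMCS field encoding
 * appendix); the symbolic names here are illustrative. */
#define VMCS_EXIT_MSR_LOAD_ADDRESS  0x2008ull
#define VMCS_EXIT_MSR_LOAD_COUNT    0x4010ull

/* Same 16-byte layout as the vmx_msr_entry added in core/include/vmx.h. */
typedef struct {
    uint64_t index;
    uint64_t data;
} msr_entry;

/* Stand-ins: real code would execute VMWRITE and translate VA to PA. */
static void vmcs_write64(uint64_t field, uint64_t value)
{
    printf("VMWRITE 0x%llx <- 0x%llx\n",
           (unsigned long long)field, (unsigned long long)value);
}

static uint64_t hax_pa(void *va)
{
    return (uint64_t)(uintptr_t)va;   /* placeholder, no real translation */
}

/* Point the VM-exit MSR-load area at the autoload array so `count` host MSRs
 * are restored by hardware on every VM exit. */
static void setup_exit_msr_autoload(msr_entry *autoload, uint32_t count)
{
    vmcs_write64(VMCS_EXIT_MSR_LOAD_ADDRESS, hax_pa(autoload));
    vmcs_write64(VMCS_EXIT_MSR_LOAD_COUNT, count);
}

int main(void)
{
    static msr_entry hmsr_autoload[8];   /* NR_HMSR_AUTOLOAD slots */

    setup_exit_msr_autoload(hmsr_autoload, 8);
    return 0;
}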
4 changes: 4 additions & 0 deletions core/include/emulate.h
@@ -99,6 +99,10 @@ struct em_operand_t;
 /* Emulator interface flags */
 #define EM_OPS_NO_TRANSLATION (1 << 0)
 
+// Instructions are never longer than 15 bytes:
+// http://wiki.osdev.org/X86-64_Instruction_Encoding
+#define INSTR_MAX_LEN 15
+
 typedef struct em_vcpu_ops_t {
     uint64_t (*read_gpr)(void *vcpu, uint32_t reg_index);
     void (*write_gpr)(void *vcpu, uint32_t reg_index, uint64_t value);
9 changes: 9 additions & 0 deletions core/include/vcpu.h
@@ -42,9 +42,18 @@
 
 #define NR_GMSR 5
 #define NR_EMT64MSR 6
+// The number of MSRs to be loaded on VM entries
+// Currently the MSRs list only supports automatic loading of below MSRs, the
+// total count of which is 14.
+// * IA32_PMCx
+// * IA32_PERFEVTSELx
+// * IA32_TSC_AUX
+// * all MSRs defined in gmsr_list[]
+#define NR_GMSR_AUTOLOAD 14
 
 struct gstate {
     struct vmx_msr gmsr[NR_GMSR];
+    vmx_msr_entry gmsr_autoload[NR_GMSR_AUTOLOAD];
     // IA32_PMCx, since APM v1
     uint64_t apm_pmc_msrs[APM_MAX_GENERAL_COUNT];
     // IA32_PERFEVTSELx, since APM v1
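gmsr_autoload is the guest-side counterpart: MSR address/value pairs the processor loads automatically on every VM entry. A small sketch of how entries in such a list might be filled; the entry type duplicates the new vmx_msr_entry layout, and the IA32_TSC_AUX address is the assumed architectural value, not something defined in this diff.

#include <stdint.h>

#define NR_GMSR_AUTOLOAD 14
#define IA32_TSC_AUX     0xC0000103u   /* architectural address, assumed */

/* Same layout as the vmx_msr_entry introduced in core/include/vmx.h. */
typedef struct {
    uint64_t index;
    uint64_t data;
} msr_entry;

/* Fill the next free autoload slot with one guest MSR value; the returned
 * count is what would later be written to the VM-entry MSR-load count. */
static uint32_t autoload_add(msr_entry *list, uint32_t count,
                             uint32_t msr, uint64_t value)
{
    if (count >= NR_GMSR_AUTOLOAD)
        return count;           /* list full; real code should report this */

    list[count].index = msr;    /* bits 31:0 = MSR address, upper bits zero */
    list[count].data = value;
    return count + 1;
}

int main(void)
{
    msr_entry gmsr_autoload[NR_GMSR_AUTOLOAD];
    uint32_t count = 0;

    /* Example: have the CPU load the guest's IA32_TSC_AUX on every VM entry. */
    count = autoload_add(gmsr_autoload, count, IA32_TSC_AUX, 0x1234);
    (void)count;
    return 0;
}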
6 changes: 6 additions & 0 deletions core/include/vmx.h
@@ -639,6 +639,12 @@ struct invept_desc {
     uint64_t rsvd;
 };
 
+// Intel SDM Vol. 3C: Table 24-12. Format of an MSR Entry
+typedef struct ALIGNED(16) vmx_msr_entry {
+    uint64_t index;
+    uint64_t data;
+} vmx_msr_entry;
+
 struct vcpu_state_t;
 struct vcpu_t;
 
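Per Table 24-12, each entry in an MSR-load/store area is 128 bits: bits 31:0 hold the MSR address, bits 63:32 are reserved, and bits 127:64 hold the data. A compile-time sanity check of that layout; the ALIGNED(16) stand-in below is assumed to match whatever the real macro expands to.

#include <stddef.h>
#include <stdint.h>

/* Assumed expansion of the header's ALIGNED(16) macro. */
#define ALIGNED(x) __attribute__((aligned(x)))

typedef struct ALIGNED(16) vmx_msr_entry {
    uint64_t index;   /* bits 31:0 = MSR address, bits 63:32 reserved */
    uint64_t data;    /* bits 127:64 = MSR data */
} vmx_msr_entry;

_Static_assert(sizeof(vmx_msr_entry) == 16, "each MSR entry is 16 bytes");
_Static_assert(offsetof(vmx_msr_entry, data) == 8, "data occupies bits 127:64");

int main(void) { return 0; }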