diff --git a/core/chunk.c b/core/chunk.c index 200b575f..d0abb393 100644 --- a/core/chunk.c +++ b/core/chunk.c @@ -39,24 +39,25 @@ int chunk_alloc(uint64_t base_uva, uint64_t size, hax_chunk **chunk) int ret; if (!chunk) { - hax_error("chunk_alloc: chunk is NULL\n"); + hax_log(HAX_LOGE, "chunk_alloc: chunk is NULL\n"); return -EINVAL; } if ((base_uva & (PAGE_SIZE_4K - 1)) != 0) { - hax_error("chunk_alloc: base_uva 0x%llx is not page aligned.\n", - base_uva); + hax_log(HAX_LOGE, "chunk_alloc: base_uva 0x%llx is not page aligned.\n", + base_uva); return -EINVAL; } if ((size & (PAGE_SIZE_4K - 1)) != 0) { - hax_error("chunk_alloc: size 0x%llx is not page aligned.\n", size); + hax_log(HAX_LOGE, "chunk_alloc: size 0x%llx is not page aligned.\n", + size); return -EINVAL; } chk = hax_vmalloc(sizeof(hax_chunk), 0); if (!chk) { - hax_error("hax_chunk: vmalloc failed.\n"); + hax_log(HAX_LOGE, "hax_chunk: vmalloc failed.\n"); return -ENOMEM; } @@ -64,8 +65,8 @@ int chunk_alloc(uint64_t base_uva, uint64_t size, hax_chunk **chunk) chk->size = size; ret = hax_pin_user_pages(base_uva, size, &chk->memdesc); if (ret) { - hax_error("hax_chunk: pin user pages failed," - " uva: 0x%llx, size: 0x%llx.\n", base_uva, size); + hax_log(HAX_LOGE, "hax_chunk: pin user pages failed," + " uva: 0x%llx, size: 0x%llx.\n", base_uva, size); hax_vfree(chk, sizeof(hax_chunk)); return ret; } @@ -79,13 +80,13 @@ int chunk_free(hax_chunk *chunk) int ret; if (!chunk) { - hax_error("chunk_free: chunk is NULL.\n"); + hax_log(HAX_LOGE, "chunk_free: chunk is NULL.\n"); return -EINVAL; } ret = hax_unpin_user_pages(&chunk->memdesc); if (ret) { - hax_error("chunk_free: unpin user pages failed.\n"); + hax_log(HAX_LOGE, "chunk_free: unpin user pages failed.\n"); return ret; } diff --git a/core/cpu.c b/core/cpu.c index f6c07bd2..595071a9 100644 --- a/core/cpu.c +++ b/core/cpu.c @@ -110,9 +110,9 @@ void cpu_init_vmx(void *arg) vt_enable = 1; if (vt_enable) cpu_data->cpu_features |= HAX_CPUF_ENABLE_VT; - 
hax_info("fc_msr %x\n", fc_msr); - hax_info("vt_enable %d\n", vt_enable); - hax_info("nx_enable %d\n", nx_enable); + hax_log(HAX_LOGI, "fc_msr %x\n", fc_msr); + hax_log(HAX_LOGI, "vt_enable %d\n", vt_enable); + hax_log(HAX_LOGI, "nx_enable %d\n", nx_enable); if (!nx_enable || !vt_enable) return; @@ -132,7 +132,7 @@ void cpu_init_vmx(void *arg) /* get VMX capabilities */ vmx_read_info(&vmx_info); #if 0 - //hax_info("-----------cpu %d---------------\n", cpu_data->cpu_id); + //hax_log(HAX_LOGI, "-----------cpu %d---------------\n", cpu_data->cpu_id); if ((cpu_data->cpu_id == 0 || memcmp(&vmx_info, &hax_cpu_data[0]->vmx_info, @@ -142,13 +142,13 @@ void cpu_init_vmx(void *arg) #endif if (vmx_info._vmcs_region_length > HAX_PAGE_SIZE) - hax_info("VMCS of %d bytes not supported by this Hypervisor. " + hax_log(HAX_LOGI, "VMCS of %d bytes not supported by this Hypervisor. " "Max supported %u bytes\n", vmx_info._vmcs_region_length, (uint32_t)HAX_PAGE_SIZE); vmxon = (vmcs_t *)hax_page_va(cpu_data->vmxon_page); vmxon->_revision_id = vmx_info._vmcs_revision_id; - //hax_info("enabled VMX mode (vmxon = %p)\n", + //hax_log(HAX_LOGI, "enabled VMX mode (vmxon = %p)\n", // hax_page_va(cpu_data->vmxon_page)); vmx_read_info(&cpu_data->vmx_info); @@ -195,10 +195,10 @@ static void vmread_cr(struct vcpu_t *vcpu) // This must use the actual cr0 mask, not _cr0_mask. 
mword cr0 = vmread(vcpu, GUEST_CR0); mword cr0_mask = vmread(vcpu, VMX_CR0_MASK); // should cache this - hax_debug("vmread_cr cr0 %lx, cr0_mask %lx, state->_cr0 %llx\n", cr0, - cr0_mask, state->_cr0); + hax_log(HAX_LOGD, "vmread_cr cr0 %lx, cr0_mask %lx, state->_cr0 %llx\n", + cr0, cr0_mask, state->_cr0); state->_cr0 = (cr0 & ~cr0_mask) | (state->_cr0 & cr0_mask); - hax_debug("vmread_cr, state->_cr0 %llx\n", state->_cr0); + hax_log(HAX_LOGD, "vmread_cr, state->_cr0 %llx\n", state->_cr0); // update CR3 only if guest is allowed to change it if (!(vmx(vcpu, pcpu_ctls) & CR3_LOAD_EXITING)) @@ -220,7 +220,7 @@ bool vcpu_is_panic(struct vcpu_t *vcpu) { struct hax_tunnel *htun = vcpu->tunnel; if (vcpu->panicked) { - hax_error("vcpu has panicked, id:%d\n", vcpu->vcpu_id); + hax_log(HAX_LOGE, "vcpu has panicked, id:%d\n", vcpu->vcpu_id); hax_panic_log(vcpu); htun->_exit_status = HAX_EXIT_STATECHANGE; return 1; @@ -274,7 +274,7 @@ vmx_result_t cpu_vmx_run(struct vcpu_t *vcpu, struct hax_tunnel *htun) mword host_rip; /* prepare the RIP */ - hax_debug("vm entry!\n"); + hax_log(HAX_LOGD, "vm entry!\n"); vcpu_save_host_state(vcpu); hax_disable_irq(); @@ -362,7 +362,8 @@ int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun) return 0; if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "load_vmcs fail: %x\n", vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "load_vmcs fail: %x\n", vmcs_err); hax_panic_log(vcpu); return 0; } @@ -394,17 +395,18 @@ int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun) res = cpu_vmx_run(vcpu, htun); if (res) { - hax_error("cpu_vmx_run error, code:%x\n", res); + hax_log(HAX_LOGE, "cpu_vmx_run error, code:%x\n", res); if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "put_vmcs fail: %x\n", vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs fail: %x\n", vmcs_err); hax_panic_log(vcpu); } return -EINVAL; } exit_reason.raw = vmread(vcpu, VM_EXIT_INFO_REASON); - 
hax_debug("....exit_reason.raw %x, cpu %d %d\n", exit_reason.raw, - vcpu->cpu_id, hax_cpuid()); + hax_log(HAX_LOGD, "....exit_reason.raw %x, cpu %d %d\n", + exit_reason.raw, vcpu->cpu_id, hax_cpuid()); /* XXX Currently we take active save/restore for MSR and FPU, the main * reason is, we have no schedule hook to get notified of preemption @@ -444,8 +446,9 @@ int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun) vcpu->cur_state = GS_STALE; vmcs_err = put_vmcs(vcpu, &flags); if (vmcs_err) { - hax_panic_vcpu(vcpu, "put_vmcs() fail before vmexit. %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs() fail before vmexit. %x\n", + vmcs_err); hax_panic_log(vcpu); } hax_enable_irq(); @@ -496,17 +499,17 @@ void hax_panic_log(struct vcpu_t *vcpu) { if (!vcpu) return; - hax_error("log_host_cr4_vmxe: %x\n", log_host_cr4_vmxe); - hax_error("log_host_cr4 %llx\n", log_host_cr4); - hax_error("log_vmxon_res %x\n", log_vmxon_res); - hax_error("log_vmxon_addr %llx\n", log_vmxon_addr); - hax_error("log_vmxon_err_type1 %x\n", log_vmxon_err_type1); - hax_error("log_vmxon_err_type2 %x\n", log_vmxon_err_type2); - hax_error("log_vmxon_err_type3 %x\n", log_vmxon_err_type3); - hax_error("log_vmclear_err %x\n", log_vmclear_err); - hax_error("log_vmptrld_err %x\n", log_vmptrld_err); - hax_error("log_vmoff_no %x\n", log_vmxoff_no); - hax_error("log_vmxoff_res %x\n", log_vmxoff_res); + hax_log(HAX_LOGE, "log_host_cr4_vmxe: %x\n", log_host_cr4_vmxe); + hax_log(HAX_LOGE, "log_host_cr4 %llx\n", log_host_cr4); + hax_log(HAX_LOGE, "log_vmxon_res %x\n", log_vmxon_res); + hax_log(HAX_LOGE, "log_vmxon_addr %llx\n", log_vmxon_addr); + hax_log(HAX_LOGE, "log_vmxon_err_type1 %x\n", log_vmxon_err_type1); + hax_log(HAX_LOGE, "log_vmxon_err_type2 %x\n", log_vmxon_err_type2); + hax_log(HAX_LOGE, "log_vmxon_err_type3 %x\n", log_vmxon_err_type3); + hax_log(HAX_LOGE, "log_vmclear_err %x\n", log_vmclear_err); + hax_log(HAX_LOGE, "log_vmptrld_err %x\n", log_vmptrld_err); + 
hax_log(HAX_LOGE, "log_vmxoff_no %x\n", log_vmxoff_no); + hax_log(HAX_LOGE, "log_vmxoff_res %x\n", log_vmxoff_res); } uint32_t load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags) @@ -545,7 +548,7 @@ uint32_t load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags) if (asm_vmptrld(&vmcs_phy) != VMX_SUCCEED) { - hax_error("vmptrld failed (%08llx)\n", vmcs_phy); + hax_log(HAX_LOGE, "vmptrld failed (%08llx)\n", vmcs_phy); cpu_vmxroot_leave(); log_vmxon_err_type3 = 1; hax_enable_preemption(flags); @@ -569,7 +572,7 @@ void restore_host_cr4_vmxe(struct per_cpu_data *cpu_data) if (cpu_data->vmm_flag & VMXON_HAX) { // TODO: Need to understand why this happens (on both Windows and // macOS) - hax_debug("VMM flag (VMON_HAX) is not clear!\n"); + hax_log(HAX_LOGD, "VMM flag (VMON_HAX) is not clear!\n"); } set_cr4(get_cr4() | CR4_VMXE); } else { @@ -593,7 +596,7 @@ uint32_t put_vmcs(struct vcpu_t *vcpu, preempt_flag *flags) vmcs_phy = hax_page_pa(cpu_data->vmcs_page); if (asm_vmclear(&vmcs_phy) != VMX_SUCCEED) { - hax_error("vmclear failed (%llx)\n", vmcs_phy); + hax_log(HAX_LOGE, "vmclear failed (%llx)\n", vmcs_phy); log_vmclear_err = 1; } @@ -646,19 +649,19 @@ static void cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_result_t result) { uint64_t error, reason; - hax_error("VM entry failed: RIP=%08lx\n", - (mword)vmread(vcpu, GUEST_RIP)); + hax_log(HAX_LOGE, "VM entry failed: RIP=%08lx\n", + (mword)vmread(vcpu, GUEST_RIP)); //dump_vmcs(); reason = vmread(vcpu, VM_EXIT_INFO_REASON); if (result == VMX_FAIL_VALID) { error = vmread(vcpu, VMX_INSTRUCTION_ERROR_CODE); - hax_error("VMfailValid. Prev exit: %llx. Error code: %llu (%s)\n", - reason, error, name_vmx_error(error)); + hax_log(HAX_LOGE, "VMfailValid. Prev exit: %llx. Error code: " + "%llu (%s)\n", reason, error, name_vmx_error(error)); } else { - hax_error("VMfailInvalid. Prev exit: %llx no error code\n", - reason); + hax_log(HAX_LOGE, "VMfailInvalid. 
Prev exit: %llx no error code\n", + reason); } } @@ -673,17 +676,17 @@ vmx_result_t cpu_vmxroot_leave(void) cpu_data->vmm_flag &= ~VMXON_HAX; restore_host_cr4_vmxe(cpu_data); } else { - hax_error("VMXOFF Failed..........\n"); + hax_log(HAX_LOGE, "VMXOFF Failed..........\n"); } } else { log_vmxoff_no = 1; #ifdef HAX_PLATFORM_DARWIN - hax_debug("Skipping VMXOFF because another VMM (VirtualBox or macOS" - " Hypervisor Framework) is running\n"); + hax_log(HAX_LOGD, "Skipping VMXOFF because another VMM (VirtualBox or " + "macOS Hypervisor Framework) is running\n"); #else // It should not go here in Win64/win32 result = VMX_FAIL_VALID; - hax_error("NO VMXOFF.......\n"); + hax_log(HAX_LOGE, "NO VMXOFF.......\n"); #endif } cpu_data->vmxoff_res = result; @@ -701,8 +704,8 @@ vmx_result_t cpu_vmxroot_enter(void) cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE); if (cpu_data->host_cr4_vmxe) { if (debug_vmcs_count % 100000 == 0) { - hax_debug("host VT has enabled!\n"); - hax_debug("Cr4 value = 0x%lx\n", get_cr4()); + hax_log(HAX_LOGD, "host VT has enabled!\n"); + hax_log(HAX_LOGD, "Cr4 value = 0x%lx\n", get_cr4()); log_host_cr4_vmxe = 1; log_host_cr4 = get_cr4(); } @@ -764,9 +767,9 @@ vmx_result_t cpu_vmxroot_enter(void) #endif if (fatal) { - hax_error("VMXON failed for region 0x%llx (result=0x%x, vmxe=%x)\n", - hax_page_pa(cpu_data->vmxon_page), (uint32_t)result, - (uint32_t)cpu_data->host_cr4_vmxe); + hax_log(HAX_LOGE, "VMXON failed for region 0x%llx (result=0x%x, " + "vmxe=%x)\n", hax_page_pa(cpu_data->vmxon_page), + (uint32_t)result, (uint32_t)cpu_data->host_cr4_vmxe); restore_host_cr4_vmxe(cpu_data); if (result == VMX_FAIL_INVALID) { log_vmxon_err_type1 = 1; diff --git a/core/dump.c b/core/dump.c index 2aed9a6d..d10a0741 100644 --- a/core/dump.c +++ b/core/dump.c @@ -195,30 +195,30 @@ void dump_vmcs(struct vcpu_t *vcpu) void dump_vmx_info(struct info_t *info) { - hax_info("VMCS Rev %d\n", info->_vmcs_revision_id); + hax_log(HAX_LOGI, "VMCS Rev %d\n", 
info->_vmcs_revision_id); - hax_info("VMX basic info : 0x%016llX\n", - info->_basic_info); - hax_info("VMX misc info : 0x%016llX\n", - info->_miscellaneous); - hax_info("VMX revision control : %u\n", - info->_vmcs_revision_id); - hax_info("VMX exit ctls : 0x%X, 0x%X\n", - info->exit_ctls_0, info->exit_ctls_1); - hax_info("VMX entry ctls : 0x%X, 0x%X\n", - info->entry_ctls_0, info->entry_ctls_1); - hax_info("VMX pin ctls : 0x%X, 0x%X\n", - info->pin_ctls_0, info->pin_ctls_1); - hax_info("VMX cpu prim ctrls : 0x%X, 0x%X\n", - info->pcpu_ctls_0, info->pcpu_ctls_1); - hax_info("VMX cpu sec ctrl : 0x%X, 0x%X\n", - info->scpu_ctls_0, info->scpu_ctls_1); - hax_info("VMX fixed CR0 bits : 0x%llX, %llX\n", - info->_cr0_fixed_0, info->_cr0_fixed_1); - hax_info("VMX fixed CR4 bits : 0x%llX, %llX\n", - info->_cr4_fixed_0, info->_cr4_fixed_1); - hax_info("VMX EPT/VPID caps : 0x%016llX\n", - info->_ept_cap); + hax_log(HAX_LOGI, "VMX basic info : 0x%016llX\n", + info->_basic_info); + hax_log(HAX_LOGI, "VMX misc info : 0x%016llX\n", + info->_miscellaneous); + hax_log(HAX_LOGI, "VMX revision control : %u\n", + info->_vmcs_revision_id); + hax_log(HAX_LOGI, "VMX exit ctls : 0x%X, 0x%X\n", + info->exit_ctls_0, info->exit_ctls_1); + hax_log(HAX_LOGI, "VMX entry ctls : 0x%X, 0x%X\n", + info->entry_ctls_0, info->entry_ctls_1); + hax_log(HAX_LOGI, "VMX pin ctls : 0x%X, 0x%X\n", + info->pin_ctls_0, info->pin_ctls_1); + hax_log(HAX_LOGI, "VMX cpu prim ctrls : 0x%X, 0x%X\n", + info->pcpu_ctls_0, info->pcpu_ctls_1); + hax_log(HAX_LOGI, "VMX cpu sec ctrl : 0x%X, 0x%X\n", + info->scpu_ctls_0, info->scpu_ctls_1); + hax_log(HAX_LOGI, "VMX fixed CR0 bits : 0x%llX, %llX\n", + info->_cr0_fixed_0, info->_cr0_fixed_1); + hax_log(HAX_LOGI, "VMX fixed CR4 bits : 0x%llX, %llX\n", + info->_cr4_fixed_0, info->_cr4_fixed_1); + hax_log(HAX_LOGI, "VMX EPT/VPID caps : 0x%016llX\n", + info->_ept_cap); } /*Remove this function. 
It only for debug*/ @@ -231,31 +231,31 @@ void dump_vmx_info(struct info_t *info) seg_desc = (struct seg_desc_t *)((mword)desc._base) + (cs >> 3); - hax_debug("\nsel: %x\n", cs >> 3); - hax_debug("type: %x\n", seg_desc->_type); - hax_debug("s: %x\n", seg_desc->_s); - hax_debug("present: %x\n", seg_desc->_present); - hax_debug("avl: %x\n", seg_desc->_avl); - hax_debug("long: %x\n", seg_desc->_longmode); - hax_debug("d/b: %x\n", seg_desc->_d); - hax_debug("g: %x\n", seg_desc->_granularity); - hax_debug("base0: %x\n", seg_desc->_base0); - hax_debug("limit: %x\n", seg_desc->_limit0); - hax_debug("dpl: %x\n", seg_desc->_limit0); + hax_log(HAX_LOGD, "\nsel: %x\n", cs >> 3); + hax_log(HAX_LOGD, "type: %x\n", seg_desc->_type); + hax_log(HAX_LOGD, "s: %x\n", seg_desc->_s); + hax_log(HAX_LOGD, "present: %x\n", seg_desc->_present); + hax_log(HAX_LOGD, "avl: %x\n", seg_desc->_avl); + hax_log(HAX_LOGD, "long: %x\n", seg_desc->_longmode); + hax_log(HAX_LOGD, "d/b: %x\n", seg_desc->_d); + hax_log(HAX_LOGD, "g: %x\n", seg_desc->_granularity); + hax_log(HAX_LOGD, "base0: %x\n", seg_desc->_base0); + hax_log(HAX_LOGD, "limit: %x\n", seg_desc->_limit0); + hax_log(HAX_LOGD, "dpl: %x\n", seg_desc->_limit0); - hax_debug("raw: %llx\n", seg_desc->_raw); + hax_log(HAX_LOGD, "raw: %llx\n", seg_desc->_raw); seg_desc = (struct seg_desc_t *)((mword)desc._base) + (ds >> 3); - hax_debug("\nsel: %x\n", ds >> 3); - hax_debug("type: %x\n", seg_desc->_type); - hax_debug("s: %x\n", seg_desc->_s); - hax_debug("present: %x\n", seg_desc->_present); - hax_debug("avl: %x\n", seg_desc->_avl); - hax_debug("long: %x\n", seg_desc->_longmode); - hax_debug("d/b: %x\n", seg_desc->_d); - hax_debug("g: %x\n", seg_desc->_granularity); - hax_debug("base0: %x\n", seg_desc->_base0); - hax_debug("limit: %x\n", seg_desc->_limit0); - hax_debug("dpl: %x\n", seg_desc->_dpl); - hax_debug("raw: %llx\n", seg_desc->_raw); + hax_log(HAX_LOGD, "\nsel: %x\n", ds >> 3); + hax_log(HAX_LOGD, "type: %x\n", seg_desc->_type); + 
hax_log(HAX_LOGD, "s: %x\n", seg_desc->_s); + hax_log(HAX_LOGD, "present: %x\n", seg_desc->_present); + hax_log(HAX_LOGD, "avl: %x\n", seg_desc->_avl); + hax_log(HAX_LOGD, "long: %x\n", seg_desc->_longmode); + hax_log(HAX_LOGD, "d/b: %x\n", seg_desc->_d); + hax_log(HAX_LOGD, "g: %x\n", seg_desc->_granularity); + hax_log(HAX_LOGD, "base0: %x\n", seg_desc->_base0); + hax_log(HAX_LOGD, "limit: %x\n", seg_desc->_limit0); + hax_log(HAX_LOGD, "dpl: %x\n", seg_desc->_dpl); + hax_log(HAX_LOGD, "raw: %llx\n", seg_desc->_raw); }*/ diff --git a/core/ept.c b/core/ept.c index 6b241048..cc6d3276 100644 --- a/core/ept.c +++ b/core/ept.c @@ -50,14 +50,15 @@ static uint64_t ept_capabilities; bool ept_set_caps(uint64_t caps) { if ((caps & EPT_BASIC_CAPS) != EPT_BASIC_CAPS) { - hax_warning("Broken host EPT support detected (caps=0x%llx)\n", caps); + hax_log(HAX_LOGW, "Broken host EPT support detected (caps=0x%llx)\n", + caps); return 0; } // ept_cap_invept_ac implies ept_cap_invept if (!(caps & ept_cap_invept)) { - hax_warning("ept_set_caps: Assuming support for INVEPT (caps=0x%llx)\n", - caps); + hax_log(HAX_LOGW, "ept_set_caps: Assuming support for INVEPT " + "(caps=0x%llx)\n", caps); caps |= ept_cap_invept; } @@ -106,9 +107,10 @@ bool ept_set_pte(hax_vm_t *hax_vm, hax_paddr_t gpa, hax_paddr_t hpa, uint emt, uint perm; epte_t *pde = ept_get_pde(ept, gpa); - // hax_debug("hpa %llx gpa %llx\n", hpa, gpa); + // hax_log(HAX_LOGD, "hpa %llx gpa %llx\n", hpa, gpa); if (which_g >= EPT_MAX_MEM_G) { - hax_error("Error: Guest's memory size is beyond %dG!\n", EPT_MAX_MEM_G); + hax_log(HAX_LOGE, "Error: Guest's memory size is beyond %dG!\n", + EPT_MAX_MEM_G); return false; } hax_mutex_lock(hax_vm->vm_lock); @@ -141,9 +143,9 @@ bool ept_set_pte(hax_vm_t *hax_vm, hax_paddr_t gpa, hax_paddr_t hpa, uint emt, pte = (epte_t *)pte_base + ept_get_pte_idx(gpa); // TODO: Just for debugging, need check QEMU for more information /* if (epte_is_present(pte)) { - * hax_debug("Can't change the pte 
entry!\n"); + * hax_log(HAX_LOGD, "Can't change the pte entry!\n"); * hax_mutex_unlock(hax_vm->vm_lock); - * hax_debug("\npte %llx\n", pte->val); + * hax_log(HAX_LOGD, "\npte %llx\n", pte->val); * hax_vunmap_pfn(pte_base); * return 0; * } @@ -162,7 +164,7 @@ bool ept_set_pte(hax_vm_t *hax_vm, hax_paddr_t gpa, hax_paddr_t hpa, uint emt, break; } default: { - hax_error("Unsupported mapping type 0x%x\n", mem_type); + hax_log(HAX_LOGE, "Unsupported mapping type 0x%x\n", mem_type); ret = false; goto out_unmap; } @@ -187,7 +189,7 @@ static bool ept_lookup(struct vcpu_t *vcpu, hax_paddr_t gpa, hax_paddr_t *hpa) hax_assert(ept->ept_root_page); if (which_g >= EPT_MAX_MEM_G) { - hax_debug("ept_lookup error!\n"); + hax_log(HAX_LOGD, "ept_lookup error!\n"); return 0; } @@ -249,13 +251,14 @@ bool ept_init(hax_vm_t *hax_vm) struct hax_ept *ept; if (hax_vm->ept) { - hax_debug("EPT has been created already!\n"); + hax_log(HAX_LOGD, "EPT: EPT has been created already!\n"); return 0; } ept = hax_vmalloc(sizeof(struct hax_ept), 0); if (!ept) { - hax_debug("EPT: No enough memory for creating EPT structure!\n"); + hax_log(HAX_LOGD, + "EPT: No enough memory for creating EPT structure!\n"); return 0; } memset(ept, 0, sizeof(struct hax_ept)); @@ -263,7 +266,7 @@ bool ept_init(hax_vm_t *hax_vm) page = hax_alloc_pages(EPT_PRE_ALLOC_PG_ORDER, 0, 1); if (!page) { - hax_debug("EPT: No enough memory for creating ept table!\n"); + hax_log(HAX_LOGD, "EPT: No enough memory for creating ept table!\n"); hax_vfree(hax_vm->ept, sizeof(struct hax_ept)); return 0; } @@ -290,7 +293,7 @@ bool ept_init(hax_vm_t *hax_vm) hax_init_list_head(&ept->ept_page_list); - hax_info("ept_init: Calling INVEPT\n"); + hax_log(HAX_LOGI, "ept_init: Calling INVEPT\n"); invept(hax_vm, EPT_INVEPT_SINGLE_CONTEXT); return 1; } @@ -306,7 +309,7 @@ void ept_free (hax_vm_t *hax_vm) if (!ept->ept_root_page) return; - hax_info("ept_free: Calling INVEPT\n"); + hax_log(HAX_LOGI, "ept_free: Calling INVEPT\n"); invept(hax_vm, 
EPT_INVEPT_SINGLE_CONTEXT); hax_list_entry_for_each_safe(page, n, &ept->ept_page_list, struct hax_page, list) { @@ -349,8 +352,8 @@ void invept(hax_vm_t *hax_vm, uint type) uint32_t res; if (!ept_has_cap(ept_cap_invept)) { - hax_warning("INVEPT was not called due to missing host support" - " (ept_capabilities=0x%llx)\n", ept_capabilities); + hax_log(HAX_LOGW, "INVEPT was not called due to missing host support" + " (ept_capabilities=0x%llx)\n", ept_capabilities); return; } @@ -377,7 +380,7 @@ void invept(hax_vm_t *hax_vm, uint type) &bundle); /* - * It is not safe to call hax_error(), etc. from invept_smpfunc(), + * It is not safe to call hax_log(), etc. from invept_smpfunc(), * especially on macOS; instead, invept_smpfunc() writes VMX instruction * results in hax_cpu_data[], which are checked below. */ @@ -390,24 +393,24 @@ void invept(hax_vm_t *hax_vm, uint type) cpu_data = hax_cpu_data[cpu_id]; if (!cpu_data) { // Should never happen - hax_warning("invept: hax_cpu_data[%d] is NULL\n", cpu_id); + hax_log(HAX_LOGW, "invept: hax_cpu_data[%d] is NULL\n", cpu_id); continue; } res = (uint32_t)cpu_data->vmxon_res; if (res != VMX_SUCCEED) { - hax_error("[Processor #%d] INVEPT was not called, because VMXON" - " failed (err=0x%x)\n", cpu_id, res); + hax_log(HAX_LOGE, "[Processor #%d] INVEPT was not called, because " + "VMXON failed (err=0x%x)\n", cpu_id, res); } else { res = (uint32_t)cpu_data->invept_res; if (res != VMX_SUCCEED) { - hax_error("[Processor #%d] INVEPT failed (err=0x%x)\n", cpu_id, - res); + hax_log(HAX_LOGE, "[Processor #%d] INVEPT failed (err=0x%x)\n", + cpu_id, res); } res = (uint32_t)cpu_data->vmxoff_res; if (res != VMX_SUCCEED) { - hax_error("[Processor #%d] INVEPT was called, but VMXOFF failed" - " (err=0x%x)\n", cpu_id, res); + hax_log(HAX_LOGE, "[Processor #%d] INVEPT was called, but " + "VMXOFF failed (err=0x%x)\n", cpu_id, res); } } } diff --git a/core/ept2.c b/core/ept2.c index 7f3d1778..761ee8b2 100644 --- a/core/ept2.c +++ b/core/ept2.c @@ 
-42,12 +42,13 @@ void ept_handle_mapping_removed(hax_gpa_space_listener *listener, hax_ept_tree *tree; int ret; - hax_info("%s: %s=>MMIO: start_gfn=0x%llx, npages=0x%llx, uva=0x%llx\n", - __func__, is_rom ? "ROM" : "RAM", start_gfn, npages, uva); + hax_log(HAX_LOGI, "%s: %s=>MMIO: start_gfn=0x%llx, npages=0x%llx, " + "uva=0x%llx\n", __func__, is_rom ? "ROM" : "RAM", start_gfn, + npages, uva); hax_assert(listener != NULL); tree = (hax_ept_tree *) listener->opaque; ret = ept_tree_invalidate_entries(tree, start_gfn, npages); - hax_info("%s: Invalidated %d PTEs\n", __func__, ret); + hax_log(HAX_LOGI, "%s: Invalidated %d PTEs\n", __func__, ret); } void ept_handle_mapping_changed(hax_gpa_space_listener *listener, @@ -60,13 +61,14 @@ void ept_handle_mapping_changed(hax_gpa_space_listener *listener, hax_ept_tree *tree; int ret; - hax_info("%s: %s=>%s: start_gfn=0x%llx, npages=0x%llx, old_uva=0x%llx," - " new_uva=0x%llx\n", __func__, was_rom ? "ROM" : "RAM", - is_rom ? "ROM" : "RAM", start_gfn, npages, old_uva, new_uva); + hax_log(HAX_LOGI, "%s: %s=>%s: start_gfn=0x%llx, npages=0x%llx, " + "old_uva=0x%llx, new_uva=0x%llx\n", __func__, + was_rom ? "ROM" : "RAM", is_rom ? 
"ROM" : "RAM", start_gfn, + npages, old_uva, new_uva); hax_assert(listener != NULL); tree = (hax_ept_tree *) listener->opaque; ret = ept_tree_invalidate_entries(tree, start_gfn, npages); - hax_info("%s: Invalidated %d PTEs\n", __func__, ret); + hax_log(HAX_LOGI, "%s: Invalidated %d PTEs\n", __func__, ret); } int ept_handle_access_violation(hax_gpa_space *gpa_space, hax_ept_tree *tree, @@ -89,7 +91,8 @@ int ept_handle_access_violation(hax_gpa_space *gpa_space, hax_ept_tree *tree, slot = memslot_find(gpa_space, gfn); if (!slot) { // The faulting GPA is reserved for MMIO - hax_debug("%s: gpa=0x%llx is reserved for MMIO\n", __func__, gpa); + hax_log(HAX_LOGD, "%s: gpa=0x%llx is reserved for MMIO\n", + __func__, gpa); return 0; } @@ -99,14 +102,14 @@ int ept_handle_access_violation(hax_gpa_space *gpa_space, hax_ept_tree *tree, if ((qual.raw & HAX_EPT_ACC_W) && !(combined_perm & HAX_EPT_PERM_W) && (slot->flags == HAX_MEMSLOT_READONLY)) { // Handle a write to ROM/ROM device as MMIO - hax_debug("%s: write to a read-only gpa=0x%llx\n", - __func__, gpa); + hax_log(HAX_LOGD, "%s: write to a read-only gpa=0x%llx\n", + __func__, gpa); return 0; } // See IA SDM Vol. 
3C 27.2.1 Table 27-7, especially note 2 - hax_error("%s: Cannot handle the case where the PTE corresponding to" - " the faulting GPA is present: qual=0x%llx, gpa=0x%llx\n", - __func__, qual.raw, gpa); + hax_log(HAX_LOGE, "%s: Cannot handle the case where the PTE " + "corresponding to the faulting GPA is present: qual=0x%llx, " + "gpa=0x%llx\n", __func__, qual.raw, gpa); return -EACCES; } @@ -135,12 +138,12 @@ int ept_handle_access_violation(hax_gpa_space *gpa_space, hax_ept_tree *tree, hax_assert(offset_within_block < block->size); chunk = ramblock_get_chunk(block, offset_within_block, true); if (!chunk) { - hax_error("%s: Failed to grab the RAM chunk for %s gpa=0x%llx:" - " slot.base_gfn=0x%llx, slot.offset_within_block=0x%llx," - " offset_within_slot=0x%llx, block.base_uva=0x%llx," - " block.size=0x%llx\n", __func__, is_rom ? "ROM" : "RAM", gpa, - slot->base_gfn, slot->offset_within_block, offset_within_slot, - block->base_uva, block->size); + hax_log(HAX_LOGE, "%s: Failed to grab the RAM chunk for %s gpa=0x%llx:" + " slot.base_gfn=0x%llx, slot.offset_within_block=0x%llx," + " offset_within_slot=0x%llx, block.base_uva=0x%llx," + " block.size=0x%llx\n", __func__, is_rom ? 
"ROM" : "RAM", gpa, + slot->base_gfn, slot->offset_within_block, offset_within_slot, + block->base_uva, block->size); return -ENOMEM; } @@ -164,14 +167,14 @@ int ept_handle_access_violation(hax_gpa_space *gpa_space, hax_ept_tree *tree, size >> PG_ORDER_4K, chunk, offset_within_chunk, slot->flags); if (ret < 0) { - hax_error("%s: Failed to create PTEs for GFN range: ret=%d, gpa=0x%llx," - " start_gfn=0x%llx, npages=%llu\n", __func__, ret, gpa, - start_gpa >> PG_ORDER_4K, size >> PG_ORDER_4K); + hax_log(HAX_LOGE, "%s: Failed to create PTEs for GFN range: ret=%d, " + "gpa=0x%llx, start_gfn=0x%llx, npages=%llu\n", __func__, ret, + gpa, start_gpa >> PG_ORDER_4K, size >> PG_ORDER_4K); return ret; } - hax_debug("%s: Created %d PTEs for GFN range: gpa=0x%llx, start_gfn=0x%llx," - " npages=%llu\n", __func__, ret, gpa, start_gpa >> PG_ORDER_4K, - size >> PG_ORDER_4K); + hax_log(HAX_LOGD, "%s: Created %d PTEs for GFN range: gpa=0x%llx, " + "start_gfn=0x%llx, npages=%llu\n", __func__, ret, gpa, + start_gpa >> PG_ORDER_4K, size >> PG_ORDER_4K); return 1; } @@ -197,9 +200,9 @@ static void fix_epte(hax_ept_tree *tree, uint64_t gfn, int level, hax_epte *epte // Entries that are not present are never checked by hardware for // misconfigurations if (old_epte.value) { - hax_warning("%s: Entry is not present but some bits are set:" - " value=0x%llx, level=%d, gfn=0x%llx\n", __func__, - old_epte.value, level, gfn); + hax_log(HAX_LOGW, "%s: Entry is not present but some bits are set:" + " value=0x%llx, level=%d, gfn=0x%llx\n", __func__, + old_epte.value, level, gfn); } return; } @@ -240,8 +243,9 @@ static void fix_epte(hax_ept_tree *tree, uint64_t gfn, int level, hax_epte *epte // Clear all reserved bits new_epte.value &= preserved_bits; } - hax_warning("%s: gfn=0x%llx, level=%d, value=0x%llx, new_value=0x%llx\n", - __func__, gfn, level, old_epte.value, new_epte.value); + hax_log(HAX_LOGW, "%s: gfn=0x%llx, level=%d, value=0x%llx, " + "new_value=0x%llx\n", __func__, gfn, level, 
old_epte.value, + new_epte.value); if (epte->value != new_epte.value) { bundle->misconfigured_count++; if (epte->pfn != new_epte.pfn) { @@ -251,8 +255,8 @@ static void fix_epte(hax_ept_tree *tree, uint64_t gfn, int level, hax_epte *epte &epte->value)) { // *epte != old_epte, probably because another thread has changed // this EPT entry, so just assume the entry has been fixed - hax_warning("%s: Entry has changed: current_value=0x%llx\n", - __func__, epte->value); + hax_log(HAX_LOGW, "%s: Entry has changed: current_value=0x%llx\n", + __func__, epte->value); } } } @@ -275,17 +279,18 @@ int ept_handle_misconfiguration(hax_gpa_space *gpa_space, hax_ept_tree *tree, bundle.slot = memslot_find(gpa_space, gfn); if (!bundle.slot) { // The GPA being accessed is reserved for MMIO - hax_warning("%s: gpa=0x%llx is reserved for MMIO\n", __func__, gpa); + hax_log(HAX_LOGW, "%s: gpa=0x%llx is reserved for MMIO\n", + __func__, gpa); } ept_tree_walk(tree, gfn, fix_epte, &bundle); if (bundle.error_count) { - hax_error("%s: Failed to fix %d/%d misconfigured entries for" - " gpa=0x%llx\n", __func__, bundle.error_count, - bundle.misconfigured_count, gpa); + hax_log(HAX_LOGE, "%s: Failed to fix %d/%d misconfigured entries for" + " gpa=0x%llx\n", __func__, bundle.error_count, + bundle.misconfigured_count, gpa); return -bundle.error_count; } - hax_warning("%s: Fixed %d misconfigured entries for gpa=0x%llx\n", __func__, - bundle.misconfigured_count, gpa); + hax_log(HAX_LOGW, "%s: Fixed %d misconfigured entries for gpa=0x%llx\n", + __func__, bundle.misconfigured_count, gpa); return bundle.misconfigured_count; } diff --git a/core/ept_tree.c b/core/ept_tree.c index cd8c034f..593a0317 100644 --- a/core/ept_tree.c +++ b/core/ept_tree.c @@ -84,7 +84,8 @@ static hax_ept_page * ept_tree_alloc_page(hax_ept_tree *tree) } ret = hax_alloc_page_frame(HAX_PAGE_ALLOC_ZEROED, &page->memdesc); if (ret) { - hax_error("%s: hax_alloc_page_frame() returned %d\n", __func__, ret); + hax_log(HAX_LOGE, "%s: 
hax_alloc_page_frame() returned %d\n", + __func__, ret); hax_vfree(page, sizeof(*page)); return NULL; } @@ -150,7 +151,7 @@ int ept_tree_init(hax_ept_tree *tree) uint64_t pfn; if (!tree) { - hax_error("%s: tree == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: tree == NULL\n", __func__); return -EINVAL; } @@ -160,13 +161,13 @@ int ept_tree_init(hax_ept_tree *tree) tree->lock = hax_spinlock_alloc_init(); if (!tree->lock) { - hax_error("%s: Failed to allocate EPT tree lock\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate EPT tree lock\n", __func__); return -ENOMEM; } root_page = ept_tree_alloc_page(tree); if (!root_page) { - hax_error("%s: Failed to allocate EPT root page\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate EPT root page\n", __func__); hax_spinlock_free(tree->lock); return -ENOMEM; } @@ -183,7 +184,7 @@ int ept_tree_init(hax_ept_tree *tree) tree->eptp.ept_mt = HAX_EPT_MEMTYPE_WB; tree->eptp.max_level = HAX_EPT_LEVEL_MAX; tree->eptp.pfn = pfn; - hax_info("%s: eptp=0x%llx\n", __func__, tree->eptp.value); + hax_log(HAX_LOGI, "%s: eptp=0x%llx\n", __func__, tree->eptp.value); return 0; } @@ -192,13 +193,14 @@ static void ept_page_free(hax_ept_page *page) int ret; if (!page) { - hax_warning("%s: page == NULL\n", __func__); + hax_log(HAX_LOGW, "%s: page == NULL\n", __func__); return; } ret = hax_free_page_frame(&page->memdesc); if (ret) { - hax_warning("%s: hax_free_page_frame() returned %d\n", __func__, ret); + hax_log(HAX_LOGW, "%s: hax_free_page_frame() returned %d\n", + __func__, ret); // Still need to free the hax_ept_page object } hax_vfree(page, sizeof(*page)); @@ -210,7 +212,7 @@ int ept_tree_free(hax_ept_tree *tree) int i = 0; if (!tree) { - hax_error("%s: tree == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: tree == NULL\n", __func__); return -EINVAL; } @@ -220,7 +222,7 @@ int ept_tree_free(hax_ept_tree *tree) ept_page_free(page); i++; } - hax_info("%s: Total %d EPT page(s) freed\n", __func__, i); + hax_log(HAX_LOGI, "%s: Total %d EPT 
page(s) freed\n", __func__, i); hax_spinlock_free(tree->lock); return 0; @@ -307,8 +309,8 @@ static hax_epte * ept_tree_get_next_table(hax_ept_tree *tree, uint64_t gfn, page = ept_tree_alloc_page(tree); if (!page) { epte->value = 0; - hax_error("%s: Failed to create EPT page table: gfn=0x%llx," - " next_level=%d\n", __func__, gfn, next_level); + hax_log(HAX_LOGE, "%s: Failed to create EPT page table: gfn=0x%llx," + " next_level=%d\n", __func__, gfn, next_level); return NULL; } pfn = hax_get_pfn_phys(&page->memdesc); @@ -332,10 +334,10 @@ static hax_epte * ept_tree_get_next_table(hax_ept_tree *tree, uint64_t gfn, epte->value = temp_epte.value; next_table = (hax_epte *) kva; - hax_debug("%s: Created EPT page table: gfn=0x%llx, next_level=%d," - " pfn=0x%llx, kva=%p, freq_page_index=%ld\n", __func__, gfn, - next_level, pfn, kva, freq_page ? freq_page - tree->freq_pages - : -1); + hax_log(HAX_LOGD, "%s: Created EPT page table: gfn=0x%llx, " + "next_level=%d, pfn=0x%llx, kva=%p, freq_page_index=%ld\n", + __func__, gfn, next_level, pfn, kva, + freq_page ? freq_page - tree->freq_pages : -1); } else { // !hax_cmpxchg64(0, INVALID_EPTE.value, &epte->value) // epte->value != 0, which could mean epte->perm != HAX_EPT_PERM_NONE, // i.e. 
the EPT entry pointing to the next-level EPT page table is @@ -348,10 +350,12 @@ static hax_epte * ept_tree_get_next_table(hax_ept_tree *tree, uint64_t gfn, // Eventually the other thread will set epte->pfn to either a valid // PFN or 0 if (!(++i % 10000)) { // 10^4 - hax_info("%s: In iteration %d of while loop\n", __func__, i); + hax_log(HAX_LOGI, "%s: In iteration %d of while loop\n", + __func__, i); if (i == 100000000) { // 10^8 (< INT_MAX) - hax_error("%s: Breaking out of infinite loop: gfn=0x%llx," - " next_level=%d\n", __func__, gfn, next_level); + hax_log(HAX_LOGE, "%s: Breaking out of infinite loop: " + "gfn=0x%llx, next_level=%d\n", __func__, gfn, + next_level); return NULL; } } @@ -359,9 +363,9 @@ static hax_epte * ept_tree_get_next_table(hax_ept_tree *tree, uint64_t gfn, if (!epte->value) { // The other thread has cleared epte->value, indicating it could not // create the next-level page table - hax_error("%s: Another thread tried to create the same EPT page" - " table first, but failed: gfn=0x%llx, next_level=%d\n", - __func__, gfn, next_level); + hax_log(HAX_LOGE, "%s: Another thread tried to create the same EPT " + "page table first, but failed: gfn=0x%llx, next_level=%d\n", + __func__, gfn, next_level); return NULL; } @@ -377,8 +381,8 @@ static hax_epte * ept_tree_get_next_table(hax_ept_tree *tree, uint64_t gfn, hax_assert(kmap != NULL); kva = hax_map_page_frame(epte->pfn, kmap); if (!kva) { - hax_error("%s: Failed to map pfn=0x%llx into KVA space\n", - __func__, epte->pfn); + hax_log(HAX_LOGE, "%s: Failed to map pfn=0x%llx into " + "KVA space\n", __func__, epte->pfn); } } next_table = (hax_epte *) kva; @@ -406,11 +410,11 @@ int ept_tree_create_entry(hax_ept_tree *tree, uint64_t gfn, hax_epte value) hax_epte *pte; if (!tree) { - hax_error("%s: tree == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: tree == NULL\n", __func__); return -EINVAL; } if (value.perm == HAX_EPT_PERM_NONE) { - hax_error("%s: value.perm == 0\n", __func__); + hax_log(HAX_LOGE, "%s: 
value.perm == 0\n", __func__); return -EINVAL; } @@ -426,8 +430,8 @@ int ept_tree_create_entry(hax_ept_tree *tree, uint64_t gfn, hax_epte value) hax_assert(ret == 0); // prev_kmap is now filled with zeroes if (!table) { - hax_error("%s: Failed to grab the next-level EPT page table:" - " gfn=0x%llx, level=%d\n", __func__, gfn, level); + hax_log(HAX_LOGE, "%s: Failed to grab the next-level EPT page " + "table: gfn=0x%llx, level=%d\n", __func__, gfn, level); return -ENOMEM; } // Swap prev_kmap with kmap @@ -441,14 +445,15 @@ int ept_tree_create_entry(hax_ept_tree *tree, uint64_t gfn, hax_epte value) if (!hax_cmpxchg64(0, value.value, &pte->value)) { // pte->value != 0, implying pte->perm != HAX_EPT_PERM_NONE if (pte->value != value.value) { - hax_error("%s: A different PTE corresponding to gfn=0x%llx already" - " exists: old_value=0x%llx, new_value=0x%llx\n", __func__, - gfn, pte->value, value.value); + hax_log(HAX_LOGE, "%s: A different PTE corresponding to gfn=0x%llx" + " already exists: old_value=0x%llx, new_value=0x%llx\n", + __func__, gfn, pte->value, value.value); hax_unmap_page_frame(&kmap); return -EEXIST; } else { - hax_info("%s: Another thread has already created the same PTE:" - " gfn=0x%llx, value=0x%llx\n", __func__, gfn, value.value); + hax_log(HAX_LOGI, "%s: Another thread has already created the" + " same PTE: gfn=0x%llx, value=0x%llx\n", + __func__, gfn, value.value); } } @@ -491,8 +496,8 @@ int ept_tree_create_entries(hax_ept_tree *tree, uint64_t start_gfn, uint64_t npa pdpt = ept_tree_get_next_table(tree, gfn, HAX_EPT_LEVEL_PML4, pml4, &pdpt_kmap, true, NULL, NULL); if (!pdpt) { - hax_error("%s: Failed to grab the EPT PDPT for %s gfn=0x%llx\n", - __func__, is_rom ? "ROM" : "RAM", gfn); + hax_log(HAX_LOGE, "%s: Failed to grab the EPT PDPT for %s gfn=0x%llx\n", + __func__, is_rom ? 
"ROM" : "RAM", gfn); ret = -ENOMEM; goto out; } @@ -500,8 +505,8 @@ int ept_tree_create_entries(hax_ept_tree *tree, uint64_t start_gfn, uint64_t npa pd = ept_tree_get_next_table(tree, gfn, HAX_EPT_LEVEL_PDPT, pdpt, &pd_kmap, true, NULL, NULL); if (!pd) { - hax_error("%s: Failed to grab the EPT PD for %s gfn=0x%llx\n", __func__, - is_rom ? "ROM" : "RAM", gfn); + hax_log(HAX_LOGE, "%s: Failed to grab the EPT PD for %s gfn=0x%llx\n", + __func__, is_rom ? "ROM" : "RAM", gfn); ret = -ENOMEM; goto out_pdpt; } @@ -509,8 +514,8 @@ int ept_tree_create_entries(hax_ept_tree *tree, uint64_t start_gfn, uint64_t npa pt = ept_tree_get_next_table(tree, gfn, HAX_EPT_LEVEL_PD, pd, &pt_kmap, true, NULL, NULL); if (!pt) { - hax_error("%s: Failed to grab the EPT PT for %s gfn=0x%llx\n", __func__, - is_rom ? "ROM" : "RAM", gfn); + hax_log(HAX_LOGE, "%s: Failed to grab the EPT PT for %s gfn=0x%llx\n", + __func__, is_rom ? "ROM" : "RAM", gfn); ret = -ENOMEM; goto out_pd; } @@ -539,16 +544,17 @@ int ept_tree_create_entries(hax_ept_tree *tree, uint64_t start_gfn, uint64_t npa if (!hax_cmpxchg64(0, new_pte.value, &pte->value)) { // pte->value != 0, implying pte->perm != HAX_EPT_PERM_NONE if (pte->value != new_pte.value) { - hax_error("%s: A different PTE corresponding to %s gfn=0x%llx" - " already exists: old_value=0x%llx, new_value=0x%llx" - "\n", __func__, is_rom ? "ROM" : "RAM", gfn, - pte->value, new_pte.value); + hax_log(HAX_LOGE, "%s: A different PTE corresponding to %s " + "gfn=0x%llx already exists: old_value=0x%llx, " + "new_value=0x%llx\n", __func__, is_rom ? "ROM" : "RAM", + gfn, pte->value, new_pte.value); ret = -EEXIST; goto out_pt; } else { - hax_debug("%s: Another thread has already created the same PTE:" - " gfn=0x%llx, value=0x%llx, is_rom=%s\n", __func__, - gfn, new_pte.value, is_rom ? "true" : "false"); + hax_log(HAX_LOGD, "%s: Another thread has already created the " + "same PTE: gfn=0x%llx, value=0x%llx, is_rom=%s\n", + __func__, gfn, new_pte.value, + is_rom ? 
"true" : "false"); } } else { // pte->value was 0, but has been set to new_pte.value @@ -631,11 +637,11 @@ void ept_tree_walk(hax_ept_tree *tree, uint64_t gfn, epte_visitor visit_epte, hax_epte *pte; if (!tree) { - hax_error("%s: tree == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: tree == NULL\n", __func__); return; } if (!visit_epte) { - hax_warning("%s: visit_epte == NULL\n", __func__); + hax_log(HAX_LOGW, "%s: visit_epte == NULL\n", __func__); return; } @@ -683,8 +689,8 @@ void invalidate_pte(hax_ept_tree *tree, uint64_t gfn, int level, hax_epte *epte, return; } - hax_info("%s: Invalidating PTE: gfn=0x%llx, value=0x%llx\n", __func__, gfn, - pte->value); + hax_log(HAX_LOGI, "%s: Invalidating PTE: gfn=0x%llx, value=0x%llx\n", + __func__, gfn, pte->value); ept_tree_lock(tree); pte->value = 0; // implies pte->perm == HAX_EPT_PERM_NONE ept_tree_unlock(tree); @@ -708,7 +714,7 @@ int ept_tree_invalidate_entries(hax_ept_tree *tree, uint64_t start_gfn, int modified_count = 0; if (!tree) { - hax_error("%s: tree == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: tree == NULL\n", __func__); return -EINVAL; } @@ -720,7 +726,8 @@ int ept_tree_invalidate_entries(hax_ept_tree *tree, uint64_t start_gfn, } if (modified_count) { if (hax_test_and_set_bit(0, (uint64_t *) &tree->invept_pending)) { - hax_warning("%s: INVEPT pending flag is already set\n", __func__); + hax_log(HAX_LOGW, "%s: INVEPT pending flag is already set\n", + __func__); } } return modified_count; diff --git a/core/gpa_space.c b/core/gpa_space.c index 0ae4b9eb..ba67b456 100644 --- a/core/gpa_space.c +++ b/core/gpa_space.c @@ -39,7 +39,7 @@ int gpa_space_init(hax_gpa_space *gpa_space) int ret = 0; if (!gpa_space) { - hax_error("gpa_space_init: param gpa_space is null!\n"); + hax_log(HAX_LOGE, "gpa_space_init: param gpa_space is null!\n"); return -EINVAL; } @@ -79,7 +79,7 @@ void gpa_space_free(hax_gpa_space *gpa_space) { hax_gpa_space_listener *listener, *tmp; if (!gpa_space) { - hax_error("gpa_space_free: invalid 
param!\n"); + hax_log(HAX_LOGE, "gpa_space_free: invalid param!\n"); return; } @@ -100,7 +100,7 @@ void gpa_space_add_listener(hax_gpa_space *gpa_space, hax_gpa_space_listener *listener) { if (!gpa_space || !listener) { - hax_error("gpa_space_add_listener: invalid param!\n"); + hax_log(HAX_LOGE, "gpa_space_add_listener: invalid param!\n"); return; } @@ -112,7 +112,7 @@ void gpa_space_remove_listener(hax_gpa_space *gpa_space, hax_gpa_space_listener *listener) { if (!gpa_space || !listener) { - hax_error("gpa_space_remove_listener: invalid param!\n"); + hax_log(HAX_LOGE, "gpa_space_remove_listener: invalid param!\n"); return; } @@ -139,7 +139,7 @@ static int gpa_space_map_range(hax_gpa_space *gpa_space, uint64_t start_gpa, void *kva; if (len < 0) { - hax_error("%s: len=%d < 0\n", __func__, len); + hax_log(HAX_LOGE, "%s: len=%d < 0\n", __func__, len); return -EINVAL; } if (!len) { @@ -154,18 +154,18 @@ static int gpa_space_map_range(hax_gpa_space *gpa_space, uint64_t start_gpa, npages = (size + PAGE_SIZE_4K - 1) >> PG_ORDER_4K; slot = memslot_find(gpa_space, gfn); if (!slot) { - hax_error("%s: start_gpa=0x%llx is reserved for MMIO\n", __func__, - start_gpa); + hax_log(HAX_LOGE, "%s: start_gpa=0x%llx is reserved for MMIO\n", + __func__, start_gpa); return -EINVAL; } if (writable) { *writable = !(slot->flags & HAX_MEMSLOT_READONLY); } if (gfn + npages > slot->base_gfn + slot->npages) { - hax_warning("%s: GPA range spans more than one memslot:" - " start_gpa=0x%llx, len=%d, slot_base_gfn=0x%llx," - " slot_npages=%llu, gfn=0x%llx, npages=%u\n", __func__, - start_gpa, len, slot->base_gfn, slot->npages, gfn, npages); + hax_log(HAX_LOGW, "%s: GPA range spans more than one memslot:" + " start_gpa=0x%llx, len=%d, slot_base_gfn=0x%llx," + " slot_npages=%llu, gfn=0x%llx, npages=%u\n", __func__, + start_gpa, len, slot->base_gfn, slot->npages, gfn, npages); npages = (uint) (slot->base_gfn + slot->npages - gfn); size = npages << PG_ORDER_4K; } @@ -175,25 +175,25 @@ static int 
gpa_space_map_range(hax_gpa_space *gpa_space, uint64_t start_gpa, slot->offset_within_block; chunk = ramblock_get_chunk(block, offset_within_block, true); if (!chunk) { - hax_error("%s: ramblock_get_chunk() failed: start_gpa=0x%llx\n", - __func__, start_gpa); + hax_log(HAX_LOGE, "%s: ramblock_get_chunk() failed: start_gpa=0x%llx\n", + __func__, start_gpa); return -ENOMEM; } offset_within_chunk = offset_within_block - (chunk->base_uva - block->base_uva); if (offset_within_chunk + size > chunk->size) { - hax_warning("%s: GPA range spans more than one chunk: start_gpa=0x%llx," - " len=%d, offset_within_chunk=0x%llx, size=0x%x," - " chunk_size=0x%llx\n", __func__, start_gpa, len, - offset_within_chunk, size, chunk->size); + hax_log(HAX_LOGW, "%s: GPA range spans more than one chunk: " + "start_gpa=0x%llx, len=%d, offset_within_chunk=0x%llx, " + "size=0x%x, chunk_size=0x%llx\n", __func__, start_gpa, len, + offset_within_chunk, size, chunk->size); size = (uint) (chunk->size - offset_within_chunk); } // Assuming kmap != NULL kva = hax_map_user_pages(&chunk->memdesc, offset_within_chunk, size, kmap); if (!kva) { - hax_error("%s: hax_map_user_pages() failed: start_gpa=0x%llx, len=%d\n", - __func__, start_gpa, len); + hax_log(HAX_LOGE, "%s: hax_map_user_pages() failed: start_gpa=0x%llx," + " len=%d\n", __func__, start_gpa, len); return -ENOMEM; } // Assuming buf != NULL @@ -209,22 +209,22 @@ int gpa_space_read_data(hax_gpa_space *gpa_space, uint64_t start_gpa, int len, int ret, nbytes; if (!data) { - hax_error("%s: data == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: data == NULL\n", __func__); return -EINVAL; } ret = gpa_space_map_range(gpa_space, start_gpa, len, &buf, &kmap, NULL); if (ret < 0) { - hax_error("%s: gpa_space_map_range() failed: start_gpa=0x%llx," - " len=%d\n", __func__, start_gpa, len); + hax_log(HAX_LOGE, "%s: gpa_space_map_range() failed: start_gpa=0x%llx," + " len=%d\n", __func__, start_gpa, len); return ret; } nbytes = ret; if (nbytes < len) { - 
hax_warning("%s: Not enough bytes readable from guest RAM: nbytes=%d," - " start_gpa=0x%llx, len=%d\n", __func__, nbytes, start_gpa, - len); + hax_log(HAX_LOGW, "%s: Not enough bytes readable from guest RAM: " + "nbytes=%d, start_gpa=0x%llx, len=%d\n", __func__, nbytes, + start_gpa, len); if (!nbytes) { return 0; } @@ -232,8 +232,8 @@ int gpa_space_read_data(hax_gpa_space *gpa_space, uint64_t start_gpa, int len, memcpy_s(data, nbytes, buf, nbytes); ret = hax_unmap_user_pages(&kmap); if (ret) { - hax_warning("%s: hax_unmap_user_pages() failed: ret=%d\n", __func__, - ret); + hax_log(HAX_LOGW, "%s: hax_unmap_user_pages() failed: ret=%d\n", + __func__, ret); // This is not a fatal error, so ignore it } return nbytes; @@ -248,28 +248,28 @@ int gpa_space_write_data(hax_gpa_space *gpa_space, uint64_t start_gpa, int len, int ret, nbytes; if (!data) { - hax_error("%s: data == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: data == NULL\n", __func__); return -EINVAL; } ret = gpa_space_map_range(gpa_space, start_gpa, len, &buf, &kmap, &writable); if (ret < 0) { - hax_error("%s: gpa_space_map_range() failed: start_gpa=0x%llx," - " len=%d\n", __func__, start_gpa, len); + hax_log(HAX_LOGE, "%s: gpa_space_map_range() failed: start_gpa=0x%llx," + " len=%d\n", __func__, start_gpa, len); return ret; } if (!writable) { - hax_error("%s: Cannot write to ROM: start_gpa=0x%llx, len=%d\n", - __func__, start_gpa, len); + hax_log(HAX_LOGE, "%s: Cannot write to ROM: start_gpa=0x%llx, len=%d\n", + __func__, start_gpa, len); return -EACCES; } nbytes = ret; if (nbytes < len) { - hax_warning("%s: Not enough bytes writable to guest RAM: nbytes=%d," - " start_gpa=0x%llx, len=%d\n", __func__, nbytes, start_gpa, - len); + hax_log(HAX_LOGW, "%s: Not enough bytes writable to guest RAM: " + "nbytes=%d, start_gpa=0x%llx, len=%d\n", __func__, nbytes, + start_gpa, len); if (!nbytes) { return 0; } @@ -277,8 +277,8 @@ int gpa_space_write_data(hax_gpa_space *gpa_space, uint64_t start_gpa, int len, memcpy_s(buf, 
nbytes, data, nbytes); ret = hax_unmap_user_pages(&kmap); if (ret) { - hax_warning("%s: hax_unmap_user_pages() failed: ret=%d\n", __func__, - ret); + hax_log(HAX_LOGW, "%s: hax_unmap_user_pages() failed: ret=%d\n", + __func__, ret); // This is not a fatal error, so ignore it } return nbytes; @@ -296,7 +296,8 @@ void * gpa_space_map_page(hax_gpa_space *gpa_space, uint64_t gfn, ret = gpa_space_map_range(gpa_space, gfn << PG_ORDER_4K, PAGE_SIZE_4K, &buf, kmap, writable); if (ret < PAGE_SIZE_4K) { - hax_error("%s: gpa_space_map_range() returned %d\n", __func__, ret); + hax_log(HAX_LOGE, "%s: gpa_space_map_range() returned %d\n", + __func__, ret); return NULL; } kva = (void *) buf; @@ -311,7 +312,8 @@ void gpa_space_unmap_page(hax_gpa_space *gpa_space, hax_kmap_user *kmap) hax_assert(kmap != NULL); ret = hax_unmap_user_pages(kmap); if (ret) { - hax_warning("%s: hax_unmap_user_pages() returned %d\n", __func__, ret); + hax_log(HAX_LOGW, "%s: hax_unmap_user_pages() returned %d\n", + __func__, ret); } } @@ -328,7 +330,8 @@ uint64_t gpa_space_get_pfn(hax_gpa_space *gpa_space, uint64_t gfn, uint8_t *flag slot = memslot_find(gpa_space, gfn); if (!slot) { // The gfn is reserved for MMIO - hax_debug("%s: gfn=0x%llx is reserved for MMIO\n", __func__, gfn); + hax_log(HAX_LOGD, "%s: gfn=0x%llx is reserved for MMIO\n", + __func__, gfn); if (flags) { *flags = HAX_MEMSLOT_INVALID; } @@ -347,13 +350,13 @@ uint64_t gpa_space_get_pfn(hax_gpa_space *gpa_space, uint64_t gfn, uint8_t *flag hax_assert(offset_within_block < block->size); chunk = ramblock_get_chunk(block, offset_within_block, true); if (!chunk) { - hax_error("%s: Failed to grab the RAM chunk for %s gfn=0x%llx:" - " slot.base_gfn=0x%llx, slot.offset_within_block=0x%llx," - " offset_within_slot=0x%llx, block.base_uva=0x%llx," - " block.size=0x%llx\n", __func__, - (slot->flags & HAX_MEMSLOT_READONLY) ? 
"ROM" : "RAM", gfn, - slot->base_gfn, slot->offset_within_block, offset_within_slot, - block->base_uva, block->size); + hax_log(HAX_LOGE, "%s: Failed to grab the RAM chunk for %s gfn=0x%llx:" + " slot.base_gfn=0x%llx, slot.offset_within_block=0x%llx," + " offset_within_slot=0x%llx, block.base_uva=0x%llx," + " block.size=0x%llx\n", __func__, + (slot->flags & HAX_MEMSLOT_READONLY) ? "ROM" : "RAM", gfn, + slot->base_gfn, slot->offset_within_block, offset_within_slot, + block->base_uva, block->size); return INVALID_PFN; } @@ -376,16 +379,17 @@ int gpa_space_adjust_prot_bitmap(hax_gpa_space *gpa_space, uint64_t end_gfn) if (end_gfn <= pb->end_gfn) return 0; - hax_info("%s: end_gfn 0x%llx -> 0x%llx\n", __func__, pb->end_gfn, end_gfn); + hax_log(HAX_LOGI, "%s: end_gfn 0x%llx -> 0x%llx\n", __func__, + pb->end_gfn, end_gfn); new_size = gpa_space_prot_bitmap_size(end_gfn); if (!new_size) { - hax_error("%s: end_gfn=0x%llx is too big\n", __func__, end_gfn); + hax_log(HAX_LOGE, "%s: end_gfn=0x%llx is too big\n", __func__, end_gfn); return -EINVAL; } bmnew = hax_vmalloc(new_size, HAX_MEM_NONPAGE); if (!bmnew) { - hax_error("%s: Not enough memory for new protection bitmap\n", - __func__); + hax_log(HAX_LOGE, "%s: Not enough memory for new protection bitmap\n", + __func__); return -ENOMEM; } pb->bitmap = bmnew; @@ -483,30 +487,30 @@ int gpa_space_protect_range(struct hax_gpa_space *gpa_space, hax_gpa_space_listener *listener; if (perm == HAX_RAM_PERM_NONE) { - hax_info("%s: Restricting access to GPA range 0x%llx..0x%llx\n", - __func__, start_gpa, start_gpa + len); + hax_log(HAX_LOGI, "%s: Restricting access to GPA range 0x%llx.." 
+ "0x%llx\n", __func__, start_gpa, start_gpa + len); } else { - hax_debug("%s: start_gpa=0x%llx, len=0x%llx, flags=%x\n", __func__, - start_gpa, len, flags); + hax_log(HAX_LOGD, "%s: start_gpa=0x%llx, len=0x%llx, flags=%x\n", + __func__, start_gpa, len, flags); } if (len == 0) { - hax_error("%s: len = 0\n", __func__); + hax_log(HAX_LOGE, "%s: len = 0\n", __func__); return -EINVAL; } // Find-grained protection (R/W/X) is not supported yet if (perm != HAX_RAM_PERM_NONE && perm != HAX_RAM_PERM_RWX) { - hax_error("%s: Unsupported flags=%d\n", __func__, flags); + hax_log(HAX_LOGE, "%s: Unsupported flags=%d\n", __func__, flags); return -EINVAL; } first_gfn = start_gpa >> PG_ORDER_4K; last_gfn = (start_gpa + len - 1) >> PG_ORDER_4K; if (last_gfn >= gpa_space->prot.end_gfn) { - hax_error("%s: GPA range exceeds protection bitmap, start_gpa=0x%llx," - " len=0x%llx, flags=0x%x, end_gfn=0x%llx\n", __func__, - start_gpa, len, flags, gpa_space->prot.end_gfn); + hax_log(HAX_LOGE, "%s: GPA range exceeds protection bitmap, " + "start_gpa=0x%llx, len=0x%llx, flags=0x%x, end_gfn=0x%llx\n", + __func__, start_gpa, len, flags, gpa_space->prot.end_gfn); return -EINVAL; } npages = last_gfn - first_gfn + 1; diff --git a/core/hax.c b/core/hax.c index 74ee3686..cf9a7400 100644 --- a/core/hax.c +++ b/core/hax.c @@ -185,25 +185,29 @@ static int hax_vmx_enable_check(void) } } if (vts != tnum) { - hax_error("VT is not supported in the system, HAXM exits, sorry!\n"); + hax_log(HAX_LOGE, "VT is not supported in the system, HAXM exits, " + "sorry!\n"); hax_notify_host_event(HaxNoVtEvent, NULL, 0); return -1; } if (nxs != tnum) { - hax_error("NX is not supported in the system, HAXM exits, sorry!\n"); + hax_log(HAX_LOGE, "NX is not supported in the system, HAXM exits, " + "sorry!\n"); hax_notify_host_event(HaxNoNxEvent, NULL, 0); return -1; } #if 0 if (em64s != tnum) { - hax_error("EM64T is not supported in the system, HAXM exits, sorry!\n"); + hax_log(HAX_LOGE, "EM64T is not supported in the system, 
HAXM exits, " + "sorry!\n"); hax_notify_host_event(HaxNoEMT64Event, NULL, 0); return -1; } #endif if (nxe != tnum) { - hax_error("NX is not enabled in the system, HAXM does not function.\n"); + hax_log(HAX_LOGE, "NX is not enabled in the system, " + "HAXM does not function.\n"); error = 1; hax_notify_host_event(HaxNxDisable, NULL, 0); } else { @@ -211,7 +215,8 @@ static int hax_vmx_enable_check(void) } if (vte != tnum) { - hax_error("VT is not enabled in the system, HAXM does not function.\n"); + hax_log(HAX_LOGE, "VT is not enabled in the system, " + "HAXM does not function.\n"); hax_notify_host_event(HaxVtDisable, NULL, 0); error = 1; } else { @@ -230,7 +235,7 @@ static int hax_vmx_enable_check(void) } if ((error == 0) && (tnum != finished)) { - hax_error("Unknown reason happens to VT init, HAXM exit\n"); + hax_log(HAX_LOGE, "Unknown reason happens to VT init, HAXM exit\n"); hax_notify_host_event(HaxVtEnableFailure, NULL, 0); return -1; } @@ -305,10 +310,10 @@ int hax_set_memlimit(void *buf, int bufLeng, int *outLength) } if (!memlimit->enable_memlimit) { hax->mem_limit = 0; - hax_error("disable memlimit\n"); + hax_log(HAX_LOGE, "disable memlimit\n"); } else { hax->mem_limit = hax->mem_quota = memlimit->memory_limit << 20; - hax_info("set memlimit 0x%llx\n", hax->mem_limit); + hax_log(HAX_LOGI, "set memlimit 0x%llx\n", hax->mem_limit); } hax_mutex_unlock(hax->hax_lock); return 0; @@ -426,7 +431,8 @@ static void hax_pmu_init(void) cpu_data = hax_cpu_data[cpu_id]; // Should never happen if (!cpu_data) { - hax_warning("hax_pmu_init: hax_cpu_data[%d] is NULL\n", cpu_id); + hax_log(HAX_LOGW, "hax_pmu_init: hax_cpu_data[%d] is NULL\n", + cpu_id); continue; } @@ -459,11 +465,11 @@ static void hax_pmu_init(void) ? 
APM_MAX_EVENT_COUNT : ref_pmu_info->apm_event_count; hax->apm_event_unavailability = ref_pmu_info->apm_event_unavailability & ((1UL << hax->apm_event_count) - 1); - hax_info("APM: version %u\n", hax->apm_version); - hax_info("APM: %u general-purpose counters, bitmask 0x%llx\n", - hax->apm_general_count, hax->apm_general_mask); - hax_info("APM: %u events, unavailability 0x%x\n", hax->apm_event_count, - hax->apm_event_unavailability); + hax_log(HAX_LOGI, "APM: version %u\n", hax->apm_version); + hax_log(HAX_LOGI, "APM: %u general-purpose counters, bitmask 0x%llx\n", + hax->apm_general_count, hax->apm_general_mask); + hax_log(HAX_LOGI, "APM: %u events, unavailability 0x%x\n", + hax->apm_event_count, hax->apm_event_unavailability); set_msr_access(IA32_PMC0, hax->apm_general_count, true, true); set_msr_access(IA32_PERFEVTSEL0, hax->apm_general_count, true, true); @@ -475,8 +481,8 @@ static void hax_pmu_init(void) apm_fixed_bitlen = ref_pmu_info->apm_fixed_bitlen; hax->apm_fixed_mask = apm_fixed_bitlen > 63 ? 
(uint64_t)-1 : (1ULL << apm_fixed_bitlen) - 1; - hax_info("APM: %u fixed-function counters, bitmask 0x%llx\n", - hax->apm_fixed_count, hax->apm_fixed_mask); + hax_log(HAX_LOGI, "APM: %u fixed-function counters, bitmask " + "0x%llx\n",hax->apm_fixed_count, hax->apm_fixed_mask); } else { hax->apm_fixed_count = 0; apm_fixed_bitlen = 0; @@ -494,7 +500,7 @@ static void hax_pmu_init(void) pmu_info->apm_fixed_count = hax->apm_fixed_count; pmu_info->apm_fixed_bitlen = apm_fixed_bitlen; } else { - hax_warning("Host CPU does not support APM\n"); + hax_log(HAX_LOGW, "Host CPU does not support APM\n"); hax->apm_general_count = 0; hax->apm_general_mask = 0; hax->apm_event_count = 0; @@ -545,7 +551,8 @@ int hax_module_init(void) hax_pmu_init(); hax_init_list_head(&hax->hax_vmlist); - hax_warning("-------- HAXM v%s Start --------\n", HAXM_RELEASE_VERSION_STR); + hax_log(HAX_LOGW, "-------- HAXM v%s Start --------\n", + HAXM_RELEASE_VERSION_STR); return 0; @@ -571,7 +578,7 @@ int hax_module_exit(void) int i, ret; if (!hax_list_empty(&hax->hax_vmlist)) { - hax_error("Still VM not be destroyed?\n"); + hax_log(HAX_LOGE, "Still VM not be destroyed?\n"); return -EBUSY; } @@ -591,7 +598,8 @@ int hax_module_exit(void) hax_vfree(hax_cpu_data, max_cpus * sizeof(void *)); hax_mutex_free(hax->hax_lock); hax_vfree(hax, sizeof(struct hax_t)); - hax_warning("-------- HAXM v%s End --------\n", HAXM_RELEASE_VERSION_STR); + hax_log(HAX_LOGW, "-------- HAXM v%s End --------\n", + HAXM_RELEASE_VERSION_STR); return 0; } diff --git a/core/include/hax_core_interface.h b/core/include/hax_core_interface.h index aa836d66..d974ce2f 100644 --- a/core/include/hax_core_interface.h +++ b/core/include/hax_core_interface.h @@ -94,7 +94,6 @@ int hax_vm_set_qemuversion(struct vm_t *vm, struct hax_qemu_version *ver); struct vm_t * hax_create_vm(int *vm_id); int hax_teardown_vm(struct vm_t *vm); int vcpu_event_pending(struct vcpu_t *vcpu); -void vcpu_set_panic(struct vcpu_t *vcpu); #ifdef __cplusplus } diff --git 
a/core/include/vcpu.h b/core/include/vcpu.h index 2a842b8f..a873942b 100644 --- a/core/include/vcpu.h +++ b/core/include/vcpu.h @@ -290,8 +290,6 @@ static inline bool valid_vcpu_id(int vcpu_id) } bool vcpu_is_panic(struct vcpu_t *vcpu); -#ifndef hax_panic_vcpu -void hax_panic_vcpu(struct vcpu_t *v, char *fmt, ...); -#endif +void vcpu_set_panic(struct vcpu_t *vcpu); #endif // HAX_CORE_VCPU_H_ diff --git a/core/intr_exc.c b/core/intr_exc.c index a233b8c0..80bd18a0 100644 --- a/core/intr_exc.c +++ b/core/intr_exc.c @@ -68,7 +68,7 @@ void hax_set_pending_intr(struct vcpu_t *vcpu, uint8_t vector) uint8_t nr_word = vector / 32; if (intr_pending[nr_word] & (1 << offset)) { - hax_debug("vector :%d is already pending.", vector); + hax_log(HAX_LOGD, "vector :%d is already pending.", vector); return; } intr_pending[nr_word] |= 1 << offset; @@ -144,12 +144,12 @@ void hax_handle_idt_vectoring(struct vcpu_t *vcpu) /* One ext interrupt is pending ? Re-inject it ? */ vector = (uint8_t) (idt_vec & 0xff); hax_set_pending_intr(vcpu, vector); - hax_debug("extern interrupt is vectoring....vector:%d\n", vector); + hax_log(HAX_LOGD, "extern interrupt is vectoring....vector:%d\n", + vector); } else { - hax_debug("VM Exit @ IDT vectoring, type:%d, vector:%d," - " error code:%llx\n", - (idt_vec & 0x700) >> 8, idt_vec & 0xff, - vmread(vcpu, VM_EXIT_INFO_IDT_VECTORING_ERROR_CODE)); + hax_log(HAX_LOGD, "VM Exit @ IDT vectoring, type:%d, vector:%d, " + "error code:%llx\n", (idt_vec & 0x700) >> 8, idt_vec & 0xff, + vmread(vcpu, VM_EXIT_INFO_IDT_VECTORING_ERROR_CODE)); } } } @@ -202,7 +202,7 @@ void hax_inject_exception(struct vcpu_t *vcpu, uint8_t vector, uint32_t error_co uint32_t exit_instr_length = vmx(vcpu, exit_instr_length); if (vcpu->event_injected == 1) - hax_debug("Event is injected already!!:\n"); + hax_log(HAX_LOGD, "Event is injected already!!:\n"); if (vect_info & INTR_INFO_VALID_MASK) { first_vec = (uint8_t) (vect_info & INTR_INFO_VECTOR_MASK); @@ -237,7 +237,7 @@ void 
hax_inject_exception(struct vcpu_t *vcpu, uint8_t vector, uint32_t error_co vmwrite(vcpu, VMX_ENTRY_INTERRUPT_INFO, intr_info); } - hax_debug("Guest is injecting exception info:%x\n", intr_info); + hax_log(HAX_LOGD, "Guest is injecting exception info:%x\n", intr_info); vcpu->event_injected = 1; } diff --git a/core/memory.c b/core/memory.c index 8f434dca..18cf8cae 100644 --- a/core/memory.c +++ b/core/memory.c @@ -44,11 +44,11 @@ static int handle_alloc_ram(struct vm_t *vm, uint64_t start_uva, uint64_t size) hax_ramblock *block; if (!start_uva) { - hax_error("%s: start_uva == 0\n", __func__); + hax_log(HAX_LOGE, "%s: start_uva == 0\n", __func__); return -EINVAL; } if (!size) { - hax_error("%s: size == 0\n", __func__); + hax_log(HAX_LOGE, "%s: size == 0\n", __func__); return -EINVAL; } @@ -56,8 +56,8 @@ static int handle_alloc_ram(struct vm_t *vm, uint64_t start_uva, uint64_t size) ret = ramblock_add(&vm->gpa_space.ramblock_list, start_uva, size, NULL, &block); if (ret) { - hax_error("%s: ramblock_add() failed: ret=%d, start_uva=0x%llx," - " size=0x%llx\n", __func__, ret, start_uva, size); + hax_log(HAX_LOGE, "%s: ramblock_add() failed: ret=%d, start_uva=0x%llx," + " size=0x%llx\n", __func__, ret, start_uva, size); return ret; } return 0; @@ -77,32 +77,32 @@ int hax_vm_add_ramblock(struct vm_t *vm, uint64_t start_uva, uint64_t size) /* A valid size is needed */ if (0 == size) { - hax_error("hax_vm_alloc_ram: the size is 0, invalid!\n"); + hax_log(HAX_LOGE, "hax_vm_alloc_ram: the size is 0, invalid!\n"); return -EINVAL; } if (!gva || gva & 0xfff) { - hax_error("Invalid gva %llx for allocating memory.\n", gva); + hax_log(HAX_LOGE, "Invalid gva %llx for allocating memory.\n", gva); return -EINVAL; } - hax_info("hax_vm_alloc_ram: size 0x%x\n", size); + hax_log(HAX_LOGI, "hax_vm_alloc_ram: size 0x%x\n", size); if (!hax_test_bit(VM_STATE_FLAGS_MEM_ALLOC, &vm->flags)) { - hax_info("!VM_STATE_FLAGS_MEM_ALLOC\n"); + hax_log(HAX_LOGI, "!VM_STATE_FLAGS_MEM_ALLOC\n"); 
hax_mutex_lock(hax->hax_lock); if (hax->mem_limit && (size > hax->mem_quota)) { - hax_error("HAX is out of memory quota.\n"); + hax_log(HAX_LOGE, "HAX is out of memory quota.\n"); hax_mutex_unlock(hax->hax_lock); return -EINVAL; } hax_mutex_unlock(hax->hax_lock); - hax_info("Memory allocation, va:%llx, size:%x\n", *va, size); + hax_log(HAX_LOGI, "Memory allocation, va:%llx, size:%x\n", *va, size); } else { - hax_info("spare alloc: mem_limit 0x%llx, size 0x%x, spare_ram 0x%llx\n", - hax->mem_limit, size, vm->spare_ramsize); + hax_log(HAX_LOGI, "spare alloc: mem_limit 0x%llx, size 0x%x, " + "spare_ram 0x%llx\n", hax->mem_limit, size, vm->spare_ramsize); if (hax->mem_limit && (size > vm->spare_ramsize)) { - hax_error("HAX is out of memory quota, because application" - " requests another %x bytes\n", size); + hax_log(HAX_LOGE, "HAX is out of memory quota, because application" + " requests another %x bytes\n", size); return -EINVAL; } } @@ -123,14 +123,15 @@ int hax_vm_add_ramblock(struct vm_t *vm, uint64_t start_uva, uint64_t size) leftsize = size; while (leftsize > 0) { cursize = leftsize > HAX_RAM_ENTRY_SIZE ? 
HAX_RAM_ENTRY_SIZE : leftsize; - hax_debug("Memory allocation, gva:%llx, cur_size:%x\n", gva, cursize); + hax_log(HAX_LOGD, "Memory allocation, gva:%llx, cur_size:%x\n", + gva, cursize); ret = hax_setup_vcpumem(curmem, gva, cursize, HAX_VCPUMEM_VALIDVA); if (ret < 0) goto fail; - hax_debug("Alloc ram %x kva is %p uva %llx\n", cursize, curmem->kva, - curmem->uva); + hax_log(HAX_LOGD, "Alloc ram %x kva is %p uva %llx\n", + cursize, curmem->kva, curmem->uva); leftsize -= cursize; curmem++; @@ -152,16 +153,16 @@ int hax_vm_add_ramblock(struct vm_t *vm, uint64_t start_uva, uint64_t size) hax_mutex_unlock(hax->hax_lock); hax_test_and_set_bit(VM_STATE_FLAGS_MEM_ALLOC, &vm->flags); vm->spare_ramsize = VM_SPARE_RAMSIZE; - hax_info("!VM_STATE_FLAGS_MEM_ALLOC: spare_ram 0x%llx\n", - vm->spare_ramsize); + hax_log(HAX_LOGI, "!VM_STATE_FLAGS_MEM_ALLOC: spare_ram 0x%llx\n", + vm->spare_ramsize); } else { if (hax->mem_limit) { vm->spare_ramsize -= size; - hax_info("VM_STATE_FLAGS_MEM_ALLOC: spare_ram 0x%llx\n", - vm->spare_ramsize); + hax_log(HAX_LOGI, "VM_STATE_FLAGS_MEM_ALLOC: spare_ram 0x%llx\n", + vm->spare_ramsize); } } - hax_debug("Memory allocationg done!\n"); + hax_log(HAX_LOGD, "Memory allocationg done!\n"); return 0; fail: @@ -244,16 +245,16 @@ static int handle_set_ram(struct vm_t *vm, uint64_t start_gpa, uint64_t size, // HAX_RAM_INFO_INVALID indicates that guest physical address range // [start_gpa, start_gpa + size) should be unmapped if (unmap && (flags != HAX_MEMSLOT_INVALID || start_uva)) { - hax_error("%s: Invalid start_uva=0x%llx or flags=0x%x for unmapping\n", - __func__, start_uva, flags); + hax_log(HAX_LOGE, "%s: Invalid start_uva=0x%llx or flags=0x%x for " + "unmapping\n", __func__, start_uva, flags); return -EINVAL; } if (!unmap && !start_uva) { - hax_error("%s: Cannot map to an invalid UVA\n", __func__); + hax_log(HAX_LOGE, "%s: Cannot map to an invalid UVA\n", __func__); return -EINVAL; } if (!size) { - hax_error("%s: size == 0\n", __func__); + 
hax_log(HAX_LOGE, "%s: size == 0\n", __func__); return -EINVAL; } @@ -264,17 +265,17 @@ static int handle_set_ram(struct vm_t *vm, uint64_t start_gpa, uint64_t size, ret = gpa_space_adjust_prot_bitmap(gpa_space, start_gfn + npages); if (ret) { - hax_error("%s: Failed to resize prot bitmap: ret=%d, start_gfn=0x%llx," - " npages=0x%llx, start_uva=0x%llx, flags=0x%x\n", __func__, - ret, start_gfn, npages, start_uva, flags); + hax_log(HAX_LOGE, "%s: Failed to resize prot bitmap: ret=%d, start_gfn=" + "0x%llx, npages=0x%llx, start_uva=0x%llx, flags=0x%x\n", + __func__, ret, start_gfn, npages, start_uva, flags); return ret; } ret = memslot_set_mapping(gpa_space, start_gfn, npages, start_uva, flags); if (ret) { - hax_error("%s: memslot_set_mapping() failed: ret=%d, start_gfn=0x%llx," - " npages=0x%llx, start_uva=0x%llx, flags=0x%x\n", __func__, - ret, start_gfn, npages, start_uva, flags); + hax_log(HAX_LOGE, "%s: memslot_set_mapping() failed: ret=%d, start_gfn=" + "0x%llx, npages=0x%llx, start_uva=0x%llx, flags=0x%x\n", + __func__, ret, start_gfn, npages, start_uva, flags); return ret; } memslot_dump_list(gpa_space); @@ -282,7 +283,8 @@ static int handle_set_ram(struct vm_t *vm, uint64_t start_gpa, uint64_t size, ept_tree = &vm->ept_tree; if (!hax_test_and_clear_bit(0, (uint64_t *)&ept_tree->invept_pending)) { // INVEPT pending flag was set - hax_info("%s: Invoking INVEPT for VM #%d\n", __func__, vm->vm_id); + hax_log(HAX_LOGI, "%s: Invoking INVEPT for VM #%d\n", + __func__, vm->vm_id); invept(vm, EPT_INVEPT_SINGLE_CONTEXT); } return 0; @@ -308,8 +310,8 @@ int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info) // HAX_RAM_INFO_INVALID indicates that guest physical address range // [pa_start, pa_start + size) should be unmapped if (is_unmap && (info->flags != HAX_RAM_INFO_INVALID || info->va)) { - hax_error("HAX_VM_IOCTL_SET_RAM called with invalid parameter(s):" - " flags=0x%x, va=0x%llx\n", info->flags, info->va); + hax_log(HAX_LOGE, "HAX_VM_IOCTL_SET_RAM 
called with invalid " + "parameter(s): flags=0x%x, va=0x%llx\n", info->flags, info->va); return -EINVAL; } @@ -324,13 +326,13 @@ int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info) } else { struct hax_vcpu_mem *pmem = get_pmem_range(vm, cur_va); if (!pmem) { - hax_error("Can't find pmem for va %llx", cur_va); + hax_log(HAX_LOGE, "Can't find pmem for va %llx", cur_va); return -ENOMEM; } hpfn = get_hpfn_from_pmem(pmem, cur_va); if (hpfn <= 0) { - hax_error("Can't get host address for va %llx", cur_va); + hax_log(HAX_LOGE, "Can't get host address for va %llx", cur_va); /* * Shall we revert the already setup one? Assume not since the * QEMU should exit on such situation, although it does not. @@ -358,8 +360,8 @@ int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info) } if (!ept_set_pte(vm, gpfn << HAX_PAGE_SHIFT, hpfn << HAX_PAGE_SHIFT, emt, perm, &epte_modified)) { - hax_error("ept_set_pte() failed at gpfn 0x%llx hpfn 0x%llx\n", gpfn, - hpfn); + hax_log(HAX_LOGE, "ept_set_pte() failed at gpfn 0x%llx " + "hpfn 0x%llx\n", gpfn, hpfn); return -ENOMEM; } ept_modified = ept_modified || epte_modified; @@ -369,8 +371,9 @@ int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info) } if (ept_modified) { /* Invalidate EPT cache (see IASDM Vol. 3C 28.3.3.4) */ - hax_info("Calling INVEPT after EPT update (pa_start=0x%llx, size=0x%x," - " flags=0x%x)\n", info->pa_start, info->size, info->flags); + hax_log(HAX_LOGI, "Calling INVEPT after EPT update (pa_start=0x%llx, " + "size=0x%x, flags=0x%x)\n", info->pa_start, info->size, + info->flags); invept(vm, EPT_INVEPT_SINGLE_CONTEXT); } return 0; @@ -400,7 +403,7 @@ int hax_vcpu_setup_hax_tunnel(struct vcpu_t *cv, struct hax_tunnel_info *info) // The tunnel and iobuf are always set together. 
if (cv->tunnel && cv->iobuf_vcpumem) { - hax_info("setup hax tunnel request for already setup one\n"); + hax_log(HAX_LOGI, "setup hax tunnel request for already setup one\n"); info->size = HAX_PAGE_SIZE; info->va = cv->tunnel_vcpumem->uva; info->io_va = cv->iobuf_vcpumem->uva; diff --git a/core/memslot.c b/core/memslot.c index 89e57191..39e3e527 100644 --- a/core/memslot.c +++ b/core/memslot.c @@ -146,18 +146,18 @@ void memslot_dump_list(hax_gpa_space *gpa_space) hax_memslot *memslot = NULL; int i = 0; - hax_info("memslot dump begins:\n"); + hax_log(HAX_LOGI, "memslot dump begins:\n"); hax_list_entry_for_each(memslot, &gpa_space->memslot_list, hax_memslot, entry) { - hax_info("memslot [%d]: base_gfn = 0x%016llx, npages = 0x%llx, " - "uva = 0x%016llx, flags = 0x%02x " - "(block_base_uva = 0x%016llx, offset_within_block = 0x%llx)\n", - i++, memslot->base_gfn, memslot->npages, - memslot->block->base_uva + memslot->offset_within_block, - memslot->flags, memslot->block->base_uva, - memslot->offset_within_block); + hax_log(HAX_LOGI, "memslot [%d]: base_gfn = 0x%016llx, " + "npages = 0x%llx, uva = 0x%016llx, flags = 0x%02x " + "(block_base_uva = 0x%016llx, offset_within_block = 0x%llx)\n", + i++, memslot->base_gfn, memslot->npages, + memslot->block->base_uva + memslot->offset_within_block, + memslot->flags, memslot->block->base_uva, + memslot->offset_within_block); } - hax_info("memslot dump ends!\n"); + hax_log(HAX_LOGI, "memslot dump ends!\n"); } int memslot_set_mapping(hax_gpa_space *gpa_space, uint64_t start_gfn, @@ -171,8 +171,8 @@ int memslot_set_mapping(hax_gpa_space *gpa_space, uint64_t start_gfn, bool is_valid = false, is_found = false; uint8_t route = 0, state = 0; - hax_info("%s: start_gfn=0x%llx, npages=0x%llx, uva=0x%llx, flags=0x%x\n", - __func__, start_gfn, npages, uva, flags); + hax_log(HAX_LOGI, "%s: start_gfn=0x%llx, npages=0x%llx, uva=0x%llx, " + "flags=0x%x\n", __func__, start_gfn, npages, uva, flags); if ((gpa_space == NULL) || (npages == 0)) return 
-EINVAL; @@ -184,9 +184,9 @@ int memslot_set_mapping(hax_gpa_space *gpa_space, uint64_t start_gfn, ret = ramblock_add(&gpa_space->ramblock_list, uva, npages << PG_ORDER_4K, NULL, &block); if (ret != 0 || block == NULL) { - hax_error("%s: Failed to create standalone RAM block:" - "start_gfn=0x%llx, npages=0x%llx, uva=0x%llx\n", - __func__, start_gfn, npages, uva); + hax_log(HAX_LOGE, "%s: Failed to create standalone RAM block:" + "start_gfn=0x%llx, npages=0x%llx, uva=0x%llx\n", + __func__, start_gfn, npages, uva); return ret < 0 ? ret : -EINVAL; } @@ -197,8 +197,8 @@ int memslot_set_mapping(hax_gpa_space *gpa_space, uint64_t start_gfn, } else { block = ramblock_find(&gpa_space->ramblock_list, uva, NULL); if (block == NULL) { - hax_error("%s: Failed to find uva=0x%llx in RAM block\n", - __func__, uva); + hax_log(HAX_LOGE, "%s: Failed to find uva=0x%llx in " + "RAM block\n", __func__, uva); return -EINVAL; } } @@ -345,7 +345,7 @@ static inline hax_memslot * memslot_dup(hax_memslot *slot) new_slot = (hax_memslot *)hax_vmalloc(sizeof(hax_memslot), HAX_MEM_NONPAGE); if (new_slot == NULL) { - hax_error("%s: Failed to allocate memslot\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate memslot\n", __func__); return NULL; } @@ -414,7 +414,7 @@ static inline hax_memslot * memslot_append_rest(hax_memslot *dest, rest = (hax_memslot *)hax_vmalloc(sizeof(hax_memslot), HAX_MEM_NONPAGE); if (rest == NULL) { - hax_error("%s: Failed to allocate memslot\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate memslot\n", __func__); return NULL; } diff --git a/core/ramblock.c b/core/ramblock.c index 571b6a4f..5575e2cc 100644 --- a/core/ramblock.c +++ b/core/ramblock.c @@ -31,8 +31,6 @@ #include "../include/hax.h" #include "include/memory.h" -#define ramblock_info hax_info - static inline uint64_t ramblock_count_chunks(hax_ramblock *block) { // Assuming block != NULL && block->size != 0 @@ -63,8 +61,8 @@ static hax_ramblock * ramblock_alloc(uint64_t base_uva, uint64_t size) 
block = (hax_ramblock *) hax_vmalloc(sizeof(*block), 0); if (!block) { - hax_error("%s: Failed to allocate ramblock for UVA range:" - " base_uva=0x%llx, size=0x%llx\n", __func__, base_uva, size); + hax_log(HAX_LOGE, "%s: Failed to allocate ramblock for UVA range:" + " base_uva=0x%llx, size=0x%llx\n", __func__, base_uva, size); return NULL; } @@ -73,8 +71,8 @@ static hax_ramblock * ramblock_alloc(uint64_t base_uva, uint64_t size) nchunks = ramblock_count_chunks(block); chunks = (hax_chunk **) hax_vmalloc(nchunks * sizeof(*chunks), 0); if (!chunks) { - hax_error("%s: Failed to allocate chunks array: nchunks=0x%llx," - " size=0x%llx\n", __func__, nchunks, size); + hax_log(HAX_LOGE, "%s: Failed to allocate chunks array: nchunks=0x%llx," + " size=0x%llx\n", __func__, nchunks, size); hax_vfree(block, sizeof(*block)); return NULL; } @@ -84,9 +82,9 @@ static hax_ramblock * ramblock_alloc(uint64_t base_uva, uint64_t size) chunks_bitmap_size = ramblock_count_bitmap_size(nchunks); chunks_bitmap = (uint8_t *) hax_vmalloc(chunks_bitmap_size, 0); if (!chunks_bitmap) { - hax_error("%s: Failed to allocate chunks bitmap: nchunks=0x%llx," - " chunks_bitmap_size=0x%llx, size=0x%llx\n", __func__, - nchunks, chunks_bitmap_size, size); + hax_log(HAX_LOGE, "%s: Failed to allocate chunks bitmap: " + "nchunks=0x%llx, chunks_bitmap_size=0x%llx, size=0x%llx\n", + __func__, nchunks, chunks_bitmap_size, size); hax_vfree(chunks, nchunks * sizeof(*chunks)); hax_vfree(block, sizeof(*block)); return NULL; @@ -110,9 +108,11 @@ static void ramblock_free_chunks(hax_ramblock *block, bool destroy) chunks = block->chunks; nchunks = ramblock_count_chunks(block); chunks_bitmap_size = ramblock_count_bitmap_size(nchunks); - hax_info("%s: Freeing <= %llu chunks, bitmap:\n", __func__, nchunks); + hax_log(HAX_LOGI, "%s: Freeing <= %llu chunks, bitmap:\n", + __func__, nchunks); for (i = 0; i < chunks_bitmap_size; i++) { - hax_info("%s: [%llu]=0x%02x\n", __func__, i, block->chunks_bitmap[i]); + hax_log(HAX_LOGI, 
"%s: [%llu]=0x%02x\n", __func__, i, + block->chunks_bitmap[i]); } for (i = 0; i < nchunks; i++) { hax_chunk *chunk = chunks[i]; @@ -133,29 +133,30 @@ static void ramblock_free_chunks(hax_ramblock *block, bool destroy) // reverse the order to avoid that. if (hax_test_and_clear_bit((int) i, (uint64_t *) block->chunks_bitmap)) { // Bit i of chunks_bitmap was already clear - hax_warning("%s: chunks[%llu] existed but its bit in chunks_bitmap" - " was not set: size=0x%llx, block.size=0x%llx\n", - __func__, i, chunk->size, block->size); + hax_log(HAX_LOGW, "%s: chunks[%llu] existed but its bit in " + "chunks_bitmap was not set: size=0x%llx, " + "block.size=0x%llx\n", __func__, i, chunk->size, + block->size); } chunks[i] = NULL; ret = chunk_free(chunk); if (ret) { - hax_warning("%s: Failed to free chunk: i=%llu, base_uva=0x%llx," - " size=0x%llx, ret=%d\n", __func__, i, chunk->base_uva, - chunk->size, ret); + hax_log(HAX_LOGW, "%s: Failed to free chunk: i=%llu, " + "base_uva=0x%llx, size=0x%llx, ret=%d\n", __func__, i, + chunk->base_uva, chunk->size, ret); } } // Double check that there is no bit set in chunks_bitmap for (i = 0; i < chunks_bitmap_size; i++) { if (block->chunks_bitmap[i]) { - hax_warning("%s: chunks_bitmap[%llu]=0x%02x\n", __func__, i, - block->chunks_bitmap[i]); + hax_log(HAX_LOGW, "%s: chunks_bitmap[%llu]=0x%02x\n", __func__, i, + block->chunks_bitmap[i]); block->chunks_bitmap[i] = 0; } } - hax_info("%s: All chunks freed: %lluKB total, %lluKB used\n", __func__, - block->size / 1024, nbytes_used / 1024); + hax_log(HAX_LOGI, "%s: All chunks freed: %lluKB total, %lluKB used\n", + __func__, block->size / 1024, nbytes_used / 1024); if (!destroy) return; @@ -169,7 +170,7 @@ static void ramblock_free_chunks(hax_ramblock *block, bool destroy) static void ramblock_free(hax_ramblock *block) { if (!block) { - hax_warning("%s: block == NULL\n", __func__); + hax_log(HAX_LOGW, "%s: block == NULL\n", __func__); return; } @@ -181,10 +182,10 @@ static void 
ramblock_free(hax_ramblock *block) int ramblock_init_list(hax_list_head *list) { if (!list) { - hax_error("ramblock_init_list: list is null \n"); + hax_log(HAX_LOGE, "ramblock_init_list: list is null \n"); return -EINVAL; } - ramblock_info("ramblock_init_list\n"); + hax_log(HAX_LOGI, "ramblock_init_list\n"); hax_init_list_head(list); return 0; @@ -193,10 +194,9 @@ int ramblock_init_list(hax_list_head *list) static void ramblock_remove(hax_ramblock *block) { hax_assert(block != NULL); - ramblock_info("%s: Removing RAM block: base_uva=0x%llx, size=0x%llx," - " is_standalone=%d, ref_count=%d\n", __func__, - block->base_uva, block->size, block->is_standalone, - block->ref_count); + hax_log(HAX_LOGI, "%s: Removing RAM block: base_uva=0x%llx, size=0x%llx," + " is_standalone=%d, ref_count=%d\n", __func__, block->base_uva, + block->size, block->is_standalone, block->ref_count); hax_list_del(&block->entry); ramblock_free(block); } @@ -206,11 +206,11 @@ void ramblock_free_list(hax_list_head *list) hax_ramblock *ramblock, *tmp; if (!list) { - hax_error("ramblock_free_list: list is null \n"); + hax_log(HAX_LOGE, "ramblock_free_list: list is null \n"); return; } - ramblock_info("ramblock_free_list\n"); + hax_log(HAX_LOGI, "ramblock_free_list\n"); hax_list_entry_for_each_safe(ramblock, tmp, list, hax_ramblock, entry) { ramblock_remove(ramblock); } @@ -221,13 +221,13 @@ void ramblock_dump_list(hax_list_head *list) hax_ramblock *ramblock; int i = 0; - ramblock_info("ramblock dump begin:\n"); + hax_log(HAX_LOGI, "ramblock dump begin:\n"); hax_list_entry_for_each(ramblock, list, hax_ramblock, entry) { - ramblock_info("block %d (%p): base_uva 0x%llx, size 0x%llx, ref_count " - "%d\n", i++, ramblock, ramblock->base_uva, - ramblock->size, ramblock->ref_count); + hax_log(HAX_LOGI, "block %d (%p): base_uva 0x%llx, size 0x%llx, " + "ref_count %d\n", i++, ramblock, ramblock->base_uva, + ramblock->size, ramblock->ref_count); } - ramblock_info("ramblock dump end!\n"); + hax_log(HAX_LOGI, 
"ramblock dump end!\n"); } // TODO: parameter 'start' is ignored for now @@ -241,16 +241,16 @@ hax_ramblock * ramblock_find(hax_list_head *list, uint64_t uva, break; if (uva < ramblock->base_uva + ramblock->size) { - hax_debug("%s: (%p): base_uva 0x%llx, size 0x%llx, ref_count " - "%d\n", __func__, ramblock, ramblock->base_uva, - ramblock->size, ramblock->ref_count); + hax_log(HAX_LOGD, "%s: (%p): base_uva 0x%llx, size 0x%llx, " + "ref_count %d\n", __func__, ramblock, ramblock->base_uva, + ramblock->size, ramblock->ref_count); ramblock_ref(ramblock); return ramblock; } } - hax_warning("can not find 0x%llx in ramblock list.\n", uva); + hax_log(HAX_LOGW, "can not find 0x%llx in ramblock list.\n", uva); return NULL; } @@ -261,7 +261,7 @@ int ramblock_add(hax_list_head *list, uint64_t base_uva, uint64_t size, hax_ramblock *ramblock, *ramblock2; if (!list) { - hax_error("invalid list: list head is null.\n"); + hax_log(HAX_LOGE, "invalid list: list head is null.\n"); return -EINVAL; } @@ -270,8 +270,8 @@ int ramblock_add(hax_list_head *list, uint64_t base_uva, uint64_t size, return -ENOMEM; } - ramblock_info("Adding block: base_uva 0x%llx, size 0x%llx\n", - ramblock->base_uva, ramblock->size); + hax_log(HAX_LOGI, "Adding block: base_uva 0x%llx, size 0x%llx\n", + ramblock->base_uva, ramblock->size); if (hax_list_empty(list)) { // TODO: change hax_list_add to hax_list_insert_after @@ -297,9 +297,10 @@ int ramblock_add(hax_list_head *list, uint64_t base_uva, uint64_t size, // If the program comes here, it denotes that there is overlap // between ramblock and ramblock2 ramblock_free(ramblock); - hax_error("New ramblock base_uva 0x%llx, size 0x%llx overlaps with" - " existing ramblock: base_uva 0x%llx, size 0x%llx\n", - base_uva, size, ramblock2->base_uva, ramblock2->size); + hax_log(HAX_LOGE, "New ramblock base_uva 0x%llx, size 0x%llx " + "overlaps with existing ramblock: base_uva 0x%llx, " + "size 0x%llx\n", base_uva, size, ramblock2->base_uva, + ramblock2->size); return 
-EINVAL; } } @@ -320,12 +321,12 @@ hax_chunk * ramblock_get_chunk(hax_ramblock *block, uint64_t uva_offset, uint64_t chunk_index; if (!block) { - hax_error("%s: block == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: block == NULL\n", __func__); return NULL; } if (uva_offset >= block->size) { - hax_warning("%s: uva_offset=0x%llx >= block->size=0x%llx\n", __func__, - uva_offset, block->size); + hax_log(HAX_LOGW, "%s: uva_offset=0x%llx >= block->size=0x%llx\n", + __func__, uva_offset, block->size); return NULL; } @@ -359,9 +360,10 @@ hax_chunk * ramblock_get_chunk(hax_ramblock *block, uint64_t uva_offset, // no such API as hax_clear_bit() was_clear = hax_test_and_clear_bit((int) chunk_index, (uint64_t *) block->chunks_bitmap); - hax_error("%s: Failed to allocate chunk: ret=%d, index=%llu," - " base_uva=0x%llx, size=0x%llx, was_clear=%d\n", __func__, - ret, chunk_index, chunk_base_uva, chunk_size, was_clear); + hax_log(HAX_LOGE, "%s: Failed to allocate chunk: ret=%d, " + "index=%llu, base_uva=0x%llx, size=0x%llx, was_clear=%d\n", + __func__, ret, chunk_index, chunk_base_uva, chunk_size, + was_clear); return NULL; } hax_assert(chunk != NULL); @@ -378,19 +380,20 @@ hax_chunk * ramblock_get_chunk(hax_ramblock *block, uint64_t uva_offset, (uint64_t *) block->chunks_bitmap)) { // The other thread has reset the bit, indicating the chunk // could not be allocated/pinned - hax_error("%s: Another thread tried to allocate this chunk" - " first, but failed: index=%llu, block.size=0x%llx," - " block.base_uva=0x%llx\n", __func__, chunk_index, - block->size, block->base_uva); + hax_log(HAX_LOGE, "%s: Another thread tried to allocate this " + "chunk first, but failed: index=%llu, " + "block.size=0x%llx, block.base_uva=0x%llx\n", + __func__, chunk_index, block->size, block->base_uva); return NULL; } if (!(++i % 100000)) { // 10^5 - hax_info("%s: In iteration %d of while loop\n", __func__, i); + hax_log(HAX_LOGI, "%s: In iteration %d of while loop\n", + __func__, i); if (i == 
1000000000) { // 10^9 (< INT_MAX) - hax_error("%s: Breaking out of infinite loop: index=%llu," - " block.size=0x%llx, block.base_uva=0x%llx\n", - __func__, chunk_index, block->size, - block->base_uva); + hax_log(HAX_LOGE, "%s: Breaking out of infinite loop: " + "index=%llu, block.size=0x%llx, " + "block.base_uva=0x%llx\n", __func__, chunk_index, + block->size, block->base_uva); return NULL; } } @@ -403,31 +406,31 @@ hax_chunk * ramblock_get_chunk(hax_ramblock *block, uint64_t uva_offset, void ramblock_ref(hax_ramblock *block) { if (block == NULL) { - hax_error("%s: Invalid RAM block\n", __func__); + hax_log(HAX_LOGE, "%s: Invalid RAM block\n", __func__); return; } ++block->ref_count; - hax_debug("%s: block (%p): base_uva = 0x%llx, size = 0x%llx, ref_count = " - "%d\n", __func__, block, block->base_uva, block->size, - block->ref_count); + hax_log(HAX_LOGD, "%s: block (%p): base_uva = 0x%llx, size = 0x%llx, " + "ref_count = %d\n", __func__, block, block->base_uva, block->size, + block->ref_count); } void ramblock_deref(hax_ramblock *block) { if (block == NULL) { - hax_error("%s: Invalid RAM block\n", __func__); + hax_log(HAX_LOGE, "%s: Invalid RAM block\n", __func__); return; } if (block->ref_count <= 0) { - hax_error("%s: Unreferenced block (ref_count = %d)\n", __func__, - block->ref_count); + hax_log(HAX_LOGE, "%s: Unreferenced block (ref_count = %d)\n", + __func__, block->ref_count); return; } if (--block->ref_count == 0) { - hax_debug("%s: Reset RAM block (%p): base_uva = 0x%llx, size = 0x%llx, " - "ref_count = %d\n", __func__, block, block->base_uva, - block->size, block->ref_count); + hax_log(HAX_LOGD, "%s: Reset RAM block (%p): base_uva = 0x%llx, " + "size = 0x%llx, ref_count = %d\n", __func__, block, + block->base_uva, block->size, block->ref_count); if (block->is_standalone) { // A stand-alone mapping is created along with a "disposable" RAM // block, which must be destroyed when the mapping is unmapped, so @@ -439,7 +442,7 @@ void 
ramblock_deref(hax_ramblock *block) return; } - hax_debug("%s: block (%p): base_uva = 0x%llx, size = 0x%llx, ref_count = " - "%d\n", __func__, block, block->base_uva, block->size, - block->ref_count); + hax_log(HAX_LOGD, "%s: block (%p): base_uva = 0x%llx, size = 0x%llx, " + "ref_count = %d\n", __func__, block, block->base_uva, block->size, + block->ref_count); } diff --git a/core/vcpu.c b/core/vcpu.c index 56e0e83d..072bac7d 100644 --- a/core/vcpu.c +++ b/core/vcpu.c @@ -195,19 +195,20 @@ static uint64_t vcpu_read_cr(struct vcpu_state_t *state, uint32_t n) break; } default: { - hax_warning("Ignored unsupported CR%d read, returning 0\n", n); + hax_log(HAX_LOGW, "Ignored unsupported CR%d read, returning 0\n", + n); break; } } - hax_debug("vcpu_read_cr cr %x val %llx\n", n, val); + hax_log(HAX_LOGD, "vcpu_read_cr cr %x val %llx\n", n, val); return val; } static void vcpu_write_cr(struct vcpu_state_t *state, uint32_t n, uint64_t val) { - hax_debug("vcpu_write_cr cr %x val %llx\n", n, val); + hax_log(HAX_LOGD, "vcpu_write_cr cr %x val %llx\n", n, val); switch (n) { case 0: { @@ -227,7 +228,7 @@ static void vcpu_write_cr(struct vcpu_state_t *state, uint32_t n, uint64_t val) break; } default: { - hax_error("write_cr: Unsupported CR%d access\n", n); + hax_log(HAX_LOGE, "write_cr: Unsupported CR%d access\n", n); break; } } @@ -304,8 +305,9 @@ static int vcpu_vpid_alloc(struct vcpu_t *vcpu) max_bit = vpid_seed_bits > 0xff ? 0xff : vpid_seed_bits; if (0 != vcpu->vpid) { - hax_warning("vcpu_vpid_alloc: vcpu %u in vm %d already has a valid " - "VPID 0x%x.\n", vcpu->vcpu_id, vcpu->vm->vm_id, vcpu->vpid); + hax_log(HAX_LOGW, "vcpu_vpid_alloc: vcpu %u in vm %d already has a " + "valid VPID 0x%x.\n", vcpu->vcpu_id, vcpu->vm->vm_id, + vcpu->vpid); return -1; } @@ -316,8 +318,8 @@ static int vcpu_vpid_alloc(struct vcpu_t *vcpu) if (bit == max_bit) { // No available VPID resource - hax_error("vcpu_vpid_alloc: no available vpid resource. 
vcpu: %u, " - "vm: %d\n", vcpu->vcpu_id, vcpu->vm->vm_id); + hax_log(HAX_LOGE, "vcpu_vpid_alloc: no available vpid resource. " + "vcpu: %u, vm: %d\n", vcpu->vcpu_id, vcpu->vm->vm_id); return -2; } @@ -329,8 +331,8 @@ static int vcpu_vpid_alloc(struct vcpu_t *vcpu) * Note: vpid can't be zero. */ vcpu->vpid = (uint16_t)(vcpu->vm->vm_id << 8) + (uint16_t)(bit + 1); - hax_info("vcpu_vpid_alloc: succeed! vpid: 0x%x. vcpu_id: %u, vm_id: %d.\n", - vcpu->vpid, vcpu->vcpu_id, vcpu->vm->vm_id); + hax_log(HAX_LOGI, "vcpu_vpid_alloc: succeed! vpid: 0x%x. vcpu_id: %u, " + "vm_id: %d.\n", vcpu->vpid, vcpu->vcpu_id, vcpu->vm->vm_id); return 0; } @@ -350,18 +352,18 @@ static int vcpu_vpid_free(struct vcpu_t *vcpu) uint8_t bit = (vcpu->vpid & 0xff) - 1; if (0 == vcpu->vpid) { - hax_warning("vcpu_vpid_free: vcpu %u in vm %d does not have a valid " - "VPID.\n", vcpu->vcpu_id, vcpu->vm->vm_id); + hax_log(HAX_LOGW, "vcpu_vpid_free: vcpu %u in vm %d does not have a " + "valid VPID.\n", vcpu->vcpu_id, vcpu->vm->vm_id); return -1; } - hax_info("vcpu_vpid_free: Clearing bit: 0x%x, vpid_seed: 0x%llx. " - "vcpu_id: %u, vm_id: %d.\n", bit, *(uint64_t *)vcpu->vm->vpid_seed, - vcpu->vcpu_id, vcpu->vm->vm_id); + hax_log(HAX_LOGI, "vcpu_vpid_free: Clearing bit: 0x%x, vpid_seed: 0x%llx. 
" + "vcpu_id: %u, vm_id: %d.\n", bit, *(uint64_t *)vcpu->vm->vpid_seed, + vcpu->vcpu_id, vcpu->vm->vm_id); if (0 != hax_test_and_clear_bit(bit, (uint64_t *)(vcpu->vm->vpid_seed))) { - hax_warning("vcpu_vpid_free: bit for vpid 0x%x of vcpu %u in vm %d was " - "already clear.\n", vcpu->vpid, vcpu->vcpu_id, - vcpu->vm->vm_id); + hax_log(HAX_LOGW, "vcpu_vpid_free: bit for vpid 0x%x of vcpu %u in vm " + "%d was already clear.\n", vcpu->vpid, vcpu->vcpu_id, + vcpu->vm->vm_id); } vcpu->vpid = 0; @@ -395,7 +397,7 @@ struct vcpu_t *vcpu_create(struct vm_t *vm, void *vm_host, int vcpu_id) struct hax_tunnel_info info; struct vcpu_t *vcpu; - hax_debug("vcpu_create vcpu_id %x\n", vcpu_id); + hax_log(HAX_LOGD, "vcpu_create vcpu_id %x\n", vcpu_id); if (!valid_vcpu_id(vcpu_id)) return NULL; @@ -409,7 +411,7 @@ struct vcpu_t *vcpu_create(struct vm_t *vm, void *vm_host, int vcpu_id) memset(vcpu, 0, sizeof(struct vcpu_t)); if (hax_vcpu_setup_hax_tunnel(vcpu, &info) < 0) { - hax_error("cannot setup hax_tunnel for vcpu.\n"); + hax_log(HAX_LOGE, "cannot setup hax_tunnel for vcpu.\n"); goto fail_1; } @@ -466,7 +468,7 @@ struct vcpu_t *vcpu_create(struct vm_t *vm, void *vm_host, int vcpu_id) // Initialize emulator vcpu_init_emulator(vcpu); - hax_debug("vcpu %d is created.\n", vcpu->vcpu_id); + hax_log(HAX_LOGD, "vcpu %d is created.\n", vcpu->vcpu_id); return vcpu; fail_7: vcpu_vtlb_free(vcpu); @@ -485,7 +487,7 @@ struct vcpu_t *vcpu_create(struct vm_t *vm, void *vm_host, int vcpu_id) fail_1: hax_vfree(vcpu, sizeof(struct vcpu_t)); fail_0: - hax_error("Cannot allocate memory to create vcpu.\n"); + hax_log(HAX_LOGE, "Cannot allocate memory to create vcpu.\n"); return NULL; } @@ -526,7 +528,7 @@ static int _vcpu_teardown(struct vcpu_t *vcpu) hax_mutex_free(vcpu->tmutex); hax_vfree(vcpu, sizeof(struct vcpu_t)); - hax_info("vcpu %d is teardown.\n", vcpu_id); + hax_log(HAX_LOGI, "vcpu %d is teardown.\n", vcpu_id); return 0; } @@ -641,7 +643,7 @@ static void dump_segment(uint32_t seg) gdtr_base 
= get_kernel_gdtr_base(); sd = (struct seg_desc_t *)gdtr_base + (seg >> 3); - hax_debug("seg %x value %llx\n", seg, sd->_raw); + hax_log(HAX_LOGD, "seg %x value %llx\n", seg, sd->_raw); } static int check_cs(uint32_t seg) @@ -776,7 +778,7 @@ static int check_fgs_seg(uint32_t seg, uint fs) mword base; if (seg == 0) { - hax_debug("fgs_seg seg is %x fs %x\n", seg, fs); + hax_log(HAX_LOGD, "fgs_seg seg is %x fs %x\n", seg, fs); return 0; } gdtr_base = get_kernel_gdtr_base(); @@ -790,8 +792,8 @@ static int check_fgs_seg(uint32_t seg, uint fs) if ((base & 0xffffff) != sd->_base0 || ((base >> 24) & 0xff) != sd->_base1) { - // hax_debug("%s base address mismatch base %llx sd %llx\n", - // fs ? "fs" : "gs", base, sd->_raw); + // hax_log(HAX_LOGD, "%s base address mismatch base %llx sd %llx\n", + // fs ? "fs" : "gs", base, sd->_raw); // return 1; return 0; } @@ -860,33 +862,33 @@ int vcpu_get_host_state(struct vcpu_t *vcpu, int pre) if (pre) { if (check_cs(hsc->cs)) { - hax_debug("CS does not pass the check.\n"); + hax_log(HAX_LOGD, "CS does not pass the check.\n"); dump_segment(hsc->cs); // check_panic(); } if (check_stack_seg(hsc->ss)) { - hax_debug("SS does not pass the check.\n"); + hax_log(HAX_LOGD, "SS does not pass the check.\n"); dump_segment(hsc->ss); // check_panic(); } if (check_fgs_seg(hsc->fs, 1)) { - hax_debug("FS does not pass the check.\n"); + hax_log(HAX_LOGD, "FS does not pass the check.\n"); dump_segment(hsc->fs); // check_panic(); } if (check_fgs_seg(hsc->gs, 0)) { - hax_debug("GS does not pass the check.\n"); + hax_log(HAX_LOGD, "GS does not pass the check.\n"); dump_segment(hsc->gs); // check_panic(); } if (check_data_seg(hsc->ds) || check_data_seg(hsc->es)) { - hax_debug("DS or ES does not pass the check.\n"); + hax_log(HAX_LOGD, "DS or ES does not pass the check.\n"); dump_segment(hsc->ds); dump_segment(hsc->es); // check_panic(); } if (check_tr_seg(hsc->tr)) { - hax_debug("TR does not pass the check.\n"); + hax_log(HAX_LOGD, "TR does not pass the 
check.\n"); dump_segment(hsc->tr); // check_panic(); } @@ -906,7 +908,7 @@ int compare_host_state(struct vcpu_t *vcpu) pre = &vcpu->hsc_pre; post = &vcpu->hsc_post; if (memcmp(pre, post, sizeof(struct host_state_compare))) { - hax_debug("The previous and next is not same.\n"); + hax_log(HAX_LOGD, "The previous and next is not same.\n"); dump_hsc_state(pre); dump_hsc_state(post); check_panic(); @@ -1202,7 +1204,7 @@ void vcpu_save_host_state(struct vcpu_t *vcpu) vmwrite(vcpu, HOST_SS_SELECTOR, get_kernel_ss() & 0xfff8); if (get_kernel_ss() & 0x7) { - hax_debug("Kernel SS %x with 0x7\n", get_kernel_ss()); + hax_log(HAX_LOGD, "Kernel SS %x with 0x7\n", get_kernel_ss()); } vmwrite(vcpu, HOST_DS_SELECTOR, get_kernel_ds() & 0xfff8); @@ -1246,7 +1248,7 @@ void vcpu_save_host_state(struct vcpu_t *vcpu) } if (fs & 0x7) { - hax_debug("fs %x\n", fs); + hax_log(HAX_LOGD, "fs %x\n", fs); hstate->fs = fs; hstate->fs_base = ia32_rdmsr(IA32_FS_BASE); hstate->seg_valid |= HOST_SEG_VALID_FS; @@ -1370,8 +1372,9 @@ static void fill_common_vmcs(struct vcpu_t *vcpu) entry_ctls = ENTRY_CONTROL_LOAD_DEBUG_CONTROLS; if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "load_vmcs failed while vcpu_prepare: %x", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "load_vmcs failed while vcpu_prepare: %x", + vmcs_err); hax_panic_log(vcpu); return; } @@ -1465,16 +1468,17 @@ static void fill_common_vmcs(struct vcpu_t *vcpu) load_vmcs_common(vcpu); if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "put_vmcs() failed in vcpu_prepare, %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs() failed in vcpu_prepare, %x\n", + vmcs_err); hax_panic_log(vcpu); } } static void vcpu_prepare(struct vcpu_t *vcpu) { - hax_debug("vcpu_prepare current %x, CPU %x\n", vcpu->vcpu_id, - hax_cpuid()); + hax_log(HAX_LOGD, "vcpu_prepare current %x, CPU %x\n", vcpu->vcpu_id, + hax_cpuid()); hax_mutex_lock(vcpu->tmutex); fill_common_vmcs(vcpu); 
hax_mutex_unlock(vcpu->tmutex); @@ -1572,7 +1576,7 @@ static int read_low_bits(uint64_t *pdst, uint64_t src, uint8_t size) } default: { // Should never happen - hax_error("read_low_bits: Invalid size %u\n", size); + hax_log(HAX_LOGE, "read_low_bits: Invalid size %u\n", size); return -1; } } @@ -1607,7 +1611,7 @@ static int write_low_bits(uint64_t *pdst, uint64_t src, uint8_t size) } default: { // Should never happen - hax_error("write_low_bits: Invalid size %u\n", size); + hax_log(HAX_LOGE, "write_low_bits: Invalid size %u\n", size); return -1; } } @@ -1626,7 +1630,8 @@ static void handle_io_post(struct vcpu_t *vcpu, struct hax_tunnel *htun) size = htun->io._count * htun->io._size; if (!vcpu_write_guest_virtual(vcpu, htun->io._vaddr, IOS_MAX_BUFFER, (void *)vcpu->io_buf, size, 0)) { - hax_panic_vcpu(vcpu, "Unexpected page fault, kill the VM!\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Unexpected page fault, kill the VM!\n"); dump_vmcs(vcpu); } } else { @@ -1658,15 +1663,15 @@ int vcpu_execute(struct vcpu_t *vcpu) int err = 0; hax_mutex_lock(vcpu->tmutex); - hax_debug("vcpu begin to run....\n"); + hax_log(HAX_LOGD, "vcpu begin to run....\n"); // QEMU will do realmode stuff for us if (!hax->ug_enable_flag && !(vcpu->state->_cr0 & CR0_PE)) { htun->_exit_reason = 0; htun->_exit_status = HAX_EXIT_REALMODE; - hax_debug("Guest is in realmode.\n"); + hax_log(HAX_LOGD, "Guest is in realmode.\n"); goto out; } - hax_debug("vcpu begin to run....in PE\n"); + hax_log(HAX_LOGD, "vcpu begin to run....in PE\n"); if (htun->_exit_status == HAX_EXIT_IO) { handle_io_post(vcpu, htun); @@ -1675,8 +1680,9 @@ int vcpu_execute(struct vcpu_t *vcpu) if (!em_ctxt->finished) { rc = em_emulate_insn(em_ctxt); if (rc < 0) { - hax_panic_vcpu(vcpu, "%s: em_emulate_insn() failed: vcpu_id=%u\n", - __func__, vcpu->vcpu_id); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s: em_emulate_insn() failed: vcpu_id=%u\n", + __func__, vcpu->vcpu_id); err = HAX_RESUME; goto out; } @@ -1724,7 +1730,7 
@@ int vtlb_active(struct vcpu_t *vcpu) if (hax->ug_enable_flag) return 0; - hax_debug("vtlb active: cr0, %llx\n", state->_cr0); + hax_log(HAX_LOGD, "vtlb active: cr0, %llx\n", state->_cr0); if ((state->_cr0 & CR0_PG) == 0) return 1; @@ -1763,8 +1769,9 @@ void vcpu_vmread_all(struct vcpu_t *vcpu) // CRs were already updated // TODO: Always read RIP, RFLAGs, maybe reduce them in future! if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "load_vmcs failed while vcpu_vmread_all: %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "load_vmcs failed while " + "vcpu_vmread_all: %x\n", vmcs_err); hax_panic_log(vcpu); return; } @@ -1791,8 +1798,9 @@ void vcpu_vmread_all(struct vcpu_t *vcpu) vmx(vcpu, interruptibility_state).raw = vmread( vcpu, GUEST_INTERRUPTIBILITY); if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "put_vmcs failed while vcpu_vmread_all: %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs failed while vcpu_vmread_all: %x\n", + vmcs_err); hax_panic_log(vcpu); } @@ -1856,8 +1864,8 @@ static int vcpu_prepare_pae_pdpt(struct vcpu_t *vcpu) (uint8_t *)vcpu->pae_pdptes); // The PAE PDPT cannot span two page frames if (ret != pdpt_size) { - hax_error("%s: Failed to read PAE PDPT: cr3=0x%llx, ret=%d\n", __func__, - cr3, ret); + hax_log(HAX_LOGE, "%s: Failed to read PAE PDPT: cr3=0x%llx, ret=%d\n", + __func__, cr3, ret); return ret < 0 ? 
ret : -EIO; } vcpu->pae_pdpt_dirty = 1; @@ -1871,8 +1879,8 @@ static int vcpu_prepare_pae_pdpt(struct vcpu_t *vcpu) buf = hax_map_gpfn(vcpu->vm, gpfn, false, cr3 & 0xfffff000, 1); #endif // HAX_ARCH_X86_64 if (!buf) { - hax_error("%s: Failed to map guest page frame containing PAE PDPT:" - " cr3=0x%llx\n", __func__, cr3); + hax_log(HAX_LOGE, "%s: Failed to map guest page frame containing PAE " + "PDPT: cr3=0x%llx\n", __func__, cr3); return -ENOMEM; } pdpt = buf + (cr3 & 0xfe0); @@ -1939,7 +1947,7 @@ static void vmwrite_cr(struct vcpu_t *vcpu) } if (vtlb_active(vcpu)) { - hax_debug("vTLB mode, cr0 %llx\n", vcpu->state->_cr0); + hax_log(HAX_LOGD, "vTLB mode, cr0 %llx\n", vcpu->state->_cr0); vcpu->mmu->mmu_mode = MMU_MODE_VTLB; exc_bitmap |= 1u << VECTOR_PF; cr0 |= CR0_WP; @@ -1965,8 +1973,8 @@ static void vmwrite_cr(struct vcpu_t *vcpu) cr4_mask |= CR4_PAE; eptp = vm_get_eptp(vcpu->vm); hax_assert(eptp != INVALID_EPTP); - // hax_debug("Guest eip:%llx, EPT mode, eptp:%llx\n", vcpu->state->_rip, - // eptp); + // hax_log(HAX_LOGD, "Guest eip:%llx, EPT mode, eptp:%llx\n", + // vcpu->state->_rip, eptp); vmwrite(vcpu, GUEST_CR3, state->_cr3); scpu_ctls |= ENABLE_EPT; if (vcpu->pae_pdpt_dirty) { @@ -1991,7 +1999,7 @@ static void vmwrite_cr(struct vcpu_t *vcpu) vmwrite(vcpu, GUEST_CR0, cr0); vmwrite(vcpu, VMX_CR0_MASK, cr0_mask); - hax_debug("vmwrite_cr cr0 %llx, cr0_mask %llx\n", cr0, cr0_mask); + hax_log(HAX_LOGD, "vmwrite_cr cr0 %llx, cr0_mask %llx\n", cr0, cr0_mask); cr0_shadow = (state->_cr0 & ~vmx(vcpu, cr0_mask)) | (vmx(vcpu, cr0_shadow) & vmx(vcpu, cr0_mask)); vmwrite(vcpu, VMX_CR0_READ_SHADOW, cr0_shadow); @@ -2136,17 +2144,19 @@ static int vcpu_emulate_insn(struct vcpu_t *vcpu) va = (mode == EM_MODE_PROT64) ? 
rip : cs_base + rip; #ifdef CONFIG_HAX_EPT2 if (mmio_fetch_instruction(vcpu, va, instr, INSTR_MAX_LEN)) { - hax_panic_vcpu(vcpu, "%s: mmio_fetch_instruction() failed: vcpu_id=%u," - " gva=0x%llx (CS:IP=0x%llx:0x%llx)\n", - __func__, vcpu->vcpu_id, va, cs_base, rip); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s: mmio_fetch_instruction() failed: vcpu_id=%u," + " gva=0x%llx (CS:IP=0x%llx:0x%llx)\n", + __func__, vcpu->vcpu_id, va, cs_base, rip); dump_vmcs(vcpu); return -1; } #else // !CONFIG_HAX_EPT2 if (!vcpu_read_guest_virtual(vcpu, va, &instr, INSTR_MAX_LEN, INSTR_MAX_LEN, 0)) { - hax_panic_vcpu(vcpu, "Error reading instruction at 0x%llx for decoding" - " (CS:IP=0x%llx:0x%llx)\n", va, cs_base, rip); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Error reading instruction at 0x%llx for decoding" + " (CS:IP=0x%llx:0x%llx)\n", va, cs_base, rip); dump_vmcs(vcpu); return -1; } @@ -2155,28 +2165,30 @@ static int vcpu_emulate_insn(struct vcpu_t *vcpu) em_ctxt->rip = rip; rc = em_decode_insn(em_ctxt, instr); if (rc < 0) { - hax_panic_vcpu(vcpu, "em_decode_insn() failed: vcpu_id=%u," - " len=%u, CS:IP=0x%llx:0x%llx, instr[0..5]=" - "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", vcpu->vcpu_id, - vcpu->vmx.exit_instr_length, cs_base, rip, instr[0], - instr[1], instr[2], instr[3], instr[4], instr[5]); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "em_decode_insn() failed: vcpu_id=%u," + " len=%u, CS:IP=0x%llx:0x%llx, instr[0..5]=" + "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", vcpu->vcpu_id, + vcpu->vmx.exit_instr_length, cs_base, rip, instr[0], + instr[1], instr[2], instr[3], instr[4], instr[5]); dump_vmcs(vcpu); return HAX_RESUME; } if (em_ctxt->len != vcpu->vmx.exit_instr_length) { - hax_debug("Inferred instruction length %u does not match VM-exit" - " instruction length %u (CS:IP=0x%llx:0x%llx, instr[0..5]=" - "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x)\n", em_ctxt->len, - vcpu->vmx.exit_instr_length, cs_base, rip, instr[0], instr[1], - instr[2], instr[3], instr[4], instr[5]); + hax_log(HAX_LOGD, 
"Inferred instruction length %u does not match " + "VM-exit instruction length %u (CS:IP=0x%llx:0x%llx, " + "instr[0..5]=0x%x 0x%x 0x%x 0x%x 0x%x 0x%x)\n", em_ctxt->len, + vcpu->vmx.exit_instr_length, cs_base, rip, instr[0], instr[1], + instr[2], instr[3], instr[4], instr[5]); } rc = em_emulate_insn(em_ctxt); if (rc < 0) { - hax_panic_vcpu(vcpu, "em_emulate_insn() failed: vcpu_id=%u," - " len=%u, CS:IP=0x%llx:0x%llx, instr[0..5]=" - "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", vcpu->vcpu_id, - vcpu->vmx.exit_instr_length, cs_base, rip, instr[0], - instr[1], instr[2], instr[3], instr[4], instr[5]); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "em_emulate_insn() failed: vcpu_id=%u," + " len=%u, CS:IP=0x%llx:0x%llx, instr[0..5]=" + "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", vcpu->vcpu_id, + vcpu->vmx.exit_instr_length, cs_base, rip, instr[0], + instr[1], instr[2], instr[3], instr[4], instr[5]); dump_vmcs(vcpu); return HAX_RESUME; } @@ -2187,7 +2199,8 @@ static uint64_t vcpu_read_gpr(void *obj, uint32_t reg_index) { struct vcpu_t *vcpu = obj; if (reg_index >= 16) { - hax_panic_vcpu(vcpu, "vcpu_read_gpr: Invalid register index\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "vcpu_read_gpr: Invalid register index\n"); return 0; } return vcpu->state->_regs[reg_index]; @@ -2197,7 +2210,8 @@ static void vcpu_write_gpr(void *obj, uint32_t reg_index, uint64_t value) { struct vcpu_t *vcpu = obj; if (reg_index >= 16) { - hax_panic_vcpu(vcpu, "vcpu_write_gpr: Invalid register index\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "vcpu_write_gpr: Invalid register index\n"); return; } vcpu->state->_regs[reg_index] = value; @@ -2340,7 +2354,7 @@ static int exit_exc_nmi(struct vcpu_t *vcpu, struct hax_tunnel *htun) exit_intr_info.raw = vmx(vcpu, exit_intr_info).raw; htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason; - hax_debug("exception vmexit vector:%x\n", exit_intr_info.vector); + hax_log(HAX_LOGD, "exception vmexit vector:%x\n", exit_intr_info.vector); switch 
(exit_intr_info.vector) { case VECTOR_NMI: { @@ -2354,20 +2368,23 @@ static int exit_exc_nmi(struct vcpu_t *vcpu, struct hax_tunnel *htun) return vcpu_emulate_insn(vcpu); } else { - hax_panic_vcpu(vcpu, "Page fault shouldn't happen when EPT is " - "enabled.\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Page fault shouldn't happen when EPT is" + " enabled.\n"); dump_vmcs(vcpu); } break; } case VECTOR_MC: { - hax_panic_vcpu(vcpu, "Machine check happens!\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Machine check happens!\n"); dump_vmcs(vcpu); handle_machine_check(vcpu); break; } case VECTOR_DF: { - hax_panic_vcpu(vcpu, "Double fault!\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Double fault!\n"); dump_vmcs(vcpu); break; } @@ -2401,7 +2418,7 @@ static void handle_machine_check(struct vcpu_t *vcpu) uint n, i; #define MSR_TRACE(msr) \ - hax_debug("MSR %s (%x): %08llx\n", #msr, msr, ia32_rdmsr(msr)) + hax_log(HAX_LOGD, "MSR %s (%x): %08llx\n", #msr, msr, ia32_rdmsr(msr)) MSR_TRACE(IA32_MCG_CAP); MSR_TRACE(IA32_MCG_STATUS); @@ -2410,8 +2427,8 @@ static void handle_machine_check(struct vcpu_t *vcpu) } #define MSR_TRACEi(n, a) \ - hax_debug("MSR IA32_MC%d_%s (%x): %08llx\n", i, #n, a + i * 4, \ - ia32_rdmsr(a + i * 4)) + hax_log(HAX_LOGD, "MSR IA32_MC%d_%s (%x): %08llx\n", i, #n, \ + a + i * 4, ia32_rdmsr(a + i * 4)) n = mcg_cap & 0xff; for (i = 0; i < n; i++) { @@ -2444,7 +2461,7 @@ static void handle_machine_check(struct vcpu_t *vcpu) #undef MSR_TRACE #undef MSR_TRACEi - hax_warning("Machine check"); + hax_log(HAX_LOGW, "Machine check"); } static int exit_interrupt(struct vcpu_t *vcpu, struct hax_tunnel *htun) @@ -2456,7 +2473,8 @@ static int exit_interrupt(struct vcpu_t *vcpu, struct hax_tunnel *htun) static int exit_triple_fault(struct vcpu_t *vcpu, struct hax_tunnel *htun) { - hax_panic_vcpu(vcpu, "Triple fault\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Triple fault\n"); dump_vmcs(vcpu); return HAX_RESUME; } @@ -2476,7 +2494,7 @@ 
static int exit_cpuid(struct vcpu_t *vcpu, struct hax_tunnel *htun) { handle_cpuid(vcpu, htun); advance_rip(vcpu); - hax_debug("...........exit_cpuid\n"); + hax_log(HAX_LOGD, "...........exit_cpuid\n"); return HAX_RESUME; } @@ -2496,8 +2514,8 @@ static void handle_cpuid(struct vcpu_t *vcpu, struct hax_tunnel *htun) handle_cpuid_virtual(vcpu, a, c); - hax_debug("CPUID %08x %08x: %08x %08x %08x %08x\n", a, c, state->_eax, - state->_ebx, state->_ecx, state->_edx); + hax_log(HAX_LOGD, "CPUID %08x %08x: %08x %08x %08x %08x\n", a, c, + state->_eax, state->_ebx, state->_ecx, state->_edx); htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason; } @@ -2762,7 +2780,7 @@ static int exit_invlpg(struct vcpu_t *vcpu, struct hax_tunnel *htun) static int exit_rdtsc(struct vcpu_t *vcpu, struct hax_tunnel *htun) { - hax_debug("rdtsc exiting: rip: %llx\n", vcpu->state->_rip); + hax_log(HAX_LOGD, "rdtsc exiting: rip: %llx\n", vcpu->state->_rip); return HAX_RESUME; } @@ -2813,7 +2831,7 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) if (cr == 8) { // TODO: Redirect CR8 write to user space (emulated APIC.TPR) - hax_warning("Ignored guest CR8 write, val=0x%llx\n", val); + hax_log(HAX_LOGW, "Ignored guest CR8 write, val=0x%llx\n", val); break; } @@ -2821,9 +2839,9 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) if (cr == 0) { uint64_t cr0_pae_triggers; - hax_info("Guest writing to CR0[%u]: 0x%llx -> 0x%llx," - " _cr4=0x%llx, _efer=0x%x\n", vcpu->vcpu_id, - old_val, val, state->_cr4, state->_efer); + hax_log(HAX_LOGI, "Guest writing to CR0[%u]: 0x%llx -> 0x%llx," + " _cr4=0x%llx, _efer=0x%x\n", vcpu->vcpu_id, + old_val, val, state->_cr4, state->_efer); if ((val & CR0_PG) && !(val & CR0_PE)) { hax_inject_exception(vcpu, VECTOR_GP, 0); return HAX_RESUME; @@ -2838,7 +2856,7 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) if (!hax->ug_enable_flag && (old_val & CR0_PE) && !(val & CR0_PE)) { htun->_exit_status = 
HAX_EXIT_REALMODE; - hax_debug("Enter NON-PE from PE\n"); + hax_log(HAX_LOGD, "Enter NON-PE from PE\n"); return HAX_EXIT; } @@ -2847,16 +2865,17 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) if ((val & CR0_PG) && (state->_cr4 & CR4_PAE) && !(state->_efer & IA32_EFER_LME) && !vtlb_active(vcpu) && ((val ^ old_val) & cr0_pae_triggers)) { - hax_info("%s: vCPU #%u triggers PDPT (re)load for EPT+PAE" - " mode (CR0 path)\n", __func__, vcpu->vcpu_id); + hax_log(HAX_LOGI, "%s: vCPU #%u triggers PDPT (re)load for" + " EPT+PAE mode (CR0 path)\n", __func__, + vcpu->vcpu_id); is_ept_pae = true; } } else if (cr == 4) { uint64_t cr4_pae_triggers; - hax_info("Guest writing to CR4[%u]: 0x%llx -> 0x%llx," - "_cr0=0x%llx, _efer=0x%x\n", vcpu->vcpu_id, - old_val, val, state->_cr0, state->_efer); + hax_log(HAX_LOGI, "Guest writing to CR4[%u]: 0x%llx -> 0x%llx," + "_cr0=0x%llx, _efer=0x%x\n", vcpu->vcpu_id, + old_val, val, state->_cr0, state->_efer); if ((state->_efer & IA32_EFER_LMA) && !(val & CR4_PAE)) { hax_inject_exception(vcpu, VECTOR_GP, 0); return HAX_RESUME; @@ -2868,12 +2887,14 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) if ((val & CR4_PAE) && (state->_cr0 & CR0_PG) && !(state->_efer & IA32_EFER_LME) && !vtlb_active(vcpu) && ((val ^ old_val) & cr4_pae_triggers)) { - hax_info("%s: vCPU #%u triggers PDPT (re)load for EPT+PAE" - " mode (CR4 path)\n", __func__, vcpu->vcpu_id); + hax_log(HAX_LOGI, "%s: vCPU #%u triggers PDPT (re)load for " + "EPT+PAE mode (CR4 path)\n", __func__, + vcpu->vcpu_id); is_ept_pae = true; } } else { - hax_error("Unsupported CR%d write, val=0x%llx\n", cr, val); + hax_log(HAX_LOGE, "Unsupported CR%d write, val=0x%llx\n", + cr, val); break; } check_flush(vcpu, old_val ^ val); @@ -2886,18 +2907,18 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) // registers int ret = vcpu_prepare_pae_pdpt(vcpu); if (ret) { - hax_panic_vcpu(vcpu, "vCPU #%u failed to (re)load PDPT for" - " 
EPT+PAE mode: ret=%d\n", - vcpu->vcpu_id, ret); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "vCPU #%u failed to (re)load PDPT for" + " EPT+PAE mode: ret=%d\n", vcpu->vcpu_id, ret); dump_vmcs(vcpu); return HAX_RESUME; } } if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, - "load_vmcs failed while exit_cr_access %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, + "load_vmcs failed while exit_cr_access %x\n", vmcs_err); hax_panic_log(vcpu); return HAX_RESUME; } @@ -2905,9 +2926,9 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) vmwrite_cr(vcpu); if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, - "put_vmcs failed while exit_cr_access %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, + "put_vmcs failed while exit_cr_access %x\n", vmcs_err); hax_panic_log(vcpu); } @@ -2916,7 +2937,7 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) case 1: { // MOV CR -> GPR uint64_t val; - hax_info("cr_access R CR%d\n", cr); + hax_log(HAX_LOGI, "cr_access R CR%d\n", cr); val = vcpu_read_cr(state, cr); // TODO: Redirect CR8 read to user space (emulated APIC.TPR) @@ -2924,42 +2945,46 @@ static int exit_cr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) break; } case 2: { // CLTS - hax_info("CLTS\n"); + hax_log(HAX_LOGI, "CLTS\n"); state->_cr0 &= ~(uint64_t)CR0_TS; if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "load_vmcs failed while CLTS: %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "load_vmcs failed while CLTS: %x\n", + vmcs_err); hax_panic_log(vcpu); return HAX_RESUME; } vmwrite_cr(vcpu); if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "put_vmcs failed while CLTS: %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs failed while CLTS: %x\n", + vmcs_err); hax_panic_log(vcpu); } break; } case 3: { // LMSW - hax_info("LMSW\n"); + hax_log(HAX_LOGI, "LMSW\n"); state->_cr0 = 
(state->_cr0 & ~0xfULL) | (vmx(vcpu, exit_qualification).cr.lmsw_source & 0xf); if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "load_vmcs failed while LMSW %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "load_vmcs failed while LMSW %x\n", + vmcs_err); hax_panic_log(vcpu); return HAX_RESUME; } vmwrite_cr(vcpu); if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "put_vmcs failed while LMSW %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs failed while LMSW %x\n", + vmcs_err); hax_panic_log(vcpu); } break; } default: { - hax_error("Unsupported Control Register access type.\n"); + hax_log(HAX_LOGE, "Unsupported Control Register access type.\n"); break; } } @@ -3045,14 +3070,16 @@ static int exit_dr_access(struct vcpu_t *vcpu, struct hax_tunnel *htun) else state->_regs[gpr_reg] = 0; - hax_debug("Ignore guest DR%d read due to hw bp enabled.\n", dreg); + hax_log(HAX_LOGD, "Ignore guest DR%d read due to hw bp enabled.\n", + dreg); } else { state->_regs[gpr_reg] = *dr; } } else { // MOV DR <- GPR if (hbreak_enabled) { - hax_debug("Ignore guest DR%d write due to hw bp enabled.\n", dreg); + hax_log(HAX_LOGD, "Ignore guest DR%d write due to hw bp enabled.\n", + dreg); } else { *dr = state->_regs[gpr_reg]; vcpu->dr_dirty = 1; @@ -3104,9 +3131,10 @@ static int handle_string_io(struct vcpu_t *vcpu, exit_qualification_t *qual, if (qual->io.direction == HAX_IO_OUT) { if (!vcpu_read_guest_virtual(vcpu, start_gva, vcpu->io_buf, IOS_MAX_BUFFER, copy_size, 0)) { - hax_panic_vcpu(vcpu, "%s: vcpu_read_guest_virtual() failed," - " start_gva=0x%llx, elem_size=%u, count=%llu\n", - __func__, start_gva, elem_size, count); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s: vcpu_read_guest_virtual() failed," + " start_gva=0x%llx, elem_size=%u, count=%llu\n", + __func__, start_gva, elem_size, count); dump_vmcs(vcpu); return HAX_RESUME; } @@ -3184,8 +3212,8 @@ static int exit_io_access(struct vcpu_t *vcpu, 
struct hax_tunnel *htun) htun->io._size = qual->io.size + 1; htun->io._direction = qual->io.direction; - hax_debug("exit_io_access port %x, size %d\n", htun->io._port, - htun->io._size); + hax_log(HAX_LOGD, "exit_io_access port %x, size %d\n", htun->io._port, + htun->io._size); if (qual->io.string) return handle_string_io(vcpu, qual, htun); @@ -3424,7 +3452,7 @@ static int handle_msr_read(struct vcpu_t *vcpu, uint32_t msr, uint64_t *val) // the vCPU supports the "pdcm" feature (which it does not) case IA32_PERF_CAPABILITIES: { *val = 0; - hax_info("handle_msr_read: IA32_PERF_CAPABILITIES\n"); + hax_log(HAX_LOGI, "handle_msr_read: IA32_PERF_CAPABILITIES\n"); break; } // In case the host CPU does not support MSR bitmaps, emulate MSR reads @@ -3435,8 +3463,8 @@ static int handle_msr_read(struct vcpu_t *vcpu, uint32_t msr, uint64_t *val) case IA32_PMC3: { *val = hax->apm_version ? gstate->apm_pmc_msrs[msr - IA32_PMC0] & hax->apm_general_mask : 0; - hax_info("handle_msr_read: IA32_PMC%u value=0x%llx\n", - msr - IA32_PMC0, *val); + hax_log(HAX_LOGI, "handle_msr_read: IA32_PMC%u value=0x%llx\n", + msr - IA32_PMC0, *val); break; } case IA32_PERFEVTSEL0: @@ -3446,8 +3474,8 @@ static int handle_msr_read(struct vcpu_t *vcpu, uint32_t msr, uint64_t *val) *val = hax->apm_version ? gstate->apm_pes_msrs[msr - IA32_PERFEVTSEL0] : 0; - hax_info("handle_msr_read: IA32_PERFEVTSEL%u value=0x%llx\n", - msr - IA32_PERFEVTSEL0, *val); + hax_log(HAX_LOGI, "handle_msr_read: IA32_PERFEVTSEL%u " + "value=0x%llx\n", msr - IA32_PERFEVTSEL0, *val); break; } default: { @@ -3540,18 +3568,18 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, break; } case IA32_EFER: { - hax_info("%s writing to EFER[%u]: 0x%x -> 0x%llx, _cr0=0x%llx," - " _cr4=0x%llx\n", by_host ? "Host" : "Guest", - vcpu->vcpu_id, state->_efer, val, - state->_cr0, state->_cr4); + hax_log(HAX_LOGI, "%s writing to EFER[%u]: 0x%x -> 0x%llx, " + "_cr0=0x%llx, _cr4=0x%llx\n", by_host ? 
"Host" : "Guest", + vcpu->vcpu_id, state->_efer, val, + state->_cr0, state->_cr4); /* val - "new" EFER, state->_efer - "old" EFER.*/ if ((val & ~((uint64_t)(IA32_EFER_SCE | IA32_EFER_LME | IA32_EFER_LMA | IA32_EFER_XD)))) { - hax_error("Illegal value 0x%llx written to EFER. " - "Reserved bits were set. EFER was 0x%llx\n", - val, (uint64_t) state->_efer); + hax_log(HAX_LOGE, "Illegal value 0x%llx written to EFER. " + "Reserved bits were set. EFER was 0x%llx\n", + val, (uint64_t) state->_efer); r = 1; break; } @@ -3570,9 +3598,9 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, */ if (((val & IA32_EFER_LMA) ^ (state->_efer & IA32_EFER_LMA))) { - hax_warning("Ignoring guest write to IA32_EFER.LMA. " - "EFER: 0x%llx -> 0x%llx\n", - (uint64_t) state->_efer,val); + hax_log(HAX_LOGW, "Ignoring guest write to IA32_EFER.LMA. " + "EFER: 0x%llx -> 0x%llx\n", + (uint64_t) state->_efer,val); /* * No need to explicitly fix the LMA bit here: * val ^= IA32_EFER_LMA; @@ -3583,9 +3611,9 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, if ((state->_cr0 & CR0_PG) && ((val & IA32_EFER_LME) ^ (state->_efer & IA32_EFER_LME))) { - hax_error("Attempted to enable or disable Long Mode with " - "paging enabled. EFER: 0x%llx -> 0x%llx\n", - (uint64_t) state->_efer, val); + hax_log(HAX_LOGE, "Attempted to enable or disable Long Mode" + " with paging enabled. 
EFER: 0x%llx -> 0x%llx\n", + (uint64_t) state->_efer, val); r = 1; break; } @@ -3594,11 +3622,13 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, if (!(ia32_rdmsr(IA32_EFER) & IA32_EFER_LMA) && (state->_efer & IA32_EFER_LME)) { - hax_panic_vcpu( - vcpu, "64-bit guest is not allowed on 32-bit host.\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, + "64-bit guest is not allowed on 32-bit host.\n"); } else if ((state->_efer & IA32_EFER_LME) && vtlb_active(vcpu)) { - hax_panic_vcpu(vcpu, "64-bit guest is not allowed on core 2 " - "machine.\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "64-bit guest is not allowed on core 2 " + "machine.\n"); } else { vmwrite_efer(vcpu); } @@ -3695,8 +3725,8 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, // According to IA SDM Vol. 3B 18.2.5, writes to IA_PMCx use // only bits 31..0 of the input value gstate->apm_pmc_msrs[msr - IA32_PMC0] = val & 0xffffffff; - hax_info("handle_msr_write: IA32_PMC%u value=0x%llx\n", - msr - IA32_PMC0, val); + hax_log(HAX_LOGI, "handle_msr_write: IA32_PMC%u value=0x%llx\n", + msr - IA32_PMC0, val); } break; } @@ -3708,8 +3738,8 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, // According to IA SDM Vol. 
3B Figure 18-1 (APM v1) and Figure // 18-6 (APM v3), bits 63..32 of IA_PERFEVTSELx are reserved gstate->apm_pes_msrs[msr - IA32_PERFEVTSEL0] = val & 0xffffffff; - hax_info("handle_msr_write: IA32_PERFEVTSEL%u value=0x%llx\n", - msr - IA32_PERFEVTSEL0, val); + hax_log(HAX_LOGI, "handle_msr_write: IA32_PERFEVTSEL%u " + "value=0x%llx\n", msr - IA32_PERFEVTSEL0, val); } break; } @@ -3724,7 +3754,8 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val, static int exit_invalid_guest_state(struct vcpu_t *vcpu, struct hax_tunnel *htun) { - hax_panic_vcpu(vcpu, "vcpu->tr:%x\n", vcpu->state->_tr.ar); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "vcpu->tr:%x\n", vcpu->state->_tr.ar); dump_vmcs(vcpu); return HAX_RESUME; } @@ -3748,8 +3779,9 @@ static int exit_ept_misconfiguration(struct vcpu_t *vcpu, } #endif // CONFIG_HAX_EPT2 - hax_panic_vcpu(vcpu, "%s: Unexpected EPT misconfiguration: gpa=0x%llx\n", - __func__, gpa); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s: Unexpected EPT misconfiguration: gpa=0x%llx\n", + __func__, gpa); dump_vmcs(vcpu); return HAX_RESUME; } @@ -3766,7 +3798,8 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun) htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason; if (qual->ept.gla1 == 0 && qual->ept.gla2 == 1) { - hax_panic_vcpu(vcpu, "Incorrect EPT seting\n"); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Incorrect EPT setting\n"); dump_vmcs(vcpu); return HAX_RESUME; } @@ -3794,14 +3827,16 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun) * (read-only) and thus result in EPT violations. * TODO: Handle this case properly. */ - hax_warning("%s: Unexpected EPT violation cause. Skipping instruction" - " (len=%u)\n", __func__, vcpu->vmx.exit_instr_length); + hax_log(HAX_LOGW, "%s: Unexpected EPT violation cause. 
Skipping " + "instruction (len=%u)\n", __func__, + vcpu->vmx.exit_instr_length); advance_rip(vcpu); return HAX_EXIT; } if (ret < 0) { - hax_panic_vcpu(vcpu, "%s: ept_handle_access_violation() returned %d.\n", - __func__, ret); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s: ept_handle_access_violation() " + "returned %d.\n", __func__, ret); dump_vmcs(vcpu); return HAX_RESUME; } @@ -3817,15 +3852,17 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun) static void handle_mem_fault(struct vcpu_t *vcpu, struct hax_tunnel *htun) { - hax_warning("handle_mem_fault: Setting exit status to HAX_EXIT_MMIO.\n"); + hax_log(HAX_LOGW, + "handle_mem_fault: Setting exit status to HAX_EXIT_MMIO.\n"); htun->_exit_status = HAX_EXIT_MMIO; } static int null_handler(struct vcpu_t *vcpu, struct hax_tunnel *htun) { htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason; - hax_panic_vcpu(vcpu, "Unhandled vmx vmexit reason:%d\n", - htun->_exit_reason); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Unhandled vmx vmexit reason:%d\n", + htun->_exit_reason); dump_vmcs(vcpu); return HAX_RESUME; } @@ -3921,8 +3958,9 @@ int vcpu_set_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate) } if ((vmcs_err = load_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "load_vmcs failed on vcpu_set_regs: %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "load_vmcs failed on vcpu_set_regs: %x\n", + vmcs_err); hax_panic_log(vcpu); return -EFAULT; } @@ -3978,8 +4016,8 @@ int vcpu_set_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate) * 3B 17.3.1.1), which can't be done in user space. 
*/ if (vcpu->debug_control & HAX_DEBUG_ENABLE) { - hax_info("%s: Ignore DR updates because hax debugging has been enabled" - " in %d.\n", __func__, vcpu->vcpu_id); + hax_log(HAX_LOGI, "%s: Ignore DR updates because hax debugging has " + "been enabled in %d.\n", __func__, vcpu->vcpu_id); } else { UPDATE_VCPU_STATE(_dr0, dr_dirty); UPDATE_VCPU_STATE(_dr1, dr_dirty); @@ -4014,8 +4052,9 @@ int vcpu_set_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate) } if ((vmcs_err = put_vmcs(vcpu, &flags))) { - hax_panic_vcpu(vcpu, "put_vmcs failed on vcpu_set_regs: %x\n", - vmcs_err); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "put_vmcs failed on vcpu_set_regs: %x\n", + vmcs_err); hax_panic_log(vcpu); } @@ -4132,7 +4171,7 @@ static void vcpu_dump(struct vcpu_t *vcpu, uint32_t mask, const char *caption) static void vcpu_state_dump(struct vcpu_t *vcpu) { - hax_debug( + hax_log(HAX_LOGD, "RIP: %08llx RSP: %08llx RFLAGS: %08llx\n" "RAX: %08llx RBX: %08llx RCX: %08llx RDX: %08llx\n" "RSI: %08llx RDI: %08llx RBP: %08llx\n" @@ -4283,27 +4322,29 @@ static int vcpu_set_apic_base(struct vcpu_t *vcpu, uint64_t val) struct gstate *gstate = &vcpu->gstate; if (val & ~APIC_BASE_MASK) { - hax_error("Try to set reserved bits of IA32_APIC_BASE MSR and failed " - "to set APIC base msr to 0x%llx.\n", val); + hax_log(HAX_LOGE, "Try to set reserved bits of IA32_APIC_BASE MSR and" + " failed to set APIC base msr to 0x%llx.\n", val); return -EINVAL; } if ((val & APIC_BASE_ADDR_MASK) != APIC_BASE_DEFAULT_ADDR) { - hax_error("APIC base cannot be relocated to 0x%llx.\n",\ - val & APIC_BASE_ADDR_MASK); + hax_log(HAX_LOGE, "APIC base cannot be relocated to 0x%llx.\n", + val & APIC_BASE_ADDR_MASK); return -EINVAL; } if (!(val & APIC_BASE_ENABLE)) { - hax_warning("APIC is disabled for vCPU %u.\n", vcpu->vcpu_id); + hax_log(HAX_LOGW, "APIC is disabled for vCPU %u.\n", vcpu->vcpu_id); } if (val & APIC_BASE_BSP) { if (vcpu_is_bsp(vcpu)) { - hax_info("vCPU %u is set to bootstrap processor.\n", 
vcpu->vcpu_id); + hax_log(HAX_LOGI, "vCPU %u is set to bootstrap processor.\n", + vcpu->vcpu_id); } else { - hax_error("Bootstrap processor is vCPU %u and cannot changed to " - "vCPU %u.\n", vcpu->vm->bsp_vcpu_id, vcpu->vcpu_id); + hax_log(HAX_LOGE, "Bootstrap processor is vCPU %u and cannot " + "changed to vCPU %u.\n", vcpu->vm->bsp_vcpu_id, + vcpu->vcpu_id); return -EINVAL; } } diff --git a/core/vm.c b/core/vm.c index 45fb6323..a5c02091 100644 --- a/core/vm.c +++ b/core/vm.c @@ -59,7 +59,7 @@ static int get_free_vm_mid(void) static void hax_put_vm_mid(int id) { if (hax_test_and_clear_bit(id, (uint64_t *)&vm_mid_bits)) - hax_warning("Clear a non-set vmid %x\n", id); + hax_log(HAX_LOGW, "Clear a non-set vmid %x\n", id); } static int valid_vm_mid(int vm_id) @@ -100,21 +100,21 @@ struct vm_t * hax_create_vm(int *vm_id) #endif // CONFIG_HAX_EPT2 if ((!hax->vmx_enable_flag) || (!hax->nx_enable_flag)) { - hax_error("VT or NX is not enabled, can not setup VM!\n"); + hax_log(HAX_LOGE, "VT or NX is not enabled, can not setup VM!\n"); return NULL; } id = get_free_vm_mid(); if (!valid_vm_mid(id)) { - hax_error("Failed to allocate vm id\n"); + hax_log(HAX_LOGE, "Failed to allocate vm id\n"); return NULL; } *vm_id = id; hvm = hax_vmalloc(sizeof(struct vm_t), HAX_MEM_NONPAGE); if (!hvm) { hax_put_vm_mid(id); - hax_error("Failed to allocate vm\n"); + hax_log(HAX_LOGE, "Failed to allocate vm\n"); return NULL; } memset(hvm, 0, sizeof(struct vm_t)); @@ -143,12 +143,12 @@ struct vm_t * hax_create_vm(int *vm_id) #ifdef CONFIG_HAX_EPT2 ret = gpa_space_init(&hvm->gpa_space); if (ret) { - hax_error("%s: gpa_space_init() returned %d\n", __func__, ret); + hax_log(HAX_LOGE, "%s: gpa_space_init() returned %d\n", __func__, ret); goto fail0; } ret = ept_tree_init(&hvm->ept_tree); if (ret) { - hax_error("%s: ept_tree_init() returned %d\n", __func__, ret); + hax_log(HAX_LOGE, "%s: ept_tree_init() returned %d\n", __func__, ret); goto fail0; } @@ -158,7 +158,7 @@ struct vm_t * hax_create_vm(int 
*vm_id) hvm->gpa_space_listener.opaque = (void *)&hvm->ept_tree; gpa_space_add_listener(&hvm->gpa_space, &hvm->gpa_space_listener); - hax_info("%s: Invoking INVEPT for VM %d\n", __func__, hvm->vm_id); + hax_log(HAX_LOGI, "%s: Invoking INVEPT for VM %d\n", __func__, hvm->vm_id); invept(hvm, EPT_INVEPT_SINGLE_CONTEXT); #else // !CONFIG_HAX_EPT2 if (!ept_init(hvm)) @@ -226,7 +226,7 @@ int hax_vm_core_open(struct vm_t *vm) int hax_teardown_vm(struct vm_t *vm) { if (!hax_list_empty(&(vm->vcpu_list))) { - hax_info("Try to teardown non-empty vm\n"); + hax_log(HAX_LOGI, "Try to teardown non-empty vm\n"); return -1; } #ifdef HAX_ARCH_X86_32 @@ -250,7 +250,7 @@ int hax_teardown_vm(struct vm_t *vm) gpa_space_free(&vm->gpa_space); #endif // CONFIG_HAX_EPT2 hax_vfree(vm, sizeof(struct vm_t)); - hax_error("\n...........hax_teardown_vm\n"); + hax_log(HAX_LOGE, "...........hax_teardown_vm\n"); return 0; } @@ -619,7 +619,7 @@ void hax_unmap_gpfn(struct vm_t *vm, void *va, uint64_t gpfn) entry = hax_get_p2m_entry(vm, gpfn); if (!entry) { - hax_error("We cannot find the p2m entry!\n"); + hax_log(HAX_LOGE, "We cannot find the p2m entry!\n"); return; } @@ -638,8 +638,8 @@ int hax_core_set_p2m(struct vm_t *vm, uint64_t gpfn, uint64_t hpfn, uint64_t hva ret = set_p2m_mapping(vm, gpfn, hva & ~HAX_PAGE_MASK, hpfn << 12); if (ret < 0) { - hax_error("Failed to set p2m mapping, gpfn:%llx, hva:%llx, hpa:%llx," - "ret:%d\n", gpfn, hva, hpfn << 12, ret); + hax_log(HAX_LOGE, "Failed to set p2m mapping, gpfn:%llx, hva:%llx, " + "hpa:%llx, ret:%d\n", gpfn, hva, hpfn << 12, ret); return 0; } diff --git a/core/vmx.c b/core/vmx.c index ef960983..6973aa15 100644 --- a/core/vmx.c +++ b/core/vmx.c @@ -87,7 +87,8 @@ static void __vmx_vmwrite_common(struct vcpu_t *vcpu, const char *name, break; } default: { - hax_error("Unsupported component %x, val %x\n", component, val); + hax_log(HAX_LOGE, "Unsupported component %x, val %x\n", + component, val); break; } } @@ -105,6 +106,7 @@ void vmx_vmwrite(struct 
vcpu_t *vcpu, const char *name, if (!loaded) { if (load_vmcs(vcpu, &flags)) { vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s load_vmcs fail\n", __FUNCTION__); hax_panic_log(vcpu); return; } @@ -115,6 +117,7 @@ void vmx_vmwrite(struct vcpu_t *vcpu, const char *name, if (!loaded) { if (put_vmcs(vcpu, &flags)) { vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s put_vmcs fail\n", __FUNCTION__); hax_panic_log(vcpu); return; } @@ -171,7 +174,8 @@ static uint64_t __vmread_common(struct vcpu_t *vcpu, break; } default: { - hax_error("Unsupported component %x val %x\n", component, val); + hax_log(HAX_LOGE, "Unsupported component %x val %x\n", + component, val); break; } } @@ -190,6 +194,7 @@ uint64_t vmread(struct vcpu_t *vcpu, component_index_t component) if (!loaded) { if (load_vmcs(vcpu, &flags)) { vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s load_vmcs fail\n", __FUNCTION__); hax_panic_log(vcpu); return 0; } @@ -200,6 +205,7 @@ uint64_t vmread(struct vcpu_t *vcpu, component_index_t component) if (!loaded) { if (put_vmcs(vcpu, &flags)) { vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "%s put_vmcs fail\n", __FUNCTION__); hax_panic_log(vcpu); return 0; } @@ -216,21 +222,21 @@ uint64_t vmread_dump(struct vcpu_t *vcpu, unsigned enc, const char *name) case 0: case 2: { val = vmread(vcpu, enc); - hax_warning("%04x %s: %llx\n", enc, name, val); + hax_log(HAX_LOGW, "%04x %s: %llx\n", enc, name, val); break; } case 1: { val = vmread(vcpu, enc); - hax_warning("%04x %s: %llx\n", enc, name, val); + hax_log(HAX_LOGW, "%04x %s: %llx\n", enc, name, val); break; } case 3: { val = vmread(vcpu, enc); - hax_warning("%04x %s: %llx\n", enc, name, val); + hax_log(HAX_LOGW, "%04x %s: %llx\n", enc, name, val); break; } default: { - hax_error("unsupported enc %x\n", enc); + hax_log(HAX_LOGE, "Unsupported enc %x\n", enc); break; } } diff --git a/core/vtlb.c b/core/vtlb.c index 9943caf6..58702027 100644 --- a/core/vtlb.c +++ b/core/vtlb.c @@ -254,7 +254,8 @@ uint vcpu_vtlb_alloc(struct vcpu_t 
*vcpu) mmu = hax_vmalloc(sizeof(hax_mmu_t), 0); if (!mmu) { - hax_error("No memory to create mmu for vcpu:%d\n", vcpu->vcpu_id); + hax_log(HAX_LOGE, "No memory to create mmu for vcpu:%d\n", + vcpu->vcpu_id); return 0; } memset(mmu, 0, sizeof(hax_mmu_t)); @@ -264,7 +265,7 @@ uint vcpu_vtlb_alloc(struct vcpu_t *vcpu) // Must ensure the first page should be lower than 4G page = hax_alloc_page(HAX_MEM_LOW_4G, 1); if (!page) { - hax_debug("No enough memory for creating vTLB root page!\n"); + hax_log(HAX_LOGD, "No enough memory for creating vTLB root page!\n"); goto alloc_fail0; } mmu->hpd_page = page; @@ -380,7 +381,7 @@ void vtlb_invalidate_addr(hax_mmu_t *mmu, hax_vaddr_t va) hax_assert(mmu->host_mode == PM_PAE); - hax_debug("Flush address 0x%llx\n", va); + hax_log(HAX_LOGD, "Flush address 0x%llx\n", va); pde = vtlb_get_pde(mmu, va, 0); @@ -407,7 +408,7 @@ void vtlb_invalidate(hax_mmu_t *mmu) return; hax_assert(mmu->host_mode == PM_PAE); - hax_debug("Flush whole vTLB\n"); + hax_log(HAX_LOGD, "Flush whole vTLB\n"); mmu_recycle_vtlb_pages(mmu); mmu->clean = 1; @@ -422,8 +423,8 @@ static uint vtlb_handle_page_fault(struct vcpu_t *vcpu, pagemode_t guest_mode, uint need_invalidation = 0; hax_mmu_t *mmu = vcpu->mmu; - hax_debug("vTLB::handle_pagefault %08llx, %08llx %x [Mode %u]\n", pdir, va, - access, guest_mode); + hax_log(HAX_LOGD, "vTLB::handle_pagefault %08llx, %08llx %x [Mode %u]\n", + pdir, va, access, guest_mode); hax_assert(guest_mode != PM_INVALID); if (guest_mode != mmu->guest_mode) { @@ -437,8 +438,9 @@ static uint vtlb_handle_page_fault(struct vcpu_t *vcpu, pagemode_t guest_mode, case PM_PAE: case PM_PML4: default: { - hax_panic_vcpu(vcpu, "Invalid guest page table mode %d\n", - mmu->guest_mode); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "Invalid guest page table mode %d\n", + mmu->guest_mode); } } @@ -451,7 +453,7 @@ static uint vtlb_handle_page_fault(struct vcpu_t *vcpu, pagemode_t guest_mode, mmu->guest_mode = guest_mode; mmu->host_mode = 
new_host_mode; mmu->pdir = pdir; - hax_debug("New vTLB mode %u, pdir %08llx\n", guest_mode, pdir); + hax_log(HAX_LOGD, "New vTLB mode %u, pdir %08llx\n", guest_mode, pdir); } if (need_invalidation || @@ -479,7 +481,8 @@ static uint vtlb_handle_page_fault(struct vcpu_t *vcpu, pagemode_t guest_mode, break; } default: { - hax_error("Invalid guest's paging mode %d\n", mmu->guest_mode); + hax_log(HAX_LOGE, "Invalid guest's paging mode %d\n", + mmu->guest_mode); return TF_FAILED; } } @@ -520,8 +523,8 @@ uint64_t vtlb_get_cr3(struct vcpu_t *vcpu) cr3 = hax_page_pfn(mmu->hpd_page) << 12; - hax_debug("vTLB: guest mode %u, host mode %d, GUEST_CR3: %08llx\n", - mmu->guest_mode, mmu->host_mode, cr3); + hax_log(HAX_LOGD, "vTLB: guest mode %u, host mode %d, GUEST_CR3: %08llx\n", + mmu->guest_mode, mmu->host_mode, cr3); return cr3; } @@ -643,7 +646,8 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce // complete. This matches Atom behavior. if (update && !pte32_is_accessed(&old_pte)) { if (!pte32_atomic_set_accessed(pte, &old_pte)) { - hax_debug("translate walk: atomic PTE update failed\n"); + hax_log(HAX_LOGD, + "translate walk: atomic PTE update failed\n"); #ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); #else // !CONFIG_HAX_EPT2 @@ -700,7 +704,8 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce ((access & TF_WRITE) && !pte32_is_dirty(&old_pte)))) { if (!pte32_atomic_set_ad(pte, lvl, access & TF_WRITE, &old_pte)) { - hax_debug("translate walk: atomic PTE update failed\n"); + hax_log(HAX_LOGD, + "translate walk: atomic PTE update failed\n"); #ifdef CONFIG_HAX_EPT2 gpa_space_unmap_page(&vcpu->vm->gpa_space, &pte_kmap); #else // !CONFIG_HAX_EPT2 @@ -728,7 +733,7 @@ static uint32_t vcpu_mmu_walk(struct vcpu_t *vcpu, hax_vaddr_t va, uint32_t acce uint base_idx = 0; pte32_t pre_pte; uint i; - //hax_error("guest: va %lx\n", va); + //hax_log(HAX_LOGE, "guest: va %lx\n", va); base_idx = idx 
- idx % 16; for (i = 0; i < 16; i++) { @@ -788,14 +793,14 @@ bool handle_vtlb(struct vcpu_t *vcpu) uint32_t ret = vtlb_handle_page_fault(vcpu, mode, pdir, cr2, access); - hax_debug("handle vtlb fault @%llx\n", cr2); + hax_log(HAX_LOGD, "handle vtlb fault @%llx\n", cr2); if (ret == 0) { vcpu->vmcs_pending_guest_cr3 = 1; return 1; } if (ret & TF_GP2HP) { - hax_debug("G2H translation failed (%08llx, %x)\n", cr2, access); + hax_log(HAX_LOGD, "G2H translation failed (%08llx, %x)\n", cr2, access); return 0; } @@ -803,7 +808,7 @@ bool handle_vtlb(struct vcpu_t *vcpu) access = ret & (vcpu->state->_efer & IA32_EFER_XD ? 0x1f : 0x0f); vcpu->state->_cr2 = cr2; hax_inject_page_fault(vcpu, access); - hax_debug("Page fault (%08llx, %x)\n", cr2, access); + hax_log(HAX_LOGD, "Page fault (%08llx, %x)\n", cr2, access); return 1; } @@ -823,11 +828,11 @@ static inline void * mmio_map_guest_virtual_page_fast(struct vcpu_t *vcpu, vcpu->mmio_fetch.kva = NULL; gpa_space_unmap_page(&vcpu->vm->gpa_space, &vcpu->mmio_fetch.kmap); if (vcpu->mmio_fetch.hit_count < 2) { - hax_debug("%s: Cache miss: cached_gva=0x%llx, cached_cr3=0x%llx," - " gva=0x%llx, cr3=0x%llx, hits=0x%d, vcpu_id=0x%u\n", - __func__, vcpu->mmio_fetch.last_gva, - vcpu->mmio_fetch.last_guest_cr3, gva, vcpu->state->_cr3, - vcpu->mmio_fetch.hit_count, vcpu->vcpu_id); + hax_log(HAX_LOGD, "%s: Cache miss: cached_gva=0x%llx, " + "cached_cr3=0x%llx, gva=0x%llx, cr3=0x%llx, hits=0x%d, " + "vcpu_id=0x%u\n", __func__, vcpu->mmio_fetch.last_gva, + vcpu->mmio_fetch.last_guest_cr3, gva, vcpu->state->_cr3, + vcpu->mmio_fetch.hit_count, vcpu->vcpu_id); } return NULL; } @@ -856,19 +861,19 @@ static void * mmio_map_guest_virtual_page_slow(struct vcpu_t *vcpu, uint64_t gva ret = vcpu_translate(vcpu, gva_aligned, 0, &gpa, NULL, true); if (ret) { - hax_error("%s: vcpu_translate() returned 0x%x: vcpu_id=%u," - " gva=0x%llx\n", __func__, ret, vcpu->vcpu_id, gva); + hax_log(HAX_LOGE, "%s: vcpu_translate() returned 0x%x: vcpu_id=%u," + " 
gva=0x%llx\n", __func__, ret, vcpu->vcpu_id, gva); // TODO: Inject a guest page fault? return NULL; } - hax_debug("%s: gva=0x%llx => gpa=0x%llx, vcpu_id=0x%u\n", __func__, - gva_aligned, gpa, vcpu->vcpu_id); + hax_log(HAX_LOGD, "%s: gva=0x%llx => gpa=0x%llx, vcpu_id=0x%u\n", __func__, + gva_aligned, gpa, vcpu->vcpu_id); kva = gpa_space_map_page(&vcpu->vm->gpa_space, gpa >> PG_ORDER_4K, kmap, NULL); if (!kva) { - hax_error("%s: gpa_space_map_page() failed: vcpu_id=%u, gva=0x%llx," - " gpa=0x%llx\n", __func__, vcpu->vcpu_id, gva, gpa); + hax_log(HAX_LOGE, "%s: gpa_space_map_page() failed: vcpu_id=%u, " + "gva=0x%llx, gpa=0x%llx\n", __func__, vcpu->vcpu_id, gva, gpa); return NULL; } return kva; @@ -888,13 +893,13 @@ int mmio_fetch_instruction(struct vcpu_t *vcpu, uint64_t gva, uint8_t *buf, int if ((gva >> PG_ORDER_4K) != (end_gva >> PG_ORDER_4K)) { uint32_t ret; - hax_info("%s: GVA range spans two pages: gva=0x%llx, len=%d\n", - __func__, gva, len); + hax_log(HAX_LOGI, "%s: GVA range spans two pages: gva=0x%llx, len=%d\n", + __func__, gva, len); ret = vcpu_read_guest_virtual(vcpu, gva, buf, (uint)len, (uint)len, 0); if (!ret) { - hax_error("%s: vcpu_read_guest_virtual() failed: vcpu_id=%u," - " gva=0x%llx, len=%d\n", __func__, vcpu->vcpu_id, gva, - len); + hax_log(HAX_LOGE, "%s: vcpu_read_guest_virtual() failed: " + "vcpu_id=%u, gva=0x%llx, len=%d\n", __func__, + vcpu->vcpu_id, gva, len); return -ENOMEM; } return 0; @@ -963,24 +968,26 @@ uint32_t vcpu_read_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, void *ds if (flag != 0) return offset; // Number of bytes successfully read if (r & TF_GP2HP) { - hax_error("read_guest_virtual(%llx, %x) failed\n", addr, size); + hax_log(HAX_LOGE, "read_guest_virtual(%llx, %x) failed\n", + addr, size); } - hax_debug("read_guest_virtual(%llx, %x) injecting #PF\n", addr, - size); + hax_log(HAX_LOGD, "read_guest_virtual(%llx, %x) injecting #PF\n", + addr, size); vcpu->state->_cr2 = addr + offset; hax_inject_page_fault(vcpu, r & 
0x1f); return false; } // if (addr + offset != gpa) { -// hax_info("%s: gva=0x%llx, gpa=0x%llx, len=0x%llx\n", __func__, -// addr + offset, gpa, len); +// hax_log(HAX_LOGI, "%s: gva=0x%llx, gpa=0x%llx, len=0x%llx\n", +// __func__, addr + offset, gpa, len); // } #ifdef CONFIG_HAX_EPT2 len2 = gpa_space_read_data(&vcpu->vm->gpa_space, gpa, (int)len, (uint8_t *)(dstp + offset)); if (len2 <= 0) { - hax_panic_vcpu( - vcpu, "read guest virtual error, gpa:0x%llx, len:0x%llx\n", + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, + "read guest virtual error, gpa:0x%llx, len:0x%llx\n", gpa, len); return false; } else { @@ -996,7 +1003,8 @@ uint32_t vcpu_read_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, void *ds hva = (uint8_t *)hva_base + (gpa & 0xfff); memcpy_s((void *)(dstp + offset), dst_buflen - offset , hva, len); } else { - hax_panic_vcpu(vcpu, "BUG_ON during the call:%s\n", __FUNCTION__); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "BUG_ON during the call:%s\n", __FUNCTION__); } #ifdef HAX_ARCH_X86_32 hax_unmap_gpfn(vcpu->vm, hva_base, gpa >> 12); @@ -1058,11 +1066,12 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, if (flag != 0) return offset; // Number of bytes successfully written if (r & TF_GP2HP) { - hax_panic_vcpu(vcpu, "write_guest_virtual(%llx, %x) failed\n", - addr, size); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "write_guest_virtual(%llx, %x) failed\n", + addr, size); } - hax_debug("write_guest_virtual(%llx, %x) injecting #PF\n", addr, - size); + hax_log(HAX_LOGD, "write_guest_virtual(%llx, %x) injecting #PF\n", + addr, size); vcpu->state->_cr2 = addr + offset; hax_inject_page_fault(vcpu, r & 0x1f); return false; @@ -1071,8 +1080,9 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, len2 = (uint64_t)gpa_space_write_data(&vcpu->vm->gpa_space, gpa, len, (uint8_t *)(srcp + offset)); if (len2 <= 0) { - hax_panic_vcpu( - vcpu, "write guest virtual error, gpa:0x%llx, len:0x%llx\n", + 
vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, + "write guest virtual error, gpa:0x%llx, len:0x%llx\n", gpa, len); return false; } else { @@ -1088,7 +1098,8 @@ uint32_t vcpu_write_guest_virtual(struct vcpu_t *vcpu, hax_vaddr_t addr, hva = (uint8_t *)hva_base + (gpa & 0xfff); memcpy_s(hva, dst_buflen - offset , (void *)(srcp + offset), len); } else { - hax_panic_vcpu(vcpu, "BUG_ON during the call:%s\n", __FUNCTION__); + vcpu_set_panic(vcpu); + hax_log(HAX_LOGPANIC, "BUG_ON during the call:%s\n", __FUNCTION__); } #ifdef HAX_ARCH_X86_32 hax_unmap_gpfn(vcpu->vm, hva_base, gpa >> 12); @@ -1119,9 +1130,8 @@ uint vcpu_translate(struct vcpu_t *vcpu, hax_vaddr_t va, uint access, hax_paddr_ uint order = 0; uint r = -1; - hax_debug("vcpu_translate: %llx (%s,%s) mode %u\n", va, - access & TF_WRITE ? "W" : "R", access & TF_USER ? "U" : "S", - mode); + hax_log(HAX_LOGD, "vcpu_translate: %llx (%s,%s) mode %u\n", va, + access & TF_WRITE ? "W" : "R", access & TF_USER ? "U" : "S", mode); switch (mode) { case PM_FLAT: { diff --git a/include/darwin/hax_mac.h b/include/darwin/hax_mac.h index 2549e881..45a0ba7f 100644 --- a/include/darwin/hax_mac.h +++ b/include/darwin/hax_mac.h @@ -169,35 +169,6 @@ static inline errno_t memcpy_s(void *dest, size_t destsz, const void *src, return 0; } -extern int default_hax_log_level; - -#define hax_error(x...) { \ - if (HAX_LOGE >= default_hax_log_level) \ - printf("haxm_error: " x); \ - } - -#define hax_warning(x...) { \ - if (HAX_LOGW >= default_hax_log_level) \ - printf("haxm_warn: " x); \ - } - -#define hax_info(x...) { \ - if (HAX_LOGI >= default_hax_log_level) \ - printf("haxm_info: " x); \ - } - -#define hax_debug(x...) { \ - if (HAX_LOGD >= default_hax_log_level) \ - printf("haxm_debug: " x); \ - } - -#define hax_panic panic - -#define hax_panic_vcpu(v, x...) 
{ \ - printf("haxm_panic: " x); \ - v->panicked = 1; \ - } - #define hax_assert(condition) assert(condition) static inline bool cpu_is_online(int cpu) diff --git a/include/hax.h b/include/hax.h index bde01262..2492b641 100644 --- a/include/hax.h +++ b/include/hax.h @@ -215,7 +215,8 @@ void *hax_map_page(struct hax_page *page); void hax_unmap_page(struct hax_page *page); -int hax_log_level(int level, const char *fmt, ...); +void hax_log(int level, const char *fmt, ...); +void hax_panic(const char *fmt, ...); #ifdef __cplusplus } @@ -257,6 +258,7 @@ int hax_em64t_enabled(void); /* Utilities */ #define HAX_NOLOG 0xff +#define HAX_LOGPANIC 5 #define HAX_LOGE 4 #define HAX_LOGW 3 #define HAX_LOGI 2 diff --git a/include/linux/hax_linux.h b/include/linux/hax_linux.h index e4a428e8..7dde8649 100644 --- a/include/linux/hax_linux.h +++ b/include/linux/hax_linux.h @@ -108,19 +108,6 @@ static inline bool cpu_is_online(int cpu) int hax_notify_host_event(enum hax_notify_event event, uint32_t *param, uint32_t size); -extern int default_hax_log_level; - -void hax_error(char *fmt, ...); -void hax_warning(char *fmt, ...); -void hax_info(char *fmt, ...); -void hax_debug(char *fmt, ...); -void hax_log(char *fmt, ...); - -#define hax_log hax_info - -//#define hax_panic DbgPrint -#define hax_panic hax_error - //#define hax_assert(condition) BUG_ON(!(condition)) void hax_assert(bool condition); diff --git a/include/netbsd/hax_netbsd.h b/include/netbsd/hax_netbsd.h index e333e2fd..07d8f0b8 100644 --- a/include/netbsd/hax_netbsd.h +++ b/include/netbsd/hax_netbsd.h @@ -107,18 +107,6 @@ static inline bool cpu_is_online(int cpu) int hax_notify_host_event(enum hax_notify_event event, uint32_t *param, uint32_t size); -extern int default_hax_log_level; - -void hax_error(char *fmt, ...); -void hax_warning(char *fmt, ...); -void hax_info(char *fmt, ...); -void hax_debug(char *fmt, ...); -void hax_log(char *fmt, ...); - -#define hax_log hax_info - -#define hax_panic panic - #define 
hax_assert(condition) KASSERT(condition) #endif // HAX_NETBSD_HAX_NETBSD_H_ diff --git a/include/windows/hax_windows.h b/include/windows/hax_windows.h index add7d166..c43ef8b4 100644 --- a/include/windows/hax_windows.h +++ b/include/windows/hax_windows.h @@ -194,15 +194,6 @@ static inline bool cpu_is_online(int cpu) int hax_notify_host_event(enum hax_notify_event event, uint32_t *param, uint32_t size); -extern int default_hax_log_level; - -void hax_error(char *fmt, ...); -void hax_warning(char *fmt, ...); -void hax_info(char *fmt, ...); -void hax_debug(char *fmt, ...); - -#define hax_panic DbgPrint - #define hax_assert(condition) ASSERT(condition) #endif // HAX_WINDOWS_HAX_WINDOWS_H_ diff --git a/platforms/darwin/com_intel_hax.c b/platforms/darwin/com_intel_hax.c index 1071bad0..98795e3b 100644 --- a/platforms/darwin/com_intel_hax.c +++ b/platforms/darwin/com_intel_hax.c @@ -94,7 +94,7 @@ static int lock_prim_init(void) return 0; error: - hax_log_level(HAX_LOGE, "Failed to init lock primitive\n"); + hax_log(HAX_LOGE, "Failed to init lock primitive\n"); lock_prim_exit(); return -1; } @@ -109,7 +109,7 @@ void get_online_map(void *param) //printf("%x\n", cpu_number()); omap = param; if (!omap) { - hax_log_level(HAX_LOGE, "NULL pointer in get online map\n"); + hax_log(HAX_LOGE, "NULL pointer in get online map\n"); return; } @@ -142,7 +142,8 @@ static int com_intel_hax_init(void) init_cpu_info(); if (max_cpus > HAX_MAX_CPUS) { - hax_error("Too many cpus in system!, max_cpus:%d\n", real_ncpus); + hax_log(HAX_LOGE, "Too many cpus in system!, max_cpus:%d\n", + real_ncpus); ret = -E2BIG; goto fail0; } @@ -166,25 +167,25 @@ static int com_intel_hax_exit(void) } kern_return_t com_intel_hax_start(kmod_info_t * ki, void * d) { - hax_log_level(HAX_LOGD, "Start HAX module\n"); + hax_log(HAX_LOGD, "Start HAX module\n"); if (com_intel_hax_init() < 0) { - hax_log_level(HAX_LOGE, "Failed to init hax context\n"); + hax_log(HAX_LOGE, "Failed to init hax context\n"); return 
KERN_FAILURE; } if (hax_module_init() < 0) { - hax_log_level(HAX_LOGE, "Failed to init host hax\n"); + hax_log(HAX_LOGE, "Failed to init host hax\n"); goto fail1; } if (!hax_em64t_enabled()) { - hax_log_level(HAX_LOGE, "Cpu has no EMT64 support!\n"); + hax_log(HAX_LOGE, "Cpu has no EMT64 support!\n"); goto fail2; } if (com_intel_hax_init_ui() < 0) { - hax_log_level(HAX_LOGE, "Failed to init hax UI\n"); + hax_log(HAX_LOGE, "Failed to init hax UI\n"); goto fail2; } @@ -202,11 +203,11 @@ kern_return_t com_intel_hax_stop(kmod_info_t * ki, void * d) { int ret; - hax_log_level(HAX_LOGD, "Stop HAX module\n"); + hax_log(HAX_LOGD, "Stop HAX module\n"); ret = hax_module_exit(); if (ret < 0) { - hax_error("The module can't be removed now, \n" - " close all VM interface and try again\n"); + hax_log(HAX_LOGE, "The module can't be removed now, \n" + " close all VM interface and try again\n"); return KERN_FAILURE; } com_intel_hax_exit(); diff --git a/platforms/darwin/com_intel_hax_mem.cpp b/platforms/darwin/com_intel_hax_mem.cpp index 04285898..9011c198 100644 --- a/platforms/darwin/com_intel_hax_mem.cpp +++ b/platforms/darwin/com_intel_hax_mem.cpp @@ -68,19 +68,19 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, md = IOMemoryDescriptor::withAddressRange(uva, size, options, current_task()); if (!md) { - hax_error("Failed to create mapping for %llx\n", uva); + hax_log(HAX_LOGE, "Failed to create mapping for %llx\n", uva); goto error; } result = md->prepare(); if (result != KERN_SUCCESS) { - hax_error("Failed to prepare\n"); + hax_log(HAX_LOGE, "Failed to prepare\n"); goto error; } mm = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, size); if (!mm) { - hax_error("Failed to map into kernel\n"); + hax_log(HAX_LOGE, "Failed to map into kernel\n"); md->complete(); goto error; } diff --git a/platforms/darwin/com_intel_hax_ui.c b/platforms/darwin/com_intel_hax_ui.c index 3e6a359f..dcce75e1 100644 --- a/platforms/darwin/com_intel_hax_ui.c +++ 
b/platforms/darwin/com_intel_hax_ui.c @@ -94,14 +94,14 @@ static int hax_vcpu_open(dev_t dev, int flags, __unused int devtype, struct vcpu_t *cvcpu; int ret; - hax_log_level(HAX_LOGD, "HAX vcpu open called\n"); + hax_log(HAX_LOGD, "HAX vcpu open called\n"); cvcpu = get_vcpu_by_dev(dev); if (!cvcpu) return -ENODEV; ret = hax_vcpu_core_open(cvcpu); if (ret) - hax_error("Failed to open core vcpu\n"); + hax_log(HAX_LOGE, "Failed to open core vcpu\n"); hax_put_vcpu(cvcpu); return ret; } @@ -111,12 +111,12 @@ static int hax_vcpu_close(dev_t dev, int flags, __unused int devtype, { int ret = 0; struct vcpu_t *cvcpu; - hax_log_level(HAX_LOGD, "HAX vcpu close called\n"); + hax_log(HAX_LOGD, "HAX vcpu close called\n"); cvcpu = get_vcpu_by_dev(dev); if (!cvcpu) { - hax_error("Failed to find the vcpu, is it closed already? \n"); + hax_log(HAX_LOGE, "Failed to find the vcpu, is it closed already? \n"); return 0; } @@ -168,16 +168,16 @@ static int hax_vcpu_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, msr = msrs->entries; /* nr_msr needs to be verified */ if (msrs->nr_msr >= 0x20) { - hax_error("MSRS invalid!\n"); + hax_log(HAX_LOGE, "MSRS invalid!\n"); ret = -EFAULT; break; } for (i = 0; i < msrs->nr_msr; i++, msr++) { fail = vcpu_set_msr(mvcpu2cvcpu(vcpu), msr->entry, msr->value); if (fail) { - // hax_log_level(HAX_LOGE, - // "Failed to set msr %x index %x\n", - // msr->entry, i); + // hax_log(HAX_LOGE, + // "Failed to set msr %x index %x\n", + // msr->entry, i); break; } } @@ -192,7 +192,7 @@ static int hax_vcpu_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, msrs = (struct hax_msr_data *)data; msr = msrs->entries; if(msrs->nr_msr >= 0x20) { - hax_error("MSRS invalid!\n"); + hax_log(HAX_LOGE, "MSRS invalid!\n"); ret = -EFAULT; break; } @@ -288,7 +288,7 @@ int hax_vcpu_create_ui(struct hax_vcpu_mac *vcpu) minor_id = hax_get_vcpu_mid(vcpu); if (minor_id < 0) { - hax_error("No vcpu minor id left\n"); + hax_log(HAX_LOGE, "No vcpu minor id left\n"); return 0; } @@ -304,12 
+304,12 @@ int hax_vcpu_create_ui(struct hax_vcpu_mac *vcpu) pnode = devfs_make_node(makedev(hax_vcpu_major, minor_id), DEVFS_CHAR, vcpu->owner, vcpu->gowner, 0600, devfs_pathname); if (NULL == pnode) { - hax_error("Failed to init the device, %s\n", devfs_pathname); + hax_log(HAX_LOGE, "Failed to init the device, %s\n", devfs_pathname); hax_put_vcpu_mid(vcpu); return -1; } - hax_info("%s: Created devfs node /dev/%s for vCPU #%d\n", __func__, - devfs_pathname, vcpu->vcpu_id); + hax_log(HAX_LOGI, "%s: Created devfs node /dev/%s for vCPU #%d\n", + __func__, devfs_pathname, vcpu->vcpu_id); vcpu->pnode = pnode; return 0; @@ -327,7 +327,7 @@ static int hax_vm_open(dev_t dev, int flags, __unused int devtype, return -ENODEV; ret = hax_vm_core_open(cvm); hax_put_vm(cvm); - hax_log_level(HAX_LOGI, "Open VM\n"); + hax_log(HAX_LOGI, "Open VM\n"); return ret; } @@ -337,7 +337,7 @@ static int hax_vm_close(dev_t dev, int flags, __unused int devtype, struct vm_t *cvm; cvm = hax_get_vm(minor(dev), 1); - hax_log_level(HAX_LOGI, "Close VM\n"); + hax_log(HAX_LOGI, "Close VM\n"); if (cvm) { /* put the ref get just now */ hax_put_vm(cvm); @@ -373,8 +373,8 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, vm_id = vm_mac->vm_id; cvcpu = vcpu_create(cvm, vm_mac, vcpu_id); if (!cvcpu) { - hax_error("Failed to create vcpu %x on vm %x\n", vcpu_id, - vm_id); + hax_log(HAX_LOGE, "Failed to create vcpu %x on vm %x\n", + vcpu_id, vm_id); ret = -EINVAL; hax_put_vm(cvm); return ret; @@ -384,9 +384,9 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, case HAX_VM_IOCTL_ALLOC_RAM: { struct hax_alloc_ram_info *info; info = (struct hax_alloc_ram_info *)data; - hax_info("IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x," - " pad=0x%x\n", vm_mac->vm_id, info->va, info->size, - info->pad); + hax_log(HAX_LOGI, "IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x," + " pad=0x%x\n", vm_mac->vm_id, info->va, info->size, + info->pad); ret = hax_vm_add_ramblock(cvm, 
info->va, info->size); break; } @@ -394,14 +394,14 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, struct hax_ramblock_info *info; info = (struct hax_ramblock_info *)data; if (info->reserved) { - hax_error("IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", - vm_mac->vm_id, info->reserved); + hax_log(HAX_LOGE, "IOCTL_ADD_RAMBLOCK: vm_id=%d, " + "reserved=0x%llx\n", vm_mac->vm_id, info->reserved); ret = -EINVAL; break; } - hax_info("IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx," - " size=0x%llx\n", vm_mac->vm_id, info->start_va, - info->size); + hax_log(HAX_LOGI, "IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx," + " size=0x%llx\n", vm_mac->vm_id, info->start_va, + info->size); ret = hax_vm_add_ramblock(cvm, info->start_va, info->size); break; } @@ -416,9 +416,9 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, struct hax_set_ram_info2 *info; info = (struct hax_set_ram_info2 *)data; if (info->reserved1 || info->reserved2) { - hax_error("IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x" - " reserved2=0x%llx\n", - vm_mac->vm_id, info->reserved1, info->reserved2); + hax_log(HAX_LOGE, "IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x" + " reserved2=0x%llx\n", + vm_mac->vm_id, info->reserved1, info->reserved2); ret = -EINVAL; break; } @@ -429,8 +429,8 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, struct hax_protect_ram_info *info; info = (struct hax_protect_ram_info *)data; if (info->reserved) { - hax_error("IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", - vm_mac->vm_id, info->reserved); + hax_log(HAX_LOGE, "IOCTL_PROTECT_RAM: vm_id=%d, " + "reserved=0x%x\n", vm_mac->vm_id, info->reserved); ret = -EINVAL; break; } @@ -446,12 +446,12 @@ static int hax_vm_ioctl(dev_t dev, ulong cmd, caddr_t data, int flag, pid = proc_pid(p); proc_name(pid, task_name, sizeof(task_name)); /* - * This message is informational, but hax_warning() makes sure it is + * This message is informational, but HAX_LOGW makes sure it is * printed 
by default, which helps us identify QEMU PIDs, in case * we ever receive unknown ioctl()s from other processes. */ - hax_warning("%s: Got HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, pid=%d" - " ('%s')\n", __func__, pid, task_name); + hax_log(HAX_LOGW, "%s: Got HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, " + "pid=%d ('%s')\n", __func__, pid, task_name); info = (struct hax_qemu_version *)data; ret = hax_vm_set_qemuversion(cvm, info); @@ -500,12 +500,12 @@ int hax_vm_create_ui(struct hax_vm_mac *vm) pnode = devfs_make_node(makedev(hax_vm_major, vm->vm_id), DEVFS_CHAR, vm->owner, vm->gowner, 0600, devfs_pathname); if (NULL == pnode) { - hax_error("Failed to init the device %s\n", devfs_pathname); + hax_log(HAX_LOGE, "Failed to init the device %s\n", devfs_pathname); cdevsw_remove(hax_vm_major, &hax_vm_devsw); return -1; } - hax_info("%s: Created devfs node /dev/%s for VM #%d\n", __func__, - devfs_pathname, vm->vm_id); + hax_log(HAX_LOGI, "%s: Created devfs node /dev/%s for VM #%d\n", __func__, + devfs_pathname, vm->vm_id); vm->pnode = pnode; return 0; } @@ -548,7 +548,7 @@ static int hax_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, cvm = hax_create_vm(&vm_id); if (!cvm) { - hax_log_level(HAX_LOGE, "Failed to create the HAX VM\n"); + hax_log(HAX_LOGE, "Failed to create the HAX VM\n"); ret = -ENOMEM; break; } @@ -569,14 +569,14 @@ static int hax_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, static int hax_open(dev_t dev, int flags, __unused int devtype, __unused struct proc *p) { - hax_log_level(HAX_LOGI, "HAX module opened\n"); + hax_log(HAX_LOGI, "HAX module opened\n"); return 0; } static int hax_close(__unused dev_t dev, __unused int flags, __unused int devtype, __unused struct proc *p) { - hax_log_level(HAX_LOGI, "hax_close\n"); + hax_log(HAX_LOGI, "hax_close\n"); return (0); } @@ -590,11 +590,11 @@ static void *pnode = NULL; int com_intel_hax_init_ui(void) { - hax_info("%s: XNU version_major=%d\n", __func__, version_major); + hax_log(HAX_LOGI, "%s: XNU 
version_major=%d\n", __func__, version_major); hax_major = cdevsw_add(-1, &hax_devsw); if (hax_major < 0) { - hax_log_level(HAX_LOGE, "Failed to alloc major number\n"); + hax_log(HAX_LOGE, "Failed to alloc major number\n"); return -1; } @@ -602,14 +602,14 @@ int com_intel_hax_init_ui(void) "HAX", 0); if (NULL == pnode) { - hax_error("Failed to init the device\n"); + hax_log(HAX_LOGE, "Failed to init the device\n"); goto error; } if (hax_vm_major <= 0) { hax_vm_major = cdevsw_add(-1, &hax_vm_devsw); if (hax_vm_major < 0) { - hax_log_level(HAX_LOGE, "Failed to allocate VM major number\n"); + hax_log(HAX_LOGE, "Failed to allocate VM major number\n"); goto error; } } @@ -617,7 +617,7 @@ int com_intel_hax_init_ui(void) if (hax_vcpu_major <= 0) { hax_vcpu_major = cdevsw_add(-1, &hax_vcpu_devsw); if (hax_vcpu_major < 0) { - hax_log_level(HAX_LOGE, "Failed to allocate VCPU major number\n"); + hax_log(HAX_LOGE, "Failed to allocate VCPU major number\n"); goto error; } } @@ -645,7 +645,7 @@ int com_intel_hax_init_ui(void) int com_intel_hax_exit_ui(void) { - hax_log_level(HAX_LOGI, "Exit hax module\n"); + hax_log(HAX_LOGI, "Exit hax module\n"); if (hax_vcpu_major) { cdevsw_remove(hax_vcpu_major, &hax_vcpu_devsw); @@ -694,11 +694,11 @@ static void handle_unknown_ioctl(dev_t dev, ulong cmd, struct proc *p) dev_name = "VCPU"; } else { dev_name = "??"; - hax_error("%s: Unknown device major %d\n", __func__, dev_major); + hax_log(HAX_LOGE, "%s: Unknown device major %d\n", __func__, dev_major); } pid = proc_pid(p); proc_name(pid, task_name, sizeof(task_name)); - hax_warning("Unknown %s ioctl 0x%lx from pid=%d ('%s')\n", dev_name, cmd, - pid, task_name); + hax_log(HAX_LOGW, "Unknown %s ioctl 0x%lx from pid=%d ('%s')\n", dev_name, + cmd, pid, task_name); } diff --git a/platforms/darwin/hax_host_mem.cpp b/platforms/darwin/hax_host_mem.cpp index 41dec6e2..2e583765 100644 --- a/platforms/darwin/hax_host_mem.cpp +++ b/platforms/darwin/hax_host_mem.cpp @@ -44,7 +44,7 @@ extern "C" int 
hax_pin_user_pages(uint64_t start_uva, uint64_t size, IOReturn ret; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } @@ -54,16 +54,16 @@ extern "C" int hax_pin_user_pages(uint64_t start_uva, uint64_t size, md = IOMemoryDescriptor::withAddressRange(start_uva, size, options, current_task()); if (!md) { - hax_error("%s: Failed to create memory descriptor for UVA range:" - " start_uva=0x%llx, size=0x%llx\n", __func__, start_uva, - size); + hax_log(HAX_LOGE, "%s: Failed to create memory descriptor for UVA " + "range: start_uva=0x%llx, size=0x%llx\n", __func__, start_uva, + size); return -EFAULT; } ret = md->prepare(); if (ret != kIOReturnSuccess) { - hax_error("%s: prepare() failed: ret=%d, start_uva=0x%llx, size=0x%llx" - "\n", __func__, ret, start_uva, size); + hax_log(HAX_LOGE, "%s: prepare() failed: ret=%d, start_uva=0x%llx, " + "size=0x%llx\n", __func__, ret, start_uva, size); md->release(); return -ENOMEM; } @@ -76,17 +76,17 @@ extern "C" int hax_unpin_user_pages(hax_memdesc_user *memdesc) IOReturn ret; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } if (!memdesc->md) { - hax_error("%s: memdesc->md == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->md == NULL\n", __func__); return -EINVAL; } ret = memdesc->md->complete(); if (ret != kIOReturnSuccess) { - hax_warning("%s: complete() failed: ret=%d\n", __func__, ret); + hax_log(HAX_LOGW, "%s: complete() failed: ret=%d\n", __func__, ret); // Still need to release the memory descriptor } memdesc->md->release(); @@ -99,18 +99,18 @@ extern "C" uint64_t hax_get_pfn_user(hax_memdesc_user *memdesc, uint64_t uva_off addr64_t hpa; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return INVALID_PFN; } if (!memdesc->md) { - hax_error("%s: memdesc->md == NULL\n", __func__); + 
hax_log(HAX_LOGE, "%s: memdesc->md == NULL\n", __func__); return INVALID_PFN; } hpa = memdesc->md->getPhysicalSegment(uva_offset, NULL); if (!hpa) { - hax_error("%s: getPhysicalSegment() failed: uva_offset=0x%llx\n", - __func__, uva_offset); + hax_log(HAX_LOGE, "%s: getPhysicalSegment() failed: " + "uva_offset=0x%llx\n", __func__, uva_offset); return INVALID_PFN; } return hpa >> PG_ORDER_4K; @@ -125,15 +125,15 @@ extern "C" void * hax_map_user_pages(hax_memdesc_user *memdesc, IOMemoryMap *mm; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return NULL; } if (!memdesc->md) { - hax_error("%s: memdesc->md == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->md == NULL\n", __func__); return NULL; } if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return NULL; } @@ -144,8 +144,9 @@ extern "C" void * hax_map_user_pages(hax_memdesc_user *memdesc, uva_offset_high = (uva_offset + size + PAGE_SIZE_4K - 1) & pgmask(PG_ORDER_4K); if (uva_offset_high > base_size) { - hax_error("%s: Invalid UVA subrange: uva_offset=0x%llx, size=0x%llx," - " base_size=0x%llx\n", __func__, uva_offset, size, base_size); + hax_log(HAX_LOGE, "%s: Invalid UVA subrange: uva_offset=0x%llx, " + "size=0x%llx, base_size=0x%llx\n", __func__, uva_offset, size, + base_size); return NULL; } @@ -154,9 +155,9 @@ extern "C" void * hax_map_user_pages(hax_memdesc_user *memdesc, mm = memdesc->md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, uva_offset_low, size); if (!mm) { - hax_error("%s: Failed to create KVA mapping for UVA range:" - " uva_offset_low=0x%llx, size=0x%llx\n", __func__, - uva_offset_low, size); + hax_log(HAX_LOGE, "%s: Failed to create KVA mapping for UVA range:" + " uva_offset_low=0x%llx, size=0x%llx\n", __func__, + uva_offset_low, size); return NULL; } kmap->mm = mm; @@ -166,11 +167,11 @@ extern "C" void * hax_map_user_pages(hax_memdesc_user *memdesc, 
extern "C" int hax_unmap_user_pages(hax_kmap_user *kmap) { if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return -EINVAL; } if (!kmap->mm) { - hax_error("%s: kmap->mm == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap->mm == NULL\n", __func__); return -EINVAL; } @@ -186,10 +187,10 @@ extern "C" int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) // TODO: Support HAX_PAGE_ALLOC_BELOW_4G if (flags & HAX_PAGE_ALLOC_BELOW_4G) { - hax_warning("%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); + hax_log(HAX_LOGW, "%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); } if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } @@ -212,7 +213,8 @@ extern "C" int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) PAGE_SIZE_4K, PAGE_SIZE_4K); if (!bmd) { - hax_error("%s: Failed to allocate 4KB of wired memory\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate 4KB of wired memory\n", + __func__); return -ENOMEM; } memdesc->bmd = bmd; @@ -222,11 +224,11 @@ extern "C" int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) extern "C" int hax_free_page_frame(hax_memdesc_phys *memdesc) { if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } if (!memdesc->bmd) { - hax_error("%s: memdesc->bmd == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->bmd == NULL\n", __func__); return -EINVAL; } @@ -240,17 +242,17 @@ extern "C" uint64_t hax_get_pfn_phys(hax_memdesc_phys *memdesc) addr64_t hpa; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return INVALID_PFN; } if (!memdesc->bmd) { - hax_error("%s: memdesc->bmd == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->bmd == NULL\n", __func__); return INVALID_PFN; } hpa = 
memdesc->bmd->getPhysicalSegment(0, NULL); if (!hpa) { - hax_error("%s: getPhysicalSegment() failed\n", __func__); + hax_log(HAX_LOGE, "%s: getPhysicalSegment() failed\n", __func__); return INVALID_PFN; } return hpa >> PG_ORDER_4K; @@ -259,11 +261,11 @@ extern "C" uint64_t hax_get_pfn_phys(hax_memdesc_phys *memdesc) extern "C" void * hax_get_kva_phys(hax_memdesc_phys *memdesc) { if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return NULL; } if (!memdesc->bmd) { - hax_error("%s: memdesc->bmd == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->bmd == NULL\n", __func__); return NULL; } @@ -276,7 +278,7 @@ extern "C" void * hax_map_page_frame(uint64_t pfn, hax_kmap_phys *kmap) IOMemoryMap *mm; if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return NULL; } @@ -284,14 +286,14 @@ extern "C" void * hax_map_page_frame(uint64_t pfn, hax_kmap_phys *kmap) PAGE_SIZE_4K, kIODirectionOutIn); if (!md) { - hax_error("%s: Failed to create memory descriptor for pfn=0x%llx\n", - __func__, pfn); + hax_log(HAX_LOGE, "%s: Failed to create memory descriptor for " + "pfn=0x%llx\n", __func__, pfn); return NULL; } mm = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere); if (!mm) { - hax_error("%s: Failed to create KVA mapping for pfn=0x%llx\n", __func__, - pfn); + hax_log(HAX_LOGE, "%s: Failed to create KVA mapping for pfn=0x%llx\n", + __func__, pfn); md->release(); return NULL; } @@ -304,7 +306,7 @@ extern "C" int hax_unmap_page_frame(hax_kmap_phys *kmap) IOMemoryDescriptor *md; if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return -EINVAL; } if (!kmap->mm) { @@ -316,7 +318,7 @@ extern "C" int hax_unmap_page_frame(hax_kmap_phys *kmap) kmap->mm->release(); kmap->mm = NULL; if (!md) { - hax_warning("%s: getMemoryDescriptor() failed\n", __func__); + hax_log(HAX_LOGW, "%s: 
getMemoryDescriptor() failed\n", __func__); return -EINVAL; } md->release(); diff --git a/platforms/darwin/hax_mem_alloc.cpp b/platforms/darwin/hax_mem_alloc.cpp index 17fbca9a..25580190 100644 --- a/platforms/darwin/hax_mem_alloc.cpp +++ b/platforms/darwin/hax_mem_alloc.cpp @@ -39,11 +39,11 @@ flags |= HAX_MEM_NONPAGE; \ if ((flags & (HAX_MEM_NONPAGE | HAX_MEM_PAGABLE)) == \ (HAX_MEM_NONPAGE | HAX_MEM_PAGABLE)) { \ - hax_log_level(HAX_LOGW, "Confilic flags for pageable\n"); \ + hax_log(HAX_LOGW, "Confilic flags for pageable\n"); \ return HAX_ALLOC_CHECK_FAIL; \ } \ if (flags & HAX_MEM_NONBLOCK) { \ - hax_log_level(HAX_LOGE, "No nonblock allocation in mac now\n"); \ + hax_log(HAX_LOGE, "No nonblock allocation in mac now\n"); \ return HAX_ALLOC_CHECK_FAIL; \ } @@ -313,7 +313,7 @@ extern "C" int hax_malloc_init(void) hax_init_list_head(&_vmap_list); vmap_lock = hax_spinlock_alloc_init(); if (!vmap_lock) { - hax_error("%s: Failed to allocate VMAP lock\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate VMAP lock\n", __func__); return -ENOMEM; } diff --git a/platforms/darwin/hax_wrapper.cpp b/platforms/darwin/hax_wrapper.cpp index c28a7509..4a95ae31 100644 --- a/platforms/darwin/hax_wrapper.cpp +++ b/platforms/darwin/hax_wrapper.cpp @@ -41,8 +41,6 @@ extern "C" int vcpu_event_pending(struct vcpu_t *vcpu); -int default_hax_log_level = HAX_LOG_DEFAULT; - /* * From the following list, we have to do tricky things to achieve this simple * action. @@ -51,16 +49,33 @@ int default_hax_log_level = HAX_LOG_DEFAULT; * currently */ -extern "C" int hax_log_level(int level, const char *fmt, ...) +static const char* kLogPrefix[] = { + "haxm: ", + "haxm_debug: ", + "haxm_info: ", + "haxm_warning: ", + "haxm_error: ", + "haxm_panic: " +}; + +extern "C" void hax_log(int level, const char *fmt, ...) 
{ va_list args; va_start(args, fmt); - if (level >= default_hax_log_level) { - printf("haxm: "); + if (level >= HAX_LOG_DEFAULT) { + printf("%s", kLogPrefix[level]); printf(fmt, args); } va_end(args); - return 0; +} + +extern "C" void hax_panic(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + hax_log(HAX_LOGPANIC, fmt, args); + panic(fmt, args); + va_end(args); } struct smp_call_parameter { diff --git a/platforms/linux/components.c b/platforms/linux/components.c index c6bf7ba8..9a0ef1bd 100644 --- a/platforms/linux/components.c +++ b/platforms/linux/components.c @@ -99,7 +99,7 @@ static void hax_component_perm(const char *devname, struct miscdevice *misc) snprintf(devpath, sizeof(devpath), "/dev/%s", devname); err = kern_path(devpath, LOOKUP_FOLLOW, &path); if (err || !path.dentry) { - hax_error("Could not obtain device inode\n"); + hax_log(HAX_LOGE, "Could not obtain device inode\n"); return; } cred = get_current_cred(); @@ -163,12 +163,13 @@ int hax_vcpu_create_host(struct vcpu_t *cvcpu, void *vm_host, int vm_id, err = misc_register(&vcpu->dev); if (err) { - hax_error("Failed to register HAXM-VCPU device\n"); + hax_log(HAX_LOGE, "Failed to register HAXM-VCPU device\n"); hax_vcpu_destroy_linux(vcpu); return -1; } hax_component_perm(vcpu->devname, &vcpu->dev); - hax_info("Created HAXM-VCPU device with minor=%d\n", vcpu->dev.minor); + hax_log(HAX_LOGI, "Created HAXM-VCPU device with minor=%d\n", + vcpu->dev.minor); return 0; } @@ -233,12 +234,12 @@ int hax_vm_create_host(struct vm_t *cvm, int vm_id) err = misc_register(&vm->dev); if (err) { - hax_error("Failed to register HAXM-VM device\n"); + hax_log(HAX_LOGE, "Failed to register HAXM-VM device\n"); hax_vm_destroy_linux(vm); return -1; } hax_component_perm(vm->devname, &vm->dev); - hax_info("Created HAXM-VM device with minor=%d\n", vm->dev.minor); + hax_log(HAX_LOGI, "Created HAXM-VM device with minor=%d\n", vm->dev.minor); return 0; } @@ -274,13 +275,13 @@ static int hax_vcpu_open(struct inode 
*inodep, struct file *filep) vcpu = container_of(miscdev, struct hax_vcpu_linux_t, dev); cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); - hax_log_level(HAX_LOGD, "HAX vcpu open called\n"); + hax_log(HAX_LOGD, "HAX vcpu open called\n"); if (!cvcpu) return -ENODEV; ret = hax_vcpu_core_open(cvcpu); if (ret) - hax_error("Failed to open core vcpu\n"); + hax_log(HAX_LOGE, "Failed to open core vcpu\n"); hax_put_vcpu(cvcpu); return ret; } @@ -296,9 +297,9 @@ static int hax_vcpu_release(struct inode *inodep, struct file *filep) vcpu = container_of(miscdev, struct hax_vcpu_linux_t, dev); cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); - hax_log_level(HAX_LOGD, "HAX vcpu close called\n"); + hax_log(HAX_LOGD, "HAX vcpu close called\n"); if (!cvcpu) { - hax_error("Failed to find the vcpu, is it closed already?\n"); + hax_log(HAX_LOGE, "Failed to find the vcpu, is it closed already?\n"); return 0; } @@ -349,7 +350,7 @@ static long hax_vcpu_ioctl(struct file *filp, unsigned int cmd, msr = msrs.entries; /* nr_msr needs to be verified */ if (msrs.nr_msr >= 0x20) { - hax_error("MSRS invalid!\n"); + hax_log(HAX_LOGE, "MSRS invalid!\n"); ret = -EFAULT; break; } @@ -373,7 +374,7 @@ static long hax_vcpu_ioctl(struct file *filp, unsigned int cmd, } msr = msrs.entries; if(msrs.nr_msr >= 0x20) { - hax_error("MSRS invalid!\n"); + hax_log(HAX_LOGE, "MSRS invalid!\n"); ret = -EFAULT; break; } @@ -446,7 +447,7 @@ static long hax_vcpu_ioctl(struct file *filp, unsigned int cmd, } default: // TODO: Print information about the process that sent the ioctl. 
- hax_error("Unknown VCPU IOCTL 0x%lx\n", cmd); + hax_log(HAX_LOGE, "Unknown VCPU IOCTL 0x%lx\n", cmd); ret = -ENOSYS; break; } @@ -471,7 +472,7 @@ static int hax_vm_open(struct inode *inodep, struct file *filep) ret = hax_vm_core_open(cvm); hax_put_vm(cvm); - hax_log_level(HAX_LOGI, "Open VM\n"); + hax_log(HAX_LOGI, "Open VM\n"); return ret; } @@ -485,7 +486,7 @@ static int hax_vm_release(struct inode *inodep, struct file *filep) vm = container_of(miscdev, struct hax_vm_linux_t, dev); cvm = hax_get_vm(vm->id, 1); - hax_log_level(HAX_LOGI, "Close VM\n"); + hax_log(HAX_LOGI, "Close VM\n"); if (cvm) { /* put the ref get just now */ hax_put_vm(cvm); @@ -522,7 +523,8 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, } cvcpu = vcpu_create(cvm, vm, vcpu_id); if (!cvcpu) { - hax_error("Failed to create vcpu %x on vm %x\n", vcpu_id, vm_id); + hax_log(HAX_LOGE, "Failed to create vcpu %x on vm %x\n", + vcpu_id, vm_id); ret = -EINVAL; break; } @@ -534,8 +536,8 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, ret = -EFAULT; break; } - hax_info("IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x, pad=0x%x\n", - vm->id, info.va, info.size, info.pad); + hax_log(HAX_LOGI, "IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x, " + "pad=0x%x\n", vm->id, info.va, info.size, info.pad); ret = hax_vm_add_ramblock(cvm, info.va, info.size); break; } @@ -546,13 +548,13 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, break; } if (info.reserved) { - hax_error("IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", - vm->id, info.reserved); + hax_log(HAX_LOGE, "IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", + vm->id, info.reserved); ret = -EINVAL; break; } - hax_info("IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx, size=0x%llx\n", - vm->id, info.start_va, info.size); + hax_log(HAX_LOGI, "IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx, " + "size=0x%llx\n", vm->id, info.start_va, info.size); ret = hax_vm_add_ramblock(cvm, info.start_va, info.size); break; } 
@@ -573,8 +575,9 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, break; } if (info.reserved1 || info.reserved2) { - hax_error("IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x reserved2=0x%llx\n", - vm->id, info.reserved1, info.reserved2); + hax_log(HAX_LOGE, "IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x " + "reserved2=0x%llx\n", vm->id, info.reserved1, + info.reserved2); ret = -EINVAL; break; } @@ -588,8 +591,8 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, break; } if (info.reserved) { - hax_error("IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", - vm->id, info.reserved); + hax_log(HAX_LOGE, "IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", + vm->id, info.reserved); ret = -EINVAL; break; } @@ -609,7 +612,7 @@ static long hax_vm_ioctl(struct file *filp, unsigned int cmd, } default: // TODO: Print information about the process that sent the ioctl. - hax_error("Unknown VM IOCTL 0x%lx\n", cmd); + hax_log(HAX_LOGE, "Unknown VM IOCTL 0x%lx\n", cmd); break; } hax_put_vm(cvm); diff --git a/platforms/linux/hax_entry.c b/platforms/linux/hax_entry.c index 893e64f4..a2c96c0a 100644 --- a/platforms/linux/hax_entry.c +++ b/platforms/linux/hax_entry.c @@ -97,7 +97,7 @@ static long hax_dev_ioctl(struct file *filp, unsigned int cmd, cvm = hax_create_vm(&vm_id); if (!cvm) { - hax_log_level(HAX_LOGE, "Failed to create the HAX VM\n"); + hax_log(HAX_LOGE, "Failed to create the HAX VM\n"); ret = -ENOMEM; break; } @@ -125,29 +125,29 @@ static int __init hax_driver_init(void) } if (hax_module_init() < 0) { - hax_error("Failed to initialize HAXM module\n"); + hax_log(HAX_LOGE, "Failed to initialize HAXM module\n"); return -EAGAIN; } err = misc_register(&hax_dev); if (err) { - hax_error("Failed to register HAXM device\n"); + hax_log(HAX_LOGE, "Failed to register HAXM device\n"); hax_module_exit(); return err; } - hax_info("Created HAXM device with minor=%d\n", hax_dev.minor); + hax_log(HAX_LOGI, "Created HAXM device with minor=%d\n", hax_dev.minor); return 0; } static 
void __exit hax_driver_exit(void) { if (hax_module_exit() < 0) { - hax_error("Failed to finalize HAXM module\n"); + hax_log(HAX_LOGE, "Failed to finalize HAXM module\n"); } misc_deregister(&hax_dev); - hax_info("Removed HAXM device\n"); + hax_log(HAX_LOGI, "Removed HAXM device\n"); } module_init(hax_driver_init); diff --git a/platforms/linux/hax_host_mem.c b/platforms/linux/hax_host_mem.c index 71e689b8..28ed8253 100644 --- a/platforms/linux/hax_host_mem.c +++ b/platforms/linux/hax_host_mem.c @@ -138,7 +138,7 @@ int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) // TODO: Support HAX_PAGE_ALLOC_BELOW_4G if (flags & HAX_PAGE_ALLOC_BELOW_4G) { - hax_warning("%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); + hax_log(HAX_LOGW, "%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); } memdesc->ppage = alloc_page(gfp_flags); diff --git a/platforms/linux/hax_mem_alloc.c b/platforms/linux/hax_mem_alloc.c index 144fa9da..f9e41076 100644 --- a/platforms/linux/hax_mem_alloc.c +++ b/platforms/linux/hax_mem_alloc.c @@ -94,7 +94,7 @@ struct hax_page * hax_alloc_pages(int order, uint32_t flags, bool vmap) gfp_mask = GFP_KERNEL; // TODO: Support HAX_MEM_LOW_4G if (flags & HAX_MEM_LOW_4G) { - hax_warning("%s: HAX_MEM_LOW_4G is ignored\n", __func__); + hax_log(HAX_LOGW, "%s: HAX_MEM_LOW_4G is ignored\n", __func__); } page = alloc_pages(GFP_KERNEL, order); diff --git a/platforms/linux/hax_wrapper.c b/platforms/linux/hax_wrapper.c index d47874e4..1e79d35d 100644 --- a/platforms/linux/hax_wrapper.c +++ b/platforms/linux/hax_wrapper.c @@ -40,24 +40,48 @@ #include -int default_hax_log_level = 3; int max_cpus; hax_cpumap_t cpu_online_map; -int hax_log_level(int level, const char *fmt, ...) 
+static const char* kLogLevel[] = { + KERN_ERR, + KERN_DEBUG, // HAX_LOGD + KERN_INFO, // HAX_LOGI + KERN_WARNING, // HAX_LOGW + KERN_ERR, // HAX_LOGE + KERN_ERR // HAX_LOGPANIC +}; + +static const char* kLogPrefix[] = { + "haxm: ", + "haxm_debug: ", + "haxm_info: ", + "haxm_warning: ", + "haxm_error: ", + "haxm_panic: " +}; + +void hax_log(int level, const char *fmt, ...) { struct va_format vaf; va_list args; - if (level < default_hax_log_level) - return 0; + if (level < HAX_LOG_DEFAULT) + return; vaf.fmt = fmt; vaf.va = &args; va_start(args, fmt); - printk("%shaxm: %pV", KERN_ERR, &vaf); + printk("%s%s%pV", kLogLevel[level], kLogPrefix[level], &vaf); + va_end(args); +} + +void hax_panic(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + hax_log(HAX_LOGPANIC, fmt, args); va_end(args); - return 0; } uint32_t hax_cpuid(void) @@ -121,79 +145,6 @@ void hax_disable_irq(void) asm_disable_irq(); } -void hax_error(char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - if (HAX_LOGE < default_hax_log_level) - return; - - vaf.fmt = fmt; - vaf.va = &args; - va_start(args, fmt); - printk("%shaxm_error: %pV", KERN_ERR, &vaf); - va_end(args); -} - -void hax_warning(char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - if (HAX_LOGW < default_hax_log_level) - return; - - vaf.fmt = fmt; - vaf.va = &args; - va_start(args, fmt); - printk("%shaxm_warning: %pV", KERN_WARNING, &vaf); - va_end(args); -} - -void hax_info(char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - if (HAX_LOGI < default_hax_log_level) - return; - - vaf.fmt = fmt; - vaf.va = &args; - va_start(args, fmt); - printk("%shaxm_info: %pV", KERN_INFO, &vaf); - va_end(args); -} - -void hax_debug(char *fmt, ...) 
-{ - struct va_format vaf; - va_list args; - - if (HAX_LOGD < default_hax_log_level) - return; - - vaf.fmt = fmt; - vaf.va = &args; - va_start(args, fmt); - printk("%shaxm_debug: %pV", KERN_DEBUG, &vaf); - va_end(args); -} - -void hax_panic_vcpu(struct vcpu_t *v, char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - vaf.fmt = fmt; - vaf.va = &args; - va_start(args, fmt); - printk("%shaxm_panic: %pV", KERN_ERR, &vaf); - va_end(args); - vcpu_set_panic(v); -} - void hax_assert(bool condition) { if (!condition) @@ -272,7 +223,7 @@ hax_spinlock *hax_spinlock_alloc_init(void) lock = kmalloc(sizeof(struct hax_spinlock), GFP_KERNEL); if (!lock) { - hax_error("Could not allocate spinlock\n"); + hax_log(HAX_LOGE, "Could not allocate spinlock\n"); return NULL; } spin_lock_init(&lock->lock); @@ -304,7 +255,7 @@ hax_mutex hax_mutex_alloc_init(void) lock = kmalloc(sizeof(struct mutex), GFP_KERNEL); if (!lock) { - hax_error("Could not allocate mutex\n"); + hax_log(HAX_LOGE, "Could not allocate mutex\n"); return NULL; } mutex_init(lock); diff --git a/platforms/netbsd/components.c b/platforms/netbsd/components.c index 90122909..99316f0a 100644 --- a/platforms/netbsd/components.c +++ b/platforms/netbsd/components.c @@ -85,7 +85,8 @@ int hax_vcpu_create_host(struct vcpu_t *cvcpu, void *vm_host, int vm_id, minor = vmvcpu2unit(vm_id, vcpu_id); sc = device_lookup_private(&hax_vcpu_cd, minor); if (!sc) { - hax_error("device lookup for hax_vcpu failed (minor %u)\n", minor); + hax_log(HAX_LOGE, "device lookup for hax_vcpu failed (minor %u)\n", + minor); return -1; } @@ -97,7 +98,8 @@ int hax_vcpu_create_host(struct vcpu_t *cvcpu, void *vm_host, int vm_id, vcpu->id = vcpu_id; sc->vcpu = vcpu; - hax_info("Created HAXM-VCPU device 'hax_vm%02d/vcpu%02d'\n", vm_id, vcpu_id); + hax_log(HAX_LOGI, "Created HAXM-VCPU device 'hax_vm%02d/vcpu%02d'\n", + vm_id, vcpu_id); return 0; } @@ -112,7 +114,8 @@ int hax_vcpu_destroy_host(struct vcpu_t *cvcpu, void *vcpu_host) minor = 
vmvcpu2unit(vcpu->vm->id, vcpu->id); sc = device_lookup_private(&hax_vcpu_cd, minor); if (!sc) { - hax_error("device lookup for hax_vcpu failed (minor %u)\n", minor); + hax_log(HAX_LOGE, "device lookup for hax_vcpu failed (minor %u)\n", + minor); return -1; } @@ -146,7 +149,8 @@ static void hax_vm_destroy_netbsd(hax_vm_netbsd_t *vm) minor = vm->id; sc = device_lookup_private(&hax_vm_cd, minor); if (!sc) { - hax_error("device lookup for hax_vm failed (minor %u)\n", minor); + hax_log(HAX_LOGE, "device lookup for hax_vm failed (minor %u)\n", + minor); return; } @@ -172,7 +176,8 @@ int hax_vm_create_host(struct vm_t *cvm, int vm_id) minor = vm_id; sc = device_lookup_private(&hax_vm_cd, minor); if (!sc) { - hax_error("device lookup for hax_vm failed (minor %u)\n", minor); + hax_log(HAX_LOGE, "device lookup for hax_vm failed (minor %u)\n", + minor); return -1; } @@ -182,7 +187,7 @@ int hax_vm_create_host(struct vm_t *cvm, int vm_id) sc->vm = vm; - hax_info("Created HAXM-VM device 'hax_vm/vm%02d'\n", vm_id); + hax_log(HAX_LOGI, "Created HAXM-VM device 'hax_vm/vm%02d'\n", vm_id); return 0; } diff --git a/platforms/netbsd/hax_entry.c b/platforms/netbsd/hax_entry.c index 39fc0984..b4a72afd 100644 --- a/platforms/netbsd/hax_entry.c +++ b/platforms/netbsd/hax_entry.c @@ -80,7 +80,7 @@ hax_vm_attach(device_t parent, device_t self, void *aux) sc = device_private(self); if (sc == NULL) { - hax_error("device_private() for hax_vm failed\n"); + hax_log(HAX_LOGE, "device_private() for hax_vm failed\n"); return; } @@ -102,7 +102,7 @@ hax_vm_detach(device_t self, int flags) sc = device_private(self); if (sc == NULL) { - hax_error("device_private() for hax_vm failed\n"); + hax_log(HAX_LOGE, "device_private() for hax_vm failed\n"); return -ENODEV; } pmf_device_deregister(self); @@ -124,7 +124,7 @@ hax_vcpu_attach(device_t parent, device_t self, void *aux) sc = device_private(self); if (sc == NULL) { - hax_error("device_private() for hax_vcpu failed\n"); + hax_log(HAX_LOGE, 
"device_private() for hax_vcpu failed\n"); return; } @@ -149,7 +149,7 @@ hax_vcpu_detach(device_t self, int flags) sc = device_private(self); if (sc == NULL) { - hax_error("device_private() for hax_vcpu failed\n"); + hax_log(HAX_LOGE, "device_private() for hax_vcpu failed\n"); return -ENODEV; } pmf_device_deregister(self); @@ -238,38 +238,38 @@ haxm_modcmd(modcmd_t cmd, void *arg __unused) // Register hax_vm err = config_cfdriver_attach(&hax_vm_cd); if (err) { - hax_error("Unable to register cfdriver hax_vm\n"); + hax_log(HAX_LOGE, "Unable to register cfdriver hax_vm\n"); goto init_err1; } err = config_cfattach_attach(hax_vm_cd.cd_name, &hax_vm_ca); if (err) { - hax_error("Unable to register cfattch hax_vm\n"); + hax_log(HAX_LOGE, "Unable to register cfattch hax_vm\n"); goto init_err2; } err = config_cfdata_attach(hax_vm_cfdata, 1); if (err) { - hax_error("Unable to register cfdata hax_vm\n"); + hax_log(HAX_LOGE, "Unable to register cfdata hax_vm\n"); goto init_err3; } // Register hax_vcpu err = config_cfdriver_attach(&hax_vcpu_cd); if (err) { - hax_error("Unable to register cfdriver hax_vcpu\n"); + hax_log(HAX_LOGE, "Unable to register cfdriver hax_vcpu\n"); goto init_err4; } err = config_cfattach_attach(hax_vcpu_cd.cd_name, &hax_vcpu_ca); if (err) { - hax_error("Unable to register cfattch hax_vcpu\n"); + hax_log(HAX_LOGE, "Unable to register cfattch hax_vcpu\n"); goto init_err5; } err = config_cfdata_attach(hax_vcpu_cfdata, 1); if (err) { - hax_error("Unable to register cfdata hax_vcpu\n"); + hax_log(HAX_LOGE, "Unable to register cfdata hax_vcpu\n"); goto init_err6; } @@ -277,19 +277,19 @@ haxm_modcmd(modcmd_t cmd, void *arg __unused) err = devsw_attach(HAX_DEVICE_NAME, NULL, &hax_bmajor, &hax_cdevsw, &hax_cmajor); if (err) { - hax_error("Failed to register HAXM device\n"); + hax_log(HAX_LOGE, "Failed to register HAXM device\n"); goto init_err7; } err = devsw_attach(HAX_VM_DEVICE_NAME, NULL, &hax_vm_bmajor, &hax_vm_cdevsw, &hax_vm_cmajor); if (err) { - 
hax_error("Failed to register HAXM VM device\n"); + hax_log(HAX_LOGE, "Failed to register HAXM VM device\n"); goto init_err8; } err = devsw_attach(HAX_VCPU_DEVICE_NAME, NULL, &hax_vcpu_bmajor, &hax_vcpu_cdevsw, &hax_vcpu_cmajor); if (err) { - hax_error("Failed to register HAXM VCPU device\n"); + hax_log(HAX_LOGE, "Failed to register HAXM VCPU device\n"); goto init_err9; } @@ -301,11 +301,11 @@ haxm_modcmd(modcmd_t cmd, void *arg __unused) // Initialize HAXM if (hax_module_init() < 0) { - hax_error("Failed to initialize HAXM module\n"); + hax_log(HAX_LOGE, "Failed to initialize HAXM module\n"); goto init_err10; } - hax_info("Created HAXM device\n"); + hax_log(HAX_LOGI, "Created HAXM device\n"); return 0; init_err10: @@ -331,7 +331,7 @@ haxm_modcmd(modcmd_t cmd, void *arg __unused) } case MODULE_CMD_FINI: { if (hax_module_exit() < 0) { - hax_error("Failed to finalize HAXM module\n"); + hax_log(HAX_LOGE, "Failed to finalize HAXM module\n"); return EBUSY; } @@ -347,7 +347,7 @@ haxm_modcmd(modcmd_t cmd, void *arg __unused) config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca); config_cfdriver_detach(&hax_vm_cd); - hax_info("Removed HAXM device\n"); + hax_log(HAX_LOGI, "Removed HAXM device\n"); return 0; } default: diff --git a/platforms/netbsd/hax_entry_hax.c b/platforms/netbsd/hax_entry_hax.c index 616bede6..0367116d 100644 --- a/platforms/netbsd/hax_entry_hax.c +++ b/platforms/netbsd/hax_entry_hax.c @@ -66,14 +66,14 @@ struct cdevsw hax_cdevsw = { int hax_open(dev_t dev __unused, int flags __unused, int mode __unused, struct lwp *l __unused) { - hax_log_level(HAX_LOGI, "HAX module opened\n"); + hax_log(HAX_LOGI, "HAX module opened\n"); return 0; } int hax_close(dev_t self __unused, int flag __unused, int mode __unused, struct lwp *l __unused) { - hax_log_level(HAX_LOGI, "hax_close\n"); + hax_log(HAX_LOGI, "hax_close\n"); return 0; } @@ -108,7 +108,7 @@ int hax_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, cvm = hax_create_vm(&vm_id); if (!cvm) { - 
hax_log_level(HAX_LOGE, "Failed to create the HAX VM\n"); + hax_log(HAX_LOGE, "Failed to create the HAX VM\n"); ret = -ENOMEM; break; } @@ -117,8 +117,8 @@ int hax_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, break; } default: - hax_error("Unknown ioctl %#lx, pid=%d ('%s')\n", cmd, - l->l_proc->p_pid, l->l_proc->p_comm); + hax_log(HAX_LOGE, "Unknown ioctl %#lx, pid=%d ('%s')\n", cmd, + l->l_proc->p_pid, l->l_proc->p_comm); ret = -ENOSYS; break; } diff --git a/platforms/netbsd/hax_entry_vcpu.c b/platforms/netbsd/hax_entry_vcpu.c index e4079a1a..02094072 100644 --- a/platforms/netbsd/hax_entry_vcpu.c +++ b/platforms/netbsd/hax_entry_vcpu.c @@ -71,7 +71,7 @@ int hax_vcpu_open(dev_t self, int flag __unused, int mode __unused, sc = device_lookup_private(&hax_vcpu_cd, minor(self)); if (sc == NULL) { - hax_error("device_lookup_private() for hax_vcpu failed\n"); + hax_log(HAX_LOGE, "device_lookup_private() for hax_vcpu failed\n"); return -ENODEV; } @@ -82,7 +82,8 @@ int hax_vcpu_open(dev_t self, int flag __unused, int mode __unused, vcpu_id = unit2vcpuid(unit); if (!vcpu) { - hax_error("HAX VCPU 'hax_vm%02d/vcpu%02d' is not ready\n", vm_id, vcpu_id); + hax_log(HAX_LOGE, "HAX VCPU 'hax_vm%02d/vcpu%02d' is not ready\n", + vm_id, vcpu_id); return -ENODEV; } @@ -91,13 +92,14 @@ int hax_vcpu_open(dev_t self, int flag __unused, int mode __unused, cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); - hax_log_level(HAX_LOGD, "HAX VM%02d vcpu%02d open called\n", vcpu->vm->id, vcpu->id); + hax_log(HAX_LOGD, "HAX VM%02d vcpu%02d open called\n", vcpu->vm->id, + vcpu->id); if (!cvcpu) return -ENODEV; ret = hax_vcpu_core_open(cvcpu); if (ret) - hax_error("Failed to open core vcpu\n"); + hax_log(HAX_LOGE, "Failed to open core vcpu\n"); hax_put_vcpu(cvcpu); return ret; } @@ -111,15 +113,16 @@ int hax_vcpu_close(dev_t self, int flag __unused, int mode __unused, sc = device_lookup_private(&hax_vcpu_cd, minor(self)); if (sc == NULL) { - hax_error("device_lookup_private() for 
hax_vcpu failed\n"); + hax_log(HAX_LOGE, "device_lookup_private() for hax_vcpu failed\n"); return -ENODEV; } vcpu = sc->vcpu; cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); - hax_log_level(HAX_LOGD, "HAX VM%02d vcpu%02d close called\n", vcpu->vm->id, vcpu->id); + hax_log(HAX_LOGD, "HAX VM%02d vcpu%02d close called\n", vcpu->vm->id, + vcpu->id); if (!cvcpu) { - hax_error("Failed to find the vcpu, is it closed already?\n"); + hax_log(HAX_LOGE, "Failed to find the vcpu, is it closed already?\n"); return 0; } @@ -141,7 +144,7 @@ int hax_vcpu_ioctl(dev_t self, u_long cmd, void *data, int flag, sc = device_lookup_private(&hax_vcpu_cd, minor(self)); if (sc == NULL) { - hax_error("device_lookup_private() for hax_vcpu failed\n"); + hax_log(HAX_LOGE, "device_lookup_private() for hax_vcpu failed\n"); return -ENODEV; } vcpu = sc->vcpu; @@ -169,7 +172,7 @@ int hax_vcpu_ioctl(dev_t self, u_long cmd, void *data, int flag, msr = msrs->entries; /* nr_msr needs to be verified */ if (msrs->nr_msr >= 0x20) { - hax_error("MSRS invalid!\n"); + hax_log(HAX_LOGE, "MSRS invalid!\n"); ret = -EFAULT; break; } @@ -190,7 +193,7 @@ int hax_vcpu_ioctl(dev_t self, u_long cmd, void *data, int flag, msr = msrs->entries; if(msrs->nr_msr >= 0x20) { - hax_error("MSRS invalid!\n"); + hax_log(HAX_LOGE, "MSRS invalid!\n"); ret = -EFAULT; break; } @@ -241,8 +244,8 @@ int hax_vcpu_ioctl(dev_t self, u_long cmd, void *data, int flag, } default: // TODO: Print information about the process that sent the ioctl. 
- hax_error("Unknown VCPU IOCTL %#lx, pid=%d ('%s')\n", cmd, - l->l_proc->p_pid, l->l_proc->p_comm); + hax_log(HAX_LOGE, "Unknown VCPU IOCTL %#lx, pid=%d ('%s')\n", cmd, + l->l_proc->p_pid, l->l_proc->p_comm); ret = -ENOSYS; break; } diff --git a/platforms/netbsd/hax_entry_vm.c b/platforms/netbsd/hax_entry_vm.c index 334d564d..3e112c94 100644 --- a/platforms/netbsd/hax_entry_vm.c +++ b/platforms/netbsd/hax_entry_vm.c @@ -69,7 +69,7 @@ int hax_vm_open(dev_t self, int flag __unused, int mode __unused, sc = device_lookup_private(&hax_vm_cd, minor(self)); if (sc == NULL) { - hax_error("device_lookup_private() for hax_vm failed\n"); + hax_log(HAX_LOGE, "device_lookup_private() for hax_vm failed\n"); return -ENODEV; } @@ -78,7 +78,7 @@ int hax_vm_open(dev_t self, int flag __unused, int mode __unused, unit = device_unit(sc->sc_dev); if (!vm) { - hax_error("HAX VM 'hax_vm/vm%02d' is not ready\n", unit); + hax_log(HAX_LOGE, "HAX VM 'hax_vm/vm%02d' is not ready\n", unit); return -ENODEV; } @@ -90,7 +90,7 @@ int hax_vm_open(dev_t self, int flag __unused, int mode __unused, ret = hax_vm_core_open(cvm); hax_put_vm(cvm); - hax_log_level(HAX_LOGI, "Open VM%02d\n", vm->id); + hax_log(HAX_LOGI, "Open VM%02d\n", vm->id); return ret; } @@ -103,14 +103,14 @@ int hax_vm_close(dev_t self __unused, int flag __unused, int mode __unused, sc = device_lookup_private(&hax_vm_cd, minor(self)); if (sc == NULL) { - hax_error("device_lookup_private() for hax_vm failed\n"); + hax_log(HAX_LOGE, "device_lookup_private() for hax_vm failed\n"); return -ENODEV; } vm = sc->vm; cvm = hax_get_vm(vm->id, 1); - hax_log_level(HAX_LOGI, "Close VM%02d\n", vm->id); + hax_log(HAX_LOGI, "Close VM%02d\n", vm->id); if (cvm) { /* put the ref get just now */ hax_put_vm(cvm); @@ -129,7 +129,7 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, sc = device_lookup_private(&hax_vm_cd, minor(self)); if (sc == NULL) { - hax_error("device_lookup_private() for hax_vm failed\n"); + hax_log(HAX_LOGE, 
"device_lookup_private() for hax_vm failed\n"); return -ENODEV; } vm = sc->vm; @@ -147,7 +147,8 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, vm_id = vm->id; cvcpu = vcpu_create(cvm, vm, *vcpu_id); if (!cvcpu) { - hax_error("Failed to create vcpu %x on vm %x\n", *vcpu_id, vm_id); + hax_log(HAX_LOGE, "Failed to create vcpu %x on vm %x\n", + *vcpu_id, vm_id); ret = -EINVAL; break; } @@ -156,8 +157,8 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, case HAX_VM_IOCTL_ALLOC_RAM: { struct hax_alloc_ram_info *info; info = (struct hax_alloc_ram_info *)data; - hax_info("IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x, pad=0x%x\n", - vm->id, info->va, info->size, info->pad); + hax_log(HAX_LOGI, "IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x, " + "pad=0x%x\n", vm->id, info->va, info->size, info->pad); ret = hax_vm_add_ramblock(cvm, info->va, info->size); break; } @@ -165,13 +166,13 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, struct hax_ramblock_info *info; info = (struct hax_ramblock_info *)data; if (info->reserved) { - hax_error("IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", - vm->id, info->reserved); + hax_log(HAX_LOGE, "IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", + vm->id, info->reserved); ret = -EINVAL; break; } - hax_info("IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx, size=0x%llx\n", - vm->id, info->start_va, info->size); + hax_log(HAX_LOGI, "IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx, " + "size=0x%llx\n", vm->id, info->start_va, info->size); ret = hax_vm_add_ramblock(cvm, info->start_va, info->size); break; } @@ -186,8 +187,9 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, struct hax_set_ram_info2 *info; info = (struct hax_set_ram_info2 *)data; if (info->reserved1 || info->reserved2) { - hax_error("IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x reserved2=0x%llx\n", - vm->id, info->reserved1, info->reserved2); + hax_log(HAX_LOGE, "IOCTL_SET_RAM2: 
vm_id=%d, reserved1=0x%x " + "reserved2=0x%llx\n", vm->id, info->reserved1, + info->reserved2); ret = -EINVAL; break; } @@ -198,8 +200,8 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, struct hax_protect_ram_info *info; info = (struct hax_protect_ram_info *)data; if (info->reserved) { - hax_error("IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", - vm->id, info->reserved); + hax_log(HAX_LOGE, "IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", + vm->id, info->reserved); ret = -EINVAL; break; } @@ -216,8 +218,8 @@ int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag, } default: // TODO: Print information about the process that sent the ioctl. - hax_error("Unknown VM IOCTL %#lx, pid=%d ('%s')\n", cmd, - l->l_proc->p_pid, l->l_proc->p_comm); + hax_log(HAX_LOGE, "Unknown VM IOCTL %#lx, pid=%d ('%s')\n", cmd, + l->l_proc->p_pid, l->l_proc->p_comm); break; } hax_put_vm(cvm); diff --git a/platforms/netbsd/hax_host_mem.c b/platforms/netbsd/hax_host_mem.c index d408f845..fae8060a 100644 --- a/platforms/netbsd/hax_host_mem.c +++ b/platforms/netbsd/hax_host_mem.c @@ -42,11 +42,12 @@ int hax_pin_user_pages(uint64_t start_uva, uint64_t size, hax_memdesc_user *memdesc) { if (start_uva & PAGE_MASK) { - hax_error("Failed 'start_uva & ~PAGE_MASK', start_uva=%llx\n", start_uva); + hax_log(HAX_LOGE, "Failed 'start_uva & ~PAGE_MASK', start_uva=%llx\n", + start_uva); return -EINVAL; } if (!size) { - hax_error("Failed '!size'\n"); + hax_log(HAX_LOGE, "Failed '!size'\n"); return -EINVAL; } @@ -177,7 +178,7 @@ int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) // TODO: Support HAX_PAGE_ALLOC_BELOW_4G if (flags & HAX_PAGE_ALLOC_BELOW_4G) { - hax_warning("%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); + hax_log(HAX_LOGW, "%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); } memdesc->page = uvm_pagealloc(NULL, 0, NULL, ISSET(flags, HAX_PAGE_ALLOC_ZEROED) ? 
UVM_PGA_ZERO : 0); diff --git a/platforms/netbsd/hax_mem_alloc.c b/platforms/netbsd/hax_mem_alloc.c index e6a97b94..0458104c 100644 --- a/platforms/netbsd/hax_mem_alloc.c +++ b/platforms/netbsd/hax_mem_alloc.c @@ -144,7 +144,7 @@ struct hax_page * hax_alloc_pages(int order, uint32_t flags, bool vmap) // TODO: Support HAX_MEM_LOW_4G if (flags & HAX_MEM_LOW_4G) { - hax_warning("%s: HAX_MEM_LOW_4G is ignored\n", __func__); + hax_log(HAX_LOGW, "%s: HAX_MEM_LOW_4G is ignored\n", __func__); } ppage->pglist = kmem_zalloc(sizeof(struct pglist), KM_SLEEP); diff --git a/platforms/netbsd/hax_mm.c b/platforms/netbsd/hax_mm.c index 8b377f3e..78dd1987 100644 --- a/platforms/netbsd/hax_mm.c +++ b/platforms/netbsd/hax_mm.c @@ -108,7 +108,7 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, UVM_ADV_RANDOM, 0)); uao_reference(uao); if (err) { - hax_error("Failed to map into user\n"); + hax_log(HAX_LOGE, "Failed to map into user\n"); uao_detach(uao); kmem_free(hinfo, sizeof(struct hax_vcpu_mem_hinfo_t)); return -ENOMEM; @@ -119,7 +119,7 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, err = uvm_map_extract(map, uva, size, kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT); if (err) { - hax_error("Failed to map into kernel\n"); + hax_log(HAX_LOGE, "Failed to map into kernel\n"); if (!ISSET(flags, HAX_VCPUMEM_VALIDVA)) { uvm_unmap(map, uva, uva + size); uao_detach(uao); diff --git a/platforms/netbsd/hax_wrapper.c b/platforms/netbsd/hax_wrapper.c index 07364540..d62535b8 100644 --- a/platforms/netbsd/hax_wrapper.c +++ b/platforms/netbsd/hax_wrapper.c @@ -44,20 +44,36 @@ #include "../../core/include/hax_core_interface.h" #include "../../core/include/ia32.h" -int default_hax_log_level = HAX_LOG_DEFAULT; int max_cpus; hax_cpumap_t cpu_online_map; -int hax_log_level(int level, const char *fmt, ...) 
+static const char* kLogPrefix[] = { + "haxm: ", + "haxm_debug: ", + "haxm_info: ", + "haxm_warning: ", + "haxm_error: ", + "haxm_panic: " +}; + +void hax_log(int level, const char *fmt, ...) { va_list args; va_start(args, fmt); - if (level >= default_hax_log_level) { - printf("haxm: "); + if (level >= HAX_LOG_DEFAULT) { + printf("%s", kLogPrefix[level]); vprintf(fmt, args); } va_end(args); - return 0; +} + +void hax_panic(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + hax_log(HAX_LOGPANIC, fmt, args); + panic(fmt, args); + va_end(args); } uint32_t hax_cpuid(void) @@ -123,70 +139,6 @@ void hax_disable_irq(void) x86_disable_intr(); } -void hax_error(char *fmt, ...) -{ - va_list args; - - if (HAX_LOGE < default_hax_log_level) - return; - - va_start(args, fmt); - printf("haxm_error: "); - vprintf(fmt, args); - va_end(args); -} - -void hax_warning(char *fmt, ...) -{ - va_list args; - - if (HAX_LOGW < default_hax_log_level) - return; - - va_start(args, fmt); - printf("haxm_warning: "); - vprintf(fmt, args); - va_end(args); -} - -void hax_info(char *fmt, ...) -{ - va_list args; - - if (HAX_LOGI < default_hax_log_level) - return; - - va_start(args, fmt); - printf("haxm_info: "); - vprintf(fmt, args); - va_end(args); -} - -void hax_debug(char *fmt, ...) -{ - va_list args; - - if (HAX_LOGD < default_hax_log_level) - return; - - va_start(args, fmt); - printf("haxm_debug: "); - vprintf(fmt, args); - va_end(args); -} - -void hax_panic_vcpu(struct vcpu_t *v, char *fmt, ...) 
-{ - va_list args; - - va_start(args, fmt); - printf("haxm_panic: "); - vprintf(fmt, args); - va_end(args); - - vcpu_set_panic(v); -} - /* Misc */ void hax_smp_mb(void) { @@ -293,7 +245,7 @@ hax_spinlock *hax_spinlock_alloc_init(void) lock = kmem_alloc(sizeof(struct hax_spinlock), KM_SLEEP); if (!lock) { - hax_error("Could not allocate spinlock\n"); + hax_log(HAX_LOGE, "Could not allocate spinlock\n"); return NULL; } mutex_init(&lock->lock, MUTEX_DEFAULT, IPL_VM); @@ -327,7 +279,7 @@ hax_mutex hax_mutex_alloc_init(void) lock = kmem_alloc(sizeof(kmutex_t), KM_SLEEP); if (!lock) { - hax_error("Could not allocate mutex\n"); + hax_log(HAX_LOGE, "Could not allocate mutex\n"); return NULL; } mutex_init(lock, MUTEX_DEFAULT, IPL_NONE); diff --git a/platforms/windows/components.c b/platforms/windows/components.c index 85cd03b2..7201e269 100644 --- a/platforms/windows/components.c +++ b/platforms/windows/components.c @@ -71,14 +71,14 @@ int hax_vcpu_create_host(struct vcpu_t *cvcpu, void *vm_host, int vm_id, TRUE, vm->ssdl, (LPGUID)&HAX_VCPU_GUID, &pDevObj); if (!NT_SUCCESS(ntStatus)) { - hax_error("Failed to create VCPU device\n"); + hax_log(HAX_LOGE, "Failed to create VCPU device\n"); return -1; } DevExt = (struct hax_dev_ext *)pDevObj->DeviceExtension; ntStatus = IoCreateSymbolicLink(&ntWin32NameString, &ntUnicodeString); if (!NT_SUCCESS(ntStatus)) { - hax_error("Failed to creaet symbolic link \n"); + hax_log(HAX_LOGE, "Failed to create symbolic link\n"); IoDeleteDevice(pDevObj); return -1; } @@ -133,11 +133,11 @@ static PUNICODE_STRING hax_win_construct_ssdl(void) ssdl = hax_vmalloc(sizeof(UNICODE_STRING), 0); if (!ssdl) { - hax_error("Failed to alloc ssdl\n"); + hax_log(HAX_LOGE, "Failed to alloc ssdl\n"); goto error; } if (hax_init_unicodestring(ssdl, length)) { - hax_error("Failed to get the ssid unicode string\n"); + hax_log(HAX_LOGE, "Failed to get the ssid unicode string\n"); goto error; } ssdlValid = 1; @@ -145,7 +145,7 @@ static PUNICODE_STRING 
hax_win_construct_ssdl(void) ssdl, L"%ws", L"D:P(A;;GA;;;SY)(A;;GA;;;BA)(A;;GA;;;WD)"); if (!NT_SUCCESS(ntStatus)) { - hax_error("Failed to get the SSDL string\n"); + hax_log(HAX_LOGE, "Failed to get the SSDL string\n"); goto error; } @@ -174,7 +174,7 @@ int hax_vm_create_host(struct vm_t *cvm, int vm_id) ssdl = hax_win_construct_ssdl(); if (!ssdl) { - hax_error("Failed to construct ssdl for current thread\n"); + hax_log(HAX_LOGE, "Failed to construct ssdl for current thread\n"); return -1; } @@ -187,7 +187,7 @@ int hax_vm_create_host(struct vm_t *cvm, int vm_id) (LPGUID)&HAX_VM_GUID, &pDevObj); if (!NT_SUCCESS(ntStatus)) { - hax_error("Failed to create VM device\n"); + hax_log(HAX_LOGE, "Failed to create VM device\n"); hax_win_destruct_ssdl(ssdl); return -1; } @@ -196,7 +196,7 @@ int hax_vm_create_host(struct vm_t *cvm, int vm_id) ntStatus = IoCreateSymbolicLink(&ntWin32NameString, &ntUnicodeString); if (!NT_SUCCESS(ntStatus)) { - hax_error("Failed to creaet symbolic link \n"); + hax_log(HAX_LOGE, "Failed to creaet symbolic link \n"); hax_win_destruct_ssdl(ssdl); IoDeleteDevice(pDevObj); return -1; diff --git a/platforms/windows/hax_entry.c b/platforms/windows/hax_entry.c index 952a7773..f56335e8 100644 --- a/platforms/windows/hax_entry.c +++ b/platforms/windows/hax_entry.c @@ -70,7 +70,7 @@ static int hax_host_init(void) } if (hax_module_init() < 0) { - hax_error("Hax module init failed\n"); + hax_log(HAX_LOGE, "Hax module init failed\n"); smpc_dpc_exit(); return -1; } @@ -110,7 +110,7 @@ NTSTATUS DriverEntry(__in PDRIVER_OBJECT DriverObject, FALSE, // Not an exclusive device &pDevObj); // Returned ptr to Device Object if (!NT_SUCCESS(ntStatus)) { - DbgPrint("Couldn't create the device object\n"); + hax_log(HAX_LOGE, "Couldn't create the device object\n"); write_event(HaxDriverCreateUpDevFailure, DriverObject, NULL, 0); return ntStatus; } @@ -126,7 +126,7 @@ NTSTATUS DriverEntry(__in PDRIVER_OBJECT DriverObject, // // Delete everything that this routine has 
allocated. // - DbgPrint("Couldn't create symbolic link\n"); + hax_log(HAX_LOGE, "Couldn't create symbolic link\n"); write_event(HaxDriverCreateUpSymFailure, DriverObject, NULL, 0); goto error_0; } @@ -135,7 +135,7 @@ NTSTATUS DriverEntry(__in PDRIVER_OBJECT DriverObject, ret = hax_host_init(); if (ret < 0) { ntStatus = STATUS_UNSUCCESSFUL; - hax_error("Hax host init failed\n"); + hax_log(HAX_LOGE, "Hax host init failed\n"); write_event(HaxDriverHostInitFailure, DriverObject, NULL, 0); goto error_1; } @@ -191,7 +191,7 @@ NTSTATUS HaxClose(PDEVICE_OBJECT DeviceObject, PIRP Irp) NTSTATUS ret = STATUS_SUCCESS; devext = (struct hax_dev_ext *)DeviceObject->DeviceExtension; - hax_info("HaxClose device %x at process %p\n", devext->type, + hax_log(HAX_LOGI, "HaxClose device %x at process %p\n", devext->type, (ULONG_PTR)PsGetCurrentThread()); switch (devext->type) { case HAX_DEVEXT_TYPE_UP: @@ -201,7 +201,7 @@ NTSTATUS HaxClose(PDEVICE_OBJECT DeviceObject, PIRP Irp) case HAX_DEVEXT_TYPE_VM: vm = &devext->vmdev_ext; cvm = vm->cvm; - hax_info("Close VM %x\n", vm->vm_id); + hax_log(HAX_LOGI, "Close VM %x\n", vm->vm_id); if (cvm) hax_put_vm(cvm); break; @@ -209,8 +209,8 @@ NTSTATUS HaxClose(PDEVICE_OBJECT DeviceObject, PIRP Irp) vcpu = &devext->vcpudev_ext; cvcpu = hax_get_vcpu(vcpu->vm_id, vcpu->vcpu_id, 1); if (!cvcpu) { - hax_error("Failed to get cvcpu for vm %x vcpu %x\n", - vcpu->vm_id, vcpu->vcpu_id); + hax_log(HAX_LOGE, "Failed to get cvcpu for vm %x vcpu %x\n", + vcpu->vm_id, vcpu->vcpu_id); ret = STATUS_UNSUCCESSFUL; goto done; } @@ -218,7 +218,7 @@ NTSTATUS HaxClose(PDEVICE_OBJECT DeviceObject, PIRP Irp) hax_put_vcpu(vcpu->cvcpu); break; default: - hax_error("Invalid device type %x\n", devext->type); + hax_log(HAX_LOGE, "Invalid device type %x\n", devext->type); ret = STATUS_UNSUCCESSFUL; break; } @@ -320,9 +320,9 @@ NTSTATUS HaxVcpuControl(PDEVICE_OBJECT DeviceObject, for (i = 0; i < msrs->nr_msr; i++, msr++) { fail = vcpu_set_msr(cvcpu, msr->entry, msr->value); if 
(fail) { - // hax_log_level(HAX_LOGE, - // "Failed to set msr %x index %x\n", - // msr->entry, i); + // hax_log(HAX_LOGE, + // "Failed to set msr %x index %x\n", + // msr->entry, i); break; } } @@ -431,10 +431,10 @@ NTSTATUS HaxVcpuControl(PDEVICE_OBJECT DeviceObject, break; } default: - hax_error("Unknow vcpu ioctl %lx\n", - irpSp->Parameters.DeviceIoControl.IoControlCode); - hax_info("set regs ioctl %lx get regs %lx", HAX_VCPU_SET_REGS, - HAX_VCPU_GET_REGS ); + hax_log(HAX_LOGE, "Unknow vcpu ioctl %lx\n", + irpSp->Parameters.DeviceIoControl.IoControlCode); + hax_log(HAX_LOGI, "set regs ioctl %lx get regs %lx", + HAX_VCPU_SET_REGS, HAX_VCPU_GET_REGS ); ret = STATUS_INVALID_PARAMETER; break; } @@ -487,7 +487,8 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, vm_id = vm->vm_id; cvcpu = vcpu_create(cvm, vm, vcpu_id); if (!cvcpu) { - hax_info("Failed to create vcpu %x on vm %x\n", vcpu_id, vm_id); + hax_log(HAX_LOGI, "Failed to create vcpu %x on vm %x\n", + vcpu_id, vm_id); ret = STATUS_UNSUCCESSFUL; goto done; } @@ -501,8 +502,8 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, goto done; } info = (struct hax_alloc_ram_info *)inBuf; - hax_info("IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x," - " pad=0x%x\n", vm->vm_id, info->va, info->size, info->pad); + hax_log(HAX_LOGI, "IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x," + " pad=0x%x\n", vm->vm_id, info->va, info->size, info->pad); if (hax_vm_add_ramblock(cvm, info->va, info->size)) { ret = STATUS_UNSUCCESSFUL; } @@ -511,20 +512,20 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, case HAX_VM_IOCTL_ADD_RAMBLOCK: { struct hax_ramblock_info *info; if (inBufLength < sizeof(struct hax_ramblock_info)) { - hax_error("IOCTL_ADD_RAMBLOCK: inBufLength=%u < %u\n", - inBufLength, sizeof(struct hax_ramblock_info)); + hax_log(HAX_LOGE, "IOCTL_ADD_RAMBLOCK: inBufLength=%u < %u\n", + inBufLength, sizeof(struct hax_ramblock_info)); 
ret = STATUS_INVALID_PARAMETER; break; } info = (struct hax_ramblock_info *)inBuf; if (info->reserved) { - hax_error("IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", - vm->vm_id, info->reserved); + hax_log(HAX_LOGE, "IOCTL_ADD_RAMBLOCK: vm_id=%d, " + "reserved=0x%llx\n", vm->vm_id, info->reserved); ret = STATUS_INVALID_PARAMETER; break; } - hax_info("IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx," - " size=0x%llx\n", vm->vm_id, info->start_va, info->size); + hax_log(HAX_LOGI, "IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx," + " size=0x%llx\n", vm->vm_id, info->start_va, info->size); if (hax_vm_add_ramblock(cvm, info->start_va, info->size)) { ret = STATUS_UNSUCCESSFUL; } @@ -560,9 +561,9 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, } info = (struct hax_set_ram_info2 *)inBuf; if (info->reserved1 || info->reserved2) { - hax_error("IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x" - " reserved2=0xllx\n", - vm->vm_id, info->reserved1, info->reserved2); + hax_log(HAX_LOGE, "IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x" + " reserved2=0xllx\n", + vm->vm_id, info->reserved1, info->reserved2); ret = STATUS_INVALID_PARAMETER; break; } @@ -587,8 +588,8 @@ NTSTATUS HaxVmControl(PDEVICE_OBJECT DeviceObject, struct hax_vm_windows *ext, } info = (struct hax_protect_ram_info *)inBuf; if (info->reserved) { - hax_error("IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", - vm->vm_id, info->reserved); + hax_log(HAX_LOGE, "IOCTL_PROTECT_RAM: vm_id=%d, " + "reserved=0x%x\n", vm->vm_id, info->reserved); ret = STATUS_INVALID_PARAMETER; break; } @@ -693,7 +694,7 @@ NTSTATUS HaxDeviceControl(PDEVICE_OBJECT DeviceObject, } cvm = hax_create_vm(&vm_id); if (!cvm) { - hax_log_level(HAX_LOGE, "Failed to create the HAX VM\n"); + hax_log(HAX_LOGE, "Failed to create the HAX VM\n"); ret = STATUS_UNSUCCESSFUL; break; } @@ -703,8 +704,8 @@ NTSTATUS HaxDeviceControl(PDEVICE_OBJECT DeviceObject, break; default: ret = STATUS_INVALID_DEVICE_REQUEST; - hax_log_level(HAX_LOGE, "Invalid 
hax ioctl %x\n", - irpSp->Parameters.DeviceIoControl.IoControlCode); + hax_log(HAX_LOGE, "Invalid hax ioctl %x\n", + irpSp->Parameters.DeviceIoControl.IoControlCode); break; } done: @@ -744,7 +745,7 @@ VOID HaxUnloadDriver(__in PDRIVER_OBJECT DriverObject) RtlInitUnicodeString(&ntWin32NameString, DOS_DEVICE_NAME); IoDeleteSymbolicLink(&ntWin32NameString); IoDeleteDevice(HaxDeviceObject); - hax_info("Unload the driver\n"); + hax_log(HAX_LOGI, "Unload the driver\n"); hax_host_exit(); write_event(HaxDriverUnloaded, DriverObject, NULL, 0); HaxDeviceObject = NULL; diff --git a/platforms/windows/hax_host_mem.c b/platforms/windows/hax_host_mem.c index 769fa676..ee6a18d5 100644 --- a/platforms/windows/hax_host_mem.c +++ b/platforms/windows/hax_host_mem.c @@ -37,7 +37,7 @@ int hax_pin_user_pages(uint64_t start_uva, uint64_t size, hax_memdesc_user *memd PMDL pmdl = NULL; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } @@ -45,16 +45,16 @@ int hax_pin_user_pages(uint64_t start_uva, uint64_t size, hax_memdesc_user *memd pmdl = IoAllocateMdl((PVOID)start_uva, size, FALSE, FALSE, NULL); if (!pmdl) { - hax_error("Failed to allocate MDL for va: 0x%llx, size: 0x%llx.\n", - start_uva, size); + hax_log(HAX_LOGE, "Failed to allocate MDL for va: 0x%llx, " + "size: 0x%llx.\n", start_uva, size); return -EFAULT; } try { MmProbeAndLockPages(pmdl, UserMode, IoWriteAccess); } except (EXCEPTION_EXECUTE_HANDLER) { - hax_error("Failed to probe pages for guest's memory! va: 0x%llx\n", - start_uva); + hax_log(HAX_LOGE, "Failed to probe pages for guest's memory! 
" + "va: 0x%llx\n", start_uva); IoFreeMdl(pmdl); return -ENOMEM; } @@ -66,12 +66,12 @@ int hax_pin_user_pages(uint64_t start_uva, uint64_t size, hax_memdesc_user *memd int hax_unpin_user_pages(hax_memdesc_user *memdesc) { if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } if (!memdesc->pmdl) { - hax_error("%s: memdesc->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->pmdl == NULL\n", __func__); return -EINVAL; } @@ -89,12 +89,12 @@ uint64_t hax_get_pfn_user(hax_memdesc_user *memdesc, uint64_t uva_offset) uint64_t len; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return INVALID_PFN; } if (!memdesc->pmdl) { - hax_error("%s: memdesc->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->pmdl == NULL\n", __func__); return INVALID_PFN; } @@ -102,15 +102,15 @@ uint64_t hax_get_pfn_user(hax_memdesc_user *memdesc, uint64_t uva_offset) len = MmGetMdlByteCount(pmdl); if (uva_offset >= len) { - hax_error("The uva_offset 0x%llx exceeds the buffer length 0x%llx.\n", - uva_offset, len); + hax_log(HAX_LOGE, "The uva_offset 0x%llx exceeds the buffer " + "length 0x%llx.\n", uva_offset, len); return INVALID_PFN; } ppfn = MmGetMdlPfnArray(pmdl); if (NULL == ppfn) { - hax_error("Get MDL pfn array failed. uva_offset: 0x%llx.\n", - uva_offset); + hax_log(HAX_LOGE, "Get MDL pfn array failed. 
uva_offset: 0x%llx.\n", + uva_offset); return INVALID_PFN; } @@ -127,15 +127,15 @@ void * hax_map_user_pages(hax_memdesc_user *memdesc, uint64_t uva_offset, PVOID kva; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return NULL; } if (!memdesc->pmdl) { - hax_error("%s: memdesc->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->pmdl == NULL\n", __func__); return NULL; } if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return NULL; } @@ -147,8 +147,9 @@ void * hax_map_user_pages(hax_memdesc_user *memdesc, uint64_t uva_offset, uva_offset_high = (uva_offset + size + PAGE_SIZE_4K - 1) & pgmask(PG_ORDER_4K); if (uva_offset_high > base_size) { - hax_error("%s: Invalid UVA subrange: uva_offset=0x%llx, size=0x%llx," - " base_size=0x%llx\n", __func__, uva_offset, size, base_size); + hax_log(HAX_LOGE, "%s: Invalid UVA subrange: uva_offset=0x%llx, " + "size=0x%llx, base_size=0x%llx\n", __func__, uva_offset, + size, base_size); return NULL; } @@ -161,17 +162,16 @@ void * hax_map_user_pages(hax_memdesc_user *memdesc, uint64_t uva_offset, // Create a new MDL for the UVA subrange pmdl = IoAllocateMdl((PVOID)start_uva, size, FALSE, FALSE, NULL); if (!pmdl) { - hax_error("%s: Failed to create MDL for UVA subrange: start_uva=0x%llx," - " size=0x%llx\n", __func__, start_uva, size); + hax_log(HAX_LOGE, "%s: Failed to create MDL for UVA subrange: " + "start_uva=0x%llx, size=0x%llx\n", __func__, start_uva, size); return NULL; } // Associate the new MDL with the existing MDL IoBuildPartialMdl(memdesc->pmdl, pmdl, (PVOID)start_uva, size); kva = MmGetSystemAddressForMdlSafe(pmdl, NormalPagePriority); if (!kva) { - hax_error("%s: Failed to create KVA mapping for UVA subrange:" - " start_uva=0x%llx, size=0x%llx\n", __func__, start_uva, - size); + hax_log(HAX_LOGE, "%s: Failed to create KVA mapping for UVA subrange:" + " start_uva=0x%llx, 
size=0x%llx\n", __func__, start_uva, size); IoFreeMdl(pmdl); return NULL; } @@ -182,11 +182,11 @@ void * hax_map_user_pages(hax_memdesc_user *memdesc, uint64_t uva_offset, int hax_unmap_user_pages(hax_kmap_user *kmap) { if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return -EINVAL; } if (!kmap->pmdl) { - hax_error("%s: kmap->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap->pmdl == NULL\n", __func__); return -EINVAL; } @@ -204,10 +204,10 @@ int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) // TODO: Support HAX_PAGE_ALLOC_BELOW_4G if (flags & HAX_PAGE_ALLOC_BELOW_4G) { - hax_warning("%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); + hax_log(HAX_LOGW, "%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); } if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } @@ -223,7 +223,7 @@ int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) pmdl = MmAllocatePagesForMdlEx(low_addr, high_addr, skip_bytes, PAGE_SIZE_4K, MmCached, options); if (!pmdl) { - hax_error("%s: Failed to allocate 4KB of nonpaged memory\n", __func__); + hax_log(HAX_LOGE, "%s: Failed to allocate 4KB of nonpaged memory\n", __func__); return -ENOMEM; } memdesc->pmdl = pmdl; @@ -233,11 +233,11 @@ int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) int hax_free_page_frame(hax_memdesc_phys *memdesc) { if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return -EINVAL; } if (!memdesc->pmdl) { - hax_error("%s: memdesc->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->pmdl == NULL\n", __func__); return -EINVAL; } @@ -252,17 +252,17 @@ uint64_t hax_get_pfn_phys(hax_memdesc_phys *memdesc) PPFN_NUMBER pfns; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return 
INVALID_PFN; } if (!memdesc->pmdl) { - hax_error("%s: memdesc->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->pmdl == NULL\n", __func__); return INVALID_PFN; } pfns = MmGetMdlPfnArray(memdesc->pmdl); if (!pfns) { - hax_error("%s: MmGetMdlPfnArray() failed\n", __func__); + hax_log(HAX_LOGE, "%s: MmGetMdlPfnArray() failed\n", __func__); return INVALID_PFN; } return pfns[0]; @@ -273,17 +273,18 @@ void * hax_get_kva_phys(hax_memdesc_phys *memdesc) PVOID kva; if (!memdesc) { - hax_error("%s: memdesc == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc == NULL\n", __func__); return NULL; } if (!memdesc->pmdl) { - hax_error("%s: memdesc->pmdl == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: memdesc->pmdl == NULL\n", __func__); return NULL; } kva = MmGetSystemAddressForMdlSafe(memdesc->pmdl, NormalPagePriority); if (!kva) { - hax_error("%s: MmGetSystemAddressForMdlSafe() failed\n", __func__); + hax_log(HAX_LOGE, "%s: MmGetSystemAddressForMdlSafe() failed\n", + __func__); return NULL; } return kva; @@ -295,7 +296,7 @@ void * hax_map_page_frame(uint64_t pfn, hax_kmap_phys *kmap) PVOID kva; if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return NULL; } @@ -308,7 +309,7 @@ void * hax_map_page_frame(uint64_t pfn, hax_kmap_phys *kmap) int hax_unmap_page_frame(hax_kmap_phys *kmap) { if (!kmap) { - hax_error("%s: kmap == NULL\n", __func__); + hax_log(HAX_LOGE, "%s: kmap == NULL\n", __func__); return -EINVAL; } if (!kmap->kva) { diff --git a/platforms/windows/hax_mem_alloc.c b/platforms/windows/hax_mem_alloc.c index e7399633..05db2dba 100644 --- a/platforms/windows/hax_mem_alloc.c +++ b/platforms/windows/hax_mem_alloc.c @@ -84,7 +84,7 @@ void * hax_vmap(hax_pa_t pa, uint32_t size) phys_addr.QuadPart = pa; if ((pa & (PAGE_SIZE - 1)) + size > PAGE_SIZE) { - hax_warning("hax_vmap can't handle cross-page case!\n"); + hax_log(HAX_LOGW, "hax_vmap can't handle cross-page case!\n"); return NULL; } return 
MmMapIoSpace(phys_addr, size, MmCached); diff --git a/platforms/windows/hax_mm.c b/platforms/windows/hax_mm.c index 05886864..87e71656 100644 --- a/platforms/windows/hax_mm.c +++ b/platforms/windows/hax_mm.c @@ -101,14 +101,14 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, if (flags & HAX_VCPUMEM_VALIDVA) { pmdl = IoAllocateMdl((void*)uva, size, FALSE, FALSE, NULL); if (!pmdl) { - hax_error("Failed to allocate memory for va: %llx\n", uva); + hax_log(HAX_LOGE, "Failed to allocate memory for va: %llx\n", uva); goto fail; } try { MmProbeAndLockPages(pmdl, UserMode, IoReadAccess|IoWriteAccess); } except (EXCEPTION_EXECUTE_HANDLER) { - hax_error("Failed to probe pages for guest's memory!\n"); + hax_log(HAX_LOGE, "Failed to probe pages for guest's memory!\n"); IoFreeMdl(pmdl); pmdl = NULL; goto fail; @@ -118,7 +118,7 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, #ifdef _WIN64 mem->kva = MmGetSystemAddressForMdlSafe(pmdl, NormalPagePriority); if (!mem->kva) { - hax_error("Failed to map the pmdl to system address!\n"); + hax_log(HAX_LOGE, "Failed to map the pmdl to system address!\n"); goto fail; } #else @@ -137,10 +137,10 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, #endif if (!pmdl || MmGetMdlByteCount(pmdl) < size) { - hax_error("Failed to alloate pmdl!\n"); + hax_log(HAX_LOGE, "Failed to alloate pmdl!\n"); if (pmdl) - DbgPrint("allocated size:%d, size:%d\n", - MmGetMdlByteCount(pmdl), size); + hax_log(HAX_LOGD, "allocated size:%d, size:%d\n", + MmGetMdlByteCount(pmdl), size); goto fail; } @@ -149,17 +149,17 @@ int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size, FALSE, NormalPagePriority); if (!mem->uva) { - hax_error("Failed to map tunnel to user space\n"); + hax_log(HAX_LOGE, "Failed to map tunnel to user space\n"); goto fail; } mem->kva = MmMapLockedPagesSpecifyCache(pmdl, KernelMode, MmCached, NULL, FALSE, NormalPagePriority); if (!mem->kva) 
{ - hax_error("Failed to map tunnel to kernel space\n"); + hax_log(HAX_LOGE, "Failed to map tunnel to kernel space\n"); goto fail; } - hax_debug("kva %llx va %llx\n", (uint64_t)mem->kva, mem->uva); + hax_log(HAX_LOGD, "kva %llx va %llx\n", (uint64_t)mem->kva, mem->uva); } mem->size = size; mem->hinfo = hinfo; @@ -191,7 +191,7 @@ uint64_t get_hpfn_from_pmem(struct hax_vcpu_mem *pmem, uint64_t va) kva = (uint64_t)pmem->kva + (va - pmem->uva); kphys = MmGetPhysicalAddress((PVOID)kva); if (kphys.QuadPart == 0) - hax_error("kva phys is 0\n"); + hax_log(HAX_LOGE, "kva phys is 0\n"); else return kphys.QuadPart >> PAGE_SHIFT; } else { @@ -241,7 +241,7 @@ uint64_t hax_get_memory_threshold(void) if (NT_SUCCESS(status)) { result = (uint64_t)memlimit_megs << 20; - hax_info("%s: result = 0x%x\n", __func__, result); + hax_log(HAX_LOGI, "%s: result = 0x%x\n", __func__, result); } return result; diff --git a/platforms/windows/hax_wrapper.c b/platforms/windows/hax_wrapper.c index ced9996f..4176a063 100644 --- a/platforms/windows/hax_wrapper.c +++ b/platforms/windows/hax_wrapper.c @@ -31,20 +31,9 @@ #include "hax_win.h" #include "../../core/include/ia32.h" -int default_hax_log_level = 3; int max_cpus; hax_cpumap_t cpu_online_map; -int hax_log_level(int level, const char *fmt, ...) 
-{ - va_list arglist; - va_start(arglist, fmt); - - if (level >= default_hax_log_level) - vDbgPrintExWithPrefix("haxm: ", -1, 0, fmt, arglist); - return 0; -} - uint32_t hax_cpuid() { return KeGetCurrentProcessorNumber(); @@ -107,7 +96,7 @@ int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *), void * par event_result = KeWaitForSingleObject(&dpc_event, Executive, KernelMode, FALSE, NULL); if (event_result!= STATUS_SUCCESS) { - hax_error("Failed to get the smp_call event object\n"); + hax_log(HAX_LOGE, "Failed to get the smp_call event object\n"); return -1; } @@ -127,16 +116,16 @@ int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *), void * par cur_dpc = smpc_dpcs + i; result = KeInsertQueueDpc(cur_dpc, &done, sp); if (result != TRUE) - hax_error("Failed to insert queue on CPU %x\n", i); + hax_log(HAX_LOGE, "Failed to insert queue on CPU %x\n", i); } /* Delay 100 ms */ delay.QuadPart = 100 * -1 *((LONGLONG) 1 * 10 * 1000); if (KeDelayExecutionThread( KernelMode, TRUE, &delay ) != STATUS_SUCCESS) - hax_error("Delay execution is not success\n"); + hax_log(HAX_LOGE, "Delay execution is not success\n"); if (done != *cpus) - hax_error("sm call function is not called in all required CPUs\n"); + hax_log(HAX_LOGE, "sm call function is not called in all required CPUs\n"); KeSetEvent(&dpc_event, 0, FALSE); @@ -237,47 +226,40 @@ void hax_disable_irq(void) asm_disable_irq(); } -void hax_error(char *fmt, ...) -{ - va_list arglist; - - va_start(arglist, fmt); - if (HAX_LOGE >= default_hax_log_level) - vDbgPrintExWithPrefix("haxm_error:", -1, 0, fmt, arglist); -} - -void hax_warning(char *fmt, ...) 
-{ - va_list arglist; - va_start(arglist, fmt); +static const int kLogLevel[] = { + DPFLTR_ERROR_LEVEL, + DPFLTR_TRACE_LEVEL, // HAX_LOGD + DPFLTR_INFO_LEVEL, // HAX_LOGI + DPFLTR_WARNING_LEVEL, // HAX_LOGW + DPFLTR_ERROR_LEVEL, // HAX_LOGE + DPFLTR_ERROR_LEVEL // HAX_LOGPANIC +}; - if (HAX_LOGW >= default_hax_log_level) - vDbgPrintExWithPrefix("haxm_warning:", -1, 0, fmt, arglist); -} +static const char* kLogPrefix[] = { + "haxm: ", + "haxm_debug: ", + "haxm_info: ", + "haxm_warning: ", + "haxm_error: ", + "haxm_panic: " +}; -void hax_info(char *fmt, ...) +void hax_log(int level, const char *fmt, ...) { va_list arglist; va_start(arglist, fmt); - if (HAX_LOGI >= default_hax_log_level) - vDbgPrintExWithPrefix("haxm_info:", -1, 0, fmt, arglist); -} - -void hax_debug(char *fmt, ...) -{ - va_list arglist; - va_start(arglist, fmt); + if (level >= HAX_LOG_DEFAULT) + vDbgPrintExWithPrefix(kLogPrefix[level], DPFLTR_IHVDRIVER_ID, + kLogLevel[level], fmt, arglist); - if (HAX_LOGD >= default_hax_log_level) - vDbgPrintExWithPrefix("haxm_debug:", -1, 0, fmt, arglist); + va_end(arglist); } -void hax_panic_vcpu(struct vcpu_t *v, char *fmt, ...) +void hax_panic(const char *fmt, ...) { va_list arglist; - va_start(arglist, fmt); - vDbgPrintExWithPrefix("haxm_panic:", -1, 0, fmt, arglist); - vcpu_set_panic(v); + hax_log(HAX_LOGPANIC, fmt, arglist); + va_end(arglist); }