Commit f680ed5

Shawnshhwenlingz
authored and committed
hv: hypercall: fix simple violations of "Procedure has more than one exit point"
This patch fixes the violations by restructuring the affected procedures into the "if ... else ..." format.

V1->V2: merge branches.

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
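For context: the rule named in the title (a MISRA-C-style "Procedure has more than one exit point" check) is satisfied by replacing early returns with a local status variable and an "if ... else ..." chain, so each procedure returns exactly once. Below is a minimal sketch of the pattern, illustrative only; validate(), do_work(), and the handle_*() functions are hypothetical names, not part of this patch:

/*
 * Illustrative only -- not from the commit. Shows the refactor this
 * patch applies: replace early returns with a status variable so the
 * procedure has exactly one exit point.
 */
#include <stdint.h>

#define EINVAL 22	/* stand-in for the hypervisor's error code */

static int32_t validate(int32_t arg) { return (arg < 0) ? -1 : 0; }
static int32_t do_work(int32_t arg)  { return arg; }

/* Before: two exit points. */
static int32_t handle_old(int32_t arg)
{
	if (validate(arg) != 0) {
		return -EINVAL;		/* exit point #1 */
	}

	return do_work(arg);		/* exit point #2 */
}

/* After: one exit point, mirroring this patch. */
static int32_t handle_new(int32_t arg)
{
	int32_t ret;

	if (validate(arg) != 0) {
		ret = -EINVAL;
	} else {
		ret = do_work(arg);
	}

	return ret;			/* single exit point */
}

The "V1->V2: merge branches" note refers to collapsing consecutive early-exit checks into a single condition, as the patch does in hcall_set_ioreq_buffer and hcall_write_protect_page with "(target_vm == NULL) || ...".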
1 parent 5ebaaaf commit f680ed5

File tree

1 file changed (+78, -75 lines)


hypervisor/common/hypercall.c

Lines changed: 78 additions & 75 deletions
@@ -386,16 +386,17 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_msi_entry msi;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (target_vm == NULL) {
-		return -1;
-	}
-
-	(void)memset((void *)&msi, 0U, sizeof(msi));
-	if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
+	if (target_vm != NULL) {
+		(void)memset((void *)&msi, 0U, sizeof(msi));
+		if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
+			pr_err("%s: Unable copy param to vm\n", __func__);
+			ret = -1;
+		} else {
+			ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);
+		}
+	} else {
+		ret = -1;
 	}
-	ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);
 
 	return ret;
 }
@@ -420,35 +421,33 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
 	struct acrn_set_ioreq_buffer iobuf;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 	uint16_t i;
-
-	if (target_vm == NULL) {
-		return -1;
-	}
+	int32_t ret;
 
 	(void)memset((void *)&iobuf, 0U, sizeof(iobuf));
+	if ((target_vm == NULL) || (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0)) {
+		pr_err("%p %s: target_vm is not valid or Unable copy param to vm\n", target_vm, __func__);
+		ret = -1;
+	} else {
+		dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%p",
+			vmid, iobuf.req_buf);
 
-	if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
-	}
-
-	dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%p",
-		vmid, iobuf.req_buf);
-
-	hpa = gpa2hpa(vm, iobuf.req_buf);
-	if (hpa == INVALID_HPA) {
-		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
-			__func__, vm->vm_id, iobuf.req_buf);
-		target_vm->sw.io_shared_page = NULL;
-		return -EINVAL;
-	}
+		hpa = gpa2hpa(vm, iobuf.req_buf);
+		if (hpa == INVALID_HPA) {
+			pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+				__func__, vm->vm_id, iobuf.req_buf);
+			target_vm->sw.io_shared_page = NULL;
+			ret = -EINVAL;
+		} else {
+			target_vm->sw.io_shared_page = hpa2hva(hpa);
+			for (i = 0U; i < VHM_REQUEST_MAX; i++) {
+				set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
+			}
 
-	target_vm->sw.io_shared_page = hpa2hva(hpa);
-	for (i = 0U; i < VHM_REQUEST_MAX; i++) {
-		set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
+			ret = 0;
+		}
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -466,26 +465,28 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
 {
 	struct acrn_vcpu *vcpu;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
+	int32_t ret;
 
 	/* make sure we have set req_buf */
 	if ((target_vm == NULL) || (target_vm->sw.io_shared_page == NULL)) {
 		pr_err("%s, invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
+		ret = -EINVAL;
+	} else {
+		dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
			vmid, vcpu_id);
 
-	if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
-		pr_err("%s, failed to get VCPU %d context from VM %d\n",
-			__func__, vcpu_id, target_vm->vm_id);
-		return -EINVAL;
+		if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
+			pr_err("%s, failed to get VCPU %d context from VM %d\n",
+				__func__, vcpu_id, target_vm->vm_id);
+			ret = -EINVAL;
+		} else {
+			vcpu = vcpu_from_vid(target_vm, vcpu_id);
+			emulate_io_post(vcpu);
+			ret = 0;
+		}
 	}
 
-	vcpu = vcpu_from_vid(target_vm, vcpu_id);
-	emulate_io_post(vcpu);
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -628,31 +629,34 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
 	uint64_t hpa, base_paddr;
 	uint64_t prot_set;
 	uint64_t prot_clr;
+	int32_t ret;
 
 	hpa = gpa2hpa(vm, wp->gpa);
 	if (hpa == INVALID_HPA) {
 		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
 			__func__, vm->vm_id, wp->gpa);
-		return -EINVAL;
-	}
-	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
-		vm->vm_id, wp->gpa, hpa);
-
-	base_paddr = get_hv_image_base();
-	if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
-		((hpa >= base_paddr) &&
-		(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
-		pr_err("%s: overlap the HV memory region.", __func__);
-		return -EINVAL;
-	}
+		ret = -EINVAL;
+	} else {
+		dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
+			vm->vm_id, wp->gpa, hpa);
 
-	prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
-	prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
+		base_paddr = get_hv_image_base();
+		if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
+			((hpa >= base_paddr) &&
+			(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
+			pr_err("%s: overlap the HV memory region.", __func__);
+			ret = -EINVAL;
+		} else {
+			prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
+			prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
 
-	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-		wp->gpa, PAGE_SIZE, prot_set, prot_clr);
+			ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+				wp->gpa, PAGE_SIZE, prot_set, prot_clr);
+			ret = 0;
+		}
+	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -670,24 +674,23 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
 {
 	struct wp_data wp;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
+	int32_t ret;
 
-	if (target_vm == NULL) {
-		return -EINVAL;
-	}
-
-	if (is_vm0(target_vm)) {
-		pr_err("%s: Targeting to service vm", __func__);
-		return -EINVAL;
-	}
-
-	(void)memset((void *)&wp, 0U, sizeof(wp));
+	if ((target_vm == NULL) || is_vm0(target_vm)) {
+		pr_err("%p %s: target_vm is invalid or Targeting to service vm", target_vm, __func__);
+		ret = -EINVAL;
+	} else {
+		(void)memset((void *)&wp, 0U, sizeof(wp));
 
-	if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -EFAULT;
+		if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) != 0) {
+			pr_err("%s: Unable copy param to vm\n", __func__);
+			ret = -EFAULT;
+		} else {
+			ret = write_protect_page(target_vm, &wp);
+		}
 	}
 
-	return write_protect_page(target_vm, &wp);
+	return ret;
 }
 
 /**
