@@ -386,16 +386,17 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_msi_entry msi;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
-	if (target_vm == NULL) {
-		return -1;
-	}
-
-	(void)memset((void *)&msi, 0U, sizeof(msi));
-	if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
+	if (target_vm != NULL) {
+		(void)memset((void *)&msi, 0U, sizeof(msi));
+		if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
+			pr_err("%s: Unable copy param to vm\n", __func__);
+			ret = -1;
+		} else {
+			ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);
+		}
+	} else {
+		ret = -1;
 	}
-	ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);
 
 	return ret;
 }
@@ -420,35 +421,33 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
 	struct acrn_set_ioreq_buffer iobuf;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 	uint16_t i;
-
-	if (target_vm == NULL) {
-		return -1;
-	}
+	int32_t ret;
 
 	(void)memset((void *)&iobuf, 0U, sizeof(iobuf));
+	if ((target_vm == NULL) || (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0)) {
+		pr_err("%p %s: target_vm is not valid or Unable copy param to vm\n", target_vm, __func__);
+		ret = -1;
+	} else {
+		dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%p",
+			vmid, iobuf.req_buf);
 
-	if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
-	}
-
-	dev_dbg(ACRN_DBG_HYCALL, "[%d] SET BUFFER=0x%p",
-		vmid, iobuf.req_buf);
-
-	hpa = gpa2hpa(vm, iobuf.req_buf);
-	if (hpa == INVALID_HPA) {
-		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
-			__func__, vm->vm_id, iobuf.req_buf);
-		target_vm->sw.io_shared_page = NULL;
-		return -EINVAL;
-	}
+		hpa = gpa2hpa(vm, iobuf.req_buf);
+		if (hpa == INVALID_HPA) {
+			pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
+				__func__, vm->vm_id, iobuf.req_buf);
+			target_vm->sw.io_shared_page = NULL;
+			ret = -EINVAL;
+		} else {
+			target_vm->sw.io_shared_page = hpa2hva(hpa);
+			for (i = 0U; i < VHM_REQUEST_MAX; i++) {
+				set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
+			}
 
-	target_vm->sw.io_shared_page = hpa2hva(hpa);
-	for (i = 0U; i < VHM_REQUEST_MAX; i++) {
-		set_vhm_req_state(target_vm, i, REQ_STATE_FREE);
+			ret = 0;
+		}
 	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -466,26 +465,28 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
 {
 	struct acrn_vcpu *vcpu;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
+	int32_t ret;
 
 	/* make sure we have set req_buf */
 	if ((target_vm == NULL) || (target_vm->sw.io_shared_page == NULL)) {
 		pr_err("%s, invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
+		ret = -EINVAL;
+	} else {
+		dev_dbg(ACRN_DBG_HYCALL, "[%d] NOTIFY_FINISH for vcpu %d",
 			vmid, vcpu_id);
 
-	if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
-		pr_err("%s, failed to get VCPU %d context from VM %d\n",
-			__func__, vcpu_id, target_vm->vm_id);
-		return -EINVAL;
+		if (vcpu_id >= CONFIG_MAX_VCPUS_PER_VM) {
+			pr_err("%s, failed to get VCPU %d context from VM %d\n",
+				__func__, vcpu_id, target_vm->vm_id);
+			ret = -EINVAL;
+		} else {
+			vcpu = vcpu_from_vid(target_vm, vcpu_id);
+			emulate_io_post(vcpu);
+			ret = 0;
+		}
 	}
 
-	vcpu = vcpu_from_vid(target_vm, vcpu_id);
-	emulate_io_post(vcpu);
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -628,31 +629,34 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
 	uint64_t hpa, base_paddr;
 	uint64_t prot_set;
 	uint64_t prot_clr;
+	int32_t ret;
 
 	hpa = gpa2hpa(vm, wp->gpa);
 	if (hpa == INVALID_HPA) {
 		pr_err("%s,vm[%hu] gpa 0x%llx,GPA is unmapping.",
			__func__, vm->vm_id, wp->gpa);
-		return -EINVAL;
-	}
-	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
-		vm->vm_id, wp->gpa, hpa);
-
-	base_paddr = get_hv_image_base();
-	if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
-		((hpa >= base_paddr) &&
-		(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
-		pr_err("%s: overlap the HV memory region.", __func__);
-		return -EINVAL;
-	}
+		ret = -EINVAL;
+	} else {
+		dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
+			vm->vm_id, wp->gpa, hpa);
 
-	prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
-	prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
+		base_paddr = get_hv_image_base();
+		if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
+			((hpa >= base_paddr) &&
+			(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
+			pr_err("%s: overlap the HV memory region.", __func__);
+			ret = -EINVAL;
+		} else {
+			prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
+			prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
 
-	ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
-		wp->gpa, PAGE_SIZE, prot_set, prot_clr);
+			ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+				wp->gpa, PAGE_SIZE, prot_set, prot_clr);
+			ret = 0;
+		}
+	}
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -670,24 +674,23 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
 {
 	struct wp_data wp;
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
+	int32_t ret;
 
-	if (target_vm == NULL) {
-		return -EINVAL;
-	}
-
-	if (is_vm0(target_vm)) {
-		pr_err("%s: Targeting to service vm", __func__);
-		return -EINVAL;
-	}
-
-	(void)memset((void *)&wp, 0U, sizeof(wp));
+	if ((target_vm == NULL) || is_vm0(target_vm)) {
+		pr_err("%p %s: target_vm is invalid or Targeting to service vm", target_vm, __func__);
+		ret = -EINVAL;
+	} else {
+		(void)memset((void *)&wp, 0U, sizeof(wp));
 
-	if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) != 0) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -EFAULT;
+		if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) != 0) {
+			pr_err("%s: Unable copy param to vm\n", __func__);
+			ret = -EFAULT;
+		} else {
+			ret = write_protect_page(target_vm, &wp);
+		}
 	}
 
-	return write_protect_page(target_vm, &wp);
+	return ret;
 }
 
 /**