@@ -30,7 +30,7 @@ bool is_hypercall_from_ring0(void)
 	cs_sel = exec_vmread16(VMX_GUEST_CS_SEL);
 	/* cs_selector[1:0] is CPL */
 	if ((cs_sel & 0x3U) == 0U) {
-	        ret = true;
+		ret = true;
 	} else {
 		ret = false;
 	}
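
For context, `cs_sel & 0x3U` works because the low two bits of a segment selector hold the RPL, which for CS equals the current privilege level. A standalone sketch of the same check (the selector values are illustrative only):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bits [1:0] of a segment selector are the RPL; for CS this equals
 * the CPL, so ring 0 means (selector & 0x3U) == 0U. */
static bool is_ring0(uint16_t cs_sel)
{
	return (cs_sel & 0x3U) == 0U;
}

int main(void)
{
	printf("0x0008 -> %d\n", is_ring0(0x0008U)); /* typical kernel CS: ring 0 */
	printf("0x0033 -> %d\n", is_ring0(0x0033U)); /* typical user CS: ring 3 */
	return 0;
}
```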
@@ -90,15 +90,8 @@ int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param)
 
 	version.major_version = HV_API_MAJOR_VERSION;
 	version.minor_version = HV_API_MINOR_VERSION;
-	int32_t ret;
-
-	if (copy_to_gpa(vm, &version, param, sizeof(version)) != 0) {
-		ret = -1;
-	} else {
-		ret = 0;
-	}
 
-	return ret;
+	return copy_to_gpa(vm, &version, param, sizeof(version));
 }
 
 /**
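
The shape of this refactor, isolated as a minimal sketch: rather than testing `copy_to_gpa()` and re-encoding its outcome as 0/-1, the handler forwards the helper's return value directly, which still satisfies a "0 on success, non-zero on error" contract. The `copy_to_gpa()` below is a stub standing in for ACRN's real helper (which has a different signature), and the version numbers are placeholders:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct hc_api_version {
	uint32_t major_version;
	uint32_t minor_version;
};

/* Stub standing in for ACRN's copy_to_gpa(): 0 on success, non-zero
 * on failure. Here it just copies into a caller-supplied buffer. */
static int32_t copy_to_gpa(void *dst, const void *src, size_t len)
{
	int32_t ret = -1;

	if (dst != NULL) {
		(void)memcpy(dst, src, len);
		ret = 0;
	}
	return ret;
}

/* After the refactor: no intermediate ret, no 0/-1 re-encoding. */
static int32_t get_api_version(void *param)
{
	struct hc_api_version version = { 1U, 0U }; /* placeholder values */

	return copy_to_gpa(param, &version, sizeof(version));
}
```

The observable behavior only stays identical if callers treat every non-zero return as failure, which is what the updated `@return` wording in the next hunk documents.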
@@ -111,21 +104,17 @@ int32_t hcall_get_api_version(struct acrn_vm *vm, uint64_t param)
  * @param param GPA pointer to struct hc_platform_info.
  *
  * @pre Pointer vm shall point to SOS_VM
- * @return 0 on success, -1 in case of error.
+ * @return 0 on success, non zero in case of error.
  */
 int32_t hcall_get_platform_info(struct acrn_vm *vm, uint64_t param)
 {
-	int32_t ret = 0;
 	struct hc_platform_info platform_info;
 
 	platform_info.cpu_num = get_pcpu_nums();
 	platform_info.max_vcpus_per_vm = MAX_VCPUS_PER_VM;
 	platform_info.max_kata_containers = CONFIG_MAX_KATA_VM_NUM;
-	if (copy_to_gpa(vm, &platform_info, param, sizeof(platform_info)) != 0) {
-		ret = -1;
-	}
 
-	return ret;
+	return copy_to_gpa(vm, &platform_info, param, sizeof(platform_info));
 }
 
 /**
@@ -163,27 +152,19 @@ int32_t hcall_create_vm(struct acrn_vm *vm, uint64_t param)
 			if (((vm_config->guest_flags & GUEST_FLAG_LAPIC_PASSTHROUGH) != 0U)
 					&& ((vm_config->guest_flags & GUEST_FLAG_RT) == 0U)) {
 				pr_err("Wrong guest flags 0x%lx\n", vm_config->guest_flags);
-				ret = -1;
 			} else {
-				ret = create_vm(vm_id, vm_config, &target_vm);
-				if (ret != 0) {
+				if (create_vm(vm_id, vm_config, &target_vm) != 0) {
 					dev_dbg(DBG_LEVEL_HYCALL, "HCALL: Create VM failed");
 					cv.vmid = ACRN_INVALID_VMID;
-					ret = -1;
 				} else {
 					/* return a relative vm_id from SOS view */
 					cv.vmid = vmid_2_rel_vmid(vm->vm_id, vm_id);
 					cv.vcpu_num = vm_config->vcpu_num;
-					ret = 0;
 				}
 
-				if (copy_to_gpa(vm, &cv, param, sizeof(cv)) != 0) {
-					ret = -1;
-				}
+				ret = copy_to_gpa(vm, &cv, param, sizeof(cv));
 			}
 		}
-	} else {
-		ret = -1;
 	}
 
 	return ret;
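
Dropping the explicit `ret = -1;` assignments, including the whole trailing `} else { ret = -1; }`, is only behavior-preserving if `ret` already holds the error value when these branches run; presumably the function initializes `ret` to -1 before this block, so only the success paths need to store to it. A minimal sketch of that idiom, with hypothetical stub checks in place of the real validation and creation steps:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the real validation and creation logic. */
static bool param_ok(uint64_t param) { return param != 0U; }
static int32_t do_create(uint64_t param) { (void)param; return 0; }

static int32_t handler(uint64_t param)
{
	int32_t ret = -1; /* default to failure; only success paths overwrite it */

	if (param_ok(param)) {
		ret = do_create(param);
	}
	/* no else branch needed: ret is already -1 */

	return ret;
}
```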
@@ -281,7 +262,7 @@ int32_t hcall_reset_vm(uint16_t vmid)
 
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
 		/* TODO: check target_vm guest_flags */
-	        ret = reset_vm(target_vm);
+		ret = reset_vm(target_vm);
 	}
 	return ret;
 }
@@ -357,7 +338,7 @@ int32_t hcall_set_irqline(const struct acrn_vm *vm, uint16_t vmid,
 			 */
 			irq_pic = (ops->gsi == 2U) ? 0U : ops->gsi;
 			vpic_set_irqline(vm_pic(target_vm), irq_pic, ops->op);
-	        }
+		}
 
 		/* handle IOAPIC irqline */
 		vioapic_set_irqline_lock(target_vm, ops->gsi, ops->op);
@@ -411,7 +392,7 @@ static void inject_msi_lapic_pt(struct acrn_vm *vm, const struct acrn_msi_entry
 	icr.bits.dest_field = dest;
 	icr.bits.vector = vmsi_data.bits.vector;
 	icr.bits.delivery_mode = vmsi_data.bits.delivery_mode;
-	icr.bits.destination_mode = MSI_ADDR_DESTMODE_LOGICAL ;
+	icr.bits.destination_mode = MSI_ADDR_DESTMODE_LOGICAL;
 
 	msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
 	dev_dbg(DBG_LEVEL_LAPICPT, "%s: icr.value 0x%016lx", __func__, icr.value);
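
The `icr` being filled in here is the 64-bit x2APIC Interrupt Command Register, written in one shot via `msr_write()`. A sketch of the kind of bitfield union involved, with field widths taken from the Intel SDM's x2APIC ICR layout (ACRN's actual `union apic_icr` may name or group the fields differently):

```c
#include <stdint.h>

/* Sketch of a 64-bit x2APIC ICR per the Intel SDM layout; ACRN's own
 * union apic_icr may differ in naming. */
union apic_icr {
	uint64_t value;
	struct {
		uint64_t vector           : 8;  /* bits 7:0 */
		uint64_t delivery_mode    : 3;  /* bits 10:8 */
		uint64_t destination_mode : 1;  /* bit 11: 0 = physical, 1 = logical */
		uint64_t reserved_1       : 2;  /* bits 13:12 */
		uint64_t level            : 1;  /* bit 14 */
		uint64_t trigger_mode     : 1;  /* bit 15 */
		uint64_t reserved_2       : 16; /* bits 31:16 */
		uint64_t dest_field       : 32; /* bits 63:32: target APIC ID */
	} bits;
};
```

Because the whole register is a single MSR in x2APIC mode, composing the fields first and then issuing one `msr_write()` is the natural pattern, which is exactly what the surrounding code does.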
@@ -439,8 +420,7 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
 		struct acrn_msi_entry msi;
 
-		if (copy_from_gpa(vm, &msi, param, sizeof(msi)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &msi, param, sizeof(msi)) == 0) {
 			/* For target cpu with lapic pt, send ipi instead of injection via vlapic */
 			if (is_lapic_pt_configured(target_vm)) {
 				enum vm_vlapic_state vlapic_state = check_vm_vlapic_state(target_vm);
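
This and several following hunks apply the same mechanical rewrite: the empty-then-branch form `if (copy_from_gpa(...) != 0) { } else { ... }` becomes the equivalent, more direct `if (copy_from_gpa(...) == 0) { ... }`. The transformation in isolation, using a stub in place of the real `copy_from_gpa()`:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stub for copy_from_gpa(): 0 on success, non-zero on failure. */
static int32_t copy_from_gpa(void *dst, const void *src, size_t len)
{
	int32_t ret = -1;

	if (src != NULL) {
		(void)memcpy(dst, src, len);
		ret = 0;
	}
	return ret;
}

static void handle(const void *param)
{
	uint32_t msi_payload = 0U; /* stand-in for struct acrn_msi_entry */

	/* Before: if (copy_from_gpa(...) != 0) { } else { ...use payload... }
	 * After: the empty branch is gone and the condition is inverted. */
	if (copy_from_gpa(&msi_payload, param, sizeof(msi_payload)) == 0) {
		/* ...use msi_payload... */
	}
}
```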
@@ -496,8 +476,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
 	if (is_created_vm(target_vm) && is_postlaunched_vm(target_vm)) {
 		struct acrn_set_ioreq_buffer iobuf;
 
-		if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &iobuf, param, sizeof(iobuf)) == 0) {
 			dev_dbg(DBG_LEVEL_HYCALL, "[%d] SET BUFFER=0x%p",
 				vmid, iobuf.req_buf);
 
@@ -513,7 +492,7 @@ int32_t hcall_set_ioreq_buffer(struct acrn_vm *vm, uint16_t vmid, uint64_t param
 			}
 			ret = 0;
 		}
-	        }
+		}
 	}
 
 	return ret;
@@ -560,7 +539,7 @@ int32_t hcall_notify_ioreq_finish(uint16_t vmid, uint16_t vcpu_id)
  *@pre Pointer vm shall point to SOS_VM
  */
 static int32_t add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_vm,
-                                const struct vm_memory_region *region,uint64_t *pml4_page)
+				const struct vm_memory_region *region,uint64_t *pml4_page)
 {
 	int32_t ret;
 	uint64_t prot;
@@ -758,8 +737,7 @@ int32_t hcall_write_protect_page(struct acrn_vm *vm, uint16_t vmid, uint64_t wp_
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
 		struct wp_data wp;
 
-		if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &wp, wp_gpa, sizeof(wp)) == 0) {
 			ret = write_protect_page(target_vm, &wp);
 		}
 	} else {
@@ -795,9 +773,8 @@ int32_t hcall_gpa_to_hpa(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 		if (v_gpa2hpa.hpa == INVALID_HPA) {
 			pr_err("%s,vm[%hu] gpa 0x%lx,GPA is unmapping.",
 				__func__, target_vm->vm_id, v_gpa2hpa.gpa);
-		} else if (copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa)) != 0) {
 		} else {
-			ret = 0;
+			ret = copy_to_gpa(vm, &v_gpa2hpa, param, sizeof(v_gpa2hpa));
 		}
 	} else {
 		pr_err("target_vm is invalid or HCALL gpa2hpa: Unable copy param from vm\n");
@@ -824,10 +801,9 @@ int32_t hcall_assign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
-		if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) == 0) {
 			ret = vpci_assign_pcidev(target_vm, &pcidev);
-	        }
+		}
 	} else {
 		pr_err("%s, vm[%d] is invalid\n", __func__, vm->vm_id);
 	}
@@ -853,10 +829,9 @@ int32_t hcall_deassign_pcidev(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	struct acrn_vm *target_vm = get_vm_from_vmid(vmid);
 
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
-		if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &pcidev, param, sizeof(pcidev)) == 0) {
 			ret = vpci_deassign_pcidev(target_vm, &pcidev);
-	        }
+		}
 	} else {
 		pr_err("%s, vm[%d] is invalid\n", __func__, vm->vm_id);
 	}
@@ -883,8 +858,7 @@ int32_t hcall_set_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t pa
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
 		struct hc_ptdev_irq irq;
 
-		if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &irq, param, sizeof(irq)) == 0) {
 			if (irq.type == IRQ_INTX) {
 				struct pci_vdev *vdev;
 				union pci_bdf bdf = {.value = irq.virt_bdf};
@@ -936,8 +910,7 @@ hcall_reset_ptdev_intr_info(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
 	if (!is_poweroff_vm(target_vm) && is_postlaunched_vm(target_vm)) {
 		struct hc_ptdev_irq irq;
 
-		if (copy_from_gpa(vm, &irq, param, sizeof(irq)) != 0) {
-		} else {
+		if (copy_from_gpa(vm, &irq, param, sizeof(irq)) == 0) {
 			if (irq.type == IRQ_INTX) {
 				struct pci_vdev *vdev;
 				union pci_bdf bdf = {.value = irq.virt_bdf};
@@ -993,14 +966,8 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param)
 	if ((target_vm != NULL) && (!is_poweroff_vm(target_vm)) && (is_postlaunched_vm(target_vm))) {
 		switch (cmd & PMCMD_TYPE_MASK) {
 		case PMCMD_GET_PX_CNT: {
-
-			if (target_vm->pm.px_cnt == 0U) {
-				ret = -1;
-			} else if (copy_to_gpa(vm, &(target_vm->pm.px_cnt), param,
-					sizeof(target_vm->pm.px_cnt)) != 0) {
-				ret = -1;
-			} else {
-				ret = 0;
+			if (target_vm->pm.px_cnt != 0U) {
+				ret = copy_to_gpa(vm, &(target_vm->pm.px_cnt), param, sizeof(target_vm->pm.px_cnt));
 			}
 			break;
 		}
@@ -1013,35 +980,21 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param)
 			 * we need to check PMCMD_VCPUID_MASK in cmd.
 			 */
 			if (target_vm->pm.px_cnt == 0U) {
-				ret = -1;
 				break;
 			}
 
 			pn = (uint8_t)((cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT);
 			if (pn >= target_vm->pm.px_cnt) {
-				ret = -1;
 				break;
 			}
 
 			px_data = target_vm->pm.px_data + pn;
-			if (copy_to_gpa(vm, px_data, param,
-					sizeof(struct cpu_px_data)) != 0) {
-				ret = -1;
-				break;
-			}
-
-			ret = 0;
+			ret = copy_to_gpa(vm, px_data, param, sizeof(struct cpu_px_data));
 			break;
 		}
 		case PMCMD_GET_CX_CNT: {
-
-			if (target_vm->pm.cx_cnt == 0U) {
-				ret = -1;
-			} else if (copy_to_gpa(vm, &(target_vm->pm.cx_cnt), param,
-					sizeof(target_vm->pm.cx_cnt)) != 0) {
-				ret = -1;
-			} else {
-				ret = 0;
+			if (target_vm->pm.cx_cnt != 0U) {
+				ret = copy_to_gpa(vm, &(target_vm->pm.cx_cnt), param, sizeof(target_vm->pm.cx_cnt));
 			}
 			break;
 		}
@@ -1050,32 +1003,22 @@ int32_t hcall_get_cpu_pm_state(struct acrn_vm *vm, uint64_t cmd, uint64_t param)
 			struct cpu_cx_data *cx_data;
 
 			if (target_vm->pm.cx_cnt == 0U) {
-				ret = -1;
 				break;
 			}
 
 			cx_idx = (uint8_t)
 				((cmd & PMCMD_STATE_NUM_MASK) >> PMCMD_STATE_NUM_SHIFT);
 			if ((cx_idx == 0U) || (cx_idx > target_vm->pm.cx_cnt)) {
-				ret = -1;
 				break;
 			}
 
 			cx_data = target_vm->pm.cx_data + cx_idx;
-
-			if (copy_to_gpa(vm, cx_data, param,
-					sizeof(struct cpu_cx_data)) != 0) {
-				ret = -1;
-				break;
-			}
-
-			ret = 0;
+			ret = copy_to_gpa(vm, cx_data, param, sizeof(struct cpu_cx_data));
 			break;
 		}
 		default:
-			ret = -1;
+			/* invalid command */
 			break;
-
 		}
 	}
 
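
The `hcall_get_cpu_pm_state()` hunks combine both ideas seen earlier: with `ret` presumably initialized to -1 at the top of the function, every `ret = -1;` before a `break;` (and in the `default:` case) becomes redundant, and each success path simply forwards `copy_to_gpa()`'s return value. A condensed sketch of one case, with stubbed names in place of ACRN's:

```c
#include <stddef.h>
#include <stdint.h>

/* Stub: pretend the copy to guest memory always succeeds. */
static int32_t copy_to_gpa_stub(const void *src, size_t len)
{
	(void)src;
	(void)len;
	return 0;
}

#define PMCMD_GET_PX_CNT 0U /* illustrative value, not ACRN's */

static int32_t get_cpu_pm_state(uint64_t cmd, uint8_t px_cnt)
{
	int32_t ret = -1; /* assumed default; only success overwrites it */

	switch (cmd) {
	case PMCMD_GET_PX_CNT:
		if (px_cnt != 0U) {
			ret = copy_to_gpa_stub(&px_cnt, sizeof(px_cnt));
		}
		break;
	default:
		/* invalid command: ret stays -1 */
		break;
	}

	return ret;
}
```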
@@ -1152,7 +1095,7 @@ int32_t hcall_set_callback_vector(const struct acrn_vm *vm, uint64_t param)
 
 	if (!is_sos_vm(vm)) {
 		pr_err("%s: Targeting to service vm", __func__);
-	        ret = -EPERM;
+		ret = -EPERM;
 	} else if ((param > NR_MAX_VECTOR) || (param < VECTOR_DYNAMIC_START)) {
 		pr_err("%s: Invalid passed vector\n", __func__);
 		ret = -EINVAL;