@@ -41,7 +41,7 @@ inline uint64_t vcpu_get_rip(struct acrn_vcpu *vcpu)
    struct run_context *ctx =
        &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-   if (bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) == 0 &&
+   if (!bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) &&
        bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached) == 0)
        ctx->rip = exec_vmread(VMX_GUEST_RIP);
    return ctx->rip;
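Note: bitmap_test() returns a boolean, so !bitmap_test(...) states the intent more directly than the == 0 comparison. The surrounding pattern is a lazy VMCS read: reg_updated flags registers the hypervisor has written locally, reg_cached flags registers already fetched from the VMCS, so exec_vmread() runs only on the first access after a VM exit. A minimal self-contained sketch of that pattern follows; the types, helper bodies, and constants here are simplified stand-ins, not ACRN's real implementations (the real bitmap_test_and_set_lock() uses a locked instruction):

#include <stdbool.h>
#include <stdint.h>

#define CPU_REG_RIP 0U  /* stand-in bit index */

struct vcpu_sketch {
    uint64_t reg_updated;   /* bits set by local writes to guest registers */
    uint64_t reg_cached;    /* bits set once a register was read from the VMCS */
    uint64_t rip;           /* cached copy of GUEST_RIP */
};

static bool bitmap_test(uint16_t bit, const uint64_t *addr)
{
    return ((*addr >> bit) & 1UL) != 0UL;
}

/* Set a bit and report whether it was already set; ACRN's variant
 * does this atomically. */
static bool bitmap_test_and_set_lock(uint16_t bit, uint64_t *addr)
{
    bool was_set = bitmap_test(bit, addr);
    *addr |= (1UL << bit);
    return was_set;
}

/* Stand-in for exec_vmread(VMX_GUEST_RIP). */
static uint64_t vmread_guest_rip_stub(void)
{
    return 0x1000UL;
}

/* Lazy read: fetch GUEST_RIP from the VMCS only on the first access
 * after a VM exit; subsequent calls return the cached copy. */
static uint64_t vcpu_get_rip_sketch(struct vcpu_sketch *vcpu)
{
    if (!bitmap_test(CPU_REG_RIP, &vcpu->reg_updated) &&
        !bitmap_test_and_set_lock(CPU_REG_RIP, &vcpu->reg_cached)) {
        vcpu->rip = vmread_guest_rip_stub();
    }
    return vcpu->rip;
}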
@@ -75,9 +75,10 @@ inline uint64_t vcpu_get_efer(struct acrn_vcpu *vcpu)
    struct run_context *ctx =
        &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-   if (bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) == 0 &&
-       bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached) == 0)
+   if (!bitmap_test(CPU_REG_EFER, &vcpu->reg_updated) &&
+       !bitmap_test_and_set_lock(CPU_REG_EFER, &vcpu->reg_cached)) {
        ctx->ia32_efer = exec_vmread64(VMX_GUEST_IA32_EFER_FULL);
+   }
    return ctx->ia32_efer;
}
@@ -93,10 +94,11 @@ inline uint64_t vcpu_get_rflags(struct acrn_vcpu *vcpu)
    struct run_context *ctx =
        &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

-   if (bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) == 0 &&
-       bitmap_test_and_set_lock(CPU_REG_RFLAGS,
-           &vcpu->reg_cached) == 0 && vcpu->launched)
+   if (!bitmap_test(CPU_REG_RFLAGS, &vcpu->reg_updated) &&
+       !bitmap_test_and_set_lock(CPU_REG_RFLAGS,
+           &vcpu->reg_cached) && vcpu->launched) {
        ctx->rflags = exec_vmread(VMX_GUEST_RFLAGS);
+   }
    return ctx->rflags;
}
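The EFER and RFLAGS hunks additionally wrap the single-statement bodies in braces, and the RFLAGS accessor keeps its extra vcpu->launched condition: RFLAGS is read back from the VMCS only once the vCPU has actually been launched; before the first VM entry, the value seeded into the run context by the hypervisor is returned as-is.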
@@ -186,14 +188,14 @@ struct acrn_vcpu *get_ever_run_vcpu(uint16_t pcpu_id)
static void set_vcpu_mode(struct acrn_vcpu *vcpu, uint32_t cs_attr, uint64_t ia32_efer,
        uint64_t cr0)
{
-   if (ia32_efer & MSR_IA32_EFER_LMA_BIT) {
-       if (cs_attr & 0x2000U) {
+   if ((ia32_efer & MSR_IA32_EFER_LMA_BIT) != 0UL) {
+       if ((cs_attr & 0x2000U) != 0U) {
            /* CS.L = 1 */
            vcpu->arch.cpu_mode = CPU_MODE_64BIT;
        } else {
            vcpu->arch.cpu_mode = CPU_MODE_COMPATIBILITY;
        }
-   } else if (cr0 & CR0_PE) {
+   } else if ((cr0 & CR0_PE) != 0UL) {
        vcpu->arch.cpu_mode = CPU_MODE_PROTECTED;
    } else {
        vcpu->arch.cpu_mode = CPU_MODE_REAL;
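The mode decision follows the architecture: when IA32_EFER.LMA is set the CPU is in long mode, and CS.L (bit 13 of the VMCS CS access-rights field, hence the 0x2000U mask) selects 64-bit versus compatibility sub-mode; outside long mode, CR0.PE selects protected versus real mode. A standalone sketch of the same decision, using hypothetical stand-in constants in place of ACRN's header definitions:

#include <stdint.h>

/* Stand-in constants; the real ones come from ACRN headers. */
#define EFER_LMA_SKETCH (1UL << 10) /* IA32_EFER.LMA */
#define CR0_PE_SKETCH   (1UL << 0)  /* CR0.PE */
#define CS_ATTR_L_BIT   0x2000U     /* CS.L, bit 13 of the access-rights field */

enum cpu_mode_sketch { MODE_REAL, MODE_PROTECTED, MODE_COMPAT, MODE_64BIT };

static enum cpu_mode_sketch classify_mode(uint32_t cs_attr, uint64_t efer,
        uint64_t cr0)
{
    if ((efer & EFER_LMA_SKETCH) != 0UL) {
        /* Long mode active: CS.L picks 64-bit vs. compatibility. */
        return ((cs_attr & CS_ATTR_L_BIT) != 0U) ? MODE_64BIT : MODE_COMPAT;
    }
    /* Outside long mode, CR0.PE picks protected vs. real mode. */
    return ((cr0 & CR0_PE_SKETCH) != 0UL) ? MODE_PROTECTED : MODE_REAL;
}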
@@ -216,7 +218,7 @@ void set_vcpu_regs(struct acrn_vcpu *vcpu, struct acrn_vcpu_regs *vcpu_regs)
     * If the set_vcpu_regs is used not only for vcpu state
     * initialization, this part of code needs be revised.
     */
-   if (vcpu_regs->cr0 & CR0_PE) {
+   if ((vcpu_regs->cr0 & CR0_PE) != 0UL) {
        attr = PROTECTED_MODE_DATA_SEG_AR;
        limit = PROTECTED_MODE_SEG_LIMIT;
    } else {
@@ -451,7 +453,7 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
    pr_info("VM %d Starting VCPU %hu",
        vcpu->vm->vm_id, vcpu->vcpu_id);

-   if (vcpu->arch.vpid)
+   if (vcpu->arch.vpid != 0U)
        exec_vmwrite16(VMX_VPID, vcpu->arch.vpid);

    /*
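The explicit != 0U also documents a real constraint: VPID 0 is reserved (a VM entry with the 'enable VPID' control set and a zero VPID field fails its entry checks), so VMX_VPID is written only when a nonzero VPID has been allocated for this vCPU.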
@@ -518,10 +520,11 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)

    if (status != 0) {
        /* refer to 64-ia32 spec section 24.9.1 volume#3 */
-       if (vcpu->arch.exit_reason & VMX_VMENTRY_FAIL)
+       if ((vcpu->arch.exit_reason & VMX_VMENTRY_FAIL) != 0U) {
            pr_fatal("vmentry fail reason=%lx", vcpu->arch.exit_reason);
-       else
+       } else {
            pr_fatal("vmexit fail err_inst=%x", exec_vmread32(VMX_INSTR_ERROR));
+       }

        ASSERT(status == 0, "vm fail");
    }
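Per the SDM reference in the comment, bit 31 of the VMCS exit-reason field distinguishes a failed VM entry from an ordinary VM exit, which is what the VMX_VMENTRY_FAIL mask presumably encodes. A small sketch of decoding that field, with a hypothetical stand-in constant and plain printf in place of pr_fatal:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in: bit 31 of the exit-reason field flags a VM-entry failure;
 * the low 16 bits hold the basic exit reason. */
#define VMENTRY_FAIL_BIT (UINT64_C(1) << 31)

static void report_vmx_failure(uint64_t exit_reason, uint32_t instr_error)
{
    if ((exit_reason & VMENTRY_FAIL_BIT) != UINT64_C(0)) {
        printf("vmentry fail reason=%" PRIx64 "\n", exit_reason);
    } else {
        printf("vmexit fail err_inst=%" PRIx32 "\n", instr_error);
    }
}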