#include <multiboot.h>
#include <reloc.h>

-#define ACRN_DBG_GUEST	6
+#define ACRN_DBG_GUEST	6U

/* for VM0 e820 */
uint32_t e820_entries;
@@ -18,7 +18,7 @@ struct e820_mem_params e820_mem;

struct page_walk_info {
	uint64_t top_entry;	/* Top level paging structure entry */
-	int level;
+	uint32_t level;
	uint32_t width;
	bool is_user_mode;
	bool is_write_access;
@@ -32,7 +32,7 @@ struct page_walk_info {
inline bool
is_vm0(struct vm *vm)
{
-	return (vm->attr.boot_idx & 0x7FU) == 0;
+	return (vm->attr.boot_idx & 0x7FU) == 0U;
}

inline struct vcpu *vcpu_from_vid(struct vm *vm, uint16_t vcpu_id)
@@ -77,7 +77,7 @@ inline struct vcpu *get_primary_vcpu(struct vm *vm)
inline uint64_t vcpumask2pcpumask(struct vm *vm, uint64_t vdmask)
{
	uint16_t vcpu_id;
-	uint64_t dmask = 0;
+	uint64_t dmask = 0UL;
	struct vcpu *vcpu;

	for (vcpu_id = ffs64(vdmask); vcpu_id != INVALID_BIT_INDEX;
@@ -129,8 +129,9 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
	uint64_t gva, uint64_t *gpa, uint32_t *err_code)
{
-	int i;
-	uint32_t index, shift;
+	uint32_t i;
+	uint64_t index;
+	uint32_t shift;
	uint8_t *base;
	uint64_t entry;
	uint64_t addr, page_size;
@@ -141,15 +142,18 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
		return -EINVAL;

	addr = pw_info->top_entry;
-	for (i = pw_info->level - 1; i >= 0; i--) {
+	i = pw_info->level;
+	while (i != 0U) {
+		i--;
+
		addr = addr & IA32E_REF_MASK;
		base = GPA2HVA(vcpu->vm, addr);
		if (base == NULL) {
			ret = -EFAULT;
			goto out;
		}

-		shift = (uint32_t)i * pw_info->width + 12U;
+		shift = i * pw_info->width + 12U;
		index = (gva >> shift) & ((1UL << pw_info->width) - 1UL);
		page_size = 1UL << shift;

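A note on the loop rewrite above: once `i` becomes `uint32_t`, the old `for (i = pw_info->level - 1; i >= 0; i--)` form can never terminate, because an unsigned value is always `>= 0`, and a level of 0 would wrap to `UINT32_MAX`. Decrementing at the top of the `while` body is the standard unsigned down-counting idiom. A minimal standalone sketch, with a hypothetical `walk_levels` helper rather than ACRN code:

```c
#include <stdint.h>
#include <stdio.h>

/* Visit levels n-1 .. 0 with an unsigned counter. The naive
 * "for (i = n - 1; i >= 0; i--)" loops forever once i is unsigned,
 * and n == 0 would wrap to UINT32_MAX before the first test;
 * decrementing at the top of the while body avoids both. */
static void walk_levels(uint32_t n)
{
	uint32_t i = n;

	while (i != 0U) {
		i--;
		printf("level %u\n", i);
	}
}

int main(void)
{
	walk_levels(4U);	/* prints 3, 2, 1, 0 */
	walk_levels(0U);	/* prints nothing, no wraparound */
	return 0;
}
```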
@@ -188,9 +192,9 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,

		entry >>= shift;
		/* shift left 12bit more and back to clear XD/Prot Key/Ignored bits */
-		entry <<= (shift + 12);
-		entry >>= 12;
-		*gpa = entry | (gva & (page_size - 1));
+		entry <<= (shift + 12U);
+		entry >>= 12U;
+		*gpa = entry | (gva & (page_size - 1UL));
out:

	if (fault != 0) {
@@ -224,7 +228,7 @@ static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
		goto out;
	}

-	pw_info->level = 2;
+	pw_info->level = 2U;
	pw_info->top_entry = entry;
	ret = _gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);

@@ -265,24 +269,25 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,

	pw_info.top_entry = cur_context->cr3;
	pw_info.level = pm;
-	pw_info.is_write_access = !!(*err_code & PAGE_FAULT_WR_FLAG);
-	pw_info.is_inst_fetch = !!(*err_code & PAGE_FAULT_ID_FLAG);
+	pw_info.is_write_access = ((*err_code & PAGE_FAULT_WR_FLAG) != 0U);
+	pw_info.is_inst_fetch = ((*err_code & PAGE_FAULT_ID_FLAG) != 0U);
	pw_info.is_user_mode = ((exec_vmread(VMX_GUEST_CS_SEL) & 0x3UL) == 3UL);
	pw_info.pse = true;
-	pw_info.nxe = cur_context->ia32_efer & MSR_IA32_EFER_NXE_BIT;
-	pw_info.wp = !!(cur_context->cr0 & CR0_WP);
+	pw_info.nxe =
+		((cur_context->ia32_efer & MSR_IA32_EFER_NXE_BIT) != 0UL);
+	pw_info.wp = ((cur_context->cr0 & CR0_WP) != 0UL);

	*err_code &= ~PAGE_FAULT_P_FLAG;

	if (pm == PAGING_MODE_4_LEVEL) {
-		pw_info.width = 9;
+		pw_info.width = 9U;
		ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
	} else if (pm == PAGING_MODE_3_LEVEL) {
-		pw_info.width = 9;
+		pw_info.width = 9U;
		ret = _gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
	} else if (pm == PAGING_MODE_2_LEVEL) {
-		pw_info.width = 10;
-		pw_info.pse = !!(cur_context->cr4 & CR4_PSE);
+		pw_info.width = 10U;
+		pw_info.pse = ((cur_context->cr4 & CR4_PSE) != 0UL);
		pw_info.nxe = false;
		ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
	} else
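Two patterns in this hunk deserve a word. The `9U`/`10U` widths are the per-level index widths: 4-level and PAE paging use 8-byte entries, so a 4 KiB table holds 2^9 of them, while legacy 2-level paging uses 4-byte entries and 2^10 per table. And every `!!(x)` becomes an explicit `(x != 0U)` comparison, same truth value, but the int-to-bool conversion is spelled out rather than implied. A minimal equivalence sketch, with a hypothetical flag value:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_FAULT_WR_FLAG 0x00000002U	/* hypothetical value for this sketch */

int main(void)
{
	uint32_t err_code = 0x6U;

	/* Old style: double negation collapses nonzero to 1 implicitly. */
	bool wr_old = !!(err_code & PAGE_FAULT_WR_FLAG);
	/* New style: explicit comparison, identical result. */
	bool wr_new = ((err_code & PAGE_FAULT_WR_FLAG) != 0U);

	assert(wr_old == wr_new);
	return 0;
}
```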
@@ -296,25 +301,25 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
	return ret;
}

-static inline int32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
+static inline uint32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
	uint32_t size, uint32_t fix_pg_size, bool cp_from_vm)
{
	uint64_t hpa;
-	uint32_t off_in_pg, len, pg_size;
+	uint32_t offset_in_pg, len, pg_size;
	void *g_ptr;

	hpa = _gpa2hpa(vm, gpa, &pg_size);
	if (pg_size == 0U) {
		pr_err("GPA2HPA not found");
-		return -EINVAL;
+		return 0;
	}

	if (fix_pg_size != 0U)
		pg_size = fix_pg_size;

-	off_in_pg = gpa & (pg_size - 1);
-	len = (size > pg_size - off_in_pg) ?
-		(pg_size - off_in_pg) : size;
+	offset_in_pg = (uint32_t)gpa & (pg_size - 1U);
+	len = (size > (pg_size - offset_in_pg)) ?
+		(pg_size - offset_in_pg) : size;

	g_ptr = HPA2HVA(hpa);

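The renamed `offset_in_pg` and the `len` clamp above bound one copy to the end of the current guest page: the low bits of the GPA give the offset within the page, and `len` is the smaller of the bytes left in the page and the bytes left in the request. A worked example, assuming the usual 4 KiB page:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pg_size = 4096U;	/* assuming 4 KiB pages */
	uint64_t gpa = 0x1FF8UL;	/* 8 bytes short of a page boundary */
	uint32_t size = 100U;		/* caller asks for 100 bytes */

	/* Low bits of the GPA are the offset inside the page. */
	uint32_t offset_in_pg = (uint32_t)gpa & (pg_size - 1U);
	/* Clamp this chunk to what remains in the page. */
	uint32_t len = (size > (pg_size - offset_in_pg)) ?
			(pg_size - offset_in_pg) : size;

	/* Prints offset=0xff8 len=8: only 8 bytes fit, so the caller's
	 * loop advances gpa/h_ptr by len and comes back for the rest. */
	printf("offset=0x%x len=%u\n", offset_in_pg, len);
	return 0;
}
```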
@@ -329,32 +334,30 @@ static inline int32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
static inline int copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
	uint32_t size, bool cp_from_vm)
{
-	int32_t ret;
	uint32_t len;

	if (vm == NULL) {
		pr_err("guest phy addr copy need vm param");
		return -EINVAL;
	}

-	do {
-		ret = _copy_gpa(vm, h_ptr, gpa, size, 0, cp_from_vm);
-		if (ret < 0)
-			return ret;
+	while (size > 0U) {
+		len = _copy_gpa(vm, h_ptr, gpa, size, 0U, cp_from_vm);
+		if (len == 0U)
+			return -EINVAL;

-		len = (uint32_t)ret;
		gpa += len;
		h_ptr += len;
		size -= len;
-	} while (size > 0U);
+	}

	return 0;
}

static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
	uint32_t size, uint32_t *err_code, bool cp_from_vm)
{
-	uint64_t gpa = 0;
+	uint64_t gpa = 0UL;
	int32_t ret;
	uint32_t len;

@@ -367,24 +370,23 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
		return -EINVAL;
	}

-	do {
+	while (size > 0U) {
		ret = gva2gpa(vcpu, gva, &gpa, err_code);
		if (ret < 0) {
			pr_err("error[%d] in GVA2GPA, err_code=0x%x",
				ret, *err_code);
			return ret;
		}

-		ret = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
+		len = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
			PAGE_SIZE_4K, cp_from_vm);
-		if (ret < 0)
-			return ret;
+		if (len == 0U)
+			return -EINVAL;

-		len = (uint32_t)ret;
		gva += len;
		h_ptr += len;
		size -= len;
-	} while (size > 0U);
+	}

	return 0;
}
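With `_copy_gpa()` now returning an unsigned length, both `copy_gpa()` and `copy_gva()` drop the signed-error dance (`ret < 0`, then cast back to `uint32_t`) and treat a zero-length result as the failure sentinel. Switching `do/while` to `while` also makes a `size == 0` request a clean no-op instead of one speculative copy. A reduced sketch of the pattern, with a hypothetical `copy_chunk` standing in for `_copy_gpa`:

```c
#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for _copy_gpa(): copies at most one "page"
 * worth of data and returns the length copied, 0 on failure. */
static uint32_t copy_chunk(uint8_t *dst, const uint8_t *src, uint32_t size)
{
	uint32_t len = (size > 16U) ? 16U : size;	/* toy 16-byte page */

	memcpy(dst, src, len);
	return len;
}

static int copy_all(uint8_t *dst, const uint8_t *src, uint32_t size)
{
	uint32_t len;

	while (size > 0U) {		/* no-op when size == 0 */
		len = copy_chunk(dst, src, size);
		if (len == 0U)
			return -EINVAL;	/* zero length signals failure */

		dst += len;
		src += len;
		size -= len;
	}

	return 0;
}

int main(void)
{
	uint8_t in[40] = { 1U, 2U, 3U };
	uint8_t out[40];

	return copy_all(out, in, (uint32_t)sizeof(in));
}
```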
@@ -569,11 +571,11 @@ static void rebuild_vm0_e820(void)
int prepare_vm0_memmap_and_e820(struct vm *vm)
{
	uint32_t i;
-	uint32_t attr_wb = (IA32E_EPT_R_BIT |
+	uint64_t attr_wb = (IA32E_EPT_R_BIT |
				IA32E_EPT_W_BIT |
				IA32E_EPT_X_BIT |
				IA32E_EPT_WB);
-	uint32_t attr_uc = (IA32E_EPT_R_BIT |
+	uint64_t attr_uc = (IA32E_EPT_R_BIT |
				IA32E_EPT_W_BIT |
				IA32E_EPT_X_BIT |
				IA32E_EPT_UNCACHED);
@@ -598,7 +600,6 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
			entry->length, MAP_MEM, attr_wb);
	}

-
	dev_dbg(ACRN_DBG_GUEST, "VM0 e820 layout:\n");
	for (i = 0U; i < e820_entries; i++) {
		entry = &e820[i];
@@ -613,7 +614,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
	 * will cause EPT violation if sos accesses hv memory
	 */
	hv_hpa = get_hv_image_base();
-	ept_mmap(vm, hv_hpa, hv_hpa, CONFIG_RAM_SIZE, MAP_UNMAP, 0);
+	ept_mmap(vm, hv_hpa, hv_hpa, CONFIG_RAM_SIZE, MAP_UNMAP, 0U);
	return 0;
}

@@ -623,7 +624,8 @@ uint64_t e820_alloc_low_memory(uint32_t size)
	struct e820_entry *entry, *new_entry;

	/* We want memory in page boundary and integral multiple of pages */
-	size = ROUND_PAGE_UP(size);
+	size = ((size + CPU_PAGE_SIZE - 1U) >> CPU_PAGE_SHIFT)
+			<< CPU_PAGE_SHIFT;

	for (i = 0U; i < e820_entries; i++) {
		entry = &e820[i];
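The open-coded replacement for `ROUND_PAGE_UP()` rounds `size` up to a whole number of pages: adding `CPU_PAGE_SIZE - 1` before the truncating right shift makes any partial page carry into the next one, and the left shift restores byte units. A small check, assuming the conventional 4 KiB page (`CPU_PAGE_SHIFT` of 12):

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12U		/* assuming 4 KiB pages */
#define PAGE_SIZE	(1U << PAGE_SHIFT)

/* Round up to the next page boundary. */
static uint32_t round_page_up(uint32_t size)
{
	return ((size + PAGE_SIZE - 1U) >> PAGE_SHIFT) << PAGE_SHIFT;
}

int main(void)
{
	assert(round_page_up(1U) == 4096U);
	assert(round_page_up(4096U) == 4096U);	/* exact multiple unchanged */
	assert(round_page_up(5000U) == 8192U);	/* partial page carries over */
	return 0;
}
```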
@@ -823,12 +825,12 @@ static const uint64_t guest_init_gdt[] = {
	GUEST_INIT_GDT_DESC_3,
};

-uint32_t create_guest_init_gdt(struct vm *vm, uint32_t *limit)
+uint64_t create_guest_init_gdt(struct vm *vm, uint32_t *limit)
{
	void *gtd_addr = GPA2HVA(vm, GUEST_INIT_GDT_START);

-	*limit = sizeof(guest_init_gdt) - 1;
-	(void)memcpy_s(gtd_addr, 64, guest_init_gdt, sizeof(guest_init_gdt));
+	*limit = sizeof(guest_init_gdt) - 1U;
+	(void)memcpy_s(gtd_addr, 64U, guest_init_gdt, sizeof(guest_init_gdt));

	return GUEST_INIT_GDT_START;
};
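One last detail in `create_guest_init_gdt()`: the `- 1U` is not an off-by-one fix but the x86 convention that a descriptor-table limit holds the offset of the last valid byte, so a table of N bytes has limit N - 1. A minimal illustration with a hypothetical four-entry table:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical guest GDT: four 8-byte descriptors. */
	const uint64_t gdt[4] = { 0UL, 0UL, 0UL, 0UL };

	/* The GDTR limit is the last valid byte offset, not the size. */
	uint32_t limit = (uint32_t)sizeof(gdt) - 1U;

	assert(limit == 31U);	/* 32 bytes -> limit 0x1F */
	return 0;
}
```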