@@ -127,8 +127,8 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
 static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
 	uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
-	int i, index;
-	uint32_t shift;
+	int i;
+	uint32_t index, shift;
 	uint8_t *base;
 	uint64_t entry;
 	uint64_t addr, page_size;
@@ -147,15 +147,15 @@ static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
 		goto out;
 	}
 
-	shift = i * pw_info->width + 12;
-	index = (gva >> shift) & ((1UL << pw_info->width) - 1);
+	shift = (uint32_t)i * pw_info->width + 12U;
+	index = (gva >> shift) & ((1UL << pw_info->width) - 1UL);
 	page_size = 1UL << shift;
 
-	if (pw_info->width == 10)
+	if (pw_info->width == 10U)
 		/* 32bit entry */
-		entry = *((uint32_t *)(base + 4 * index));
+		entry = *((uint32_t *)(base + 4U * index));
 	else
-		entry = *((uint64_t *)(base + 8 * index));
+		entry = *((uint64_t *)(base + 8U * index));
 
 	/* check if the entry present */
 	if ((entry & MMU_32BIT_PDE_P) == 0U) {
@@ -259,7 +259,7 @@ int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
 
 	if ((gpa == NULL) || (err_code == NULL))
 		return -EINVAL;
-	*gpa = 0;
+	*gpa = 0UL;
 
 	pw_info.top_entry = cur_context->cr3;
 	pw_info.level = pm;
@@ -302,12 +302,12 @@ static inline int32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
 	void *g_ptr;
 
 	hpa = _gpa2hpa(vm, gpa, &pg_size);
-	if (pg_size == 0) {
+	if (pg_size == 0U) {
 		pr_err("GPA2HPA not found");
 		return -EINVAL;
 	}
 
-	if (fix_pg_size != 0)
+	if (fix_pg_size != 0U)
 		pg_size = fix_pg_size;
 
 	off_in_pg = gpa & (pg_size - 1);
@@ -327,22 +327,24 @@ static inline int32_t _copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
 static inline int copy_gpa(struct vm *vm, void *h_ptr, uint64_t gpa,
 		uint32_t size, bool cp_from_vm)
 {
-	int32_t len;
+	int32_t ret;
+	uint32_t len;
 
 	if (vm == NULL) {
 		pr_err("guest phy addr copy need vm param");
 		return -EINVAL;
 	}
 
 	do {
-		len = _copy_gpa(vm, h_ptr, gpa, size, 0, cp_from_vm);
-		if (len < 0)
-			return len;
+		ret = _copy_gpa(vm, h_ptr, gpa, size, 0, cp_from_vm);
+		if (ret < 0)
+			return ret;
 
+		len = (uint32_t)ret;
 		gpa += len;
 		h_ptr += len;
 		size -= len;
-	} while (size > 0);
+	} while (size > 0U);
 
 	return 0;
 }
@@ -351,7 +353,8 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
 		uint32_t size, uint32_t *err_code, bool cp_from_vm)
 {
 	uint64_t gpa = 0;
-	int32_t len, ret;
+	int32_t ret;
+	uint32_t len;
 
 	if (vcpu == NULL) {
 		pr_err("guest virt addr copy need vcpu param");
@@ -370,15 +373,16 @@ static inline int copy_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
 			return ret;
 		}
 
-		len = ret = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
+		ret = _copy_gpa(vcpu->vm, h_ptr, gpa, size,
 			PAGE_SIZE_4K, cp_from_vm);
 		if (ret < 0)
 			return ret;
 
+		len = (uint32_t)ret;
 		gva += len;
 		h_ptr += len;
 		size -= len;
-	} while (size > 0);
+	} while (size > 0U);
 
 	return 0;
 }
@@ -413,7 +417,7 @@ int copy_to_gva(struct vcpu *vcpu, void *h_ptr, uint64_t gva,
 
 void init_e820(void)
 {
-	unsigned int i;
+	uint32_t i;
 
 	if (boot_regs[0] == MULTIBOOT_INFO_MAGIC) {
 		struct multiboot_info *mbi = (struct multiboot_info *)
@@ -435,7 +439,7 @@ void init_e820(void)
 			"mmap length 0x%x addr 0x%x entries %d\n",
 			mbi->mi_mmap_length, mbi->mi_mmap_addr,
 			e820_entries);
-		for (i = 0; i < e820_entries; i++) {
+		for (i = 0U; i < e820_entries; i++) {
 			e820[i].baseaddr = mmap[i].baseaddr;
 			e820[i].length = mmap[i].length;
 			e820[i].type = mmap[i].type;
@@ -455,16 +459,16 @@ void init_e820(void)
 
 void obtain_e820_mem_info(void)
 {
-	unsigned int i;
+	uint32_t i;
 	struct e820_entry *entry;
 
 	e820_mem.mem_bottom = UINT64_MAX;
-	e820_mem.mem_top = 0x00;
-	e820_mem.total_mem_size = 0;
-	e820_mem.max_ram_blk_base = 0;
-	e820_mem.max_ram_blk_size = 0;
+	e820_mem.mem_top = 0x0UL;
+	e820_mem.total_mem_size = 0UL;
+	e820_mem.max_ram_blk_base = 0UL;
+	e820_mem.max_ram_blk_size = 0UL;
 
-	for (i = 0; i < e820_entries; i++) {
+	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (e820_mem.mem_bottom > entry->baseaddr)
 			e820_mem.mem_bottom = entry->baseaddr;
@@ -488,7 +492,7 @@ void obtain_e820_mem_info(void)
 
 static void rebuild_vm0_e820(void)
 {
-	unsigned int i;
+	uint32_t i;
 	uint64_t entry_start;
 	uint64_t entry_end;
 	uint64_t hv_start = CONFIG_RAM_START;
@@ -498,7 +502,7 @@ static void rebuild_vm0_e820(void)
 	/* hypervisor mem need be filter out from e820 table
 	 * it's hv itself + other hv reserved mem like vgt etc
 	 */
-	for (i = 0; i < e820_entries; i++) {
+	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		entry_start = entry->baseaddr;
 		entry_end = entry->baseaddr + entry->length;
@@ -539,7 +543,7 @@ static void rebuild_vm0_e820(void)
 
 	}
 
-	if (new_entry.length > 0) {
+	if (new_entry.length > 0UL) {
 		e820_entries++;
 		ASSERT(e820_entries <= E820_MAX_ENTRIES,
 			"e820 entry overflow");
@@ -562,7 +566,7 @@ static void rebuild_vm0_e820(void)
  */
 int prepare_vm0_memmap_and_e820(struct vm *vm)
 {
-	unsigned int i;
+	uint32_t i;
 	uint32_t attr_wb = (IA32E_EPT_R_BIT |
 			IA32E_EPT_W_BIT |
 			IA32E_EPT_X_BIT |
@@ -584,7 +588,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 			MAP_MMIO, attr_uc);
 
 	/* update ram entries to WB attr */
-	for (i = 0; i < e820_entries; i++) {
+	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (entry->type == E820_TYPE_RAM)
 			ept_mmap(vm, entry->baseaddr, entry->baseaddr,
@@ -593,7 +597,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 
 
 	dev_dbg(ACRN_DBG_GUEST, "VM0 e820 layout:\n");
-	for (i = 0; i < e820_entries; i++) {
+	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		dev_dbg(ACRN_DBG_GUEST,
 			"e820 table: %d type: 0x%x", i, entry->type);
@@ -618,7 +622,7 @@ uint64_t e820_alloc_low_memory(uint32_t size)
 	/* We want memory in page boundary and integral multiple of pages */
 	size = ROUND_PAGE_UP(size);
 
-	for (i = 0; i < e820_entries; i++) {
+	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		uint64_t start, end, length;
 