@@ -296,13 +296,13 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
 	default:
 
 		/* Set mapping size to 0 - can't map memory in PML4 */
-		mapped_size = 0;
+		mapped_size = 0U;
 
 		break;
 	}
 
 	/* Check to see if mapping should occur */
-	if (mapped_size != 0) {
+	if (mapped_size != 0U) {
 		/* Get current table entry */
 		uint64_t entry = MEM_READ64(table_base + table_offset);
 		bool prev_entry_present = false;
@@ -415,7 +415,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
 	 * TODO: add shootdown APs operation if MMU will be
 	 * modified after AP start in the future.
 	 */
-	if ((phys_cpu_num != 0) &&
+	if ((phys_cpu_num != 0U) &&
 		((pcpu_active_bitmap &
 		((1UL << phys_cpu_num) - 1))
 		!= (1UL << CPU_BOOT_ID))) {
@@ -620,7 +620,7 @@ void init_paging(void)
 			attr_uc);
 
 	/* Modify WB attribute for E820_TYPE_RAM */
-	for (i = 0, entry = &e820[0];
+	for (i = 0U, entry = &e820[0];
 		i < e820_entries;
 		i++, entry = &e820[i]) {
 		if (entry->type == E820_TYPE_RAM) {
@@ -864,10 +864,10 @@ static uint64_t update_page_table_entry(struct map_params *map_params,
 static uint64_t break_page_table(struct map_params *map_params, void *paddr,
 		void *vaddr, uint64_t page_size, bool direct)
 {
-	uint32_t i = 0;
+	uint32_t i = 0U;
 	uint64_t pa;
-	uint64_t attr = 0x00;
-	uint64_t next_page_size = 0x00;
+	uint64_t attr = 0x0UL;
+	uint64_t next_page_size = 0x0UL;
 	void *sub_tab_addr = NULL;
 	struct entry_params entry;
 
@@ -930,7 +930,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
 		attr |= (entry.entry_val & 0x7fUL);
 	}
 	/* write all entries and keep original attr*/
-	for (i = 0; i < IA32E_NUM_ENTRIES; i++) {
+	for (i = 0U; i < IA32E_NUM_ENTRIES; i++) {
 		MEM_WRITE64(sub_tab_addr + (i * IA32E_COMM_ENTRY_SIZE),
 				(attr | (pa + (i * next_page_size))));
 	}
@@ -1033,7 +1033,7 @@ static int modify_paging(struct map_params *map_params, void *paddr,
 			 */
 			page_size = break_page_table(map_params,
 					paddr, vaddr, page_size, direct);
-			if (page_size == 0)
+			if (page_size == 0UL)
 				return -EINVAL;
 		}
 	} else {
@@ -1043,7 +1043,7 @@ static int modify_paging(struct map_params *map_params, void *paddr,
 		/* The function return the memory size that one entry can map */
 		adjust_size = update_page_table_entry(map_params, paddr, vaddr,
 				page_size, attr, request_type, direct);
-		if (adjust_size == 0)
+		if (adjust_size == 0UL)
 			return -EINVAL;
 		vaddr += adjust_size;
 		paddr += adjust_size;
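
For context (not part of the patch itself): every hunk above makes the same kind of change, giving an integer literal an unsigned suffix that matches the unsigned object it initializes or is compared against (0U for a uint32_t, 0UL for a uint64_t), so both operands of the comparison or assignment have the same signedness. A minimal standalone sketch of that pattern, with illustrative variable names only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mapped_size = 0U;	/* 32-bit unsigned object: literal gets a U suffix */
	uint64_t page_size = 0x0UL;	/* 64-bit unsigned object: literal gets a UL suffix */

	/* Comparing an unsigned object against an unsigned literal keeps both
	 * operands unsigned, so no implicit signed-to-unsigned conversion occurs. */
	if (mapped_size != 0U)
		printf("mapping requested\n");
	if (page_size == 0UL)
		printf("nothing to map\n");

	return 0;
}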