@@ -45,37 +45,37 @@ static void split_large_page(uint64_t *pte, enum _page_table_level level,
45
45
46
46
paddr = ref_paddr ;
47
47
for (i = 0UL ; i < PTRS_PER_PTE ; i ++ ) {
48
- set_pgentry (pbase + i , paddr | ref_prot );
48
+ set_pgentry (pbase + i , paddr | ref_prot , mem_ops );
49
49
paddr += paddrinc ;
50
50
}
51
51
52
52
ref_prot = mem_ops -> get_default_access_right ();
53
- set_pgentry (pte , hva2hpa ((void * )pbase ) | ref_prot );
53
+ set_pgentry (pte , hva2hpa ((void * )pbase ) | ref_prot , mem_ops );
54
54
55
55
/* TODO: flush the TLB */
56
56
}
57
57
58
58
static inline void local_modify_or_del_pte (uint64_t * pte ,
59
- uint64_t prot_set , uint64_t prot_clr , uint32_t type )
59
+ uint64_t prot_set , uint64_t prot_clr , uint32_t type , const struct memory_ops * mem_ops )
60
60
{
61
61
if (type == MR_MODIFY ) {
62
62
uint64_t new_pte = * pte ;
63
63
new_pte &= ~prot_clr ;
64
64
new_pte |= prot_set ;
65
- set_pgentry (pte , new_pte );
65
+ set_pgentry (pte , new_pte , mem_ops );
66
66
} else {
67
- sanitize_pte_entry (pte );
67
+ sanitize_pte_entry (pte , mem_ops );
68
68
}
69
69
}
70
70
71
71
/*
 * Link a freshly allocated lower-level paging structure into an upper-level
 * entry. "pgentry" here may be a pml4e, pdpte, or pde.
 */
static inline void construct_pgentry(uint64_t *pde, void *pd_page, uint64_t prot, const struct memory_ops *mem_ops)
{
	/* scrub the new table page before it becomes reachable by the walker */
	sanitize_pte((uint64_t *)pd_page, mem_ops);

	/* point the upper-level entry at the table's host physical address */
	set_pgentry(pde, hva2hpa(pd_page) | prot, mem_ops);
}
80
80
81
81
/*
@@ -99,7 +99,7 @@ static void modify_or_del_pte(const uint64_t *pde, uint64_t vaddr_start, uint64_
99
99
if (mem_ops -> pgentry_present (* pte ) == 0UL ) {
100
100
ASSERT (false, "invalid op, pte not present" );
101
101
} else {
102
- local_modify_or_del_pte (pte , prot_set , prot_clr , type );
102
+ local_modify_or_del_pte (pte , prot_set , prot_clr , type , mem_ops );
103
103
vaddr += PTE_SIZE ;
104
104
if (vaddr >= vaddr_end ) {
105
105
break ;
@@ -134,7 +134,7 @@ static void modify_or_del_pde(const uint64_t *pdpte, uint64_t vaddr_start, uint6
134
134
if ((vaddr_next > vaddr_end ) || (!mem_aligned_check (vaddr , PDE_SIZE ))) {
135
135
split_large_page (pde , IA32E_PD , vaddr , mem_ops );
136
136
} else {
137
- local_modify_or_del_pte (pde , prot_set , prot_clr , type );
137
+ local_modify_or_del_pte (pde , prot_set , prot_clr , type , mem_ops );
138
138
if (vaddr_next < vaddr_end ) {
139
139
vaddr = vaddr_next ;
140
140
continue ;
@@ -178,7 +178,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
178
178
(!mem_aligned_check (vaddr , PDPTE_SIZE ))) {
179
179
split_large_page (pdpte , IA32E_PDPT , vaddr , mem_ops );
180
180
} else {
181
- local_modify_or_del_pte (pdpte , prot_set , prot_clr , type );
181
+ local_modify_or_del_pte (pdpte , prot_set , prot_clr , type , mem_ops );
182
182
if (vaddr_next < vaddr_end ) {
183
183
vaddr = vaddr_next ;
184
184
continue ;
@@ -251,7 +251,7 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
251
251
if (mem_ops -> pgentry_present (* pte ) != 0UL ) {
252
252
ASSERT (false, "invalid op, pte present" );
253
253
} else {
254
- set_pgentry (pte , paddr | prot );
254
+ set_pgentry (pte , paddr | prot , mem_ops );
255
255
paddr += PTE_SIZE ;
256
256
vaddr += PTE_SIZE ;
257
257
@@ -284,7 +284,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
284
284
if (mem_aligned_check (paddr , PDE_SIZE ) &&
285
285
mem_aligned_check (vaddr , PDE_SIZE ) &&
286
286
(vaddr_next <= vaddr_end )) {
287
- set_pgentry (pde , paddr | (prot | PAGE_PSE ));
287
+ set_pgentry (pde , paddr | (prot | PAGE_PSE ), mem_ops );
288
288
if (vaddr_next < vaddr_end ) {
289
289
paddr += (vaddr_next - vaddr );
290
290
vaddr = vaddr_next ;
@@ -293,7 +293,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
293
293
break ; /* done */
294
294
} else {
295
295
void * pt_page = mem_ops -> get_pt_page (mem_ops -> info , vaddr );
296
- construct_pgentry (pde , pt_page , mem_ops -> get_default_access_right ());
296
+ construct_pgentry (pde , pt_page , mem_ops -> get_default_access_right (), mem_ops );
297
297
}
298
298
}
299
299
add_pte (pde , paddr , vaddr , vaddr_end , prot , mem_ops );
@@ -326,7 +326,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
326
326
if (mem_aligned_check (paddr , PDPTE_SIZE ) &&
327
327
mem_aligned_check (vaddr , PDPTE_SIZE ) &&
328
328
(vaddr_next <= vaddr_end )) {
329
- set_pgentry (pdpte , paddr | (prot | PAGE_PSE ));
329
+ set_pgentry (pdpte , paddr | (prot | PAGE_PSE ), mem_ops );
330
330
if (vaddr_next < vaddr_end ) {
331
331
paddr += (vaddr_next - vaddr );
332
332
vaddr = vaddr_next ;
@@ -335,7 +335,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
335
335
break ; /* done */
336
336
} else {
337
337
void * pd_page = mem_ops -> get_pd_page (mem_ops -> info , vaddr );
338
- construct_pgentry (pdpte , pd_page , mem_ops -> get_default_access_right ());
338
+ construct_pgentry (pdpte , pd_page , mem_ops -> get_default_access_right (), mem_ops );
339
339
}
340
340
}
341
341
add_pde (pdpte , paddr , vaddr , vaddr_end , prot , mem_ops );
@@ -371,7 +371,7 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint
371
371
pml4e = pml4e_offset (pml4_page , vaddr );
372
372
if (mem_ops -> pgentry_present (* pml4e ) == 0UL ) {
373
373
void * pdpt_page = mem_ops -> get_pdpt_page (mem_ops -> info , vaddr );
374
- construct_pgentry (pml4e , pdpt_page , mem_ops -> get_default_access_right ());
374
+ construct_pgentry (pml4e , pdpt_page , mem_ops -> get_default_access_right (), mem_ops );
375
375
}
376
376
add_pdpte (pml4e , paddr , vaddr , vaddr_end , prot , mem_ops );
377
377
0 commit comments