Skip to content

Commit 0391f84

Browse files
lifeixlijinxia
authored and committed
hv: mmu: replace dynamic memory allocation in memory
Replace dynamic memory allocation in memory management with static memory allocation. Since static memory allocation guarantees that allocation never fails, split_large_page and construct_pgentry no longer need to return an errno. Besides, destroy_ept no longer needs to free pages. Instead, it memsets the eptp to 0 to make sure this paging table can't be walked again. Tracked-On: #861 Signed-off-by: Li, Fei1 <fei1.li@intel.com>
1 parent 9c7c0de commit 0391f84

File tree

13 files changed

+146
-235
lines changed

13 files changed

+146
-235
lines changed

hypervisor/arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ config HV_RAM_START
222222

223223
config HV_RAM_SIZE
224224
hex "Size of the RAM region used by the hypervisor"
225-
default 0x04000000
225+
default 0x06000000
226226
help
227227
A 64-bit integer indicating the size of RAM used by the hypervisor.
228228
It is ensured at link time that the footprint of the hypervisor

hypervisor/arch/x86/ept.c

Lines changed: 16 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -10,55 +10,18 @@
1010

1111
#define ACRN_DBG_EPT 6U
1212

13-
/**
14-
* @pre pml4_addr != NULL
15-
*/
16-
void free_ept_mem(uint64_t *pml4_page)
13+
void destroy_ept(struct vm *vm)
1714
{
18-
uint64_t *pdpt_page, *pd_page, *pt_page;
19-
uint64_t *pml4e, *pdpte, *pde;
20-
uint64_t pml4e_idx, pdpte_idx, pde_idx;
21-
22-
for (pml4e_idx = 0U; pml4e_idx < PTRS_PER_PML4E; pml4e_idx++) {
23-
pml4e = pml4_page + pml4e_idx;
24-
if (pgentry_present(PTT_EPT, *pml4e) == 0UL) {
25-
continue;
26-
}
27-
pdpt_page = pml4e_page_vaddr(*pml4e);
28-
29-
for (pdpte_idx = 0U; pdpte_idx < PTRS_PER_PDPTE; pdpte_idx++) {
30-
pdpte = pdpt_page + pdpte_idx;
31-
if ((pgentry_present(PTT_EPT, *pdpte) == 0UL) ||
32-
pdpte_large(*pdpte) != 0UL) {
33-
continue;
34-
}
35-
pd_page = pdpte_page_vaddr(*pdpte);
36-
37-
for (pde_idx = 0U; pde_idx < PTRS_PER_PDE; pde_idx++) {
38-
pde = pd_page + pde_idx;
39-
if ((pgentry_present(PTT_EPT, *pde) == 0UL) ||
40-
pde_large(*pde) != 0UL) {
41-
continue;
42-
}
43-
pt_page = pde_page_vaddr(*pde);
44-
45-
/* Free page table entry table */
46-
free_paging_struct((void *)pt_page);
47-
}
48-
/* Free page directory entry table */
49-
free_paging_struct((void *)pd_page);
50-
}
51-
free_paging_struct((void *)pdpt_page);
15+
/* Destroy secure world */
16+
if (vm->sworld_control.flag.active != 0UL) {
17+
destroy_secure_world(vm, true);
5218
}
53-
free_paging_struct((void *)pml4_page);
54-
}
5519

56-
void destroy_ept(struct vm *vm)
57-
{
5820
if (vm->arch_vm.nworld_eptp != NULL) {
59-
free_ept_mem((uint64_t *)vm->arch_vm.nworld_eptp);
21+
(void)memset(vm->arch_vm.nworld_eptp, 0U, CPU_PAGE_SIZE);
6022
}
6123
}
24+
6225
/* using return value INVALID_HPA as error code */
6326
uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
6427
{
@@ -73,7 +36,7 @@ uint64_t local_gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
7336
eptp = vm->arch_vm.nworld_eptp;
7437
}
7538

76-
pgentry = lookup_address((uint64_t *)eptp, gpa, &pg_size, PTT_EPT);
39+
pgentry = lookup_address((uint64_t *)eptp, gpa, &pg_size, &vm->arch_vm.ept_mem_ops);
7740
if (pgentry != NULL) {
7841
hpa = ((*pgentry & (~(pg_size - 1UL)))
7942
| (gpa & (pg_size - 1UL)));
@@ -222,9 +185,8 @@ void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
222185
struct vcpu *vcpu;
223186
uint64_t prot = prot_orig;
224187

225-
dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx ",
226-
__func__, vm->vm_id, hpa, gpa);
227-
dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%016x\n", size, prot);
188+
dev_dbg(ACRN_DBG_EPT, "%s, vm[%d] hpa: 0x%016llx gpa: 0x%016llx size: 0x%016llx prot: 0x%016x\n",
189+
__func__, vm->vm_id, hpa, gpa, size, prot);
228190

229191
/* EPT & VT-d share the same page tables, set SNP bit
230192
* to force snooping of PCIe devices if the page
@@ -234,7 +196,7 @@ void ept_mr_add(struct vm *vm, uint64_t *pml4_page,
234196
prot |= EPT_SNOOP_CTRL;
235197
}
236198

237-
mmu_add(pml4_page, hpa, gpa, size, prot, PTT_EPT);
199+
mmu_add(pml4_page, hpa, gpa, size, prot, &vm->arch_vm.ept_mem_ops);
238200

239201
foreach_vcpu(i, vm, vcpu) {
240202
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
@@ -248,8 +210,9 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
248210
struct vcpu *vcpu;
249211
uint16_t i;
250212

251-
mmu_modify_or_del(pml4_page, gpa, size,
252-
prot_set, prot_clr, PTT_EPT, MR_MODIFY);
213+
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
214+
215+
mmu_modify_or_del(pml4_page, gpa, size, prot_set, prot_clr, &vm->arch_vm.ept_mem_ops, MR_MODIFY);
253216

254217
foreach_vcpu(i, vm, vcpu) {
255218
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
@@ -258,17 +221,14 @@ void ept_mr_modify(struct vm *vm, uint64_t *pml4_page,
258221
/**
259222
* @pre [gpa,gpa+size) has been mapped into host physical memory region
260223
*/
261-
void ept_mr_del(struct vm *vm, uint64_t *pml4_page,
262-
uint64_t gpa, uint64_t size)
224+
void ept_mr_del(struct vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
263225
{
264226
struct vcpu *vcpu;
265227
uint16_t i;
266228

267-
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n",
268-
__func__, vm->vm_id, gpa, size);
229+
dev_dbg(ACRN_DBG_EPT, "%s,vm[%d] gpa 0x%llx size 0x%llx\n", __func__, vm->vm_id, gpa, size);
269230

270-
mmu_modify_or_del(pml4_page, gpa, size,
271-
0UL, 0UL, PTT_EPT, MR_DEL);
231+
mmu_modify_or_del(pml4_page, gpa, size, 0UL, 0UL, &vm->arch_vm.ept_mem_ops, MR_DEL);
272232

273233
foreach_vcpu(i, vm, vcpu) {
274234
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);

hypervisor/arch/x86/guest/guest.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -619,6 +619,10 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
619619
"vm0: bottom memory - 0x%llx, top memory - 0x%llx\n",
620620
e820_mem.mem_bottom, e820_mem.mem_top);
621621

622+
if (e820_mem.mem_top > EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE)) {
623+
panic("Please configure VM0_ADDRESS_SPACE correctly!\n");
624+
}
625+
622626
/* create real ept map for all ranges with UC */
623627
ept_mr_add(vm, pml4_page,
624628
e820_mem.mem_bottom, e820_mem.mem_bottom,

hypervisor/arch/x86/guest/vm.c

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -87,12 +87,8 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
8787
/* gpa_lowtop are used for system start up */
8888
vm->hw.gpa_lowtop = 0UL;
8989

90-
vm->arch_vm.nworld_eptp = alloc_paging_struct();
91-
if (vm->arch_vm.nworld_eptp == NULL) {
92-
pr_fatal("%s, alloc memory for EPTP failed\n", __func__);
93-
status = -ENOMEM;
94-
goto err;
95-
}
90+
init_ept_mem_ops(vm);
91+
vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info, 0UL);
9692
sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
9793

9894
/* Only for SOS: Configure VM software information */
@@ -180,7 +176,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
180176
vioapic_cleanup(vm_ioapic(vm));
181177

182178
if (vm->arch_vm.nworld_eptp != NULL) {
183-
free(vm->arch_vm.nworld_eptp);
179+
(void)memset(vm->arch_vm.nworld_eptp, 0U, CPU_PAGE_SIZE);
184180
}
185181

186182
return status;
@@ -212,10 +208,6 @@ int shutdown_vm(struct vm *vm)
212208
/* cleanup vioapic */
213209
vioapic_cleanup(vm_ioapic(vm));
214210

215-
/* Destroy secure world */
216-
if (vm->sworld_control.flag.active != 0UL) {
217-
destroy_secure_world(vm, true);
218-
}
219211
/* Free EPT allocated resources assigned to VM */
220212
destroy_ept(vm);
221213

hypervisor/arch/x86/mmu.c

Lines changed: 28 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -238,39 +238,57 @@ void init_paging(void)
238238
struct e820_entry *entry;
239239
uint64_t hv_hpa;
240240
uint32_t i;
241+
uint64_t low32_max_ram = 0UL;
242+
uint64_t high64_max_ram;
241243
uint64_t attr_uc = (PAGE_TABLE | PAGE_CACHE_UC);
242244

243245
pr_dbg("HV MMU Initialization");
244246

245247
/* Allocate memory for Hypervisor PML4 table */
246-
mmu_pml4_addr = alloc_paging_struct();
248+
mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
247249

248250
init_e820();
249251
obtain_e820_mem_info();
250252

253+
/* align to 2MB */
254+
high64_max_ram = (e820_mem.mem_top + PDE_SIZE - 1UL) & PDE_MASK;
255+
256+
if (high64_max_ram > (CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE) ||
257+
high64_max_ram < (1UL << 32U)) {
258+
panic("Please configure HV_ADDRESS_SPACE correctly!\n");
259+
}
260+
251261
/* Map all memory regions to UC attribute */
252-
mmu_add((uint64_t *)mmu_pml4_addr, e820_mem.mem_bottom,
253-
e820_mem.mem_bottom, e820_mem.mem_top - e820_mem.mem_bottom,
254-
attr_uc, PTT_PRIMARY);
262+
mmu_add((uint64_t *)mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
263+
high64_max_ram - e820_mem.mem_bottom, attr_uc, &ppt_mem_ops);
255264

256265
/* Modify WB attribute for E820_TYPE_RAM */
257266
for (i = 0U; i < e820_entries; i++) {
258267
entry = &e820[i];
259268
if (entry->type == E820_TYPE_RAM) {
260-
mmu_modify_or_del((uint64_t *)mmu_pml4_addr,
261-
entry->baseaddr, entry->length,
262-
PAGE_CACHE_WB, PAGE_CACHE_MASK,
263-
PTT_PRIMARY, MR_MODIFY);
269+
if (entry->baseaddr < (1UL << 32U)) {
270+
uint64_t end = entry->baseaddr + entry->length;
271+
if (end < (1UL << 32U) && (end > low32_max_ram)) {
272+
low32_max_ram = end;
273+
}
274+
}
264275
}
265276
}
266277

278+
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
279+
PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
280+
281+
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
282+
PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
283+
267284
/* set the paging-structure entries' U/S flag
268285
* to supervisor-mode for hypervisor owned memroy.
269286
*/
270287
hv_hpa = get_hv_image_base();
271-
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_HV_RAM_SIZE,
288+
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa & PDE_MASK,
289+
CONFIG_HV_RAM_SIZE + ((hv_hpa & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL,
272290
PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
273-
PTT_PRIMARY, MR_MODIFY);
291+
&ppt_mem_ops, MR_MODIFY);
274292

275293
/* Enable paging */
276294
enable_paging(hva2hpa(mmu_pml4_addr));

hypervisor/arch/x86/page.c

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,6 @@
55
*/
66
#include <hypervisor.h>
77

8-
#define PML4_PAGE_NUM(size) 1UL
9-
#define PDPT_PAGE_NUM(size) (((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
10-
#define PD_PAGE_NUM(size) (((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
11-
#define PT_PAGE_NUM(size) (((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)
12-
138
#define DEFINE_PGTABLE_PAGE(prefix, lvl, LVL, size) \
149
static struct page prefix ## lvl ## _pages[LVL ## _PAGE_NUM(size)]
1510

@@ -66,8 +61,6 @@ const struct memory_ops ppt_mem_ops = {
6661
.get_pd_page = ppt_get_pd_page,
6762
};
6863

69-
/* The size of the guest physical address space, covered by the EPT page table of a VM */
70-
#define EPT_ADDRESS_SPACE(size) ((size != 0UL) ? (size + PLATFORM_LO_MMIO_SIZE) : 0UL)
7164
DEFINE_PGTABLE_PAGE(vm0_, pml4, PML4, EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE));
7265
DEFINE_PGTABLE_PAGE(vm0_, pdpt, PDPT, EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE));
7366
DEFINE_PGTABLE_PAGE(vm0_, pd, PD, EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE));
@@ -79,13 +72,6 @@ static struct page uos_nworld_pdpt_pages[CONFIG_MAX_VM_NUM - 1U][PDPT_PAGE_NUM(E
7972
static struct page uos_nworld_pd_pages[CONFIG_MAX_VM_NUM - 1U][PD_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_UOS_RAM_SIZE))];
8073
static struct page uos_nworld_pt_pages[CONFIG_MAX_VM_NUM - 1U][PT_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_UOS_RAM_SIZE))];
8174

82-
#define TRUSTY_PML4_PAGE_NUM(size) (1UL)
83-
#define TRUSTY_PDPT_PAGE_NUM(size) (1UL)
84-
#define TRUSTY_PD_PAGE_NUM(size) (PD_PAGE_NUM(size))
85-
#define TRUSTY_PT_PAGE_NUM(size) (PT_PAGE_NUM(size))
86-
#define TRUSTY_PGTABLE_PAGE_NUM(size) \
87-
(TRUSTY_PML4_PAGE_NUM(size) + TRUSTY_PDPT_PAGE_NUM(size) + TRUSTY_PD_PAGE_NUM(size) + TRUSTY_PT_PAGE_NUM(size))
88-
8975
static struct page uos_sworld_pgtable_pages[CONFIG_MAX_VM_NUM - 1U][TRUSTY_PGTABLE_PAGE_NUM(TRUSTY_RAM_SIZE)];
9076

9177
/* ept: extended page table*/

0 commit comments

Comments
 (0)