Skip to content

Commit f1ed6c5

Browse files
lifeixli
authored and committed
hv: mmu: remove alloc_page() API
No one would call this API for now. So remove it. Tracked-On: #861 Signed-off-by: Li, Fei1 <fei1.li@intel.com>
1 parent 0391f84 commit f1ed6c5

File tree

8 files changed

+15
-115
lines changed

8 files changed

+15
-115
lines changed

hypervisor/arch/x86/Kconfig

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -164,10 +164,6 @@ config MALLOC_ALIGN
164164
range 8 32
165165
default 16
166166

167-
config NUM_ALLOC_PAGES
168-
hex "Capacity in pages of the heap for page_alloc()"
169-
default 0x1000
170-
171167
config HEAP_SIZE
172168
hex "Capacity of the heap for malloc()"
173169
default 0x100000
@@ -222,7 +218,7 @@ config HV_RAM_START
222218

223219
config HV_RAM_SIZE
224220
hex "Size of the RAM region used by the hypervisor"
225-
default 0x06000000
221+
default 0x04800000
226222
help
227223
A 64-bit integer indicating the size of RAM used by the hypervisor.
228224
It is ensured at link time that the footprint of the hypervisor

hypervisor/arch/x86/cpu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -533,7 +533,7 @@ void cpu_secondary_init(void)
533533
/* Switch this CPU to use the same page tables set-up by the
534534
* primary/boot CPU
535535
*/
536-
enable_paging(get_paging_pml4());
536+
enable_paging();
537537

538538
enable_smep();
539539

hypervisor/arch/x86/mmu.c

Lines changed: 10 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
#include <hypervisor.h>
3131
#include <reloc.h>
3232

33-
static void *mmu_pml4_addr;
33+
static void *ppt_mmu_pml4_addr;
3434
static void *sanitized_page[CPU_PAGE_SIZE];
3535

3636
static struct vmx_capability {
@@ -206,21 +206,15 @@ void sanitize_pte(uint64_t *pt_page)
206206
}
207207
}
208208

209-
uint64_t get_paging_pml4(void)
210-
{
211-
/* Return address to caller */
212-
return hva2hpa(mmu_pml4_addr);
213-
}
214-
215-
void enable_paging(uint64_t pml4_base_addr)
209+
void enable_paging(void)
216210
{
217211
uint64_t tmp64 = 0UL;
218212

219213
/* Enable Write Protect, inhibiting writing to read-only pages */
220214
CPU_CR_READ(cr0, &tmp64);
221215
CPU_CR_WRITE(cr0, tmp64 | CR0_WP);
222216

223-
CPU_CR_WRITE(cr3, pml4_base_addr);
217+
CPU_CR_WRITE(cr3, hva2hpa(ppt_mmu_pml4_addr));
224218
}
225219

226220
void enable_smep(void)
@@ -240,12 +234,12 @@ void init_paging(void)
240234
uint32_t i;
241235
uint64_t low32_max_ram = 0UL;
242236
uint64_t high64_max_ram;
243-
uint64_t attr_uc = (PAGE_TABLE | PAGE_CACHE_UC);
237+
uint64_t attr_uc = (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_CACHE_UC);
244238

245239
pr_dbg("HV MMU Initialization");
246240

247241
/* Allocate memory for Hypervisor PML4 table */
248-
mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
242+
ppt_mmu_pml4_addr = ppt_mem_ops.get_pml4_page(ppt_mem_ops.info, 0UL);
249243

250244
init_e820();
251245
obtain_e820_mem_info();
@@ -259,7 +253,7 @@ void init_paging(void)
259253
}
260254

261255
/* Map all memory regions to UC attribute */
262-
mmu_add((uint64_t *)mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
256+
mmu_add((uint64_t *)ppt_mmu_pml4_addr, e820_mem.mem_bottom, e820_mem.mem_bottom,
263257
high64_max_ram - e820_mem.mem_bottom, attr_uc, &ppt_mem_ops);
264258

265259
/* Modify WB attribute for E820_TYPE_RAM */
@@ -275,49 +269,28 @@ void init_paging(void)
275269
}
276270
}
277271

278-
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
272+
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, 0UL, (low32_max_ram + PDE_SIZE - 1UL) & PDE_MASK,
279273
PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
280274

281-
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
275+
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, (1UL << 32U), high64_max_ram - (1UL << 32U),
282276
PAGE_CACHE_WB, PAGE_CACHE_MASK, &ppt_mem_ops, MR_MODIFY);
283277

284278
/* set the paging-structure entries' U/S flag
285279
* to supervisor-mode for hypervisor owned memroy.
286280
*/
287281
hv_hpa = get_hv_image_base();
288-
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa & PDE_MASK,
282+
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hpa & PDE_MASK,
289283
CONFIG_HV_RAM_SIZE + ((hv_hpa & (PDE_SIZE - 1UL)) != 0UL) ? PDE_SIZE : 0UL,
290284
PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
291285
&ppt_mem_ops, MR_MODIFY);
292286

293287
/* Enable paging */
294-
enable_paging(hva2hpa(mmu_pml4_addr));
288+
enable_paging();
295289

296290
/* set ptep in sanitized_page point to itself */
297291
sanitize_pte((uint64_t *)sanitized_page);
298292
}
299293

300-
void *alloc_paging_struct(void)
301-
{
302-
void *ptr = NULL;
303-
304-
/* Allocate a page from Hypervisor heap */
305-
ptr = alloc_page();
306-
307-
ASSERT(ptr != NULL, "page alloc failed!");
308-
(void)memset(ptr, 0U, CPU_PAGE_SIZE);
309-
310-
return ptr;
311-
}
312-
313-
void free_paging_struct(void *ptr)
314-
{
315-
if (ptr != NULL) {
316-
(void)memset(ptr, 0U, CPU_PAGE_SIZE);
317-
free(ptr);
318-
}
319-
}
320-
321294
bool check_continuous_hpa(struct vm *vm, uint64_t gpa_arg, uint64_t size_arg)
322295
{
323296
uint64_t curr_hpa;

hypervisor/arch/x86/vtd.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -134,10 +134,10 @@ struct iommu_domain {
134134
};
135135

136136
struct context_table {
137-
struct cpu_page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
137+
struct page buses[CONFIG_IOMMU_INIT_BUS_LIMIT];
138138
};
139139

140-
static struct cpu_page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
140+
static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
141141
static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
142142

143143
static inline uint8_t*

hypervisor/include/arch/x86/mmu.h

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -67,16 +67,9 @@ enum _page_table_level {
6767
#define PAGE_SIZE_2M MEM_2M
6868
#define PAGE_SIZE_1G MEM_1G
6969

70-
struct cpu_page {
71-
uint8_t contents[CPU_PAGE_SIZE];
72-
};
73-
7470
void sanitize_pte_entry(uint64_t *ptep);
7571
void sanitize_pte(uint64_t *pt_page);
76-
uint64_t get_paging_pml4(void);
77-
void *alloc_paging_struct(void);
78-
void free_paging_struct(void *ptr);
79-
void enable_paging(uint64_t pml4_base_addr);
72+
void enable_paging(void);
8073
void enable_smep(void);
8174
void init_paging(void);
8275
void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base,

hypervisor/include/arch/x86/pgtable.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,6 @@
2525
#define PAGE_CACHE_UC_MINUS PAGE_PCD
2626
#define PAGE_CACHE_UC (PAGE_PCD | PAGE_PWT)
2727

28-
#define PAGE_TABLE (PAGE_PRESENT | PAGE_RW | PAGE_USER)
29-
30-
3128
#define EPT_RD (1UL << 0U)
3229
#define EPT_WR (1UL << 1U)
3330
#define EPT_EXE (1UL << 2U)

hypervisor/include/lib/mem_mgt.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,6 @@ struct mem_pool {
2424
/* APIs exposing memory allocation/deallocation abstractions */
2525
void *malloc(unsigned int num_bytes);
2626
void *calloc(unsigned int num_elements, unsigned int element_size);
27-
void *alloc_page(void);
28-
void *alloc_pages(unsigned int page_num);
2927
void free(const void *ptr);
3028

3129
#endif /* MEM_MGT_H */

hypervisor/lib/memory.c

Lines changed: 0 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -31,30 +31,6 @@ static struct mem_pool Memory_Pool = {
3131
.contiguity_bitmap = Malloc_Heap_Contiguity_Bitmap
3232
};
3333

34-
/************************************************************************/
35-
/* Memory pool declaration (block size = CPU_PAGE_SIZE) */
36-
/************************************************************************/
37-
static uint8_t __bss_noinit
38-
Paging_Heap[CONFIG_NUM_ALLOC_PAGES][CPU_PAGE_SIZE] __aligned(CPU_PAGE_SIZE);
39-
40-
#define PAGING_HEAP_BUFF_SIZE CPU_PAGE_SIZE
41-
#define PAGING_HEAP_TOTAL_BUFF CONFIG_NUM_ALLOC_PAGES
42-
#define PAGING_HEAP_BITMAP_SIZE \
43-
INT_DIV_ROUNDUP(PAGING_HEAP_TOTAL_BUFF, BITMAP_WORD_SIZE)
44-
static uint32_t Paging_Heap_Bitmap[PAGING_HEAP_BITMAP_SIZE];
45-
static uint32_t Paging_Heap_Contiguity_Bitmap[MALLOC_HEAP_BITMAP_SIZE];
46-
47-
static struct mem_pool Paging_Memory_Pool = {
48-
.start_addr = Paging_Heap,
49-
.spinlock = {.head = 0U, .tail = 0U},
50-
.size = CONFIG_NUM_ALLOC_PAGES * CPU_PAGE_SIZE,
51-
.buff_size = PAGING_HEAP_BUFF_SIZE,
52-
.total_buffs = PAGING_HEAP_TOTAL_BUFF,
53-
.bmp_size = PAGING_HEAP_BITMAP_SIZE,
54-
.bitmap = Paging_Heap_Bitmap,
55-
.contiguity_bitmap = Paging_Heap_Contiguity_Bitmap
56-
};
57-
5834
static void *allocate_mem(struct mem_pool *pool, unsigned int num_bytes)
5935
{
6036

@@ -247,11 +223,6 @@ void *malloc(unsigned int num_bytes)
247223
* Request memory allocation from smaller segmented memory pool
248224
*/
249225
memory = allocate_mem(&Memory_Pool, num_bytes);
250-
} else {
251-
uint32_t page_num =
252-
((num_bytes + CPU_PAGE_SIZE) - 1U) >> CPU_PAGE_SHIFT;
253-
/* Request memory allocation through alloc_page */
254-
memory = alloc_pages(page_num);
255226
}
256227

257228
/* Check if memory allocation is successful */
@@ -263,26 +234,6 @@ void *malloc(unsigned int num_bytes)
263234
return memory;
264235
}
265236

266-
void *alloc_pages(unsigned int page_num)
267-
{
268-
void *memory = NULL;
269-
270-
/* Request memory allocation from Page-aligned memory pool */
271-
memory = allocate_mem(&Paging_Memory_Pool, page_num * CPU_PAGE_SIZE);
272-
273-
/* Check if memory allocation is successful */
274-
if (memory == NULL) {
275-
pr_err("%s: failed to alloc %d pages", __func__, page_num);
276-
}
277-
278-
return memory;
279-
}
280-
281-
void *alloc_page(void)
282-
{
283-
return alloc_pages(1U);
284-
}
285-
286237
void *calloc(unsigned int num_elements, unsigned int element_size)
287238
{
288239
void *memory = malloc(num_elements * element_size);
@@ -306,14 +257,6 @@ void free(const void *ptr)
306257
/* Free buffer in 16-Bytes aligned Memory Pool */
307258
deallocate_mem(&Memory_Pool, ptr);
308259
}
309-
/* Check if ptr belongs to page aligned Memory Pool */
310-
else if ((Paging_Memory_Pool.start_addr < ptr) &&
311-
(ptr < (Paging_Memory_Pool.start_addr +
312-
(Paging_Memory_Pool.total_buffs *
313-
Paging_Memory_Pool.buff_size)))) {
314-
/* Free buffer in page aligned Memory Pool */
315-
deallocate_mem(&Paging_Memory_Pool, ptr);
316-
}
317260
}
318261

319262
void *memchr(const void *void_s, int c, size_t n)

0 commit comments

Comments (0)