
Commit dbb4157

shiqingg authored and acrnsi committed
hv: remove dynamic memory allocation APIs
This patch removes the dynamic memory allocation APIs, including:
 - calloc
 - malloc
 - free
The corresponding data structures, MACROs, and Kconfig items are also removed.

v1 -> v2:
 * no change

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
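With this change the hypervisor no longer provides a runtime heap, so callers have to size their objects at build time. A minimal before/after sketch of such a conversion, under the assumption that static allocation replaces the removed APIs (struct foo, MAX_FOO_ENTRIES and init_foo_table are illustrative names, not from the ACRN tree):

```c
#include <stdint.h>
#include <string.h>

#define MAX_FOO_ENTRIES 64U   /* fixed upper bound chosen at build time */

struct foo {
	uint32_t id;
	uint32_t flags;
};

/* Before: struct foo *foo_table = calloc(n, sizeof(struct foo)); */
static struct foo foo_table[MAX_FOO_ENTRIES];

static void init_foo_table(void)
{
	/* the zeroing that calloc used to provide now has to be explicit */
	(void)memset(foo_table, 0, sizeof(foo_table));
}

int main(void)
{
	init_foo_table();
	return 0;
}
```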
1 parent 773889b commit dbb4157

File tree

4 files changed: +0 −285 lines

hypervisor/arch/x86/Kconfig
hypervisor/boot/dmar_parse.c
hypervisor/include/lib/mem_mgt.h
hypervisor/lib/memory.c

hypervisor/arch/x86/Kconfig

Lines changed: 0 additions & 9 deletions
@@ -147,15 +147,6 @@ config COM_IRQ
         help
           IRQ of the vuart port.
 
-config MALLOC_ALIGN
-        int "Block size in the heap for malloc()"
-        range 8 32
-        default 16
-
-config HEAP_SIZE
-        hex "Capacity of the heap for malloc()"
-        default 0x100000
-
 config CONSOLE_LOGLEVEL_DEFAULT
         int "Default loglevel on the serial console"
         depends on !RELEASE
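For reference, the two options deleted above sized the heap that hypervisor/lib/memory.c (last hunk below) carved into fixed-size blocks. With the defaults shown, the pool geometry works out as in this small sketch, assuming 32-bit bitmap words to match the uint32_t bitmap arrays in the deleted code:

```c
#include <stdio.h>

int main(void)
{
	unsigned long heap_size = 0x100000UL;  /* CONFIG_HEAP_SIZE default    */
	unsigned long buff_size = 16UL;        /* CONFIG_MALLOC_ALIGN default */

	unsigned long total_buffs  = heap_size / buff_size;        /* 65536 blocks */
	unsigned long bitmap_words = (total_buffs + 31UL) / 32UL;  /* 2048 words   */

	printf("blocks: %lu, bitmap words: %lu\n", total_buffs, bitmap_words);
	return 0;
}
```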

hypervisor/boot/dmar_parse.c

Lines changed: 0 additions & 1 deletion
@@ -10,7 +10,6 @@
 #include <host_pm.h>
 #include <io.h>
 #include <spinlock.h>
-#include <mem_mgt.h>
 #include "pci.h"
 #include "vtd.h"
 #include "acpi_priv.h"

hypervisor/include/lib/mem_mgt.h

Lines changed: 0 additions & 29 deletions
This file was deleted.

hypervisor/lib/memory.c

Lines changed: 0 additions & 246 deletions
@@ -3,252 +3,6 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <types.h>
-#include <util.h>
-#include <bits.h>
-#include <spinlock.h>
-#include <page.h>
-#include <mem_mgt.h>
-#include <logmsg.h>
-
-/*
- * Memory pool declaration (block size = CONFIG_MALLOC_ALIGN)
- */
-#define __bss_noinit __attribute__((__section__(".bss_noinit")))
-
-static uint8_t __bss_noinit malloc_heap[CONFIG_HEAP_SIZE] __aligned(CONFIG_MALLOC_ALIGN);
-
-#define MALLOC_HEAP_BUFF_SIZE   CONFIG_MALLOC_ALIGN
-#define MALLOC_HEAP_TOTAL_BUFF  (CONFIG_HEAP_SIZE/MALLOC_HEAP_BUFF_SIZE)
-#define MALLOC_HEAP_BITMAP_SIZE INT_DIV_ROUNDUP(MALLOC_HEAP_TOTAL_BUFF, BITMAP_WORD_SIZE)
-static uint32_t malloc_heap_bitmap[MALLOC_HEAP_BITMAP_SIZE];
-static uint32_t malloc_heap_contiguity_bitmap[MALLOC_HEAP_BITMAP_SIZE];
-
-static struct mem_pool memory_pool = {
-        .start_addr = malloc_heap,
-        .spinlock = {.head = 0U, .tail = 0U},
-        .size = CONFIG_HEAP_SIZE,
-        .buff_size = MALLOC_HEAP_BUFF_SIZE,
-        .total_buffs = MALLOC_HEAP_TOTAL_BUFF,
-        .bmp_size = MALLOC_HEAP_BITMAP_SIZE,
-        .bitmap = malloc_heap_bitmap,
-        .contiguity_bitmap = malloc_heap_contiguity_bitmap
-};
-
-static void *allocate_mem(struct mem_pool *pool, uint32_t num_bytes)
-{
-        void *memory = NULL;
-        uint32_t idx;
-        uint16_t bit_idx;
-        uint32_t requested_buffs;
-
-        /* Check if provided memory pool exists */
-        if (pool == NULL) {
-                return NULL;
-        }
-
-        /* Acquire the pool lock */
-        spinlock_obtain(&pool->spinlock);
-
-        /* Calculate number of buffers to be allocated from memory pool */
-        requested_buffs = INT_DIV_ROUNDUP(num_bytes, pool->buff_size);
-
-        for (idx = 0U; idx < pool->bmp_size; idx++) {
-                /* Find the first occurrence of requested_buffs number of free
-                 * buffers. The 0th bit in bitmap represents a free buffer.
-                 */
-                for (bit_idx = ffz64(pool->bitmap[idx]); bit_idx < BITMAP_WORD_SIZE; bit_idx++) {
-                        /* Check if selected buffer is free */
-                        if ((pool->bitmap[idx] & (1U << bit_idx)) != 0U) {
-                                continue;
-                        }
-
-                        /* Declare temporary variables to be used locally in this block */
-                        uint32_t i;
-                        uint16_t tmp_bit_idx = bit_idx;
-                        uint32_t tmp_idx = idx;
-
-                        /* Check requested_buffs number of buffers availability
-                         * in memory-pool right after selected buffer
-                         */
-                        for (i = 1U; i < requested_buffs; i++) {
-                                /* Check if tmp_bit_idx is out-of-range */
-                                tmp_bit_idx++;
-                                if (tmp_bit_idx == BITMAP_WORD_SIZE) {
-                                        /* Break the loop if tmp_idx is
-                                         * out-of-range
-                                         */
-                                        tmp_idx++;
-                                        if (tmp_idx == pool->bmp_size) {
-                                                break;
-                                        }
-                                        /* Reset tmp_bit_idx */
-                                        tmp_bit_idx = 0U;
-                                }
-
-                                /* Break if selected buffer is not free */
-                                if ((pool->bitmap[tmp_idx] & (1U << tmp_bit_idx)) != 0U) {
-                                        break;
-                                }
-                        }
-
-                        /* Check if requested_buffs number of free contiguous
-                         * buffers are found in memory pool
-                         */
-                        if (i == requested_buffs) {
-                                /* Get start address of first buffer among
-                                 * selected free contiguous buffer in the
-                                 * memory pool
-                                 */
-                                memory = pool->start_addr + pool->buff_size * (idx * BITMAP_WORD_SIZE + bit_idx);
-
-                                /* Update allocation bitmaps information for
-                                 * selected buffers
-                                 */
-                                for (i = 0U; i < requested_buffs; i++) {
-                                        /* Set allocation bit in bitmap for
-                                         * this buffer
-                                         */
-                                        pool->bitmap[idx] |= (1U << bit_idx);
-
-                                        /* Set contiguity information for this
-                                         * buffer in contiguity-bitmap
-                                         */
-                                        if (i < (requested_buffs - 1U)) {
-                                                /* Set contiguity bit to 1 if
-                                                 * this buffer is not the last
-                                                 * of selected contiguous
-                                                 * buffers array
-                                                 */
-                                                pool->contiguity_bitmap[idx] |= (1U << bit_idx);
-                                        } else {
-                                                /* Set contiguity bit to 0 if
-                                                 * this buffer is the last
-                                                 * of selected contiguous
-                                                 * buffers array
-                                                 */
-                                                pool->contiguity_bitmap[idx] &= ~(1U << bit_idx);
-                                        }
-
-                                        /* Check if bit_idx is out-of-range */
-                                        bit_idx++;
-                                        if (bit_idx == BITMAP_WORD_SIZE) {
-                                                /* Increment idx */
-                                                idx++;
-                                                /* Reset bit_idx */
-                                                bit_idx = 0U;
-                                        }
-                                }
-
-                                /* Release the pool lock. */
-                                spinlock_release(&pool->spinlock);
-
-                                return memory;
-                        }
-                        /* Update bit_idx and idx */
-                        bit_idx = tmp_bit_idx;
-                        idx = tmp_idx;
-                }
-        }
-
-        /* Release the pool lock. */
-        spinlock_release(&pool->spinlock);
-
-        return (void *)NULL;
-}
-
-static void deallocate_mem(struct mem_pool *pool, const void *ptr)
-{
-        uint32_t *bitmask, *contiguity_bitmask;
-        uint32_t bmp_idx, bit_idx, buff_idx;
-
-        if ((pool != NULL) && (ptr != NULL)) {
-                /* Acquire the pool lock */
-                spinlock_obtain(&pool->spinlock);
-
-                /* Map the buffer address to its index. */
-                buff_idx = (ptr - pool->start_addr) / pool->buff_size;
-
-                /* De-allocate all allocated contiguous memory buffers */
-                while (buff_idx < pool->total_buffs) {
-                        /* Translate the buffer index to bitmap index. */
-                        bmp_idx = buff_idx / BITMAP_WORD_SIZE;
-                        bit_idx = buff_idx % BITMAP_WORD_SIZE;
-
-                        /* Get bitmap's reference for this buffer */
-                        bitmask = &pool->bitmap[bmp_idx];
-                        contiguity_bitmask = &pool->contiguity_bitmap[bmp_idx];
-
-                        /* Mark the buffer as free */
-                        if ((*bitmask & (1U << bit_idx)) != 0U) {
-                                *bitmask ^= (1U << bit_idx);
-                        } else {
-                                break;
-                        }
-
-                        /* Reset the Contiguity bit of buffer */
-                        if ((*contiguity_bitmask & (1U << bit_idx)) != 0U) {
-                                *contiguity_bitmask ^= (1U << bit_idx);
-                        } else {
-                                break;
-                        }
-
-                        /* Increment buff_idx */
-                        buff_idx++;
-                }
-
-                /* Release the pool lock. */
-                spinlock_release(&pool->spinlock);
-        }
-}
-
-/*
- * The return address will be PAGE_SIZE aligned if 'num_bytes' is greater
- * than PAGE_SIZE.
- */
-void *malloc(uint32_t num_bytes)
-{
-        void *memory = NULL;
-
-        /* Check if bytes requested extend page-size */
-        if (num_bytes < PAGE_SIZE) {
-                /*
-                 * Request memory allocation from smaller segmented memory pool
-                 */
-                memory = allocate_mem(&memory_pool, num_bytes);
-        }
-
-        /* Check if memory allocation is successful */
-        if (memory == NULL) {
-                pr_err("%s: failed to alloc 0x%x Bytes", __func__, num_bytes);
-        }
-
-        /* Return memory pointer to caller */
-        return memory;
-}
-
-void *calloc(uint32_t num_elements, uint32_t element_size)
-{
-        void *memory = malloc(num_elements * element_size);
-
-        /* Determine if memory was allocated */
-        if (memory != NULL) {
-                /* Zero all the memory */
-                (void)memset(memory, 0U, num_elements * element_size);
-        }
-
-        /* Return pointer to memory */
-        return memory;
-}
-
-void free(const void *ptr)
-{
-        /* Check if ptr belongs to 16-Bytes aligned Memory Pool */
-        if ((memory_pool.start_addr < ptr) &&
-                (ptr < (memory_pool.start_addr + memory_pool.total_buffs * memory_pool.buff_size))) {
-                /* Free buffer in 16-Bytes aligned Memory Pool */
-                deallocate_mem(&memory_pool, ptr);
-        }
-}
 
 static inline void memcpy_erms(void *d, const void *s, size_t slen)
 {
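The allocator deleted above tracked the heap with two parallel bitmaps: malloc_heap_bitmap marks which fixed-size buffers are in use, and malloc_heap_contiguity_bitmap chains the buffers of a multi-buffer allocation so that free() knows where an allocation ends. The following is a stripped-down, single-word sketch of that bookkeeping (no locking, no multi-word scan); it illustrates the idea rather than reproducing the removed ACRN implementation:

```c
#include <stdint.h>
#include <stdio.h>

#define BUFF_SIZE   16U
#define TOTAL_BUFFS 32U                     /* one 32-bit bitmap word */

static uint8_t  heap[BUFF_SIZE * TOTAL_BUFFS];
static uint32_t bitmap;                     /* bit set => buffer allocated      */
static uint32_t contiguity;                 /* bit set => next buffer belongs
                                             * to the same allocation           */

static void *pool_alloc(uint32_t num_bytes)
{
	uint32_t need = (num_bytes + BUFF_SIZE - 1U) / BUFF_SIZE;
	uint32_t start, i;

	for (start = 0U; start + need <= TOTAL_BUFFS; start++) {
		/* check whether 'need' consecutive buffers are free at 'start' */
		for (i = 0U; i < need; i++) {
			if ((bitmap & (1U << (start + i))) != 0U) {
				break;      /* run interrupted, try the next start */
			}
		}
		if (i == need) {
			/* mark the run allocated and chain all but the last buffer */
			for (i = 0U; i < need; i++) {
				bitmap |= 1U << (start + i);
				if (i < (need - 1U)) {
					contiguity |= 1U << (start + i);
				}
			}
			return &heap[start * BUFF_SIZE];
		}
	}
	return NULL;
}

static void pool_free(const void *ptr)
{
	uint32_t idx = (uint32_t)((const uint8_t *)ptr - heap) / BUFF_SIZE;

	/* walk the chained buffers, clearing bits until the last one */
	while ((idx < TOTAL_BUFFS) && ((bitmap & (1U << idx)) != 0U)) {
		bitmap &= ~(1U << idx);
		if ((contiguity & (1U << idx)) == 0U) {
			break;              /* last buffer of this allocation */
		}
		contiguity &= ~(1U << idx);
		idx++;
	}
}

int main(void)
{
	void *a = pool_alloc(40U);          /* occupies 3 buffers */
	void *b = pool_alloc(10U);          /* occupies 1 buffer  */

	printf("a=%p b=%p bitmap=0x%08x\n", a, b, (unsigned)bitmap);
	pool_free(a);
	printf("after free(a): bitmap=0x%08x\n", (unsigned)bitmap);
	return 0;
}
```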
