Skip to content

Commit

Permalink
core: reimplement page_alloc to allow aligned allocations
Browse files Browse the repository at this point in the history
The page_alloc function allows us to allocate any number of consecutive
pages; however, the allocation is only guaranteed to be aligned on a
single-page boundary.

The new page_alloc implementation takes an extra bool align
parameter, which allows us to allocate N pages that will be
aligned to N * PAGE_SIZE. N must be a power of two.

This will be used on the AArch64 port of Jailhouse to support
physical address ranges from 40 to 44 bits: in these
configurations, the initial page table level may take up
multiple consecutive pages.

Signed-off-by: Antonios Motakis <antonios.motakis@huawei.com>
  • Loading branch information
tvelocity committed Jan 25, 2016
1 parent acea654 commit ea171ff
Show file tree
Hide file tree
Showing 15 changed files with 63 additions and 51 deletions.
3 changes: 2 additions & 1 deletion hypervisor/arch/arm/include/asm/paging.h
Expand Up @@ -18,7 +18,8 @@
#include <asm/processor.h>
#include <asm/sysregs.h>

#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_OFFS_MASK (PAGE_SIZE - 1)

Expand Down
3 changes: 2 additions & 1 deletion hypervisor/arch/arm/irqchip.c
Expand Up @@ -39,7 +39,8 @@ static int irqchip_init_pending(struct per_cpu *cpu_data)
struct pending_irq *pend_array;

if (cpu_data->pending_irqs == NULL) {
cpu_data->pending_irqs = pend_array = page_alloc(&mem_pool, 1);
cpu_data->pending_irqs = pend_array =
page_alloc(&mem_pool, 1, 0);
if (pend_array == NULL)
return -ENOMEM;
} else {
Expand Down
2 changes: 1 addition & 1 deletion hypervisor/arch/arm/mmu_cell.c
Expand Up @@ -58,7 +58,7 @@ unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
int arch_mmu_cell_init(struct cell *cell)
{
cell->arch.mm.root_paging = hv_paging;
cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
cell->arch.mm.root_table = page_alloc(&mem_pool, 1, 0);
if (!cell->arch.mm.root_table)
return -ENOMEM;

Expand Down
2 changes: 1 addition & 1 deletion hypervisor/arch/x86/apic.c
Expand Up @@ -170,7 +170,7 @@ int apic_init(void)
apic_ops.send_ipi = send_x2apic_ipi;
using_x2apic = true;
} else if (apicbase & APIC_BASE_EN) {
xapic_page = page_alloc(&remap_pool, 1);
xapic_page = page_alloc(&remap_pool, 1, 0);
if (!xapic_page)
return trace_error(-ENOMEM);
err = paging_create(&hv_paging_structs, XAPIC_BASE, PAGE_SIZE,
Expand Down
3 changes: 2 additions & 1 deletion hypervisor/arch/x86/include/asm/paging.h
Expand Up @@ -16,7 +16,8 @@
#include <jailhouse/types.h>
#include <asm/processor.h>

#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_OFFS_MASK (PAGE_SIZE - 1)

Expand Down
4 changes: 2 additions & 2 deletions hypervisor/arch/x86/ioapic.c
Expand Up @@ -230,7 +230,7 @@ ioapic_get_or_add_phys(const struct jailhouse_irqchip *irqchip)
if (num_phys_ioapics == IOAPIC_MAX_CHIPS)
return trace_error(NULL);

phys_ioapic->reg_base = page_alloc(&remap_pool, 1);
phys_ioapic->reg_base = page_alloc(&remap_pool, 1, 0);
if (!phys_ioapic->reg_base)
return trace_error(NULL);
err = paging_create(&hv_paging_structs, irqchip->address, PAGE_SIZE,
Expand Down Expand Up @@ -343,7 +343,7 @@ int ioapic_cell_init(struct cell *cell)
if (cell->config->num_irqchips > IOAPIC_MAX_CHIPS)
return trace_error(-ERANGE);

cell->arch.ioapics = page_alloc(&mem_pool, 1);
cell->arch.ioapics = page_alloc(&mem_pool, 1, 0);
if (!cell->arch.ioapics)
return -ENOMEM;

Expand Down
6 changes: 3 additions & 3 deletions hypervisor/arch/x86/svm.c
Expand Up @@ -269,7 +269,7 @@ int vcpu_vendor_init(void)

/* Map guest parking code (shared between cells and CPUs) */
parking_pt.root_paging = npt_paging;
parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1, 0);
if (!parked_mode_npt)
return -ENOMEM;
err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
Expand All @@ -288,7 +288,7 @@ int vcpu_vendor_init(void)
msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
} else {
if (has_avic) {
avic_page = page_alloc(&remap_pool, 1);
avic_page = page_alloc(&remap_pool, 1, 0);
if (!avic_page)
return trace_error(-ENOMEM);
}
Expand All @@ -303,7 +303,7 @@ int vcpu_vendor_cell_init(struct cell *cell)
u64 flags;

/* allocate iopm */
cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES);
cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES, 0);
if (!cell->arch.svm.iopm)
return err;

Expand Down
2 changes: 1 addition & 1 deletion hypervisor/arch/x86/vmx.c
Expand Up @@ -329,7 +329,7 @@ int vcpu_vendor_cell_init(struct cell *cell)
int err;

/* allocate io_bitmap */
cell->arch.vmx.io_bitmap = page_alloc(&mem_pool, PIO_BITMAP_PAGES);
cell->arch.vmx.io_bitmap = page_alloc(&mem_pool, PIO_BITMAP_PAGES, 0);
if (!cell->arch.vmx.io_bitmap)
return -ENOMEM;

Expand Down
12 changes: 6 additions & 6 deletions hypervisor/arch/x86/vtd.c
Expand Up @@ -429,7 +429,7 @@ static int vtd_init_ir_emulation(unsigned int unit_no, void *reg_base)
unit->irt_entries = 2 << (unit->irta & VTD_IRTA_SIZE_MASK);

size = PAGE_ALIGN(sizeof(struct vtd_irte_usage) * unit->irt_entries);
unit->irte_map = page_alloc(&mem_pool, size / PAGE_SIZE);
unit->irte_map = page_alloc(&mem_pool, size / PAGE_SIZE, 0);
if (!unit->irte_map)
return -ENOMEM;

Expand Down Expand Up @@ -465,7 +465,7 @@ int iommu_init(void)
return trace_error(-EINVAL);

int_remap_table =
page_alloc(&mem_pool, PAGES(sizeof(union vtd_irte) << n));
page_alloc(&mem_pool, PAGES(sizeof(union vtd_irte) << n), 0);
if (!int_remap_table)
return -ENOMEM;

Expand All @@ -475,11 +475,11 @@ int iommu_init(void)
if (units == 0)
return trace_error(-EINVAL);

dmar_reg_base = page_alloc(&remap_pool, units);
dmar_reg_base = page_alloc(&remap_pool, units, 0);
if (!dmar_reg_base)
return trace_error(-ENOMEM);

unit_inv_queue = page_alloc(&mem_pool, units);
unit_inv_queue = page_alloc(&mem_pool, units, 0);
if (!unit_inv_queue)
return -ENOMEM;

Expand Down Expand Up @@ -673,7 +673,7 @@ int iommu_add_pci_device(struct cell *cell, struct pci_device *device)
context_entry_table =
paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
} else {
context_entry_table = page_alloc(&mem_pool, 1);
context_entry_table = page_alloc(&mem_pool, 1, 0);
if (!context_entry_table)
goto error_nomem;
*root_entry_lo = VTD_ROOT_PRESENT |
Expand Down Expand Up @@ -741,7 +741,7 @@ int iommu_cell_init(struct cell *cell)
return trace_error(-ERANGE);

cell->arch.vtd.pg_structs.root_paging = vtd_paging;
cell->arch.vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
cell->arch.vtd.pg_structs.root_table = page_alloc(&mem_pool, 1, 0);
if (!cell->arch.vtd.pg_structs.root_table)
return -ENOMEM;

Expand Down
4 changes: 2 additions & 2 deletions hypervisor/control.c
Expand Up @@ -183,7 +183,7 @@ int cell_init(struct cell *cell)
if (cpu_set_size > PAGE_SIZE)
return trace_error(-EINVAL);
if (cpu_set_size > sizeof(cell->small_cpu_set.bitmap)) {
cpu_set = page_alloc(&mem_pool, 1);
cpu_set = page_alloc(&mem_pool, 1, 0);
if (!cpu_set)
return -ENOMEM;
} else {
Expand Down Expand Up @@ -386,7 +386,7 @@ static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
}

cell_pages = PAGES(sizeof(*cell) + cfg_total_size);
cell = page_alloc(&mem_pool, cell_pages);
cell = page_alloc(&mem_pool, cell_pages, 0);
if (!cell) {
err = -ENOMEM;
goto err_resume;
Expand Down
2 changes: 1 addition & 1 deletion hypervisor/include/jailhouse/paging.h
Expand Up @@ -182,7 +182,7 @@ extern struct paging_structures hv_paging_structs;

unsigned long paging_get_phys_invalid(pt_entry_t pte, unsigned long virt);

void *page_alloc(struct page_pool *pool, unsigned int num);
void *page_alloc(struct page_pool *pool, unsigned int num, bool aligned);
void page_free(struct page_pool *pool, void *first_page, unsigned int num);

/**
Expand Down
2 changes: 1 addition & 1 deletion hypervisor/mmio.c
Expand Up @@ -40,7 +40,7 @@ int mmio_cell_init(struct cell *cell)
pages = page_alloc(&mem_pool,
PAGES(cell->max_mmio_regions *
(sizeof(struct mmio_region_location) +
sizeof(struct mmio_region_handler))));
sizeof(struct mmio_region_handler))), 0);
if (!pages)
return -ENOMEM;

Expand Down
58 changes: 33 additions & 25 deletions hypervisor/paging.c
Expand Up @@ -91,38 +91,45 @@ static unsigned long find_next_free_page(struct page_pool *pool,
* Allocate consecutive pages from the specified pool.
* @param pool Page pool to allocate from.
* @param num Number of pages.
* @param align Pages should be aligned by num * PAGE_SIZE.
* In this case, num absolutely needs to be a power
* of 2, or a giant octopus will eat your machine.
*
* @return Pointer to first page or NULL if allocation failed.
*
* @see page_free
*/
void *page_alloc(struct page_pool *pool, unsigned int num)
void *page_alloc(struct page_pool *pool, unsigned int num, bool align)
{
unsigned long start, last, next;
unsigned int allocated;
unsigned long start, next, i;
/* the pool itself might not be aligned to our desired size */
unsigned long offset_mask = num - 1;
unsigned int offset = ((unsigned long) pool->base_address >> PAGE_SHIFT)
& offset_mask;

start = find_next_free_page(pool, 0);
if (start == INVALID_PAGE_NR)
return NULL;
next = align ? offset : 0;

restart:
for (allocated = 1, last = start; allocated < num;
allocated++, last = next) {
next = find_next_free_page(pool, last + 1);
if (next == INVALID_PAGE_NR)
return NULL;
if (next != last + 1) {
start = next;
goto restart;
}
}
while ((start = find_next_free_page(pool, next)) != INVALID_PAGE_NR) {

if (align && (start - offset) & offset_mask)
goto next_chunk; /* not aligned */

for (allocated = 0; allocated < num; allocated++)
set_bit(start + allocated, pool->used_bitmap);
for (i = start; i < start + num; i++)
if (test_bit(i, pool->used_bitmap))
goto next_chunk; /* not available */

pool->used_pages += num;
for (i = start; i < start + num; i++)
set_bit(i, pool->used_bitmap);

pool->used_pages += num;

return pool->base_address + start * PAGE_SIZE;

next_chunk:
next += align ? num - ((start - offset) & offset_mask) : 1;
}

return pool->base_address + start * PAGE_SIZE;
return NULL;
}

/**
Expand Down Expand Up @@ -208,7 +215,7 @@ static int split_hugepage(const struct paging *paging, pt_entry_t pte,
flags = paging->get_flags(pte);

sub_structs.root_paging = paging + 1;
sub_structs.root_table = page_alloc(&mem_pool, 1);
sub_structs.root_table = page_alloc(&mem_pool, 1, 0);
if (!sub_structs.root_table)
return -ENOMEM;
paging->set_next_pt(pte, paging_hvirt2phys(sub_structs.root_table));
Expand Down Expand Up @@ -277,7 +284,7 @@ int paging_create(const struct paging_structures *pg_structs,
pt = paging_phys2hvirt(
paging->get_next_pt(pte));
} else {
pt = page_alloc(&mem_pool, 1);
pt = page_alloc(&mem_pool, 1, 0);
if (!pt)
return -ENOMEM;
paging->set_next_pt(pte,
Expand Down Expand Up @@ -491,7 +498,8 @@ int paging_init(void)
set_bit(n, mem_pool.used_bitmap);
mem_pool.flags = PAGE_SCRUB_ON_FREE;

remap_pool.used_bitmap = page_alloc(&mem_pool, NUM_REMAP_BITMAP_PAGES);
remap_pool.used_bitmap =
page_alloc(&mem_pool, NUM_REMAP_BITMAP_PAGES, 0);
remap_pool.used_pages =
hypervisor_header.max_cpus * NUM_TEMPORARY_PAGES;
for (n = 0; n < remap_pool.used_pages; n++)
Expand All @@ -500,7 +508,7 @@ int paging_init(void)
arch_paging_init();

hv_paging_structs.root_paging = hv_paging;
hv_paging_structs.root_table = page_alloc(&mem_pool, 1);
hv_paging_structs.root_table = page_alloc(&mem_pool, 1, 0);
if (!hv_paging_structs.root_table)
return -ENOMEM;

Expand Down
9 changes: 5 additions & 4 deletions hypervisor/pci.c
Expand Up @@ -365,7 +365,7 @@ int pci_init(void)
end_bus = system_config->platform_info.x86.mmconfig_end_bus;
mmcfg_size = (end_bus + 1) * 256 * 4096;

pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE);
pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE, 0);
if (!pci_space)
return trace_error(-ENOMEM);

Expand Down Expand Up @@ -572,7 +572,8 @@ static int pci_add_physical_device(struct cell *cell, struct pci_device *device)
err = arch_pci_add_physical_device(cell, device);

if (!err && device->info->msix_address) {
device->msix_table = page_alloc(&remap_pool, size / PAGE_SIZE);
device->msix_table =
page_alloc(&remap_pool, size / PAGE_SIZE, 0);
if (!device->msix_table) {
err = trace_error(-ENOMEM);
goto error_remove_dev;
Expand All @@ -589,7 +590,7 @@ static int pci_add_physical_device(struct cell *cell, struct pci_device *device)
if (device->info->num_msix_vectors > PCI_EMBEDDED_MSIX_VECTS) {
pages = PAGES(sizeof(union pci_msix_vector) *
device->info->num_msix_vectors);
device->msix_vectors = page_alloc(&mem_pool, pages);
device->msix_vectors = page_alloc(&mem_pool, pages, 0);
if (!device->msix_vectors) {
err = -ENOMEM;
goto error_unmap_table;
Expand Down Expand Up @@ -661,7 +662,7 @@ int pci_cell_init(struct cell *cell)
mmio_region_register(cell, mmcfg_start, mmcfg_size,
pci_mmconfig_access_handler, NULL);

cell->pci_devices = page_alloc(&mem_pool, devlist_pages);
cell->pci_devices = page_alloc(&mem_pool, devlist_pages, 0);
if (!cell->pci_devices)
return -ENOMEM;

Expand Down
2 changes: 1 addition & 1 deletion hypervisor/pci_ivshmem.c
Expand Up @@ -448,7 +448,7 @@ int pci_ivshmem_init(struct cell *cell, struct pci_device *device)
/* this is the first endpoint, allocate a new datastructure */
for (ivp = &ivshmem_list; *ivp; ivp = &((*ivp)->next))
; /* empty loop */
*ivp = page_alloc(&mem_pool, 1);
*ivp = page_alloc(&mem_pool, 1, 0);
if (!(*ivp))
return -ENOMEM;
ivshmem_connect_cell(*ivp, device, mem, 0);
Expand Down

0 comments on commit ea171ff

Please sign in to comment.