hypervisor: arm64: hypervisor disable support
Add hypervisor disable support to the Jailhouse firmware. Handle
Jailhouse disable calls from the root cell, and also disable the
hypervisor in case of an error during initialization.

Signed-off-by: Antonios Motakis <antonios.motakis@huawei.com>
tvelocity committed Jan 25, 2016
1 parent dac7904 commit 5dbd248
Showing 6 changed files with 91 additions and 5 deletions.
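
Taken together, the changes below wire up the teardown path: arch_shutdown() marks every root-cell CPU, the exit handler notices the flag and calls arch_shutdown_self(), and arch_shutdown_mmu() hands EL2 back to Linux and drops into vmreturn. The generic code that turns the root cell's disable hypercall into a call to arch_shutdown() is not part of this commit; the following is only a rough, hypothetical sketch of such a dispatch (names and checks are illustrative assumptions):

/* Hypothetical sketch only -- the real dispatch lives in Jailhouse's
 * generic hypercall handling, not in this commit. */
static long handle_disable(struct per_cpu *cpu_data)
{
	/* only the root cell may disable the hypervisor */
	if (cpu_data->cell != &root_cell)
		return -EPERM;

	/* mark all root-cell CPUs; each one tears itself down on its
	 * next pass through the exit handler (see traps.c below) */
	arch_shutdown();

	return 0;
}
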
7 changes: 5 additions & 2 deletions hypervisor/arch/arm64/control.c
@@ -124,8 +124,11 @@ void arch_config_commit(struct cell *cell_added_removed)

 void arch_shutdown(void)
 {
-	trace_error(-EINVAL);
-	while (1);
+	unsigned int cpu;
+
+	/* turn off the hypervisor when we return from the exit handler */
+	for_each_cpu(cpu, root_cell.cpu_set)
+		per_cpu(cpu)->shutdown = true;
 }
 
 void arch_suspend_cpu(unsigned int cpu_id)
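
Note that arch_shutdown() only sets a flag; nothing is torn down until each CPU next passes through the exit handler. The for_each_cpu() iterator and the cpu_set it walks come from Jailhouse's generic headers; purely as an illustration, the loop is conceptually a bitmap walk like the sketch below (layout and names are assumptions, not the real macro):

/* Illustrative only: what iterating root_cell.cpu_set amounts to.
 * The real for_each_cpu() macro and struct cpu_set differ in detail. */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static void mark_cpus_for_shutdown(const unsigned long *bitmap,
				   unsigned int max_cpu_id)
{
	unsigned int cpu;

	for (cpu = 0; cpu <= max_cpu_id; cpu++)
		if (bitmap[cpu / BITS_PER_LONG] &
		    (1UL << (cpu % BITS_PER_LONG)))
			per_cpu(cpu)->shutdown = true;
}
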
33 changes: 33 additions & 0 deletions hypervisor/arch/arm64/entry.S
@@ -105,6 +105,39 @@ el2_entry:
bl entry
b .

.globl arch_shutdown_mmu
arch_shutdown_mmu:
/* x0: struct percpu* */
mov x19, x0

/* Note: no memory accesses must be done after turning the MMU off.
 * Cached data may not be synchronized with system memory, and the CPU
 * can access data bypassing the D-cache while the MMU is off.
 */

/* hand over control of EL2 back to Linux */
add x1, x19, #PERCPU_LINUX_SAVED_VECTORS
ldr x2, [x1]
msr vbar_el2, x2

/* disable the hypervisor MMU */
mrs x1, sctlr_el2
ldr x2, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
bic x1, x1, x2
msr sctlr_el2, x1
isb

msr mair_el2, xzr
msr ttbr0_el2, xzr
msr tcr_el2, xzr
isb

msr tpidr_el2, xzr

/* Call vmreturn(guest_registers) */
add x0, x19, #(PERCPU_STACK_END - 32 * 8)
b vmreturn

.globl enable_mmu_el2
enable_mmu_el2:
/*
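
The final instructions compute the address of the saved guest register frame, assumed here to be the top 32 * 8 bytes of the per-CPU stack, and branch to vmreturn, which restores those registers and erets back to EL1. On the C side this is the address that guest_regs() (used in setup.c below) evaluates to; a minimal sketch under that layout assumption:

/* Sketch only: assumes struct registers occupies the top 32 * 8 bytes of
 * the per-CPU stack, matching PERCPU_STACK_END - 32 * 8 in entry.S. */
static inline struct registers *guest_regs(struct per_cpu *cpu_data)
{
	return (struct registers *)(cpu_data->stack + sizeof(cpu_data->stack)
				    - sizeof(struct registers));
}
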
15 changes: 14 additions & 1 deletion hypervisor/arch/arm64/include/asm/paging.h
@@ -238,7 +238,20 @@ static inline void arch_paging_flush_page_tlbs(unsigned long page_addr)
 /* Used to clean the PAGE_MAP_COHERENT page table changes */
 static inline void arch_paging_flush_cpu_caches(void *addr, long size)
 {
-	/* AARCH64_TODO */
+	unsigned int cache_line_size;
+	u64 ctr;
+
+	arm_read_sysreg(CTR_EL0, ctr);
+	/* Extract the minimal cache line size */
+	cache_line_size = 4 << (ctr >> 16 & 0xf);
+
+	do {
+		/* Clean & invalidate by MVA to PoC */
+		asm volatile ("dc civac, %0" : : "r" (addr));
+		size -= cache_line_size;
+		addr += cache_line_size;
+	} while (size > 0);
+
 }
 
 #endif /* !__ASSEMBLY__ */
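
CTR_EL0.DminLine (bits [19:16]) holds log2 of the smallest data cache line size in 4-byte words, so 4 << DminLine is the line size in bytes (e.g. DminLine = 4 gives 64-byte lines), and cleaning by that stride covers every line in the range. A standalone illustration of the decoding, using a made-up CTR_EL0 value:

#include <stdio.h>

int main(void)
{
	/* made-up CTR_EL0 value with DminLine = 4 in bits [19:16] */
	unsigned long long ctr = 0x40000ULL;
	unsigned int line_size = 4 << (ctr >> 16 & 0xf);

	printf("minimal D-cache line size: %u bytes\n", line_size); /* 64 */
	return 0;
}
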
1 change: 1 addition & 0 deletions hypervisor/arch/arm64/include/asm/percpu.h
@@ -67,6 +67,7 @@ struct per_cpu {

unsigned int virt_id;
union mpidr mpidr;
bool shutdown;
} __attribute__((aligned(PAGE_SIZE)));

static inline struct per_cpu *this_cpu_data(void)
36 changes: 34 additions & 2 deletions hypervisor/arch/arm64/setup.c
@@ -87,8 +87,40 @@ int arch_unmap_device(void *vaddr, unsigned long size)
 				   PAGING_NON_COHERENT);
 }
 
+/* disable the hypervisor on the current CPU */
+void arch_shutdown_self(struct per_cpu *cpu_data)
+{
+	irqchip_cpu_shutdown(cpu_data);
+
+	/* Free the guest */
+	arm_write_sysreg(HCR_EL2, HCR_RW_BIT);
+	arm_write_sysreg(VTCR_EL2, VTCR_RES1);
+
+	/* Remove stage-2 mappings */
+	arch_cpu_tlb_flush(cpu_data);
+
+	/* TLB flush needs the cell's VMID */
+	isb();
+	arm_write_sysreg(VTTBR_EL2, 0);
+
+	/* we will restore the root cell state with the MMU turned off,
+	 * so we need to make sure it has been committed to memory */
+	arch_paging_flush_cpu_caches(guest_regs(cpu_data),
+				     sizeof(struct registers));
+	dsb(ish);
+
+	/* Return to EL1 */
+	arch_shutdown_mmu(cpu_data);
+}
+
 void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
 {
-	trace_error(-EINVAL);
-	while (1);
+	struct registers *regs = guest_regs(cpu_data);
+
+	/* Jailhouse initialization failed; return to the caller in EL1 */
+	arm_write_sysreg(ELR_EL2, regs->usr[30]);
+
+	regs->usr[0] = return_code;
+
+	arch_shutdown_self(cpu_data);
 }
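
From the EL1 caller's point of view, arch_cpu_restore() makes the failed hypervisor entry look like an ordinary function return: ELR_EL2 gets the saved link register (usr[30]) and usr[0] (x0) carries the error code, so once the shutdown path finally erets, execution resumes right after the original call with the error in hand. A hypothetical caller-side sketch (names are illustrative, not the real Linux driver API):

/* Hypothetical EL1-side view of a failed enable; hyp_entry() stands in
 * for whatever call traps into the hypervisor's entry code. */
extern int hyp_entry(unsigned int cpu_id);

static int enable_on_cpu(unsigned int cpu_id)
{
	int err = hyp_entry(cpu_id);

	/* on failure, arch_cpu_restore() brought us back here in EL1 with
	 * the hypervisor already disabled and err in x0 */
	return err;
}
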
4 changes: 4 additions & 0 deletions hypervisor/arch/arm64/traps.c
@@ -195,5 +195,9 @@ struct registers *arch_handle_exit(struct per_cpu *cpu_data,
panic_stop();
}

if (cpu_data->shutdown)
/* Won't return here. */
arch_shutdown_self(cpu_data);

vmreturn(regs);
}
