arm64: Mitigate CVE-2017-5715 (aka Spectre v2)
Define an alternative exit vector table. It is used if
SMCCC_ARCH_WORKAROUND_1 is available and assumes that the mitigation is
required whenever the workaround is offered.

Technically, the mitigation takes place in the monitor; its implementation
depends on the processor. Refer to [1].

Like KVM, Jailhouse invokes the monitor's mitigation on each exit: IRQs
and guest aborts.
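
Not part of this diff, but as background, a minimal sketch of how the
availability of SMCCC_ARCH_WORKAROUND_1 can be probed under SMCCC v1.1.
The helper smc_call() and the exact flow are illustrative assumptions; the
actual detection lives in the arm-common smccc_discover().

#include <stdbool.h>
#include <stdint.h>

#define SMCCC_VERSION           0x80000000  /* returns the SMCCC version  */
#define SMCCC_ARCH_FEATURES     0x80000001  /* queries optional functions */
#define SMCCC_ARCH_WORKAROUND_1 0x80008000  /* Spectre v2 firmware call   */

/* Illustrative helper: issue an SMC, x0 carries function ID and result. */
static int32_t smc_call(uint32_t fid, uint64_t arg)
{
	register uint64_t x0 asm("x0") = fid;
	register uint64_t x1 asm("x1") = arg;

	asm volatile("smc #0"
		     : "+r" (x0) : "r" (x1) : "x2", "x3", "memory");
	return (int32_t)x0;
}

static bool has_arch_workaround_1(void)
{
	/* SMCCC_ARCH_FEATURES only exists from SMCCC v1.1 on; older
	 * firmware answers SMCCC_VERSION with NOT_SUPPORTED (-1). */
	if (smc_call(SMCCC_VERSION, 0) < 0x10001)
		return false;

	/* A non-negative result means the workaround is implemented. */
	return smc_call(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) >= 0;
}

In practice the SMCCC_VERSION call itself is first probed via PSCI_FEATURES,
since pre-1.0 PSCI firmware may not tolerate unknown SMC function IDs.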

[1] https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability

Signed-off-by: Ralf Ramsauer <ralf.ramsauer@oth-regensburg.de>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
rralf authored and jan-kiszka committed Jan 4, 2019
1 parent ea924a3 commit f6a60e6
Showing 4 changed files with 52 additions and 1 deletion.
4 changes: 4 additions & 0 deletions hypervisor/arch/arm-common/include/asm/smccc.h
@@ -35,7 +35,11 @@

#define SMCCC_IS_CONV_64(function_id) !!(function_id & (1 << 30))

#ifndef __ASSEMBLY__

struct trap_context;

void smccc_discover(void);
enum trap_return handle_smc(struct trap_context *ctx);

#endif /* !__ASSEMBLY__ */
36 changes: 36 additions & 0 deletions hypervisor/arch/arm64/entry.S
@@ -15,6 +15,7 @@

#include <asm/asm-defines.h>
#include <asm/paging.h>
#include <asm/smccc.h>
#include <asm/jailhouse_hypercall.h>
#include <jailhouse/header.h>

@@ -321,6 +322,17 @@ bootstrap_vectors:
handle_vmexit_late \handler
.endm

.macro handle_vmexit_hardened handler
.align 7
handle_vmexit_early

/* Mitigate CVE 2017-5715 (aka Spectre v2) */
mov w0, #SMCCC_ARCH_WORKAROUND_1
smc #0

handle_vmexit_late \handler
.endm

/*
* These are the default vectors. They are used on early startup and if no
* Spectre v2 mitigation is available.
@@ -348,6 +360,30 @@ hyp_vectors:
ventry .


.align 11
.globl hyp_vectors_hardened
hyp_vectors_hardened:
ventry .
ventry .
ventry .
ventry .

handle_vmexit arch_el2_abt /* no mitigation, we're doomed anyway... */
ventry .
ventry .
ventry .

handle_vmexit_hardened arch_handle_trap
handle_vmexit_hardened irqchip_handle_irq
ventry .
ventry .

ventry .
ventry .
ventry .
ventry .


.pushsection .trampoline, "ax"
.globl enable_mmu_el2
enable_mmu_el2:
2 changes: 2 additions & 0 deletions hypervisor/arch/arm64/include/asm/entry.h
@@ -14,6 +14,8 @@

#include <jailhouse/percpu.h>

extern unsigned long hyp_vectors_hardened;

void enable_mmu_el2(u64 ttbr0_el2);
void __attribute__((noreturn)) shutdown_el2(struct per_cpu *cpu_data);

11 changes: 10 additions & 1 deletion hypervisor/arch/arm64/setup.c
@@ -19,6 +19,7 @@
#include <asm/entry.h>
#include <asm/irqchip.h>
#include <asm/setup.h>
#include <asm/smccc.h>

extern u8 __trampoline_start[];

@@ -60,7 +61,15 @@ int arch_cpu_init(struct per_cpu *cpu_data)
/* Setup guest traps */
arm_write_sysreg(HCR_EL2, hcr);

return arm_cpu_init(cpu_data);
err = arm_cpu_init(cpu_data);
if (err)
return err;

/* Conditionally switch to hardened vectors */
if (this_cpu_data()->smccc_has_workaround_1)
arm_write_sysreg(vbar_el2, &hyp_vectors_hardened);

return 0;
}

void __attribute__((noreturn)) arch_cpu_activate_vmm(void)
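
As background for the entry.S changes above (architectural facts, not code
taken from this commit; the two macros below are illustrative, not defined
by the repository): an AArch64 exception vector table has 16 entries of
128 bytes each, in four groups of four (sync, IRQ, FIQ, SError) for current
EL with SP_EL0, current EL with SP_ELx, lower EL using AArch64, and lower
EL using AArch32. hyp_vectors_hardened therefore only populates the
"current EL with SP_ELx, sync" slot (arch_el2_abt) and the lower-EL AArch64
sync/IRQ slots (arch_handle_trap, irqchip_handle_irq).

#define VECTOR_ENTRY_SIZE	0x80	/* 128 B per entry   -> .align 7  */
#define VECTOR_TABLE_SIZE	0x800	/* 16 entries, 2 KiB -> .align 11 */
/* VBAR_EL2 bits [10:0] are RES0, hence the 2 KiB table alignment. */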
