Commit
FROMLIST: KVM: arm64: Wrap the host with a stage 2
When KVM runs in protected nVHE mode, make use of a stage 2 page-table
to give the hypervisor some control over the host memory accesses. The
host stage 2 is created lazily using large block mappings if possible,
and will default to page mappings in absence of a better solution.

From this point on, memory accesses from the host to protected memory
regions (e.g. not 'owned' by the host) are fatal and lead to
hyp_panic().

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20210315143536.214621-34-qperret@google.com
Bug: 178098380
Change-Id: Id663fd6d2c183cf3a44c5677df29b8cab94f5e4e
Quentin Perret committed on Mar 17, 2021
1 parent 5a128f0, commit 68bd98a
Showing 11 changed files with 317 additions and 7 deletions.
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/spinlock.h>

struct host_kvm {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_kvm host_kvm;

int __pkvm_prot_finalize(void);
int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

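/*
 * Install the host's stage 2 on this CPU. Before protected mode has been
 * finalized there is no host stage 2 to point at yet, so fall back to
 * writing a zero vttbr_el2 (VMID 0, no table); stage 2 translation is
 * still disabled at that point since HCR_EL2.VM is clear.
 */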
static __always_inline void __load_host_stage2(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
	else
		write_sysreg(0, vttbr_el2);
}
#endif /* __KVM_NVHE_MEM_PROTECT__ */
arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -0,0 +1,246 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_cpufeature.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#include <hyp/switch.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

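/*
 * Page-table pages for the host stage 2 come from two dedicated pools:
 * host_s2_mem backs mappings of memory and is sized to cover all of RAM
 * at page granularity, while host_s2_dev backs MMIO (device) mappings
 * and may run dry, in which case device mappings are recycled (see
 * host_stage2_unmap_dev_all() below).
 */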
struct hyp_pool host_s2_mem;
struct hyp_pool host_s2_dev;

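/*
 * Allocator callbacks wired into the page-table code via host_kvm.mm_ops.
 * The pgd is allocated up front from the memory pool; intermediate table
 * pages come from whichever pool the caller of kvm_pgtable_stage2_map()
 * passes through the opaque argument.
 */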
static void *host_s2_zalloc_pages_exact(size_t size)
{
	return hyp_alloc_pages(&host_s2_mem, get_order(size));
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

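/*
 * Initialise both page-table pools from the buffers donated by the host
 * at init time, and populate the mm_ops that the page-table code will use
 * for the host stage 2.
 */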
static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(mem_pgt_pool);
	nr_pages = host_s2_mem_pgtable_pages();
	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
	if (ret)
		return ret;

	pfn = hyp_virt_to_pfn(dev_pgt_pool);
	nr_pages = host_s2_dev_pgtable_pages();
	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = hyp_get_page,
		.put_page = hyp_put_page,
	};

	return 0;
}

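/*
 * Compute the host's VTCR from the sanitised ID register values. Because
 * the host stage 2 is an identity map, the IPA size is simply the
 * supported physical address range (PARange).
 */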
static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;
	u64 mmfr0, mmfr1;

	mmfr0 = arm64_ftr_reg_id_aa64mmfr0_el1.sys_val;
	mmfr1 = arm64_ftr_reg_id_aa64mmfr1_el1.sys_val;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(mmfr0);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_kvm.arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
}

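/*
 * One-time setup, called from the hyp init path: initialise the pools and
 * the (initially empty) host stage 2 page-table. The host keeps VMID 0,
 * so no VMID allocation is involved.
 */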
int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_kvm.lock);

	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
	if (ret)
		return ret;

	ret = kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
				      &host_kvm.mm_ops);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
	mmu->arch = &host_kvm.arch;
	mmu->pgt = &host_kvm.pgt;
	mmu->vmid.vmid_gen = 0;
	mmu->vmid.vmid = 0;

	return 0;
}

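/*
 * Finalize protected mode on the calling CPU: record the host stage 2
 * configuration in this CPU's init params (flushed to PoC as they may be
 * read with the MMU off), set HCR_EL2.VM (and FWB where supported) to
 * enable stage 2 translation, then invalidate any stale TLB entries
 * established while stage 2 was off.
 */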
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		params->hcr_el2 |= HCR_FWB;
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

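/*
 * It is difficult to know upfront how much of the MMIO range the host
 * will touch, so the device pool can run out of pages. Tear down every
 * non-memory mapping to release pages back to host_s2_dev; the mappings
 * are simply re-faulted in on the next host access.
 */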
static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

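/*
 * Binary search the sorted hyp_memory memblock list for addr. On a hit,
 * *range is set to the enclosing memblock and true is returned. On a
 * miss, *range spans the gap between the two neighbouring memblocks,
 * i.e. the widest MMIO range containing addr, and false is returned.
 * For instance (illustrative addresses only), with a single memblock
 * covering [0x80000000, 0xc0000000) and addr = 0x09000000, the result
 * is the range [0, 0x80000000) and false.
 */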
static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return true;
		}
	}

	return false;
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot,
				      struct hyp_pool *pool)
{
	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
				      prot, pool);
}

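/*
 * Lazily build the identity mapping around a faulting address: memory is
 * mapped RWX from the memory pool, MMIO is mapped RW from the device
 * pool. kvm_pgtable_stage2_find_range() narrows the range returned by
 * find_mem_range() to the largest span that can be mapped without
 * conflicting with existing entries, favouring block mappings. If the
 * device pool runs out, recycle all device mappings and retry once.
 */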
static int host_stage2_idmap(u64 addr)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
	int ret;

	if (is_memory)
		prot |= KVM_PGTABLE_PROT_X;

	hyp_spin_lock(&host_kvm.lock);
	ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range);
	if (ret)
		goto unlock;

	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
	if (is_memory || ret != -ENOMEM)
		goto unlock;

	/*
	 * host_s2_mem has been provided with enough pages to cover all of
	 * memory with page granularity, so we should never hit the ENOMEM
	 * case. However, it is difficult to know how much of the MMIO range
	 * we will need to cover upfront, so we may need to 'recycle' the
	 * pages if we run out.
	 */
	ret = host_stage2_unmap_dev_all();
	if (ret)
		goto unlock;

	ret = __host_stage2_idmap(range.start, range.end, prot, pool);

unlock:
	hyp_spin_unlock(&host_kvm.lock);

	return ret;
}

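/*
 * Host stage 2 fault handler, called from the hyp trap path. HPFAR_EL2
 * holds the faulting IPA in its FIPA field (bits [43:4] carrying
 * IPA[51:12]), hence the shift left by 8 to recover the address.
 * -EAGAIN indicates a benign race (e.g. another CPU mapped the range
 * first), so the host simply retries the access; any other failure is
 * fatal.
 */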
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	if (!__get_fault_info(esr, &fault))
		hyp_panic();

	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
	ret = host_stage2_idmap(addr);
	if (ret && ret != -EAGAIN)
		hyp_panic();
}