KVM: arm64: Add per-cpu fixmap infrastructure at EL2
Mapping pages in a guest page-table from within the pKVM hypervisor at
EL2 may require cache maintenance to ensure that the initialised page
contents are visible even to non-cacheable (e.g. MMU-off) accesses from
the guest.

In preparation for performing this maintenance at EL2, introduce a
per-CPU fixmap which allows the pKVM hypervisor to map guest pages
temporarily into its stage-1 page-table for the purposes of cache
maintenance and, in future, poisoning on the reclaim path. The use of a
fixmap avoids the need for memory allocation or locking on the map()
path.
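
For illustration, a minimal sketch of how a hypervisor-side caller might
use the new API for this kind of maintenance (hyp_fixmap_map() and
hyp_fixmap_unmap() are introduced by this patch; prepare_guest_page()
and clean_dcache_guest_page() are hypothetical stand-ins, not part of
the patch):

/*
 * Hypothetical usage sketch: clean a guest page through a temporary
 * EL2 mapping before handing it to the guest.
 */
static void prepare_guest_page(phys_addr_t phys)
{
        /* Map into this CPU's private slot: no allocation, no locks. */
        void *va = hyp_fixmap_map(phys);

        clean_dcache_guest_page(va);    /* hypothetical helper */

        /* Drop the mapping and invalidate the local TLB entry. */
        hyp_fixmap_unmap();
}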

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Quentin Perret authored and intel-lab-lkp committed Sep 14, 2022
1 parent fc7a179 commit 1b331dc
Showing 7 changed files with 116 additions and 13 deletions.
12 changes: 12 additions & 0 deletions arch/arm64/include/asm/kvm_pgtable.h
@@ -30,6 +30,8 @@ typedef u64 kvm_pte_t;
#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)

#define KVM_PHYS_INVALID (-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
        return pte & KVM_PTE_VALID;
@@ -45,6 +47,16 @@ static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
        return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
        kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

        /* With 64KiB pages, PA bits [51:48] live in PTE bits [15:12]. */
        if (PAGE_SHIFT == 16)
                pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

        return pte;
}

static inline u64 kvm_granule_shift(u32 level)
{
        /* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
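As an aside on the kvm_phys_to_pte() helper above: with 64KiB pages
(PAGE_SHIFT == 16), output-address bits [51:48] of a 52-bit physical
address cannot fit in the regular address field and are carried in PTE
bits [15:12] instead. A standalone sketch of the round-trip, with the
kernel's GENMASK()/FIELD_PREP() helpers reimplemented locally for
illustration only:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define PAGE_SHIFT         16                           /* 64KiB granule */
#define ADDR_MASK          GENMASK_ULL(47, PAGE_SHIFT)  /* KVM_PTE_ADDR_MASK */
#define ADDR_51_48         GENMASK_ULL(15, 12)          /* KVM_PTE_ADDR_51_48 */

/* Mirrors kvm_phys_to_pte(): bits [47:16] in place, [51:48] -> [15:12]. */
static uint64_t phys_to_pte(uint64_t pa)
{
        return (pa & ADDR_MASK) | (((pa >> 48) << 12) & ADDR_51_48);
}

/* Mirrors kvm_pte_to_phys() for the same configuration. */
static uint64_t pte_to_phys(uint64_t pte)
{
        return (pte & ADDR_MASK) | (((pte & ADDR_51_48) >> 12) << 48);
}

int main(void)
{
        uint64_t pa = 0xf123456780000ULL;       /* 52-bit, 64KiB-aligned */
        uint64_t pte = phys_to_pte(pa);

        printf("pa %#llx -> pte %#llx -> pa %#llx\n",
               (unsigned long long)pa, (unsigned long long)pte,
               (unsigned long long)pte_to_phys(pte));
        return 0;
}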
2 changes: 2 additions & 0 deletions arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -59,6 +59,8 @@ enum pkvm_component_id {
        PKVM_ID_HYP,
};

extern unsigned long hyp_nr_cpus;

int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
4 changes: 4 additions & 0 deletions arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -13,6 +13,10 @@
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;

int hyp_create_pcpu_fixmap(void);
void *hyp_fixmap_map(phys_addr_t phys);
void hyp_fixmap_unmap(void);

int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
int hyp_back_vmemmap(phys_addr_t back);
1 change: 0 additions & 1 deletion arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -21,7 +21,6 @@

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_mmu host_mmu;

static struct hyp_pool host_s2_pool;
94 changes: 94 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/mm.c
@@ -14,6 +14,7 @@
#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

@@ -25,6 +26,12 @@ unsigned int hyp_memblock_nr;

static u64 __io_map_base;

struct hyp_fixmap_slot {
        u64 addr;
        kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

static int __pkvm_create_mappings(unsigned long start, unsigned long size,
                                  unsigned long phys, enum kvm_pgtable_prot prot)
{
@@ -212,6 +219,93 @@ int hyp_map_vectors(void)
        return 0;
}

void *hyp_fixmap_map(phys_addr_t phys)
{
        struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
        kvm_pte_t pte, *ptep = slot->ptep;

        /*
         * Install the new physical address in this CPU's reserved slot:
         * kvm_phys_to_pte(KVM_PHYS_INVALID) yields the full set of
         * output-address bits, so the old address is cleared before the
         * new one is ORed in and the PTE marked valid.
         */
        pte = *ptep;
        pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
        pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
        WRITE_ONCE(*ptep, pte);
        dsb(nshst);

        return (void *)slot->addr;
}

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
        kvm_pte_t *ptep = slot->ptep;
        u64 addr = slot->addr;

        WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);

        /*
         * The slot is private to this CPU, so non-shareable barriers and
         * a local (non-broadcast) last-level invalidation are sufficient.
         */
        dsb(nshst);
        __tlbi_level(vale2, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
        dsb(nsh);
        isb();
}

void hyp_fixmap_unmap(void)
{
        fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}

static int __create_fixmap_slot_cb(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
                                   enum kvm_pgtable_walk_flags flag,
                                   void * const arg)
{
        struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)arg);

        if (!kvm_pte_valid(*ptep) || level != KVM_PGTABLE_MAX_LEVELS - 1)
                return -EINVAL;

        slot->addr = addr;
        slot->ptep = ptep;

        /*
         * Clear the PTE, but keep the page-table page refcount elevated to
         * prevent it from ever being freed. This lets us manipulate the PTEs
         * by hand safely without ever needing to allocate memory.
         */
        fixmap_clear_slot(slot);

        return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
        struct kvm_pgtable_walker walker = {
                .cb = __create_fixmap_slot_cb,
                .flags = KVM_PGTABLE_WALK_LEAF,
                .arg = (void *)cpu,
        };

        return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

int hyp_create_pcpu_fixmap(void)
{
        unsigned long addr, i;
        int ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
                if (ret)
                        return ret;

                /*
                 * Map a dummy page (the start of the hyp BSS) so that the
                 * walker finds a valid leaf PTE at the last level; the
                 * slot is then cleared and kept around for reuse.
                 */
                ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
                                          __hyp_pa(__hyp_bss_start), PAGE_HYP);
                if (ret)
                        return ret;

                ret = create_fixmap_slot(addr, i);
                if (ret)
                        return ret;
        }

        return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
        unsigned long start, end;
4 changes: 4 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/setup.c
@@ -321,6 +321,10 @@ void __noreturn __pkvm_init_finalise(void)
        if (ret)
                goto out;

        ret = hyp_create_pcpu_fixmap();
        if (ret)
                goto out;

        pkvm_hyp_vm_table_init(vm_table_base);
out:
        /*
12 changes: 0 additions & 12 deletions arch/arm64/kvm/hyp/pgtable.c
@@ -57,8 +57,6 @@ struct kvm_pgtable_walk_data {
        u64 end;
};

#define KVM_PHYS_INVALID (-1ULL)

static bool kvm_phys_is_valid(u64 phys)
{
        return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
@@ -122,16 +120,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level)
        return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
        kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

        if (PAGE_SHIFT == 16)
                pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

        return pte;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
        return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
