KVM: arm64: Remove __pkvm_mark_hyp

Now that we mark memory owned by the hypervisor in the host stage-2
during __pkvm_init(), we no longer need to rely on the host to
explicitly mark the hyp sections later on.

Remove the __pkvm_mark_hyp() hypercall altogether.

Signed-off-by: Quentin Perret <qperret@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210809152448.1810400-19-qperret@google.com
Quentin Perret authored and Marc Zyngier committed Aug 11, 2021
Parent: 2c50166, commit: ad0e013
Showing 5 changed files with 1 addition and 77 deletions.
arch/arm64/include/asm/kvm_asm.h: 1 addition, 2 deletions

@@ -63,8 +63,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
-#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20

 #ifndef __ASSEMBLY__
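The renumbering above (__kvm_adjust_pc moves from 21 to 20) is forced by how these IDs are consumed: in the hyp-main.c hunk further down, HANDLE_FUNC() uses each ID as a designated-initializer index into the host_hcall[] array, so the ID space must stay dense or the table grows holes. A minimal user-space model of that dispatch pattern, with invented IDs and handler names (only the designated-initializer trick is taken from the real code):

#include <stdio.h>

/* Toy hypercall IDs, kept dense on purpose. */
enum {
        FUNC_pkvm_prot_finalize = 0,
        FUNC_kvm_adjust_pc      = 1,
        NR_FUNCS
};

typedef void (*hcall_t)(unsigned long *regs);

static void handle_pkvm_prot_finalize(unsigned long *regs) { regs[1] = 0; }
static void handle_kvm_adjust_pc(unsigned long *regs)      { regs[1] = 0; }

/* Same designated-initializer trick as HANDLE_FUNC() in hyp-main.c. */
#define HANDLE(x) [FUNC_##x] = handle_##x
static const hcall_t host_hcall_model[] = {
        HANDLE(pkvm_prot_finalize),
        HANDLE(kvm_adjust_pc),
};

static void dispatch(unsigned long id, unsigned long *regs)
{
        /* A hole in the ID space would surface here as a NULL entry. */
        if (id >= NR_FUNCS || !host_hcall_model[id]) {
                regs[0] = -1UL; /* i.e. SMCCC_RET_NOT_SUPPORTED */
                return;
        }
        host_hcall_model[id](regs);
}

int main(void)
{
        unsigned long regs[4] = { 0 };

        dispatch(FUNC_kvm_adjust_pc, regs);
        printf("x1 = %lu\n", regs[1]);
        return 0;
}

With a dense ID space, removing a hypercall is a pure deletion plus renumbering, and the dispatcher needs no extra NULL checks for gaps.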
arch/arm64/kvm/arm.c: 0 additions, 46 deletions

@@ -1954,57 +1954,11 @@ static void _kvm_host_prot_finalize(void *discard)
         WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
 }

-static inline int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
-{
-        return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
-}
-
-#define pkvm_mark_hyp_section(__section)                \
-        pkvm_mark_hyp(__pa_symbol(__section##_start),   \
-                        __pa_symbol(__section##_end))
-
 static int finalize_hyp_mode(void)
 {
-        int cpu, ret;
-
         if (!is_protected_kvm_enabled())
                 return 0;

-        ret = pkvm_mark_hyp_section(__hyp_idmap_text);
-        if (ret)
-                return ret;
-
-        ret = pkvm_mark_hyp_section(__hyp_text);
-        if (ret)
-                return ret;
-
-        ret = pkvm_mark_hyp_section(__hyp_rodata);
-        if (ret)
-                return ret;
-
-        ret = pkvm_mark_hyp_section(__hyp_bss);
-        if (ret)
-                return ret;
-
-        ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size);
-        if (ret)
-                return ret;
-
-        for_each_possible_cpu(cpu) {
-                phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]);
-                phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order());
-
-                ret = pkvm_mark_hyp(start, end);
-                if (ret)
-                        return ret;
-
-                start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu));
-                end = start + PAGE_SIZE;
-                ret = pkvm_mark_hyp(start, end);
-                if (ret)
-                        return ret;
-        }
-
         /*
          * Flip the static key upfront as that may no longer be possible
          * once the host stage 2 is installed.
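With the per-section hypercalls gone, the remainder of finalize_hyp_mode() (truncated by this hunk) reduces to flipping the static key and finalizing the host stage-2 on each CPU. A sketch of the post-patch shape, reconstructed from the surviving context; the static_branch_enable() of kvm_protected_mode_initialized and the on_each_cpu() call are not shown in the hunk above, so treat them as assumptions:

static int finalize_hyp_mode(void)
{
        if (!is_protected_kvm_enabled())
                return 0;

        /*
         * Flip the static key upfront as that may no longer be possible
         * once the host stage 2 is installed.
         */
        static_branch_enable(&kvm_protected_mode_initialized);
        on_each_cpu(_kvm_host_prot_finalize, NULL, 1);

        return 0;
}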
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h: 0 additions, 1 deletion

@@ -49,7 +49,6 @@ extern struct host_kvm host_kvm;
 extern const u8 pkvm_hyp_id;

 int __pkvm_prot_finalize(void);
-int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);

 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
arch/arm64/kvm/hyp/nvhe/hyp-main.c: 0 additions, 9 deletions

@@ -163,14 +163,6 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
 {
         cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
 }
-
-static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
-{
-        DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
-        DECLARE_REG(phys_addr_t, end, host_ctxt, 2);
-
-        cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
-}
 typedef void (*hcall_t)(struct kvm_cpu_context *);

 #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -196,7 +188,6 @@ static const hcall_t host_hcall[] = {
         HANDLE_FUNC(__pkvm_create_mappings),
         HANDLE_FUNC(__pkvm_create_private_mapping),
         HANDLE_FUNC(__pkvm_prot_finalize),
-        HANDLE_FUNC(__pkvm_mark_hyp),
 };

 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
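The deleted handler illustrates the nVHE hypercall calling convention: arguments arrive in the host's saved general-purpose registers, DECLARE_REG() pulls them out of host_ctxt by index, and the result is written back to x1 with cpu_reg(). A self-contained model of that convention, with toy struct and helper names standing in for the kernel's (only the DECLARE_REG/cpu_reg shapes are taken from the hunk above):

#include <stdio.h>

typedef unsigned long phys_addr_t;

/* Toy stand-in for struct kvm_cpu_context: just a GP register file. */
struct cpu_ctxt {
        unsigned long regs[31];
};

#define cpu_reg(ctxt, r) ((ctxt)->regs[(r)])

/* Mirrors DECLARE_REG(type, name, ctxt, r): a local loaded from a register. */
#define DECLARE_REG(type, name, ctxt, r) \
        type name = (type)cpu_reg(ctxt, r)

/* Stand-in for __pkvm_mark_hyp(); returns 0 or a negative errno. */
static int mark_range(phys_addr_t start, phys_addr_t end)
{
        return start < end ? 0 : -22; /* -EINVAL */
}

static void handle_mark_hyp(struct cpu_ctxt *host_ctxt)
{
        DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
        DECLARE_REG(phys_addr_t, end, host_ctxt, 2);

        /* The return value travels back to the host in x1. */
        cpu_reg(host_ctxt, 1) = mark_range(start, end);
}

int main(void)
{
        struct cpu_ctxt ctxt = { .regs = { 0, 0x80000000UL, 0x80200000UL } };

        handle_mark_hyp(&ctxt);
        printf("x1 = %ld\n", (long)cpu_reg(&ctxt, 1));
        return 0;
}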
arch/arm64/kvm/hyp/nvhe/mem_protect.c: 0 additions, 19 deletions

@@ -339,25 +339,6 @@ static int host_stage2_idmap(u64 addr)
         return ret;
 }

-int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
-{
-        int ret;
-
-        /*
-         * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
-         * non-persistent, so don't allow changing page ownership in MMIO range.
-         */
-        if (!range_is_memory(start, end))
-                return -EINVAL;
-
-        hyp_spin_lock(&host_kvm.lock);
-        ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
-                              start, end - start, &host_s2_pool, pkvm_hyp_id);
-        hyp_spin_unlock(&host_kvm.lock);
-
-        return ret != -EAGAIN ? ret : 0;
-}
-
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
 {
         struct kvm_vcpu_fault_info fault;
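The deleted body also records the locking and recovery pattern that ownership changes follow: updates run under host_kvm.lock and go through host_stage2_try(), whose recovery path (host_stage2_unmap_dev_all(), per the comment) tears down the non-persistent MMIO mappings to reclaim pool pages, which is why ownership changes are refused outside memory ranges; a trailing -EAGAIN is reported to the host as success so it simply faults and retries later. A hedged sketch of that try-reclaim-retry shape, with placeholder names for everything the hunk does not show:

#include <errno.h>
#include <stdio.h>

typedef int (*s2_op_t)(void *cookie);

/* Placeholder for host_stage2_unmap_dev_all(): reclaim page-table pages
 * by tearing down the non-persistent MMIO mappings. */
static int reclaim_mmio_mappings(void)
{
        return 0;
}

/* Sketch of the shape behind host_stage2_try() (reconstructed, not a quote):
 * run the page-table operation; on pool exhaustion, reclaim and retry once. */
static int host_stage2_try_sketch(s2_op_t op, void *cookie)
{
        int ret = op(cookie);

        if (ret == -ENOMEM) {
                ret = reclaim_mmio_mappings();
                if (!ret)
                        ret = op(cookie);
        }
        return ret;
}

/* Demo op: fail with -ENOMEM on the first call, succeed on the retry. */
static int set_owner_once(void *cookie)
{
        int *calls = cookie;
        return (*calls)++ == 0 ? -ENOMEM : 0;
}

int main(void)
{
        int calls = 0;

        printf("ret = %d\n", host_stage2_try_sketch(set_owner_once, &calls));
        return 0;
}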
