Skip to content

Commit

Permalink
From patchwork series 384755
Browse files Browse the repository at this point in the history
  • Loading branch information
Fox Snowpatch committed Dec 1, 2023
1 parent 9a15ae6 commit 64b5960
Show file tree
Hide file tree
Showing 8 changed files with 107 additions and 39 deletions.
10 changes: 7 additions & 3 deletions arch/powerpc/include/asm/kvm_book3s.h
Expand Up @@ -302,6 +302,7 @@ void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_flush_lpid(u64 lpid);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
Expand Down Expand Up @@ -593,21 +594,24 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \


KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)

/*
 * Read the guest timebase offset from the cached vcore value.
 *
 * NOTE(review): unlike kvmppc_get_dec_expires(), no nestedv2 cached-register
 * reload is done here — the vcore copy of tb_offset is used as-is.
 */
static inline u64 kvmppc_get_tb_offset(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vcore->tb_offset;
}

/*
 * Read the guest decrementer expiry (timebase units).
 *
 * For a nestedv2 guest the authoritative value may be held by the L0
 * hypervisor, so refresh the cached KVMPPC_GSID_DEC_EXPIRY_TB register
 * first. A failed reload is unexpected and only warned about; the
 * (possibly stale) cached value is still returned.
 *
 * Fix: drop the leftover reload of KVMPPC_GSID_TB_OFFSET — this accessor
 * reads dec_expires, so only the DEC_EXPIRY_TB register needs reloading.
 */
static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
	return vcpu->arch.dec_expires;
}

/*
 * Set the guest decrementer expiry and mark the corresponding nestedv2
 * guest-state register dirty so the new value is pushed to the L0
 * hypervisor before the vcpu next runs.
 *
 * Fix: drop the leftover reload of KVMPPC_GSID_TB_OFFSET — a setter must
 * not pull state back from the hypervisor; it only needs to flag
 * KVMPPC_GSID_DEC_EXPIRY_TB as dirty.
 */
static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.dec_expires = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

Expand Down
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/kvm_book3s_64.h
Expand Up @@ -682,6 +682,7 @@ void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *i
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

Expand Down
4 changes: 2 additions & 2 deletions arch/powerpc/kvm/book3s.c
Expand Up @@ -302,11 +302,11 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,

switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_DECREMENTER;
break;
case BOOK3S_IRQPRIO_EXTERNAL:
deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_EXTERNAL;
break;
case BOOK3S_IRQPRIO_SYSTEM_RESET:
Expand Down
7 changes: 6 additions & 1 deletion arch/powerpc/kvm/book3s_64_mmu_radix.c
Expand Up @@ -40,6 +40,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
unsigned long quadrant, ret = n;
bool is_load = !!to;

if (kvmhv_is_nestedv2())
return H_UNSUPPORTED;

/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
if (kvmhv_on_pseries())
return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
Expand Down Expand Up @@ -97,7 +100,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
{
int lpid = vcpu->kvm->arch.lpid;
int pid = kvmppc_get_pid(vcpu);
int pid;

/* This would cause a data segment intr so don't allow the access */
if (eaddr & (0x3FFUL << 52))
Expand All @@ -110,6 +113,8 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
/* If accessing quadrant 3 then pid is expected to be 0 */
if (((eaddr >> 62) & 0x3) == 0x3)
pid = 0;
else
pid = kvmppc_get_pid(vcpu);

eaddr &= ~(0xFFFUL << 52);

Expand Down
72 changes: 51 additions & 21 deletions arch/powerpc/kvm/book3s_hv.c
Expand Up @@ -650,7 +650,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap,
struct kvmppc_vpa *old_vpap)
{
struct kvm *kvm = vcpu->kvm;
void *va;
Expand Down Expand Up @@ -690,9 +691,8 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
kvmppc_unpin_guest_page(kvm, va, gpa, false);
va = NULL;
}
if (vpap->pinned_addr)
kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
vpap->dirty);
*old_vpap = *vpap;

vpap->gpa = gpa;
vpap->pinned_addr = va;
vpap->dirty = false;
Expand All @@ -702,24 +702,44 @@ static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)

/*
 * Process any pending VPA, DTL or SLB-shadow registration updates for
 * @vcpu, under the vpa_update_lock.
 *
 * kvmppc_update_vpa() now hands back the previously pinned area in
 * old_vpa instead of unpinning it itself; the unpin is done here, after
 * the new area is in place. For the main VPA on nestedv2, the L0 is told
 * to drop its registration (~0ull) before the old page is unpinned, and
 * given the new physical address once the new page is pinned.
 *
 * Fix: remove the stale pre-patch two-argument kvmppc_update_vpa() call
 * sites that were left fused into the body alongside the new
 * three-argument calls.
 */
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vpa old_vpa = { 0 };

	/* Fast path: nothing pending, avoid taking the lock. */
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa);
		if (old_vpa.pinned_addr) {
			/* Tell the L0 the old VPA is going away before unpinning it. */
			if (kvmhv_is_nestedv2())
				kvmhv_nestedv2_set_vpa(vcpu, ~0ull);
			kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
						old_vpa.dirty);
		}
		if (vcpu->arch.vpa.pinned_addr) {
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
			/* Register the newly pinned VPA with the L0. */
			if (kvmhv_is_nestedv2())
				kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr));
		}
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa);
		if (old_vpa.pinned_addr)
			kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
						old_vpa.dirty);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa);
		if (old_vpa.pinned_addr)
			kvmppc_unpin_guest_page(kvm, old_vpa.pinned_addr, old_vpa.gpa,
						old_vpa.dirty);
	}

	spin_unlock(&vcpu->arch.vpa_update_lock);
}

Expand Down Expand Up @@ -1597,7 +1617,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
if (!kvmhv_is_nestedv2() && (__kvmppc_get_msr_hv(vcpu) & MSR_HV)) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
Expand Down Expand Up @@ -1688,7 +1708,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
{
int i;

if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
if (!kvmhv_is_nestedv2() && unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* Guest userspace executed sc 1. This can only be
* reached by the P9 path because the old path
Expand Down Expand Up @@ -4084,6 +4104,8 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
if (rc < 0)
return -EINVAL;

kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr);

accumulate_time(vcpu, &vcpu->arch.in_guest);
rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
&trap, &i);
Expand Down Expand Up @@ -4736,13 +4758,19 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,

if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
if (xive_interrupt_pending(vcpu))
if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
&vcpu->arch.pending_exceptions) ||
xive_interrupt_pending(vcpu)) {
/*
* For nested HV, don't synthesize but always pass MER,
* the L0 will be able to optimise that more
* effectively than manipulating registers directly.
*/
if (!kvmhv_on_pseries() && (__kvmppc_get_msr_hv(vcpu) & MSR_EE))
kvmppc_inject_interrupt_hv(vcpu,
BOOK3S_INTERRUPT_EXTERNAL, 0);
} else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
&vcpu->arch.pending_exceptions)) {
lpcr |= LPCR_MER;
BOOK3S_INTERRUPT_EXTERNAL, 0);
else
lpcr |= LPCR_MER;
}
} else if (vcpu->arch.pending_exceptions ||
vcpu->arch.doorbell_request ||
Expand Down Expand Up @@ -4806,7 +4834,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* entering a nested guest in which case the decrementer is now owned
* by L2 and the L1 decrementer is provided in hdec_expires
*/
if (kvmppc_core_pending_dec(vcpu) &&
if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) &&
((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
Expand Down Expand Up @@ -4949,7 +4977,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
accumulate_time(vcpu, &vcpu->arch.hcall);

if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
if (!kvmhv_is_nestedv2() && WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* These should have been caught reflected
* into the guest by now. Final sanity check:
Expand Down Expand Up @@ -5691,10 +5719,12 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}

if (kvmhv_is_nestedv2())
if (kvmhv_is_nestedv2()) {
kvmhv_flush_lpid(kvm->arch.lpid);
plpar_guest_delete(0, kvm->arch.lpid);
else
} else {
kvmppc_free_lpid(kvm->arch.lpid);
}

kvmppc_free_pimap(kvm);
}
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv_nested.c
Expand Up @@ -503,7 +503,7 @@ void kvmhv_nested_exit(void)
}
}

static void kvmhv_flush_lpid(u64 lpid)
void kvmhv_flush_lpid(u64 lpid)
{
long rc;

Expand Down
29 changes: 29 additions & 0 deletions arch/powerpc/kvm/book3s_hv_nestedv2.c
Expand Up @@ -855,6 +855,35 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);

/**
 * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
 * @vcpu: vcpu
 * @vpa: L1 logical real address
 *
 * Sends the KVMPPC_GSID_VPA guest-state element carrying @vpa to the L0
 * hypervisor. Callers pass ~0ull to unregister the VPA (see the VPA
 * teardown path in kvmppc_update_vpas()).
 *
 * Return: 0 on success, a negative error code on failure.
 */
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
{
struct kvmhv_nestedv2_io *io;
struct kvmppc_gs_buff *gsb;
int rc = 0;

/* Borrow the vcpu_run_input buffer for this one-off request. */
io = &vcpu->arch.nestedv2_io;
gsb = io->vcpu_run_input;

kvmppc_gsb_reset(gsb);
rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
if (rc < 0)
goto out;

rc = kvmppc_gsb_send(gsb, 0);
if (rc < 0)
pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);

/* Leave the shared buffer empty for its next user. */
out:
kvmppc_gsb_reset(gsb);
return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);

/**
* kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
* @vcpu: vcpu
Expand Down
21 changes: 10 additions & 11 deletions arch/powerpc/kvm/emulate_loadstore.c
Expand Up @@ -93,7 +93,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
Expand All @@ -112,7 +111,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
op.reg, size, !instr_byte_swap);

if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

break;
}
Expand All @@ -132,7 +131,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
KVM_MMIO_REG_FPR|op.reg, size, 1);

if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

break;
#endif
Expand Down Expand Up @@ -224,16 +223,17 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
break;
}
#endif
case STORE:
/* if need byte reverse, op.val has been reversed by
* analyse_instr().
*/
emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
case STORE: {
int instr_byte_swap = op.type & BYTEREV;

emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
size, !instr_byte_swap);

if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

break;
}
#ifdef CONFIG_PPC_FPU
case STORE_FP:
if (kvmppc_check_fp_disabled(vcpu))
Expand All @@ -254,7 +254,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
kvmppc_get_fpr(vcpu, op.reg), size, 1);

if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

break;
#endif
Expand Down Expand Up @@ -358,7 +358,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}

trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);

/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
Expand Down

0 comments on commit 64b5960

Please sign in to comment.