KVM: PPC: Add vr getters and setters
Add wrappers for vr registers to prepare for supporting PAPR nested
guests.

Signed-off-by: Jordan Niethe <jpn@linux.vnet.ibm.com>
Jordan Niethe authored and intel-lab-lkp committed May 8, 2023
1 parent 297035b commit dcfb26d
Showing 2 changed files with 45 additions and 25 deletions.
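
The conversion is mechanical: every open-coded access to the vcpu's vector register file is routed through the new accessors. A minimal before/after sketch of the read-modify-write idiom the patch converts (illustrative only, condensed from the diff below):

    /* Before: direct access through the VCPU_VSX_VR() lvalue macro. */
    val.vval = VCPU_VSX_VR(vcpu, index - 32);
    val.vsxval[offset] = gpr;
    VCPU_VSX_VR(vcpu, index - 32) = val.vval;

    /* After: the same read-modify-write through the new wrappers. */
    val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
    val.vsxval[offset] = gpr;
    kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
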
arch/powerpc/include/asm/kvm_book3s.h (20 additions, 0 deletions)
@@ -444,6 +444,26 @@ static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 v
         vcpu->arch.fp.fpr[i][j] = val;
 }
 
+static inline vector128 kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i)
+{
+        return vcpu->arch.vr.vr[i];
+}
+
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 val)
+{
+        vcpu->arch.vr.vr[i] = val;
+}
+
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.vr.vscr.u[3];
+}
+
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
+{
+        vcpu->arch.vr.vscr.u[3] = val;
+}
+
 #define BOOK3S_WRAPPER_SET(reg, size) \
 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
 { \
arch/powerpc/kvm/powerpc.c (25 additions, 25 deletions)
@@ -926,9 +926,9 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
                 return;
 
         if (index >= 32) {
-                val.vval = VCPU_VSX_VR(vcpu, index - 32);
+                val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
                 val.vsxval[offset] = gpr;
-                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+                kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
         } else {
                 kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
         }
@@ -941,10 +941,10 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
         if (index >= 32) {
-                val.vval = VCPU_VSX_VR(vcpu, index - 32);
+                val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
                 val.vsxval[0] = gpr;
                 val.vsxval[1] = gpr;
-                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+                kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
         } else {
                 kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
                 kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
@@ -962,7 +962,7 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
                 val.vsx32val[1] = gpr;
                 val.vsx32val[2] = gpr;
                 val.vsx32val[3] = gpr;
-                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+                kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
         } else {
                 val.vsx32val[0] = gpr;
                 val.vsx32val[1] = gpr;
@@ -983,9 +983,9 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
                 return;
 
         if (index >= 32) {
-                val.vval = VCPU_VSX_VR(vcpu, index - 32);
+                val.vval = kvmppc_get_vsx_vr(vcpu, index - 32);
                 val.vsx32val[offset] = gpr32;
-                VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+                kvmppc_set_vsx_vr(vcpu, index - 32, val.vval);
         } else {
                 dword_offset = offset / 2;
                 word_offset = offset % 2;
@@ -1050,9 +1050,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
         if (offset == -1)
                 return;
 
-        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vval = kvmppc_get_vsx_vr(vcpu, index);
         val.vsxval[offset] = gpr;
-        VCPU_VSX_VR(vcpu, index) = val.vval;
+        kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 
 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1066,9 +1066,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
         if (offset == -1)
                 return;
 
-        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vval = kvmppc_get_vsx_vr(vcpu, index);
         val.vsx32val[offset] = gpr32;
-        VCPU_VSX_VR(vcpu, index) = val.vval;
+        kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 
 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1082,9 +1082,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
         if (offset == -1)
                 return;
 
-        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vval = kvmppc_get_vsx_vr(vcpu, index);
         val.vsx16val[offset] = gpr16;
-        VCPU_VSX_VR(vcpu, index) = val.vval;
+        kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 
 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1098,9 +1098,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
         if (offset == -1)
                 return;
 
-        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vval = kvmppc_get_vsx_vr(vcpu, index);
         val.vsx8val[offset] = gpr8;
-        VCPU_VSX_VR(vcpu, index) = val.vval;
+        kvmppc_set_vsx_vr(vcpu, index, val.vval);
 }
 #endif /* CONFIG_ALTIVEC */
 
@@ -1413,7 +1413,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
                 if (rs < 32) {
                         *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
                 } else {
-                        reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+                        reg.vval = kvmppc_get_vsx_vr(vcpu, rs - 32);
                         *val = reg.vsxval[vsx_offset];
                 }
                 break;
@@ -1433,7 +1433,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
                         reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
                         *val = reg.vsx32val[word_offset];
                 } else {
-                        reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+                        reg.vval = kvmppc_get_vsx_vr(vcpu, rs - 32);
                         *val = reg.vsx32val[vsx_offset];
                 }
                 break;
@@ -1548,7 +1548,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
         if (vmx_offset == -1)
                 return -1;
 
-        reg.vval = VCPU_VSX_VR(vcpu, index);
+        reg.vval = kvmppc_get_vsx_vr(vcpu, index);
         *val = reg.vsxval[vmx_offset];
 
         return result;
@@ -1566,7 +1566,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
         if (vmx_offset == -1)
                 return -1;
 
-        reg.vval = VCPU_VSX_VR(vcpu, index);
+        reg.vval = kvmppc_get_vsx_vr(vcpu, index);
         *val = reg.vsx32val[vmx_offset];
 
         return result;
@@ -1584,7 +1584,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
         if (vmx_offset == -1)
                 return -1;
 
-        reg.vval = VCPU_VSX_VR(vcpu, index);
+        reg.vval = kvmppc_get_vsx_vr(vcpu, index);
         *val = reg.vsx16val[vmx_offset];
 
         return result;
@@ -1602,7 +1602,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
         if (vmx_offset == -1)
                 return -1;
 
-        reg.vval = VCPU_VSX_VR(vcpu, index);
+        reg.vval = kvmppc_get_vsx_vr(vcpu, index);
         *val = reg.vsx8val[vmx_offset];
 
         return result;
@@ -1711,14 +1711,14 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                         r = -ENXIO;
                         break;
                 }
-                val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+                val.vval = kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0);
                 break;
         case KVM_REG_PPC_VSCR:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                         r = -ENXIO;
                         break;
                 }
-                val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+                val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
                 break;
         case KVM_REG_PPC_VRSAVE:
                 val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
@@ -1762,14 +1762,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                         r = -ENXIO;
                         break;
                 }
-                vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+                kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, val.vval);
                 break;
         case KVM_REG_PPC_VSCR:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                         r = -ENXIO;
                         break;
                 }
-                vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+                kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
                 break;
         case KVM_REG_PPC_VRSAVE:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
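
Routing every access through one getter/setter pair gives later patches a single interception point: for PAPR nested guests, register state may live with the host hypervisor rather than solely in vcpu->arch, and the accessors are the natural place to add lazy fetch or dirty tracking. A hypothetical sketch of that direction, with an invented hook name (mark_vr_dirty() is not part of this patch):

    /* Hypothetical future form of the setter, for illustration only. */
    static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 val)
    {
            vcpu->arch.vr.vr[i] = val;
            mark_vr_dirty(vcpu, i);  /* invented: flag changed state to the nested layer */
    }
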
