Skip to content

Commit

Permalink
KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer
Browse files Browse the repository at this point in the history
With per-guest ID registers, the PMUVer setting from userspace
can be stored in the corresponding ID register.

No functional change intended.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
  • Loading branch information
jingzhangos authored and intel-lab-lkp committed Apr 4, 2023
1 parent f8b3d29 commit 66ece30
Show file tree
Hide file tree
Showing 4 changed files with 49 additions and 23 deletions.
11 changes: 6 additions & 5 deletions arch/arm64/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,12 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_EL1_32BIT 4
/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5
/*
* AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
* or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
* userspace for VCPUs without PMU.
*/
#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU 6

unsigned long flags;

Expand All @@ -249,11 +255,6 @@ struct kvm_arch {

cpumask_var_t supported_cpus;

struct {
u8 imp:4;
u8 unimp:4;
} dfr0_pmuver;

/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;

Expand Down
6 changes: 0 additions & 6 deletions arch/arm64/kvm/arm.c
Original file line number Diff line number Diff line change
Expand Up @@ -138,12 +138,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_arm_init_hypercalls(kvm);
kvm_arm_init_id_regs(kvm);

/*
* Initialise the default PMUver before there is a chance to
* create an actual PMU.
*/
kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();

return 0;

err_free_cpumask:
Expand Down
50 changes: 40 additions & 10 deletions arch/arm64/kvm/id_regs.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,12 @@
/*
 * Return the effective PMU version exposed to the guest.
 *
 * When the vCPU has a PMU, the version is read from the per-guest copy of
 * ID_AA64DFR0_EL1 (the PMUVer field), which userspace may have written.
 * Without a PMU, userspace may still have asked for an IMPLEMENTATION
 * DEFINED PMU (tracked via KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU); otherwise
 * report "no PMU" (0).
 *
 * NOTE(review): the scraped diff had interleaved pre-patch lines here
 * (reads of the removed arch.dfr0_pmuver field); they have been dropped so
 * the function matches the post-patch state and compiles.
 */
static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
				 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
	else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;

	return 0;
}

static u8 perfmon_to_pmuver(u8 perfmon)
Expand Down Expand Up @@ -254,10 +257,20 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
if (val)
return -EINVAL;

if (valid_pmu)
vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
else
vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
if (valid_pmu) {
mutex_lock(&vcpu->kvm->arch.config_lock);
IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
pmuver);

IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
pmuver_to_perfmon(pmuver));
mutex_unlock(&vcpu->kvm->arch.config_lock);
} else {
assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
}

return 0;
}
Expand Down Expand Up @@ -294,10 +307,19 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
if (val)
return -EINVAL;

if (valid_pmu)
vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
else
vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
if (valid_pmu) {
mutex_lock(&vcpu->kvm->arch.config_lock);
IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);

IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
perfmon_to_pmuver(perfmon));
mutex_unlock(&vcpu->kvm->arch.config_lock);
} else {
assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
}

return 0;
}
Expand Down Expand Up @@ -503,4 +525,12 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
}

IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;

/*
* Initialise the default PMUver before there is a chance to
* create an actual PMU.
*/
IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
IDREG(kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
kvm_arm_pmu_get_pmuver_limit());
}
5 changes: 3 additions & 2 deletions include/kvm/arm_pmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
/*
 * Evaluates as true when emulating PMUv3p5, and false otherwise.
 *
 * The PMU version is taken from the guest's per-VM copy of
 * ID_AA64DFR0_EL1 rather than a separate cached field.
 *
 * NOTE(review): the scraped diff retained the pre-patch #define (which
 * read the removed arch.dfr0_pmuver field) alongside the new one — a
 * macro redefinition. Only the post-patch definition is kept.
 */
#define kvm_pmu_is_3p5(vcpu)						\
	(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),		\
		   IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)

u8 kvm_arm_pmu_get_pmuver_limit(void);

Expand Down

0 comments on commit 66ece30

Please sign in to comment.