KVM: arm64: PMU: Don't use the sanitized value for PMUVer
Now, the guest gets the default PMU on the first vCPU reset
when a PMU is configured for any of the guest's vCPUs.

For a vCPU with a PMU configured, use the PMUVer of the guest's
PMU as the default value and the limit value of
ID_AA64DFR0_EL1.PMUVer, instead of its sanitized value. (The
sanitized value could be inappropriate for these on some
heterogeneous PMU systems, as only one of the PMUs on the
system can be associated with the guest; see the previous
patch for more details.)
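
To make the new derivation concrete, here is a standalone sketch
(not part of the patch) of how the per-VM limit behaves on a
hypothetical big.LITTLE system whose big-core PMU implements
PMUv3p7 (PMUVer 0x7) and whose little-core PMU implements PMUv3p1
(PMUVer 0x4); the real logic is the single min_t() line added to
kvm_arm_set_vm_pmu() in the diff below:

    #include <stdio.h>

    /* Architected ID_AA64DFR0_EL1.PMUVer field encodings */
    #define PMUVER_V3P1 0x4
    #define PMUVER_V3P5 0x6 /* highest version KVM supports */
    #define PMUVER_V3P7 0x7

    /* Mirrors: min_t(u8, arm_pmu->pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5) */
    static unsigned char vm_pmuver_limit(unsigned char guest_pmu_pmuver)
    {
            return guest_pmu_pmuver < PMUVER_V3P5 ?
                   guest_pmu_pmuver : PMUVER_V3P5;
    }

    int main(void)
    {
            /*
             * The sanitized, system-wide PMUVer would be the minimum
             * of the two PMUs (0x4), even for a guest that only ever
             * runs on the big cores.  With the per-VM limit, each
             * guest is capped by its own PMU instead.
             */
            printf("big-core guest limit:    %#x\n",
                   (unsigned)vm_pmuver_limit(PMUVER_V3P7)); /* 0x6 */
            printf("little-core guest limit: %#x\n",
                   (unsigned)vm_pmuver_limit(PMUVER_V3P1)); /* 0x4 */
            return 0;
    }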

When the PMU for the guest is changed, the PMUVer for the guest
will be reset based on the new PMU.  On heterogeneous systems,
this might end up changing a PMUVer that userspace has already
set for the guest, if userspace changes the PMUVer before using
KVM_ARM_VCPU_PMU_V3_SET_PMU.
This change isn't nice though.  Other options considered were not
updating the PMUVer even when the PMU for the guest is changed,
or setting the PMUVer to the new limit only when the current
value is larger than that limit.  The former might end up exposing
a PMUVer that KVM can't support.  The latter is inconvenient
because the default PMUVer for a PMU set by
KVM_ARM_VCPU_PMU_V3_SET_PMU would be an unknown (but supported)
value, and userspace would explicitly need to set the PMUVer for
the guest to use the host PMUVer value.
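
As a concrete illustration of that ordering caveat, here is a
hypothetical userspace sketch (not part of this patch): vcpu_fd is
assumed to be a vCPU created with the KVM_ARM_VCPU_PMU_V3 feature,
and pmu_type a host PMU id read from the PMU's sysfs "type" file.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* ID_AA64DFR0_EL1 is op0=3, op1=0, CRn=0, CRm=5, op2=0 */
    #define ID_AA64DFR0_EL1_ID ARM64_SYS_REG(3, 0, 0, 5, 0)

    static int set_pmuver_then_switch_pmu(int vcpu_fd, uint64_t dfr0,
                                          int pmu_type)
    {
            struct kvm_one_reg reg = {
                    .id   = ID_AA64DFR0_EL1_ID,
                    .addr = (uint64_t)&dfr0,
            };
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                    .attr  = KVM_ARM_VCPU_PMU_V3_SET_PMU,
                    .addr  = (uint64_t)&pmu_type,
            };

            /* 1. Write a PMUVer value into the guest's ID_AA64DFR0_EL1. */
            if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
                    return -1;

            /*
             * 2. Switch the VM to a different host PMU.  With this
             * patch, kvm_arm_set_vm_pmu() recomputes dfr0_pmuver.imp
             * from the new PMU, overriding the value written in step
             * 1; issuing the calls in the opposite order avoids the
             * surprise.
             */
            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }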

Signed-off-by: Reiji Watanabe <reijiw@google.com>
reijiw-kvm authored and intel-lab-lkp committed Feb 11, 2023
Showing 5 changed files with 29 additions and 19 deletions.
1 change: 1 addition & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -230,6 +230,7 @@ struct kvm_arch {
         struct {
                 u8 imp:4;
                 u8 unimp:4;
+                u8 imp_limit;
         } dfr0_pmuver;
 
         /* Hypercall features firmware registers' descriptor */
6 changes: 0 additions & 6 deletions arch/arm64/kvm/arm.c
@@ -164,12 +164,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         set_default_spectre(kvm);
         kvm_arm_init_hypercalls(kvm);
 
-        /*
-         * Initialise the default PMUver before there is a chance to
-         * create an actual PMU.
-         */
-        kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
-
         return 0;
 
 err_free_cpumask:
2 changes: 2 additions & 0 deletions arch/arm64/kvm/pmu-emul.c
@@ -878,6 +878,8 @@ int kvm_arm_set_vm_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
         }
 
         kvm->arch.arm_pmu = arm_pmu;
+        kvm->arch.dfr0_pmuver.imp_limit = min_t(u8, arm_pmu->pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5);
+        kvm->arch.dfr0_pmuver.imp = kvm->arch.dfr0_pmuver.imp_limit;
 
         return 0;
 }
38 changes: 26 additions & 12 deletions arch/arm64/kvm/sys_regs.c
@@ -1259,8 +1259,11 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 {
         u8 pmuver, host_pmuver;
         bool valid_pmu;
+        u64 current_val = read_id_reg(vcpu, rd);
+        int ret = -EINVAL;
 
-        host_pmuver = kvm_arm_pmu_get_pmuver_limit();
+        mutex_lock(&vcpu->kvm->lock);
+        host_pmuver = vcpu->kvm->arch.dfr0_pmuver.imp_limit;
 
         /*
          * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
@@ -1270,26 +1273,30 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
          */
         pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
         if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
-                return -EINVAL;
+                goto out;
 
         valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
 
         /* Make sure view register and PMU support do match */
         if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
-                return -EINVAL;
+                goto out;
 
         /* We can only differ with PMUver, and anything else is an error */
-        val ^= read_id_reg(vcpu, rd);
+        val ^= current_val;
         val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
         if (val)
-                return -EINVAL;
+                goto out;
 
         if (valid_pmu)
                 vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
         else
                 vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
 
-        return 0;
+        ret = 0;
+out:
+        mutex_unlock(&vcpu->kvm->lock);
+
+        return ret;
 }
 
 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
@@ -1298,8 +1305,11 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 {
         u8 perfmon, host_perfmon;
         bool valid_pmu;
+        u64 current_val = read_id_reg(vcpu, rd);
+        int ret = -EINVAL;
 
-        host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+        mutex_lock(&vcpu->kvm->lock);
+        host_perfmon = pmuver_to_perfmon(vcpu->kvm->arch.dfr0_pmuver.imp_limit);
 
         /*
          * Allow DFR0_EL1.PerfMon to be set from userspace as long as
@@ -1310,26 +1320,30 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
         perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
         if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
             (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
-                return -EINVAL;
+                goto out;
 
         valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);
 
         /* Make sure view register and PMU support do match */
         if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
-                return -EINVAL;
+                goto out;
 
         /* We can only differ with PerfMon, and anything else is an error */
-        val ^= read_id_reg(vcpu, rd);
+        val ^= current_val;
         val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
         if (val)
-                return -EINVAL;
+                goto out;
 
         if (valid_pmu)
                 vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
         else
                 vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
 
-        return 0;
+        ret = 0;
+out:
+        mutex_unlock(&vcpu->kvm->lock);
+
+        return ret;
 }
 
 /*
1 change: 0 additions & 1 deletion include/kvm/arm_pmu.h
@@ -95,7 +95,6 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 #define kvm_pmu_is_3p5(vcpu) \
         (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
 
-u8 kvm_arm_pmu_get_pmuver_limit(void);
 int kvm_arm_set_vm_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu);
 
 #else
