KVM: x86/pmu: Move pmc_idx => pmc translation helper to common code
Add a common helper for *internal* PMC lookups, and delete the ops hook
and Intel's implementation.  Keep AMD's implementation, but rename it to
amd_pmu_get_pmc() to make it somewhat more obvious that it's suited for
both KVM-internal and guest-initiated lookups.

Because KVM tracks all counters in a single bitmap, getting a counter
when iterating over a bitmap, e.g. of all valid PMCs, requires a small
amount of math that, while simple, isn't super obvious and doesn't use
the same semantics as PMC lookups from RDPMC!  Although AMD doesn't
support fixed counters, the common PMU code still behaves as if there
were a split, the high half of which just happens to always be empty.
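
For illustration, the iteration pattern that the index math serves (a
sketch mirroring the loops and the new helper in the diff below):

	int bit;

	/*
	 * One 64-bit bitmap covers both counter types:
	 *   bits 31:0  -> pmu->gp_counters[bit]
	 *   bits 63:32 -> pmu->fixed_counters[bit - 32] (always empty on AMD)
	 */
	for_each_set_bit(bit, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_pmc_idx_to_pmc(pmu, bit);

		if (!pmc)	/* e.g. a fixed counter bit on AMD */
			continue;
		/* ... operate on pmc ... */
	}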

Opportunistically add a comment to explain both what is going on and why
KVM uses a single bitmap: the boilerplate for iterating over separate
bitmaps could be handled via macros, so it's not (just) about
deduplicating code.

Link: https://lore.kernel.org/r/20231110022857.1273836-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
sean-jc committed Feb 1, 2024
1 parent be6b067 commit b31880c
Showing 5 changed files with 36 additions and 24 deletions.
1 change: 0 additions & 1 deletion arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -12,7 +12,6 @@ BUILD_BUG_ON(1)
* a NULL definition, for example if "static_call_cond()" will be used
* at the call sites.
*/
-KVM_X86_PMU_OP(pmc_idx_to_pmc)
KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
KVM_X86_PMU_OP(msr_idx_to_pmc)
KVM_X86_PMU_OP_OPTIONAL(check_rdpmc_early)
8 changes: 4 additions & 4 deletions arch/x86/kvm/pmu.c
@@ -505,7 +505,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
int bit;

for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
+		struct kvm_pmc *pmc = kvm_pmc_idx_to_pmc(pmu, bit);

if (unlikely(!pmc)) {
clear_bit(bit, pmu->reprogram_pmi);
@@ -725,7 +725,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+		pmc = kvm_pmc_idx_to_pmc(pmu, i);
if (!pmc)
continue;

@@ -801,7 +801,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
pmu->pmc_in_use, X86_PMC_IDX_MAX);

for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
-		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+		pmc = kvm_pmc_idx_to_pmc(pmu, i);

if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
pmc_stop_counter(pmc);
@@ -856,7 +856,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
int i;

for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+		pmc = kvm_pmc_idx_to_pmc(pmu, i);

if (!pmc || !pmc_event_is_allowed(pmc))
continue;
29 changes: 28 additions & 1 deletion arch/x86/kvm/pmu.h
@@ -4,6 +4,8 @@

#include <linux/nospec.h>

+#include <asm/kvm_host.h>
+
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
@@ -21,7 +23,6 @@
#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_ops {
-	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
@@ -56,6 +57,32 @@ static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
return pmu->version > 1;
}

+/*
+ * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
+ * mapped to bits 31:0 and fixed counters mapped to bits 63:32, e.g. fixed
+ * counter 0 is tracked internally via index 32.  On Intel (AMD doesn't
+ * support fixed counters), this mirrors how fixed counters are mapped to
+ * PERF_GLOBAL_CTRL and similar MSRs, i.e. tracking fixed counters at base
+ * index 32 reduces the amount of boilerplate needed to iterate over PMCs
+ * *and* simplifies common enable/disable/reset operations.
+ *
+ * WARNING! This helper is only for lookups that are initiated by KVM, it is
+ * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
+ * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
+ * for RDPMC, not by adding 32 to the fixed counter index).
+ */
+static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+	if (idx < pmu->nr_arch_gp_counters)
+		return &pmu->gp_counters[idx];
+
+	idx -= KVM_FIXED_PMC_BASE_IDX;
+	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
+		return &pmu->fixed_counters[idx];
+
+	return NULL;
+}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
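To make the WARNING above concrete, here is a sketch of the two index
spaces (bit 30 as the RDPMC fixed-counter flag comes from the RDPMC
definition; the variable names are illustrative only):

	/* Guest-initiated: RDPMC selects fixed counters via ECX bit 30. */
	bool fixed = rdpmc_ecx & BIT(30);
	unsigned int counter = rdpmc_ecx & ~BIT(30);

	/*
	 * KVM-internal: fixed counter N is tracked at bitmap index 32 + N,
	 * so kvm_pmc_idx_to_pmc() must be passed KVM_FIXED_PMC_BASE_IDX + N,
	 * never a raw RDPMC ECX value.
	 */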
7 changes: 3 additions & 4 deletions arch/x86/kvm/svm/pmu.c
@@ -25,7 +25,7 @@ enum pmu_type {
PMU_TYPE_EVNTSEL,
};

-static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
+static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
unsigned int num_counters = pmu->nr_arch_gp_counters;

@@ -70,7 +70,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
return NULL;
}

-	return amd_pmc_idx_to_pmc(pmu, idx);
+	return amd_pmu_get_pmc(pmu, idx);
}

static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
@@ -87,7 +87,7 @@ static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
-	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx);
+	return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -229,7 +229,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
}

struct kvm_pmu_ops amd_pmu_ops __initdata = {
-	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
.check_rdpmc_early = amd_check_rdpmc_early,
15 changes: 1 addition & 14 deletions arch/x86/kvm/vmx/pmu_intel.c
@@ -55,18 +55,6 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
}
}

-static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-	if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) {
-		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
-				  MSR_P6_EVNTSEL0);
-	} else {
-		u32 idx = pmc_idx - KVM_FIXED_PMC_BASE_IDX;
-
-		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
-	}
-}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
@@ -718,7 +706,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)

for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
X86_PMC_IDX_MAX) {
-		pmc = intel_pmc_idx_to_pmc(pmu, bit);
+		pmc = kvm_pmc_idx_to_pmc(pmu, bit);

if (!pmc || !pmc_speculative_in_use(pmc) ||
!pmc_is_globally_enabled(pmc) || !pmc->perf_event)
@@ -735,7 +723,6 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
-	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
.is_valid_msr = intel_is_valid_msr,
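For reference, the deleted Intel helper and the new common helper resolve
to the same counters; an equivalence sketch (assuming in-range indices):

	/*
	 * Old, MSR-relative lookups:
	 *   intel_pmc_idx_to_pmc(pmu, 3)  -> get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + 3,
	 *                                               MSR_P6_EVNTSEL0)
	 *   intel_pmc_idx_to_pmc(pmu, 33) -> get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + 1)
	 *
	 * New, direct array indexing:
	 *   kvm_pmc_idx_to_pmc(pmu, 3)    -> &pmu->gp_counters[3]
	 *   kvm_pmc_idx_to_pmc(pmu, 33)   -> &pmu->fixed_counters[1]
	 */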
