Skip to content

Commit cbb359d

Browse files
committed
KVM: x86/pmu: Move PMU reset logic to common x86 code
Move the common (or at least "ignored") aspects of resetting the vPMU to common x86 code, along with the stop/release helpers that are now used only by the common pmu.c. There is no need to manually handle fixed counters as all_valid_pmc_idx tracks both fixed and general purpose counters, and resetting the vPMU is far from a hot path, i.e. the extra bit of overhead to resolve the PMC from the index is a non-issue. Zero fixed_ctr_ctrl in common code even though it's Intel specific. Ensuring it's zero doesn't harm AMD/SVM in any way, and stopping the fixed counters via all_valid_pmc_idx, but not clearing the associated control bits, would be odd/confusing. Make the .reset() hook optional as SVM no longer needs vendor specific handling. Cc: stable@vger.kernel.org Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Link: https://lore.kernel.org/r/20231103230541.352265-2-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent e9e60c8 commit cbb359d

File tree

5 files changed

+40
-56
lines changed

5 files changed

+40
-56
lines changed

arch/x86/include/asm/kvm-x86-pmu-ops.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ KVM_X86_PMU_OP(get_msr)
2222
KVM_X86_PMU_OP(set_msr)
2323
KVM_X86_PMU_OP(refresh)
2424
KVM_X86_PMU_OP(init)
25-
KVM_X86_PMU_OP(reset)
25+
KVM_X86_PMU_OP_OPTIONAL(reset)
2626
KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
2727
KVM_X86_PMU_OP_OPTIONAL(cleanup)
2828

arch/x86/kvm/pmu.c

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -250,6 +250,24 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
250250
return true;
251251
}
252252

253+
static void pmc_release_perf_event(struct kvm_pmc *pmc)
254+
{
255+
if (pmc->perf_event) {
256+
perf_event_release_kernel(pmc->perf_event);
257+
pmc->perf_event = NULL;
258+
pmc->current_config = 0;
259+
pmc_to_pmu(pmc)->event_count--;
260+
}
261+
}
262+
263+
static void pmc_stop_counter(struct kvm_pmc *pmc)
264+
{
265+
if (pmc->perf_event) {
266+
pmc->counter = pmc_read_counter(pmc);
267+
pmc_release_perf_event(pmc);
268+
}
269+
}
270+
253271
static int filter_cmp(const void *pa, const void *pb, u64 mask)
254272
{
255273
u64 a = *(u64 *)pa & mask;
@@ -654,7 +672,27 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
654672

655673
/*
 * Reset the vPMU: stop and release the perf_event backing every valid
 * PMC (all_valid_pmc_idx tracks both fixed and general purpose
 * counters), zero the counters and global control/status state, then
 * invoke the optional vendor hook for any vendor-specific cleanup.
 */
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	/* Drop any pending reprogramming requests; everything is reset below. */
	bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
		if (!pmc)
			continue;

		pmc_stop_counter(pmc);
		pmc->counter = 0;

		/* Only general purpose counters have an event selector. */
		if (pmc_is_gp(pmc))
			pmc->eventsel = 0;
	}

	/*
	 * fixed_ctr_ctrl is Intel-specific, but zeroing it is harmless on
	 * AMD and keeps the control bits consistent with the fixed
	 * counters stopped via the loop above.
	 */
	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

	/* Optional vendor hook, e.g. Intel releases the guest LBR event. */
	static_call_cond(kvm_x86_pmu_reset)(vcpu);
}
659697

660698
void kvm_pmu_init(struct kvm_vcpu *vcpu)

arch/x86/kvm/pmu.h

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -80,24 +80,6 @@ static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
8080
pmc->counter &= pmc_bitmask(pmc);
8181
}
8282

83-
static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
84-
{
85-
if (pmc->perf_event) {
86-
perf_event_release_kernel(pmc->perf_event);
87-
pmc->perf_event = NULL;
88-
pmc->current_config = 0;
89-
pmc_to_pmu(pmc)->event_count--;
90-
}
91-
}
92-
93-
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
94-
{
95-
if (pmc->perf_event) {
96-
pmc->counter = pmc_read_counter(pmc);
97-
pmc_release_perf_event(pmc);
98-
}
99-
}
100-
10183
static inline bool pmc_is_gp(struct kvm_pmc *pmc)
10284
{
10385
return pmc->type == KVM_PMC_GP;

arch/x86/kvm/svm/pmu.c

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -233,21 +233,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
233233
}
234234
}
235235

236-
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
237-
{
238-
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
239-
int i;
240-
241-
for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
242-
struct kvm_pmc *pmc = &pmu->gp_counters[i];
243-
244-
pmc_stop_counter(pmc);
245-
pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
246-
}
247-
248-
pmu->global_ctrl = pmu->global_status = 0;
249-
}
250-
251236
struct kvm_pmu_ops amd_pmu_ops __initdata = {
252237
.hw_event_available = amd_hw_event_available,
253238
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
@@ -259,7 +244,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
259244
.set_msr = amd_pmu_set_msr,
260245
.refresh = amd_pmu_refresh,
261246
.init = amd_pmu_init,
262-
.reset = amd_pmu_reset,
263247
.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
264248
.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
265249
.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -632,26 +632,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
632632

633633
/*
 * Intel-specific vPMU reset.  Counter and control MSR reset is handled
 * by common x86 code (kvm_pmu_reset()); the only vendor-specific work
 * left is releasing the guest LBR perf_event.
 */
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	intel_pmu_release_guest_lbr_event(vcpu);
}
657637

0 commit comments

Comments
 (0)