Skip to content

Commit c85cdc1

Browse files
Like Xu authored and seanjc committed
KVM: x86/pmu: Move handling PERF_GLOBAL_CTRL and friends to common x86
Move the handling of GLOBAL_CTRL, GLOBAL_STATUS, and GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, from Intel PMU code to generic x86 PMU code. AMD PerfMonV2 defines three registers that have the same semantics as Intel's variants, just with different names and indices. Conveniently, since KVM virtualizes GLOBAL_CTRL on Intel only for PMU v2 and above, and AMD's version shows up in v2, KVM can use common code for the existence check as well. Signed-off-by: Like Xu <likexu@tencent.com> Co-developed-by: Sean Christopherson <seanjc@google.com> Link: https://lore.kernel.org/r/20230603011058.1038821-5-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 30dab5c commit c85cdc1

File tree

5 files changed

+86
-62
lines changed

5 files changed

+86
-62
lines changed

arch/x86/kvm/pmu.c

Lines changed: 68 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -562,6 +562,14 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
562562

563563
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
564564
{
565+
switch (msr) {
566+
case MSR_CORE_PERF_GLOBAL_STATUS:
567+
case MSR_CORE_PERF_GLOBAL_CTRL:
568+
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
569+
return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
570+
default:
571+
break;
572+
}
565573
return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
566574
static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
567575
}
@@ -577,13 +585,70 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
577585

578586
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
579587
{
580-
return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
588+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
589+
u32 msr = msr_info->index;
590+
591+
switch (msr) {
592+
case MSR_CORE_PERF_GLOBAL_STATUS:
593+
msr_info->data = pmu->global_status;
594+
break;
595+
case MSR_CORE_PERF_GLOBAL_CTRL:
596+
msr_info->data = pmu->global_ctrl;
597+
break;
598+
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
599+
msr_info->data = 0;
600+
break;
601+
default:
602+
return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
603+
}
604+
605+
return 0;
581606
}
582607

583608
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
584609
{
585-
kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
586-
return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
610+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
611+
u32 msr = msr_info->index;
612+
u64 data = msr_info->data;
613+
u64 diff;
614+
615+
switch (msr) {
616+
case MSR_CORE_PERF_GLOBAL_STATUS:
617+
if (!msr_info->host_initiated)
618+
return 1; /* RO MSR */
619+
620+
if (data & pmu->global_status_mask)
621+
return 1;
622+
623+
pmu->global_status = data;
624+
break;
625+
case MSR_CORE_PERF_GLOBAL_CTRL:
626+
if (!kvm_valid_perf_global_ctrl(pmu, data))
627+
return 1;
628+
629+
if (pmu->global_ctrl != data) {
630+
diff = pmu->global_ctrl ^ data;
631+
pmu->global_ctrl = data;
632+
reprogram_counters(pmu, diff);
633+
}
634+
break;
635+
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
636+
/*
637+
* GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
638+
* GLOBAL_STATUS, and so the set of reserved bits is the same.
639+
*/
640+
if (data & pmu->global_status_mask)
641+
return 1;
642+
643+
if (!msr_info->host_initiated)
644+
pmu->global_status &= ~data;
645+
break;
646+
default:
647+
kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
648+
return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
649+
}
650+
651+
return 0;
587652
}
588653

589654
/* refresh PMU settings. This function generally is called when underlying

arch/x86/kvm/pmu.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,20 @@ struct kvm_pmu_ops {
4141

4242
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
4343

44+
static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
45+
{
46+
/*
47+
* Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
48+
* supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
49+
* greater than zero. However, KVM only exposes and emulates the MSR
50+
* to/for the guest if the guest PMU supports at least "Architectural
51+
* Performance Monitoring Version 2".
52+
*
53+
* AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
54+
*/
55+
return pmu->version > 1;
56+
}
57+
4458
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
4559
{
4660
struct kvm_pmu *pmu = pmc_to_pmu(pmc);

arch/x86/kvm/vmx/nested.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2649,7 +2649,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
26492649
}
26502650

26512651
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2652-
intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
2652+
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
26532653
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
26542654
vmcs12->guest_ia32_perf_global_ctrl))) {
26552655
*entry_failure_code = ENTRY_FAIL_DEFAULT;
@@ -4524,7 +4524,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
45244524
vcpu->arch.pat = vmcs12->host_ia32_pat;
45254525
}
45264526
if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
4527-
intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
4527+
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
45284528
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
45294529
vmcs12->host_ia32_perf_global_ctrl));
45304530

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 2 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
100100
{
101101
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
102102

103-
if (!intel_pmu_has_perf_global_ctrl(pmu))
103+
if (!kvm_pmu_has_perf_global_ctrl(pmu))
104104
return true;
105105

106106
return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
@@ -186,11 +186,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
186186

187187
switch (msr) {
188188
case MSR_CORE_PERF_FIXED_CTR_CTRL:
189-
case MSR_CORE_PERF_GLOBAL_STATUS:
190-
case MSR_CORE_PERF_GLOBAL_CTRL:
191-
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
192-
return intel_pmu_has_perf_global_ctrl(pmu);
193-
break;
189+
return kvm_pmu_has_perf_global_ctrl(pmu);
194190
case MSR_IA32_PEBS_ENABLE:
195191
ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
196192
break;
@@ -340,15 +336,6 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
340336
case MSR_CORE_PERF_FIXED_CTR_CTRL:
341337
msr_info->data = pmu->fixed_ctr_ctrl;
342338
break;
343-
case MSR_CORE_PERF_GLOBAL_STATUS:
344-
msr_info->data = pmu->global_status;
345-
break;
346-
case MSR_CORE_PERF_GLOBAL_CTRL:
347-
msr_info->data = pmu->global_ctrl;
348-
break;
349-
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
350-
msr_info->data = 0;
351-
break;
352339
case MSR_IA32_PEBS_ENABLE:
353340
msr_info->data = pmu->pebs_enable;
354341
break;
@@ -398,36 +385,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
398385
if (pmu->fixed_ctr_ctrl != data)
399386
reprogram_fixed_counters(pmu, data);
400387
break;
401-
case MSR_CORE_PERF_GLOBAL_STATUS:
402-
if (!msr_info->host_initiated)
403-
return 1; /* RO MSR */
404-
405-
if (data & pmu->global_status_mask)
406-
return 1;
407-
408-
pmu->global_status = data;
409-
break;
410-
case MSR_CORE_PERF_GLOBAL_CTRL:
411-
if (!kvm_valid_perf_global_ctrl(pmu, data))
412-
return 1;
413-
414-
if (pmu->global_ctrl != data) {
415-
diff = pmu->global_ctrl ^ data;
416-
pmu->global_ctrl = data;
417-
reprogram_counters(pmu, diff);
418-
}
419-
break;
420-
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
421-
/*
422-
* GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
423-
* GLOBAL_STATUS, and so the set of reserved bits is the same.
424-
*/
425-
if (data & pmu->global_status_mask)
426-
return 1;
427-
428-
if (!msr_info->host_initiated)
429-
pmu->global_status &= ~data;
430-
break;
431388
case MSR_IA32_PEBS_ENABLE:
432389
if (data & pmu->pebs_enable_mask)
433390
return 1;

arch/x86/kvm/vmx/vmx.h

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -93,18 +93,6 @@ union vmx_exit_reason {
9393
u32 full;
9494
};
9595

96-
static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
97-
{
98-
/*
99-
* Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
100-
* supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
101-
* greater than zero. However, KVM only exposes and emulates the MSR
102-
* to/for the guest if the guest PMU supports at least "Architectural
103-
* Performance Monitoring Version 2".
104-
*/
105-
return pmu->version > 1;
106-
}
107-
10896
struct lbr_desc {
10997
/* Basic info about guest LBR records. */
11098
struct x86_pmu_lbr records;

0 commit comments

Comments
 (0)