Commit a3e80bf
KVM: x86/pmu: Move kvm_init_pmu_capability() to pmu.c
Move kvm_init_pmu_capability() to pmu.c so that future changes can access
variables that have no business being visible outside of pmu.c.
kvm_init_pmu_capability() is called once per module load; there's zero
reason it needs to be inlined.

No functional change intended.

Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250805190526.1453366-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent d618fb4 commit a3e80bf
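The visibility argument is the crux of the changelog: a static inline helper in pmu.h can only reference symbols visible to every file that includes the header, whereas a definition in pmu.c can reach file-scoped state. A minimal sketch of the distinction; the static variable here is hypothetical, standing in for the pmu.c-private state the changelog alludes to, and is not part of this commit:

    /* pmu.c -- sketch only; "pmu_internal_state" is hypothetical. */
    static bool pmu_internal_state;        /* invisible outside pmu.c */

    void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
    {
            pmu_internal_state = true;     /* fine: same translation unit */
            /* ... */
    }

    /* pmu.h -- a bare declaration is all other files need; had the helper
     * stayed inline in the header, every includer would also have needed
     * to see pmu_internal_state.
     */
    void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);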

2 files changed: +48 additions, -46 deletions

arch/x86/kvm/pmu.c

Lines changed: 47 additions & 0 deletions
@@ -96,6 +96,53 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 #undef __KVM_X86_PMU_OP
 }
 
+void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
+{
+        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+        int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
+
+        /*
+         * Hybrid PMUs don't play nice with virtualization without careful
+         * configuration by userspace, and KVM's APIs for reporting supported
+         * vPMU features do not account for hybrid PMUs.  Disable vPMU support
+         * for hybrid PMUs until KVM gains a way to let userspace opt-in.
+         */
+        if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+                enable_pmu = false;
+
+        if (enable_pmu) {
+                perf_get_x86_pmu_capability(&kvm_pmu_cap);
+
+                /*
+                 * WARN if perf did NOT disable hardware PMU if the number of
+                 * architecturally required GP counters aren't present, i.e. if
+                 * there are a non-zero number of counters, but fewer than what
+                 * is architecturally required.
+                 */
+                if (!kvm_pmu_cap.num_counters_gp ||
+                    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
+                        enable_pmu = false;
+                else if (is_intel && !kvm_pmu_cap.version)
+                        enable_pmu = false;
+        }
+
+        if (!enable_pmu) {
+                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+                return;
+        }
+
+        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
+        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
+                                          pmu_ops->MAX_NR_GP_COUNTERS);
+        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
+                                             KVM_MAX_NR_FIXED_COUNTERS);
+
+        kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
+                perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
+        kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
+                perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+}
+
 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
 {
         struct kvm_pmu *pmu = pmc_to_pmu(pmc);
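For context on "called once per module load": the function runs on the one-time vendor init path, so inlining it buys nothing. A sketch of the presumed call site; kvm_x86_vendor_init() is the existing entry point in x86.c, but the surrounding code is elided and the exact shape shown here is an assumption, not part of this diff:

    /* x86.c (sketch, not part of this commit's diff) */
    int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
    {
            /* ... hardware checks and ops validation elided ... */
            kvm_init_pmu_capability(ops->pmu_ops); /* one-time setup at load */
            /* ... remaining one-time initialization elided ... */
            return 0;
    }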

arch/x86/kvm/pmu.h

Lines changed: 1 addition & 46 deletions
@@ -180,52 +180,7 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 extern struct x86_pmu_capability kvm_pmu_cap;
 extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
 
-static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
-{
-        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
-        int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
-
-        /*
-         * Hybrid PMUs don't play nice with virtualization without careful
-         * configuration by userspace, and KVM's APIs for reporting supported
-         * vPMU features do not account for hybrid PMUs.  Disable vPMU support
-         * for hybrid PMUs until KVM gains a way to let userspace opt-in.
-         */
-        if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
-                enable_pmu = false;
-
-        if (enable_pmu) {
-                perf_get_x86_pmu_capability(&kvm_pmu_cap);
-
-                /*
-                 * WARN if perf did NOT disable hardware PMU if the number of
-                 * architecturally required GP counters aren't present, i.e. if
-                 * there are a non-zero number of counters, but fewer than what
-                 * is architecturally required.
-                 */
-                if (!kvm_pmu_cap.num_counters_gp ||
-                    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
-                        enable_pmu = false;
-                else if (is_intel && !kvm_pmu_cap.version)
-                        enable_pmu = false;
-        }
-
-        if (!enable_pmu) {
-                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
-                return;
-        }
-
-        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
-        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
-                                          pmu_ops->MAX_NR_GP_COUNTERS);
-        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
-                                             KVM_MAX_NR_FIXED_COUNTERS);
-
-        kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
-                perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
-        kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
-                perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-}
+void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);
 
 static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
 {
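The MIN_NR_GP_COUNTERS and MAX_NR_GP_COUNTERS bounds consumed by the moved function come from the vendor's kvm_pmu_ops instance. A rough sketch of that relationship; the field names match the diff, but the struct layout is abbreviated and the initializer values are illustrative assumptions, not taken from this commit:

    /* Sketch: each vendor module supplies its counter bounds via kvm_pmu_ops.
     * Function pointers and other fields are elided.
     */
    struct kvm_pmu_ops {
            /* ... */
            const int MIN_NR_GP_COUNTERS;
            const int MAX_NR_GP_COUNTERS;
    };

    static const struct kvm_pmu_ops example_pmu_ops = {
            .MIN_NR_GP_COUNTERS = 1,   /* hypothetical architectural minimum */
            .MAX_NR_GP_COUNTERS = 8,   /* hypothetical vendor maximum */
    };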
