Skip to content

Commit 89acf12

Browse files
committed
KVM: x86/pmu: Update sample period in pmc_write_counter()
Update a PMC's sample period in pmc_write_counter() to deduplicate code
across all callers of pmc_write_counter().

Opportunistically move pmc_write_counter() into pmu.c now that it's doing
more work. WRMSR isn't such a hot path that an extra CALL+RET pair will be
problematic, and the order of function definitions needs to be changed
anyways, i.e. now is a convenient time to eat the churn.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20231103230541.352265-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent ec61b23 commit 89acf12

File tree

4 files changed

+28
-27
lines changed

4 files changed

+28
-27
lines changed

arch/x86/kvm/pmu.c

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,15 @@ static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
161161
return 1;
162162
}
163163

164+
static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
165+
{
166+
u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
167+
168+
if (!sample_period)
169+
sample_period = pmc_bitmask(pmc) + 1;
170+
return sample_period;
171+
}
172+
164173
static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
165174
bool exclude_user, bool exclude_kernel,
166175
bool intr)
@@ -268,6 +277,24 @@ static void pmc_stop_counter(struct kvm_pmc *pmc)
268277
}
269278
}
270279

280+
static void pmc_update_sample_period(struct kvm_pmc *pmc)
281+
{
282+
if (!pmc->perf_event || pmc->is_paused ||
283+
!is_sampling_event(pmc->perf_event))
284+
return;
285+
286+
perf_event_period(pmc->perf_event,
287+
get_sample_period(pmc, pmc->counter));
288+
}
289+
290+
void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
291+
{
292+
pmc->counter += val - pmc_read_counter(pmc);
293+
pmc->counter &= pmc_bitmask(pmc);
294+
pmc_update_sample_period(pmc);
295+
}
296+
EXPORT_SYMBOL_GPL(pmc_write_counter);
297+
271298
static int filter_cmp(const void *pa, const void *pb, u64 mask)
272299
{
273300
u64 a = *(u64 *)pa & mask;

arch/x86/kvm/pmu.h

Lines changed: 1 addition & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -74,11 +74,7 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
7474
return counter & pmc_bitmask(pmc);
7575
}
7676

77-
static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
78-
{
79-
pmc->counter += val - pmc_read_counter(pmc);
80-
pmc->counter &= pmc_bitmask(pmc);
81-
}
77+
void pmc_write_counter(struct kvm_pmc *pmc, u64 val);
8278

8379
static inline bool pmc_is_gp(struct kvm_pmc *pmc)
8480
{
@@ -128,25 +124,6 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
128124
return NULL;
129125
}
130126

131-
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
132-
{
133-
u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
134-
135-
if (!sample_period)
136-
sample_period = pmc_bitmask(pmc) + 1;
137-
return sample_period;
138-
}
139-
140-
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
141-
{
142-
if (!pmc->perf_event || pmc->is_paused ||
143-
!is_sampling_event(pmc->perf_event))
144-
return;
145-
146-
perf_event_period(pmc->perf_event,
147-
get_sample_period(pmc, pmc->counter));
148-
}
149-
150127
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
151128
{
152129
struct kvm_pmu *pmu = pmc_to_pmu(pmc);

arch/x86/kvm/svm/pmu.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,6 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
161161
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
162162
if (pmc) {
163163
pmc_write_counter(pmc, data);
164-
pmc_update_sample_period(pmc);
165164
return 0;
166165
}
167166
/* MSR_EVNTSELn */

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -437,11 +437,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
437437
!(msr & MSR_PMC_FULL_WIDTH_BIT))
438438
data = (s64)(s32)data;
439439
pmc_write_counter(pmc, data);
440-
pmc_update_sample_period(pmc);
441440
break;
442441
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
443442
pmc_write_counter(pmc, data);
444-
pmc_update_sample_period(pmc);
445443
break;
446444
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
447445
reserved_bits = pmu->reserved_bits;

0 commit comments

Comments (0)