
Commit e9f322b

KVM: selftests: Copy full counter values from guest in PMU event filter test
Use a single struct to track all PMC event counts in the PMU filter test, and copy the full struct to/from the guest when running and measuring each guest workload. Using a common struct avoids naming conflicts, e.g. the loads/stores testcase has claimed "perf_counter", and eliminates the unnecessary truncation of the counter values when they are propagated from the guest MSRs to the host structs.

Zero the struct before running the guest workload to ensure that the test doesn't get a false pass due to consuming data from a previous run.

Link: https://lore.kernel.org/r/20230407233254.957013-6-seanjc@google.com
Reviewed-by: Aaron Lewis <aaronlewis@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent c02c744 commit e9f322b
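
For readers skimming the diff: the host-side flow this patch introduces boils down to the shared struct and helper below (a minimal sketch assembled from the hunks that follow, with explanatory comments added; sync_global_to_guest() and sync_global_from_guest() are the existing KVM selftest helpers that copy a host global variable into and out of guest memory).

struct {
	uint64_t loads;
	uint64_t stores;
	uint64_t loads_stores;
	uint64_t branches_retired;
} pmc_results;

static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
{
	uint64_t r;

	/* Zero the host copy so counts from a previous run can't leak in... */
	memset(&pmc_results, 0, sizeof(pmc_results));
	/* ...and push the zeroed struct into guest memory before running. */
	sync_global_to_guest(vcpu->vm, pmc_results);

	/* The guest now syncs only a status code; the counts live in pmc_results. */
	r = run_vcpu_to_sync(vcpu);
	TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);

	/* Pull the full 64-bit counter values back out of guest memory. */
	sync_global_from_guest(vcpu->vm, pmc_results);
}

Each testcase then reads its counts directly from pmc_results instead of receiving a (possibly truncated) value through GUEST_SYNC().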

tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

Lines changed: 80 additions & 90 deletions
@@ -71,6 +71,13 @@ static const uint64_t event_list[] = {
 	AMD_ZEN_BR_RETIRED,
 };
 
+struct {
+	uint64_t loads;
+	uint64_t stores;
+	uint64_t loads_stores;
+	uint64_t branches_retired;
+} pmc_results;
+
 /*
  * If we encounter a #GP during the guest PMU sanity check, then the guest
  * PMU is not functional. Inform the hypervisor via GUEST_SYNC(0).
@@ -100,13 +107,13 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 		GUEST_SYNC(-EIO);
 }
 
-static uint64_t run_and_measure_loop(uint32_t msr_base)
+static void run_and_measure_loop(uint32_t msr_base)
 {
-	uint64_t branches_retired = rdmsr(msr_base + 0);
+	const uint64_t branches_retired = rdmsr(msr_base + 0);
 
 	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
 
-	return rdmsr(msr_base + 0) - branches_retired;
+	pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
 }
 
 static void intel_guest_code(void)
@@ -117,15 +124,13 @@ static void intel_guest_code(void)
 	GUEST_SYNC(0);
 
 	for (;;) {
-		uint64_t count;
-
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
 
-		count = run_and_measure_loop(MSR_IA32_PMC0);
-		GUEST_SYNC(count);
+		run_and_measure_loop(MSR_IA32_PMC0);
+		GUEST_SYNC(0);
 	}
 }
 
@@ -141,14 +146,12 @@ static void amd_guest_code(void)
 	GUEST_SYNC(0);
 
 	for (;;) {
-		uint64_t count;
-
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
 
-		count = run_and_measure_loop(MSR_K7_PERFCTR0);
-		GUEST_SYNC(count);
+		run_and_measure_loop(MSR_K7_PERFCTR0);
+		GUEST_SYNC(0);
 	}
 }
 
@@ -168,6 +171,19 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
 	return uc.args[1];
 }
 
+static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
+{
+	uint64_t r;
+
+	memset(&pmc_results, 0, sizeof(pmc_results));
+	sync_global_to_guest(vcpu->vm, pmc_results);
+
+	r = run_vcpu_to_sync(vcpu);
+	TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);
+
+	sync_global_from_guest(vcpu->vm, pmc_results);
+}
+
 /*
  * In a nested environment or if the vPMU is disabled, the guest PMU
  * might not work as architected (accessing the PMU MSRs may raise
@@ -244,92 +260,93 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
 	return f;
 }
 
-#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count) \
+#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
 do { \
-	if (count && count != NUM_BRANCHES) \
+	uint64_t br = pmc_results.branches_retired; \
+	\
+	if (br && br != NUM_BRANCHES) \
 		pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
-			__func__, count, NUM_BRANCHES); \
-	TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)", \
-		    __func__, count); \
+			__func__, br, NUM_BRANCHES); \
+	TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
+		    __func__, br); \
 } while (0)
 
-#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count) \
+#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \
 do { \
-	TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)", \
-		    __func__, count); \
+	uint64_t br = pmc_results.branches_retired; \
+	\
+	TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \
+		    __func__, br); \
 } while (0)
 
 static void test_without_filter(struct kvm_vcpu *vcpu)
 {
-	uint64_t count = run_vcpu_to_sync(vcpu);
+	run_vcpu_and_sync_pmc_results(vcpu);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
-static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
-				 struct kvm_pmu_event_filter *f)
+static void test_with_filter(struct kvm_vcpu *vcpu,
+			     struct kvm_pmu_event_filter *f)
 {
 	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
-	return run_vcpu_to_sync(vcpu);
+	run_vcpu_and_sync_pmc_results(vcpu);
 }
 
 static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 {
 	uint64_t event = EVENT(0x1C2, 0);
 	struct kvm_pmu_event_filter *f;
-	uint64_t count;
 
 	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
-	uint64_t count = test_with_filter(vcpu, f);
 
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
-	uint64_t count = test_with_filter(vcpu, f);
 
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
-	uint64_t count;
 
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
-	uint64_t count;
 
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 /*
@@ -458,51 +475,30 @@ static bool supports_event_mem_inst_retired(void)
 #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
 	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
 
-struct perf_counter {
-	union {
-		uint64_t raw;
-		struct {
-			uint64_t loads:22;
-			uint64_t stores:22;
-			uint64_t loads_stores:20;
-		};
-	};
-};
-
-static uint64_t masked_events_guest_test(uint32_t msr_base)
+static void masked_events_guest_test(uint32_t msr_base)
 {
-	uint64_t ld0, ld1, st0, st1, ls0, ls1;
-	struct perf_counter c;
-	int val;
-
 	/*
-	 * The acutal value of the counters don't determine the outcome of
+	 * The actual value of the counters don't determine the outcome of
 	 * the test. Only that they are zero or non-zero.
 	 */
-	ld0 = rdmsr(msr_base + 0);
-	st0 = rdmsr(msr_base + 1);
-	ls0 = rdmsr(msr_base + 2);
+	const uint64_t loads = rdmsr(msr_base + 0);
+	const uint64_t stores = rdmsr(msr_base + 1);
+	const uint64_t loads_stores = rdmsr(msr_base + 2);
+	int val;
+
 
 	__asm__ __volatile__("movl $0, %[v];"
 			     "movl %[v], %%eax;"
 			     "incl %[v];"
 			     : [v]"+m"(val) :: "eax");
 
-	ld1 = rdmsr(msr_base + 0);
-	st1 = rdmsr(msr_base + 1);
-	ls1 = rdmsr(msr_base + 2);
-
-	c.loads = ld1 - ld0;
-	c.stores = st1 - st0;
-	c.loads_stores = ls1 - ls0;
-
-	return c.raw;
+	pmc_results.loads = rdmsr(msr_base + 0) - loads;
+	pmc_results.stores = rdmsr(msr_base + 1) - stores;
+	pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores;
 }
 
 static void intel_masked_events_guest_code(void)
 {
-	uint64_t r;
-
 	for (;;) {
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -515,16 +511,13 @@ static void intel_masked_events_guest_code(void)
 
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);
 
-		r = masked_events_guest_test(MSR_IA32_PMC0);
-
-		GUEST_SYNC(r);
+		masked_events_guest_test(MSR_IA32_PMC0);
+		GUEST_SYNC(0);
 	}
 }
 
 static void amd_masked_events_guest_code(void)
 {
-	uint64_t r;
-
 	for (;;) {
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL1, 0);
@@ -537,26 +530,22 @@ static void amd_masked_events_guest_code(void)
 		wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);
 
-		r = masked_events_guest_test(MSR_K7_PERFCTR0);
-
-		GUEST_SYNC(r);
+		masked_events_guest_test(MSR_K7_PERFCTR0);
+		GUEST_SYNC(0);
 	}
 }
 
-static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
-						  const uint64_t masked_events[],
-						  const int nmasked_events)
+static void run_masked_events_test(struct kvm_vcpu *vcpu,
+				   const uint64_t masked_events[],
+				   const int nmasked_events)
 {
 	struct kvm_pmu_event_filter *f;
-	struct perf_counter r;
 
 	f = create_pmu_event_filter(masked_events, nmasked_events,
 				    KVM_PMU_EVENT_ALLOW,
 				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
-	r.raw = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
-
-	return r;
 }
 
 /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
@@ -681,7 +670,6 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
 				    int nevents)
 {
 	int ntests = ARRAY_SIZE(test_cases);
-	struct perf_counter c;
 	int i, n;
 
 	for (i = 0; i < ntests; i++) {
@@ -693,13 +681,15 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
 
 		n = append_test_events(test, events, nevents);
 
-		c = run_masked_events_test(vcpu, events, n);
-		TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) &&
-			    bool_eq(c.stores, test->flags & ALLOW_STORES) &&
-			    bool_eq(c.loads_stores,
+		run_masked_events_test(vcpu, events, n);
+
+		TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&
+			    bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) &&
+			    bool_eq(pmc_results.loads_stores,
 				    test->flags & ALLOW_LOADS_STORES),
-			    "%s loads: %u, stores: %u, loads + stores: %u",
-			    test->msg, c.loads, c.stores, c.loads_stores);
+			    "%s loads: %lu, stores: %lu, loads + stores: %lu",
+			    test->msg, pmc_results.loads, pmc_results.stores,
+			    pmc_results.loads_stores);
 	}
 }
 
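As background for the "unnecessary truncation" noted in the commit message: the removed struct perf_counter packed three counters into 22-, 22- and 20-bit fields of a single 64-bit value so they could be returned through GUEST_SYNC(). A standalone, compilable illustration of the wrap-around that packing causes (not part of the patch; built only on the removed layout shown in the diff above):

#include <stdint.h>
#include <stdio.h>

/* Bit-field layout of the struct perf_counter removed by this patch. */
struct perf_counter {
	union {
		uint64_t raw;
		struct {
			uint64_t loads:22;
			uint64_t stores:22;
			uint64_t loads_stores:20;
		};
	};
};

int main(void)
{
	struct perf_counter c = { .raw = 0 };
	uint64_t big_count = (1ull << 22) + 5;

	/* Any count >= 2^22 is reduced modulo 2^22 when stored in a 22-bit field. */
	c.loads = big_count;

	/* Prints "stored 4194309, read back 5". */
	printf("stored %llu, read back %llu\n",
	       (unsigned long long)big_count,
	       (unsigned long long)c.loads);
	return 0;
}

With the shared pmc_results struct, the full 64-bit values are copied out of guest memory instead, so no such loss can occur.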