@@ -71,6 +71,13 @@ static const uint64_t event_list[] = {
 	AMD_ZEN_BR_RETIRED,
 };
 
+struct {
+	uint64_t loads;
+	uint64_t stores;
+	uint64_t loads_stores;
+	uint64_t branches_retired;
+} pmc_results;
+
 /*
  * If we encounter a #GP during the guest PMU sanity check, then the guest
  * PMU is not functional. Inform the hypervisor via GUEST_SYNC(0).
@@ -100,13 +107,13 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 		GUEST_SYNC(-EIO);
 }
 
-static uint64_t run_and_measure_loop(uint32_t msr_base)
+static void run_and_measure_loop(uint32_t msr_base)
 {
-	uint64_t branches_retired = rdmsr(msr_base + 0);
+	const uint64_t branches_retired = rdmsr(msr_base + 0);
 
 	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
 
-	return rdmsr(msr_base + 0) - branches_retired;
+	pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
 }
 
 static void intel_guest_code(void)
@@ -117,15 +124,13 @@ static void intel_guest_code(void)
 	GUEST_SYNC(0);
 
 	for (;;) {
-		uint64_t count;
-
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
 
-		count = run_and_measure_loop(MSR_IA32_PMC0);
-		GUEST_SYNC(count);
+		run_and_measure_loop(MSR_IA32_PMC0);
+		GUEST_SYNC(0);
 	}
 }
 
@@ -141,14 +146,12 @@ static void amd_guest_code(void)
 	GUEST_SYNC(0);
 
 	for (;;) {
-		uint64_t count;
-
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
 
-		count = run_and_measure_loop(MSR_K7_PERFCTR0);
-		GUEST_SYNC(count);
+		run_and_measure_loop(MSR_K7_PERFCTR0);
+		GUEST_SYNC(0);
 	}
 }
 
@@ -168,6 +171,19 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
 	return uc.args[1];
 }
 
+static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
+{
+	uint64_t r;
+
+	memset(&pmc_results, 0, sizeof(pmc_results));
+	sync_global_to_guest(vcpu->vm, pmc_results);
+
+	r = run_vcpu_to_sync(vcpu);
+	TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);
+
+	sync_global_from_guest(vcpu->vm, pmc_results);
+}
+
 /*
  * In a nested environment or if the vPMU is disabled, the guest PMU
  * might not work as architected (accessing the PMU MSRs may raise
@@ -244,92 +260,93 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
 	return f;
 }
 
-#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count)	\
+#define ASSERT_PMC_COUNTING_INSTRUCTIONS()	\
 do {	\
-	if (count && count != NUM_BRANCHES)	\
+	uint64_t br = pmc_results.branches_retired;	\
+	\
+	if (br && br != NUM_BRANCHES)	\
 		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",	\
-			__func__, count, NUM_BRANCHES);	\
-	TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)",	\
-		    __func__, count);	\
+			__func__, br, NUM_BRANCHES);	\
+	TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)",	\
+		    __func__, br);	\
 } while (0)
 
-#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count)	\
+#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS()	\
 do {	\
-	TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)",	\
-		    __func__, count);	\
+	uint64_t br = pmc_results.branches_retired;	\
+	\
+	TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)",	\
+		    __func__, br);	\
 } while (0)
 
 static void test_without_filter(struct kvm_vcpu *vcpu)
 {
-	uint64_t count = run_vcpu_to_sync(vcpu);
+	run_vcpu_and_sync_pmc_results(vcpu);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
-static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
-				 struct kvm_pmu_event_filter *f)
+static void test_with_filter(struct kvm_vcpu *vcpu,
+			     struct kvm_pmu_event_filter *f)
 {
 	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
-	return run_vcpu_to_sync(vcpu);
+	run_vcpu_and_sync_pmc_results(vcpu);
 }
 
 static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 {
 	uint64_t event = EVENT(0x1C2, 0);
 	struct kvm_pmu_event_filter *f;
-	uint64_t count;
 
 	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
-	uint64_t count = test_with_filter(vcpu, f);
 
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
-	uint64_t count = test_with_filter(vcpu, f);
 
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
-	uint64_t count;
 
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
-	uint64_t count;
 
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 /*
@@ -458,51 +475,30 @@ static bool supports_event_mem_inst_retired(void)
 #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
 	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
 
-struct perf_counter {
-	union {
-		uint64_t raw;
-		struct {
-			uint64_t loads:22;
-			uint64_t stores:22;
-			uint64_t loads_stores:20;
-		};
-	};
-};
-
-static uint64_t masked_events_guest_test(uint32_t msr_base)
+static void masked_events_guest_test(uint32_t msr_base)
 {
-	uint64_t ld0, ld1, st0, st1, ls0, ls1;
-	struct perf_counter c;
-	int val;
-
 	/*
-	 * The acutal value of the counters don't determine the outcome of
+	 * The actual value of the counters don't determine the outcome of
 	 * the test. Only that they are zero or non-zero.
 	 */
-	ld0 = rdmsr(msr_base + 0);
-	st0 = rdmsr(msr_base + 1);
-	ls0 = rdmsr(msr_base + 2);
+	const uint64_t loads = rdmsr(msr_base + 0);
+	const uint64_t stores = rdmsr(msr_base + 1);
+	const uint64_t loads_stores = rdmsr(msr_base + 2);
+	int val;
+
 
 	__asm__ __volatile__("movl $0, %[v];"
			     "movl %[v], %%eax;"
			     "incl %[v];"
			     : [v]"+m"(val) :: "eax");
 
-	ld1 = rdmsr(msr_base + 0);
-	st1 = rdmsr(msr_base + 1);
-	ls1 = rdmsr(msr_base + 2);
-
-	c.loads = ld1 - ld0;
-	c.stores = st1 - st0;
-	c.loads_stores = ls1 - ls0;
-
-	return c.raw;
+	pmc_results.loads = rdmsr(msr_base + 0) - loads;
+	pmc_results.stores = rdmsr(msr_base + 1) - stores;
+	pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores;
 }
 
 static void intel_masked_events_guest_code(void)
 {
-	uint64_t r;
-
 	for (;;) {
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -515,16 +511,13 @@ static void intel_masked_events_guest_code(void)
 
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);
 
-		r = masked_events_guest_test(MSR_IA32_PMC0);
-
-		GUEST_SYNC(r);
+		masked_events_guest_test(MSR_IA32_PMC0);
+		GUEST_SYNC(0);
 	}
 }
 
 static void amd_masked_events_guest_code(void)
 {
-	uint64_t r;
-
 	for (;;) {
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL1, 0);
@@ -537,26 +530,22 @@ static void amd_masked_events_guest_code(void)
 		wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);
 
-		r = masked_events_guest_test(MSR_K7_PERFCTR0);
-
-		GUEST_SYNC(r);
+		masked_events_guest_test(MSR_K7_PERFCTR0);
+		GUEST_SYNC(0);
 	}
 }
 
-static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
-						   const uint64_t masked_events[],
-						   const int nmasked_events)
+static void run_masked_events_test(struct kvm_vcpu *vcpu,
+				   const uint64_t masked_events[],
+				   const int nmasked_events)
 {
 	struct kvm_pmu_event_filter *f;
-	struct perf_counter r;
 
 	f = create_pmu_event_filter(masked_events, nmasked_events,
				    KVM_PMU_EVENT_ALLOW,
				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
-	r.raw = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
-
-	return r;
 }
 
 /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
@@ -681,7 +670,6 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
				    int nevents)
 {
 	int ntests = ARRAY_SIZE(test_cases);
-	struct perf_counter c;
 	int i, n;
 
 	for (i = 0; i < ntests; i++) {
@@ -693,13 +681,15 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
 
 		n = append_test_events(test, events, nevents);
 
-		c = run_masked_events_test(vcpu, events, n);
-		TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) &&
-			    bool_eq(c.stores, test->flags & ALLOW_STORES) &&
-			    bool_eq(c.loads_stores,
+		run_masked_events_test(vcpu, events, n);
+
+		TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&
+			    bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) &&
+			    bool_eq(pmc_results.loads_stores,
				    test->flags & ALLOW_LOADS_STORES),
-			    "%s loads: %u, stores: %u, loads + stores: %u",
-			    test->msg, c.loads, c.stores, c.loads_stores);
+			    "%s loads: %lu, stores: %lu, loads + stores: %lu",
+			    test->msg, pmc_results.loads, pmc_results.stores,
+			    pmc_results.loads_stores);
 	}
 }
 
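For readers unfamiliar with the selftest helpers used above, the sketch below illustrates the shared-global pattern this change relies on: the host zeroes a global, copies it into guest memory with sync_global_to_guest(), runs the vCPU until the guest reaches GUEST_SYNC(), and then copies the guest's writes back with sync_global_from_guest(). This is a minimal sketch, not part of the patch, assuming the tools/testing/selftests/kvm build environment and its usual helpers (vm_create_with_one_vcpu(), vcpu_run(), kvm_vm_free()); example_guest_code, shared_result, and the value 42 are hypothetical.

#include "kvm_util.h"
#include "processor.h"

/* Hypothetical global shared between host and guest (mirrors pmc_results). */
static uint64_t shared_result;

static void example_guest_code(void)
{
	shared_result = 42;	/* stand-in for reading a real PMC */
	GUEST_SYNC(0);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, example_guest_code);

	/* Zero the global and copy it into guest memory before the run... */
	shared_result = 0;
	sync_global_to_guest(vm, shared_result);

	/* Run until the guest's GUEST_SYNC() triggers an exit to userspace. */
	vcpu_run(vcpu);

	/* ...then copy the guest's writes back out and check them. */
	sync_global_from_guest(vm, shared_result);
	TEST_ASSERT(shared_result == 42, "Unexpected result: %lu", shared_result);

	kvm_vm_free(vm);
	return 0;
}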