Skip to content

Commit 4669de4

Browse files
Maxim Levitsky authored and sean-jc committed
KVM: selftests: Increase robustness of LLC cache misses in PMU counters test
Currently the PMU counters test does a single CLFLUSH{,OPT} on the loop's code, but due to speculative execution this might not cause LLC misses within the measured section. Instead of doing a single flush before the loop, do a cache flush on each iteration of the loop to confuse the prediction and ensure that at least one cache miss occurs within the measured section. Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> [sean: keep MFENCE, massage changelog] Link: https://lore.kernel.org/r/20240628005558.3835480-3-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 5bb9af0 commit 4669de4

File tree

1 file changed

+14
-10
lines changed

1 file changed

+14
-10
lines changed

tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,15 +13,18 @@
1313
/* Each iteration of the loop retires one branch instruction. */
1414
#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
1515

16-
/* Number of instructions in each loop. */
17-
#define NUM_INSNS_PER_LOOP 1
16+
/*
17+
* Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
18+
* 1 LOOP.
19+
*/
20+
#define NUM_INSNS_PER_LOOP 3
1821

1922
/*
2023
* Number of "extra" instructions that will be counted, i.e. the number of
2124
* instructions that are needed to set up the loop and then disable the
22-
* counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
25+
* counter. 2 MOV, 2 XOR, 1 WRMSR.
2326
*/
24-
#define NUM_EXTRA_INSNS 7
27+
#define NUM_EXTRA_INSNS 5
2528

2629
/* Total number of instructions retired within the measured section. */
2730
#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
@@ -144,8 +147,8 @@ static void guest_assert_event_count(uint8_t idx,
144147
* before the end of the sequence.
145148
*
146149
* If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the
147-
* start of the loop to force LLC references and misses, i.e. to allow testing
148-
* that those events actually count.
150+
* CLFUSH{,OPT} instruction on each loop iteration to force LLC references and
151+
* misses, i.e. to allow testing that those events actually count.
149152
*
150153
* If forced emulation is enabled (and specified), force emulation on a subset
151154
* of the measured code to verify that KVM correctly emulates instructions and
@@ -155,10 +158,11 @@ static void guest_assert_event_count(uint8_t idx,
155158
#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP) \
156159
do { \
157160
__asm__ __volatile__("wrmsr\n\t" \
161+
" mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
162+
"1:\n\t" \
158163
clflush "\n\t" \
159164
"mfence\n\t" \
160-
"1: mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
161-
FEP "loop .\n\t" \
165+
FEP "loop 1b\n\t" \
162166
FEP "mov %%edi, %%ecx\n\t" \
163167
FEP "xor %%eax, %%eax\n\t" \
164168
FEP "xor %%edx, %%edx\n\t" \
@@ -173,9 +177,9 @@ do { \
173177
wrmsr(pmc_msr, 0); \
174178
\
175179
if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \
176-
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP); \
180+
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP); \
177181
else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \
178-
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP); \
182+
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP); \
179183
else \
180184
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \
181185
\

0 commit comments

Comments
 (0)