Commit 4669de42 authored by Maxim Levitsky, committed by Sean Christopherson

KVM: selftests: Increase robustness of LLC cache misses in PMU counters test

Currently the PMU counters test does a single CLFLUSH{,OPT} on the loop's
code, but due to speculative execution this might not cause LLC misses
within the measured section.

Instead of doing a single flush before the loop, do a cache flush on each
iteration of the loop to confuse the prediction and ensure that at least
one cache miss occurs within the measured section.
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
[sean: keep MFENCE, massage changelog]
Link: https://lore.kernel.org/r/20240628005558.3835480-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 5bb9af07
...@@ -13,15 +13,18 @@ ...@@ -13,15 +13,18 @@
/* Each iteration of the loop retires one branch instruction. */ /* Each iteration of the loop retires one branch instruction. */
#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS) #define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
/* Number of instructions in each loop. */ /*
#define NUM_INSNS_PER_LOOP 1 * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
* 1 LOOP.
*/
#define NUM_INSNS_PER_LOOP 3
/* /*
* Number of "extra" instructions that will be counted, i.e. the number of * Number of "extra" instructions that will be counted, i.e. the number of
* instructions that are needed to set up the loop and then disable the * instructions that are needed to set up the loop and then disable the
* counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR. * counter. 2 MOV, 2 XOR, 1 WRMSR.
*/ */
#define NUM_EXTRA_INSNS 7 #define NUM_EXTRA_INSNS 5
/* Total number of instructions retired within the measured section. */ /* Total number of instructions retired within the measured section. */
#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS) #define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
...@@ -144,8 +147,8 @@ static void guest_assert_event_count(uint8_t idx, ...@@ -144,8 +147,8 @@ static void guest_assert_event_count(uint8_t idx,
* before the end of the sequence. * before the end of the sequence.
* *
* If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the * If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the
* start of the loop to force LLC references and misses, i.e. to allow testing * CLFUSH{,OPT} instruction on each loop iteration to force LLC references and
* that those events actually count. * misses, i.e. to allow testing that those events actually count.
* *
* If forced emulation is enabled (and specified), force emulation on a subset * If forced emulation is enabled (and specified), force emulation on a subset
* of the measured code to verify that KVM correctly emulates instructions and * of the measured code to verify that KVM correctly emulates instructions and
...@@ -155,10 +158,11 @@ static void guest_assert_event_count(uint8_t idx, ...@@ -155,10 +158,11 @@ static void guest_assert_event_count(uint8_t idx,
#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP) \ #define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP) \
do { \ do { \
__asm__ __volatile__("wrmsr\n\t" \ __asm__ __volatile__("wrmsr\n\t" \
" mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
"1:\n\t" \
clflush "\n\t" \ clflush "\n\t" \
"mfence\n\t" \ "mfence\n\t" \
"1: mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \ FEP "loop 1b\n\t" \
FEP "loop .\n\t" \
FEP "mov %%edi, %%ecx\n\t" \ FEP "mov %%edi, %%ecx\n\t" \
FEP "xor %%eax, %%eax\n\t" \ FEP "xor %%eax, %%eax\n\t" \
FEP "xor %%edx, %%edx\n\t" \ FEP "xor %%edx, %%edx\n\t" \
...@@ -173,9 +177,9 @@ do { \ ...@@ -173,9 +177,9 @@ do { \
wrmsr(pmc_msr, 0); \ wrmsr(pmc_msr, 0); \
\ \
if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \ if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP); \ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP); \
else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \ else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP); \ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP); \
else \ else \
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \
\ \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment