Commit 5bb9af07 authored by Sean Christopherson's avatar Sean Christopherson

KVM: selftests: Rework macros in PMU counters test to prep for multi-insn loop

Tweak the macros in the PMU counters test to prepare for moving the
CLFLUSH+MFENCE instructions into the loop body, to fix an issue where
a single CLFLUSH doesn't guarantee an LLC miss.

Link: https://lore.kernel.org/r/20240628005558.3835480-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent ea09ace3
...@@ -7,15 +7,25 @@ ...@@ -7,15 +7,25 @@
#include "pmu.h" #include "pmu.h"
#include "processor.h" #include "processor.h"
/* Number of LOOP instructions for the guest measurement payload. */ /* Number of iterations of the loop for the guest measurement payload. */
#define NUM_BRANCHES 10 #define NUM_LOOPS 10
/* Each iteration of the loop retires one branch instruction. */
#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
/* Number of instructions in each loop. */
#define NUM_INSNS_PER_LOOP 1
/* /*
* Number of "extra" instructions that will be counted, i.e. the number of * Number of "extra" instructions that will be counted, i.e. the number of
* instructions that are needed to set up the loop and then disabled the * instructions that are needed to set up the loop and then disable the
* counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR. * counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
*/ */
#define NUM_EXTRA_INSNS 7 #define NUM_EXTRA_INSNS 7
#define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS)
/* Total number of instructions retired within the measured section. */
#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
static uint8_t kvm_pmu_version; static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps; static bool kvm_has_perf_caps;
...@@ -100,7 +110,7 @@ static void guest_assert_event_count(uint8_t idx, ...@@ -100,7 +110,7 @@ static void guest_assert_event_count(uint8_t idx,
GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED); GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
break; break;
case INTEL_ARCH_BRANCHES_RETIRED_INDEX: case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
GUEST_ASSERT_EQ(count, NUM_BRANCHES); GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
break; break;
case INTEL_ARCH_LLC_REFERENCES_INDEX: case INTEL_ARCH_LLC_REFERENCES_INDEX:
case INTEL_ARCH_LLC_MISSES_INDEX: case INTEL_ARCH_LLC_MISSES_INDEX:
...@@ -120,7 +130,7 @@ static void guest_assert_event_count(uint8_t idx, ...@@ -120,7 +130,7 @@ static void guest_assert_event_count(uint8_t idx,
} }
sanity_checks: sanity_checks:
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); __asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
GUEST_ASSERT_EQ(_rdpmc(pmc), count); GUEST_ASSERT_EQ(_rdpmc(pmc), count);
wrmsr(pmc_msr, 0xdead); wrmsr(pmc_msr, 0xdead);
...@@ -147,7 +157,7 @@ do { \ ...@@ -147,7 +157,7 @@ do { \
__asm__ __volatile__("wrmsr\n\t" \ __asm__ __volatile__("wrmsr\n\t" \
clflush "\n\t" \ clflush "\n\t" \
"mfence\n\t" \ "mfence\n\t" \
"1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \ "1: mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
FEP "loop .\n\t" \ FEP "loop .\n\t" \
FEP "mov %%edi, %%ecx\n\t" \ FEP "mov %%edi, %%ecx\n\t" \
FEP "xor %%eax, %%eax\n\t" \ FEP "xor %%eax, %%eax\n\t" \
...@@ -500,7 +510,7 @@ static void guest_test_fixed_counters(void) ...@@ -500,7 +510,7 @@ static void guest_test_fixed_counters(void)
wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0); wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL)); wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i)); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES})); __asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i); val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment