Commit dbfd50c

Merge tag 'kvm-x86-selftests-6.11' of https://github.com/kvm-x86/linux into HEAD
KVM selftests for 6.11:

- Remove dead code in the memslot modification stress test.

- Treat "branch instructions retired" as supported on all AMD Family 17h+ CPUs.

- Print the guest pseudo-RNG seed only when it changes, to avoid spamming the log for tests that create lots of VMs.

- Make the PMU counters test less flaky when counting LLC cache misses by doing CLFLUSH{OPT} in every loop iteration.
2 parents: cda231c + 4669de4

File tree

4 files changed (+41, -53 lines)

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 7 additions & 2 deletions

@@ -21,6 +21,7 @@
 
 uint32_t guest_random_seed;
 struct guest_random_state guest_rng;
+static uint32_t last_guest_seed;
 
 static int vcpu_mmap_sz(void);
 
@@ -434,7 +435,10 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 	slot0 = memslot2region(vm, 0);
 	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
 
-	pr_info("Random seed: 0x%x\n", guest_random_seed);
+	if (guest_random_seed != last_guest_seed) {
+		pr_info("Random seed: 0x%x\n", guest_random_seed);
+		last_guest_seed = guest_random_seed;
+	}
 	guest_rng = new_guest_random_state(guest_random_seed);
 	sync_global_to_guest(vm, guest_rng);
 
@@ -2319,7 +2323,8 @@ void __attribute((constructor)) kvm_selftest_init(void)
 	/* Tell stdout not to buffer its content. */
 	setbuf(stdout, NULL);
 
-	guest_random_seed = random();
+	guest_random_seed = last_guest_seed = random();
+	pr_info("Random seed: 0x%x\n", guest_random_seed);
 
 	kvm_selftest_arch_init();
 }
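Net effect of the two hunks: kvm_selftest_init() still seeds the pseudo-RNG once per test process and prints the seed immediately, while __vm_create() logs it again only if a test has overridden guest_random_seed since the last print. A minimal, self-contained sketch of that pattern (hypothetical demo code, not the selftest itself):

/* Hypothetical standalone demo of the deduplicated seed logging. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t guest_random_seed;
static uint32_t last_guest_seed;

static void log_seed_on_vm_create(void)
{
	/* Mirrors __vm_create(): log only when the seed has changed. */
	if (guest_random_seed != last_guest_seed) {
		printf("Random seed: 0x%x\n", guest_random_seed);
		last_guest_seed = guest_random_seed;
	}
}

int main(void)
{
	/* Mirrors kvm_selftest_init(): seed once, print once. */
	guest_random_seed = last_guest_seed = random();
	printf("Random seed: 0x%x\n", guest_random_seed);

	log_seed_on_vm_create();	/* silent: seed unchanged */
	guest_random_seed = 0x1234;	/* a test overrides the seed */
	log_seed_on_vm_create();	/* logs the new seed once */
	log_seed_on_vm_create();	/* silent again */
	return 0;
}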

tools/testing/selftests/kvm/memslot_modification_stress_test.c

Lines changed: 0 additions & 6 deletions

@@ -53,12 +53,6 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 	}
 }
 
-struct memslot_antagonist_args {
-	struct kvm_vm *vm;
-	useconds_t delay;
-	uint64_t nr_modifications;
-};
-
 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
 			       uint64_t nr_modifications)
 {

tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

Lines changed: 29 additions & 15 deletions

@@ -7,15 +7,28 @@
 #include "pmu.h"
 #include "processor.h"
 
-/* Number of LOOP instructions for the guest measurement payload. */
-#define NUM_BRANCHES 10
+/* Number of iterations of the loop for the guest measurement payload. */
+#define NUM_LOOPS 10
+
+/* Each iteration of the loop retires one branch instruction. */
+#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
+
+/*
+ * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
+ * 1 LOOP.
+ */
+#define NUM_INSNS_PER_LOOP 3
+
 /*
  * Number of "extra" instructions that will be counted, i.e. the number of
- * instructions that are needed to set up the loop and then disabled the
- * counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
+ * instructions that are needed to set up the loop and then disable the
+ * counter. 2 MOV, 2 XOR, 1 WRMSR.
  */
-#define NUM_EXTRA_INSNS 7
-#define NUM_INSNS_RETIRED (NUM_BRANCHES + NUM_EXTRA_INSNS)
+#define NUM_EXTRA_INSNS 5
+
+/* Total number of instructions retired within the measured section. */
+#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
+
 
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
@@ -100,7 +113,7 @@ static void guest_assert_event_count(uint8_t idx,
 		GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
 		break;
 	case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
-		GUEST_ASSERT_EQ(count, NUM_BRANCHES);
+		GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
 		break;
 	case INTEL_ARCH_LLC_REFERENCES_INDEX:
 	case INTEL_ARCH_LLC_MISSES_INDEX:
@@ -120,7 +133,7 @@ static void guest_assert_event_count(uint8_t idx,
 	}
 
 sanity_checks:
-	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+	__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
 	GUEST_ASSERT_EQ(_rdpmc(pmc), count);
 
 	wrmsr(pmc_msr, 0xdead);
@@ -134,8 +147,8 @@ static void guest_assert_event_count(uint8_t idx,
  * before the end of the sequence.
 *
 * If CLFUSH{,OPT} is supported, flush the cacheline containing (at least) the
- * start of the loop to force LLC references and misses, i.e. to allow testing
- * that those events actually count.
+ * CLFUSH{,OPT} instruction on each loop iteration to force LLC references and
+ * misses, i.e. to allow testing that those events actually count.
 *
 * If forced emulation is enabled (and specified), force emulation on a subset
 * of the measured code to verify that KVM correctly emulates instructions and
@@ -145,10 +158,11 @@ static void guest_assert_event_count(uint8_t idx,
 #define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)	\
 do {							\
 	__asm__ __volatile__("wrmsr\n\t"			\
+			     " mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t"	\
+			     "1:\n\t"				\
 			     clflush "\n\t"			\
 			     "mfence\n\t"			\
-			     "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t"	\
-			     FEP "loop .\n\t"			\
+			     FEP "loop 1b\n\t"			\
 			     FEP "mov %%edi, %%ecx\n\t"		\
 			     FEP "xor %%eax, %%eax\n\t"		\
 			     FEP "xor %%edx, %%edx\n\t"		\
@@ -163,9 +177,9 @@ do { \
 	wrmsr(pmc_msr, 0);				\
 							\
 	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))	\
-		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);	\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP);	\
 	else if (this_cpu_has(X86_FEATURE_CLFLUSH))	\
-		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);	\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP);	\
 	else						\
 		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);	\
 							\
@@ -500,7 +514,7 @@ static void guest_test_fixed_counters(void)
 		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
 		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);
 
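For reference, the new accounting works out as follows: each of the NUM_LOOPS = 10 iterations retires three instructions (CLFLUSH/CLFLUSHOPT/NOP, MFENCE, LOOP) and one branch (the LOOP itself), and five trailing instructions (2 MOV, 2 XOR, 1 WRMSR) disable the counter, so the measured section retires 10 * 3 + 5 = 35 instructions and 10 branches. A compile-time restatement of that arithmetic (a sketch, not part of the test):

/* Sketch: restate the instruction-count arithmetic with static asserts. */
#define NUM_LOOPS			10
#define NUM_BRANCH_INSNS_RETIRED	(NUM_LOOPS)
#define NUM_INSNS_PER_LOOP		3
#define NUM_EXTRA_INSNS			5
#define NUM_INSNS_RETIRED		(NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)

_Static_assert(NUM_INSNS_RETIRED == 35, "10 * 3 + 5 instructions retired");
_Static_assert(NUM_BRANCH_INSNS_RETIRED == 10, "one branch per loop iteration");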

tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

Lines changed: 5 additions & 30 deletions

@@ -32,8 +32,8 @@ struct __kvm_pmu_event_filter {
 
 /*
  * This event list comprises Intel's known architectural events, plus AMD's
- * "retired branch instructions" for Zen1-Zen3 (and possibly other AMD CPUs).
- * Note, AMD and Intel use the same encoding for instructions retired.
+ * Branch Instructions Retired for Zen CPUs. Note, AMD and Intel use the
+ * same encoding for Instructions Retired.
  */
 kvm_static_assert(INTEL_ARCH_INSTRUCTIONS_RETIRED == AMD_ZEN_INSTRUCTIONS_RETIRED);
 
@@ -353,38 +353,13 @@ static bool use_intel_pmu(void)
 	       kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
 }
 
-static bool is_zen1(uint32_t family, uint32_t model)
-{
-	return family == 0x17 && model <= 0x0f;
-}
-
-static bool is_zen2(uint32_t family, uint32_t model)
-{
-	return family == 0x17 && model >= 0x30 && model <= 0x3f;
-}
-
-static bool is_zen3(uint32_t family, uint32_t model)
-{
-	return family == 0x19 && model <= 0x0f;
-}
-
 /*
- * Determining AMD support for a PMU event requires consulting the AMD
- * PPR for the CPU or reference material derived therefrom. The AMD
- * test code herein has been verified to work on Zen1, Zen2, and Zen3.
- *
- * Feel free to add more AMD CPUs that are documented to support event
- * select 0xc2 umask 0 as "retired branch instructions."
+ * On AMD, all Family 17h+ CPUs (Zen and its successors) use event encoding
+ * 0xc2,0 for Branch Instructions Retired.
  */
 static bool use_amd_pmu(void)
 {
-	uint32_t family = kvm_cpu_family();
-	uint32_t model = kvm_cpu_model();
-
-	return host_cpu_is_amd &&
-	       (is_zen1(family, model) ||
-		is_zen2(family, model) ||
-		is_zen3(family, model));
+	return host_cpu_is_amd && kvm_cpu_family() >= 0x17;
 }
 
 /*
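The relaxed check is safe because every Zen generation reports an x86 family of at least 0x17 (Zen 1/2 are Family 17h, Zen 3/4 are Family 19h). For background, here is a sketch of the standard family decode from CPUID.01H:EAX; the selftests' kvm_cpu_family() helper is assumed to compute the equivalent value:

/* Sketch of the standard x86 family decode from CPUID.01H:EAX. */
#include <stdint.h>

static inline uint32_t x86_family(uint32_t cpuid_1_eax)
{
	uint32_t family = (cpuid_1_eax >> 8) & 0xf;

	/*
	 * A base family of 0xf means the extended family field applies,
	 * e.g. Zen reports base 0xf + extended 0x8 = family 0x17.
	 */
	if (family == 0xf)
		family += (cpuid_1_eax >> 20) & 0xff;

	return family;
}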
