@@ -21,6 +21,7 @@
 
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
+static bool is_forced_emulation_enabled;
 
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 						  void *guest_code,
@@ -34,6 +35,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 	vcpu_init_descriptor_tables(*vcpu);
 
 	sync_global_to_guest(vm, kvm_pmu_version);
+	sync_global_to_guest(vm, is_forced_emulation_enabled);
 
 	/*
 	 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
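
Note on the added sync: host userspace and the guest each run against their own copy of the test binary's globals, so assigning is_forced_emulation_enabled in main() is invisible to guest code until the value is copied into guest memory. A minimal sketch of what sync_global_to_guest() does, assuming it matches the helper in the selftests' kvm_util headers (treat the exact definition as an assumption):

/*
 * Sketch (assumption: mirrors the selftests' helper): translate the
 * global's guest virtual address to a host pointer, then copy the
 * host's current value into the guest's copy of the variable.
 */
#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})
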
@@ -138,37 +140,50 @@ static void guest_assert_event_count(uint8_t idx,
  * If CLFLUSH{,OPT} is supported, flush the cacheline containing (at least) the
  * start of the loop to force LLC references and misses, i.e. to allow testing
  * that those events actually count.
+ *
+ * If forced emulation is enabled (and specified), force emulation on a subset
+ * of the measured code to verify that KVM correctly emulates instructions and
+ * branches retired events in conjunction with hardware also counting said
+ * events.
  */
-#define GUEST_MEASURE_EVENT(_msr, _value, clflush)				\
+#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)				\
 do {										\
 	__asm__ __volatile__("wrmsr\n\t"					\
 			     clflush "\n\t"					\
 			     "mfence\n\t"					\
 			     "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t"	\
-			     "loop .\n\t"					\
-			     "mov %%edi, %%ecx\n\t"				\
-			     "xor %%eax, %%eax\n\t"				\
-			     "xor %%edx, %%edx\n\t"				\
+			     FEP "loop .\n\t"					\
+			     FEP "mov %%edi, %%ecx\n\t"				\
+			     FEP "xor %%eax, %%eax\n\t"				\
+			     FEP "xor %%edx, %%edx\n\t"				\
			     "wrmsr\n\t"					\
			     :: "a"((uint32_t)_value), "d"(_value >> 32),	\
			        "c"(_msr), "D"(_msr)				\
	);									\
} while (0)
 
+#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)	\
+do {										\
+	wrmsr(_pmc_msr, 0);							\
+										\
+	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))				\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);	\
+	else if (this_cpu_has(X86_FEATURE_CLFLUSH))				\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);	\
+	else									\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);		\
+										\
+	guest_assert_event_count(_idx, _event, _pmc, _pmc_msr);			\
+} while (0)
+
 static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
 				    uint32_t pmc, uint32_t pmc_msr,
 				    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
 {
-	wrmsr(pmc_msr, 0);
-
-	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))
-		GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflushopt 1f");
-	else if (this_cpu_has(X86_FEATURE_CLFLUSH))
-		GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflush 1f");
-	else
-		GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "nop");
+	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
 
-	guest_assert_event_count(idx, event, pmc, pmc_msr);
+	if (is_forced_emulation_enabled)
+		GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
 }
 
 #define X86_PMU_FEATURE_NULL						\
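
How the FEP plumbing above works: GUEST_MEASURE_EVENT pastes its FEP string argument immediately in front of each measured instruction, so passing "" measures pure hardware execution while passing KVM_FEP routes those same instructions through KVM's instruction emulator (requires the host to run with KVM's force_emulation_prefix parameter enabled). A sketch of the prefix as I understand the selftests' definition in the x86 processor.h header; treat the exact byte sequence as an assumption:

/*
 * Assumed definition of the forced-emulation prefix: a magic sequence,
 * starting with ud2, that a FEP-enabled KVM recognizes and strips before
 * emulating the instruction that follows, instead of letting hardware
 * execute it.
 */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

/*
 * String pasting in GUEST_MEASURE_EVENT then yields, e.g.:
 *     ""      "loop .\n\t"  ->  loop .                       (hardware)
 *     KVM_FEP "loop .\n\t"  ->  ud2; 'k','v','m'; loop .     (emulated)
 * Either way the instructions/branches retired counts must match, which
 * is what guest_assert_event_count() verifies after each run.
 */
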
@@ -553,6 +568,7 @@ int main(int argc, char *argv[])
 
 	kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
 	kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
+	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
 
 	test_intel_counters();
 
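
Finally, on the kvm_is_forced_emulation_enabled() probe in main(): forced emulation is opt-in on the host, so the FEP variants only run when the kvm module was loaded with force_emulation_prefix enabled. A self-contained sketch of an equivalent check; the helper name is the selftests', but this implementation (reading the module parameter from sysfs) is my assumption of how it behaves:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical standalone equivalent: report whether the host kernel has
 * kvm.force_emulation_prefix enabled, without which KVM ignores the FEP
 * magic and the ud2 would simply inject #UD into the guest.
 */
static bool forced_emulation_enabled(void)
{
	FILE *f = fopen("/sys/module/kvm/parameters/force_emulation_prefix", "r");
	int val = 0;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val != 0;
}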