
Commit 8714f7b

xen/pv: add fault recovery control to pmu msr accesses
Today pmu_msr_read() and pmu_msr_write() fall back to the safe variants
of read/write MSR in case the MSR access isn't emulated via Xen. Allow
the caller to select that faults should not be recovered from by passing
NULL for the error pointer.

Restructure the code to make it more readable.

Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
1 parent 6136768 commit 8714f7b
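For illustration, a minimal caller sketch of the new convention. The call site, the printed message, and the MSR constant chosen here are hypothetical and not part of this patch; only the pmu_msr_read() signature and the NULL-vs-non-NULL err behaviour come from the change below:

        uint64_t val;
        int err;

        /* Recoverable: an access not emulated by Xen falls back to native_read_msr_safe(). */
        if (pmu_msr_read(MSR_CORE_PERF_FIXED_CTR_CTRL, &val, &err) && !err)
                pr_info("fixed counter control: 0x%llx\n", val);

        /*
         * Non-recoverable: with a NULL error pointer the fallback is plain
         * native_read_msr(), so a faulting access is not silently fixed up.
         */
        pmu_msr_read(MSR_CORE_PERF_FIXED_CTR_CTRL, &val, NULL);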

File tree

1 file changed: +38 -28 lines changed

arch/x86/xen/pmu.c

Lines changed: 38 additions & 28 deletions
@@ -131,6 +131,9 @@ static inline uint32_t get_fam15h_addr(u32 addr)
 
 static inline bool is_amd_pmu_msr(unsigned int msr)
 {
+        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+                return false;
+
         if ((msr >= MSR_F15H_PERF_CTL &&
              msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
             (msr >= MSR_K7_EVNTSEL0 &&
@@ -144,6 +147,9 @@ static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
 {
         u32 msr_index_pmc;
 
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+                return false;
+
         switch (msr_index) {
         case MSR_CORE_PERF_FIXED_CTR_CTRL:
         case MSR_IA32_DS_AREA:
@@ -290,48 +296,52 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
         return false;
 }
 
+static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read,
+                                 bool *emul)
+{
+        int type, index;
+
+        if (is_amd_pmu_msr(msr))
+                *emul = xen_amd_pmu_emulate(msr, val, is_read);
+        else if (is_intel_pmu_msr(msr, &type, &index))
+                *emul = xen_intel_pmu_emulate(msr, val, type, index, is_read);
+        else
+                return false;
+
+        return true;
+}
+
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-                if (is_amd_pmu_msr(msr)) {
-                        if (!xen_amd_pmu_emulate(msr, val, 1))
-                                *val = native_read_msr_safe(msr, err);
-                        return true;
-                }
-        } else {
-                int type, index;
+        bool emulated;
 
-                if (is_intel_pmu_msr(msr, &type, &index)) {
-                        if (!xen_intel_pmu_emulate(msr, val, type, index, 1))
-                                *val = native_read_msr_safe(msr, err);
-                        return true;
-                }
+        if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
+                return false;
+
+        if (!emulated) {
+                *val = err ? native_read_msr_safe(msr, err)
+                           : native_read_msr(msr);
         }
 
-        return false;
+        return true;
 }
 
 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
         uint64_t val = ((uint64_t)high << 32) | low;
+        bool emulated;
 
-        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-                if (is_amd_pmu_msr(msr)) {
-                        if (!xen_amd_pmu_emulate(msr, &val, 0))
-                                *err = native_write_msr_safe(msr, low, high);
-                        return true;
-                }
-        } else {
-                int type, index;
+        if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
+                return false;
 
-                if (is_intel_pmu_msr(msr, &type, &index)) {
-                        if (!xen_intel_pmu_emulate(msr, &val, type, index, 0))
-                                *err = native_write_msr_safe(msr, low, high);
-                        return true;
-                }
+        if (!emulated) {
+                if (err)
+                        *err = native_write_msr_safe(msr, low, high);
+                else
+                        native_write_msr(msr, low, high);
         }
 
-        return false;
+        return true;
 }
 
 static unsigned long long xen_amd_read_pmc(int counter)
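For readability, the two entry points as they stand with this patch applied, reconstructed from the added lines in the hunks above (the comments are added here for explanation only):

bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
        bool emulated;

        if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
                return false;

        if (!emulated) {
                /* Not emulated by Xen: err selects the safe (recoverable) or plain read. */
                *val = err ? native_read_msr_safe(msr, err)
                           : native_read_msr(msr);
        }

        return true;
}

bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
        uint64_t val = ((uint64_t)high << 32) | low;
        bool emulated;

        if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
                return false;

        if (!emulated) {
                /* Same selection on the write side. */
                if (err)
                        *err = native_write_msr_safe(msr, low, high);
                else
                        native_write_msr(msr, low, high);
        }

        return true;
}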

0 commit comments
