
Commit 3adef90
KVM: x86: Hoist x86.c's global msr_* variables up above kvm_do_msr_access()
Move the definitions of the various MSR arrays above kvm_do_msr_access()
so that kvm_do_msr_access() can query the arrays when handling failures,
e.g. to squash errors if userspace tries to read an MSR that isn't fully
supported, but that KVM advertised as being an MSR-to-save.

No functional change intended.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 1cec203 commit 3adef90
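
For context, the stated motivation implies a consumer along the following lines. This is a hedged sketch, not the follow-up patch itself: the function name do_msr_access_sketch() and the exact error-squashing condition are assumptions for illustration; only kvm_is_msr_to_save() and the msr_access_t signature come from this commit.

	/*
	 * Illustrative sketch only (not from this commit): with the arrays and
	 * kvm_is_msr_to_save() now defined above kvm_do_msr_access(), a failed
	 * host-initiated access to an advertised-but-unsupported MSR could be
	 * squashed into a successful read of '0' instead of returning an error.
	 * Simplified: a real implementation would squash reads only.
	 */
	static int do_msr_access_sketch(struct kvm_vcpu *vcpu, u32 index, u64 *data,
					bool host_initiated, msr_access_t msr_access_fn)
	{
		int ret = msr_access_fn(vcpu, index, data, host_initiated);

		/* Assumed squashing policy, for illustration. */
		if (ret && host_initiated && kvm_is_msr_to_save(index)) {
			*data = 0;
			ret = 0;
		}
		return ret;
	}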

1 file changed: 184 additions, 184 deletions

arch/x86/kvm/x86.c
@@ -304,6 +304,190 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 
 static struct kmem_cache *x86_emulator_cache;
 
+/*
+ * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
+ * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
+ * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
+ * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
+ * MSRs that KVM emulates without strictly requiring host support.
+ * msr_based_features holds MSRs that enumerate features, i.e. are effectively
+ * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
+ * msrs_to_save and emulated_msrs.
+ */
+
+static const u32 msrs_to_save_base[] = {
+	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+	MSR_STAR,
+#ifdef CONFIG_X86_64
+	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+#endif
+	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+	MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
+	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
+	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
+	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
+	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
+	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
+	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+	MSR_IA32_UMWAIT_CONTROL,
+
+	MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+};
+
+static const u32 msrs_to_save_pmu[] = {
+	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
+	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
+	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
+	MSR_CORE_PERF_GLOBAL_CTRL,
+	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
+
+	/* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
+	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
+	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
+	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
+	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
+	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
+	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
+	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
+	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
+
+	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+
+	/* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
+	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
+
+	MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+};
+
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
+			ARRAY_SIZE(msrs_to_save_pmu)];
+static unsigned num_msrs_to_save;
+
+static const u32 emulated_msrs_all[] = {
+	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+
+#ifdef CONFIG_KVM_HYPERV
+	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
+	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
+	HV_X64_MSR_RESET,
+	HV_X64_MSR_VP_INDEX,
+	HV_X64_MSR_VP_RUNTIME,
+	HV_X64_MSR_SCONTROL,
+	HV_X64_MSR_STIMER0_CONFIG,
+	HV_X64_MSR_VP_ASSIST_PAGE,
+	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
+	HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
+	HV_X64_MSR_SYNDBG_OPTIONS,
+	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
+	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
+	HV_X64_MSR_SYNDBG_PENDING_BUFFER,
+#endif
+
+	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
+
+	MSR_IA32_TSC_ADJUST,
+	MSR_IA32_TSC_DEADLINE,
+	MSR_IA32_ARCH_CAPABILITIES,
+	MSR_IA32_PERF_CAPABILITIES,
+	MSR_IA32_MISC_ENABLE,
+	MSR_IA32_MCG_STATUS,
+	MSR_IA32_MCG_CTL,
+	MSR_IA32_MCG_EXT_CTL,
+	MSR_IA32_SMBASE,
+	MSR_SMI_COUNT,
+	MSR_PLATFORM_INFO,
+	MSR_MISC_FEATURES_ENABLES,
+	MSR_AMD64_VIRT_SPEC_CTRL,
+	MSR_AMD64_TSC_RATIO,
+	MSR_IA32_POWER_CTL,
+	MSR_IA32_UCODE_REV,
+
+	/*
+	 * KVM always supports the "true" VMX control MSRs, even if the host
+	 * does not. The VMX MSRs as a whole are considered "emulated" as KVM
+	 * doesn't strictly require them to exist in the host (ignoring that
+	 * KVM would refuse to load in the first place if the core set of MSRs
+	 * aren't supported).
+	 */
+	MSR_IA32_VMX_BASIC,
+	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+	MSR_IA32_VMX_TRUE_EXIT_CTLS,
+	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+	MSR_IA32_VMX_MISC,
+	MSR_IA32_VMX_CR0_FIXED0,
+	MSR_IA32_VMX_CR4_FIXED0,
+	MSR_IA32_VMX_VMCS_ENUM,
+	MSR_IA32_VMX_PROCBASED_CTLS2,
+	MSR_IA32_VMX_EPT_VPID_CAP,
+	MSR_IA32_VMX_VMFUNC,
+
+	MSR_K7_HWCR,
+	MSR_KVM_POLL_CONTROL,
+};
+
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
+static unsigned num_emulated_msrs;
+
+/*
+ * List of MSRs that control the existence of MSR-based features, i.e. MSRs
+ * that are effectively CPUID leafs. VMX MSRs are also included in the set of
+ * feature MSRs, but are handled separately to allow expedited lookups.
+ */
+static const u32 msr_based_features_all_except_vmx[] = {
+	MSR_AMD64_DE_CFG,
+	MSR_IA32_UCODE_REV,
+	MSR_IA32_ARCH_CAPABILITIES,
+	MSR_IA32_PERF_CAPABILITIES,
+};
+
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
+			      (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
+static unsigned int num_msr_based_features;
+
+/*
+ * All feature MSRs except uCode revID, which tracks the currently loaded uCode
+ * patch, are immutable once the vCPU model is defined.
+ */
+static bool kvm_is_immutable_feature_msr(u32 msr)
+{
+	int i;
+
+	if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
+		return true;
+
+	for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
+		if (msr == msr_based_features_all_except_vmx[i])
+			return msr != MSR_IA32_UCODE_REV;
+	}
+
+	return false;
+}
+
+static bool kvm_is_msr_to_save(u32 msr_index)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_msrs_to_save; i++) {
+		if (msrs_to_save[i] == msr_index)
+			return true;
+	}
+
+	return false;
+}
+
 typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
 			    bool host_initiated);
 
@@ -1425,178 +1609,6 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
 
-/*
- * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track
- * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
- * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. msrs_to_save holds MSRs that
- * require host support, i.e. should be probed via RDMSR. emulated_msrs holds
- * MSRs that KVM emulates without strictly requiring host support.
- * msr_based_features holds MSRs that enumerate features, i.e. are effectively
- * CPUID leafs. Note, msr_based_features isn't mutually exclusive with
- * msrs_to_save and emulated_msrs.
- */
-
-static const u32 msrs_to_save_base[] = {
-	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-	MSR_STAR,
-#ifdef CONFIG_X86_64
-	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
-#endif
-	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
-	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-	MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
-	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
-	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
-	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
-	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
-	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
-	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
-	MSR_IA32_UMWAIT_CONTROL,
-
-	MSR_IA32_XFD, MSR_IA32_XFD_ERR,
-};
-
-static const u32 msrs_to_save_pmu[] = {
-	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
-	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
-	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
-	MSR_CORE_PERF_GLOBAL_CTRL,
-	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
-
-	/* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
-	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
-	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
-	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
-	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
-	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
-
-	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
-	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
-
-	/* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
-	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
-	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
-	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
-	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
-
-	MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
-	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
-	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
-};
-
-static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
-			ARRAY_SIZE(msrs_to_save_pmu)];
-static unsigned num_msrs_to_save;
-
-static const u32 emulated_msrs_all[] = {
-	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
-	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
-
-#ifdef CONFIG_KVM_HYPERV
-	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
-	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
-	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
-	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
-	HV_X64_MSR_RESET,
-	HV_X64_MSR_VP_INDEX,
-	HV_X64_MSR_VP_RUNTIME,
-	HV_X64_MSR_SCONTROL,
-	HV_X64_MSR_STIMER0_CONFIG,
-	HV_X64_MSR_VP_ASSIST_PAGE,
-	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
-	HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
-	HV_X64_MSR_SYNDBG_OPTIONS,
-	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
-	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
-	HV_X64_MSR_SYNDBG_PENDING_BUFFER,
-#endif
-
-	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
-	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
-
-	MSR_IA32_TSC_ADJUST,
-	MSR_IA32_TSC_DEADLINE,
-	MSR_IA32_ARCH_CAPABILITIES,
-	MSR_IA32_PERF_CAPABILITIES,
-	MSR_IA32_MISC_ENABLE,
-	MSR_IA32_MCG_STATUS,
-	MSR_IA32_MCG_CTL,
-	MSR_IA32_MCG_EXT_CTL,
-	MSR_IA32_SMBASE,
-	MSR_SMI_COUNT,
-	MSR_PLATFORM_INFO,
-	MSR_MISC_FEATURES_ENABLES,
-	MSR_AMD64_VIRT_SPEC_CTRL,
-	MSR_AMD64_TSC_RATIO,
-	MSR_IA32_POWER_CTL,
-	MSR_IA32_UCODE_REV,
-
-	/*
-	 * KVM always supports the "true" VMX control MSRs, even if the host
-	 * does not. The VMX MSRs as a whole are considered "emulated" as KVM
-	 * doesn't strictly require them to exist in the host (ignoring that
-	 * KVM would refuse to load in the first place if the core set of MSRs
-	 * aren't supported).
-	 */
-	MSR_IA32_VMX_BASIC,
-	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
-	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
-	MSR_IA32_VMX_TRUE_EXIT_CTLS,
-	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
-	MSR_IA32_VMX_MISC,
-	MSR_IA32_VMX_CR0_FIXED0,
-	MSR_IA32_VMX_CR4_FIXED0,
-	MSR_IA32_VMX_VMCS_ENUM,
-	MSR_IA32_VMX_PROCBASED_CTLS2,
-	MSR_IA32_VMX_EPT_VPID_CAP,
-	MSR_IA32_VMX_VMFUNC,
-
-	MSR_K7_HWCR,
-	MSR_KVM_POLL_CONTROL,
-};
-
-static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
-static unsigned num_emulated_msrs;
-
-/*
- * List of MSRs that control the existence of MSR-based features, i.e. MSRs
- * that are effectively CPUID leafs. VMX MSRs are also included in the set of
- * feature MSRs, but are handled separately to allow expedited lookups.
- */
-static const u32 msr_based_features_all_except_vmx[] = {
-	MSR_AMD64_DE_CFG,
-	MSR_IA32_UCODE_REV,
-	MSR_IA32_ARCH_CAPABILITIES,
-	MSR_IA32_PERF_CAPABILITIES,
-};
-
-static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
-			      (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
-static unsigned int num_msr_based_features;
-
-/*
- * All feature MSRs except uCode revID, which tracks the currently loaded uCode
- * patch, are immutable once the vCPU model is defined.
- */
-static bool kvm_is_immutable_feature_msr(u32 msr)
-{
-	int i;
-
-	if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
-		return true;
-
-	for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
-		if (msr == msr_based_features_all_except_vmx[i])
-			return msr != MSR_IA32_UCODE_REV;
-	}
-
-	return false;
-}
-
 /*
  * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
  * does not yet virtualize. These include:
@@ -3744,18 +3756,6 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
 }
 
-static bool kvm_is_msr_to_save(u32 msr_index)
-{
-	unsigned int i;
-
-	for (i = 0; i < num_msrs_to_save; i++) {
-		if (msrs_to_save[i] == msr_index)
-			return true;
-	}
-
-	return false;
-}
-
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u32 msr = msr_info->index;
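
The hoisted comment names KVM_GET_MSRS, KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST as the userspace-facing entry points for these lists. For context, here is a minimal userspace sketch of the enumeration side (illustrative only; error handling trimmed; the two-call size-probe pattern is the documented KVM ioctl convention):

	/* Enumerate the MSR indices that msrs_to_save/emulated_msrs feed
	 * into KVM_GET_MSR_INDEX_LIST. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		struct kvm_msr_list probe = { .nmsrs = 0 };
		struct kvm_msr_list *list;

		if (kvm < 0)
			return 1;

		/* First call fails with E2BIG but writes back the required count. */
		ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

		list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
		list->nmsrs = probe.nmsrs;
		if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) == 0) {
			for (__u32 i = 0; i < list->nmsrs; i++)
				printf("0x%x\n", list->indices[i]);
		}
		return 0;
	}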
