Skip to content

Commit cda231c

Browse files
committed
Merge tag 'kvm-x86-pmu-6.11' of https://github.com/kvm-x86/linux into HEAD

KVM x86/pmu changes for 6.11

- Don't advertise IA32_PERF_GLOBAL_OVF_CTRL as an MSR-to-be-saved, as it reads
  '0' and writes from userspace are ignored.

- Update to the newfangled Intel CPU FMS infrastructure.

- Use macros instead of open-coded literals to clean up KVM's manipulation of
  FIXED_CTR_CTRL MSRs.
2 parents 5c5ddf7 + f287bef commit cda231c

File tree

7 files changed

+94
-82
lines changed

7 files changed

+94
-82
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 17 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -533,29 +533,33 @@ struct kvm_pmc {
533533
};
534534

535535
/* More counters may conflict with other existing Architectural MSRs */
536-
#define KVM_INTEL_PMC_MAX_GENERIC 8
537-
#define MSR_ARCH_PERFMON_PERFCTR_MAX (MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
538-
#define MSR_ARCH_PERFMON_EVENTSEL_MAX (MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
539-
#define KVM_PMC_MAX_FIXED 3
540-
#define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
541-
#define KVM_AMD_PMC_MAX_GENERIC 6
536+
#define KVM_MAX(a, b) ((a) >= (b) ? (a) : (b))
537+
#define KVM_MAX_NR_INTEL_GP_COUNTERS 8
538+
#define KVM_MAX_NR_AMD_GP_COUNTERS 6
539+
#define KVM_MAX_NR_GP_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \
540+
KVM_MAX_NR_AMD_GP_COUNTERS)
541+
542+
#define KVM_MAX_NR_INTEL_FIXED_COUTNERS 3
543+
#define KVM_MAX_NR_AMD_FIXED_COUTNERS 0
544+
#define KVM_MAX_NR_FIXED_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUTNERS, \
545+
KVM_MAX_NR_AMD_FIXED_COUTNERS)
542546

543547
struct kvm_pmu {
544548
u8 version;
545549
unsigned nr_arch_gp_counters;
546550
unsigned nr_arch_fixed_counters;
547551
unsigned available_event_types;
548552
u64 fixed_ctr_ctrl;
549-
u64 fixed_ctr_ctrl_mask;
553+
u64 fixed_ctr_ctrl_rsvd;
550554
u64 global_ctrl;
551555
u64 global_status;
552556
u64 counter_bitmask[2];
553-
u64 global_ctrl_mask;
554-
u64 global_status_mask;
557+
u64 global_ctrl_rsvd;
558+
u64 global_status_rsvd;
555559
u64 reserved_bits;
556560
u64 raw_event_mask;
557-
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
558-
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
561+
struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS];
562+
struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUNTERS];
559563

560564
/*
561565
* Overlay the bitmap with a 64-bit atomic so that all bits can be
@@ -571,9 +575,9 @@ struct kvm_pmu {
571575

572576
u64 ds_area;
573577
u64 pebs_enable;
574-
u64 pebs_enable_mask;
578+
u64 pebs_enable_rsvd;
575579
u64 pebs_data_cfg;
576-
u64 pebs_data_cfg_mask;
580+
u64 pebs_data_cfg_rsvd;
577581

578582
/*
579583
* If a guest counter is cross-mapped to host counter with different

arch/x86/kvm/pmu.c

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -34,16 +34,16 @@ EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
3434

3535
/* Precise Distribution of Instructions Retired (PDIR) */
3636
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
37-
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
38-
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
37+
X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
38+
X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
3939
/* Instruction-Accurate PDIR (PDIR++) */
40-
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
40+
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
4141
{}
4242
};
4343

4444
/* Precise Distribution (PDist) */
4545
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
46-
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
46+
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
4747
{}
4848
};
4949

@@ -69,7 +69,7 @@ static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
6969
* code. Each pmc, stored in kvm_pmc.idx field, is unique across
7070
* all perf counters (both gp and fixed). The mapping relationship
7171
* between pmc and perf counters is as the following:
72-
* * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
72+
* * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
7373
* [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
7474
* * AMD: [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
7575
* and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
@@ -469,11 +469,11 @@ static int reprogram_counter(struct kvm_pmc *pmc)
469469
if (pmc_is_fixed(pmc)) {
470470
fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
471471
pmc->idx - KVM_FIXED_PMC_BASE_IDX);
472-
if (fixed_ctr_ctrl & 0x1)
472+
if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)
473473
eventsel |= ARCH_PERFMON_EVENTSEL_OS;
474-
if (fixed_ctr_ctrl & 0x2)
474+
if (fixed_ctr_ctrl & INTEL_FIXED_0_USER)
475475
eventsel |= ARCH_PERFMON_EVENTSEL_USR;
476-
if (fixed_ctr_ctrl & 0x8)
476+
if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI)
477477
eventsel |= ARCH_PERFMON_EVENTSEL_INT;
478478
new_config = (u64)fixed_ctr_ctrl;
479479
}
@@ -681,13 +681,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
681681
if (!msr_info->host_initiated)
682682
break;
683683

684-
if (data & pmu->global_status_mask)
684+
if (data & pmu->global_status_rsvd)
685685
return 1;
686686

687687
pmu->global_status = data;
688688
break;
689689
case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
690-
data &= ~pmu->global_ctrl_mask;
690+
data &= ~pmu->global_ctrl_rsvd;
691691
fallthrough;
692692
case MSR_CORE_PERF_GLOBAL_CTRL:
693693
if (!kvm_valid_perf_global_ctrl(pmu, data))
@@ -704,7 +704,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
704704
* GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
705705
* GLOBAL_STATUS, and so the set of reserved bits is the same.
706706
*/
707-
if (data & pmu->global_status_mask)
707+
if (data & pmu->global_status_rsvd)
708708
return 1;
709709
fallthrough;
710710
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
@@ -768,11 +768,11 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
768768
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
769769
pmu->reserved_bits = 0xffffffff00200000ull;
770770
pmu->raw_event_mask = X86_RAW_EVENT_MASK;
771-
pmu->global_ctrl_mask = ~0ull;
772-
pmu->global_status_mask = ~0ull;
773-
pmu->fixed_ctr_ctrl_mask = ~0ull;
774-
pmu->pebs_enable_mask = ~0ull;
775-
pmu->pebs_data_cfg_mask = ~0ull;
771+
pmu->global_ctrl_rsvd = ~0ull;
772+
pmu->global_status_rsvd = ~0ull;
773+
pmu->fixed_ctr_ctrl_rsvd = ~0ull;
774+
pmu->pebs_enable_rsvd = ~0ull;
775+
pmu->pebs_data_cfg_rsvd = ~0ull;
776776
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
777777

778778
if (!vcpu->kvm->arch.enable_pmu)
@@ -846,8 +846,8 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
846846
} else {
847847
config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
848848
pmc->idx - KVM_FIXED_PMC_BASE_IDX);
849-
select_os = config & 0x1;
850-
select_user = config & 0x2;
849+
select_os = config & INTEL_FIXED_0_KERNEL;
850+
select_user = config & INTEL_FIXED_0_USER;
851851
}
852852

853853
/*

arch/x86/kvm/pmu.h

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,8 @@
1414
MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
1515

1616
/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
17-
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
17+
#define fixed_ctrl_field(ctrl_reg, idx) \
18+
(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
1819

1920
#define VMWARE_BACKDOOR_PMC_HOST_TSC 0x10000
2021
#define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001
@@ -129,7 +130,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
129130
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
130131
u64 data)
131132
{
132-
return !(pmu->global_ctrl_mask & data);
133+
return !(pmu->global_ctrl_rsvd & data);
133134
}
134135

135136
/* returns general purpose PMC with the specified MSR. Note that it can be
@@ -170,7 +171,8 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
170171

171172
if (pmc_is_fixed(pmc))
172173
return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
173-
pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;
174+
pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
175+
(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);
174176

175177
return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
176178
}
@@ -217,7 +219,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
217219
kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
218220
pmu_ops->MAX_NR_GP_COUNTERS);
219221
kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
220-
KVM_PMC_MAX_FIXED);
222+
KVM_MAX_NR_FIXED_COUNTERS);
221223

222224
kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
223225
perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);

arch/x86/kvm/svm/pmu.c

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -199,8 +199,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
199199
kvm_pmu_cap.num_counters_gp);
200200

201201
if (pmu->version > 1) {
202-
pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
203-
pmu->global_status_mask = pmu->global_ctrl_mask;
202+
pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
203+
pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
204204
}
205205

206206
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
@@ -217,10 +217,9 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
217217
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
218218
int i;
219219

220-
BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
221-
BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);
220+
BUILD_BUG_ON(KVM_MAX_NR_AMD_GP_COUNTERS > AMD64_NUM_COUNTERS_CORE);
222221

223-
for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC ; i++) {
222+
for (i = 0; i < KVM_MAX_NR_AMD_GP_COUNTERS; i++) {
224223
pmu->gp_counters[i].type = KVM_PMC_GP;
225224
pmu->gp_counters[i].vcpu = vcpu;
226225
pmu->gp_counters[i].idx = i;
@@ -238,6 +237,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
238237
.refresh = amd_pmu_refresh,
239238
.init = amd_pmu_init,
240239
.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
241-
.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
240+
.MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
242241
.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
243242
};

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 29 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -348,14 +348,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
348348

349349
switch (msr) {
350350
case MSR_CORE_PERF_FIXED_CTR_CTRL:
351-
if (data & pmu->fixed_ctr_ctrl_mask)
351+
if (data & pmu->fixed_ctr_ctrl_rsvd)
352352
return 1;
353353

354354
if (pmu->fixed_ctr_ctrl != data)
355355
reprogram_fixed_counters(pmu, data);
356356
break;
357357
case MSR_IA32_PEBS_ENABLE:
358-
if (data & pmu->pebs_enable_mask)
358+
if (data & pmu->pebs_enable_rsvd)
359359
return 1;
360360

361361
if (pmu->pebs_enable != data) {
@@ -371,7 +371,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
371371
pmu->ds_area = data;
372372
break;
373373
case MSR_PEBS_DATA_CFG:
374-
if (data & pmu->pebs_data_cfg_mask)
374+
if (data & pmu->pebs_data_cfg_rsvd)
375375
return 1;
376376

377377
pmu->pebs_data_cfg = data;
@@ -436,8 +436,8 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
436436
};
437437
u64 eventsel;
438438

439-
BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_PMC_MAX_FIXED);
440-
BUILD_BUG_ON(index >= KVM_PMC_MAX_FIXED);
439+
BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUTNERS);
440+
BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUTNERS);
441441

442442
/*
443443
* Yell if perf reports support for a fixed counter but perf doesn't
@@ -448,6 +448,14 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
448448
return eventsel;
449449
}
450450

451+
static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
452+
{
453+
int i;
454+
455+
for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
456+
pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
457+
}
458+
451459
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
452460
{
453461
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -456,8 +464,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
456464
union cpuid10_eax eax;
457465
union cpuid10_edx edx;
458466
u64 perf_capabilities;
459-
u64 counter_mask;
460-
int i;
467+
u64 counter_rsvd;
461468

462469
memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
463470

@@ -501,22 +508,24 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
501508
((u64)1 << edx.split.bit_width_fixed) - 1;
502509
}
503510

504-
for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
505-
pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
506-
counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
511+
intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
512+
INTEL_FIXED_0_USER |
513+
INTEL_FIXED_0_ENABLE_PMI);
514+
515+
counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
507516
(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
508-
pmu->global_ctrl_mask = counter_mask;
517+
pmu->global_ctrl_rsvd = counter_rsvd;
509518

510519
/*
511520
* GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
512521
* share reserved bit definitions. The kernel just happens to use
513522
* OVF_CTRL for the names.
514523
*/
515-
pmu->global_status_mask = pmu->global_ctrl_mask
524+
pmu->global_status_rsvd = pmu->global_ctrl_rsvd
516525
& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
517526
MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
518527
if (vmx_pt_mode_is_host_guest())
519-
pmu->global_status_mask &=
528+
pmu->global_status_rsvd &=
520529
~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
521530

522531
entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
@@ -544,15 +553,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
544553

545554
if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
546555
if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
547-
pmu->pebs_enable_mask = counter_mask;
556+
pmu->pebs_enable_rsvd = counter_rsvd;
548557
pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
549-
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
550-
pmu->fixed_ctr_ctrl_mask &=
551-
~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
552-
}
553-
pmu->pebs_data_cfg_mask = ~0xff00000full;
558+
pmu->pebs_data_cfg_rsvd = ~0xff00000full;
559+
intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
554560
} else {
555-
pmu->pebs_enable_mask =
561+
pmu->pebs_enable_rsvd =
556562
~((1ull << pmu->nr_arch_gp_counters) - 1);
557563
}
558564
}
@@ -564,14 +570,14 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
564570
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
565571
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
566572

567-
for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
573+
for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
568574
pmu->gp_counters[i].type = KVM_PMC_GP;
569575
pmu->gp_counters[i].vcpu = vcpu;
570576
pmu->gp_counters[i].idx = i;
571577
pmu->gp_counters[i].current_config = 0;
572578
}
573579

574-
for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
580+
for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUTNERS; i++) {
575581
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
576582
pmu->fixed_counters[i].vcpu = vcpu;
577583
pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
@@ -731,6 +737,6 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
731737
.deliver_pmi = intel_pmu_deliver_pmi,
732738
.cleanup = intel_pmu_cleanup,
733739
.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
734-
.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
740+
.MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
735741
.MIN_NR_GP_COUNTERS = 1,
736742
};

arch/x86/kvm/vmx/vmx.c

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2561,17 +2561,15 @@ static bool cpu_has_sgx(void)
25612561
*/
25622562
static bool cpu_has_perf_global_ctrl_bug(void)
25632563
{
2564-
if (boot_cpu_data.x86 == 0x6) {
2565-
switch (boot_cpu_data.x86_model) {
2566-
case INTEL_FAM6_NEHALEM_EP: /* AAK155 */
2567-
case INTEL_FAM6_NEHALEM: /* AAP115 */
2568-
case INTEL_FAM6_WESTMERE: /* AAT100 */
2569-
case INTEL_FAM6_WESTMERE_EP: /* BC86,AAY89,BD102 */
2570-
case INTEL_FAM6_NEHALEM_EX: /* BA97 */
2571-
return true;
2572-
default:
2573-
break;
2574-
}
2564+
switch (boot_cpu_data.x86_vfm) {
2565+
case INTEL_NEHALEM_EP: /* AAK155 */
2566+
case INTEL_NEHALEM: /* AAP115 */
2567+
case INTEL_WESTMERE: /* AAT100 */
2568+
case INTEL_WESTMERE_EP: /* BC86,AAY89,BD102 */
2569+
case INTEL_NEHALEM_EX: /* BA97 */
2570+
return true;
2571+
default:
2572+
break;
25752573
}
25762574

25772575
return false;

0 commit comments

Comments
 (0)