Skip to content

Commit 1f82e8e

Browse files
author
Ingo Molnar
committed
Merge branch 'x86/msr' into x86/core, to resolve conflicts
Conflicts:
	arch/x86/boot/startup/sme.c
	arch/x86/coco/sev/core.c
	arch/x86/kernel/fpu/core.c
	arch/x86/kernel/fpu/xstate.c

Semantic conflict:
	arch/x86/include/asm/sev-internal.h

Signed-off-by: Ingo Molnar <[email protected]>
2 parents 34be751 + 9cf7872 commit 1f82e8e

File tree

184 files changed

+1429
-1359
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

184 files changed

+1429
-1359
lines changed

arch/x86/boot/startup/sme.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -523,7 +523,7 @@ void __head sme_enable(struct boot_params *bp)
523523
me_mask = 1UL << (ebx & 0x3f);
524524

525525
/* Check the SEV MSR whether SEV or SME is enabled */
526-
sev_status = msr = __rdmsr(MSR_AMD64_SEV);
526+
sev_status = msr = native_rdmsrq(MSR_AMD64_SEV);
527527
feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
528528

529529
/*
@@ -554,7 +554,7 @@ void __head sme_enable(struct boot_params *bp)
554554
return;
555555

556556
/* For SME, check the SYSCFG MSR */
557-
msr = __rdmsr(MSR_AMD64_SYSCFG);
557+
msr = native_rdmsrq(MSR_AMD64_SYSCFG);
558558
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
559559
return;
560560
}

arch/x86/coco/sev/core.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -44,6 +44,7 @@
4444
#include <asm/apic.h>
4545
#include <asm/cpuid.h>
4646
#include <asm/cmdline.h>
47+
#include <asm/msr.h>
4748

4849
/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
4950
#define AP_INIT_CS_LIMIT 0xffff
@@ -2039,7 +2040,7 @@ void __init snp_secure_tsc_init(void)
20392040
return;
20402041

20412042
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
2042-
rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
2043+
rdmsrq(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
20432044
snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
20442045

20452046
x86_platform.calibrate_cpu = securetsc_get_tsc_khz;

arch/x86/events/amd/brs.c

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -44,12 +44,12 @@ static inline unsigned int brs_to(int idx)
4444
static __always_inline void set_debug_extn_cfg(u64 val)
4545
{
4646
/* bits[4:3] must always be set to 11b */
47-
__wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
47+
native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
4848
}
4949

5050
static __always_inline u64 get_debug_extn_cfg(void)
5151
{
52-
return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
52+
return native_rdmsrq(MSR_AMD_DBG_EXTN_CFG);
5353
}
5454

5555
static bool __init amd_brs_detect(void)
@@ -187,7 +187,7 @@ void amd_brs_reset(void)
187187
/*
188188
* Mark first entry as poisoned
189189
*/
190-
wrmsrl(brs_to(0), BRS_POISON);
190+
wrmsrq(brs_to(0), BRS_POISON);
191191
}
192192

193193
int __init amd_brs_init(void)
@@ -325,7 +325,7 @@ void amd_brs_drain(void)
325325
u32 brs_idx = tos - i;
326326
u64 from, to;
327327

328-
rdmsrl(brs_to(brs_idx), to);
328+
rdmsrq(brs_to(brs_idx), to);
329329

330330
/* Entry does not belong to us (as marked by kernel) */
331331
if (to == BRS_POISON)
@@ -341,7 +341,7 @@ void amd_brs_drain(void)
341341
if (!amd_brs_match_plm(event, to))
342342
continue;
343343

344-
rdmsrl(brs_from(brs_idx), from);
344+
rdmsrq(brs_from(brs_idx), from);
345345

346346
perf_clear_branch_entry_bitfields(br+nr);
347347

@@ -371,7 +371,7 @@ static void amd_brs_poison_buffer(void)
371371
idx = amd_brs_get_tos(&cfg);
372372

373373
/* Poison target of entry */
374-
wrmsrl(brs_to(idx), BRS_POISON);
374+
wrmsrq(brs_to(idx), BRS_POISON);
375375
}
376376

377377
/*

arch/x86/events/amd/core.c

Lines changed: 7 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -9,6 +9,7 @@
99
#include <linux/jiffies.h>
1010
#include <asm/apicdef.h>
1111
#include <asm/apic.h>
12+
#include <asm/msr.h>
1213
#include <asm/nmi.h>
1314

1415
#include "../perf_event.h"
@@ -563,13 +564,13 @@ static void amd_pmu_cpu_reset(int cpu)
563564
return;
564565

565566
/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
566-
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
567+
wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
567568

568569
/*
569570
* Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze
570571
* and PerfCntrGLobalStatus.PerfCntrOvfl
571572
*/
572-
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
573+
wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
573574
GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
574575
}
575576

@@ -651,15 +652,15 @@ static void amd_pmu_cpu_dead(int cpu)
651652

652653
static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
653654
{
654-
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
655+
wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
655656
}
656657

657658
static inline u64 amd_pmu_get_global_status(void)
658659
{
659660
u64 status;
660661

661662
/* PerfCntrGlobalStatus is read-only */
662-
rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
663+
rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
663664

664665
return status;
665666
}
@@ -672,14 +673,14 @@ static inline void amd_pmu_ack_global_status(u64 status)
672673
* clears the same bit in PerfCntrGlobalStatus
673674
*/
674675

675-
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
676+
wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
676677
}
677678

678679
static bool amd_pmu_test_overflow_topbit(int idx)
679680
{
680681
u64 counter;
681682

682-
rdmsrl(x86_pmu_event_addr(idx), counter);
683+
rdmsrq(x86_pmu_event_addr(idx), counter);
683684

684685
return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
685686
}

arch/x86/events/amd/ibs.c

Lines changed: 14 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -15,6 +15,7 @@
1515
#include <linux/sched/clock.h>
1616

1717
#include <asm/apic.h>
18+
#include <asm/msr.h>
1819

1920
#include "../perf_event.h"
2021

@@ -424,7 +425,7 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
424425
* prev count manually on overflow.
425426
*/
426427
while (!perf_event_try_update(event, count, 64)) {
427-
rdmsrl(event->hw.config_base, *config);
428+
rdmsrq(event->hw.config_base, *config);
428429
count = perf_ibs->get_count(*config);
429430
}
430431
}
@@ -435,9 +436,9 @@ static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
435436
u64 tmp = hwc->config | config;
436437

437438
if (perf_ibs->fetch_count_reset_broken)
438-
wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
439+
wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);
439440

440-
wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
441+
wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
441442
}
442443

443444
/*
@@ -452,9 +453,9 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
452453
{
453454
config &= ~perf_ibs->cnt_mask;
454455
if (boot_cpu_data.x86 == 0x10)
455-
wrmsrl(hwc->config_base, config);
456+
wrmsrq(hwc->config_base, config);
456457
config &= ~perf_ibs->enable_mask;
457-
wrmsrl(hwc->config_base, config);
458+
wrmsrq(hwc->config_base, config);
458459
}
459460

460461
/*
@@ -513,7 +514,7 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
513514
if (!stopping && (hwc->state & PERF_HES_UPTODATE))
514515
return;
515516

516-
rdmsrl(hwc->config_base, config);
517+
rdmsrq(hwc->config_base, config);
517518

518519
if (stopping) {
519520
/*
@@ -1256,7 +1257,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
12561257
hwc = &event->hw;
12571258
msr = hwc->config_base;
12581259
buf = ibs_data.regs;
1259-
rdmsrl(msr, *buf);
1260+
rdmsrq(msr, *buf);
12601261
if (!(*buf++ & perf_ibs->valid_mask))
12611262
goto fail;
12621263

@@ -1274,7 +1275,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
12741275
offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
12751276

12761277
do {
1277-
rdmsrl(msr + offset, *buf++);
1278+
rdmsrq(msr + offset, *buf++);
12781279
size++;
12791280
offset = find_next_bit(perf_ibs->offset_mask,
12801281
perf_ibs->offset_max,
@@ -1304,17 +1305,17 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
13041305
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
13051306
if (perf_ibs == &perf_ibs_op) {
13061307
if (ibs_caps & IBS_CAPS_BRNTRGT) {
1307-
rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
1308+
rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++);
13081309
br_target_idx = size;
13091310
size++;
13101311
}
13111312
if (ibs_caps & IBS_CAPS_OPDATA4) {
1312-
rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
1313+
rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++);
13131314
size++;
13141315
}
13151316
}
13161317
if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
1317-
rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
1318+
rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++);
13181319
size++;
13191320
}
13201321
}
@@ -1565,7 +1566,7 @@ static inline int ibs_eilvt_valid(void)
15651566

15661567
preempt_disable();
15671568

1568-
rdmsrl(MSR_AMD64_IBSCTL, val);
1569+
rdmsrq(MSR_AMD64_IBSCTL, val);
15691570
offset = val & IBSCTL_LVT_OFFSET_MASK;
15701571

15711572
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
@@ -1680,7 +1681,7 @@ static inline int get_ibs_lvt_offset(void)
16801681
{
16811682
u64 val;
16821683

1683-
rdmsrl(MSR_AMD64_IBSCTL, val);
1684+
rdmsrq(MSR_AMD64_IBSCTL, val);
16841685
if (!(val & IBSCTL_LVT_OFFSET_VALID))
16851686
return -EINVAL;
16861687

arch/x86/events/amd/iommu.c

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -16,6 +16,8 @@
1616
#include <linux/slab.h>
1717
#include <linux/amd-iommu.h>
1818

19+
#include <asm/msr.h>
20+
1921
#include "../perf_event.h"
2022
#include "iommu.h"
2123

arch/x86/events/amd/lbr.c

Lines changed: 11 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,6 @@
11
// SPDX-License-Identifier: GPL-2.0
22
#include <linux/perf_event.h>
3+
#include <asm/msr.h>
34
#include <asm/perf_event.h>
45

56
#include "../perf_event.h"
@@ -61,19 +62,19 @@ struct branch_entry {
6162

6263
static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val)
6364
{
64-
wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
65+
wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
6566
}
6667

6768
static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val)
6869
{
69-
wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
70+
wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
7071
}
7172

7273
static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
7374
{
7475
u64 val;
7576

76-
rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
77+
rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
7778

7879
return val;
7980
}
@@ -82,7 +83,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
8283
{
8384
u64 val;
8485

85-
rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
86+
rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
8687

8788
return val;
8889
}
@@ -333,7 +334,7 @@ void amd_pmu_lbr_reset(void)
333334

334335
cpuc->last_task_ctx = NULL;
335336
cpuc->last_log_id = 0;
336-
wrmsrl(MSR_AMD64_LBR_SELECT, 0);
337+
wrmsrq(MSR_AMD64_LBR_SELECT, 0);
337338
}
338339

339340
void amd_pmu_lbr_add(struct perf_event *event)
@@ -396,16 +397,16 @@ void amd_pmu_lbr_enable_all(void)
396397
/* Set hardware branch filter */
397398
if (cpuc->lbr_select) {
398399
lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK;
399-
wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
400+
wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select);
400401
}
401402

402403
if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
403-
rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
404-
wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
404+
rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
405+
wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
405406
}
406407

407-
rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
408-
wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
408+
rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
409+
wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
409410
}
410411

411412
void amd_pmu_lbr_disable_all(void)

arch/x86/events/amd/power.c

Lines changed: 6 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -11,6 +11,7 @@
1111
#include <linux/slab.h>
1212
#include <linux/perf_event.h>
1313
#include <asm/cpu_device_id.h>
14+
#include <asm/msr.h>
1415
#include "../perf_event.h"
1516

1617
/* Event code: LSB 8 bits, passed in attr->config any other bit is reserved. */
@@ -48,8 +49,8 @@ static void event_update(struct perf_event *event)
4849

4950
prev_pwr_acc = hwc->pwr_acc;
5051
prev_ptsc = hwc->ptsc;
51-
rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
52-
rdmsrl(MSR_F15H_PTSC, new_ptsc);
52+
rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
53+
rdmsrq(MSR_F15H_PTSC, new_ptsc);
5354

5455
/*
5556
* Calculate the CU power consumption over a time period, the unit of
@@ -75,8 +76,8 @@ static void __pmu_event_start(struct perf_event *event)
7576

7677
event->hw.state = 0;
7778

78-
rdmsrl(MSR_F15H_PTSC, event->hw.ptsc);
79-
rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
79+
rdmsrq(MSR_F15H_PTSC, event->hw.ptsc);
80+
rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
8081
}
8182

8283
static void pmu_event_start(struct perf_event *event, int mode)
@@ -272,7 +273,7 @@ static int __init amd_power_pmu_init(void)
272273

273274
cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
274275

275-
if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
276+
if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
276277
pr_err("Failed to read max compute unit power accumulator MSR\n");
277278
return -ENODEV;
278279
}

0 commit comments

Comments (0)