Skip to content

Commit 17b6c49

Browse files
committed
Merge tag 'x86_urgent_for_v5.11_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Add a new Intel model number for Alder Lake

 - Differentiate which aspects of the FPU state get saved/restored when
   the FPU is used in-kernel and fix a boot crash on K7 due to early
   MXCSR access before CR4.OSFXSR is even set.

 - A couple of noinstr annotation fixes

 - Correct die ID setting on AMD for users of topology information which
   need the correct die ID

 - A SEV-ES fix to handle string port IO to/from kernel memory properly

* tag 'x86_urgent_for_v5.11_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Add another Alder Lake CPU to the Intel family
  x86/mmx: Use KFPU_387 for MMX string operations
  x86/fpu: Add kernel_fpu_begin_mask() to selectively initialize state
  x86/topology: Make __max_die_per_package available unconditionally
  x86: __always_inline __{rd,wr}msr()
  x86/mce: Remove explicit/superfluous tracing
  locking/lockdep: Avoid noinstr warning for DEBUG_LOCKDEP
  locking/lockdep: Cure noinstr fail
  x86/sev: Fix nonistr violation
  x86/entry: Fix noinstr fail
  x86/cpu/amd: Set __max_die_per_package on AMD
  x86/sev-es: Handle string port IO to kernel memory properly
2 parents 14c50a6 + 6e1239c commit 17b6c49

File tree

12 files changed

+72
-27
lines changed

12 files changed

+72
-27
lines changed

arch/x86/entry/common.c

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
7373
unsigned int nr)
7474
{
7575
if (likely(nr < IA32_NR_syscalls)) {
76-
instrumentation_begin();
7776
nr = array_index_nospec(nr, IA32_NR_syscalls);
7877
regs->ax = ia32_sys_call_table[nr](regs);
79-
instrumentation_end();
8078
}
8179
}
8280

@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
9189
* or may not be necessary, but it matches the old asm behavior.
9290
*/
9391
nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
92+
instrumentation_begin();
9493

9594
do_syscall_32_irqs_on(regs, nr);
95+
96+
instrumentation_end();
9697
syscall_exit_to_user_mode(regs);
9798
}
9899

@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
121122
res = get_user(*(u32 *)&regs->bp,
122123
(u32 __user __force *)(unsigned long)(u32)regs->sp);
123124
}
124-
instrumentation_end();
125125

126126
if (res) {
127127
/* User code screwed up. */
128128
regs->ax = -EFAULT;
129+
130+
instrumentation_end();
129131
syscall_exit_to_user_mode(regs);
130132
return false;
131133
}
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
135137

136138
/* Now this is just like a normal syscall. */
137139
do_syscall_32_irqs_on(regs, nr);
140+
141+
instrumentation_end();
138142
syscall_exit_to_user_mode(regs);
139143
return true;
140144
}

arch/x86/include/asm/fpu/api.h

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,25 @@
1616
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
1717
* disables preemption so be careful if you intend to use it for long periods
1818
* of time.
19-
* If you intend to use the FPU in softirq you need to check first with
19+
* If you intend to use the FPU in irq/softirq you need to check first with
2020
* irq_fpu_usable() if it is possible.
2121
*/
22-
extern void kernel_fpu_begin(void);
22+
23+
/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
24+
#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
25+
#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */
26+
27+
extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
2328
extern void kernel_fpu_end(void);
2429
extern bool irq_fpu_usable(void);
2530
extern void fpregs_mark_activate(void);
2631

32+
/* Code that is unaware of kernel_fpu_begin_mask() can use this */
33+
static inline void kernel_fpu_begin(void)
34+
{
35+
kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
36+
}
37+
2738
/*
2839
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
2940
* A context switch will (and softirq might) save CPU's FPU registers to

arch/x86/include/asm/intel-family.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,7 @@
9797

9898
#define INTEL_FAM6_LAKEFIELD 0x8A
9999
#define INTEL_FAM6_ALDERLAKE 0x97
100+
#define INTEL_FAM6_ALDERLAKE_L 0x9A
100101

101102
/* "Small Core" Processors (Atom) */
102103

arch/x86/include/asm/msr.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
8686
* think of extending them - you will be slapped with a stinking trout or a frozen
8787
* shark will reach you, wherever you are! You've been warned.
8888
*/
89-
static inline unsigned long long notrace __rdmsr(unsigned int msr)
89+
static __always_inline unsigned long long __rdmsr(unsigned int msr)
9090
{
9191
DECLARE_ARGS(val, low, high);
9292

@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
9898
return EAX_EDX_VAL(val, low, high);
9999
}
100100

101-
static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
101+
static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
102102
{
103103
asm volatile("1: wrmsr\n"
104104
"2:\n"

arch/x86/include/asm/topology.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
110110
#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
111111
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
112112

113+
extern unsigned int __max_die_per_package;
114+
113115
#ifdef CONFIG_SMP
114116
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
115117
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
118120
extern unsigned int __max_logical_packages;
119121
#define topology_max_packages() (__max_logical_packages)
120122

121-
extern unsigned int __max_die_per_package;
122-
123123
static inline int topology_max_die_per_package(void)
124124
{
125125
return __max_die_per_package;

arch/x86/kernel/cpu/amd.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
542542
u32 ecx;
543543

544544
ecx = cpuid_ecx(0x8000001e);
545-
nodes_per_socket = ((ecx >> 8) & 7) + 1;
545+
__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
546546
} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
547547
u64 value;
548548

549549
rdmsrl(MSR_FAM10H_NODE_ID, value);
550-
nodes_per_socket = ((value >> 3) & 7) + 1;
550+
__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
551551
}
552552

553553
if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&

arch/x86/kernel/cpu/mce/core.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
19921992
* that out because it's an indirect call. Annotate it.
19931993
*/
19941994
instrumentation_begin();
1995-
trace_hardirqs_off_finish();
1995+
19961996
machine_check_vector(regs);
1997-
if (regs->flags & X86_EFLAGS_IF)
1998-
trace_hardirqs_on_prepare();
1997+
19991998
instrumentation_end();
20001999
irqentry_nmi_exit(regs, irq_state);
20012000
}
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
20042003
{
20052004
irqentry_enter_from_user_mode(regs);
20062005
instrumentation_begin();
2006+
20072007
machine_check_vector(regs);
2008+
20082009
instrumentation_end();
20092010
irqentry_exit_to_user_mode(regs);
20102011
}

arch/x86/kernel/cpu/topology.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,10 @@
2525
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
2626
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
2727

28-
#ifdef CONFIG_SMP
2928
unsigned int __max_die_per_package __read_mostly = 1;
3029
EXPORT_SYMBOL(__max_die_per_package);
3130

31+
#ifdef CONFIG_SMP
3232
/*
3333
* Check if given CPUID extended toplogy "leaf" is implemented
3434
*/

arch/x86/kernel/fpu/core.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
121121
}
122122
EXPORT_SYMBOL(copy_fpregs_to_fpstate);
123123

124-
void kernel_fpu_begin(void)
124+
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
125125
{
126126
preempt_disable();
127127

@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
141141
}
142142
__cpu_invalidate_fpregs_state();
143143

144-
if (boot_cpu_has(X86_FEATURE_XMM))
144+
/* Put sane initial values into the control registers. */
145+
if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
145146
ldmxcsr(MXCSR_DEFAULT);
146147

147-
if (boot_cpu_has(X86_FEATURE_FPU))
148+
if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
148149
asm volatile ("fninit");
149150
}
150-
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
151+
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
151152

152153
void kernel_fpu_end(void)
153154
{

arch/x86/kernel/sev-es.c

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
225225
return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
226226
}
227227

228-
static inline void sev_es_wr_ghcb_msr(u64 val)
228+
static __always_inline void sev_es_wr_ghcb_msr(u64 val)
229229
{
230230
u32 low, high;
231231

@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
286286
u16 d2;
287287
u8 d1;
288288

289+
/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
290+
if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
291+
memcpy(dst, buf, size);
292+
return ES_OK;
293+
}
294+
289295
switch (size) {
290296
case 1:
291297
memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
335341
u16 d2;
336342
u8 d1;
337343

344+
/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
345+
if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
346+
memcpy(buf, src, size);
347+
return ES_OK;
348+
}
349+
338350
switch (size) {
339351
case 1:
340352
if (get_user(d1, s))

0 commit comments

Comments
 (0)