Skip to content

Commit 25f8c77

Browse files
committed
Merge tag 'x86_cpu_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpuid updates from Borislav Petkov:

 - Enable the short string copies for CPUs which support them, in
   copy_user_enhanced_fast_string()

 - Avoid writing MSR_CSTAR on Intel due to TDX guests raising a #VE trap

* tag 'x86_cpu_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/lib: Add fast-short-rep-movs check to copy_user_enhanced_fast_string()
  x86/cpu: Don't write CSTAR MSR on Intel CPUs
2 parents 308319e + 244122b commit 25f8c77

File tree

2 files changed

+15
-4
lines changed

2 files changed

+15
-4
lines changed

arch/x86/kernel/cpu/common.c

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1787,14 +1787,25 @@ EXPORT_PER_CPU_SYMBOL(__preempt_count);
17871787

17881788
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
17891789

1790+
static void wrmsrl_cstar(unsigned long val)
1791+
{
1792+
/*
1793+
* Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
1794+
* is so far ignored by the CPU, but raises a #VE trap in a TDX
1795+
* guest. Avoid the pointless write on all Intel CPUs.
1796+
*/
1797+
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
1798+
wrmsrl(MSR_CSTAR, val);
1799+
}
1800+
17901801
/* May not be marked __init: used by software suspend */
17911802
void syscall_init(void)
17921803
{
17931804
wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
17941805
wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
17951806

17961807
#ifdef CONFIG_IA32_EMULATION
1797-
wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1808+
wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
17981809
/*
17991810
* This only works on Intel CPUs.
18001811
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1806,7 +1817,7 @@ void syscall_init(void)
18061817
(unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
18071818
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
18081819
#else
1809-
wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1820+
wrmsrl_cstar((unsigned long)ignore_sysret);
18101821
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
18111822
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
18121823
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);

arch/x86/lib/copy_user_64.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -200,8 +200,8 @@ EXPORT_SYMBOL(copy_user_generic_string)
200200
*/
201201
SYM_FUNC_START(copy_user_enhanced_fast_string)
202202
ASM_STAC
203-
cmpl $64,%edx
204-
jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */
203+
/* CPUs without FSRM should avoid rep movsb for short copies */
204+
ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
205205
movl %edx,%ecx
206206
1: rep
207207
movsb

0 commit comments

Comments
 (0)