
Commit 0720e43

bp3tk0v authored and gregkh committed
x86/bugs: Rename MDS machinery to something more generic
Commit f9af88a upstream.

It will be used by other x86 mitigations.

No functional changes.

Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Reviewed-by: Pawan Gupta <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 4c44304 commit 0720e43
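
For orientation, the hunks below amount to the following renames (old name on the left, new name on the right):

    mds_clear_cpu_buffers()       ->  x86_clear_cpu_buffers()
    mds_idle_clear_cpu_buffers()  ->  x86_idle_clear_cpu_buffers()
    mds_idle_clear                ->  cpu_buf_idle_clear
    mds_verw_sel                  ->  x86_verw_sel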

File tree

8 files changed: +36 -36 lines changed


Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst

Lines changed: 1 addition & 3 deletions
@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
 combination with a microcode update. The microcode clears the affected CPU
 buffers when the VERW instruction is executed.
 
-Kernel reuses the MDS function to invoke the buffer clearing:
-
-  mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().
 
 On MDS affected CPUs, the kernel already invokes CPU buffer clear on
 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

Documentation/arch/x86/mds.rst

Lines changed: 4 additions & 4 deletions
@@ -93,7 +93,7 @@ enters a C-state.
 
 The kernel provides a function to invoke the buffer clearing:
 
-  mds_clear_cpu_buffers()
+  x86_clear_cpu_buffers()
 
 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
 Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
   idle clearing would be a window dressing exercise and is therefore not
   activated.
 
-  The invocation is controlled by the static key mds_idle_clear which is
-  switched depending on the chosen mitigation mode and the SMT state of
-  the system.
+  The invocation is controlled by the static key cpu_buf_idle_clear which is
+  switched depending on the chosen mitigation mode and the SMT state of the
+  system.
 
   The buffer clear is only invoked before entering the C-State to prevent
   that stale data from the idling CPU from spilling to the Hyper-Thread
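
The documentation above refers to the renamed static key and helpers by name; the following is a minimal C sketch of how they fit together, condensed from the nospec-branch.h, bugs.c and irqflags.h hunks further down (kernel context assumed, illustrative rather than a literal copy of the kernel sources):

#include <linux/jump_label.h>
#include <linux/types.h>
#include <asm/segment.h>

/* bugs.c: key starts off; mitigation selection / SMT hotplug flips it. */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);

/* nospec-branch.h: unconditional clear; the memory-operand VERW plus the
 * microcode update does the actual buffer flush on affected CPUs. */
static __always_inline void x86_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/* nospec-branch.h: idle variant, gated by the static key so the VERW only
 * runs when a mitigation actually asked for idle clearing. */
static __always_inline void x86_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&cpu_buf_idle_clear))
		x86_clear_cpu_buffers();
}

/* irqflags.h / mwait.h: the idle entry points call the idle variant first. */
static __always_inline void native_halt(void)
{
	x86_idle_clear_cpu_buffers();
	asm volatile("hlt" : : : "memory");
}
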

arch/x86/entry/entry.S

Lines changed: 4 additions & 4 deletions
@@ -33,20 +33,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
 
 /*
  * Define the VERW operand that is disguised as entry code so that
- * it can be referenced with KPTI enabled. This ensure VERW can be
+ * it can be referenced with KPTI enabled. This ensures VERW can be
  * used late in exit-to-user path after page tables are switched.
  */
 .pushsection .entry.text, "ax"
 
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
 	UNWIND_HINT_UNDEFINED
 	ANNOTATE_NOENDBR
 	.word __KERNEL_DS
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
 /* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);
 
 .popsection
 

arch/x86/include/asm/irqflags.h

Lines changed: 2 additions & 2 deletions
@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
 
 static __always_inline void native_safe_halt(void)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
 }
 
 static __always_inline void native_halt(void)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 	asm volatile("hlt": : :"memory");
 }
 
arch/x86/include/asm/mwait.h

Lines changed: 3 additions & 2 deletions
@@ -44,7 +44,7 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx,
 
 static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
 
 	/* "mwait %eax, %ecx;" */
 	asm volatile(".byte 0x0f, 0x01, 0xc9;"
@@ -98,7 +98,8 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
  */
 static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
+
 	/* "mwait %eax, %ecx;" */
 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
 		     :: "a" (eax), "c" (ecx));

arch/x86/include/asm/nospec-branch.h

Lines changed: 15 additions & 14 deletions
@@ -315,22 +315,22 @@
 .endm
 
 /*
- * Macro to execute VERW instruction that mitigate transient data sampling
- * attacks such as MDS. On affected systems a microcode update overloaded VERW
- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
- *
+ * Macro to execute VERW insns that mitigate transient data sampling
+ * attacks such as MDS or TSA. On affected systems a microcode update
+ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
+ * CFLAGS.ZF.
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
 .macro CLEAR_CPU_BUFFERS
 #ifdef CONFIG_X86_64
-	ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
 #else
	/*
	 * In 32bit mode, the memory operand must be a %cs reference. The data
	 * segments may not be usable (vm86 mode), and the stack segment may not
	 * be flat (ESPFIX32).
	 */
-	ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+	ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
 #endif
 .endm
 
@@ -582,24 +582,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
 
-extern u16 mds_verw_sel;
+extern u16 x86_verw_sel;
 
 #include <asm/segment.h>
 
 /**
- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
-static __always_inline void mds_clear_cpu_buffers(void)
+static __always_inline void x86_clear_cpu_buffers(void)
 {
	static const u16 ds = __KERNEL_DS;
 
@@ -616,14 +616,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
 }
 
 /**
- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+ *                              vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
-static __always_inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void x86_idle_clear_cpu_buffers(void)
 {
-	if (static_branch_likely(&mds_idle_clear))
-		mds_clear_cpu_buffers();
+	if (static_branch_likely(&cpu_buf_idle_clear))
+		x86_clear_cpu_buffers();
 }
 
 #endif /* __ASSEMBLY__ */

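As a side note on the "memory operand variant" remark in the CLEAR_CPU_BUFFERS comment: VERW itself is an unprivileged segment-check instruction, so its architectural effect (setting ZF for a segment writable at the current privilege level) can be observed from user space; the buffer-clearing side effect exists only on affected CPUs with the microcode update. A small stand-alone demo, assuming the conventional x86_64 Linux user data selector 0x2b (an assumption for the demo, not part of this commit, which uses __KERNEL_DS in kernel context):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 0x2b is the usual __USER_DS selector on x86_64 Linux (assumption). */
	static const uint16_t sel = 0x2b;
	uint8_t zf;

	/* Memory-operand form of VERW, as the mitigation requires;
	 * architecturally it only reports writability of the segment in ZF. */
	asm volatile("verw %[sel]\n\t"
		     "setz %[zf]"
		     : [zf] "=r" (zf)
		     : [sel] "m" (sel)
		     : "cc");

	printf("ZF after verw = %u\n", (unsigned int)zf);
	return 0;
}
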
arch/x86/kernel/cpu/bugs.c

Lines changed: 6 additions & 6 deletions
@@ -122,9 +122,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-/* Control MDS CPU buffer clear before idling (halt, mwait) */
-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
-EXPORT_SYMBOL_GPL(mds_idle_clear);
+/* Control CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
 
 /*
  * Controls whether l1d flush based mitigations are enabled,
@@ -448,7 +448,7 @@ static void __init mmio_select_mitigation(void)
	 * is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
 
	/*
	 * Check if the system has the right microcode.
@@ -2092,10 +2092,10 @@ static void update_mds_branch_idle(void)
		return;
 
	if (sched_smt_active()) {
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
-		static_branch_disable(&mds_idle_clear);
+		static_branch_disable(&cpu_buf_idle_clear);
	}
 }
 

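Condensed from the two bugs.c hunks that touch the key, the idle-clearing policy reads roughly like this (a paraphrase for orientation, not the actual control flow of bugs.c):

/* Paraphrase of the enable/disable sites shown above. */
static void idle_clear_policy_sketch(void)
{
	/* mmio_select_mitigation(): without FBSDP_NO, clearing in idle is
	 * required irrespective of SMT state. */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&cpu_buf_idle_clear);

	/* update_mds_branch_idle(): with SMT active, idle clearing keeps a
	 * Hyper-Thread sibling from sampling the idling CPU's buffers ... */
	if (sched_smt_active())
		static_branch_enable(&cpu_buf_idle_clear);
	/* ... without SMT (and with MMIO mitigation off or FBSDP_NO set)
	 * the idle clear is pure overhead and gets switched off. */
	else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_disable(&cpu_buf_idle_clear);
}
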
arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
@@ -7313,7 +7313,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
		vmx_l1d_flush(vcpu);
	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
		 kvm_arch_has_assigned_device(vcpu->kvm))
-		mds_clear_cpu_buffers();
+		x86_clear_cpu_buffers();
 
	vmx_disable_fb_clear(vmx);
 