Commit f14eec0

KVM: SVM: move more vmentry code to assembly
Manipulate IF around vmload/vmsave to remove the confusing usage of
local_irq_enable where interrupts are actually disabled via GIF. And
stuff the RSB immediately without waiting for a RET to avoid Spectre-v2
attacks.

Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 9ef1530 commit f14eec0
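
Why local_irq_enable() was confusing here: SVM gates interrupt delivery on
two flags, the global interrupt flag GIF (cleared by CLGI before this code
runs and set again by STGI afterwards) and the ordinary RFLAGS.IF. With GIF
clear, setting IF from C changes nothing observable. Below is a minimal
user-space model of that gating; it is illustrative only, and every name in
it (gif, rflags_if, interrupts_deliverable) is invented for the sketch.

/* Illustrative user-space model, not kernel code: on SVM, interrupt
 * delivery requires both GIF (toggled by CLGI/STGI and by VMRUN/#VMEXIT)
 * and RFLAGS.IF. */
#include <stdbool.h>
#include <stdio.h>

static bool gif;        /* global interrupt flag; CLGI ran earlier */
static bool rflags_if;  /* RFLAGS.IF, toggled by sti/cli           */

static bool interrupts_deliverable(void)
{
        /* Physical interrupts are held off whenever GIF=0,
         * regardless of RFLAGS.IF. */
        return gif && rflags_if;
}

int main(void)
{
        gif = false;       /* svm_vcpu_run() cleared GIF before this point */
        rflags_if = true;  /* what the old local_irq_enable() amounted to  */
        printf("IF=1, GIF=0 -> deliverable: %d\n", interrupts_deliverable());
        return 0;
}

Run, this prints "deliverable: 0", which is the whole point: the IF toggling
in C was dead weight, so the commit moves it into the assembly, next to the
instructions that actually care about it.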

File tree: 3 files changed, +9 -28 lines

arch/x86/include/asm/nospec-branch.h

Lines changed: 0 additions & 21 deletions
@@ -237,27 +237,6 @@ enum ssb_mitigation {
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
-/*
- * On VMEXIT we must ensure that no RSB predictions learned in the guest
- * can be followed in the host, by overwriting the RSB completely. Both
- * retpoline and IBRS mitigations for Spectre v2 need this; only on future
- * CPUs with IBRS_ALL *might* it be avoided.
- */
-static inline void vmexit_fill_RSB(void)
-{
-#ifdef CONFIG_RETPOLINE
-	unsigned long loops;
-
-	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-		      ALTERNATIVE("jmp 910f",
-				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-				  X86_FEATURE_RETPOLINE)
-		      "910:"
-		      : "=r" (loops), ASM_CALL_CONSTRAINT
-		      : : "memory" );
-#endif
-}
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
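
The deleted helper could only run after __svm_vcpu_run() had already
executed a RET with guest-trained RSB contents, which is exactly the gap
the assembly version closes. For readers who have not seen the technique,
here is a stand-alone user-space sketch of RSB stuffing. It is a sketch,
not the kernel macro: x86-64 is assumed, the loop count of 32 matches the
kernel's RSB_CLEAR_LOOPS but is hard-coded here, and int3 stands in for
the kernel's pause/lfence speculation trap.

#include <stdio.h>

/* Each CALL pushes a benign return address into the return stack
 * buffer, evicting whatever the guest trained; the INT3 traps any
 * speculation past a mispredicted RET; one ADD drops all 32 return
 * addresses again.  Clobbers the SysV red zone, fine for a demo. */
static void stuff_rsb_sketch(void)
{
        asm volatile(".rept 32\n\t"
                     "call 1f\n\t"
                     "int3\n\t"
                     "1:\n\t"
                     ".endr\n\t"
                     "add $(32*8), %%rsp"
                     ::: "memory");
}

int main(void)
{
        stuff_rsb_sketch();
        puts("RSB overwritten with 32 benign entries");
        return 0;
}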

arch/x86/kvm/svm/svm.c

Lines changed: 0 additions & 7 deletions
@@ -3330,13 +3330,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-	local_irq_enable();
-
 	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
-
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
@@ -3366,8 +3361,6 @@
 
 	reload_tss(vcpu);
 
-	local_irq_disable();
-
 	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
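
With the deletions above, the call site reads straight through.
Reconstructed from the context lines of the first hunk, the resulting C
code is simply:

	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);

Both the interrupt-flag handling and the RSB stuffing now live in
vmenter.S, so nothing on the C side between the VMRUN and
x86_spec_ctrl_restore_host() depends on either.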

arch/x86/kvm/svm/vmenter.S

Lines changed: 9 additions & 0 deletions
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
@@ -78,6 +79,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %_ASM_AX
 
 	/* Enter guest mode */
+	sti
 1:	vmload %_ASM_AX
 	jmp 3f
 2:	cmpb $0, kvm_rebooting
@@ -99,6 +101,13 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 	_ASM_EXTABLE(5b, 6b)
 7:
+	cli
+
+#ifdef CONFIG_RETPOLINE
+	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
 	/* "POP" @regs to RAX. */
 	pop %_ASM_AX
 
