Skip to content

Commit bb06650

Browse files
jpoimboe authored and suryasaimadhu committed
KVM: VMX: Convert launched argument to flags
Convert __vmx_vcpu_run()'s 'launched' argument to 'flags', in preparation for doing SPEC_CTRL handling immediately after vmexit, which will need another flag. This is much easier than adding a fourth argument, because this code supports both 32-bit and 64-bit, and the fourth argument on 32-bit would have to be pushed on the stack. Note that __vmx_vcpu_run_flags() is called outside of the noinstr critical section because it will soon start calling potentially traceable functions. Signed-off-by: Josh Poimboeuf <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Borislav Petkov <[email protected]>
1 parent 8bd200d commit bb06650

File tree

5 files changed

+31
-9
lines changed

5 files changed

+31
-9
lines changed

arch/x86/kvm/vmx/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3087,7 +3087,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
30873087
}
30883088

30893089
vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3090-
vmx->loaded_vmcs->launched);
3090+
__vmx_vcpu_run_flags(vmx));
30913091

30923092
if (vmx->msr_autoload.host.nr)
30933093
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);

arch/x86/kvm/vmx/run_flags.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
3+
#define __KVM_X86_VMX_RUN_FLAGS_H
4+
5+
#define VMX_RUN_VMRESUME (1 << 0)
6+
7+
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */

arch/x86/kvm/vmx/vmenter.S

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <asm/kvm_vcpu_regs.h>
66
#include <asm/nospec-branch.h>
77
#include <asm/segment.h>
8+
#include "run_flags.h"
89

910
#define WORD_SIZE (BITS_PER_LONG / 8)
1011

@@ -34,7 +35,7 @@
3435
* __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
3536
* @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
3637
* @regs: unsigned long * (to guest registers)
37-
* @launched: %true if the VMCS has been launched
38+
* @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
3839
*
3940
* Returns:
4041
* 0 on VM-Exit, 1 on VM-Fail
@@ -59,7 +60,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
5960
*/
6061
push %_ASM_ARG2
6162

62-
/* Copy @launched to BL, _ASM_ARG3 is volatile. */
63+
/* Copy @flags to BL, _ASM_ARG3 is volatile. */
6364
mov %_ASM_ARG3B, %bl
6465

6566
lea (%_ASM_SP), %_ASM_ARG2
@@ -69,7 +70,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
6970
mov (%_ASM_SP), %_ASM_AX
7071

7172
/* Check if vmlaunch or vmresume is needed */
72-
testb %bl, %bl
73+
testb $VMX_RUN_VMRESUME, %bl
7374

7475
/* Load guest registers. Don't clobber flags. */
7576
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -92,7 +93,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
9293
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
9394

9495
/* Check EFLAGS.ZF from 'testb' above */
95-
je .Lvmlaunch
96+
jz .Lvmlaunch
9697

9798
/*
9899
* After a successful VMRESUME/VMLAUNCH, control flow "magically"

arch/x86/kvm/vmx/vmx.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -839,6 +839,16 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
839839
MSR_IA32_SPEC_CTRL);
840840
}
841841

842+
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
843+
{
844+
unsigned int flags = 0;
845+
846+
if (vmx->loaded_vmcs->launched)
847+
flags |= VMX_RUN_VMRESUME;
848+
849+
return flags;
850+
}
851+
842852
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
843853
unsigned long entry, unsigned long exit)
844854
{
@@ -6826,7 +6836,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
68266836
}
68276837

68286838
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
6829-
struct vcpu_vmx *vmx)
6839+
struct vcpu_vmx *vmx,
6840+
unsigned long flags)
68306841
{
68316842
guest_state_enter_irqoff();
68326843

@@ -6845,7 +6856,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
68456856
native_write_cr2(vcpu->arch.cr2);
68466857

68476858
vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
6848-
vmx->loaded_vmcs->launched);
6859+
flags);
68496860

68506861
vcpu->arch.cr2 = native_read_cr2();
68516862

@@ -6953,7 +6964,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
69536964
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
69546965

69556966
/* The actual VMENTER/EXIT is in the .noinstr.text section. */
6956-
vmx_vcpu_enter_exit(vcpu, vmx);
6967+
vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
69576968

69586969
/*
69596970
* We do not use IBRS in the kernel. If this vCPU has used the

arch/x86/kvm/vmx/vmx.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#include "vmcs.h"
1414
#include "vmx_ops.h"
1515
#include "cpuid.h"
16+
#include "run_flags.h"
1617

1718
#define MSR_TYPE_R 1
1819
#define MSR_TYPE_W 2
@@ -404,7 +405,9 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
404405
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
405406
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
406407
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
407-
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
408+
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
409+
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
410+
unsigned int flags);
408411
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
409412
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
410413

0 commit comments

Comments
 (0)