Commit aeb904f

KVM: x86: Refactor can_emulate_instruction() return to be more expressive
Refactor and rename can_emulate_instruction() to allow vendor code to return more than true/false, e.g. to explicitly differentiate between "retry", "fault", and "unhandleable". For now, just do the plumbing; a future patch will expand SVM's implementation to signal outright failure if KVM attempts EMULTYPE_SKIP on an SEV guest.

No functional change intended (or rather, none that are visible to the guest or userspace).

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent bc3d7c5 commit aeb904f
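For orientation before the diff, here is a minimal caller-side sketch (not part of this commit) of the contract the renamed hook follows, using the X86EMUL_* return codes from arch/x86/kvm/kvm_emulate.h. The helper name example_emulate_or_resume() is hypothetical; the real call sites are in the arch/x86/kvm/x86.c hunks below.

static int example_emulate_or_resume(struct kvm_vcpu *vcpu, int emul_type,
				     void *insn, int insn_len)
{
	/* Ask vendor code whether emulation of the instruction may proceed. */
	int r = static_call(kvm_x86_check_emulate_instruction)(vcpu, emul_type,
							       insn, insn_len);

	/*
	 * Anything other than X86EMUL_CONTINUE means "don't emulate".  In this
	 * patch, vendor code either wants the guest resumed so the instruction
	 * is retried (X86EMUL_RETRY_INSTR) or has already queued an exception
	 * for the guest (X86EMUL_PROPAGATE_FAULT); in both cases the caller
	 * simply returns to the guest.
	 */
	if (r != X86EMUL_CONTINUE)
		return 1;

	/* ... proceed with emulation ... */
	return 1;
}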

5 files changed: +35, -29 lines


arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
 KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
 KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
 KVM_X86_OP(get_msr_feature)
-KVM_X86_OP(can_emulate_instruction)
+KVM_X86_OP(check_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
 KVM_X86_OP_OPTIONAL(migrate_timers)

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 2 deletions
@@ -1734,8 +1734,8 @@ struct kvm_x86_ops {
 
 	int (*get_msr_feature)(struct kvm_msr_entry *entry);
 
-	bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
-					void *insn, int insn_len);
+	int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
+					 void *insn, int insn_len);
 
 	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
 	int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);

arch/x86/kvm/svm/svm.c

Lines changed: 17 additions & 14 deletions
@@ -364,8 +364,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
 
 }
-static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
-					void *insn, int insn_len);
+static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+					 void *insn, int insn_len);
 
 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
 					   bool commit_side_effects)
@@ -391,7 +391,7 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
 	 * right thing and treats "can't emulate" as outright failure
 	 * for EMULTYPE_SKIP.
	 */
-	if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
+	if (svm_check_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0) != X86EMUL_CONTINUE)
 		return 0;
 
 	if (unlikely(!commit_side_effects))
@@ -4727,15 +4727,15 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
-					void *insn, int insn_len)
+static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+					 void *insn, int insn_len)
 {
 	bool smep, smap, is_user;
 	u64 error_code;
 
 	/* Emulation is always possible when KVM has access to all guest state. */
 	if (!sev_guest(vcpu->kvm))
-		return true;
+		return X86EMUL_CONTINUE;
 
 	/* #UD and #GP should never be intercepted for SEV guests. */
 	WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
@@ -4747,14 +4747,14 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 	 * to guest register state.
 	 */
 	if (sev_es_guest(vcpu->kvm))
-		return false;
+		return X86EMUL_RETRY_INSTR;
 
 	/*
 	 * Emulation is possible if the instruction is already decoded, e.g.
 	 * when completing I/O after returning from userspace.
 	 */
 	if (emul_type & EMULTYPE_NO_DECODE)
-		return true;
+		return X86EMUL_CONTINUE;
 
 	/*
 	 * Emulation is possible for SEV guests if and only if a prefilled
@@ -4780,9 +4780,11 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 	 * success (and in practice it will work the vast majority of the time).
 	 */
 	if (unlikely(!insn)) {
-		if (!(emul_type & EMULTYPE_SKIP))
-			kvm_queue_exception(vcpu, UD_VECTOR);
-		return false;
+		if (emul_type & EMULTYPE_SKIP)
+			return X86EMUL_RETRY_INSTR;
+
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return X86EMUL_PROPAGATE_FAULT;
 	}
 
 	/*
@@ -4793,7 +4795,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 	 * table used to translate CS:RIP resides in emulated MMIO.
 	 */
 	if (likely(insn_len))
-		return true;
+		return X86EMUL_CONTINUE;
 
 	/*
 	 * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
@@ -4851,6 +4853,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 			kvm_inject_gp(vcpu, 0);
 		else
 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+		return X86EMUL_PROPAGATE_FAULT;
 	}
 
 resume_guest:
@@ -4868,7 +4871,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 	 * doesn't explicitly define "ignored", i.e. doing nothing and letting
 	 * the guest spin is technically "ignoring" the access.
 	 */
-	return false;
+	return X86EMUL_RETRY_INSTR;
 }
 
 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
@@ -5028,7 +5031,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
 	.vm_move_enc_context_from = sev_vm_move_enc_context_from,
 
-	.can_emulate_instruction = svm_can_emulate_instruction,
+	.check_emulate_instruction = svm_check_emulate_instruction,
 
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

arch/x86/kvm/vmx/vmx.c

Lines changed: 6 additions & 6 deletions
@@ -1657,8 +1657,8 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
 	return 0;
 }
 
-static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
-					void *insn, int insn_len)
+static int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+					 void *insn, int insn_len)
 {
 	/*
 	 * Emulation of instructions in SGX enclaves is impossible as RIP does
@@ -1669,9 +1669,9 @@ static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 	 */
 	if (to_vmx(vcpu)->exit_reason.enclave_mode) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
-		return false;
+		return X86EMUL_PROPAGATE_FAULT;
 	}
-	return true;
+	return X86EMUL_CONTINUE;
 }
 
 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
@@ -5792,7 +5792,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
 
-	if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
+	if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
 		return 1;
 
 	/*
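A note on the handle_ept_misconfig() hunk above: the bare truth test works because X86EMUL_CONTINUE is defined as 0 in arch/x86/kvm/kvm_emulate.h, so the check is shorthand for the explicit comparison used at the other call sites in this patch:

	/* Equivalent, more explicit form of the check above: */
	if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0) != X86EMUL_CONTINUE)
		return 1;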
@@ -8341,7 +8341,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.enable_smi_window = vmx_enable_smi_window,
 #endif
 
-	.can_emulate_instruction = vmx_can_emulate_instruction,
+	.check_emulate_instruction = vmx_check_emulate_instruction,
 	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
 	.migrate_timers = vmx_migrate_timers,

arch/x86/kvm/x86.c

Lines changed: 9 additions & 6 deletions
@@ -7474,11 +7474,11 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 
-static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
-				void *insn, int insn_len)
+static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
+				  void *insn, int insn_len)
 {
-	return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,
-							    insn, insn_len);
+	return static_call(kvm_x86_check_emulate_instruction)(vcpu, emul_type,
+							      insn, insn_len);
 }
 
 int handle_ud(struct kvm_vcpu *vcpu)
@@ -7488,8 +7488,10 @@ int handle_ud(struct kvm_vcpu *vcpu)
 	int emul_type = EMULTYPE_TRAP_UD;
 	char sig[5]; /* ud2; .ascii "kvm" */
 	struct x86_exception e;
+	int r;
 
-	if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
+	r = kvm_check_emulate_insn(vcpu, emul_type, NULL, 0);
+	if (r != X86EMUL_CONTINUE)
 		return 1;
 
 	if (fep_flags &&
@@ -8871,7 +8873,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
 	bool writeback = true;
 
-	if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
+	r = kvm_check_emulate_insn(vcpu, emulation_type, insn, insn_len);
+	if (r != X86EMUL_CONTINUE)
 		return 1;
 
 	vcpu->arch.l1tf_flush_l1d = true;
