Commit 70cdd23

KVM: x86: Reorganize code in x86.c to co-locate vCPU blocking/running helpers
Shuffle code around in x86.c so that the various helpers related to vCPU
blocking/running logic are (a) located near each other and (b) ordered so
that HLT emulation can use kvm_vcpu_has_events() in a future patch.

No functional change intended.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent f7f39c5 commit 70cdd23
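
For illustration only, not part of this commit: a minimal sketch, assuming a hypothetical follow-up change, of why the ordering matters. Once kvm_vcpu_has_events() is defined above the halt helpers, __kvm_emulate_halt() could check for an already-pending wake event and leave the vCPU runnable rather than marking it halted:

/*
 * Hypothetical sketch only, NOT the code in this commit: illustrates
 * why __kvm_emulate_halt() wants kvm_vcpu_has_events() in scope.
 */
static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
        ++vcpu->stat.halt_exits;
        if (lapic_in_kernel(vcpu)) {
                /*
                 * If a wake event is already pending, the halt is
                 * effectively a nop; keep the vCPU runnable instead of
                 * bouncing it through the blocking path.
                 */
                if (!kvm_vcpu_has_events(vcpu))
                        vcpu->arch.mp_state = state;
                return 1;
        }
        vcpu->run->exit_reason = reason;
        return 0;
}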

arch/x86/kvm/x86.c

Lines changed: 132 additions & 132 deletions
@@ -9917,51 +9917,6 @@ void kvm_x86_vendor_exit(void)
 }
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
 
-static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
-{
-        /*
-         * The vCPU has halted, e.g. executed HLT. Update the run state if the
-         * local APIC is in-kernel, the run loop will detect the non-runnable
-         * state and halt the vCPU. Exit to userspace if the local APIC is
-         * managed by userspace, in which case userspace is responsible for
-         * handling wake events.
-         */
-        ++vcpu->stat.halt_exits;
-        if (lapic_in_kernel(vcpu)) {
-                vcpu->arch.mp_state = state;
-                return 1;
-        } else {
-                vcpu->run->exit_reason = reason;
-                return 0;
-        }
-}
-
-int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
-{
-        return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
-
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
-{
-        int ret = kvm_skip_emulated_instruction(vcpu);
-        /*
-         * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
-         * KVM_EXIT_DEBUG here.
-         */
-        return kvm_emulate_halt_noskip(vcpu) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-
-int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
-{
-        int ret = kvm_skip_emulated_instruction(vcpu);
-
-        return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
-                                  KVM_EXIT_AP_RESET_HOLD) && ret;
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
-
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
                                 unsigned long clock_type)
@@ -11214,6 +11169,67 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         return r;
 }
 
+static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+                !vcpu->arch.apf.halted);
+}
+
+static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+        if (!list_empty_careful(&vcpu->async_pf.done))
+                return true;
+
+        if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
+            kvm_apic_init_sipi_allowed(vcpu))
+                return true;
+
+        if (vcpu->arch.pv.pv_unhalted)
+                return true;
+
+        if (kvm_is_exception_pending(vcpu))
+                return true;
+
+        if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+            (vcpu->arch.nmi_pending &&
+             kvm_x86_call(nmi_allowed)(vcpu, false)))
+                return true;
+
+#ifdef CONFIG_KVM_SMM
+        if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+            (vcpu->arch.smi_pending &&
+             kvm_x86_call(smi_allowed)(vcpu, false)))
+                return true;
+#endif
+
+        if (kvm_test_request(KVM_REQ_PMI, vcpu))
+                return true;
+
+        if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
+                return true;
+
+        if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
+                return true;
+
+        if (kvm_hv_has_stimer_pending(vcpu))
+                return true;
+
+        if (is_guest_mode(vcpu) &&
+            kvm_x86_ops.nested_ops->has_events &&
+            kvm_x86_ops.nested_ops->has_events(vcpu, false))
+                return true;
+
+        if (kvm_xen_has_pending_events(vcpu))
+                return true;
+
+        return false;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+        return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+}
+
 /* Called within kvm->srcu read side. */
 static inline int vcpu_block(struct kvm_vcpu *vcpu)
 {
@@ -11285,12 +11301,6 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
         return 1;
 }
 
-static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
-{
-        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-                !vcpu->arch.apf.halted);
-}
-
 /* Called within kvm->srcu read side. */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
@@ -11342,6 +11352,77 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
         return r;
 }
 
+static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+{
+        /*
+         * The vCPU has halted, e.g. executed HLT. Update the run state if the
+         * local APIC is in-kernel, the run loop will detect the non-runnable
+         * state and halt the vCPU. Exit to userspace if the local APIC is
+         * managed by userspace, in which case userspace is responsible for
+         * handling wake events.
+         */
+        ++vcpu->stat.halt_exits;
+        if (lapic_in_kernel(vcpu)) {
+                vcpu->arch.mp_state = state;
+                return 1;
+        } else {
+                vcpu->run->exit_reason = reason;
+                return 0;
+        }
+}
+
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
+{
+        return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+        int ret = kvm_skip_emulated_instruction(vcpu);
+        /*
+         * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+         * KVM_EXIT_DEBUG here.
+         */
+        return kvm_emulate_halt_noskip(vcpu) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+        int ret = kvm_skip_emulated_instruction(vcpu);
+
+        return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
+                                  KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+        return kvm_vcpu_apicv_active(vcpu) &&
+               kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
+}
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.preempted_in_kernel;
+}
+
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+        if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+                return true;
+
+        if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
+            kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
+            kvm_test_request(KVM_REQ_EVENT, vcpu))
+                return true;
+
+        return kvm_arch_dy_has_pending_interrupt(vcpu);
+}
+
 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
         return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
@@ -13156,87 +13237,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
         kvm_arch_free_memslot(kvm, old);
 }
 
-static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
-{
-        if (!list_empty_careful(&vcpu->async_pf.done))
-                return true;
-
-        if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
-            kvm_apic_init_sipi_allowed(vcpu))
-                return true;
-
-        if (vcpu->arch.pv.pv_unhalted)
-                return true;
-
-        if (kvm_is_exception_pending(vcpu))
-                return true;
-
-        if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
-            (vcpu->arch.nmi_pending &&
-             kvm_x86_call(nmi_allowed)(vcpu, false)))
-                return true;
-
-#ifdef CONFIG_KVM_SMM
-        if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
-            (vcpu->arch.smi_pending &&
-             kvm_x86_call(smi_allowed)(vcpu, false)))
-                return true;
-#endif
-
-        if (kvm_test_request(KVM_REQ_PMI, vcpu))
-                return true;
-
-        if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
-                return true;
-
-        if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
-                return true;
-
-        if (kvm_hv_has_stimer_pending(vcpu))
-                return true;
-
-        if (is_guest_mode(vcpu) &&
-            kvm_x86_ops.nested_ops->has_events &&
-            kvm_x86_ops.nested_ops->has_events(vcpu, false))
-                return true;
-
-        if (kvm_xen_has_pending_events(vcpu))
-                return true;
-
-        return false;
-}
-
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
-        return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
-}
-
-bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
-{
-        return kvm_vcpu_apicv_active(vcpu) &&
-               kvm_x86_call(dy_apicv_has_pending_interrupt)(vcpu);
-}
-
-bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
-{
-        return vcpu->arch.preempted_in_kernel;
-}
-
-bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
-{
-        if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
-                return true;
-
-        if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
-#ifdef CONFIG_KVM_SMM
-            kvm_test_request(KVM_REQ_SMI, vcpu) ||
-#endif
-            kvm_test_request(KVM_REQ_EVENT, vcpu))
-                return true;
-
-        return kvm_arch_dy_has_pending_interrupt(vcpu);
-}
-
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
         if (vcpu->arch.guest_state_protected)
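
For context, the generic KVM halt path is the consumer of kvm_arch_vcpu_runnable(). A simplified sketch, loosely based on the wake-condition check in virt/kvm/kvm_main.c (the helper name here is invented for illustration and is not part of this diff):

/*
 * Simplified sketch of how the generic run loop decides to stop
 * blocking a halted vCPU; the real check in virt/kvm/kvm_main.c also
 * handles SRCU locking and returns an error code.
 */
static bool vcpu_has_wake_condition(struct kvm_vcpu *vcpu)
{
        return kvm_arch_vcpu_runnable(vcpu) ||
               kvm_cpu_has_pending_timer(vcpu) ||
               signal_pending(current);
}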
