
Commit 6caa581

oupton authored and Marc Zyngier committed
KVM: arm64: Use generic KVM xfer to guest work function
Clean up handling of checks for pending work by switching to the generic
infrastructure to do so. We pick up handling for TIF_NOTIFY_RESUME from this
switch, meaning that task work will be correctly handled.

Signed-off-by: Oliver Upton <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent e1c6b9e commit 6caa581
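
Selecting KVM_XFER_TO_GUEST_WORK (see the Kconfig hunk below) pulls in the generic work loop from kernel/entry/kvm.c. The following is a simplified approximation of that loop, not the verbatim upstream code, to show why TIF_NOTIFY_RESUME (task work) handling comes along for free once the arch calls xfer_to_guest_mode_handle_work() before entering the guest:

/*
 * Simplified approximation of the generic transfer-to-guest work loop
 * (modelled on kernel/entry/kvm.c); some flags and error paths are
 * omitted, so treat this as illustrative rather than authoritative.
 */
static int xfer_to_guest_mode_work_sketch(struct kvm_vcpu *vcpu,
					  unsigned long ti_work)
{
	do {
		int ret;

		/* A pending signal forces an exit to userspace (-EINTR). */
		if (ti_work & _TIF_SIGPENDING) {
			kvm_handle_signal_exit(vcpu);
			return -EINTR;
		}

		/* Reschedule if needed; replaces the open-coded cond_resched(). */
		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		/*
		 * Task work: the TIF_NOTIFY_RESUME handling that arm64 picks
		 * up by switching to this infrastructure.
		 */
		if (ti_work & _TIF_NOTIFY_RESUME)
			tracehook_notify_resume(NULL);

		/* Architectures may hook additional work in here. */
		ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
		if (ret)
			return ret;

		ti_work = READ_ONCE(current_thread_info()->flags);
	} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());

	return 0;
}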

File tree

2 files changed (+45, -28 lines)


arch/arm64/kvm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -26,6 +26,7 @@ menuconfig KVM
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	select KVM_XFER_TO_GUEST_WORK
 	select SRCU
 	select KVM_VFIO
 	select HAVE_KVM_EVENTFD

arch/arm64/kvm/arm.c

Lines changed: 44 additions & 28 deletions
@@ -6,6 +6,7 @@
 
 #include <linux/bug.h>
 #include <linux/cpu_pm.h>
+#include <linux/entry-kvm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
@@ -714,6 +715,45 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
 		static_branch_unlikely(&arm64_mismatched_32bit_el0);
 }
 
+/**
+ * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
+ * @vcpu: The VCPU pointer
+ * @ret: Pointer to write optional return code
+ *
+ * Returns: true if the VCPU needs to return to a preemptible + interruptible
+ *	    kernel context and skip guest entry.
+ *
+ * This function disambiguates between two different types of exits: exits to a
+ * preemptible + interruptible kernel context and exits to userspace. For an
+ * exit to userspace, this function will write the return code to ret and return
+ * true. For an exit to a preemptible + interruptible kernel context (i.e. check
+ * for pending work and re-enter), return true without writing to ret.
+ */
+static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
+{
+	struct kvm_run *run = vcpu->run;
+
+	/*
+	 * If we're using a userspace irqchip, then check if we need
+	 * to tell a userspace irqchip about timer or PMU level
+	 * changes and if so, exit to userspace (the actual level
+	 * state gets updated in kvm_timer_update_run and
+	 * kvm_pmu_update_run below).
+	 */
+	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
+		if (kvm_timer_should_notify_user(vcpu) ||
+		    kvm_pmu_should_notify_user(vcpu)) {
+			*ret = -EINTR;
+			run->exit_reason = KVM_EXIT_INTR;
+			return true;
+		}
+	}
+
+	return kvm_request_pending(vcpu) ||
+			need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
+			xfer_to_guest_mode_work_pending();
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu: The VCPU pointer
@@ -757,7 +797,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		/*
 		 * Check conditions before entering the guest
 		 */
-		cond_resched();
+		ret = xfer_to_guest_mode_handle_work(vcpu);
+		if (!ret)
+			ret = 1;
 
 		update_vmid(&vcpu->arch.hw_mmu->vmid);
 
@@ -776,31 +818,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 		kvm_vgic_flush_hwstate(vcpu);
 
-		/*
-		 * Exit if we have a signal pending so that we can deliver the
-		 * signal to user space.
-		 */
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.signal_exits;
-		}
-
-		/*
-		 * If we're using a userspace irqchip, then check if we need
-		 * to tell a userspace irqchip about timer or PMU level
-		 * changes and if so, exit to userspace (the actual level
-		 * state gets updated in kvm_timer_update_run and
-		 * kvm_pmu_update_run below).
-		 */
-		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
-			if (kvm_timer_should_notify_user(vcpu) ||
-			    kvm_pmu_should_notify_user(vcpu)) {
-				ret = -EINTR;
-				run->exit_reason = KVM_EXIT_INTR;
-			}
-		}
-
 		/*
 		 * Ensure we set mode to IN_GUEST_MODE after we disable
 		 * interrupts and before the final VCPU requests check.
@@ -809,8 +826,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
-		    kvm_request_pending(vcpu)) {
+		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
 			kvm_pmu_sync_hwstate(vcpu);
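
Putting the hunks together: the run loop now checks for pending work twice. The first check, with interrupts and preemption enabled, actually processes the work via xfer_to_guest_mode_handle_work(); the second, after interrupts are disabled and vcpu->mode is set, only detects late-arriving work through kvm_vcpu_exit_request() and aborts the entry. A heavily simplified sketch of that shape (locking, vgic/timer flush and the actual guest entry are omitted; the real code is kvm_arch_vcpu_ioctl_run()):

/*
 * Heavily simplified sketch of the vcpu run-loop shape after this patch;
 * illustrative only, not the full arm64 implementation.
 */
static int run_loop_sketch(struct kvm_vcpu *vcpu)
{
	int ret = 1;

	while (ret > 0) {
		/* Phase 1: process pending work with interrupts/preemption on. */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			break;		/* e.g. -EINTR: return to userspace */
		ret = 1;

		local_irq_disable();
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		/* Phase 2: bail out if work arrived after the first check. */
		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			continue;
		}

		/* ... enter the guest here, then handle the exit ... */
		local_irq_enable();
	}

	return ret;
}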
