6
6
7
7
#include <linux/bug.h>
8
8
#include <linux/cpu_pm.h>
9
+ #include <linux/entry-kvm.h>
9
10
#include <linux/errno.h>
10
11
#include <linux/err.h>
11
12
#include <linux/kvm_host.h>
@@ -715,6 +716,45 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
715
716
static_branch_unlikely (& arm64_mismatched_32bit_el0 );
716
717
}
717
718
719
/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu: The VCPU pointer
 * @ret: Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    kernel context and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
	 */
	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	/*
	 * Exit without writing *ret: caller re-checks pending work (vCPU
	 * requests, stale VMID generation, or generic xfer-to-guest work
	 * such as signals/resched) in a preemptible context.
	 */
	return kvm_request_pending(vcpu) ||
			need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
			xfer_to_guest_mode_work_pending();
}
757
+
718
758
/**
719
759
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
720
760
* @vcpu: The VCPU pointer
@@ -758,7 +798,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
758
798
/*
759
799
* Check conditions before entering the guest
760
800
*/
761
- cond_resched ();
801
+ ret = xfer_to_guest_mode_handle_work (vcpu );
802
+ if (!ret )
803
+ ret = 1 ;
762
804
763
805
update_vmid (& vcpu -> arch .hw_mmu -> vmid );
764
806
@@ -777,30 +819,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
777
819
778
820
kvm_vgic_flush_hwstate (vcpu );
779
821
780
- /*
781
- * Exit if we have a signal pending so that we can deliver the
782
- * signal to user space.
783
- */
784
- if (signal_pending (current )) {
785
- ret = - EINTR ;
786
- run -> exit_reason = KVM_EXIT_INTR ;
787
- }
788
-
789
- /*
790
- * If we're using a userspace irqchip, then check if we need
791
- * to tell a userspace irqchip about timer or PMU level
792
- * changes and if so, exit to userspace (the actual level
793
- * state gets updated in kvm_timer_update_run and
794
- * kvm_pmu_update_run below).
795
- */
796
- if (static_branch_unlikely (& userspace_irqchip_in_use )) {
797
- if (kvm_timer_should_notify_user (vcpu ) ||
798
- kvm_pmu_should_notify_user (vcpu )) {
799
- ret = - EINTR ;
800
- run -> exit_reason = KVM_EXIT_INTR ;
801
- }
802
- }
803
-
804
822
/*
805
823
* Ensure we set mode to IN_GUEST_MODE after we disable
806
824
* interrupts and before the final VCPU requests check.
@@ -809,8 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
809
827
*/
810
828
smp_store_mb (vcpu -> mode , IN_GUEST_MODE );
811
829
812
- if (ret <= 0 || need_new_vmid_gen (& vcpu -> arch .hw_mmu -> vmid ) ||
813
- kvm_request_pending (vcpu )) {
830
+ if (ret <= 0 || kvm_vcpu_exit_request (vcpu , & ret )) {
814
831
vcpu -> mode = OUTSIDE_GUEST_MODE ;
815
832
isb (); /* Ensure work in x_flush_hwstate is committed */
816
833
kvm_pmu_sync_hwstate (vcpu );
0 commit comments