Skip to content

Commit b3f15ec

Browse files
mrutland-arm authored and Marc Zyngier committed
kvm: arm/arm64: Fold VHE entry/exit work into kvm_vcpu_run_vhe()
With VHE, running a vCPU always requires the sequence: 1. kvm_arm_vhe_guest_enter(); 2. kvm_vcpu_run_vhe(); 3. kvm_arm_vhe_guest_exit() ... and as we invoke this from the shared arm/arm64 KVM code, 32-bit arm has to provide stubs for all three functions. To simplify the common code, and make it easier to make further modifications to the arm64-specific portions in the near future, let's fold kvm_arm_vhe_guest_enter() and kvm_arm_vhe_guest_exit() into kvm_vcpu_run_vhe(). The 32-bit stubs for kvm_arm_vhe_guest_enter() and kvm_arm_vhe_guest_exit() are removed, as they are no longer used. The 32-bit stub for kvm_vcpu_run_vhe() is left as-is. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 51b2569 commit b3f15ec

File tree

4 files changed

+37
-39
lines changed

4 files changed

+37
-39
lines changed

arch/arm/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -394,9 +394,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
394394
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
395395
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
396396

397-
static inline void kvm_arm_vhe_guest_enter(void) {}
398-
static inline void kvm_arm_vhe_guest_exit(void) {}
399-
400397
#define KVM_BP_HARDEN_UNKNOWN -1
401398
#define KVM_BP_HARDEN_WA_NEEDED 0
402399
#define KVM_BP_HARDEN_NOT_REQUIRED 1

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -628,38 +628,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
628628
static inline void kvm_clr_pmu_events(u32 clr) {}
629629
#endif
630630

631-
static inline void kvm_arm_vhe_guest_enter(void)
632-
{
633-
local_daif_mask();
634-
635-
/*
636-
* Having IRQs masked via PMR when entering the guest means the GIC
637-
* will not signal the CPU of interrupts of lower priority, and the
638-
* only way to get out will be via guest exceptions.
639-
* Naturally, we want to avoid this.
640-
*
641-
* local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
642-
* dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
643-
*/
644-
pmr_sync();
645-
}
646-
647-
static inline void kvm_arm_vhe_guest_exit(void)
648-
{
649-
/*
650-
* local_daif_restore() takes care to properly restore PSTATE.DAIF
651-
* and the GIC PMR if the host is using IRQ priorities.
652-
*/
653-
local_daif_restore(DAIF_PROCCTX_NOIRQ);
654-
655-
/*
656-
* When we exit from the guest we change a number of CPU configuration
657-
* parameters, such as traps. Make sure these changes take effect
658-
* before running the host or additional guests.
659-
*/
660-
isb();
661-
}
662-
663631
#define KVM_BP_HARDEN_UNKNOWN -1
664632
#define KVM_BP_HARDEN_WA_NEEDED 0
665633
#define KVM_BP_HARDEN_NOT_REQUIRED 1

arch/arm64/kvm/hyp/switch.c

Lines changed: 37 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -617,7 +617,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
617617
}
618618

619619
/* Switch to the guest for VHE systems running in EL2 */
620-
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
620+
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
621621
{
622622
struct kvm_cpu_context *host_ctxt;
623623
struct kvm_cpu_context *guest_ctxt;
@@ -670,7 +670,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
670670

671671
return exit_code;
672672
}
673-
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
673+
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
674+
675+
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
676+
{
677+
int ret;
678+
679+
local_daif_mask();
680+
681+
/*
682+
* Having IRQs masked via PMR when entering the guest means the GIC
683+
* will not signal the CPU of interrupts of lower priority, and the
684+
* only way to get out will be via guest exceptions.
685+
* Naturally, we want to avoid this.
686+
*
687+
* local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
688+
* dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
689+
*/
690+
pmr_sync();
691+
692+
ret = __kvm_vcpu_run_vhe(vcpu);
693+
694+
/*
695+
* local_daif_restore() takes care to properly restore PSTATE.DAIF
696+
* and the GIC PMR if the host is using IRQ priorities.
697+
*/
698+
local_daif_restore(DAIF_PROCCTX_NOIRQ);
699+
700+
/*
701+
* When we exit from the guest we change a number of CPU configuration
702+
* parameters, such as traps. Make sure these changes take effect
703+
* before running the host or additional guests.
704+
*/
705+
isb();
706+
707+
return ret;
708+
}
674709

675710
/* Switch to the guest for legacy non-VHE systems */
676711
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)

virt/kvm/arm/arm.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -797,9 +797,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
797797
guest_enter_irqoff();
798798

799799
if (has_vhe()) {
800-
kvm_arm_vhe_guest_enter();
801800
ret = kvm_vcpu_run_vhe(vcpu);
802-
kvm_arm_vhe_guest_exit();
803801
} else {
804802
ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
805803
}

0 commit comments

Comments
 (0)