Commit e525523

Author/committer: Marc Zyngier
Merge branch kvm-arm64/vcpu-first-run into kvmarm-master/next

* kvm-arm64/vcpu-first-run:
  : Rework the "vcpu first run" sequence to be driven by KVM's
  : "PID change" callback, removing the need for extra state.
  KVM: arm64: Drop vcpu->arch.has_run_once for vcpu->pid
  KVM: arm64: Merge kvm_arch_vcpu_run_pid_change() and kvm_vcpu_first_run_init()
  KVM: arm64: Restructure the point where has_run_once is advertised
  KVM: arm64: Move kvm_arch_vcpu_run_pid_change() out of line
  KVM: arm64: Move SVE state mapping at HYP to finalize-time

Signed-off-by: Marc Zyngier <[email protected]>
Parents: d58071a + cc5705f
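
The rework leans on the generic KVM_RUN path, which already tracks the thread currently running each vcpu in an RCU-protected vcpu->pid and calls kvm_arch_vcpu_run_pid_change() whenever that thread changes, before updating the pid. Since the pid stays NULL until the first KVM_RUN, "pid is non-NULL" is equivalent to the old has_run_once flag. A paraphrased sketch of that generic handling in virt/kvm/kvm_main.c (not part of this diff; details may vary between kernel versions):

	/* kvm_vcpu_ioctl(), inside the KVM_RUN case of the switch -- paraphrased */
	oldpid = rcu_access_pointer(vcpu->pid);
	if (unlikely(oldpid != task_pid(current))) {
		/* The thread running this vcpu changed (NULL -> pid on first run). */
		r = kvm_arch_vcpu_run_pid_change(vcpu);
		if (r)
			break;

		/* vcpu->pid only becomes non-NULL after the callback returns, */
		/* so vcpu_has_run_once() is still false inside it on first run. */
		newpid = get_task_pid(current, PIDTYPE_PID);
		rcu_assign_pointer(vcpu->pid, newpid);
		if (oldpid)
			synchronize_rcu();
		put_pid(oldpid);
	}
	r = kvm_arch_vcpu_ioctl_run(vcpu);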

5 files changed: 46 additions & 47 deletions

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 9 deletions
@@ -367,9 +367,6 @@ struct kvm_vcpu_arch {
 	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* Detect first run of a vcpu */
-	bool has_run_once;
-
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
@@ -606,6 +603,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
+#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
+
 #ifndef __KVM_NVHE_HYPERVISOR__
 #define kvm_call_hyp_nvhe(f, ...)					\
 	({								\
@@ -749,12 +748,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
 
-#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
-static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
-{
-	return kvm_arch_vcpu_run_map_fp(vcpu);
-}
-
+#ifdef CONFIG_KVM
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
 
arch/arm64/kvm/arm.c

Lines changed: 32 additions & 25 deletions
@@ -351,7 +351,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
 
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -584,18 +584,33 @@ static void update_vmid(struct kvm_vmid *vmid)
 	spin_unlock(&kvm_vmid_lock);
 }
 
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.target >= 0;
+}
+
+/*
+ * Handle both the initialisation that is being done when the vcpu is
+ * run for the first time, as well as the updates that must be
+ * performed each time we get a new thread dealing with this vcpu.
+ */
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	int ret = 0;
+	int ret;
 
-	if (likely(vcpu->arch.has_run_once))
-		return 0;
+	if (!kvm_vcpu_initialized(vcpu))
+		return -ENOEXEC;
 
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;
 
-	vcpu->arch.has_run_once = true;
+	ret = kvm_arch_vcpu_run_map_fp(vcpu);
+	if (ret)
+		return ret;
+
+	if (likely(vcpu_has_run_once(vcpu)))
+		return 0;
 
 	kvm_arm_vcpu_init_debug(vcpu);
 
@@ -607,19 +622,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
-	} else {
-		/*
-		 * Tell the rest of the code that there are userspace irqchip
-		 * VMs in the wild.
-		 */
-		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
 	ret = kvm_timer_enable(vcpu);
 	if (ret)
 		return ret;
 
 	ret = kvm_arm_pmu_v3_enable(vcpu);
+	if (ret)
+		return ret;
+
+	if (!irqchip_in_kernel(kvm)) {
+		/*
+		 * Tell the rest of the code that there are userspace irqchip
+		 * VMs in the wild.
+		 */
+		static_branch_inc(&userspace_irqchip_in_use);
+	}
 
 	/*
 	 * Initialize traps for protected VMs.
@@ -679,11 +698,6 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 	smp_rmb();
 }
 
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.target >= 0;
-}
-
 static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
 	if (kvm_request_pending(vcpu)) {
@@ -779,13 +793,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 	int ret;
 
-	if (unlikely(!kvm_vcpu_initialized(vcpu)))
-		return -ENOEXEC;
-
-	ret = kvm_vcpu_first_run_init(vcpu);
-	if (ret)
-		return ret;
-
 	if (run->exit_reason == KVM_EXIT_MMIO) {
 		ret = kvm_handle_mmio_return(vcpu);
 		if (ret)
@@ -1123,7 +1130,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * need to invalidate the I-cache though, as FWB does *not*
 	 * imply CTR_EL0.DIC.
 	 */
-	if (vcpu->arch.has_run_once) {
+	if (vcpu_has_run_once(vcpu)) {
 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 			stage2_unmap_vm(vcpu->kvm);
 		else
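
Two behavioural points about the restructured arm.c code. First, kvm_arch_vcpu_run_pid_change() now runs every time a different userspace thread issues KVM_RUN for the vcpu, so kvm_arch_vcpu_run_map_fp() is redone per thread while everything after the vcpu_has_run_once() check remains first-run-only. Second, the initialized (-ENOEXEC) and finalized (-EPERM) checks that used to be performed from kvm_arch_vcpu_ioctl_run() still fire on the very first KVM_RUN, because that first call always looks like a pid change (NULL -> current). A hypothetical userspace sketch of the ordering this enforces (the fd and init payload are placeholders):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int run_vcpu(int vcpu_fd, struct kvm_vcpu_init *init)
	{
		/* KVM_RUN before KVM_ARM_VCPU_INIT: target < 0, expect errno == ENOEXEC. */
		if (ioctl(vcpu_fd, KVM_RUN, 0) == 0)
			return -1;

		if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init) < 0)
			return -1;

		/* First successful KVM_RUN: the pid goes NULL -> current, so the
		 * pid-change callback performs the one-time setup shown above. */
		return ioctl(vcpu_fd, KVM_RUN, 0);
	}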

arch/arm64/kvm/fpsimd.c

Lines changed: 0 additions & 11 deletions
@@ -43,17 +43,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 	if (ret)
 		goto error;
 
-	if (vcpu->arch.sve_state) {
-		void *sve_end;
-
-		sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);
-
-		ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
-					  PAGE_HYP);
-		if (ret)
-			goto error;
-	}
-
 	vcpu->arch.host_thread_info = kern_hyp_va(ti);
 	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
 error:

arch/arm64/kvm/reset.c

Lines changed: 10 additions & 1 deletion
@@ -94,6 +94,8 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 {
 	void *buf;
 	unsigned int vl;
+	size_t reg_sz;
+	int ret;
 
 	vl = vcpu->arch.sve_max_vl;
 
@@ -106,10 +108,17 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 			    vl > SVE_VL_ARCH_MAX))
 		return -EIO;
 
-	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
+	reg_sz = vcpu_sve_state_size(vcpu);
+	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
 	if (!buf)
 		return -ENOMEM;
 
+	ret = create_hyp_mappings(buf, buf + reg_sz, PAGE_HYP);
+	if (ret) {
+		kfree(buf);
+		return ret;
+	}
+
 	vcpu->arch.sve_state = buf;
 	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
 	return 0;
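
With this change the HYP mapping of the SVE register state is created at KVM_ARM_VCPU_FINALIZE time rather than on first run. Since the -EPERM finalization check in arm.c above guarantees that an SVE vcpu cannot enter the guest before finalization succeeds, the mapping is always in place before any SVE state is touched at EL2, and reusing reg_sz for both kzalloc() and create_hyp_mappings() keeps the allocation and the mapping bounds in sync. A hypothetical userspace sketch of when this now happens (vcpu_fd is a placeholder; KVM_ARM_VCPU_INIT must have requested the SVE feature first):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int finalize_sve(int vcpu_fd)
	{
		int feature = KVM_ARM_VCPU_SVE;

		/* The kzalloc() + create_hyp_mappings() above run during this ioctl. */
		return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
	}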

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 		return ret;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once)
+		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
 	}
 	ret = 0;
