
Commit f7d03fc

Committed by Marc Zyngier
KVM: arm64: Introduce __pkvm_vcpu_{load,put}()
Rather than looking up the hyp vCPU on every run hypercall at EL2, introduce a per-CPU 'loaded_hyp_vcpu' tracking variable which is updated by a pair of load/put hypercalls called directly from kvm_arch_vcpu_{load,put}() when pKVM is enabled.

Tested-by: Fuad Tabba <[email protected]>
Reviewed-by: Fuad Tabba <[email protected]>
Signed-off-by: Quentin Perret <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
1 parent 99996d5 commit f7d03fc

File tree: 6 files changed, +93 −12 lines
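Before the per-file diffs, here is the gist of the change as a runnable sketch. This is a hedged, user-space model, not kernel code: hyp_vcpu_load/run/put and the lookup table are illustrative stand-ins for the hypercalls and EL2 state touched by this commit, and error handling is reduced to a bare -EINVAL. What it demonstrates is the point of the patch: the vCPU lookup happens once per load rather than once per run hypercall.

	#include <stdio.h>

	#define EINVAL 22

	/* Illustrative stand-ins for EL2 state; not the real pKVM structures. */
	struct hyp_vcpu { int handle; int idx; };

	static struct hyp_vcpu vcpu_table[4][8];  /* [vm handle][vcpu index]   */
	static struct hyp_vcpu *loaded_hyp_vcpu;  /* one slot per physical CPU */

	/* Models __pkvm_vcpu_load: resolve the hyp vCPU once, pin it per-CPU. */
	static void hyp_vcpu_load(int handle, int idx)
	{
		loaded_hyp_vcpu = &vcpu_table[handle][idx];
	}

	/* Models __kvm_vcpu_run after this commit: no lookup, use what's loaded. */
	static int hyp_vcpu_run(void)
	{
		if (!loaded_hyp_vcpu)
			return -EINVAL;
		return 0;
	}

	/* Models __pkvm_vcpu_put: release the per-CPU slot. */
	static void hyp_vcpu_put(void)
	{
		loaded_hyp_vcpu = NULL;
	}

	int main(void)
	{
		hyp_vcpu_load(1, 0);            /* kvm_arch_vcpu_load()  */
		for (int i = 0; i < 3; i++)     /* one lookup, many runs */
			printf("run %d -> %d\n", i, hyp_vcpu_run());
		hyp_vcpu_put();                 /* kvm_arch_vcpu_put()   */
		printf("run after put -> %d\n", hyp_vcpu_run());
		return 0;
	}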

arch/arm64/include/asm/kvm_asm.h

Lines changed: 2 additions & 0 deletions
@@ -79,6 +79,8 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
 };

 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]

arch/arm64/kvm/arm.c

Lines changed: 14 additions & 0 deletions
@@ -619,12 +619,26 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

 	kvm_arch_vcpu_load_debug_state_flags(vcpu);

+	if (is_protected_kvm_enabled()) {
+		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+				  vcpu->kvm->arch.pkvm.handle,
+				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
+		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
+			     &vcpu->arch.vgic_cpu.vgic_v3);
+	}
+
 	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
 		vcpu_set_on_unsupported_cpu(vcpu);
 }

 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	if (is_protected_kvm_enabled()) {
+		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
+			     &vcpu->arch.vgic_cpu.vgic_v3);
+		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
+	}
+
 	kvm_arch_vcpu_put_debug_state_flags(vcpu);
 	kvm_arch_vcpu_put_fp(vcpu);
 	if (has_vhe())

arch/arm64/kvm/hyp/include/nvhe/pkvm.h

Lines changed: 7 additions & 0 deletions
@@ -20,6 +20,12 @@ struct pkvm_hyp_vcpu {

 	/* Backpointer to the host's (untrusted) vCPU instance. */
 	struct kvm_vcpu *host_vcpu;
+
+	/*
+	 * If this hyp vCPU is loaded, then this is a backpointer to the
+	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
+	 */
+	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
 };

 /*
@@ -69,6 +75,7 @@ int __pkvm_teardown_vm(pkvm_handle_t handle);
 struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
 void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);

 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 37 additions & 10 deletions
@@ -141,16 +141,46 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
 }

+static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+	DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
+	DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+
+	if (!is_protected_kvm_enabled())
+		return;
+
+	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
+	if (!hyp_vcpu)
+		return;
+
+	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
+		/* Propagate WFx trapping flags */
+		hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
+		hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
+	}
+}
+
+static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
+{
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+
+	if (!is_protected_kvm_enabled())
+		return;
+
+	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+	if (hyp_vcpu)
+		pkvm_put_hyp_vcpu(hyp_vcpu);
+}
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
 	int ret;

-	host_vcpu = kern_hyp_va(host_vcpu);
-
 	if (unlikely(is_protected_kvm_enabled())) {
-		struct pkvm_hyp_vcpu *hyp_vcpu;
-		struct kvm *host_kvm;
+		struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();

 		/*
 		 * KVM (and pKVM) doesn't support SME guests for now, and
@@ -163,9 +193,6 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 			goto out;
 		}

-		host_kvm = kern_hyp_va(host_vcpu->kvm);
-		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
-					      host_vcpu->vcpu_idx);
 		if (!hyp_vcpu) {
 			ret = -EINVAL;
 			goto out;
@@ -176,12 +203,10 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);

 		sync_hyp_vcpu(hyp_vcpu);
-		pkvm_put_hyp_vcpu(hyp_vcpu);
 	} else {
 		/* The host is fully trusted, run its vCPU directly. */
-		ret = __kvm_vcpu_run(host_vcpu);
+		ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
 	}
-
 out:
 	cpu_reg(host_ctxt, 1) = ret;
 }
@@ -409,6 +434,8 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__pkvm_init_vm),
 	HANDLE_FUNC(__pkvm_init_vcpu),
 	HANDLE_FUNC(__pkvm_teardown_vm),
+	HANDLE_FUNC(__pkvm_vcpu_load),
+	HANDLE_FUNC(__pkvm_vcpu_put),
 };

 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 29 additions & 0 deletions
@@ -23,6 +23,12 @@ unsigned int kvm_arm_vmid_bits;

 unsigned int kvm_host_sve_max_vl;

+/*
+ * The currently loaded hyp vCPU for each physical CPU. Used only when
+ * protected KVM is enabled, but for both protected and non-protected VMs.
+ */
+static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
@@ -306,15 +312,30 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
 	struct pkvm_hyp_vm *hyp_vm;

+	/* Cannot load a new vcpu without putting the old one first. */
+	if (__this_cpu_read(loaded_hyp_vcpu))
+		return NULL;
+
 	hyp_spin_lock(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
 	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
 		goto unlock;

 	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+
+	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
+		hyp_vcpu = NULL;
+		goto unlock;
+	}
+
+	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
 	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
 unlock:
 	hyp_spin_unlock(&vm_table_lock);
+
+	if (hyp_vcpu)
+		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
 	return hyp_vcpu;
 }

@@ -323,10 +344,18 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

 	hyp_spin_lock(&vm_table_lock);
+	hyp_vcpu->loaded_hyp_vcpu = NULL;
+	__this_cpu_write(loaded_hyp_vcpu, NULL);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
 	hyp_spin_unlock(&vm_table_lock);
 }

+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
+{
+	return __this_cpu_read(loaded_hyp_vcpu);
+}
+
 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
 {
 	struct pkvm_hyp_vm *hyp_vm;

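The two guards added to pkvm_load_hyp_vcpu() above enforce a pair of invariants: a physical CPU holds at most one loaded hyp vCPU, and a hyp vCPU is loaded on at most one CPU at a time. The following stand-alone sketch models just those invariants; it is plain single-threaded C for clarity, an array stands in for the per-CPU variable, and the real code additionally holds vm_table_lock across the checks.

	#include <stdio.h>
	#include <stddef.h>

	#define NR_CPUS 2

	struct hyp_vcpu {
		/* Backpointer to the per-CPU slot tracking us; NULL if unloaded. */
		struct hyp_vcpu **loaded_hyp_vcpu;
	};

	/* Models the DEFINE_PER_CPU(loaded_hyp_vcpu) slot, one per CPU. */
	static struct hyp_vcpu *loaded[NR_CPUS];

	static struct hyp_vcpu *load(int cpu, struct hyp_vcpu *v)
	{
		if (loaded[cpu])          /* this CPU must put its vCPU first  */
			return NULL;
		if (v->loaded_hyp_vcpu)   /* already loaded on some other CPU */
			return NULL;
		v->loaded_hyp_vcpu = &loaded[cpu];
		loaded[cpu] = v;
		return v;
	}

	static void put(int cpu)
	{
		struct hyp_vcpu *v = loaded[cpu];

		if (v) {                  /* clear both sides of the link */
			v->loaded_hyp_vcpu = NULL;
			loaded[cpu] = NULL;
		}
	}

	int main(void)
	{
		struct hyp_vcpu v = { NULL };

		printf("cpu0 load:            %s\n", load(0, &v) ? "ok" : "rejected");
		printf("cpu1 load (same vcpu): %s\n", load(1, &v) ? "ok" : "rejected");
		put(0);
		printf("cpu1 load after put:   %s\n", load(1, &v) ? "ok" : "rejected");
		return 0;
	}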
arch/arm64/kvm/vgic/vgic-v3.c

Lines changed: 4 additions & 2 deletions
@@ -734,7 +734,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

-	kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
+	if (likely(!is_protected_kvm_enabled()))
+		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);

 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);
@@ -746,7 +747,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

-	kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+	if (likely(!is_protected_kvm_enabled()))
+		kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
 	WARN_ON(vgic_v4_put(vcpu));

 	if (has_vhe())
