Skip to content

Commit be66e67

Browse files
willdeacon authored and Marc Zyngier committed
KVM: arm64: Use the pKVM hyp vCPU structure in handle___kvm_vcpu_run()
As a stepping stone towards deprivileging the host's access to the guest's vCPU structures, introduce some naive flush/sync routines to copy most of the host vCPU into the hyp vCPU on vCPU run and back again on return to EL1.

This allows us to run using the pKVM hyp structures when KVM is initialised in protected mode.

Tested-by: Vincent Donnefort <[email protected]>
Co-developed-by: Fuad Tabba <[email protected]>
Signed-off-by: Fuad Tabba <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 169cd0f commit be66e67

File tree

3 files changed

+109
-2
lines changed

3 files changed

+109
-2
lines changed

arch/arm64/kvm/hyp/include/nvhe/pkvm.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,4 +61,8 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
6161
unsigned long vcpu_hva);
6262
int __pkvm_teardown_vm(pkvm_handle_t handle);
6363

64+
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
65+
unsigned int vcpu_idx);
66+
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
67+
6468
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 77 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,11 +22,86 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
2222

2323
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
2424

25+
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
26+
{
27+
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
28+
29+
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
30+
31+
hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
32+
hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
33+
34+
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
35+
36+
hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
37+
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
38+
hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
39+
40+
hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
41+
hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
42+
43+
hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
44+
hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
45+
46+
hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
47+
48+
hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
49+
}
50+
51+
static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
52+
{
53+
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
54+
struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
55+
struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
56+
unsigned int i;
57+
58+
host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
59+
60+
host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
61+
host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
62+
63+
host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
64+
65+
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
66+
host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
67+
68+
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
69+
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
70+
host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
71+
}
72+
2573
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
2674
{
27-
DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
75+
DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
76+
int ret;
77+
78+
host_vcpu = kern_hyp_va(host_vcpu);
79+
80+
if (unlikely(is_protected_kvm_enabled())) {
81+
struct pkvm_hyp_vcpu *hyp_vcpu;
82+
struct kvm *host_kvm;
83+
84+
host_kvm = kern_hyp_va(host_vcpu->kvm);
85+
hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
86+
host_vcpu->vcpu_idx);
87+
if (!hyp_vcpu) {
88+
ret = -EINVAL;
89+
goto out;
90+
}
91+
92+
flush_hyp_vcpu(hyp_vcpu);
93+
94+
ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
95+
96+
sync_hyp_vcpu(hyp_vcpu);
97+
pkvm_put_hyp_vcpu(hyp_vcpu);
98+
} else {
99+
/* The host is fully trusted, run its vCPU directly. */
100+
ret = __kvm_vcpu_run(host_vcpu);
101+
}
28102

29-
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
103+
out:
104+
cpu_reg(host_ctxt, 1) = ret;
30105
}
31106

32107
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,33 @@ static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
241241
return vm_table[idx];
242242
}
243243

244+
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
245+
unsigned int vcpu_idx)
246+
{
247+
struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
248+
struct pkvm_hyp_vm *hyp_vm;
249+
250+
hyp_spin_lock(&vm_table_lock);
251+
hyp_vm = get_vm_by_handle(handle);
252+
if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
253+
goto unlock;
254+
255+
hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
256+
hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
257+
unlock:
258+
hyp_spin_unlock(&vm_table_lock);
259+
return hyp_vcpu;
260+
}
261+
262+
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
263+
{
264+
struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
265+
266+
hyp_spin_lock(&vm_table_lock);
267+
hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
268+
hyp_spin_unlock(&vm_table_lock);
269+
}
270+
244271
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
245272
{
246273
if (host_vcpu)
@@ -286,6 +313,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
286313
hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
287314

288315
hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
316+
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
289317
done:
290318
if (ret)
291319
unpin_host_vcpu(host_vcpu);

0 commit comments

Comments
 (0)