Skip to content

Commit 52b2865

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: pkvm: Unshare guest structs during teardown
Make use of the newly introduced unshare hypercall during guest teardown to unmap guest-related data structures from the hyp stage-1.

Signed-off-by: Quentin Perret <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent b8cc6eb commit 52b2865

File tree

6 files changed

+85
-4
lines changed

6 files changed

+85
-4
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,7 @@ struct kvm_vcpu_arch {
321321
struct kvm_guest_debug_arch external_debug_state;
322322

323323
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
324+
struct task_struct *parent_task;
324325

325326
struct {
326327
/* {Break,watch}point registers */
@@ -737,6 +738,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
737738
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
738739
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
739740
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
741+
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
740742

741743
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
742744
{

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
151151
#include <asm/stage2_pgtable.h>
152152

153153
int kvm_share_hyp(void *from, void *to);
154+
void kvm_unshare_hyp(void *from, void *to);
154155
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
155156
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
156157
void __iomem **kaddr,

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,6 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
188188
}
189189
}
190190
atomic_set(&kvm->online_vcpus, 0);
191+
192+
kvm_unshare_hyp(kvm, kvm + 1);
191193
}
192194

193195
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

arch/arm64/kvm/fpsimd.c

Lines changed: 31 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,19 @@
1414
#include <asm/kvm_mmu.h>
1515
#include <asm/sysreg.h>
1616

17+
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
18+
{
19+
struct task_struct *p = vcpu->arch.parent_task;
20+
struct user_fpsimd_state *fpsimd;
21+
22+
if (!is_protected_kvm_enabled() || !p)
23+
return;
24+
25+
fpsimd = &p->thread.uw.fpsimd_state;
26+
kvm_unshare_hyp(fpsimd, fpsimd + 1);
27+
put_task_struct(p);
28+
}
29+
1730
/*
1831
* Called on entry to KVM_RUN unless this vcpu previously ran at least
1932
* once and the most recent prior KVM_RUN for this vcpu was called from
@@ -29,12 +42,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
2942

3043
struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
3144

45+
kvm_vcpu_unshare_task_fp(vcpu);
46+
3247
/* Make sure the host task fpsimd state is visible to hyp: */
3348
ret = kvm_share_hyp(fpsimd, fpsimd + 1);
34-
if (!ret)
35-
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
49+
if (ret)
50+
return ret;
51+
52+
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
53+
54+
/*
55+
* We need to keep current's task_struct pinned until its data has been
56+
* unshared with the hypervisor to make sure it is not re-used by the
57+
* kernel and donated to someone else while already shared -- see
58+
* kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
59+
*/
60+
if (is_protected_kvm_enabled()) {
61+
get_task_struct(current);
62+
vcpu->arch.parent_task = current;
63+
}
3664

37-
return ret;
65+
return 0;
3866
}
3967

4068
/*

arch/arm64/kvm/mmu.c

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -341,6 +341,32 @@ static int share_pfn_hyp(u64 pfn)
341341
return ret;
342342
}
343343

344+
/*
 * Drop one reference on the shared-pfn tracking node for @pfn.  When the
 * count reaches zero, remove the node from the rbtree and ask the
 * hypervisor to unmap the page from its stage-1 via the
 * __pkvm_host_unshare_hyp hypercall.
 *
 * Returns 0 on success, -ENOENT if @pfn was never shared, or the
 * hypercall's error code.
 */
static int unshare_pfn_hyp(u64 pfn)
{
	struct rb_node **node, *parent;
	struct hyp_shared_pfn *this;
	int ret = 0;

	mutex_lock(&hyp_shared_pfns_lock);
	this = find_shared_pfn(pfn, &node, &parent);
	if (WARN_ON(!this)) {
		/* Unsharing a pfn that was never shared is a caller bug. */
		ret = -ENOENT;
		goto unlock;
	}

	this->count--;
	if (this->count)
		goto unlock;	/* still shared by other users; keep mapping */

	rb_erase(&this->node, &hyp_shared_pfns);
	kfree(this);
	ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}
369+
344370
int kvm_share_hyp(void *from, void *to)
345371
{
346372
phys_addr_t start, end, cur;
@@ -373,6 +399,22 @@ int kvm_share_hyp(void *from, void *to)
373399
return 0;
374400
}
375401

402+
void kvm_unshare_hyp(void *from, void *to)
403+
{
404+
phys_addr_t start, end, cur;
405+
u64 pfn;
406+
407+
if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
408+
return;
409+
410+
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
411+
end = PAGE_ALIGN(__pa(to));
412+
for (cur = start; cur < end; cur += PAGE_SIZE) {
413+
pfn = __phys_to_pfn(cur);
414+
WARN_ON(unshare_pfn_hyp(pfn));
415+
}
416+
}
417+
376418
/**
377419
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
378420
* @from: The virtual kernel start address of the range

arch/arm64/kvm/reset.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
150150

151151
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
152152
{
153-
kfree(vcpu->arch.sve_state);
153+
void *sve_state = vcpu->arch.sve_state;
154+
155+
kvm_vcpu_unshare_task_fp(vcpu);
156+
kvm_unshare_hyp(vcpu, vcpu + 1);
157+
if (sve_state)
158+
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
159+
kfree(sve_state);
154160
}
155161

156162
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)

0 commit comments

Comments
 (0)