Skip to content

Commit f41dff4

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
Rather than relying on the host to free the previously-donated pKVM hypervisor VM pages explicitly on teardown, introduce a dedicated teardown memcache which allows the host to reclaim guest memory resources without having to keep track of all of the allocations made by the pKVM hypervisor at EL2. Tested-by: Vincent Donnefort <[email protected]> Co-developed-by: Fuad Tabba <[email protected]> Signed-off-by: Fuad Tabba <[email protected]> Signed-off-by: Quentin Perret <[email protected]> Signed-off-by: Will Deacon <[email protected]> [maz: dropped __maybe_unused from unmap_donated_memory_noclear()] Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 60dfe09 commit f41dff4

File tree

5 files changed

+40
-44
lines changed

5 files changed

+40
-44
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -176,12 +176,7 @@ typedef unsigned int pkvm_handle_t;
176176

177177
struct kvm_protected_vm {
178178
pkvm_handle_t handle;
179-
180-
struct {
181-
void *pgd;
182-
void *vm;
183-
void *vcpus[KVM_MAX_VCPUS];
184-
} hyp_donations;
179+
struct kvm_hyp_memcache teardown_mc;
185180
};
186181

187182
struct kvm_arch {

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
7676

7777
int hyp_pin_shared_mem(void *from, void *to);
7878
void hyp_unpin_shared_mem(void *from, void *to);
79-
void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
79+
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
8080
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
8181
struct kvm_hyp_memcache *host_mc);
8282

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -260,19 +260,24 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
260260
return 0;
261261
}
262262

263-
void reclaim_guest_pages(struct pkvm_hyp_vm *vm)
263+
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
264264
{
265-
void *pgd = vm->pgt.pgd;
266-
unsigned long nr_pages;
267-
268-
nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
265+
void *addr;
269266

267+
/* Dump all pgtable pages in the hyp_pool */
270268
guest_lock_component(vm);
271269
kvm_pgtable_stage2_destroy(&vm->pgt);
272270
vm->kvm.arch.mmu.pgd_phys = 0ULL;
273271
guest_unlock_component(vm);
274272

275-
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(pgd), nr_pages));
273+
/* Drain the hyp_pool into the memcache */
274+
addr = hyp_alloc_pages(&vm->pool, 0);
275+
while (addr) {
276+
memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
277+
push_hyp_memcache(mc, addr, hyp_virt_to_phys);
278+
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
279+
addr = hyp_alloc_pages(&vm->pool, 0);
280+
}
276281
}
277282

278283
int __pkvm_prot_finalize(void)

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 21 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -393,7 +393,7 @@ static void unmap_donated_memory(void *va, size_t size)
393393
__unmap_donated_memory(va, size);
394394
}
395395

396-
static void __maybe_unused unmap_donated_memory_noclear(void *va, size_t size)
396+
static void unmap_donated_memory_noclear(void *va, size_t size)
397397
{
398398
if (!va)
399399
return;
@@ -527,8 +527,21 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
527527
return ret;
528528
}
529529

530+
static void
531+
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
532+
{
533+
size = PAGE_ALIGN(size);
534+
memset(addr, 0, size);
535+
536+
for (void *start = addr; start < addr + size; start += PAGE_SIZE)
537+
push_hyp_memcache(mc, start, hyp_virt_to_phys);
538+
539+
unmap_donated_memory_noclear(addr, size);
540+
}
541+
530542
int __pkvm_teardown_vm(pkvm_handle_t handle)
531543
{
544+
struct kvm_hyp_memcache *mc;
532545
struct pkvm_hyp_vm *hyp_vm;
533546
struct kvm *host_kvm;
534547
unsigned int idx;
@@ -547,25 +560,27 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
547560
goto err_unlock;
548561
}
549562

563+
host_kvm = hyp_vm->host_kvm;
564+
550565
/* Ensure the VMID is clean before it can be reallocated */
551566
__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
552567
remove_vm_table_entry(handle);
553568
hyp_spin_unlock(&vm_table_lock);
554569

555570
/* Reclaim guest pages (including page-table pages) */
556-
reclaim_guest_pages(hyp_vm);
571+
mc = &host_kvm->arch.pkvm.teardown_mc;
572+
reclaim_guest_pages(hyp_vm, mc);
557573
unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
558574

559-
/* Return the metadata pages to the host */
575+
/* Push the metadata pages to the teardown memcache */
560576
for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
561577
struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
562578

563-
unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
579+
teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
564580
}
565581

566-
host_kvm = hyp_vm->host_kvm;
567582
vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
568-
unmap_donated_memory(hyp_vm, vm_size);
583+
teardown_donated_memory(mc, hyp_vm, vm_size);
569584
hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
570585
return 0;
571586

arch/arm64/kvm/pkvm.c

Lines changed: 6 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -147,8 +147,6 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
147147
handle = ret;
148148

149149
host_kvm->arch.pkvm.handle = handle;
150-
host_kvm->arch.pkvm.hyp_donations.pgd = pgd;
151-
host_kvm->arch.pkvm.hyp_donations.vm = hyp_vm;
152150

153151
/* Donate memory for the vcpus at hyp and initialize it. */
154152
hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
@@ -167,12 +165,12 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
167165
goto destroy_vm;
168166
}
169167

170-
host_kvm->arch.pkvm.hyp_donations.vcpus[idx] = hyp_vcpu;
171-
172168
ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
173169
hyp_vcpu);
174-
if (ret)
170+
if (ret) {
171+
free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
175172
goto destroy_vm;
173+
}
176174
}
177175

178176
return 0;
@@ -201,30 +199,13 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
201199

202200
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
203201
{
204-
unsigned long idx, nr_vcpus = host_kvm->created_vcpus;
205-
size_t pgd_sz, hyp_vm_sz;
206-
207-
if (host_kvm->arch.pkvm.handle)
202+
if (host_kvm->arch.pkvm.handle) {
208203
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
209204
host_kvm->arch.pkvm.handle));
210-
211-
host_kvm->arch.pkvm.handle = 0;
212-
213-
for (idx = 0; idx < nr_vcpus; ++idx) {
214-
void *hyp_vcpu = host_kvm->arch.pkvm.hyp_donations.vcpus[idx];
215-
216-
if (!hyp_vcpu)
217-
break;
218-
219-
free_pages_exact(hyp_vcpu, PAGE_ALIGN(PKVM_HYP_VCPU_SIZE));
220205
}
221206

222-
hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
223-
size_mul(sizeof(void *), nr_vcpus)));
224-
pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
225-
226-
free_pages_exact(host_kvm->arch.pkvm.hyp_donations.vm, hyp_vm_sz);
227-
free_pages_exact(host_kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
207+
host_kvm->arch.pkvm.handle = 0;
208+
free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
228209
}
229210

230211
int pkvm_init_host_vm(struct kvm *host_kvm)

0 commit comments

Comments
 (0)