
Commit 717a7ee

Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Add generic hyp_memcache helpers
The host at EL1 and the pKVM hypervisor at EL2 will soon need to exchange memory pages dynamically for creating and destroying VM state. Indeed, the hypervisor will rely on the host to donate memory pages it can use to create guest stage-2 page-tables and to store VM and vCPU metadata. In order to ease this process, introduce a 'struct hyp_memcache' which is essentially a linked list of available pages, indexed by physical addresses so that it can be passed meaningfully between the different virtual address spaces configured at EL1 and EL2.

Tested-by: Vincent Donnefort <[email protected]>
Signed-off-by: Quentin Perret <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 13e248a commit 717a7ee
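
The memcache is an intrusive, singly linked free-list threaded through the free pages themselves: each cached page stores the physical address of the next one, and the head is kept as a physical address so the same list can be walked from EL1 or EL2 after the appropriate phys/virt conversion. The following standalone userspace sketch illustrates the push/pop mechanics only; it is not kernel code, and the demo_to_pa()/demo_to_va() helpers and the use of malloc() in place of a real page allocator are assumptions made purely for the example.

/*
 * Standalone userspace sketch of the hyp_memcache push/pop mechanics
 * (illustration only, not kernel code). Address conversions are
 * identity casts here; malloc() stands in for a page allocator.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uintptr_t phys_addr_t;

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static phys_addr_t demo_to_pa(void *virt) { return (phys_addr_t)virt; }
static void *demo_to_va(phys_addr_t phys) { return (void *)phys; }

static void push_hyp_memcache(struct kvm_hyp_memcache *mc, phys_addr_t *p,
			      phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;		/* store the old head inside the page */
	mc->head = to_pa(p);	/* the new page becomes the head */
	mc->nr_pages++;
}

static void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
			      void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;		/* the next page becomes the head */
	mc->nr_pages--;
	return p;
}

int main(void)
{
	struct kvm_hyp_memcache mc = { 0 };
	void *page;

	/* "Top up" the cache with three pages. */
	for (int i = 0; i < 3; i++) {
		phys_addr_t *p = malloc(4096);

		if (p)
			push_hyp_memcache(&mc, p, demo_to_pa);
	}
	printf("cached pages: %lu\n", mc.nr_pages);

	/* Drain and free them again. */
	while ((page = pop_hyp_memcache(&mc, demo_to_va)))
		free(page);
	printf("cached pages after drain: %lu\n", mc.nr_pages);
	return 0;
}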

File tree: 4 files changed, +118 −0 lines changed

arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/mmu.c


arch/arm64/include/asm/kvm_host.h

Lines changed: 57 additions & 0 deletions
@@ -73,6 +73,63 @@ u32 __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
+struct kvm_hyp_memcache {
+	phys_addr_t head;
+	unsigned long nr_pages;
+};
+
+static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
+				     phys_addr_t *p,
+				     phys_addr_t (*to_pa)(void *virt))
+{
+	*p = mc->head;
+	mc->head = to_pa(p);
+	mc->nr_pages++;
+}
+
+static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
+				     void *(*to_va)(phys_addr_t phys))
+{
+	phys_addr_t *p = to_va(mc->head);
+
+	if (!mc->nr_pages)
+		return NULL;
+
+	mc->head = *p;
+	mc->nr_pages--;
+
+	return p;
+}
+
+static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
+				       unsigned long min_pages,
+				       void *(*alloc_fn)(void *arg),
+				       phys_addr_t (*to_pa)(void *virt),
+				       void *arg)
+{
+	while (mc->nr_pages < min_pages) {
+		phys_addr_t *p = alloc_fn(arg);
+
+		if (!p)
+			return -ENOMEM;
+		push_hyp_memcache(mc, p, to_pa);
+	}
+
+	return 0;
+}
+
+static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
+				       void (*free_fn)(void *virt, void *arg),
+				       void *(*to_va)(phys_addr_t phys),
+				       void *arg)
+{
+	while (mc->nr_pages)
+		free_fn(pop_hyp_memcache(mc, to_va), arg);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc);
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
+
 struct kvm_vmid {
 	atomic64_t id;
 };

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 2 additions & 0 deletions
@@ -77,6 +77,8 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
 void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+		    struct kvm_hyp_memcache *host_mc);
 
 static __always_inline void __load_host_stage2(void)
 {

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 33 additions & 0 deletions
@@ -340,3 +340,36 @@ int hyp_create_idmap(u32 hyp_va_bits)
 
 	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
 }
+
+static void *admit_host_page(void *arg)
+{
+	struct kvm_hyp_memcache *host_mc = arg;
+
+	if (!host_mc->nr_pages)
+		return NULL;
+
+	/*
+	 * The host still owns the pages in its memcache, so we need to go
+	 * through a full host-to-hyp donation cycle to change it. Fortunately,
+	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
+	 * succeeds we're good to go.
+	 */
+	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+		return NULL;
+
+	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
+}
+
+/* Refill our local memcache by popping pages from the one provided by the host. */
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+		    struct kvm_hyp_memcache *host_mc)
+{
+	struct kvm_hyp_memcache tmp = *host_mc;
+	int ret;
+
+	ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
+				   hyp_virt_to_phys, &tmp);
+	*host_mc = tmp;
+
+	return ret;
+}
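
On the EL2 side, refill_memcache() fills the hypervisor-private memcache by pulling pages out of the host-provided one, donating each page to the hypervisor before it is admitted. As a hedged sketch of how later code might then consume those pages (the helper name below is hypothetical and not part of this commit; hyp_phys_to_virt() is the EL2 conversion used above):

/*
 * Illustrative only: once refill_memcache() has admitted enough donated
 * pages into the hypervisor-private memcache, EL2 code can pop one
 * whenever it needs a fresh page, e.g. for a guest stage-2 table.
 * hyp_alloc_private_page() is a hypothetical name, not part of this commit.
 */
static void *hyp_alloc_private_page(struct kvm_hyp_memcache *mc)
{
	return pop_hyp_memcache(mc, hyp_phys_to_virt);
}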

arch/arm64/kvm/mmu.c

Lines changed: 26 additions & 0 deletions
@@ -807,6 +807,32 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	}
 }
 
+static void hyp_mc_free_fn(void *addr, void *unused)
+{
+	free_page((unsigned long)addr);
+}
+
+static void *hyp_mc_alloc_fn(void *unused)
+{
+	return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc)
+{
+	if (is_protected_kvm_enabled())
+		__free_hyp_memcache(mc, hyp_mc_free_fn,
+				    kvm_host_va, NULL);
+}
+
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
+{
+	if (!is_protected_kvm_enabled())
+		return 0;
+
+	return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
+				    kvm_host_pa, NULL);
+}
+
 /**
  * kvm_phys_addr_ioremap - map a device range to guest IPA
  *
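
On the host side, the expected pattern (not added by this commit; the hypercall step and the helper below are placeholders for illustration) is to top up a memcache before an operation that may require EL2 allocations, hand it across, and free whatever the hypervisor hands back once the object is torn down:

/*
 * Hypothetical host-side caller (illustration only): make sure at least
 * 'min' pages are cached before asking EL2 to create state that may
 * consume them, then reclaim anything left over when tearing down.
 */
static int demo_prepare_and_cleanup(struct kvm_hyp_memcache *mc,
				    unsigned long min)
{
	int ret = topup_hyp_memcache(mc, min);

	if (ret)
		return ret;

	/* ... pass 'mc' to EL2 (e.g. via a hypercall) so it can pop pages ... */

	free_hyp_memcache(mc);
	return 0;
}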
