Skip to content

Commit a83e219

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: pkvm: Refcount the pages shared with EL2
In order to simplify the page tracking infrastructure at EL2 in nVHE protected mode, move the responsibility of refcounting pages that are shared multiple times to the host. In order to do so, let's create a red-black tree tracking all the PFNs that have been shared, along with a refcount. Acked-by: Will Deacon <[email protected]> Signed-off-by: Quentin Perret <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 3f868e1 commit a83e219

File tree

1 file changed

+68
-10
lines changed

1 file changed

+68
-10
lines changed

arch/arm64/kvm/mmu.c

Lines changed: 68 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -281,23 +281,72 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
281281
}
282282
}
283283

284-
static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
284+
struct hyp_shared_pfn {
285+
u64 pfn;
286+
int count;
287+
struct rb_node node;
288+
};
289+
290+
static DEFINE_MUTEX(hyp_shared_pfns_lock);
291+
static struct rb_root hyp_shared_pfns = RB_ROOT;
292+
293+
/*
 * Look up @pfn in the hyp_shared_pfns rb-tree.
 *
 * Returns the matching node if @pfn is already tracked, or NULL if not.
 * In the NULL case, *@node and *@parent are left pointing at the link
 * and parent under which a node for @pfn should be inserted, in the form
 * expected by rb_link_node().
 *
 * Caller must hold hyp_shared_pfns_lock (see share_pfn_hyp()).
 */
static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
					      struct rb_node **parent)
{
	struct hyp_shared_pfn *this;

	*node = &hyp_shared_pfns.rb_node;
	*parent = NULL;
	while (**node) {
		this = container_of(**node, struct hyp_shared_pfn, node);
		*parent = **node;
		/*
		 * NB: smaller pfns descend to the *left* here, i.e. the tree
		 * is mirrored relative to the usual convention. This is fine
		 * as long as every user goes through this helper, since
		 * insertion uses the link position computed by this walk.
		 */
		if (this->pfn < pfn)
			*node = &((**node)->rb_left);
		else if (this->pfn > pfn)
			*node = &((**node)->rb_right);
		else
			return this;
	}

	return NULL;
}
313+
314+
/*
 * Share the page at @pfn with EL2, refcounting repeated shares on the
 * host side.
 *
 * The first share of a given pfn inserts a tracking node and issues the
 * __pkvm_host_share_hyp hypercall; subsequent shares of the same pfn only
 * bump the refcount and skip the hypercall.
 *
 * Returns 0 on success, -ENOMEM if the tracking node cannot be allocated,
 * or the error returned by the hypercall.
 */
static int share_pfn_hyp(u64 pfn)
{
	struct rb_node **node, *parent;
	struct hyp_shared_pfn *this;
	int ret = 0;

	mutex_lock(&hyp_shared_pfns_lock);
	this = find_shared_pfn(pfn, &node, &parent);
	if (this) {
		/* Already shared with EL2: just account for one more share. */
		this->count++;
		goto unlock;
	}

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this) {
		ret = -ENOMEM;
		goto unlock;
	}

	this->pfn = pfn;
	this->count = 1;
	rb_link_node(&this->node, parent, node);
	rb_insert_color(&this->node, &hyp_shared_pfns);
	/*
	 * NOTE(review): if the hypercall fails, the freshly inserted node is
	 * left in the tree with count == 1 — presumably relying on callers
	 * to unwind via the unshare path; confirm against callers.
	 */
	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}
298343

299344
int kvm_share_hyp(void *from, void *to)
300345
{
346+
phys_addr_t start, end, cur;
347+
u64 pfn;
348+
int ret;
349+
301350
if (is_kernel_in_hyp_mode())
302351
return 0;
303352

@@ -312,7 +361,16 @@ int kvm_share_hyp(void *from, void *to)
312361
if (kvm_host_owns_hyp_mappings())
313362
return create_hyp_mappings(from, to, PAGE_HYP);
314363

315-
return pkvm_share_hyp(__pa(from), __pa(to));
364+
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
365+
end = PAGE_ALIGN(__pa(to));
366+
for (cur = start; cur < end; cur += PAGE_SIZE) {
367+
pfn = __phys_to_pfn(cur);
368+
ret = share_pfn_hyp(pfn);
369+
if (ret)
370+
return ret;
371+
}
372+
373+
return 0;
316374
}
317375

318376
/**

0 commit comments

Comments
 (0)