Skip to content

Commit e82edcc

Browse files
willdeacon authored and Marc Zyngier committed
KVM: arm64: Implement do_share() helper for sharing memory
By default, protected KVM isolates memory pages so that they are accessible only to their owner: be it the host kernel, the hypervisor at EL2 or (in future) the guest. Establishing shared-memory regions between these components therefore involves a transition for each page so that the owner can share memory with a borrower under a certain set of permissions. Introduce a do_share() helper for safely sharing a memory region between two components. Currently, only host-to-hyp sharing is implemented, but the code is easily extended to handle other combinations and the permission checks for each component are reusable. Reviewed-by: Andrew Walbran <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Quentin Perret <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 61d99e3 commit e82edcc

File tree

1 file changed

+237
-0
lines changed

1 file changed

+237
-0
lines changed

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 237 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -471,3 +471,240 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
471471
ret = host_stage2_idmap(addr);
472472
BUG_ON(ret && ret != -EAGAIN);
473473
}
474+
475+
/* This corresponds to locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,	/* The host kernel at EL1 */
	PKVM_ID_HYP,	/* The hypervisor at EL2 */
};
480+
481+
/*
 * Describes a transition of a range of pages between two components: an
 * initiator which starts the transition and a completer which acknowledges
 * and completes it (IDs from enum pkvm_component_id).
 */
struct pkvm_mem_transition {
	u64				nr_pages;

	struct {
		enum pkvm_component_id	id;
		/* Address in the initiator's address space */
		u64			addr;

		union {
			struct {
				/* Address in the completer's address space */
				u64	completer_addr;
			} host;
		};
	} initiator;

	struct {
		enum pkvm_component_id	id;
	} completer;
};
501+
502+
/* A share operation: the transition plus the permissions granted to the completer. */
struct pkvm_mem_share {
	const struct pkvm_mem_transition	tx;
	/* Permissions with which the completer maps the shared pages */
	const enum kvm_pgtable_prot		completer_prot;
};
506+
507+
/* Walker argument: the page state every PTE in the range is expected to have. */
struct check_walk_data {
	enum pkvm_page_state	desired;
	/* Extracts the pKVM page state from a stage-2 PTE */
	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);
};
511+
512+
static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
513+
kvm_pte_t *ptep,
514+
enum kvm_pgtable_walk_flags flag,
515+
void * const arg)
516+
{
517+
struct check_walk_data *d = arg;
518+
kvm_pte_t pte = *ptep;
519+
520+
if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
521+
return -EINVAL;
522+
523+
return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
524+
}
525+
526+
/*
 * Walk [addr, addr + size) in @pgt and check that every leaf entry carries
 * the page state described by @data.
 */
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker checker = {
		.cb	= __check_page_state_visitor,
		.arg	= data,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &checker);
}
537+
538+
/*
 * Return the pKVM state of a host stage-2 PTE. An invalid but non-zero PTE
 * means the page has been unmapped from the host; everything else carries
 * its state in the software bits of the prot.
 */
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
	if (kvm_pte_valid(pte) || !pte)
		return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));

	return PKVM_NOPAGE;
}
545+
546+
/* Check that the whole host range [addr, addr + size) is in @state. */
static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	struct check_walk_data walk_data = {
		.desired	= state,
		.get_page_state	= host_get_page_state,
	};

	/* Caller must already hold the host stage-2 lock */
	hyp_assert_lock_held(&host_kvm.lock);

	return check_page_state_range(&host_kvm.pgt, addr, size, &walk_data);
}
557+
558+
/*
 * Re-map [addr, addr + size) in the host stage-2 with the default host
 * memory permissions annotated with @state.
 */
static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	return host_stage2_idmap_locked(addr, size,
					pkvm_mkstate(PKVM_HOST_MEM_PROT, state));
}
565+
566+
/*
 * Host-initiated transition: report the completer-side address and verify
 * that the host exclusively owns the whole range.
 */
static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	*completer_addr = tx->initiator.host.completer_addr;

	return __host_check_page_state_range(tx->initiator.addr,
					     tx->nr_pages * PAGE_SIZE,
					     PKVM_PAGE_OWNED);
}
575+
576+
/*
 * Host side of a share: report the completer-side address and mark the
 * range as shared (and still owned) in the host stage-2.
 */
static int host_initiate_share(u64 *completer_addr,
			       const struct pkvm_mem_transition *tx)
{
	*completer_addr = tx->initiator.host.completer_addr;

	return __host_set_page_state_range(tx->initiator.addr,
					   tx->nr_pages * PAGE_SIZE,
					   PKVM_PAGE_SHARED_OWNED);
}
585+
586+
/*
 * Return the pKVM state of a hyp stage-1 PTE; an invalid PTE means the
 * page is not mapped at EL2 at all.
 */
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
	return kvm_pte_valid(pte) ?
		pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)) : PKVM_NOPAGE;
}
593+
594+
/* Check that the whole hyp range [addr, addr + size) is in @state. */
static int __hyp_check_page_state_range(u64 addr, u64 size,
					enum pkvm_page_state state)
{
	struct check_walk_data walk_data = {
		.desired	= state,
		.get_page_state	= hyp_get_page_state,
	};

	/* Caller must already hold the hyp pgd lock */
	hyp_assert_lock_held(&pkvm_pgd_lock);

	return check_page_state_range(&pkvm_pgtable, addr, size, &walk_data);
}
605+
606+
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
607+
{
608+
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
609+
tx->initiator.id != PKVM_ID_HOST);
610+
}
611+
612+
/*
 * Hyp-side acknowledgement of a share: only the default hyp permissions
 * are accepted, and (unless the check may be skipped) the target range
 * must not currently be mapped at EL2.
 */
static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
			 enum kvm_pgtable_prot perms)
{
	if (perms != PAGE_HYP)
		return -EPERM;

	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, tx->nr_pages * PAGE_SIZE,
					    PKVM_NOPAGE);
}
625+
626+
/*
 * Hyp-side completion of a share: map the range at EL2 with the requested
 * permissions, annotated as borrowed from the owner.
 */
static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
			      enum kvm_pgtable_prot perms)
{
	u64 size = tx->nr_pages * PAGE_SIZE;
	void *start = (void *)addr;
	enum kvm_pgtable_prot prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);

	return pkvm_create_mappings_locked(start, start + size, prot);
}
635+
636+
static int check_share(struct pkvm_mem_share *share)
637+
{
638+
const struct pkvm_mem_transition *tx = &share->tx;
639+
u64 completer_addr;
640+
int ret;
641+
642+
switch (tx->initiator.id) {
643+
case PKVM_ID_HOST:
644+
ret = host_request_owned_transition(&completer_addr, tx);
645+
break;
646+
default:
647+
ret = -EINVAL;
648+
}
649+
650+
if (ret)
651+
return ret;
652+
653+
switch (tx->completer.id) {
654+
case PKVM_ID_HYP:
655+
ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
656+
break;
657+
default:
658+
ret = -EINVAL;
659+
}
660+
661+
return ret;
662+
}
663+
664+
static int __do_share(struct pkvm_mem_share *share)
665+
{
666+
const struct pkvm_mem_transition *tx = &share->tx;
667+
u64 completer_addr;
668+
int ret;
669+
670+
switch (tx->initiator.id) {
671+
case PKVM_ID_HOST:
672+
ret = host_initiate_share(&completer_addr, tx);
673+
break;
674+
default:
675+
ret = -EINVAL;
676+
}
677+
678+
if (ret)
679+
return ret;
680+
681+
switch (tx->completer.id) {
682+
case PKVM_ID_HYP:
683+
ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
684+
break;
685+
default:
686+
ret = -EINVAL;
687+
}
688+
689+
return ret;
690+
}
691+
692+
/*
 * do_share():
 *
 * The page owner grants access to another component with a given set
 * of permissions.
 *
 * Initiator: OWNED	=> SHARED_OWNED
 * Completer: NOPAGE	=> SHARED_BORROWED
 */
static int do_share(struct pkvm_mem_share *share)
{
	int ret = check_share(share);

	if (ret)
		return ret;

	/*
	 * check_share() succeeded, so a failure here indicates a bug in the
	 * transition machinery rather than a bad request — warn loudly.
	 */
	return WARN_ON(__do_share(share));
}

0 commit comments

Comments
 (0)