Skip to content

Commit d0bd3e6

Browse files
Quentin Perret authored and Marc Zyngier committed
KVM: arm64: Introduce __pkvm_host_share_guest()
In preparation for handling guest stage-2 mappings at EL2, introduce a new pKVM hypercall allowing the host to share pages with non-protected guests.

Tested-by: Fuad Tabba <[email protected]>
Reviewed-by: Fuad Tabba <[email protected]>
Signed-off-by: Quentin Perret <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
1 parent f7d03fc commit d0bd3e6

File tree

7 files changed

+123
-1
lines changed

7 files changed

+123
-1
lines changed

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,7 @@ enum __kvm_host_smccc_func {
6565
/* Hypercalls available after pKVM finalisation */
6666
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
6767
__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
68+
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
6869
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
6970
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
7071
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -771,6 +771,9 @@ struct kvm_vcpu_arch {
771771
/* Cache some mmu pages needed inside spinlock regions */
772772
struct kvm_mmu_memory_cache mmu_page_cache;
773773

774+
/* Pages to top-up the pKVM/EL2 guest pool */
775+
struct kvm_hyp_memcache pkvm_memcache;
776+
774777
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
775778
u64 vsesr_el2;
776779

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,8 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
3939
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
4040
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
4141
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
42+
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
43+
enum kvm_pgtable_prot prot);
4244

4345
bool addr_is_memory(phys_addr_t phys);
4446
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);

arch/arm64/kvm/hyp/include/nvhe/memory.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ struct hyp_page {
4646

4747
/* Host (non-meta) state. Guarded by the host stage-2 lock. */
4848
enum pkvm_page_state host_state : 8;
49+
50+
u32 host_share_guest_count;
4951
};
5052

5153
extern u64 __hyp_vmemmap;
@@ -68,7 +70,7 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr)
6870

6971
/* Look up the hypervisor's vmemmap metadata entry for a physical address. */
static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
{
	/*
	 * hyp_vmemmap indexing assumes a fixed struct hyp_page size;
	 * the struct grew from 4 to 8 bytes when host_share_guest_count
	 * was added, so keep this compile-time check in sync.
	 */
	BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
	return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
}
7476

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -211,6 +211,39 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
211211
cpu_reg(host_ctxt, 1) = ret;
212212
}
213213

214+
static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
215+
{
216+
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
217+
218+
return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
219+
host_vcpu->arch.pkvm_memcache.nr_pages,
220+
&host_vcpu->arch.pkvm_memcache);
221+
}
222+
223+
static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
224+
{
225+
DECLARE_REG(u64, pfn, host_ctxt, 1);
226+
DECLARE_REG(u64, gfn, host_ctxt, 2);
227+
DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
228+
struct pkvm_hyp_vcpu *hyp_vcpu;
229+
int ret = -EINVAL;
230+
231+
if (!is_protected_kvm_enabled())
232+
goto out;
233+
234+
hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
235+
if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
236+
goto out;
237+
238+
ret = pkvm_refill_memcache(hyp_vcpu);
239+
if (ret)
240+
goto out;
241+
242+
ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
243+
out:
244+
cpu_reg(host_ctxt, 1) = ret;
245+
}
246+
214247
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
215248
{
216249
DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
@@ -420,6 +453,7 @@ static const hcall_t host_hcall[] = {
420453

421454
HANDLE_FUNC(__pkvm_host_share_hyp),
422455
HANDLE_FUNC(__pkvm_host_unshare_hyp),
456+
HANDLE_FUNC(__pkvm_host_share_guest),
423457
HANDLE_FUNC(__kvm_adjust_pc),
424458
HANDLE_FUNC(__kvm_vcpu_run),
425459
HANDLE_FUNC(__kvm_flush_vm_context),

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -867,6 +867,27 @@ static int hyp_complete_donation(u64 addr,
867867
return pkvm_create_mappings_locked(start, end, prot);
868868
}
869869

870+
/*
 * Translate a guest stage-2 PTE into its pKVM ownership state.
 * @addr is unused here but required by the check_walk_data callback
 * signature.
 */
static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
{
	/* An invalid PTE means nothing is mapped at this IPA. */
	return kvm_pte_valid(pte) ?
		pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)) : PKVM_NOPAGE;
}
877+
878+
/*
 * Check that every page in [addr, addr + size) of the vCPU's VM holds
 * the expected pKVM state in the guest stage-2 page-table. Must be
 * called with the VM's lock held.
 */
static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
					  u64 size, enum pkvm_page_state state)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	struct check_walk_data walk_data = {
		.desired	= state,
		.get_page_state	= guest_get_page_state,
	};

	hyp_assert_lock_held(&vm->lock);
	return check_page_state_range(&vm->pgt, addr, size, &walk_data);
}
890+
870891
static int check_share(struct pkvm_mem_share *share)
871892
{
872893
const struct pkvm_mem_transition *tx = &share->tx;
@@ -1349,3 +1370,54 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
13491370

13501371
return ret;
13511372
}
1373+
1374+
/*
 * Share the host page at @pfn with the non-protected guest owning @vcpu,
 * mapping it at @gfn in the guest stage-2 with protection @prot.
 *
 * Returns 0 on success, -EINVAL for invalid prot bits, -EPERM if the
 * page's ownership state forbids sharing, or an error from the range
 * checks. On success the page becomes SHARED_OWNED on the host side,
 * SHARED_BORROWED on the guest side, and its share count is bumped.
 */
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 ipa = hyp_pfn_to_phys(gfn);
	struct hyp_page *page;
	int ret;

	/* Only R/W/X permission bits are meaningful for a shared page. */
	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
	if (ret)
		return ret;

	/* Lock ordering: host stage-2 first, then the guest's stage-2. */
	host_lock_component();
	guest_lock_component(vm);

	/* The target IPA must not already be mapped in the guest. */
	ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	page = hyp_phys_to_page(phys);
	switch (page->host_state) {
	case PKVM_PAGE_OWNED:
		/* First share: transition the host view to SHARED_OWNED. */
		WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
		break;
	case PKVM_PAGE_SHARED_OWNED:
		/*
		 * A non-zero count means the existing share is with an
		 * np-guest, so sharing the same page again is allowed.
		 */
		if (page->host_share_guest_count)
			break;
		/* Only host to np-guest multi-sharing is tolerated */
		WARN_ON(1);
		fallthrough;
	default:
		ret = -EPERM;
		goto unlock;
	}

	/*
	 * Cannot fail for lack of memory: the memcache was topped up by
	 * the hypercall handler before calling in here.
	 */
	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
				       pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
				       &vcpu->vcpu.arch.pkvm_memcache, 0));
	page->host_share_guest_count++;

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -795,6 +795,14 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
795795
/* Push the metadata pages to the teardown memcache */
796796
for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
797797
struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
798+
struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
799+
800+
while (vcpu_mc->nr_pages) {
801+
void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
802+
803+
push_hyp_memcache(mc, addr, hyp_virt_to_phys);
804+
unmap_donated_memory_noclear(addr, PAGE_SIZE);
805+
}
798806

799807
teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
800808
}

0 commit comments

Comments
 (0)