@@ -201,8 +201,8 @@ static void *guest_s2_zalloc_page(void *mc)

 	memset(addr, 0, PAGE_SIZE);
 	p = hyp_virt_to_page(addr);
-	memset(p, 0, sizeof(*p));
 	p->refcount = 1;
+	p->order = 0;

 	return addr;
 }
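The hunk above stops zeroing the whole vmemmap entry and instead initialises only the fields the allocator needs, so the host-state byte (added to struct hyp_page alongside these hunks, in the nVHE memory.h header not shown here) is not clobbered when a page is handed to a guest. A minimal sketch of the layout this code assumes; the authoritative definition lives in arch/arm64/kvm/hyp/include/nvhe/memory.h and may differ in detail:

/* Sketch only: per-page hyp vmemmap entry assumed by the hunk above. */
struct hyp_page {
	u16 refcount;				/* hyp refcount; 1 while in use as a guest page-table page */
	u8 order;				/* buddy-allocator order; 0 for a single page */
	enum pkvm_page_state host_state : 8;	/* host ownership state tracked by this change */
};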
@@ -268,6 +268,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)

 void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 {
+	struct hyp_page *page;
 	void *addr;

 	/* Dump all pgtable pages in the hyp_pool */
@@ -279,7 +280,9 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 	/* Drain the hyp_pool into the memcache */
 	addr = hyp_alloc_pages(&vm->pool, 0);
 	while (addr) {
-		memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+		page = hyp_virt_to_page(addr);
+		page->refcount = 0;
+		page->order = 0;
 		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
 		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
 		addr = hyp_alloc_pages(&vm->pool, 0);
@@ -382,19 +385,28 @@ bool addr_is_memory(phys_addr_t phys)
 	return !!find_mem_range(phys, &range);
 }

-static bool addr_is_allowed_memory(phys_addr_t phys)
+static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
+{
+	return range->start <= addr && addr < range->end;
+}
+
+static int check_range_allowed_memory(u64 start, u64 end)
 {
 	struct memblock_region *reg;
 	struct kvm_mem_range range;

-	reg = find_mem_range(phys, &range);
+	/*
+	 * Callers can't check the state of a range that overlaps memory and
+	 * MMIO regions, so ensure [start, end[ is in the same kvm_mem_range.
+	 */
+	reg = find_mem_range(start, &range);
+	if (!is_in_mem_range(end - 1, &range))
+		return -EINVAL;

-	return reg && !(reg->flags & MEMBLOCK_NOMAP);
-}
+	if (!reg || reg->flags & MEMBLOCK_NOMAP)
+		return -EPERM;

-static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
-{
-	return range->start <= addr && addr < range->end;
+	return 0;
 }

 static bool range_is_memory(u64 start, u64 end)
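check_range_allowed_memory() replaces the single-address addr_is_allowed_memory() check with a range check: the whole half-open range [start, end[ must fall inside one kvm_mem_range (so it cannot straddle a memory/MMIO boundary) and that range must be mappable memory. A hedged usage sketch; the wrapper name below is made up for illustration:

/* Illustrative caller showing both failure modes of the new helper. */
static int example_check_memory_range(phys_addr_t addr, u64 size)
{
	int ret = check_range_allowed_memory(addr, addr + size);

	if (ret == -EINVAL)
		return ret;	/* [addr, addr + size[ straddles memory and MMIO */
	if (ret == -EPERM)
		return ret;	/* no memblock backing, or region is MEMBLOCK_NOMAP */

	/* every page in the range is known to be allowed memory here */
	return 0;
}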
@@ -454,8 +466,10 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 	if (kvm_pte_valid(pte))
 		return -EAGAIN;

-	if (pte)
+	if (pte) {
+		WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE);
 		return -EPERM;
+	}

 	do {
 		u64 granule = kvm_granule_size(level);
@@ -477,10 +491,33 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
 	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
 }

+static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
+{
+	phys_addr_t end = addr + size;
+
+	for (; addr < end; addr += PAGE_SIZE)
+		hyp_phys_to_page(addr)->host_state = state;
+}
+
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
-	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
-			       addr, size, &host_s2_pool, owner_id);
+	int ret;
+
+	if (!addr_is_memory(addr))
+		return -EPERM;
+
+	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
+			      addr, size, &host_s2_pool, owner_id);
+	if (ret)
+		return ret;
+
+	/* Don't forget to update the vmemmap tracking for the host */
+	if (owner_id == PKVM_ID_HOST)
+		__host_update_page_state(addr, size, PKVM_PAGE_OWNED);
+	else
+		__host_update_page_state(addr, size, PKVM_NOPAGE);
+
+	return 0;
 }

 static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
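With this hunk, every ownership change made through host_stage2_set_owner_locked() is mirrored into the vmemmap by __host_update_page_state(), so host_state becomes the record of what the host owns without walking the stage-2 tables. A rough sketch of the intended effect on a donation round trip; PKVM_ID_HYP is assumed to be the hypervisor owner id used elsewhere in the pKVM code, and the caller is assumed to hold the host stage-2 lock, as the _locked suffix implies:

/* Sketch only: the vmemmap side effect of changing a page's owner. */
static int example_donate_round_trip(phys_addr_t phys)
{
	int ret;

	/* Donate to the hypervisor: host_state for the page becomes PKVM_NOPAGE. */
	ret = host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	if (ret)
		return ret;

	/* Hand it back to the host: host_state becomes PKVM_PAGE_OWNED again. */
	return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HOST);
}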
@@ -604,35 +641,38 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
 	return kvm_pgtable_walk(pgt, addr, size, &walker);
 }

-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
-{
-	if (!addr_is_allowed_memory(addr))
-		return PKVM_NOPAGE;
-
-	if (!kvm_pte_valid(pte) && pte)
-		return PKVM_NOPAGE;
-
-	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
-}
-
 static int __host_check_page_state_range(u64 addr, u64 size,
 					 enum pkvm_page_state state)
 {
-	struct check_walk_data d = {
-		.desired	= state,
-		.get_page_state	= host_get_page_state,
-	};
+	u64 end = addr + size;
+	int ret;
+
+	ret = check_range_allowed_memory(addr, end);
+	if (ret)
+		return ret;

 	hyp_assert_lock_held(&host_mmu.lock);
-	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
+	for (; addr < end; addr += PAGE_SIZE) {
+		if (hyp_phys_to_page(addr)->host_state != state)
+			return -EPERM;
+	}
+
+	return 0;
 }

 static int __host_set_page_state_range(u64 addr, u64 size,
 				       enum pkvm_page_state state)
 {
-	enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
+	if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) {
+		int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);

-	return host_stage2_idmap_locked(addr, size, prot);
+		if (ret)
+			return ret;
+	}
+
+	__host_update_page_state(addr, size, state);
+
+	return 0;
 }

 static int host_request_owned_transition(u64 *completer_addr,
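After this hunk, __host_check_page_state_range() consults the vmemmap directly instead of walking the host stage-2 tables, and __host_set_page_state_range() only touches the page table when a PKVM_NOPAGE range needs to be mapped back before the state update. A sketch of how the pair is meant to be used; the lock helpers and the PKVM_PAGE_SHARED_OWNED state are assumed from the surrounding mem_protect.c code rather than taken from these hunks:

/* Illustrative only: check-then-update under the host stage-2 lock. */
static int example_unshare_range(u64 addr, u64 size)
{
	int ret;

	host_lock_component();
	ret = __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
	host_unlock_component();

	return ret;
}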