@@ -60,6 +60,11 @@ static void hyp_unlock_component(void)
6060 hyp_spin_unlock (& pkvm_pgd_lock );
6161}
6262
63+ #define for_each_hyp_page(__p, __st, __sz)				\
64+ 	for (struct hyp_page *__p = hyp_phys_to_page(__st),		\
65+ 	     *__e = __p + ((__sz) >> PAGE_SHIFT);			\
66+ 	     __p < __e; __p++)
67+
6368static void * host_s2_zalloc_pages_exact (size_t size )
6469{
6570 void * addr = hyp_alloc_pages (& host_s2_pool , get_order (size ));
@@ -485,7 +490,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
485490 return - EAGAIN ;
486491
487492 if (pte ) {
488- 	WARN_ON(addr_is_memory(addr) && get_host_state(addr) != PKVM_NOPAGE);
493+ 	WARN_ON(addr_is_memory(addr) &&
494+ 		get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
489495 return - EPERM ;
490496 }
491497
@@ -511,10 +517,8 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
511517
512518static void __host_update_page_state (phys_addr_t addr , u64 size , enum pkvm_page_state state )
513519{
514- 	phys_addr_t end = addr + size;
515-
516- 	for (; addr < end; addr += PAGE_SIZE)
517- 		set_host_state(addr, state);
520+ 	for_each_hyp_page(page, addr, size)
521+ 		set_host_state(page, state);
518522}
519523
520524int host_stage2_set_owner_locked (phys_addr_t addr , u64 size , u8 owner_id )
@@ -636,16 +640,16 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
636640static int __host_check_page_state_range (u64 addr , u64 size ,
637641 enum pkvm_page_state state )
638642{
639- u64 end = addr + size ;
640643 int ret ;
641644
642- 	ret = check_range_allowed_memory(addr, end);
645+ 	ret = check_range_allowed_memory(addr, addr + size);
643646 if (ret )
644647 return ret ;
645648
646649 hyp_assert_lock_held (& host_mmu .lock );
647- 	for (; addr < end; addr += PAGE_SIZE) {
648- 		if (get_host_state(addr) != state)
650+
651+ 	for_each_hyp_page(page, addr, size) {
652+ 		if (get_host_state(page) != state)
649653 return - EPERM ;
650654 }
651655
@@ -655,7 +659,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
655659static int __host_set_page_state_range (u64 addr , u64 size ,
656660 enum pkvm_page_state state )
657661{
658- 	if (get_host_state(addr) == PKVM_NOPAGE) {
662+ 	if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
659663 int ret = host_stage2_idmap_locked (addr , size , PKVM_HOST_MEM_PROT );
660664
661665 if (ret )
@@ -669,18 +673,14 @@ static int __host_set_page_state_range(u64 addr, u64 size,
669673
670674static void __hyp_set_page_state_range (phys_addr_t phys , u64 size , enum pkvm_page_state state )
671675{
672- 	phys_addr_t end = phys + size;
673-
674- 	for (; phys < end; phys += PAGE_SIZE)
675- 		set_hyp_state(phys, state);
676+ 	for_each_hyp_page(page, phys, size)
677+ 		set_hyp_state(page, state);
676678}
677679
678680static int __hyp_check_page_state_range (phys_addr_t phys , u64 size , enum pkvm_page_state state )
679681{
680- 	phys_addr_t end = phys + size;
681-
682- 	for (; phys < end; phys += PAGE_SIZE) {
683- 		if (get_hyp_state(phys) != state)
682+ 	for_each_hyp_page(page, phys, size) {
683+ 		if (get_hyp_state(page) != state)
684684 return - EPERM ;
685685 }
686686
@@ -931,7 +931,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
931931 goto unlock ;
932932
933933 page = hyp_phys_to_page (phys );
934- 	switch (get_host_state(phys)) {
934+ 	switch (get_host_state(page)) {
935935 case PKVM_PAGE_OWNED :
936936 WARN_ON (__host_set_page_state_range (phys , PAGE_SIZE , PKVM_PAGE_SHARED_OWNED ));
937937 break ;
@@ -983,9 +983,9 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
983983 if (WARN_ON (ret ))
984984 return ret ;
985985
986- 	if (get_host_state(phys) != PKVM_PAGE_SHARED_OWNED)
987- 		return -EPERM;
988986 	page = hyp_phys_to_page(phys);
987+ 	if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
988+ 		return -EPERM;
989989 if (WARN_ON (!page -> host_share_guest_count ))
990990 return - EINVAL ;
991991
0 commit comments