@@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	unsigned long pfn, pinned;
 
 	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
-		pinned = map->size >> PAGE_SHIFT;
-		for (pfn = map->addr >> PAGE_SHIFT;
+		pinned = PFN_DOWN(map->size);
+		for (pfn = PFN_DOWN(map->addr);
 		     pinned > 0; pfn++, pinned--) {
 			page = pfn_to_page(pfn);
 			if (map->perm & VHOST_ACCESS_WO)
 				set_page_dirty_lock(page);
 			unpin_user_page(page);
 		}
-		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
 		vhost_iotlb_map_free(iotlb, map);
 	}
 }
@@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
 	if (r)
 		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
 	else
-		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
 
 	return r;
 }
@@ -631,15 +631,15 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 	if (msg->perm & VHOST_ACCESS_WO)
 		gup_flags |= FOLL_WRITE;
 
-	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+	npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
 	if (!npages) {
 		ret = -EINVAL;
 		goto free;
 	}
 
 	mmap_read_lock(dev->mm);
 
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
 	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
 		ret = -ENOMEM;
 		goto unlock;
@@ -673,9 +673,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
 		if (last_pfn && (this_pfn != last_pfn + 1)) {
 			/* Pin a contiguous chunk of memory */
-			csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+			csize = PFN_PHYS(last_pfn - map_pfn + 1);
 			ret = vhost_vdpa_map(v, iova, csize,
-					     map_pfn << PAGE_SHIFT,
+					     PFN_PHYS(map_pfn),
 					     msg->perm);
 			if (ret) {
 				/*
@@ -699,13 +699,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 			last_pfn = this_pfn;
 		}
 
 		cur_base += pinned << PAGE_SHIFT;
-		cur_base += pinned << PAGE_SHIFT;
+		cur_base += PFN_PHYS(pinned);
 		npages -= pinned;
 	}
 
 	/* Pin the rest chunk */
-	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
-			     map_pfn << PAGE_SHIFT, msg->perm);
+	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+			     PFN_PHYS(map_pfn), msg->perm);
 out:
 	if (ret) {
 		if (nchunks) {
@@ -945,7 +945,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
-			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+			    PFN_DOWN(notify.addr), PAGE_SIZE,
 			    vma->vm_page_prot))
 		return VM_FAULT_SIGBUS;
 
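For reference, the PFN_* helpers this patch switches to live in include/linux/pfn.h. A minimal sketch of their definitions (as in current mainline; exact casts may vary across kernel versions):

/* include/linux/pfn.h (sketch) */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round up to the next page frame number */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round down to a page frame number */
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)	/* page frame number to physical address */

So each hunk is intended as a behavior-preserving cleanup: PFN_DOWN(x) is exactly x >> PAGE_SHIFT, PFN_PHYS(x) is x << PAGE_SHIFT with a phys_addr_t cast, and PFN_UP(size + off) matches the old PAGE_ALIGN(size + off) >> PAGE_SHIFT round-up idiom, since PAGE_ALIGN(x) is ((x) + PAGE_SIZE - 1) & PAGE_MASK.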