@@ -2860,13 +2860,11 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 			       unsigned long addr, bool write_fault,
 			       bool *writable, kvm_pfn_t *p_pfn)
 {
+	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 	kvm_pfn_t pfn;
-	pte_t *ptep;
-	pte_t pte;
-	spinlock_t *ptl;
 	int r;
 
-	r = follow_pte(vma, addr, &ptep, &ptl);
+	r = follow_pfnmap_start(&args);
 	if (r) {
 		/*
 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
@@ -2881,21 +2879,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 		if (r)
 			return r;
 
-		r = follow_pte(vma, addr, &ptep, &ptl);
+		r = follow_pfnmap_start(&args);
 		if (r)
 			return r;
 	}
 
-	pte = ptep_get(ptep);
-
-	if (write_fault && !pte_write(pte)) {
+	if (write_fault && !args.writable) {
 		pfn = KVM_PFN_ERR_RO_FAULT;
 		goto out;
 	}
 
 	if (writable)
-		*writable = pte_write(pte);
-	pfn = pte_pfn(pte);
+		*writable = args.writable;
+	pfn = args.pfn;
 
 	/*
 	 * Get a reference here because callers of *hva_to_pfn* and
@@ -2916,9 +2912,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 	 */
 	if (!kvm_try_get_pfn(pfn))
 		r = -EFAULT;
-
 out:
-	pte_unmap_unlock(ptep, ptl);
+	follow_pfnmap_end(&args);
 	*p_pfn = pfn;
 
 	return r;
0 commit comments