@@ -6071,7 +6071,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * same region.
  */
 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-			      struct page *page, unsigned long address)
+			      struct folio *folio, unsigned long address)
 {
 	struct hstate *h = hstate_vma(vma);
 	struct vm_area_struct *iter_vma;
@@ -6115,7 +6115,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
 			unmap_hugepage_range(iter_vma, address,
-					     address + huge_page_size(h), page, 0);
+					     address + huge_page_size(h),
+					     &folio->page, 0);
 	}
 	i_mmap_unlock_write(mapping);
 }
@@ -6238,8 +6239,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 			hugetlb_vma_unlock_read(vma);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-			unmap_ref_private(mm, vma, &old_folio->page,
-					  vmf->address);
+			unmap_ref_private(mm, vma, old_folio, vmf->address);
 
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			hugetlb_vma_lock_read(vma);
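
The change converts unmap_ref_private() to take a struct folio * and pushes the folio-to-page conversion down to the one remaining page-based call, unmap_hugepage_range(), via &folio->page (the head page is embedded at the start of struct folio). Below is a minimal stand-alone sketch of that wrapper-access pattern, using simplified stand-in types rather than the real kernel definitions; it is an illustration of the idea, not kernel code.

/*
 * Sketch only: a folio-based caller bridging to a still page-based
 * helper through the embedded head page. struct page and struct folio
 * here are simplified stand-ins, not the kernel's real definitions.
 */
#include <stdio.h>

struct page { unsigned long flags; };

struct folio {
	struct page page;	/* head page embedded as the first member */
};

/* Plays the role of the still page-based unmap_hugepage_range(). */
static void page_based_helper(struct page *page)
{
	printf("helper got page %p\n", (void *)page);
}

/* Plays the role of the reworked, folio-based unmap_ref_private(). */
static void folio_based_caller(struct folio *folio)
{
	/* Bridge to the page interface, as &folio->page does in the diff. */
	page_based_helper(&folio->page);
}

int main(void)
{
	struct folio f = { { 0 } };

	folio_based_caller(&f);
	return 0;
}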