Skip to content

Commit 81edb1b

Browse files
fancxlakpm00
authored and committed
mm/hugetlb: refactor unmap_hugepage_range() to take folio instead of page
The function unmap_hugepage_range() has two kinds of users: 1) unmap_ref_private(), which passes in the head page of a folio. Since unmap_ref_private() already takes folio and there are no other uses of the folio struct in the function, it is natural for unmap_hugepage_range() to take folio also. 2) All other uses, which pass in NULL pointer. In both cases, we can pass in folio. Refactor unmap_hugepage_range() to take folio. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Fan Ni <[email protected]> Reviewed-by: Muchun Song <[email protected]> Reviewed-by: Sidhartha Kumar <[email protected]> Reviewed-by: Oscar Salvador <[email protected]> Acked-by: David Hildenbrand <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Cc: "Vishal Moola (Oracle)" <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent b0752f1 commit 81edb1b

File tree

2 files changed

+6
-5
lines changed

2 files changed

+6
-5
lines changed

include/linux/hugetlb.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -129,8 +129,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
129129
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
130130
struct vm_area_struct *, struct vm_area_struct *);
131131
void unmap_hugepage_range(struct vm_area_struct *,
132-
unsigned long, unsigned long, struct page *,
133-
zap_flags_t);
132+
unsigned long start, unsigned long end,
133+
struct folio *, zap_flags_t);
134134
void __unmap_hugepage_range(struct mmu_gather *tlb,
135135
struct vm_area_struct *vma,
136136
unsigned long start, unsigned long end,

mm/hugetlb.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6046,7 +6046,7 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
60466046
}
60476047

60486048
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
6049-
unsigned long end, struct page *ref_page,
6049+
unsigned long end, struct folio *folio,
60506050
zap_flags_t zap_flags)
60516051
{
60526052
struct mmu_notifier_range range;
@@ -6058,7 +6058,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
60586058
mmu_notifier_invalidate_range_start(&range);
60596059
tlb_gather_mmu(&tlb, vma->vm_mm);
60606060

6061-
__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
6061+
__unmap_hugepage_range(&tlb, vma, start, end,
6062+
&folio->page, zap_flags);
60626063

60636064
mmu_notifier_invalidate_range_end(&range);
60646065
tlb_finish_mmu(&tlb);
@@ -6116,7 +6117,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
61166117
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
61176118
unmap_hugepage_range(iter_vma, address,
61186119
address + huge_page_size(h),
6119-
&folio->page, 0);
6120+
folio, 0);
61206121
}
61216122
i_mmap_unlock_write(mapping);
61226123
}

0 commit comments

Comments (0)