
Commit 0527559

fancxlakpm00 authored and committed
mm/hugetlb: convert use of struct page to folio in __unmap_hugepage_range()
In __unmap_hugepage_range(), the "page" pointer always points to the first page of a huge page, which guarantees there is a folio associated with it. Convert the "page" pointer to use a folio.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Fan Ni <[email protected]>
Reviewed-by: Oscar Salvador <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Sidhartha Kumar <[email protected]>
Cc: "Vishal Moola (Oracle)" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 7f4b606 commit 0527559

File tree

1 file changed: +13 -11 lines


mm/hugetlb.c

Lines changed: 13 additions & 11 deletions
@@ -5843,11 +5843,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    struct folio *folio, zap_flags_t zap_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	const bool folio_provided = !!folio;
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
 	spinlock_t *ptl;
-	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	bool adjust_reservation = false;
@@ -5911,14 +5911,13 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			continue;
 		}
 
-		page = pte_page(pte);
 		/*
 		 * If a folio is supplied, it is because a specific
 		 * folio is being unmapped, not a range. Ensure the folio we
 		 * are about to unmap is the actual folio of interest.
 		 */
-		if (folio) {
-			if (page_folio(page) != folio) {
+		if (folio_provided) {
+			if (folio != page_folio(pte_page(pte))) {
 				spin_unlock(ptl);
 				continue;
 			}
@@ -5928,20 +5927,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * looking like data was lost
 			 */
 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
+		} else {
+			folio = page_folio(pte_page(pte));
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 		if (huge_pte_dirty(pte))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 		/* Leave a uffd-wp pte marker if needed */
 		if (huge_pte_uffd_wp(pte) &&
 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
 			set_huge_pte_at(mm, address, ptep,
 					make_pte_marker(PTE_MARKER_UFFD_WP),
 					sz);
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
-		hugetlb_remove_rmap(page_folio(page));
+		hugetlb_remove_rmap(folio);
 
 		/*
 		 * Restore the reservation for anonymous page, otherwise the
@@ -5950,8 +5951,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 * reservation bit.
 		 */
 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
-		    folio_test_anon(page_folio(page))) {
-			folio_set_hugetlb_restore_reserve(page_folio(page));
+		    folio_test_anon(folio)) {
+			folio_set_hugetlb_restore_reserve(folio);
 			/* Reservation to be adjusted after the spin lock */
 			adjust_reservation = true;
 		}
@@ -5975,16 +5976,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				 * count will not be incremented by free_huge_folio.
 				 * Act as if we consumed the reservation.
 				 */
-				folio_clear_hugetlb_restore_reserve(page_folio(page));
+				folio_clear_hugetlb_restore_reserve(folio);
 			else if (rc)
 				vma_add_reservation(h, vma, address);
 		}
 
-		tlb_remove_page_size(tlb, page, huge_page_size(h));
+		tlb_remove_page_size(tlb, folio_page(folio, 0),
+				     folio_size(folio));
 		/*
 		 * If we were instructed to unmap a specific folio, we're done.
 		 */
-		if (folio)
+		if (folio_provided)
 			break;
 	}
 	tlb_end_vma(tlb, vma);
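
For context, a minimal sketch (not part of the commit) of the page/folio relationship the conversion relies on: for a mapped hugetlb PTE, pte_page() returns the first (head) page of the huge page, so page_folio() always yields its folio; folio_page(folio, 0) recovers that head page for tlb_remove_page_size(), and folio_size() matches huge_page_size() for the VMA's hstate. The helper name hugetlb_pte_folio_sketch() and its arguments are hypothetical, for illustration only.

#include <linux/bug.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>

/* Illustration only; assumes a present hugetlb PTE observed under the page table lock. */
static void hugetlb_pte_folio_sketch(struct vm_area_struct *vma, pte_t pte)
{
	struct hstate *h = hstate_vma(vma);
	struct page *head = pte_page(pte);		/* first page of the huge page */
	struct folio *folio = page_folio(head);		/* its folio always exists */

	/* folio_page(folio, 0) recovers the head page handed to tlb_remove_page_size() */
	WARN_ON(folio_page(folio, 0) != head);
	/* folio_size() equals the hstate's huge page size, as the patch assumes */
	WARN_ON(folio_size(folio) != huge_page_size(h));
}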
