@@ -5843,11 +5843,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    struct folio *folio, zap_flags_t zap_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	const bool folio_provided = !!folio;
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
 	spinlock_t *ptl;
-	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	bool adjust_reservation = false;
@@ -5911,14 +5911,13 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			continue;
 		}
 
-		page = pte_page(pte);
 		/*
 		 * If a folio is supplied, it is because a specific
 		 * folio is being unmapped, not a range. Ensure the folio we
 		 * are about to unmap is the actual folio of interest.
 		 */
-		if (folio) {
-			if (page_folio(page) != folio) {
+		if (folio_provided) {
+			if (folio != page_folio(pte_page(pte))) {
 				spin_unlock(ptl);
 				continue;
 			}
@@ -5928,20 +5927,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			 * looking like data was lost
 			 */
 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
+		} else {
+			folio = page_folio(pte_page(pte));
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 		if (huge_pte_dirty(pte))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 		/* Leave a uffd-wp pte marker if needed */
 		if (huge_pte_uffd_wp(pte) &&
 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
 			set_huge_pte_at(mm, address, ptep,
 					make_pte_marker(PTE_MARKER_UFFD_WP),
 					sz);
 		hugetlb_count_sub(pages_per_huge_page(h), mm);
-		hugetlb_remove_rmap(page_folio(page));
+		hugetlb_remove_rmap(folio);
 
 		/*
 		 * Restore the reservation for anonymous page, otherwise the
@@ -5950,8 +5951,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 * reservation bit.
 		 */
 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
-		    folio_test_anon(page_folio(page))) {
-			folio_set_hugetlb_restore_reserve(page_folio(page));
+		    folio_test_anon(folio)) {
+			folio_set_hugetlb_restore_reserve(folio);
 			/* Reservation to be adjusted after the spin lock */
 			adjust_reservation = true;
 		}
@@ -5975,16 +5976,17 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				 * count will not be incremented by free_huge_folio.
 				 * Act as if we consumed the reservation.
 				 */
-				folio_clear_hugetlb_restore_reserve(page_folio(page));
+				folio_clear_hugetlb_restore_reserve(folio);
 			else if (rc)
 				vma_add_reservation(h, vma, address);
 		}
 
-		tlb_remove_page_size(tlb, page, huge_page_size(h));
+		tlb_remove_page_size(tlb, folio_page(folio, 0),
+				     folio_size(folio));
 		/*
 		 * If we were instructed to unmap a specific folio, we're done.
 		 */
-		if (folio)
+		if (folio_provided)
 			break;
 	}
 	tlb_end_vma(tlb, vma);
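
The pattern behind this conversion is worth spelling out: the new const bool folio_provided records, before the loop starts, whether the caller asked to unmap one specific folio. The folio parameter itself is then free to be reused inside the loop to hold whatever folio the current PTE maps (via page_folio(pte_page(pte))), so the later call sites (folio_mark_dirty(), hugetlb_remove_rmap(), folio_test_anon(), folio_set_hugetlb_restore_reserve(), and tlb_remove_page_size() fed by folio_page()/folio_size()) all take the folio directly instead of repeatedly converting a struct page. Below is a minimal, self-contained userspace C sketch of that "remember the caller's intent, then reuse the pointer" idiom; struct item, unmap_range() and their fields are hypothetical stand-ins for illustration only, not hugetlb or folio API.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct item {
		int id;
		bool dirty;
	};

	/*
	 * Sketch of the folio_provided idiom from the diff above: remember
	 * whether a specific target was supplied before the loop, then reuse
	 * the same pointer for whatever entry the loop is currently handling.
	 */
	static void unmap_range(struct item **slots, size_t nr, struct item *target)
	{
		/* Capture the caller's intent before 'target' is reused. */
		const bool target_provided = (target != NULL);

		for (size_t i = 0; i < nr; i++) {
			struct item *cur = slots[i];

			if (!cur)
				continue;

			if (target_provided) {
				/* Only unmap the one entry the caller asked for. */
				if (cur != target)
					continue;
			} else {
				/* Range mode: operate on whichever entry we found. */
				target = cur;
			}

			target->dirty = true;	/* stands in for folio_mark_dirty() */
			slots[i] = NULL;	/* stands in for clearing the PTE */
			printf("unmapped item %d\n", target->id);

			/* A single requested target means we are done. */
			if (target_provided)
				break;
		}
	}

	int main(void)
	{
		struct item a = { .id = 1 }, b = { .id = 2 };
		struct item *slots[] = { &a, NULL, &b };

		unmap_range(slots, 3, &b);	/* unmaps only item 2 */
		unmap_range(slots, 3, NULL);	/* unmaps the rest */
		return 0;
	}

In range mode (target == NULL) the loop processes every populated slot; in targeted mode it skips everything except the requested entry and stops as soon as it has handled it, mirroring the final "if (folio_provided) break;" in the last hunk.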