@@ -1845,23 +1845,32 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
 #endif
 }
 
-/* We support batch unmapping of PTEs for lazyfree large folios */
-static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
-			struct folio *folio, pte_t *ptep)
+static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
+			struct page_vma_mapped_walk *pvmw,
+			enum ttu_flags flags, pte_t pte)
 {
 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
-	int max_nr = folio_nr_pages(folio);
-	pte_t pte = ptep_get(ptep);
+	unsigned long end_addr, addr = pvmw->address;
+	struct vm_area_struct *vma = pvmw->vma;
+	unsigned int max_nr;
+
+	if (flags & TTU_HWPOISON)
+		return 1;
+	if (!folio_test_large(folio))
+		return 1;
 
+	/* We may only batch within a single VMA and a single page table. */
+	end_addr = pmd_addr_end(addr, vma->vm_end);
+	max_nr = (end_addr - addr) >> PAGE_SHIFT;
+
+	/* We only support lazyfree batching for now ... */
 	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
-		return false;
+		return 1;
 	if (pte_unused(pte))
-		return false;
-	if (pte_pfn(pte) != folio_pfn(folio))
-		return false;
+		return 1;
 
-	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
-			       NULL, NULL) == max_nr;
+	return folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, fpb_flags,
+			       NULL, NULL, NULL);
 }
 
 /*
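Side note on the new clamping: pmd_addr_end() caps the batch so it cannot run past the end of the current page table or the VMA, which keeps the subsequent folio_pte_batch() walk confined to the page table that pvmw->pte belongs to. Below is a minimal userspace sketch of that arithmetic, not kernel code; the 2 MiB PMD span and the example addresses are assumptions for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* assumed: 2 MiB page tables with 4K pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Userspace re-implementation of the kernel's generic pmd_addr_end() logic. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* Hypothetical values: a PTE mapped one page below a 2 MiB boundary. */
	unsigned long vm_end = 0x40000000UL;
	unsigned long addr = 0x200000UL - 0x1000UL;
	unsigned long end_addr = pmd_addr_end(addr, vm_end);
	unsigned int max_nr = (end_addr - addr) >> PAGE_SHIFT;

	/* Prints max_nr = 1: the batch may not cross into the next page table. */
	printf("max_nr = %u\n", max_nr);
	return 0;
}

With addr one page below a PMD boundary, max_nr comes out as 1 even for a fully mapped large folio, so folio_pte_batch() is given a bound that never lets it read PTEs from the next page table.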
@@ -2024,9 +2033,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			if (pte_dirty(pteval))
 				folio_mark_dirty(folio);
 		} else if (likely(pte_present(pteval))) {
-			if (folio_test_large(folio) && !(flags & TTU_HWPOISON) &&
-			    can_batch_unmap_folio_ptes(address, folio, pvmw.pte))
-				nr_pages = folio_nr_pages(folio);
+			nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
 			end_addr = address + nr_pages * PAGE_SIZE;
 			flush_cache_range(vma, address, end_addr);
 
@@ -2206,13 +2213,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			hugetlb_remove_rmap(folio);
 		} else {
 			folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
-			folio_ref_sub(folio, nr_pages - 1);
 		}
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
-		folio_put(folio);
-		/* We have already batched the entire folio */
-		if (nr_pages > 1)
+		folio_put_refs(folio, nr_pages);
+
+		/*
+		 * If we are sure that we batched the entire folio and cleared
+		 * all PTEs, we can just optimize and stop right here.
+		 */
+		if (nr_pages == folio_nr_pages(folio))
 			goto walk_done;
 		continue;
 walk_abort:
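Note on the reference counting in this hunk: a single folio_put_refs(folio, nr_pages) drops exactly as many references as the old folio_ref_sub(folio, nr_pages - 1) plus folio_put() pair, and also covers the nr_pages == 1 case where only folio_put() used to run. A toy userspace model of that equivalence follows; the toy_* helpers are stand-ins, not the real folio API, which operates atomically on folio->_refcount and may free the folio when it hits zero.

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for a folio's reference count. */
struct toy_folio { int refcount; };

static void toy_ref_sub(struct toy_folio *f, int nr)	{ f->refcount -= nr; }
static void toy_put(struct toy_folio *f)		{ f->refcount -= 1; }
static void toy_put_refs(struct toy_folio *f, int nr)	{ f->refcount -= nr; }

int main(void)
{
	for (int nr_pages = 1; nr_pages <= 4; nr_pages++) {
		struct toy_folio before = { .refcount = 16 };
		struct toy_folio after  = { .refcount = 16 };

		/* Old path: drop nr_pages - 1 refs, then one more via folio_put(). */
		toy_ref_sub(&before, nr_pages - 1);
		toy_put(&before);

		/* New path: one folio_put_refs(folio, nr_pages) call. */
		toy_put_refs(&after, nr_pages);

		assert(before.refcount == after.refcount);
		printf("nr_pages=%d -> both paths drop %d refs\n", nr_pages, nr_pages);
	}
	return 0;
}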