@@ -1719,7 +1719,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pmd_t pmdval;
 	unsigned long start = addr;
 	bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details);
-	bool direct_reclaim = false;
+	bool direct_reclaim = true;
 	int nr;
 
 retry:
@@ -1734,20 +1734,31 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	do {
 		bool any_skipped = false;
 
-		if (need_resched())
+		if (need_resched()) {
+			direct_reclaim = false;
 			break;
+		}
 
 		nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
 				      &force_flush, &force_break, &any_skipped);
 		if (any_skipped)
 			can_reclaim_pt = false;
 		if (unlikely(force_break)) {
 			addr += nr * PAGE_SIZE;
+			direct_reclaim = false;
 			break;
 		}
 	} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
 
-	if (can_reclaim_pt && addr == end)
+	/*
+	 * Fast path: try to hold the pmd lock and unmap the PTE page.
+	 *
+	 * If the pte lock was released midway (retry case), or if the attempt
+	 * to hold the pmd lock failed, then we need to recheck all pte entries
+	 * to ensure they are still none, thereby preventing the pte entries
+	 * from being repopulated by another thread.
+	 */
+	if (can_reclaim_pt && direct_reclaim && addr == end)
 		direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
 
 	add_mm_rss_vec(mm, rss);
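
The gist of the change: direct_reclaim now starts out true and is cleared on every early exit from the zap loop (the need_resched() break and the force_break break), so the pmd-lock fast path is attempted only when the loop covered the whole range in a single uninterrupted pass; any other outcome forces the recheck described in the new comment. For context, here is a minimal sketch of the trylock fast path that try_get_and_clear_pmd() stands for, assuming the generic pmd_lockptr()/spin_trylock()/pmdp_get_lockless()/pmd_clear() helpers; it illustrates the pattern only and is not necessarily the exact body in the kernel tree:

	#include <linux/mm.h>
	#include <linux/spinlock.h>

	/*
	 * Sketch only: opportunistically take the pmd lock; on success, read
	 * and clear the pmd entry so the now-detached PTE page can be freed
	 * later. If the lock is contended, report failure so the caller
	 * falls back to the slow path.
	 */
	static bool try_get_and_clear_pmd_sketch(struct mm_struct *mm,
						 pmd_t *pmd, pmd_t *pmdval)
	{
		spinlock_t *pml = pmd_lockptr(mm, pmd);

		if (!spin_trylock(pml))
			return false;	/* contended: caller takes the slow path */

		*pmdval = pmdp_get_lockless(pmd);
		pmd_clear(pmd);
		spin_unlock(pml);
		return true;
	}

If the trylock fails (or the loop had to retry), direct_reclaim stays false and the caller must revalidate that every pte entry in the page is still none under the lock before reclaiming the PTE page, which is exactly the recheck the new comment calls out.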