@@ -1477,7 +1477,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
	bool notified = false;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
-	struct page *hpage;
+	struct folio *folio;
	pte_t *start_pte, *pte;
	pmd_t *pmd, pgt_pmd;
	spinlock_t *pml = NULL, *ptl;
@@ -1510,19 +1510,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

-	hpage = find_lock_page(vma->vm_file->f_mapping,
+	folio = filemap_lock_folio(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
-	if (!hpage)
+	if (IS_ERR(folio))
		return SCAN_PAGE_NULL;

-	if (!PageHead(hpage)) {
-		result = SCAN_FAIL;
-		goto drop_hpage;
-	}
-
-	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
+	if (folio_order(folio) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
-		goto drop_hpage;
+		goto drop_folio;
	}

	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
@@ -1536,13 +1531,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
		 */
		goto maybe_install_pmd;
	default:
-		goto drop_hpage;
+		goto drop_folio;
	}

	result = SCAN_FAIL;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
-		goto drop_hpage;
+		goto drop_folio;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
@@ -1567,7 +1562,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
-		if (hpage + i != page)
+		if (folio_page(folio, i) != page)
			goto abort;
	}
@@ -1582,7 +1577,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
	 * inserts a valid as-if-COWed PTE without even looking up page cache.
-	 * So page lock of hpage does not protect from it, so we must not drop
+	 * So page lock of folio does not protect from it, so we must not drop
	 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
	 */
	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
@@ -1606,7 +1601,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			continue;
		/*
		 * We dropped ptl after the first scan, to do the mmu_notifier:
-		 * page lock stops more PTEs of the hpage being faulted in, but
+		 * page lock stops more PTEs of the folio being faulted in, but
		 * does not stop write faults COWing anon copies from existing
		 * PTEs; and does not stop those being swapped out or migrated.
		 */
@@ -1615,7 +1610,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			goto abort;
		}
		page = vm_normal_page(vma, addr, ptent);
-		if (hpage + i != page)
+		if (folio_page(folio, i) != page)
			goto abort;

		/*
@@ -1634,8 +1629,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,

	/* step 3: set proper refcount and mm_counters. */
	if (nr_ptes) {
-		page_ref_sub(hpage, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+		folio_ref_sub(folio, nr_ptes);
+		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
	}

	/* step 4: remove empty page table */
@@ -1659,24 +1654,24 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd
-		? set_huge_pmd(vma, haddr, pmd, hpage)
+		? set_huge_pmd(vma, haddr, pmd, &folio->page)
		: SCAN_SUCCEED;
-	goto drop_hpage;
+	goto drop_folio;
abort:
	if (nr_ptes) {
		flush_tlb_mm(mm);
-		page_ref_sub(hpage, nr_ptes);
-		add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
+		folio_ref_sub(folio, nr_ptes);
+		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
	}
	if (start_pte)
		pte_unmap_unlock(start_pte, ptl);
	if (pml && pml != ptl)
		spin_unlock(pml);
	if (notified)
		mmu_notifier_invalidate_range_end(&range);
-drop_hpage:
-	unlock_page(hpage);
-	put_page(hpage);
+drop_folio:
+	folio_unlock(folio);
+	folio_put(folio);
	return result;
}
0 commit comments