Skip to content

Commit 98b32d2

Browse files
VMoolaa authored and akpm00 committed
mm/khugepaged: convert collapse_pte_mapped_thp() to use folios
This removes 2 calls to compound_head() and helps convert khugepaged to use folios throughout. Previously, if the address passed to collapse_pte_mapped_thp() corresponded to a tail page, the scan would fail immediately. Using filemap_lock_folio() we get the corresponding folio back and try to operate on the folio instead. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Vishal Moola (Oracle) <[email protected]> Reviewed-by: Rik van Riel <[email protected]> Reviewed-by: Yang Shi <[email protected]> Cc: Kefeng Wang <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent b455f39 commit 98b32d2

File tree

1 file changed

+20
-25
lines changed

1 file changed

+20
-25
lines changed

mm/khugepaged.c

Lines changed: 20 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1477,7 +1477,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
14771477
bool notified = false;
14781478
unsigned long haddr = addr & HPAGE_PMD_MASK;
14791479
struct vm_area_struct *vma = vma_lookup(mm, haddr);
1480-
struct page *hpage;
1480+
struct folio *folio;
14811481
pte_t *start_pte, *pte;
14821482
pmd_t *pmd, pgt_pmd;
14831483
spinlock_t *pml = NULL, *ptl;
@@ -1510,19 +1510,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
15101510
if (userfaultfd_wp(vma))
15111511
return SCAN_PTE_UFFD_WP;
15121512

1513-
hpage = find_lock_page(vma->vm_file->f_mapping,
1513+
folio = filemap_lock_folio(vma->vm_file->f_mapping,
15141514
linear_page_index(vma, haddr));
1515-
if (!hpage)
1515+
if (IS_ERR(folio))
15161516
return SCAN_PAGE_NULL;
15171517

1518-
if (!PageHead(hpage)) {
1519-
result = SCAN_FAIL;
1520-
goto drop_hpage;
1521-
}
1522-
1523-
if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1518+
if (folio_order(folio) != HPAGE_PMD_ORDER) {
15241519
result = SCAN_PAGE_COMPOUND;
1525-
goto drop_hpage;
1520+
goto drop_folio;
15261521
}
15271522

15281523
result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
@@ -1536,13 +1531,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
15361531
*/
15371532
goto maybe_install_pmd;
15381533
default:
1539-
goto drop_hpage;
1534+
goto drop_folio;
15401535
}
15411536

15421537
result = SCAN_FAIL;
15431538
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
15441539
if (!start_pte) /* mmap_lock + page lock should prevent this */
1545-
goto drop_hpage;
1540+
goto drop_folio;
15461541

15471542
/* step 1: check all mapped PTEs are to the right huge page */
15481543
for (i = 0, addr = haddr, pte = start_pte;
@@ -1567,7 +1562,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
15671562
* Note that uprobe, debugger, or MAP_PRIVATE may change the
15681563
* page table, but the new page will not be a subpage of hpage.
15691564
*/
1570-
if (hpage + i != page)
1565+
if (folio_page(folio, i) != page)
15711566
goto abort;
15721567
}
15731568

@@ -1582,7 +1577,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
15821577
* page_table_lock) ptl nests inside pml. The less time we hold pml,
15831578
* the better; but userfaultfd's mfill_atomic_pte() on a private VMA
15841579
* inserts a valid as-if-COWed PTE without even looking up page cache.
1585-
* So page lock of hpage does not protect from it, so we must not drop
1580+
* So page lock of folio does not protect from it, so we must not drop
15861581
* ptl before pgt_pmd is removed, so uffd private needs pml taken now.
15871582
*/
15881583
if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
@@ -1606,7 +1601,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
16061601
continue;
16071602
/*
16081603
* We dropped ptl after the first scan, to do the mmu_notifier:
1609-
* page lock stops more PTEs of the hpage being faulted in, but
1604+
* page lock stops more PTEs of the folio being faulted in, but
16101605
* does not stop write faults COWing anon copies from existing
16111606
* PTEs; and does not stop those being swapped out or migrated.
16121607
*/
@@ -1615,7 +1610,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
16151610
goto abort;
16161611
}
16171612
page = vm_normal_page(vma, addr, ptent);
1618-
if (hpage + i != page)
1613+
if (folio_page(folio, i) != page)
16191614
goto abort;
16201615

16211616
/*
@@ -1634,8 +1629,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
16341629

16351630
/* step 3: set proper refcount and mm_counters. */
16361631
if (nr_ptes) {
1637-
page_ref_sub(hpage, nr_ptes);
1638-
add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
1632+
folio_ref_sub(folio, nr_ptes);
1633+
add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
16391634
}
16401635

16411636
/* step 4: remove empty page table */
@@ -1659,24 +1654,24 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
16591654
maybe_install_pmd:
16601655
/* step 5: install pmd entry */
16611656
result = install_pmd
1662-
? set_huge_pmd(vma, haddr, pmd, hpage)
1657+
? set_huge_pmd(vma, haddr, pmd, &folio->page)
16631658
: SCAN_SUCCEED;
1664-
goto drop_hpage;
1659+
goto drop_folio;
16651660
abort:
16661661
if (nr_ptes) {
16671662
flush_tlb_mm(mm);
1668-
page_ref_sub(hpage, nr_ptes);
1669-
add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
1663+
folio_ref_sub(folio, nr_ptes);
1664+
add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
16701665
}
16711666
if (start_pte)
16721667
pte_unmap_unlock(start_pte, ptl);
16731668
if (pml && pml != ptl)
16741669
spin_unlock(pml);
16751670
if (notified)
16761671
mmu_notifier_invalidate_range_end(&range);
1677-
drop_hpage:
1678-
unlock_page(hpage);
1679-
put_page(hpage);
1672+
drop_folio:
1673+
folio_unlock(folio);
1674+
folio_put(folio);
16801675
return result;
16811676
}
16821677

0 commit comments

Comments (0)