Skip to content

Commit 9030fb0

Browse files
committed
Merge tag 'folio-5.18c' of git://git.infradead.org/users/willy/pagecache
Pull folio updates from Matthew Wilcox:

 - Rewrite how munlock works to massively reduce the contention on
   i_mmap_rwsem (Hugh Dickins):
   https://lore.kernel.org/linux-mm/[email protected]/

 - Sort out the page refcount mess for ZONE_DEVICE pages (Christoph Hellwig):
   https://lore.kernel.org/linux-mm/[email protected]/

 - Convert GUP to use folios and make pincount available for order-1 pages
   (Matthew Wilcox)

 - Convert a few more truncation functions to use folios (Matthew Wilcox)

 - Convert page_vma_mapped_walk to use PFNs instead of pages (Matthew Wilcox)

 - Convert rmap_walk to use folios (Matthew Wilcox)

 - Convert most of shrink_page_list() to use a folio (Matthew Wilcox)

 - Add support for creating large folios in readahead (Matthew Wilcox)

* tag 'folio-5.18c' of git://git.infradead.org/users/willy/pagecache: (114 commits)
  mm/damon: minor cleanup for damon_pa_young
  selftests/vm/transhuge-stress: Support file-backed PMD folios
  mm/filemap: Support VM_HUGEPAGE for file mappings
  mm/readahead: Switch to page_cache_ra_order
  mm/readahead: Align file mappings for non-DAX
  mm/readahead: Add large folio readahead
  mm: Support arbitrary THP sizes
  mm: Make large folios depend on THP
  mm: Fix READ_ONLY_THP warning
  mm/filemap: Allow large folios to be added to the page cache
  mm: Turn can_split_huge_page() into can_split_folio()
  mm/vmscan: Convert pageout() to take a folio
  mm/vmscan: Turn page_check_references() into folio_check_references()
  mm/vmscan: Account large folios correctly
  mm/vmscan: Optimise shrink_page_list for non-PMD-sized folios
  mm/vmscan: Free non-shmem folios without splitting them
  mm/rmap: Constify the rmap_walk_control argument
  mm/rmap: Convert rmap_walk() to take a folio
  mm: Turn page_anon_vma() into folio_anon_vma()
  mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read()
  ...
2 parents 3bf03b9 + 2a3c4bc commit 9030fb0

File tree

100 files changed

+2900
-3020
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

100 files changed

+2900
-3020
lines changed

Documentation/core-api/pin_user_pages.rst

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -55,18 +55,18 @@ flags the caller provides. The caller is required to pass in a non-null struct
 pages* array, and the function then pins pages by incrementing each by a special
 value: GUP_PIN_COUNTING_BIAS.
 
-For huge pages (and in fact, any compound page of more than 2 pages), the
-GUP_PIN_COUNTING_BIAS scheme is not used. Instead, an exact form of pin counting
-is achieved, by using the 3rd struct page in the compound page. A new struct
-page field, hpage_pinned_refcount, has been added in order to support this.
+For compound pages, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
+an exact form of pin counting is achieved, by using the 2nd struct page
+in the compound page. A new struct page field, compound_pincount, has
+been added in order to support this.
 
 This approach for compound pages avoids the counting upper limit problems that
 are discussed below. Those limitations would have been aggravated severely by
 huge pages, because each tail page adds a refcount to the head page. And in
-fact, testing revealed that, without a separate hpage_pinned_refcount field,
+fact, testing revealed that, without a separate compound_pincount field,
 page overflows were seen in some huge page stress tests.
 
-This also means that huge pages and compound pages (of order > 1) do not suffer
+This also means that huge pages and compound pages do not suffer
 from the false positives problem that is mentioned below.::
 
 Function
@@ -264,9 +264,9 @@ place.)
 Other diagnostics
 =================
 
-dump_page() has been enhanced slightly, to handle these new counting fields, and
-to better report on compound pages in general. Specifically, for compound pages
-with order > 1, the exact (hpage_pinned_refcount) pincount is reported.
+dump_page() has been enhanced slightly, to handle these new counting
+fields, and to better report on compound pages in general. Specifically,
+for compound pages, the exact (compound_pincount) pincount is reported.
 
 References
 ==========

arch/alpha/include/asm/pgtable.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,7 @@ pmd_page_vaddr(pmd_t pmd)
 	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
 }
 
+#define pmd_pfn(pmd)	(pmd_val(pmd) >> 32)
 #define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> 32))
 #define pud_page(pud)	(pfn_to_page(pud_val(pud) >> 32))

arch/arc/include/asm/hugepage.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@ static inline pmd_t pte_pmd(pte_t pte)
 
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
-#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 
 #define mk_pmd(page, prot)	pte_pmd(mk_pte(page, prot))

arch/arc/include/asm/pgtable-levels.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,7 @@
 #define pmd_present(x)		(pmd_val(x))
 #define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)
 #define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)
+#define pmd_pfn(pmd)		((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
 #define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))
 #define set_pmd(pmdp, pmd)	(*(pmdp) = pmd)
 #define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))

arch/arm/include/asm/pgtable-2level.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -208,6 +208,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 }
 #define pmd_offset pmd_offset
 
+#define pmd_pfn(pmd)		(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
 #define pmd_large(pmd)		(pmd_val(pmd) & 2)
 #define pmd_leaf(pmd)		(pmd_val(pmd) & 2)
 #define pmd_bad(pmd)		(pmd_val(pmd) & 2)

arch/arm64/mm/mmu.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
+#include <linux/memremap.h>
 #include <linux/memory.h>
 #include <linux/fs.h>
 #include <linux/io.h>

arch/csky/include/asm/pgtable.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
+#define pmd_pfn(pmd)	(pmd_phys(pmd) >> PAGE_SHIFT)
 #define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 #define pte_clear(mm, addr, ptep)	set_pte((ptep), \
 	(((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))

arch/hexagon/include/asm/pgtable.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,11 @@ static inline int pmd_bad(pmd_t pmd)
 	return 0;
 }
 
+/*
+ * pmd_pfn - converts a PMD entry to a page frame number
+ */
+#define pmd_pfn(pmd)  (pmd_val(pmd) >> PAGE_SHIFT)
+
 /*
  * pmd_page - converts a PMD entry to a page pointer
  */

arch/ia64/include/asm/pgtable.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -267,6 +267,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
 #define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_pfn(pmd)			((pmd_val(pmd) & _PFN_MASK) >> PAGE_SHIFT)
 #define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
 
 #define pud_none(pud)			(!pud_val(pud))

arch/m68k/include/asm/mcf_pgtable.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -322,6 +322,7 @@ extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	(__pte((x).val))
 
+#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
 #define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

0 commit comments

Comments (0)