Skip to content

Commit 0e499ed

Browse files
author
Matthew Wilcox (Oracle)
committed
filemap: Return only folios from find_get_entries()
The callers have all been converted to work on folios, so convert find_get_entries() to return a batch of folios instead of pages. We also now return multiple large folios in a single call. Signed-off-by: Matthew Wilcox (Oracle) <[email protected]> Reviewed-by: Jan Kara <[email protected]> Reviewed-by: William Kucharski <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]>
1 parent 25d6a23 commit 0e499ed

File tree

5 files changed

+59
-69
lines changed

5 files changed

+59
-69
lines changed

include/linux/pagemap.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -592,8 +592,6 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
592592
return head + (index & (thp_nr_pages(head) - 1));
593593
}
594594

595-
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
596-
pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
597595
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
598596
pgoff_t end, unsigned int nr_pages,
599597
struct page **pages);

mm/filemap.c

Lines changed: 11 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -2015,57 +2015,36 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
20152015
* @mapping: The address_space to search
20162016
* @start: The starting page cache index
20172017
* @end: The final page index (inclusive).
2018-
* @pvec: Where the resulting entries are placed.
2018+
* @fbatch: Where the resulting entries are placed.
20192019
* @indices: The cache indices corresponding to the entries in @entries
20202020
*
20212021
* find_get_entries() will search for and return a batch of entries in
2022-
* the mapping. The entries are placed in @pvec. find_get_entries()
2023-
* takes a reference on any actual pages it returns.
2022+
* the mapping. The entries are placed in @fbatch. find_get_entries()
2023+
* takes a reference on any actual folios it returns.
20242024
*
2025-
* The search returns a group of mapping-contiguous page cache entries
2026-
* with ascending indexes. There may be holes in the indices due to
2027-
* not-present pages.
2025+
* The entries have ascending indexes. The indices may not be consecutive
2026+
* due to not-present entries or large folios.
20282027
*
2029-
* Any shadow entries of evicted pages, or swap entries from
2028+
* Any shadow entries of evicted folios, or swap entries from
20302029
* shmem/tmpfs, are included in the returned array.
20312030
*
2032-
* If it finds a Transparent Huge Page, head or tail, find_get_entries()
2033-
* stops at that page: the caller is likely to have a better way to handle
2034-
* the compound page as a whole, and then skip its extent, than repeatedly
2035-
* calling find_get_entries() to return all its tails.
2036-
*
2037-
* Return: the number of pages and shadow entries which were found.
2031+
* Return: The number of entries which were found.
20382032
*/
20392033
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
2040-
pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
2034+
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
20412035
{
20422036
XA_STATE(xas, &mapping->i_pages, start);
20432037
struct folio *folio;
2044-
unsigned int ret = 0;
2045-
unsigned nr_entries = PAGEVEC_SIZE;
20462038

20472039
rcu_read_lock();
20482040
while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2049-
struct page *page = &folio->page;
2050-
/*
2051-
* Terminate early on finding a THP, to allow the caller to
2052-
* handle it all at once; but continue if this is hugetlbfs.
2053-
*/
2054-
if (!xa_is_value(folio) && folio_test_large(folio) &&
2055-
!folio_test_hugetlb(folio)) {
2056-
page = folio_file_page(folio, xas.xa_index);
2057-
nr_entries = ret + 1;
2058-
}
2059-
2060-
indices[ret] = xas.xa_index;
2061-
pvec->pages[ret] = page;
2062-
if (++ret == nr_entries)
2041+
indices[fbatch->nr] = xas.xa_index;
2042+
if (!folio_batch_add(fbatch, folio))
20632043
break;
20642044
}
20652045
rcu_read_unlock();
20662046

2067-
pvec->nr = ret;
2068-
return ret;
2047+
return folio_batch_count(fbatch);
20692048
}
20702049

20712050
/**

mm/internal.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212
#include <linux/pagemap.h>
1313
#include <linux/tracepoint-defs.h>
1414

15+
struct folio_batch;
16+
1517
/*
1618
* The set of flags that only affect watermark checking and reclaim
1719
* behaviour. This is used by the MM to obey the caller constraints
@@ -92,6 +94,8 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
9294

9395
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
9496
pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
97+
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
98+
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
9599
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
96100
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
97101

mm/shmem.c

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -920,6 +920,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
920920
unsigned int partial_start = lstart & (PAGE_SIZE - 1);
921921
unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
922922
struct pagevec pvec;
923+
struct folio_batch fbatch;
923924
pgoff_t indices[PAGEVEC_SIZE];
924925
long nr_swaps_freed = 0;
925926
pgoff_t index;
@@ -987,11 +988,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
987988
if (start >= end)
988989
return;
989990

991+
folio_batch_init(&fbatch);
990992
index = start;
991993
while (index < end) {
992994
cond_resched();
993995

994-
if (!find_get_entries(mapping, index, end - 1, &pvec,
996+
if (!find_get_entries(mapping, index, end - 1, &fbatch,
995997
indices)) {
996998
/* If all gone or hole-punch or unfalloc, we're done */
997999
if (index == start || end != -1)
@@ -1000,14 +1002,14 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
10001002
index = start;
10011003
continue;
10021004
}
1003-
for (i = 0; i < pagevec_count(&pvec); i++) {
1004-
struct page *page = pvec.pages[i];
1005+
for (i = 0; i < folio_batch_count(&fbatch); i++) {
1006+
struct folio *folio = fbatch.folios[i];
10051007

10061008
index = indices[i];
1007-
if (xa_is_value(page)) {
1009+
if (xa_is_value(folio)) {
10081010
if (unfalloc)
10091011
continue;
1010-
if (shmem_free_swap(mapping, index, page)) {
1012+
if (shmem_free_swap(mapping, index, folio)) {
10111013
/* Swap was replaced by page: retry */
10121014
index--;
10131015
break;
@@ -1016,33 +1018,35 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
10161018
continue;
10171019
}
10181020

1019-
lock_page(page);
1021+
folio_lock(folio);
10201022

1021-
if (!unfalloc || !PageUptodate(page)) {
1022-
if (page_mapping(page) != mapping) {
1023+
if (!unfalloc || !folio_test_uptodate(folio)) {
1024+
struct page *page = folio_file_page(folio,
1025+
index);
1026+
if (folio_mapping(folio) != mapping) {
10231027
/* Page was replaced by swap: retry */
1024-
unlock_page(page);
1028+
folio_unlock(folio);
10251029
index--;
10261030
break;
10271031
}
1028-
VM_BUG_ON_PAGE(PageWriteback(page), page);
1032+
VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1033+
folio);
10291034
if (shmem_punch_compound(page, start, end))
1030-
truncate_inode_folio(mapping,
1031-
page_folio(page));
1035+
truncate_inode_folio(mapping, folio);
10321036
else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
10331037
/* Wipe the page and don't get stuck */
10341038
clear_highpage(page);
10351039
flush_dcache_page(page);
1036-
set_page_dirty(page);
1040+
folio_mark_dirty(folio);
10371041
if (index <
10381042
round_up(start, HPAGE_PMD_NR))
10391043
start = index + 1;
10401044
}
10411045
}
1042-
unlock_page(page);
1046+
folio_unlock(folio);
10431047
}
1044-
pagevec_remove_exceptionals(&pvec);
1045-
pagevec_release(&pvec);
1048+
folio_batch_remove_exceptionals(&fbatch);
1049+
folio_batch_release(&fbatch);
10461050
index++;
10471051
}
10481052

mm/truncate.c

Lines changed: 24 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,13 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
108108
pvec->nr = j;
109109
}
110110

111+
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
112+
struct folio_batch *fbatch, pgoff_t *indices)
113+
{
114+
truncate_exceptional_pvec_entries(mapping, (struct pagevec *)fbatch,
115+
indices);
116+
}
117+
111118
/*
112119
* Invalidate exceptional entry if easily possible. This handles exceptional
113120
* entries for invalidate_inode_pages().
@@ -297,6 +304,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
297304
unsigned int partial_start; /* inclusive */
298305
unsigned int partial_end; /* exclusive */
299306
struct pagevec pvec;
307+
struct folio_batch fbatch;
300308
pgoff_t indices[PAGEVEC_SIZE];
301309
pgoff_t index;
302310
int i;
@@ -379,10 +387,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
379387
if (start >= end)
380388
goto out;
381389

390+
folio_batch_init(&fbatch);
382391
index = start;
383392
for ( ; ; ) {
384393
cond_resched();
385-
if (!find_get_entries(mapping, index, end - 1, &pvec,
394+
if (!find_get_entries(mapping, index, end - 1, &fbatch,
386395
indices)) {
387396
/* If all gone from start onwards, we're done */
388397
if (index == start)
@@ -392,16 +401,14 @@ void truncate_inode_pages_range(struct address_space *mapping,
392401
continue;
393402
}
394403

395-
for (i = 0; i < pagevec_count(&pvec); i++) {
396-
struct page *page = pvec.pages[i];
397-
struct folio *folio;
404+
for (i = 0; i < folio_batch_count(&fbatch); i++) {
405+
struct folio *folio = fbatch.folios[i];
398406

399407
/* We rely upon deletion not changing page->index */
400408
index = indices[i];
401409

402-
if (xa_is_value(page))
410+
if (xa_is_value(folio))
403411
continue;
404-
folio = page_folio(page);
405412

406413
folio_lock(folio);
407414
VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
@@ -410,8 +417,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
410417
folio_unlock(folio);
411418
index = folio_index(folio) + folio_nr_pages(folio) - 1;
412419
}
413-
truncate_exceptional_pvec_entries(mapping, &pvec, indices);
414-
pagevec_release(&pvec);
420+
truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
421+
folio_batch_release(&fbatch);
415422
index++;
416423
}
417424

@@ -625,7 +632,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
625632
pgoff_t start, pgoff_t end)
626633
{
627634
pgoff_t indices[PAGEVEC_SIZE];
628-
struct pagevec pvec;
635+
struct folio_batch fbatch;
629636
pgoff_t index;
630637
int i;
631638
int ret = 0;
@@ -635,23 +642,21 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
635642
if (mapping_empty(mapping))
636643
goto out;
637644

638-
pagevec_init(&pvec);
645+
folio_batch_init(&fbatch);
639646
index = start;
640-
while (find_get_entries(mapping, index, end, &pvec, indices)) {
641-
for (i = 0; i < pagevec_count(&pvec); i++) {
642-
struct page *page = pvec.pages[i];
643-
struct folio *folio;
647+
while (find_get_entries(mapping, index, end, &fbatch, indices)) {
648+
for (i = 0; i < folio_batch_count(&fbatch); i++) {
649+
struct folio *folio = fbatch.folios[i];
644650

645651
/* We rely upon deletion not changing folio->index */
646652
index = indices[i];
647653

648-
if (xa_is_value(page)) {
654+
if (xa_is_value(folio)) {
649655
if (!invalidate_exceptional_entry2(mapping,
650-
index, page))
656+
index, folio))
651657
ret = -EBUSY;
652658
continue;
653659
}
654-
folio = page_folio(page);
655660

656661
if (!did_range_unmap && folio_mapped(folio)) {
657662
/*
@@ -684,8 +689,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
684689
ret = ret2;
685690
folio_unlock(folio);
686691
}
687-
pagevec_remove_exceptionals(&pvec);
688-
pagevec_release(&pvec);
692+
folio_batch_remove_exceptionals(&fbatch);
693+
folio_batch_release(&fbatch);
689694
cond_resched();
690695
index++;
691696
}

0 commit comments

Comments (0)