Skip to content

Commit 51dcbda

Browse files
author
Matthew Wilcox (Oracle)
committed
mm: Convert find_lock_entries() to use a folio_batch
find_lock_entries() already only returned the head page of folios, so convert it to return a folio_batch instead of a pagevec. That cascades through converting truncate_inode_pages_range() to delete_from_page_cache_batch() and page_cache_delete_batch().

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
1 parent 0e499ed commit 51dcbda

File tree

6 files changed

+69
-80
lines changed

6 files changed

+69
-80
lines changed

fs/f2fs/f2fs.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@
2828
#include <linux/fscrypt.h>
2929
#include <linux/fsverity.h>
3030

31+
struct pagevec;
32+
3133
#ifdef CONFIG_F2FS_CHECK_FS
3234
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
3335
#else

include/linux/pagemap.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
#include <linux/hardirq.h> /* for in_interrupt() */
1717
#include <linux/hugetlb_inline.h>
1818

19-
struct pagevec;
19+
struct folio_batch;
2020

2121
static inline bool mapping_empty(struct address_space *mapping)
2222
{
@@ -936,7 +936,7 @@ static inline void __delete_from_page_cache(struct page *page, void *shadow)
936936
}
937937
void replace_page_cache_page(struct page *old, struct page *new);
938938
void delete_from_page_cache_batch(struct address_space *mapping,
939-
struct pagevec *pvec);
939+
struct folio_batch *fbatch);
940940
int try_to_release_page(struct page *page, gfp_t gfp);
941941
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
942942
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,

mm/filemap.c

Lines changed: 29 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -270,30 +270,29 @@ void filemap_remove_folio(struct folio *folio)
270270
}
271271

272272
/*
273-
* page_cache_delete_batch - delete several pages from page cache
274-
* @mapping: the mapping to which pages belong
275-
* @pvec: pagevec with pages to delete
273+
* page_cache_delete_batch - delete several folios from page cache
274+
* @mapping: the mapping to which folios belong
275+
* @fbatch: batch of folios to delete
276276
*
277-
* The function walks over mapping->i_pages and removes pages passed in @pvec
278-
* from the mapping. The function expects @pvec to be sorted by page index
279-
* and is optimised for it to be dense.
280-
* It tolerates holes in @pvec (mapping entries at those indices are not
281-
* modified). The function expects only THP head pages to be present in the
282-
* @pvec.
277+
* The function walks over mapping->i_pages and removes folios passed in
278+
* @fbatch from the mapping. The function expects @fbatch to be sorted
279+
* by page index and is optimised for it to be dense.
280+
* It tolerates holes in @fbatch (mapping entries at those indices are not
281+
* modified).
283282
*
284283
* The function expects the i_pages lock to be held.
285284
*/
286285
static void page_cache_delete_batch(struct address_space *mapping,
287-
struct pagevec *pvec)
286+
struct folio_batch *fbatch)
288287
{
289-
XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
288+
XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
290289
int total_pages = 0;
291290
int i = 0;
292291
struct folio *folio;
293292

294293
mapping_set_update(&xas, mapping);
295294
xas_for_each(&xas, folio, ULONG_MAX) {
296-
if (i >= pagevec_count(pvec))
295+
if (i >= folio_batch_count(fbatch))
297296
break;
298297

299298
/* A swap/dax/shadow entry got inserted? Skip it. */
@@ -306,22 +305,21 @@ static void page_cache_delete_batch(struct address_space *mapping,
306305
* means our page has been removed, which shouldn't be
307306
* possible because we're holding the PageLock.
308307
*/
309-
if (&folio->page != pvec->pages[i]) {
308+
if (folio != fbatch->folios[i]) {
310309
VM_BUG_ON_FOLIO(folio->index >
311-
pvec->pages[i]->index, folio);
310+
fbatch->folios[i]->index, folio);
312311
continue;
313312
}
314313

315314
WARN_ON_ONCE(!folio_test_locked(folio));
316315

317316
if (folio->index == xas.xa_index)
318317
folio->mapping = NULL;
319-
/* Leave page->index set: truncation lookup relies on it */
318+
/* Leave folio->index set: truncation lookup relies on it */
320319

321320
/*
322-
* Move to the next page in the vector if this is a regular
323-
* page or the index is of the last sub-page of this compound
324-
* page.
321+
* Move to the next folio in the batch if this is a regular
322+
* folio or the index is of the last sub-page of this folio.
325323
*/
326324
if (folio->index + folio_nr_pages(folio) - 1 == xas.xa_index)
327325
i++;
@@ -332,29 +330,29 @@ static void page_cache_delete_batch(struct address_space *mapping,
332330
}
333331

334332
void delete_from_page_cache_batch(struct address_space *mapping,
335-
struct pagevec *pvec)
333+
struct folio_batch *fbatch)
336334
{
337335
int i;
338336

339-
if (!pagevec_count(pvec))
337+
if (!folio_batch_count(fbatch))
340338
return;
341339

342340
spin_lock(&mapping->host->i_lock);
343341
xa_lock_irq(&mapping->i_pages);
344-
for (i = 0; i < pagevec_count(pvec); i++) {
345-
struct folio *folio = page_folio(pvec->pages[i]);
342+
for (i = 0; i < folio_batch_count(fbatch); i++) {
343+
struct folio *folio = fbatch->folios[i];
346344

347345
trace_mm_filemap_delete_from_page_cache(folio);
348346
filemap_unaccount_folio(mapping, folio);
349347
}
350-
page_cache_delete_batch(mapping, pvec);
348+
page_cache_delete_batch(mapping, fbatch);
351349
xa_unlock_irq(&mapping->i_pages);
352350
if (mapping_shrinkable(mapping))
353351
inode_add_lru(mapping->host);
354352
spin_unlock(&mapping->host->i_lock);
355353

356-
for (i = 0; i < pagevec_count(pvec); i++)
357-
filemap_free_folio(mapping, page_folio(pvec->pages[i]));
354+
for (i = 0; i < folio_batch_count(fbatch); i++)
355+
filemap_free_folio(mapping, fbatch->folios[i]);
358356
}
359357

360358
int filemap_check_errors(struct address_space *mapping)
@@ -2052,8 +2050,8 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
20522050
* @mapping: The address_space to search.
20532051
* @start: The starting page cache index.
20542052
* @end: The final page index (inclusive).
2055-
* @pvec: Where the resulting entries are placed.
2056-
* @indices: The cache indices of the entries in @pvec.
2053+
* @fbatch: Where the resulting entries are placed.
2054+
* @indices: The cache indices of the entries in @fbatch.
20572055
*
20582056
* find_lock_entries() will return a batch of entries from @mapping.
20592057
* Swap, shadow and DAX entries are included. Folios are returned
@@ -2068,7 +2066,7 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
20682066
* Return: The number of entries which were found.
20692067
*/
20702068
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
2071-
pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
2069+
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
20722070
{
20732071
XA_STATE(xas, &mapping->i_pages, start);
20742072
struct folio *folio;
@@ -2088,8 +2086,8 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
20882086
VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
20892087
folio);
20902088
}
2091-
indices[pvec->nr] = xas.xa_index;
2092-
if (!pagevec_add(pvec, &folio->page))
2089+
indices[fbatch->nr] = xas.xa_index;
2090+
if (!folio_batch_add(fbatch, folio))
20932091
break;
20942092
goto next;
20952093
unlock:
@@ -2106,7 +2104,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
21062104
}
21072105
rcu_read_unlock();
21082106

2109-
return pagevec_count(pvec);
2107+
return folio_batch_count(fbatch);
21102108
}
21112109

21122110
/**

mm/internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
9393
}
9494

9595
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
96-
pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
96+
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
9797
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
9898
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
9999
void filemap_free_folio(struct address_space *mapping, struct folio *folio);

mm/shmem.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -919,7 +919,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
919919
pgoff_t end = (lend + 1) >> PAGE_SHIFT;
920920
unsigned int partial_start = lstart & (PAGE_SIZE - 1);
921921
unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
922-
struct pagevec pvec;
923922
struct folio_batch fbatch;
924923
pgoff_t indices[PAGEVEC_SIZE];
925924
long nr_swaps_freed = 0;
@@ -932,12 +931,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
932931
if (info->fallocend > start && info->fallocend <= end && !unfalloc)
933932
info->fallocend = start;
934933

935-
pagevec_init(&pvec);
934+
folio_batch_init(&fbatch);
936935
index = start;
937936
while (index < end && find_lock_entries(mapping, index, end - 1,
938-
&pvec, indices)) {
939-
for (i = 0; i < pagevec_count(&pvec); i++) {
940-
struct folio *folio = (struct folio *)pvec.pages[i];
937+
&fbatch, indices)) {
938+
for (i = 0; i < folio_batch_count(&fbatch); i++) {
939+
struct folio *folio = fbatch.folios[i];
941940

942941
index = indices[i];
943942

@@ -954,8 +953,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
954953
truncate_inode_folio(mapping, folio);
955954
folio_unlock(folio);
956955
}
957-
pagevec_remove_exceptionals(&pvec);
958-
pagevec_release(&pvec);
956+
folio_batch_remove_exceptionals(&fbatch);
957+
folio_batch_release(&fbatch);
959958
cond_resched();
960959
index++;
961960
}
@@ -988,7 +987,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
988987
if (start >= end)
989988
return;
990989

991-
folio_batch_init(&fbatch);
992990
index = start;
993991
while (index < end) {
994992
cond_resched();

mm/truncate.c

Lines changed: 29 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -56,11 +56,11 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
5656

5757
/*
5858
* Unconditionally remove exceptional entries. Usually called from truncate
59-
* path. Note that the pagevec may be altered by this function by removing
59+
* path. Note that the folio_batch may be altered by this function by removing
6060
* exceptional entries similar to what pagevec_remove_exceptionals does.
6161
*/
62-
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
63-
struct pagevec *pvec, pgoff_t *indices)
62+
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
63+
struct folio_batch *fbatch, pgoff_t *indices)
6464
{
6565
int i, j;
6666
bool dax;
@@ -69,11 +69,11 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
6969
if (shmem_mapping(mapping))
7070
return;
7171

72-
for (j = 0; j < pagevec_count(pvec); j++)
73-
if (xa_is_value(pvec->pages[j]))
72+
for (j = 0; j < folio_batch_count(fbatch); j++)
73+
if (xa_is_value(fbatch->folios[j]))
7474
break;
7575

76-
if (j == pagevec_count(pvec))
76+
if (j == folio_batch_count(fbatch))
7777
return;
7878

7979
dax = dax_mapping(mapping);
@@ -82,12 +82,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
8282
xa_lock_irq(&mapping->i_pages);
8383
}
8484

85-
for (i = j; i < pagevec_count(pvec); i++) {
86-
struct page *page = pvec->pages[i];
85+
for (i = j; i < folio_batch_count(fbatch); i++) {
86+
struct folio *folio = fbatch->folios[i];
8787
pgoff_t index = indices[i];
8888

89-
if (!xa_is_value(page)) {
90-
pvec->pages[j++] = page;
89+
if (!xa_is_value(folio)) {
90+
fbatch->folios[j++] = folio;
9191
continue;
9292
}
9393

@@ -96,7 +96,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
9696
continue;
9797
}
9898

99-
__clear_shadow_entry(mapping, index, page);
99+
__clear_shadow_entry(mapping, index, folio);
100100
}
101101

102102
if (!dax) {
@@ -105,14 +105,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
105105
inode_add_lru(mapping->host);
106106
spin_unlock(&mapping->host->i_lock);
107107
}
108-
pvec->nr = j;
109-
}
110-
111-
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
112-
struct folio_batch *fbatch, pgoff_t *indices)
113-
{
114-
truncate_exceptional_pvec_entries(mapping, (struct pagevec *)fbatch,
115-
indices);
108+
fbatch->nr = j;
116109
}
117110

118111
/*
@@ -303,7 +296,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
303296
pgoff_t end; /* exclusive */
304297
unsigned int partial_start; /* inclusive */
305298
unsigned int partial_end; /* exclusive */
306-
struct pagevec pvec;
307299
struct folio_batch fbatch;
308300
pgoff_t indices[PAGEVEC_SIZE];
309301
pgoff_t index;
@@ -333,18 +325,18 @@ void truncate_inode_pages_range(struct address_space *mapping,
333325
else
334326
end = (lend + 1) >> PAGE_SHIFT;
335327

336-
pagevec_init(&pvec);
328+
folio_batch_init(&fbatch);
337329
index = start;
338330
while (index < end && find_lock_entries(mapping, index, end - 1,
339-
&pvec, indices)) {
340-
index = indices[pagevec_count(&pvec) - 1] + 1;
341-
truncate_exceptional_pvec_entries(mapping, &pvec, indices);
342-
for (i = 0; i < pagevec_count(&pvec); i++)
343-
truncate_cleanup_folio(page_folio(pvec.pages[i]));
344-
delete_from_page_cache_batch(mapping, &pvec);
345-
for (i = 0; i < pagevec_count(&pvec); i++)
346-
unlock_page(pvec.pages[i]);
347-
pagevec_release(&pvec);
331+
&fbatch, indices)) {
332+
index = indices[folio_batch_count(&fbatch) - 1] + 1;
333+
truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
334+
for (i = 0; i < folio_batch_count(&fbatch); i++)
335+
truncate_cleanup_folio(fbatch.folios[i]);
336+
delete_from_page_cache_batch(mapping, &fbatch);
337+
for (i = 0; i < folio_batch_count(&fbatch); i++)
338+
folio_unlock(fbatch.folios[i]);
339+
folio_batch_release(&fbatch);
348340
cond_resched();
349341
}
350342

@@ -387,7 +379,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
387379
if (start >= end)
388380
goto out;
389381

390-
folio_batch_init(&fbatch);
391382
index = start;
392383
for ( ; ; ) {
393384
cond_resched();
@@ -489,16 +480,16 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
489480
pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
490481
{
491482
pgoff_t indices[PAGEVEC_SIZE];
492-
struct pagevec pvec;
483+
struct folio_batch fbatch;
493484
pgoff_t index = start;
494485
unsigned long ret;
495486
unsigned long count = 0;
496487
int i;
497488

498-
pagevec_init(&pvec);
499-
while (find_lock_entries(mapping, index, end, &pvec, indices)) {
500-
for (i = 0; i < pagevec_count(&pvec); i++) {
501-
struct page *page = pvec.pages[i];
489+
folio_batch_init(&fbatch);
490+
while (find_lock_entries(mapping, index, end, &fbatch, indices)) {
491+
for (i = 0; i < folio_batch_count(&fbatch); i++) {
492+
struct page *page = &fbatch.folios[i]->page;
502493

503494
/* We rely upon deletion not changing page->index */
504495
index = indices[i];
@@ -525,8 +516,8 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
525516
}
526517
count += ret;
527518
}
528-
pagevec_remove_exceptionals(&pvec);
529-
pagevec_release(&pvec);
519+
folio_batch_remove_exceptionals(&fbatch);
520+
folio_batch_release(&fbatch);
530521
cond_resched();
531522
index++;
532523
}

0 commit comments

Comments
 (0)