Commit 6ab5eed

erofs: avoid on-stack pagepool directly passed by arguments
On-stack pagepool is used so that short-lived temporary pages could be shared within a single I/O request (e.g. among multiple pclusters). Move the remaining frontend-related uses into z_erofs_decompress_frontend to avoid passing too many arguments.

Signed-off-by: Gao Xiang <[email protected]>
Reviewed-by: Yue Hu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 05b63d2 commit 6ab5eed
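The pattern this commit relies on can be summarized in a few lines of plain C. The sketch below is a minimal userspace analogue, not EROFS code: a per-request pool of short-lived pages, reused within one request and drained once at the end. The names struct frontend, pool_get, pool_put and pool_release are hypothetical stand-ins for z_erofs_decompress_frontend, erofs_allocpage(), erofs_pagepool_add() and erofs_release_pages(); the kernel chains real struct page objects through page_private() rather than an explicit next field.

#include <stdio.h>
#include <stdlib.h>

/* stand-in for a short-lived page; the kernel chains real struct pages
 * via page_private() instead of a next field */
struct pool_page {
        struct pool_page *next;
};

/* stand-in for z_erofs_decompress_frontend: after this commit the pool
 * head lives here instead of being an on-stack variable in the caller */
struct frontend {
        struct pool_page *pagepool;
};

/* take a page from the per-request pool, falling back to the allocator
 * (the erofs_allocpage() role) */
static struct pool_page *pool_get(struct frontend *fe)
{
        struct pool_page *p = fe->pagepool;

        if (p) {
                fe->pagepool = p->next;
                return p;
        }
        return malloc(sizeof(*p));
}

/* return a temporary page to the pool for reuse within this request
 * (the erofs_pagepool_add() role) */
static void pool_put(struct frontend *fe, struct pool_page *p)
{
        p->next = fe->pagepool;
        fe->pagepool = p;
}

/* free whatever is left once the request completes
 * (the erofs_release_pages() role) */
static void pool_release(struct frontend *fe)
{
        while (fe->pagepool) {
                struct pool_page *p = fe->pagepool;

                fe->pagepool = p->next;
                free(p);
        }
}

int main(void)
{
        struct frontend fe = { .pagepool = NULL };
        struct pool_page *p = pool_get(&fe);    /* first use really allocates */

        if (!p)
                return 1;
        pool_put(&fe, p);                       /* recycle, do not free */
        if (pool_get(&fe) == p)
                puts("page reused within the same request");
        pool_put(&fe, p);
        pool_release(&fe);                      /* single teardown point */
        return 0;
}

With the pool head stored in the frontend, helpers reach it through the context they already receive, which is exactly what the diff below does to each z_erofs_* function.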

1 file changed (+30, -34 lines)

fs/erofs/zdata.c

@@ -240,13 +240,14 @@ static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
 
 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
                                 struct z_erofs_bvec *bvec,
-                                struct page **candidate_bvpage)
+                                struct page **candidate_bvpage,
+                                struct page **pagepool)
 {
         if (iter->cur >= iter->nr) {
                 struct page *nextpage = *candidate_bvpage;
 
                 if (!nextpage) {
-                        nextpage = alloc_page(GFP_NOFS);
+                        nextpage = erofs_allocpage(pagepool, GFP_NOFS);
                         if (!nextpage)
                                 return -ENOMEM;
                         set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -547,6 +548,7 @@ struct z_erofs_decompress_frontend {
         struct erofs_map_blocks map;
         struct z_erofs_bvec_iter biter;
 
+        struct page *pagepool;
         struct page *candidate_bvpage;
         struct z_erofs_pcluster *pcl, *tailpcl;
         z_erofs_next_pcluster_t owned_head;
@@ -581,8 +583,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
         return false;
 }
 
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
-                               struct page **pagepool)
+static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
         struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
         struct z_erofs_pcluster *pcl = fe->pcl;
@@ -623,7 +624,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
                          * succeeds or fallback to in-place I/O instead
                          * to avoid any direct reclaim.
                          */
-                        newpage = erofs_allocpage(pagepool, gfp);
+                        newpage = erofs_allocpage(&fe->pagepool, gfp);
                         if (!newpage)
                                 continue;
                         set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
@@ -636,7 +637,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
                 if (page)
                         put_page(page);
                 else if (newpage)
-                        erofs_pagepool_add(pagepool, newpage);
+                        erofs_pagepool_add(&fe->pagepool, newpage);
         }
 
         /*
@@ -734,7 +735,8 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
                     !fe->candidate_bvpage)
                         fe->candidate_bvpage = bvec->page;
         }
-        ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
+        ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
+                                   &fe->pagepool);
         fe->pcl->vcnt += (ret >= 0);
         return ret;
 }
@@ -959,7 +961,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-                                struct page *page, struct page **pagepool)
+                                struct page *page)
 {
         struct inode *const inode = fe->inode;
         struct erofs_map_blocks *const map = &fe->map;
@@ -1017,7 +1019,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
         } else {
                 /* bind cache first when cached decompression is preferred */
-                z_erofs_bind_cache(fe, pagepool);
+                z_erofs_bind_cache(fe);
         }
 hitted:
         /*
@@ -1660,7 +1662,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
 }
 
 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
-                                 struct page **pagepool,
                                  struct z_erofs_decompressqueue *fgq,
                                  bool *force_fg, bool readahead)
 {
@@ -1723,8 +1724,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
                 do {
                         struct page *page;
 
-                        page = pickup_page_for_submission(pcl, i++, pagepool,
-                                                          mc);
+                        page = pickup_page_for_submission(pcl, i++,
+                                                          &f->pagepool, mc);
                         if (!page)
                                 continue;
 
@@ -1789,16 +1790,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 }
 
 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-                             struct page **pagepool, bool force_fg, bool ra)
+                             bool force_fg, bool ra)
 {
         struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
         if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
                 return;
-        z_erofs_submit_queue(f, pagepool, io, &force_fg, ra);
+        z_erofs_submit_queue(f, io, &force_fg, ra);
 
         /* handle bypass queue (no i/o pclusters) immediately */
-        z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
+        z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
 
         if (!force_fg)
                 return;
@@ -1807,16 +1808,15 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
         wait_for_completion_io(&io[JQ_SUBMIT].u.done);
 
         /* handle synchronous decompress queue in the caller context */
-        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
+        z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
 }
 
 /*
  * Since partial uptodate is still unimplemented for now, we have to use
  * approximate readmore strategies as a start.
  */
 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
-                                      struct readahead_control *rac,
-                                      struct page **pagepool, bool backmost)
+                                      struct readahead_control *rac, bool backmost)
 {
         struct inode *inode = f->inode;
         struct erofs_map_blocks *map = &f->map;
@@ -1858,7 +1858,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                 if (PageUptodate(page)) {
                         unlock_page(page);
                 } else {
-                        err = z_erofs_do_read_page(f, page, pagepool);
+                        err = z_erofs_do_read_page(f, page);
                         if (err)
                                 erofs_err(inode->i_sb,
                                           "readmore error at page %lu @ nid %llu",
@@ -1879,27 +1879,24 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
         struct inode *const inode = page->mapping->host;
         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
-        struct page *pagepool = NULL;
         int err;
 
         trace_erofs_readpage(page, false);
         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
 
-        z_erofs_pcluster_readmore(&f, NULL, &pagepool, true);
-        err = z_erofs_do_read_page(&f, page, &pagepool);
-        z_erofs_pcluster_readmore(&f, NULL, &pagepool, false);
-
+        z_erofs_pcluster_readmore(&f, NULL, true);
+        err = z_erofs_do_read_page(&f, page);
+        z_erofs_pcluster_readmore(&f, NULL, false);
         (void)z_erofs_collector_end(&f);
 
         /* if some compressed cluster ready, need submit them anyway */
-        z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0),
-                         false);
+        z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
 
         if (err)
                 erofs_err(inode->i_sb, "failed to read, err [%d]", err);
 
         erofs_put_metabuf(&f.map.buf);
-        erofs_release_pages(&pagepool);
+        erofs_release_pages(&f.pagepool);
         return err;
 }
 
@@ -1908,12 +1905,12 @@ static void z_erofs_readahead(struct readahead_control *rac)
         struct inode *const inode = rac->mapping->host;
         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
-        struct page *pagepool = NULL, *head = NULL, *page;
+        struct page *head = NULL, *page;
         unsigned int nr_pages;
 
         f.headoffset = readahead_pos(rac);
 
-        z_erofs_pcluster_readmore(&f, rac, &pagepool, true);
+        z_erofs_pcluster_readmore(&f, rac, true);
         nr_pages = readahead_count(rac);
         trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
 
@@ -1929,20 +1926,19 @@ static void z_erofs_readahead(struct readahead_control *rac)
                 /* traversal in reverse order */
                 head = (void *)page_private(page);
 
-                err = z_erofs_do_read_page(&f, page, &pagepool);
+                err = z_erofs_do_read_page(&f, page);
                 if (err)
                         erofs_err(inode->i_sb,
                                   "readahead error at page %lu @ nid %llu",
                                   page->index, EROFS_I(inode)->nid);
                 put_page(page);
         }
-        z_erofs_pcluster_readmore(&f, rac, &pagepool, false);
+        z_erofs_pcluster_readmore(&f, rac, false);
         (void)z_erofs_collector_end(&f);
 
-        z_erofs_runqueue(&f, &pagepool,
-                         z_erofs_is_sync_decompress(sbi, nr_pages), true);
+        z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
         erofs_put_metabuf(&f.map.buf);
-        erofs_release_pages(&pagepool);
+        erofs_release_pages(&f.pagepool);
 }
 
 const struct address_space_operations z_erofs_aops = {
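Every hunk above is the same mechanical change, so the net effect on call shape is easy to see in isolation. The compilable toy below uses illustrative names only, not the kernel source: once the pool head lives in the context struct, the extra struct page **pagepool parameter and the caller's on-stack pool variable both disappear from the call site.

#include <stddef.h>

struct page;                    /* opaque here, as in the sketch's caller */

struct frontend {               /* stands in for z_erofs_decompress_frontend */
        struct page *pagepool;  /* pool head now carried by the context */
};

/* before: every helper threads the pool explicitly */
static void runqueue_old(struct frontend *f, struct page **pagepool,
                         int force_fg)
{
        (void)f; (void)pagepool; (void)force_fg;
}

/* after: helpers reach the pool through the context they already take */
static void runqueue_new(struct frontend *f, int force_fg)
{
        (void)f->pagepool; (void)force_fg;
}

static void read_request(void)
{
        struct frontend f = { .pagepool = NULL };

        /* old caller: an extra on-stack variable plus an extra argument */
        struct page *pagepool = NULL;
        runqueue_old(&f, &pagepool, 1);

        /* new caller: the pool is invisible at the call site */
        runqueue_new(&f, 1);
}

int main(void)
{
        read_request();
        return 0;
}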
