Skip to content

Commit 5220cfe

Browse files
hsiangkao authored and gregkh committed
erofs: avoid on-stack pagepool directly passed by arguments
[ Upstream commit 6ab5eed6002edc5a29b683285e90459a7df6ce2b ] On-stack pagepool is used so that short-lived temporary pages could be shared within a single I/O request (e.g. among multiple pclusters). Moving the remaining frontend-related uses into z_erofs_decompress_frontend to avoid too many arguments. Signed-off-by: Gao Xiang <[email protected]> Reviewed-by: Yue Hu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Stable-dep-of: 99f7619a77a0 ("erofs: fix to add missing tracepoint in erofs_read_folio()") Signed-off-by: Sasha Levin <[email protected]>
1 parent cef58a3 commit 5220cfe

File tree

1 file changed

+30
-34
lines changed

1 file changed

+30
-34
lines changed

fs/erofs/zdata.c

Lines changed: 30 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -236,13 +236,14 @@ static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
236236

237237
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
238238
struct z_erofs_bvec *bvec,
239-
struct page **candidate_bvpage)
239+
struct page **candidate_bvpage,
240+
struct page **pagepool)
240241
{
241242
if (iter->cur >= iter->nr) {
242243
struct page *nextpage = *candidate_bvpage;
243244

244245
if (!nextpage) {
245-
nextpage = alloc_page(GFP_NOFS);
246+
nextpage = erofs_allocpage(pagepool, GFP_NOFS);
246247
if (!nextpage)
247248
return -ENOMEM;
248249
set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -406,6 +407,7 @@ struct z_erofs_decompress_frontend {
406407
struct erofs_map_blocks map;
407408
struct z_erofs_bvec_iter biter;
408409

410+
struct page *pagepool;
409411
struct page *candidate_bvpage;
410412
struct z_erofs_pcluster *pcl;
411413
z_erofs_next_pcluster_t owned_head;
@@ -440,8 +442,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
440442
return false;
441443
}
442444

443-
static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
444-
struct page **pagepool)
445+
static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
445446
{
446447
struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
447448
struct z_erofs_pcluster *pcl = fe->pcl;
@@ -482,7 +483,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
482483
* succeeds or fallback to in-place I/O instead
483484
* to avoid any direct reclaim.
484485
*/
485-
newpage = erofs_allocpage(pagepool, gfp);
486+
newpage = erofs_allocpage(&fe->pagepool, gfp);
486487
if (!newpage)
487488
continue;
488489
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
@@ -495,7 +496,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
495496
if (page)
496497
put_page(page);
497498
else if (newpage)
498-
erofs_pagepool_add(pagepool, newpage);
499+
erofs_pagepool_add(&fe->pagepool, newpage);
499500
}
500501

501502
/*
@@ -593,7 +594,8 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
593594
!fe->candidate_bvpage)
594595
fe->candidate_bvpage = bvec->page;
595596
}
596-
ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
597+
ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
598+
&fe->pagepool);
597599
fe->pcl->vcnt += (ret >= 0);
598600
return ret;
599601
}
@@ -797,7 +799,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
797799
}
798800

799801
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
800-
struct page *page, struct page **pagepool)
802+
struct page *page)
801803
{
802804
struct inode *const inode = fe->inode;
803805
struct erofs_map_blocks *const map = &fe->map;
@@ -858,7 +860,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
858860
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
859861
} else {
860862
/* bind cache first when cached decompression is preferred */
861-
z_erofs_bind_cache(fe, pagepool);
863+
z_erofs_bind_cache(fe);
862864
}
863865
hitted:
864866
/*
@@ -1470,7 +1472,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
14701472
}
14711473

14721474
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1473-
struct page **pagepool,
14741475
struct z_erofs_decompressqueue *fgq,
14751476
bool *force_fg, bool readahead)
14761477
{
@@ -1528,8 +1529,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
15281529
do {
15291530
struct page *page;
15301531

1531-
page = pickup_page_for_submission(pcl, i++, pagepool,
1532-
mc);
1532+
page = pickup_page_for_submission(pcl, i++,
1533+
&f->pagepool, mc);
15331534
if (!page)
15341535
continue;
15351536

@@ -1594,16 +1595,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
15941595
}
15951596

15961597
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1597-
struct page **pagepool, bool force_fg, bool ra)
1598+
bool force_fg, bool ra)
15981599
{
15991600
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
16001601

16011602
if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
16021603
return;
1603-
z_erofs_submit_queue(f, pagepool, io, &force_fg, ra);
1604+
z_erofs_submit_queue(f, io, &force_fg, ra);
16041605

16051606
/* handle bypass queue (no i/o pclusters) immediately */
1606-
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
1607+
z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
16071608

16081609
if (!force_fg)
16091610
return;
@@ -1612,16 +1613,15 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
16121613
wait_for_completion_io(&io[JQ_SUBMIT].u.done);
16131614

16141615
/* handle synchronous decompress queue in the caller context */
1615-
z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
1616+
z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
16161617
}
16171618

16181619
/*
16191620
* Since partial uptodate is still unimplemented for now, we have to use
16201621
* approximate readmore strategies as a start.
16211622
*/
16221623
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1623-
struct readahead_control *rac,
1624-
struct page **pagepool, bool backmost)
1624+
struct readahead_control *rac, bool backmost)
16251625
{
16261626
struct inode *inode = f->inode;
16271627
struct erofs_map_blocks *map = &f->map;
@@ -1663,7 +1663,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
16631663
if (PageUptodate(page)) {
16641664
unlock_page(page);
16651665
} else {
1666-
err = z_erofs_do_read_page(f, page, pagepool);
1666+
err = z_erofs_do_read_page(f, page);
16671667
if (err)
16681668
erofs_err(inode->i_sb,
16691669
"readmore error at page %lu @ nid %llu",
@@ -1684,27 +1684,24 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
16841684
struct inode *const inode = page->mapping->host;
16851685
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
16861686
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1687-
struct page *pagepool = NULL;
16881687
int err;
16891688

16901689
trace_erofs_readpage(page, false);
16911690
f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
16921691

1693-
z_erofs_pcluster_readmore(&f, NULL, &pagepool, true);
1694-
err = z_erofs_do_read_page(&f, page, &pagepool);
1695-
z_erofs_pcluster_readmore(&f, NULL, &pagepool, false);
1696-
1692+
z_erofs_pcluster_readmore(&f, NULL, true);
1693+
err = z_erofs_do_read_page(&f, page);
1694+
z_erofs_pcluster_readmore(&f, NULL, false);
16971695
(void)z_erofs_collector_end(&f);
16981696

16991697
/* if some compressed cluster ready, need submit them anyway */
1700-
z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0),
1701-
false);
1698+
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
17021699

17031700
if (err)
17041701
erofs_err(inode->i_sb, "failed to read, err [%d]", err);
17051702

17061703
erofs_put_metabuf(&f.map.buf);
1707-
erofs_release_pages(&pagepool);
1704+
erofs_release_pages(&f.pagepool);
17081705
return err;
17091706
}
17101707

@@ -1713,12 +1710,12 @@ static void z_erofs_readahead(struct readahead_control *rac)
17131710
struct inode *const inode = rac->mapping->host;
17141711
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
17151712
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1716-
struct page *pagepool = NULL, *head = NULL, *page;
1713+
struct page *head = NULL, *page;
17171714
unsigned int nr_pages;
17181715

17191716
f.headoffset = readahead_pos(rac);
17201717

1721-
z_erofs_pcluster_readmore(&f, rac, &pagepool, true);
1718+
z_erofs_pcluster_readmore(&f, rac, true);
17221719
nr_pages = readahead_count(rac);
17231720
trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
17241721

@@ -1734,20 +1731,19 @@ static void z_erofs_readahead(struct readahead_control *rac)
17341731
/* traversal in reverse order */
17351732
head = (void *)page_private(page);
17361733

1737-
err = z_erofs_do_read_page(&f, page, &pagepool);
1734+
err = z_erofs_do_read_page(&f, page);
17381735
if (err)
17391736
erofs_err(inode->i_sb,
17401737
"readahead error at page %lu @ nid %llu",
17411738
page->index, EROFS_I(inode)->nid);
17421739
put_page(page);
17431740
}
1744-
z_erofs_pcluster_readmore(&f, rac, &pagepool, false);
1741+
z_erofs_pcluster_readmore(&f, rac, false);
17451742
(void)z_erofs_collector_end(&f);
17461743

1747-
z_erofs_runqueue(&f, &pagepool,
1748-
z_erofs_is_sync_decompress(sbi, nr_pages), true);
1744+
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
17491745
erofs_put_metabuf(&f.map.buf);
1750-
erofs_release_pages(&pagepool);
1746+
erofs_release_pages(&f.pagepool);
17511747
}
17521748

17531749
const struct address_space_operations z_erofs_aops = {

0 commit comments

Comments (0)