@@ -240,13 +240,14 @@ static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
 
 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
 				struct z_erofs_bvec *bvec,
-				struct page **candidate_bvpage)
+				struct page **candidate_bvpage,
+				struct page **pagepool)
 {
 	if (iter->cur >= iter->nr) {
 		struct page *nextpage = *candidate_bvpage;
 
 		if (!nextpage) {
-			nextpage = alloc_page(GFP_NOFS);
+			nextpage = erofs_allocpage(pagepool, GFP_NOFS);
 			if (!nextpage)
 				return -ENOMEM;
 			set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -547,6 +548,7 @@ struct z_erofs_decompress_frontend {
 	struct erofs_map_blocks map;
 	struct z_erofs_bvec_iter biter;
 
+	struct page *pagepool;
 	struct page *candidate_bvpage;
 	struct z_erofs_pcluster *pcl, *tailpcl;
 	z_erofs_next_pcluster_t owned_head;
@@ -581,8 +583,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
 	return false;
 }
 
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
-			       struct page **pagepool)
+static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
@@ -623,7 +624,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
 		 * succeeds or fallback to in-place I/O instead
 		 * to avoid any direct reclaim.
 		 */
-		newpage = erofs_allocpage(pagepool, gfp);
+		newpage = erofs_allocpage(&fe->pagepool, gfp);
 		if (!newpage)
 			continue;
 		set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
@@ -636,7 +637,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
 		if (page)
 			put_page(page);
 		else if (newpage)
-			erofs_pagepool_add(pagepool, newpage);
+			erofs_pagepool_add(&fe->pagepool, newpage);
 	}
 
 	/*
@@ -734,7 +735,8 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
 		    !fe->candidate_bvpage)
 			fe->candidate_bvpage = bvec->page;
 	}
-	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
+	ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
+				   &fe->pagepool);
 	fe->pcl->vcnt += (ret >= 0);
 	return ret;
 }
@@ -959,7 +961,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-				struct page *page, struct page **pagepool)
+				struct page *page)
 {
 	struct inode *const inode = fe->inode;
 	struct erofs_map_blocks *const map = &fe->map;
@@ -1017,7 +1019,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 	} else {
 		/* bind cache first when cached decompression is preferred */
-		z_erofs_bind_cache(fe, pagepool);
+		z_erofs_bind_cache(fe);
 	}
 hitted:
 	/*
@@ -1660,7 +1662,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
 }
 
 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
-				 struct page **pagepool,
 				 struct z_erofs_decompressqueue *fgq,
 				 bool *force_fg, bool readahead)
 {
@@ -1723,8 +1724,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 		do {
 			struct page *page;
 
-			page = pickup_page_for_submission(pcl, i++, pagepool,
-							  mc);
+			page = pickup_page_for_submission(pcl, i++,
+							  &f->pagepool, mc);
 			if (!page)
 				continue;
 
@@ -1789,16 +1790,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 }
 
 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
-			     struct page **pagepool, bool force_fg, bool ra)
+			     bool force_fg, bool ra)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
 	if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
-	z_erofs_submit_queue(f, pagepool, io, &force_fg, ra);
+	z_erofs_submit_queue(f, io, &force_fg, ra);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
-	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
+	z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
 
 	if (!force_fg)
 		return;
@@ -1807,16 +1808,15 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
 	wait_for_completion_io(&io[JQ_SUBMIT].u.done);
 
 	/* handle synchronous decompress queue in the caller context */
-	z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
+	z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
 }
 
 /*
  * Since partial uptodate is still unimplemented for now, we have to use
  * approximate readmore strategies as a start.
  */
 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
-				      struct readahead_control *rac,
-				      struct page **pagepool, bool backmost)
+				      struct readahead_control *rac, bool backmost)
 {
 	struct inode *inode = f->inode;
 	struct erofs_map_blocks *map = &f->map;
@@ -1858,7 +1858,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
 		if (PageUptodate(page)) {
 			unlock_page(page);
 		} else {
-			err = z_erofs_do_read_page(f, page, pagepool);
+			err = z_erofs_do_read_page(f, page);
 			if (err)
 				erofs_err(inode->i_sb,
 					  "readmore error at page %lu @ nid %llu",
@@ -1879,27 +1879,24 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
 	struct inode *const inode = page->mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
-	struct page *pagepool = NULL;
 	int err;
 
 	trace_erofs_readpage(page, false);
 	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
 
-	z_erofs_pcluster_readmore(&f, NULL, &pagepool, true);
-	err = z_erofs_do_read_page(&f, page, &pagepool);
-	z_erofs_pcluster_readmore(&f, NULL, &pagepool, false);
-
+	z_erofs_pcluster_readmore(&f, NULL, true);
+	err = z_erofs_do_read_page(&f, page);
+	z_erofs_pcluster_readmore(&f, NULL, false);
 	(void)z_erofs_collector_end(&f);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0),
-			 false);
+	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
 
 	erofs_put_metabuf(&f.map.buf);
-	erofs_release_pages(&pagepool);
+	erofs_release_pages(&f.pagepool);
 	return err;
 }
 
@@ -1908,12 +1905,12 @@ static void z_erofs_readahead(struct readahead_control *rac)
 	struct inode *const inode = rac->mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
-	struct page *pagepool = NULL, *head = NULL, *page;
+	struct page *head = NULL, *page;
 	unsigned int nr_pages;
 
 	f.headoffset = readahead_pos(rac);
 
-	z_erofs_pcluster_readmore(&f, rac, &pagepool, true);
+	z_erofs_pcluster_readmore(&f, rac, true);
 	nr_pages = readahead_count(rac);
 	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
 
@@ -1929,20 +1926,19 @@ static void z_erofs_readahead(struct readahead_control *rac)
 		/* traversal in reverse order */
 		head = (void *)page_private(page);
 
-		err = z_erofs_do_read_page(&f, page, &pagepool);
+		err = z_erofs_do_read_page(&f, page);
 		if (err)
 			erofs_err(inode->i_sb,
 				  "readahead error at page %lu @ nid %llu",
 				  page->index, EROFS_I(inode)->nid);
 		put_page(page);
 	}
-	z_erofs_pcluster_readmore(&f, rac, &pagepool, false);
+	z_erofs_pcluster_readmore(&f, rac, false);
 	(void)z_erofs_collector_end(&f);
 
-	z_erofs_runqueue(&f, &pagepool,
-			 z_erofs_is_sync_decompress(sbi, nr_pages), true);
+	z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
 	erofs_put_metabuf(&f.map.buf);
-	erofs_release_pages(&pagepool);
+	erofs_release_pages(&f.pagepool);
 }
 
 const struct address_space_operations z_erofs_aops = {
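Aside: every hunk above applies the same refactor. The per-request page pool, previously threaded through each helper as an extra "struct page **pagepool" argument, now lives inside struct z_erofs_decompress_frontend, so helpers reach it through the frontend pointer and the outermost callers (z_erofs_read_folio() / z_erofs_readahead()) drain it once via erofs_release_pages(&f.pagepool). Below is a minimal userspace sketch of that pattern, not kernel code: pool_page, frontend, pool_alloc, pool_add and pool_release are hypothetical stand-ins for struct page, the frontend struct, erofs_allocpage(), erofs_pagepool_add() and erofs_release_pages().

/* Minimal sketch of a pool owned by a request context (hypothetical names). */
#include <stdio.h>
#include <stdlib.h>

struct pool_page {                  /* stand-in for struct page */
	struct pool_page *next;     /* chained like page_private() chaining */
	char data[4096];
};

struct frontend {                   /* cf. z_erofs_decompress_frontend */
	struct pool_page *pagepool; /* per-request pool, owned here */
	/* ... map, biter, pcl, owned_head, ... */
};

/* cf. erofs_allocpage(): reuse a pooled page, else fall back to allocation */
static struct pool_page *pool_alloc(struct frontend *fe)
{
	struct pool_page *p = fe->pagepool;

	if (p) {
		fe->pagepool = p->next;
		return p;
	}
	return malloc(sizeof(*p));
}

/* cf. erofs_pagepool_add(): return an unused page to the pool */
static void pool_add(struct frontend *fe, struct pool_page *p)
{
	p->next = fe->pagepool;
	fe->pagepool = p;
}

/* cf. erofs_release_pages(): drain the pool once, in the outermost caller */
static void pool_release(struct frontend *fe)
{
	while (fe->pagepool) {
		struct pool_page *p = fe->pagepool;

		fe->pagepool = p->next;
		free(p);
	}
}

int main(void)
{
	struct frontend fe = { .pagepool = NULL };
	struct pool_page *p = pool_alloc(&fe); /* pool empty: allocates */

	pool_add(&fe, p);   /* preallocated but unused: back to the pool */
	p = pool_alloc(&fe); /* reused from the pool, no fresh allocation */
	pool_add(&fe, p);
	pool_release(&fe);
	printf("pool drained\n");
	return 0;
}

The payoff mirrors the patch: no helper needs an extra pool argument, and unused preallocated pages are recycled across the whole request rather than freed and reallocated at each call site.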