
Commit 54ed3fd

erofs: record pclustersize in bytes instead of pages
Currently, compressed sizes are recorded in pages using `pclusterpages`. However, for tailpacking pclusters, `tailpacking_size` is used instead. This approach doesn't work when dealing with sub-page blocks. To address this, let's switch them to the unified `pclustersize` in bytes.

Reviewed-by: Yue Hu <[email protected]>
Reviewed-by: Chao Yu <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
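Side note: the core of the change is that one byte-granular `pclustersize` field now covers both tailpacking and multi-page pclusters, with the page count derived on demand via PAGE_ALIGN()/PAGE_SHIFT as seen in z_erofs_pclusterpages() in the diff below. The following is a minimal userspace sketch of that conversion, not kernel code; it assumes a 4KiB page size and uses stand-in EX_* macros in place of the kernel ones.

#include <stdio.h>

/* stand-ins for the kernel's PAGE_SIZE / PAGE_SHIFT / PAGE_ALIGN, 4KiB assumed */
#define EX_PAGE_SIZE	4096u
#define EX_PAGE_SHIFT	12
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

/* mirrors PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT from the patch */
static unsigned int pcluster_nrpages(unsigned int pclustersize)
{
	return EX_PAGE_ALIGN(pclustersize) >> EX_PAGE_SHIFT;
}

int main(void)
{
	printf("%u\n", pcluster_nrpages(300));		/* tailpacking-sized: 1 page */
	printf("%u\n", pcluster_nrpages(16384));	/* 16KiB pcluster: 4 pages */
	return 0;
}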
1 parent 1923516 commit 54ed3fd

File tree

1 file changed (+26 −38 lines changed)


fs/erofs/zdata.c

Lines changed: 26 additions & 38 deletions
@@ -56,6 +56,9 @@ struct z_erofs_pcluster {
 	/* L: total number of bvecs */
 	unsigned int vcnt;
 
+	/* I: pcluster size (compressed size) in bytes */
+	unsigned int pclustersize;
+
 	/* I: page offset of start position of decompression */
 	unsigned short pageofs_out;
 
@@ -70,14 +73,6 @@ struct z_erofs_pcluster {
 		struct rcu_head rcu;
 	};
 
-	union {
-		/* I: physical cluster size in pages */
-		unsigned short pclusterpages;
-
-		/* I: tailpacking inline compressed size */
-		unsigned short tailpacking_size;
-	};
-
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;
 
@@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 {
-	if (z_erofs_is_inline_pcluster(pcl))
-		return 1;
-	return pcl->pclusterpages;
+	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
 }
 
 /*
@@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void)
 	return 0;
 }
 
-static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
 {
-	int i;
+	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct z_erofs_pcluster_slab *pcs = pcluster_pool;
 
-	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
-		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
 		struct z_erofs_pcluster *pcl;
 
 		if (nrpages > pcs->maxpages)
@@ -312,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
 		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
 		if (!pcl)
 			return ERR_PTR(-ENOMEM);
-		pcl->pclusterpages = nrpages;
+		pcl->pclustersize = size;
 		return pcl;
 	}
 	return ERR_PTR(-EINVAL);
@@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
 	bool standalone = true;
 	/*
@@ -572,10 +566,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
 
-	for (i = 0; i < pcl->pclusterpages; ++i) {
-		struct page *page;
+	for (i = 0; i < pclusterpages; ++i) {
+		struct page *page, *newpage;
 		void *t;	/* mark pages just found for debugging */
-		struct page *newpage = NULL;
 
 		/* the compressed page was loaded before */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
@@ -585,16 +578,16 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 		if (page) {
 			t = (void *)((unsigned long)page | 1);
+			newpage = NULL;
 		} else {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
 			if (!shouldalloc)
 				continue;
 
 			/*
-			 * try to use cached I/O if page allocation
-			 * succeeds or fallback to in-place I/O instead
-			 * to avoid any direct reclaim.
+			 * Try cached I/O if allocation succeeds or fallback to
+			 * in-place I/O instead to avoid any direct reclaim.
 			 */
 			newpage = erofs_allocpage(&fe->pagepool, gfp);
 			if (!newpage)
@@ -626,14 +619,15 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 	/*
 	 * refcount of workgroup is now freezed as 0,
 	 * therefore no need to worry about available decompression users.
 	 */
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page = pcl->compressed_bvecs[i].page;
 
 		if (!page)
@@ -657,6 +651,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool ret;
 	int i;
 
@@ -669,7 +664,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		goto out;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 			ret = true;
@@ -778,20 +773,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
+	struct super_block *sb = fe->inode->i_sb;
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
 	struct erofs_workgroup *grp;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
-	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+	    (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
 
 	/* no available pcluster, let's allocate one */
-	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
-			map->m_plen >> PAGE_SHIFT);
+	pcl = z_erofs_alloc_pcluster(map->m_plen);
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
 
@@ -816,9 +811,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
 		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
-		pcl->tailpacking_size = map->m_plen;
 	} else {
-		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+		pcl->obj.index = erofs_blknr(sb, map->m_pa);
 
 		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
 		if (IS_ERR(grp)) {
@@ -1244,8 +1238,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	const struct z_erofs_decompressor *decompressor =
 			&erofs_decompressors[pcl->algorithmformat];
-	unsigned int i, inputsize;
-	int err2;
+	int i, err2;
 	struct page *page;
 	bool overlapped;
 
@@ -1282,18 +1275,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	if (err)
 		goto out;
 
-	if (z_erofs_is_inline_pcluster(pcl))
-		inputsize = pcl->tailpacking_size;
-	else
-		inputsize = pclusterpages * PAGE_SIZE;
-
 	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
 					.sb = be->sb,
 					.in = be->compressed_pages,
 					.out = be->decompressed_pages,
 					.pageofs_in = pcl->pageofs_in,
 					.pageofs_out = pcl->pageofs_out,
-					.inputsize = inputsize,
+					.inputsize = pcl->pclustersize,
 					.outputsize = pcl->length,
 					.alg = pcl->algorithmformat,
 					.inplace_io = overlapped,
@@ -1668,7 +1656,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 		(void)erofs_map_dev(sb, &mdev);
 
 		cur = mdev.m_pa;
-		end = cur + (pcl->pclusterpages << PAGE_SHIFT);
+		end = cur + pcl->pclustersize;
 		do {
 			z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
 			if (!bvec.bv_page)
