@@ -56,6 +56,9 @@ struct z_erofs_pcluster {
 	/* L: total number of bvecs */
 	unsigned int vcnt;
 
+	/* I: pcluster size (compressed size) in bytes */
+	unsigned int pclustersize;
+
 	/* I: page offset of start position of decompression */
 	unsigned short pageofs_out;
 
@@ -70,14 +73,6 @@ struct z_erofs_pcluster {
 		struct rcu_head rcu;
 	};
 
-	union {
-		/* I: physical cluster size in pages */
-		unsigned short pclusterpages;
-
-		/* I: tailpacking inline compressed size */
-		unsigned short tailpacking_size;
-	};
-
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;
 
@@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
 
 static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
 {
-	if (z_erofs_is_inline_pcluster(pcl))
-		return 1;
-	return pcl->pclusterpages;
+	return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT;
 }
 
 /*
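With the pcluster size now tracked in bytes, the page count is derived on demand by rounding up. A minimal userspace sketch of that arithmetic (not kernel code; PAGE_ALIGN/PAGE_SHIFT redefined locally, assuming 4 KiB pages):

/*
 * PAGE_ALIGN() rounds a byte count up to the next page boundary, so
 * PAGE_ALIGN(size) >> PAGE_SHIFT is the number of pages needed to hold
 * `size` compressed bytes.
 */
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned int pclusterpages(unsigned long pclustersize)
{
	return PAGE_ALIGN(pclustersize) >> PAGE_SHIFT;
}

int main(void)
{
	assert(pclusterpages(100) == 1);	/* inline/tailpacking-sized: one page */
	assert(pclusterpages(4096) == 1);
	assert(pclusterpages(4097) == 2);	/* rounds up, never truncates */
	assert(pclusterpages(65536) == 16);
	return 0;
}

Since an inline (tailpacking) pcluster's compressed data fits within a single block, the rounded-up result there is 1 page, which is why the removed special case is no longer needed.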
@@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void)
 	return 0;
 }
 
-static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
+static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
 {
-	int i;
+	unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct z_erofs_pcluster_slab *pcs = pcluster_pool;
 
-	for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
-		struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
+	for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
 		struct z_erofs_pcluster *pcl;
 
 		if (nrpages > pcs->maxpages)
@@ -312,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
 		pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
 		if (!pcl)
 			return ERR_PTR(-ENOMEM);
-		pcl->pclusterpages = nrpages;
+		pcl->pclustersize = size;
 		return pcl;
 	}
 	return ERR_PTR(-EINVAL);
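The rewritten allocator converts bytes to pages once up front, then walks the slab size classes as before. A standalone sketch of that size-class walk (the capacities below are illustrative, not the kernel's exact table):

#include <stdio.h>

struct slab_class { unsigned int maxpages; };

static const struct slab_class pool[] = { {1}, {4}, {16}, {64}, {128} };

/* Pick the first class whose capacity covers the requested page count. */
static int pick_class(unsigned int nrpages)
{
	const struct slab_class *pcs = pool;

	for (; pcs < pool + sizeof(pool) / sizeof(pool[0]); ++pcs)
		if (nrpages <= pcs->maxpages)
			return (int)(pcs - pool);
	return -1;	/* nothing large enough: the kernel returns -EINVAL */
}

int main(void)
{
	/* 1 -> class 0, 5 -> class 2, 200 -> no fit */
	printf("%d %d %d\n", pick_class(1), pick_class(5), pick_class(200));
	return 0;
}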
@@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
 	bool standalone = true;
 	/*
@@ -572,10 +566,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
 
-	for (i = 0; i < pcl->pclusterpages; ++i) {
-		struct page *page;
+	for (i = 0; i < pclusterpages; ++i) {
+		struct page *page, *newpage;
 		void *t;	/* mark pages just found for debugging */
-		struct page *newpage = NULL;
 
 		/* the compressed page was loaded before */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
@@ -585,16 +578,16 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 		if (page) {
 			t = (void *)((unsigned long)page | 1);
+			newpage = NULL;
 		} else {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
 			if (!shouldalloc)
 				continue;
 
 			/*
-			 * try to use cached I/O if page allocation
-			 * succeeds or fallback to in-place I/O instead
-			 * to avoid any direct reclaim.
+			 * Try cached I/O if allocation succeeds or fallback to
+			 * in-place I/O instead to avoid any direct reclaim.
 			 */
 			newpage = erofs_allocpage(&fe->pagepool, gfp);
 			if (!newpage)
@@ -626,14 +619,15 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 {
 	struct z_erofs_pcluster *const pcl =
 		container_of(grp, struct z_erofs_pcluster, obj);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	int i;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 	/*
 	 * refcount of workgroup is now freezed as 0,
 	 * therefore no need to worry about available decompression users.
 	 */
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page = pcl->compressed_bvecs[i].page;
 
 		if (!page)
@@ -657,6 +651,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
 	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool ret;
 	int i;
 
@@ -669,7 +664,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 		goto out;
 
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
-	for (i = 0; i < pcl->pclusterpages; ++i) {
+	for (i = 0; i < pclusterpages; ++i) {
 		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 			ret = true;
@@ -778,20 +773,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
+	struct super_block *sb = fe->inode->i_sb;
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
 	struct erofs_workgroup *grp;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
-	    (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) {
+	    (!ztailpacking && !erofs_blknr(sb, map->m_pa))) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
 	}
 
 	/* no available pcluster, let's allocate one */
-	pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
-				     map->m_plen >> PAGE_SHIFT);
+	pcl = z_erofs_alloc_pcluster(map->m_plen);
 	if (IS_ERR(pcl))
 		return PTR_ERR(pcl);
 
@@ -816,9 +811,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 	if (ztailpacking) {
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
 		pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa);
-		pcl->tailpacking_size = map->m_plen;
 	} else {
-		pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+		pcl->obj.index = erofs_blknr(sb, map->m_pa);
 
 		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
 		if (IS_ERR(grp)) {
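erofs_blknr(sb, map->m_pa) replaces the open-coded m_pa >> PAGE_SHIFT, indexing by filesystem block rather than by page, so block sizes smaller than PAGE_SIZE stay correct. A rough sketch of the helper semantics (plain integers, illustrative only; the kernel helpers take the superblock to obtain the block-size bits):

#include <stdint.h>

/* Block number: the byte address shifted down by the block-size bits. */
static inline uint32_t blknr(uint64_t addr, unsigned int blkszbits)
{
	return (uint32_t)(addr >> blkszbits);
}

/* Intra-block offset: the remainder within one block. */
static inline uint32_t blkoff(uint64_t addr, unsigned int blkszbits)
{
	return (uint32_t)(addr & ((1ULL << blkszbits) - 1));
}

int main(void)
{
	/* e.g. 512-byte blocks: address 0x1234 -> block 9, offset 0x34 */
	return !(blknr(0x1234, 9) == 9 && blkoff(0x1234, 9) == 0x34);
}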
@@ -1244,8 +1238,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	const struct z_erofs_decompressor *decompressor =
 				&erofs_decompressors[pcl->algorithmformat];
-	unsigned int i, inputsize;
-	int err2;
+	int i, err2;
 	struct page *page;
 	bool overlapped;
 
@@ -1282,18 +1275,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	if (err)
 		goto out;
 
-	if (z_erofs_is_inline_pcluster(pcl))
-		inputsize = pcl->tailpacking_size;
-	else
-		inputsize = pclusterpages * PAGE_SIZE;
-
 	err = decompressor->decompress(&(struct z_erofs_decompress_req) {
 					.sb = be->sb,
 					.in = be->compressed_pages,
 					.out = be->decompressed_pages,
 					.pageofs_in = pcl->pageofs_in,
 					.pageofs_out = pcl->pageofs_out,
-					.inputsize = inputsize,
+					.inputsize = pcl->pclustersize,
 					.outputsize = pcl->length,
 					.alg = pcl->algorithmformat,
 					.inplace_io = overlapped,
@@ -1668,7 +1656,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
 	(void)erofs_map_dev(sb, &mdev);
 
 	cur = mdev.m_pa;
-	end = cur + (pcl->pclusterpages << PAGE_SHIFT);
+	end = cur + pcl->pclustersize;
 	do {
 		z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc);
 		if (!bvec.bv_page)
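In the submit path the end of the I/O range is now byte-accurate instead of page-rounded, which matters once sub-page pcluster sizes are representable. A tiny illustrative comparison (hypothetical numbers):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t cur = 8192;		/* on-disk start of the pcluster */
	uint64_t pclustersize = 5000;	/* compressed size in bytes */
	uint64_t nrpages = (pclustersize + (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	printf("page-rounded end: %llu\n",
	       (unsigned long long)(cur + (nrpages << PAGE_SHIFT)));	/* 16384 */
	printf("byte-accurate end: %llu\n",
	       (unsigned long long)(cur + pclustersize));		/* 13192 */
	return 0;
}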