@@ -96,16 +96,9 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
 	DBG_BUGON(1);
 }
 
-/*
- * a compressed_pages[] placeholder in order to avoid
- * being filled with file pages for in-place decompression.
- */
-#define PAGE_UNALLOCATED	((void *)0x5F0E4B1D)
-
 /* how to allocate cached pages for a pcluster */
 enum z_erofs_cache_alloctype {
 	DONTALLOC,	/* don't allocate any cached pages */
-	DELAYEDALLOC,	/* delayed allocation (at the time of submitting io) */
 	/*
 	 * try to use cached I/O if page allocation succeeds or fallback
 	 * to in-place I/O instead to avoid any direct reclaim.
@@ -267,10 +260,6 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
 			switch (type) {
-			case DELAYEDALLOC:
-				t = tagptr_init(compressed_page_t,
-						PAGE_UNALLOCATED);
-				break;
 			case TRYALLOC:
 				newpage = erofs_allocpage(pagepool, gfp);
 				if (!newpage)
@@ -371,8 +360,8 @@ static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
 
 /* callers must be with collection lock held */
 static int z_erofs_attach_page(struct z_erofs_collector *clt,
-			       struct page *page,
-			       enum z_erofs_page_type type)
+			       struct page *page, enum z_erofs_page_type type,
+			       bool pvec_safereuse)
 {
 	int ret;
 
@@ -382,9 +371,9 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
 	    z_erofs_try_inplace_io(clt, page))
 		return 0;
 
-	ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
+	ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
+				      pvec_safereuse);
 	clt->cl->vcnt += (unsigned int)ret;
-
 	return ret ? 0 : -EAGAIN;
 }
 
@@ -727,15 +716,16 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
 
 retry:
-	err = z_erofs_attach_page(clt, page, page_type);
+	err = z_erofs_attach_page(clt, page, page_type,
+				  clt->mode >= COLLECT_PRIMARY_FOLLOWED);
 	/* should allocate an additional short-lived page for pagevec */
 	if (err == -EAGAIN) {
 		struct page *const newpage =
 				alloc_page(GFP_NOFS | __GFP_NOFAIL);
 
 		set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
 		err = z_erofs_attach_page(clt, newpage,
-					  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+					  Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
 		if (!err)
 			goto retry;
 	}
@@ -1089,15 +1079,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 	if (!page)
 		goto out_allocpage;
 
-	/*
-	 * the cached page has not been allocated and
-	 * an placeholder is out there, prepare it now.
-	 */
-	if (page == PAGE_UNALLOCATED) {
-		tocache = true;
-		goto out_allocpage;
-	}
-
 	/* process the target tagged pointer */
 	t = tagptr_init(compressed_page_t, page);
 	justfound = tagptr_unfold_tags(t);
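
A note on the tagged-pointer handling visible in this last hunk: compressed_page_t packs a page pointer together with a small tag (the "justfound" bit unfolded above) into a single word, which is also why the removed PAGE_UNALLOCATED placeholder had to be a distinguishable, never-dereferenced pointer value. Below is a minimal standalone sketch of the general low-bit tagging idea only; the names tagged_ptr_t, tag_ptr, untag_ptr and ptr_tags are invented for illustration and are not the EROFS tagptr helpers used in the diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* a pointer whose low bits carry a small tag (illustrative only) */
typedef struct { uintptr_t v; } tagged_ptr_t;

#define TAG_BITS	2U	/* assumes at least 4-byte object alignment */
#define TAG_MASK	((uintptr_t)((1U << TAG_BITS) - 1))

/* pack an aligned pointer and a small tag into one word */
static tagged_ptr_t tag_ptr(void *p, unsigned int tags)
{
	assert(((uintptr_t)p & TAG_MASK) == 0);	/* alignment keeps low bits free */
	assert(tags <= TAG_MASK);
	return (tagged_ptr_t){ (uintptr_t)p | tags };
}

/* recover the original pointer by masking the tag bits off */
static void *untag_ptr(tagged_ptr_t t)
{
	return (void *)(t.v & ~TAG_MASK);
}

/* recover the tag bits only */
static unsigned int ptr_tags(tagged_ptr_t t)
{
	return (unsigned int)(t.v & TAG_MASK);
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));	/* stands in for a struct page */
	tagged_ptr_t t = tag_ptr(obj, 1);	/* tag bit ~ "justfound" above */

	printf("pointer intact: %d, tag: %u\n",
	       untag_ptr(t) == (void *)obj, ptr_tags(t));
	free(obj);
	return 0;
}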