@@ -516,61 +516,54 @@ static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
 	struct z_erofs_pcluster *pcl = fe->pcl;
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	bool shouldalloc = z_erofs_should_alloc_cache(fe);
-	bool standalone = true;
-	/*
-	 * optimistic allocation without direct reclaim since inplace I/O
-	 * can be used if low memory otherwise.
-	 */
+	bool may_bypass = true;
+	/* Optimistic allocation, as in-place I/O can be used as a fallback */
 	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
 			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+	struct folio *folio, *newfolio;
 	unsigned int i;
 
 	if (i_blocksize(fe->inode) != PAGE_SIZE ||
 	    fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
 
 	for (i = 0; i < pclusterpages; ++i) {
-		struct page *page, *newpage;
-
 		/* Inaccurate check w/o locking to avoid unneeded lookups */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
 			continue;
 
-		page = find_get_page(mc, pcl->index + i);
-		if (!page) {
-			/* I/O is needed, no possible to decompress directly */
-			standalone = false;
+		folio = filemap_get_folio(mc, pcl->index + i);
+		if (IS_ERR(folio)) {
+			may_bypass = false;
 			if (!shouldalloc)
 				continue;
 
 			/*
-			 * Try cached I/O if allocation succeeds or fallback to
-			 * in-place I/O instead to avoid any direct reclaim.
+			 * Allocate a managed folio for cached I/O, or it may be
+			 * then filled with a file-backed folio for in-place I/O
 			 */
-			newpage = erofs_allocpage(&fe->pagepool, gfp);
-			if (!newpage)
+			newfolio = filemap_alloc_folio(gfp, 0);
+			if (!newfolio)
 				continue;
-			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
+			newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
+			folio = NULL;
 		}
 		spin_lock(&pcl->lockref.lock);
 		if (!pcl->compressed_bvecs[i].page) {
-			pcl->compressed_bvecs[i].page = page ? page : newpage;
+			pcl->compressed_bvecs[i].page =
+					folio_page(folio ?: newfolio, 0);
 			spin_unlock(&pcl->lockref.lock);
 			continue;
 		}
 		spin_unlock(&pcl->lockref.lock);
-
-		if (page)
-			put_page(page);
-		else if (newpage)
-			erofs_pagepool_add(&fe->pagepool, newpage);
+		folio_put(folio ?: newfolio);
 	}
 
 	/*
-	 * don't do inplace I/O if all compressed pages are available in
-	 * managed cache since it can be moved to the bypass queue instead.
+	 * Don't perform in-place I/O if all compressed pages are available in
+	 * the managed cache, as the pcluster can be moved to the bypass queue.
 	 */
-	if (standalone)
+	if (may_bypass)
 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 }
 
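Note on the hunk above: find_get_page() returns NULL on a page-cache miss, whereas filemap_get_folio() returns ERR_PTR(-ENOENT), so the miss test changes from !page to IS_ERR(folio), and folio is reset to NULL once a preallocated newfolio takes its place. The GNU C shorthand "a ?: b" (equivalent to "a ? a : b") then lets a single folio_page() or folio_put() expression cover whichever of the two folios is in use. A minimal sketch of that lookup pattern, assuming an order-0 folio; the helper name below is hypothetical and not part of this commit:

/* Hypothetical helper illustrating the filemap_get_folio() miss handling. */
static struct folio *lookup_or_adopt(struct address_space *mapping,
				     pgoff_t index, struct folio *newfolio)
{
	/* On a hit, the returned folio carries a reference the caller owns
	 * and must eventually drop with folio_put(). */
	struct folio *folio = filemap_get_folio(mapping, index);

	if (IS_ERR(folio))		/* miss: ERR_PTR(-ENOENT), never NULL */
		folio = NULL;
	return folio ?: newfolio;	/* GNU C: "folio ? folio : newfolio" */
}
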
@@ -1480,12 +1473,8 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
 
 	folio = page_folio(zbv.page);
-	/*
-	 * Handle preallocated cached folios. We tried to allocate such folios
-	 * without triggering direct reclaim. If allocation failed, inplace
-	 * file-backed folios will be used instead.
-	 */
-	if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
+	/* For preallocated managed folios, add them to page cache here */
+	if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) {
 		tocache = true;
 		goto out_tocache;
 	}
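
The second hunk consumes the sentinel planted by z_erofs_bind_cache() above. The old code needed set_page_private() plus a (void *) cast on the read side because page->private is an unsigned long behind accessor helpers, while folio->private is declared as a void * and can be assigned and compared directly. A hedged sketch of the tag-and-detect round trip; the sentinel's definition is not shown in this diff, so the value below is an assumption:

/* Assumed pointer-typed magic value; the real definition lives in fs/erofs. */
#define Z_EROFS_PREALLOCATED_FOLIO	((void *)0x5F0D0D5A)

/* Tagging side (z_erofs_bind_cache): mark a folio allocated without
 * direct reclaim, since gfp masks out __GFP_DIRECT_RECLAIM. */
newfolio = filemap_alloc_folio(gfp, 0);
if (newfolio)
	newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;

/* Detecting side (z_erofs_fill_bio_vec): route the tagged folio into the
 * managed cache at bio-fill time. */
if (folio->private == Z_EROFS_PREALLOCATED_FOLIO)
	tocache = true;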