Commit 7b4e372 (1 parent: 967c28b)

erofs: adapt managed inode operations into folios
This patch gets rid of erofs_try_to_free_cached_page() and folds it into
.release_folio(). It also moves the managed inode operations into zdata.c,
which simplifies the code a bit. No logic changes.

Signed-off-by: Gao Xiang <[email protected]>
Reviewed-by: Yue Hu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
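For readers less familiar with the folio API, the conversion below is a
one-for-one swap of the old struct page helpers. A minimal illustrative
sketch of the substitutions this patch relies on (demo_release() and its
body are made-up, not part of the patch):

	/* Hypothetical example of the page-to-folio helper substitutions. */
	static bool demo_release(struct folio *folio)
	{
		void *priv;

		if (!folio_test_private(folio))		/* was: PagePrivate(page) */
			return true;
		priv = folio_get_private(folio);	/* was: (void *)page_private(page) */
		/* ... caller-specific teardown of priv would go here ... */
		folio_detach_private(folio);		/* was: detach_page_private(page) */
		return priv != NULL;
	}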
File tree: 3 files changed (+53, -71 lines)

fs/erofs/internal.h

Lines changed: 2 additions & 1 deletion
@@ -500,7 +500,6 @@ int __init z_erofs_init_zip_subsystem(void);
 void z_erofs_exit_zip_subsystem(void);
 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
                                        struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct page *page);
 int z_erofs_load_lz4_config(struct super_block *sb,
                             struct erofs_super_block *dsb,
                             struct z_erofs_lz4_cfgs *lz4, int len);
@@ -511,6 +510,7 @@ void erofs_put_pcpubuf(void *ptr);
 int erofs_pcpubuf_growsize(unsigned int nrpages);
 void __init erofs_pcpubuf_init(void);
 void erofs_pcpubuf_exit(void);
+int erofs_init_managed_cache(struct super_block *sb);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -530,6 +530,7 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
 }
 static inline void erofs_pcpubuf_init(void) {}
 static inline void erofs_pcpubuf_exit(void) {}
+static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
 #endif /* !CONFIG_EROFS_FS_ZIP */

 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
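As is usual for config-gated kernel interfaces, the new declaration gets a
static inline no-op stub in the #else branch so callers compile unchanged
when CONFIG_EROFS_FS_ZIP is disabled. A minimal sketch of the idiom
(CONFIG_DEMO_FEATURE and demo_init() are made-up names):

	#ifdef CONFIG_DEMO_FEATURE
	int demo_init(struct super_block *sb);	/* real implementation in a .c file */
	#else
	/* no-op stub: callers need no #ifdef of their own */
	static inline int demo_init(struct super_block *sb) { return 0; }
	#endif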

fs/erofs/super.c

Lines changed: 0 additions & 62 deletions
@@ -599,68 +599,6 @@ static int erofs_fc_parse_param(struct fs_context *fc,
 	return 0;
 }

-#ifdef CONFIG_EROFS_FS_ZIP
-static const struct address_space_operations managed_cache_aops;
-
-static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
-{
-	bool ret = true;
-	struct address_space *const mapping = folio->mapping;
-
-	DBG_BUGON(!folio_test_locked(folio));
-	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
-
-	if (folio_test_private(folio))
-		ret = erofs_try_to_free_cached_page(&folio->page);
-
-	return ret;
-}
-
-/*
- * It will be called only on inode eviction. In case that there are still some
- * decompression requests in progress, wait with rescheduling for a bit here.
- * We could introduce an extra locking instead but it seems unnecessary.
- */
-static void erofs_managed_cache_invalidate_folio(struct folio *folio,
-						 size_t offset, size_t length)
-{
-	const size_t stop = length + offset;
-
-	DBG_BUGON(!folio_test_locked(folio));
-
-	/* Check for potential overflow in debug mode */
-	DBG_BUGON(stop > folio_size(folio) || stop < length);
-
-	if (offset == 0 && stop == folio_size(folio))
-		while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
-			cond_resched();
-}
-
-static const struct address_space_operations managed_cache_aops = {
-	.release_folio = erofs_managed_cache_release_folio,
-	.invalidate_folio = erofs_managed_cache_invalidate_folio,
-};
-
-static int erofs_init_managed_cache(struct super_block *sb)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	struct inode *const inode = new_inode(sb);
-
-	if (!inode)
-		return -ENOMEM;
-
-	set_nlink(inode, 1);
-	inode->i_size = OFFSET_MAX;
-
-	inode->i_mapping->a_ops = &managed_cache_aops;
-	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
-	sbi->managed_cache = inode;
-	return 0;
-}
-#else
-static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
-#endif
-
 static struct inode *erofs_nfs_get_inode(struct super_block *sb,
 					 u64 ino, u32 generation)
 {

fs/erofs/zdata.c

Lines changed: 51 additions & 8 deletions
@@ -665,29 +665,72 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
 	return 0;
 }

-int erofs_try_to_free_cached_page(struct page *page)
+static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
 {
-	struct z_erofs_pcluster *const pcl = (void *)page_private(page);
-	int ret, i;
+	struct z_erofs_pcluster *pcl = folio_get_private(folio);
+	bool ret;
+	int i;
+
+	if (!folio_test_private(folio))
+		return true;

 	if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
-		return 0;
+		return false;

-	ret = 0;
+	ret = false;
 	DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
 	for (i = 0; i < pcl->pclusterpages; ++i) {
-		if (pcl->compressed_bvecs[i].page == page) {
+		if (pcl->compressed_bvecs[i].page == &folio->page) {
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
-			ret = 1;
+			ret = true;
 			break;
 		}
 	}
 	erofs_workgroup_unfreeze(&pcl->obj, 1);
+
 	if (ret)
-		detach_page_private(page);
+		folio_detach_private(folio);
 	return ret;
 }

+/*
+ * It will be called only on inode eviction. In case that there are still some
+ * decompression requests in progress, wait with rescheduling for a bit here.
+ * An extra lock could be introduced instead but it seems unnecessary.
+ */
+static void z_erofs_cache_invalidate_folio(struct folio *folio,
+					   size_t offset, size_t length)
+{
+	const size_t stop = length + offset;
+
+	/* Check for potential overflow in debug mode */
+	DBG_BUGON(stop > folio_size(folio) || stop < length);
+
+	if (offset == 0 && stop == folio_size(folio))
+		while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+			cond_resched();
+}
+
+static const struct address_space_operations z_erofs_cache_aops = {
+	.release_folio = z_erofs_cache_release_folio,
+	.invalidate_folio = z_erofs_cache_invalidate_folio,
+};
+
+int erofs_init_managed_cache(struct super_block *sb)
+{
+	struct inode *const inode = new_inode(sb);
+
+	if (!inode)
+		return -ENOMEM;
+
+	set_nlink(inode, 1);
+	inode->i_size = OFFSET_MAX;
+	inode->i_mapping->a_ops = &z_erofs_cache_aops;
+	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+	EROFS_SB(sb)->managed_cache = inode;
+	return 0;
+}
+
 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
 				   struct z_erofs_bvec *bvec)
 {
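For orientation (not part of the commit): neither hook is called directly by
erofs; both are driven by the VM through the address_space_operations table.
Roughly, under assumptions about the contemporary mainline call chains:

	/*
	 * Simplified, assumed call chains:
	 *
	 * memory reclaim / page cache trimming:
	 *   filemap_release_folio(folio, gfp)
	 *     -> mapping->a_ops->release_folio()        i.e. z_erofs_cache_release_folio()
	 *
	 * inode eviction:
	 *   truncate_inode_pages_final(mapping)
	 *     -> ... -> folio_invalidate(folio, 0, folio_size(folio))
	 *       -> mapping->a_ops->invalidate_folio()   i.e. z_erofs_cache_invalidate_folio()
	 */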
