Commit 19fb907

erofs: get rid of justfound debugging tag
`justfound` was introduced to identify cached folios that have just been added to
compressed bvecs, so that more checks could be applied in the I/O submission path.

EROFS is now quite stable compared to the codebase at that stage, and `justfound`
has become a burden for upcoming features. Drop it.

Reviewed-by: Chao Yu <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 0e25a78 commit 19fb907
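
For context: the `justfound` tag worked by borrowing bit 0 of the page pointer stored in `compressed_bvecs[]`. z_erofs_bind_cache() set the bit on whichever pointer it had just installed (a managed-cache hit or a freshly preallocated page), and z_erofs_fill_bio_vec() masked it off again and fed it to the DBG_BUGON() sanity checks this commit removes. The standalone sketch below (illustrative only, with made-up names, not the kernel code) shows that low-bit pointer-tagging idiom; it works because the tagged objects are at least word-aligned, so bit 0 of their address is otherwise always zero.

/* Illustrative sketch only -- not the kernel code. */
#include <stdint.h>
#include <stdio.h>

struct obj {				/* hypothetical stand-in for struct page */
	int payload;
};

static void *tag_justfound(struct obj *p)
{
	/* mirrors the old "(unsigned long)page | 1" in z_erofs_bind_cache() */
	return (void *)((uintptr_t)p | 1);
}

static struct obj *untag(void *slot, int *justfound)
{
	/* mirrors the old untagging in z_erofs_fill_bio_vec() */
	*justfound = (uintptr_t)slot & 1;
	return (struct obj *)((uintptr_t)slot & ~(uintptr_t)1);
}

int main(void)
{
	struct obj o = { .payload = 42 };
	int justfound;
	void *slot = tag_justfound(&o);			/* what got stored in the bvec slot */
	struct obj *p = untag(slot, &justfound);	/* what got read back at I/O time */

	printf("payload=%d justfound=%d\n", p->payload, justfound);
	return 0;
}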

File tree

1 file changed (+3, -17 lines)

fs/erofs/zdata.c

Lines changed: 3 additions & 17 deletions
@@ -565,17 +565,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page, *newpage;
-		void *t;	/* mark pages just found for debugging */
 
 		/* Inaccurate check w/o locking to avoid unneeded lookups */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
 			continue;
 
 		page = find_get_page(mc, pcl->obj.index + i);
-		if (page) {
-			t = (void *)((unsigned long)page | 1);
-			newpage = NULL;
-		} else {
+		if (!page) {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
 			if (!shouldalloc)
@@ -589,11 +585,10 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 			if (!newpage)
 				continue;
 			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
-			t = (void *)((unsigned long)newpage | 1);
 		}
 		spin_lock(&pcl->obj.lockref.lock);
 		if (!pcl->compressed_bvecs[i].page) {
-			pcl->compressed_bvecs[i].page = t;
+			pcl->compressed_bvecs[i].page = page ? page : newpage;
 			spin_unlock(&pcl->obj.lockref.lock);
 			continue;
 		}
@@ -1423,7 +1418,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	struct z_erofs_bvec zbv;
 	struct address_space *mapping;
 	struct page *page;
-	int justfound, bs = i_blocksize(f->inode);
+	int bs = i_blocksize(f->inode);
 
 	/* Except for inplace pages, the entire page can be used for I/Os */
 	bvec->bv_offset = 0;
@@ -1432,9 +1427,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	spin_lock(&pcl->obj.lockref.lock);
 	zbv = pcl->compressed_bvecs[nr];
 	page = zbv.page;
-	justfound = (unsigned long)page & 1UL;
-	page = (struct page *)((unsigned long)page & ~1UL);
-	pcl->compressed_bvecs[nr].page = page;
 	spin_unlock(&pcl->obj.lockref.lock);
 	if (!page)
 		goto out_allocpage;
@@ -1465,17 +1457,13 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	}
 
 	lock_page(page);
-	/* only true if page reclaim goes wrong, should never happen */
-	DBG_BUGON(justfound && PagePrivate(page));
-
 	/* the cached page is still in managed cache */
 	if (page->mapping == mc) {
 		/*
 		 * The cached page is still available but without a valid
 		 * `->private` pcluster hint. Let's reconnect them.
 		 */
 		if (!PagePrivate(page)) {
-			DBG_BUGON(!justfound);
 			/* compressed_bvecs[] already takes a ref */
 			attach_page_private(page, pcl);
 			put_page(page);
@@ -1494,8 +1482,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	 * allocate a new page for compressed data.
 	 */
 	DBG_BUGON(page->mapping);
-	DBG_BUGON(!justfound);
-
 	tocache = true;
 	unlock_page(page);
 	put_page(page);
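
With the tag gone, the z_erofs_bind_cache() hunk above stores a plain pointer: `page ? page : newpage` picks the managed-cache hit when the lookup succeeded and the freshly preallocated page otherwise; every other path has already bailed out with `continue`, and the conditional operator only evaluates the operand it selects. A minimal userspace sketch of that selection (illustrative names only, not the kernel's):

#include <stdio.h>

static int cached_obj, fallback_obj;	/* hypothetical stand-ins for pages */

/* Pretend cache lookup: returns a pointer on a hit, NULL on a miss. */
static void *lookup_cache(int hit)
{
	return hit ? (void *)&cached_obj : NULL;
}

int main(void)
{
	for (int hit = 0; hit <= 1; hit++) {
		void *page = lookup_cache(hit);
		void *newpage = NULL;

		if (!page)
			newpage = &fallback_obj;	/* preallocation fallback */

		/* mirrors the new assignment: store the plain pointer, no tag
		 * bit is needed to remember where it came from */
		void *slot = page ? page : newpage;

		printf("hit=%d -> %s\n", hit,
		       slot == (void *)&cached_obj ? "cached page" : "new page");
	}
	return 0;
}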
