Skip to content

Commit 1001042

Browse files
committed
erofs: avoid refcounting short-lived pages
LZ4 always reuses the decompressed buffer as its LZ77 sliding window (dynamic dictionary) for optimal performance. However, in specific cases, the output buffer may not fully contain valid page cache pages, resulting in the use of short-lived pages for temporary purposes. Due to the limited sliding window size, LZ4 short-lived bounce pages can also be reused in a sliding manner, so each bounce page can be vmapped multiple times in different relative positions by design. In order to avoid double frees, currently, reuse counts are recorded via page refcount, but it will no longer be used as-is in the future world of Memdescs. Just maintain a lookup table to check if a short-lived page is reused. Signed-off-by: Gao Xiang <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 1c076f1 commit 1001042

File tree

3 files changed

+24
-26
lines changed

3 files changed

+24
-26
lines changed

fs/erofs/compress.h

Lines changed: 6 additions & 16 deletions
@@ -54,32 +54,22 @@ struct z_erofs_decompressor {
  */

 /*
- * short-lived pages are pages directly from buddy system with specific
- * page->private (no need to set PagePrivate since these are non-LRU /
- * non-movable pages and bypass reclaim / migration code).
+ * Currently, short-lived pages are pages directly from buddy system
+ * with specific page->private (Z_EROFS_SHORTLIVED_PAGE).
+ * In the future world of Memdescs, it should be type 0 (Misc) memory
+ * which type can be checked with a new helper.
  */
 static inline bool z_erofs_is_shortlived_page(struct page *page)
 {
-	if (page->private != Z_EROFS_SHORTLIVED_PAGE)
-		return false;
-
-	DBG_BUGON(page->mapping);
-	return true;
+	return page->private == Z_EROFS_SHORTLIVED_PAGE;
 }

 static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
					      struct page *page)
 {
 	if (!z_erofs_is_shortlived_page(page))
 		return false;
-
-	/* short-lived pages should not be used by others at the same time */
-	if (page_ref_count(page) > 1) {
-		put_page(page);
-	} else {
-		/* follow the pcluster rule above. */
-		erofs_pagepool_add(pagepool, page);
-	}
+	erofs_pagepool_add(pagepool, page);
 	return true;
 }
8575

fs/erofs/decompressor.c

Lines changed: 0 additions & 1 deletion
@@ -110,7 +110,6 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,

 		if (top) {
 			victim = availables[--top];
-			get_page(victim);
 		} else {
 			victim = __erofs_allocpage(pagepool, rq->gfp, true);
 			if (!victim)

fs/erofs/zdata.c

Lines changed: 18 additions & 9 deletions
@@ -1221,7 +1221,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	const struct z_erofs_decompressor *decomp =
 			z_erofs_decomp[pcl->algorithmformat];
-	int i, err2;
+	int i, j, jtop, err2;
 	struct page *page;
 	bool overlapped;

@@ -1279,10 +1279,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 		WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
 		put_page(page);
 	} else {
+		/* managed folios are still left in compressed_bvecs[] */
 		for (i = 0; i < pclusterpages; ++i) {
-			/* consider shortlived pages added when decompressing */
 			page = be->compressed_pages[i];
-
 			if (!page ||
 			    erofs_folio_is_managed(sbi, page_folio(page)))
 				continue;
@@ -1293,21 +1292,31 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 	if (be->compressed_pages < be->onstack_pages ||
 	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
 		kvfree(be->compressed_pages);
-	z_erofs_fill_other_copies(be, err);

+	jtop = 0;
+	z_erofs_fill_other_copies(be, err);
 	for (i = 0; i < be->nr_pages; ++i) {
 		page = be->decompressed_pages[i];
 		if (!page)
 			continue;

 		DBG_BUGON(z_erofs_page_is_invalidated(page));
-
-		/* recycle all individual short-lived pages */
-		if (z_erofs_put_shortlivedpage(be->pagepool, page))
+		if (!z_erofs_is_shortlived_page(page)) {
+			z_erofs_onlinefolio_end(page_folio(page), err);
 			continue;
-		z_erofs_onlinefolio_end(page_folio(page), err);
+		}
+		if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
+			erofs_pagepool_add(be->pagepool, page);
+			continue;
+		}
+		for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
+			;
+		if (j >= jtop)	/* this bounce page is newly detected */
+			be->decompressed_pages[jtop++] = page;
 	}
-
+	while (jtop)
+		erofs_pagepool_add(be->pagepool,
+				   be->decompressed_pages[--jtop]);
 	if (be->decompressed_pages != be->onstack_pages)
 		kvfree(be->decompressed_pages);
0 commit comments

Comments
 (0)