Skip to content

Commit d68ecca

Browse files
author
Matthew Wilcox (Oracle)
committed
mm/filemap: Allow large folios to be added to the page cache
We return -EEXIST if there are any non-shadow entries in the page cache in the range covered by the folio. If there are multiple shadow entries in the range, we set *shadowp to one of them (currently the one at the highest index). If that turns out to be the wrong answer, we can implement something more complex.

This is mostly modelled after the equivalent function in the shmem code.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
1 parent d4b4084 commit d68ecca

File tree

1 file changed

+22
-17
lines changed

1 file changed

+22
-17
lines changed

mm/filemap.c

Lines changed: 22 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -842,26 +842,27 @@ noinline int __filemap_add_folio(struct address_space *mapping,
842842
{
843843
XA_STATE(xas, &mapping->i_pages, index);
844844
int huge = folio_test_hugetlb(folio);
845-
int error;
846845
bool charged = false;
846+
long nr = 1;
847847

848848
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
849849
VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
850850
mapping_set_update(&xas, mapping);
851851

852-
folio_get(folio);
853-
folio->mapping = mapping;
854-
folio->index = index;
855-
856852
if (!huge) {
857-
error = mem_cgroup_charge(folio, NULL, gfp);
853+
int error = mem_cgroup_charge(folio, NULL, gfp);
858854
VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
859855
if (error)
860-
goto error;
856+
return error;
861857
charged = true;
858+
xas_set_order(&xas, index, folio_order(folio));
859+
nr = folio_nr_pages(folio);
862860
}
863861

864862
gfp &= GFP_RECLAIM_MASK;
863+
folio_ref_add(folio, nr);
864+
folio->mapping = mapping;
865+
folio->index = xas.xa_index;
865866

866867
do {
867868
unsigned int order = xa_get_order(xas.xa, xas.xa_index);
@@ -885,6 +886,8 @@ noinline int __filemap_add_folio(struct address_space *mapping,
885886
/* entry may have been split before we acquired lock */
886887
order = xa_get_order(xas.xa, xas.xa_index);
887888
if (order > folio_order(folio)) {
889+
/* How to handle large swap entries? */
890+
BUG_ON(shmem_mapping(mapping));
888891
xas_split(&xas, old, order);
889892
xas_reset(&xas);
890893
}
@@ -894,29 +897,31 @@ noinline int __filemap_add_folio(struct address_space *mapping,
894897
if (xas_error(&xas))
895898
goto unlock;
896899

897-
mapping->nrpages++;
900+
mapping->nrpages += nr;
898901

899902
/* hugetlb pages do not participate in page cache accounting */
900-
if (!huge)
901-
__lruvec_stat_add_folio(folio, NR_FILE_PAGES);
903+
if (!huge) {
904+
__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
905+
if (folio_test_pmd_mappable(folio))
906+
__lruvec_stat_mod_folio(folio,
907+
NR_FILE_THPS, nr);
908+
}
902909
unlock:
903910
xas_unlock_irq(&xas);
904911
} while (xas_nomem(&xas, gfp));
905912

906-
if (xas_error(&xas)) {
907-
error = xas_error(&xas);
908-
if (charged)
909-
mem_cgroup_uncharge(folio);
913+
if (xas_error(&xas))
910914
goto error;
911-
}
912915

913916
trace_mm_filemap_add_to_page_cache(folio);
914917
return 0;
915918
error:
919+
if (charged)
920+
mem_cgroup_uncharge(folio);
916921
folio->mapping = NULL;
917922
/* Leave page->index set: truncation relies upon it */
918-
folio_put(folio);
919-
return error;
923+
folio_put_refs(folio, nr);
924+
return xas_error(&xas);
920925
}
921926
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
922927

0 commit comments

Comments
 (0)