Skip to content

Commit 820b05e

Browse files
Author: Matthew Wilcox (Oracle) (committed)
filemap: Use a folio in filemap_map_pages
Saves 61 bytes due to fewer calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
1 parent 9184a30 commit 820b05e

File tree

1 file changed

+14
-13
lines changed

1 file changed

+14
-13
lines changed

mm/filemap.c

Lines changed: 14 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -3227,7 +3227,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
32273227
return false;
32283228
}
32293229

3230-
static struct page *next_uptodate_page(struct folio *folio,
3230+
static struct folio *next_uptodate_page(struct folio *folio,
32313231
struct address_space *mapping,
32323232
struct xa_state *xas, pgoff_t end_pgoff)
32333233
{
@@ -3258,7 +3258,7 @@ static struct page *next_uptodate_page(struct folio *folio,
32583258
max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
32593259
if (xas->xa_index >= max_idx)
32603260
goto unlock;
3261-
return &folio->page;
3261+
return folio;
32623262
unlock:
32633263
folio_unlock(folio);
32643264
skip:
@@ -3268,15 +3268,15 @@ static struct page *next_uptodate_page(struct folio *folio,
32683268
return NULL;
32693269
}
32703270

3271-
static inline struct page *first_map_page(struct address_space *mapping,
3271+
static inline struct folio *first_map_page(struct address_space *mapping,
32723272
struct xa_state *xas,
32733273
pgoff_t end_pgoff)
32743274
{
32753275
return next_uptodate_page(xas_find(xas, end_pgoff),
32763276
mapping, xas, end_pgoff);
32773277
}
32783278

3279-
static inline struct page *next_map_page(struct address_space *mapping,
3279+
static inline struct folio *next_map_page(struct address_space *mapping,
32803280
struct xa_state *xas,
32813281
pgoff_t end_pgoff)
32823282
{
@@ -3293,24 +3293,25 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
32933293
pgoff_t last_pgoff = start_pgoff;
32943294
unsigned long addr;
32953295
XA_STATE(xas, &mapping->i_pages, start_pgoff);
3296-
struct page *head, *page;
3296+
struct folio *folio;
3297+
struct page *page;
32973298
unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
32983299
vm_fault_t ret = 0;
32993300

33003301
rcu_read_lock();
3301-
head = first_map_page(mapping, &xas, end_pgoff);
3302-
if (!head)
3302+
folio = first_map_page(mapping, &xas, end_pgoff);
3303+
if (!folio)
33033304
goto out;
33043305

3305-
if (filemap_map_pmd(vmf, head)) {
3306+
if (filemap_map_pmd(vmf, &folio->page)) {
33063307
ret = VM_FAULT_NOPAGE;
33073308
goto out;
33083309
}
33093310

33103311
addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
33113312
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
33123313
do {
3313-
page = find_subpage(head, xas.xa_index);
3314+
page = folio_file_page(folio, xas.xa_index);
33143315
if (PageHWPoison(page))
33153316
goto unlock;
33163317

@@ -3331,12 +3332,12 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
33313332
do_set_pte(vmf, page, addr);
33323333
/* no need to invalidate: a not-present page won't be cached */
33333334
update_mmu_cache(vma, addr, vmf->pte);
3334-
unlock_page(head);
3335+
folio_unlock(folio);
33353336
continue;
33363337
unlock:
3337-
unlock_page(head);
3338-
put_page(head);
3339-
} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3338+
folio_unlock(folio);
3339+
folio_put(folio);
3340+
} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
33403341
pte_unmap_unlock(vmf->pte, vmf->ptl);
33413342
out:
33423343
rcu_read_unlock();

0 commit comments

Comments (0)