
Commit d3f368d

Matthew Wilcox (Oracle) authored and opsiff committed
mm: convert hugetlb_page_mapping_lock_write to folio
The page is only used to get the mapping, so the folio will do just as
well.  Both callers already have a folio available, so this saves a call
to compound_head().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Jane Chu <[email protected]>
Reviewed-by: Oscar Salvador <[email protected]>
Acked-by: Miaohe Lin <[email protected]>
Cc: Dan Williams <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>

(cherry picked from commit 6e8cda4)
1 parent 2d1e6f1
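For context, the saving lives in the page-to-folio compat layer: page_mapping() must first resolve a possibly-tail page to its head page before it can read the mapping, while a folio is by definition a head page, so folio_mapping() reads it directly. A minimal sketch of that relationship, modelled on (not quoted from) the kernel's folio-compat helpers:

/*
 * Sketch only: page_mapping() is the folio API plus a head-page
 * lookup.  page_folio() resolves compound_head() for tail pages,
 * which is exactly the step folio-based callers now skip.
 */
struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}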

4 files changed, 8 insertions(+), 8 deletions(-)

include/linux/hugetlb.h (3 additions, 3 deletions)

@@ -175,7 +175,7 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, pud_t *pud);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages;
@@ -298,8 +298,8 @@ static inline unsigned long hugetlb_total_pages(void)
 	return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-					struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+					struct folio *folio)
 {
 	return NULL;
 }

mm/hugetlb.c (3 additions, 3 deletions)

@@ -2087,13 +2087,13 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable.  Due to locking order, we can only trylock_write.  If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(hpage);
+	struct address_space *mapping = folio_mapping(folio);
 
 	if (!mapping)
 		return mapping;
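The hunk cuts off at the !mapping check. The remainder of the function, reconstructed below as a sketch rather than quoted from the tree, only attempts the i_mmap rwsem in write mode, matching the trylock_write comment above:

struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (!mapping)
		return mapping;

	/* Lock ordering forbids a sleeping acquire here: trylock or bail. */
	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}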

mm/memory-failure.c (1 addition, 1 deletion)

@@ -1641,7 +1641,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
 		 * at this higher level.
 		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
+		mapping = hugetlb_folio_mapping_lock_write(folio);
 		if (mapping) {
 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
 			i_mmap_unlock_write(mapping);

mm/migrate.c (1 addition, 1 deletion)

@@ -1419,7 +1419,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
 			 * to let lower levels know we have taken the lock.
 			 */
-			mapping = hugetlb_page_mapping_lock_write(&src->page);
+			mapping = hugetlb_folio_mapping_lock_write(src);
 			if (unlikely(!mapping))
 				goto unlock_put_anon;
 
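Note the caller-side effect visible in this hunk: src is already a struct folio *, so the old call had to take &src->page only for page_mapping() to convert it straight back to a folio via compound_head(). The conversion deletes that round trip:

/* Before: folio -> page -> compound_head() -> mapping. */
mapping = hugetlb_page_mapping_lock_write(&src->page);

/* After: the folio is passed straight through. */
mapping = hugetlb_folio_mapping_lock_write(src);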
