Skip to content

Commit 205e68b

Browse files
Matthew Wilcox (Oracle) authored and opsiff committed
mm/memory-failure: convert hwpoison_user_mappings to take a folio
Pass the folio from the callers, and use it throughout instead of hpage.
Saves dozens of calls to compound_head().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Miaohe Lin <[email protected]>
Reviewed-by: Jane Chu <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Oscar Salvador <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit 03468a0)
1 parent 9dcba1a commit 205e68b

File tree

1 file changed

+15
-15
lines changed

1 file changed: +15 −15 lines changed

mm/memory-failure.c

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1576,24 +1576,24 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
15761576
* Do all that is necessary to remove user space mappings. Unmap
15771577
* the pages and send SIGBUS to the processes if the data was dirty.
15781578
*/
1579-
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1580-
int flags, struct page *hpage)
1579+
static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
1580+
unsigned long pfn, int flags)
15811581
{
1582-
struct folio *folio = page_folio(hpage);
15831582
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
15841583
struct address_space *mapping;
15851584
LIST_HEAD(tokill);
15861585
bool unmap_success;
15871586
int forcekill;
1588-
bool mlocked = PageMlocked(hpage);
1587+
bool mlocked = folio_test_mlocked(folio);
15891588

15901589
/*
15911590
* Here we are interested only in user-mapped pages, so skip any
15921591
* other types of pages.
15931592
*/
1594-
if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
1593+
if (folio_test_reserved(folio) || folio_test_slab(folio) ||
1594+
folio_test_pgtable(folio) || folio_test_offline(folio))
15951595
return true;
1596-
if (!(PageLRU(hpage) || PageHuge(p)))
1596+
if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
15971597
return true;
15981598

15991599
/*
@@ -1603,7 +1603,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
16031603
if (!page_mapped(p))
16041604
return true;
16051605

1606-
if (PageSwapCache(p)) {
1606+
if (folio_test_swapcache(folio)) {
16071607
pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
16081608
ttu &= ~TTU_HWPOISON;
16091609
}
@@ -1614,11 +1614,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
16141614
* XXX: the dirty test could be racy: set_page_dirty() may not always
16151615
* be called inside page lock (it's recommended but not enforced).
16161616
*/
1617-
mapping = page_mapping(hpage);
1618-
if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1617+
mapping = folio_mapping(folio);
1618+
if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
16191619
mapping_can_writeback(mapping)) {
1620-
if (page_mkclean(hpage)) {
1621-
SetPageDirty(hpage);
1620+
if (folio_mkclean(folio)) {
1621+
folio_set_dirty(folio);
16221622
} else {
16231623
ttu &= ~TTU_HWPOISON;
16241624
pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
@@ -1633,7 +1633,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
16331633
*/
16341634
collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
16351635

1636-
if (PageHuge(hpage) && !PageAnon(hpage)) {
1636+
if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
16371637
/*
16381638
* For hugetlb pages in shared mappings, try_to_unmap
16391639
* could potentially call huge_pmd_unshare. Because of
@@ -1673,7 +1673,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
16731673
* use a more force-full uncatchable kill to prevent
16741674
* any accesses to the poisoned memory.
16751675
*/
1676-
forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
1676+
forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
16771677
!unmap_success;
16781678
kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
16791679

@@ -2111,7 +2111,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
21112111

21122112
page_flags = folio->flags;
21132113

2114-
if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
2114+
if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
21152115
folio_unlock(folio);
21162116
return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
21172117
}
@@ -2378,7 +2378,7 @@ int memory_failure(unsigned long pfn, int flags)
23782378
* Now take care of user space mappings.
23792379
* Abort on fail: __filemap_remove_folio() assumes unmapped page.
23802380
*/
2381-
if (!hwpoison_user_mappings(p, pfn, flags, p)) {
2381+
if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
23822382
res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
23832383
goto unlock_page;
23842384
}

0 commit comments

Comments (0)