
Commit b3807a9

Hugh Dickins authored and torvalds committed
mm: page_vma_mapped_walk(): add a level of indentation
page_vma_mapped_walk() cleanup: add a level of indentation to much of the body, making no functional change in this commit, but reducing the later diff when this is all converted to a loop.

[[email protected]: page_vma_mapped_walk(): add a level of indentation fix]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: Wang Yugui <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 4482824 commit b3807a9
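The cleanup is pure staging: wrap the body in a bare braced block now, so that the later conversion to a loop only has to change the lines carrying the loop keyword. A minimal generic sketch of that two-step pattern (hypothetical code, not from the kernel; walk_once() and walk_range() are illustrative names):

#include <stdio.h>

/* Step 1 -- this commit's pattern: wrap the body in a bare braced
 * block. The extra indentation makes no functional change. */
static void walk_once(int addr)
{
	{
		if (addr < 0)
			return;
		printf("visit %d\n", addr);
	}
}

/* Step 2 -- the later conversion: the bare block becomes the loop
 * body, so only the do/while lines show up in that diff. */
static void walk_range(int addr, int end)
{
	do {
		if (addr < 0)
			return;
		printf("visit %d\n", addr);
	} while (++addr < end);
}

int main(void)
{
	walk_once(1);
	walk_range(0, 3);	/* visits 0, 1, 2 */
	return 0;
}

Because step 1 already pays the whole indentation cost, step 2's diff stays small and easy to review, which is the rationale the commit message gives for this otherwise no-op change.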


mm/page_vma_mapped.c

Lines changed: 55 additions & 50 deletions
@@ -173,62 +173,67 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pte)
 		goto next_pte;
 restart:
-	pgd = pgd_offset(mm, pvmw->address);
-	if (!pgd_present(*pgd))
-		return false;
-	p4d = p4d_offset(pgd, pvmw->address);
-	if (!p4d_present(*p4d))
-		return false;
-	pud = pud_offset(p4d, pvmw->address);
-	if (!pud_present(*pud))
-		return false;
-	pvmw->pmd = pmd_offset(pud, pvmw->address);
-	/*
-	 * Make sure the pmd value isn't cached in a register by the
-	 * compiler and used as a stale value after we've observed a
-	 * subsequent update.
-	 */
-	pmde = READ_ONCE(*pvmw->pmd);
-	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
-		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-		pmde = *pvmw->pmd;
-		if (likely(pmd_trans_huge(pmde))) {
-			if (pvmw->flags & PVMW_MIGRATION)
-				return not_found(pvmw);
-			if (pmd_page(pmde) != page)
-				return not_found(pvmw);
-			return true;
-		}
-		if (!pmd_present(pmde)) {
-			swp_entry_t entry;
+	{
+		pgd = pgd_offset(mm, pvmw->address);
+		if (!pgd_present(*pgd))
+			return false;
+		p4d = p4d_offset(pgd, pvmw->address);
+		if (!p4d_present(*p4d))
+			return false;
+		pud = pud_offset(p4d, pvmw->address);
+		if (!pud_present(*pud))
+			return false;
 
-			if (!thp_migration_supported() ||
-			    !(pvmw->flags & PVMW_MIGRATION))
-				return not_found(pvmw);
-			entry = pmd_to_swp_entry(pmde);
-			if (!is_migration_entry(entry) ||
-			    migration_entry_to_page(entry) != page)
-				return not_found(pvmw);
-			return true;
-		}
-		/* THP pmd was split under us: handle on pte level */
-		spin_unlock(pvmw->ptl);
-		pvmw->ptl = NULL;
-	} else if (!pmd_present(pmde)) {
+		pvmw->pmd = pmd_offset(pud, pvmw->address);
 		/*
-		 * If PVMW_SYNC, take and drop THP pmd lock so that we
-		 * cannot return prematurely, while zap_huge_pmd() has
-		 * cleared *pmd but not decremented compound_mapcount().
+		 * Make sure the pmd value isn't cached in a register by the
+		 * compiler and used as a stale value after we've observed a
+		 * subsequent update.
 		 */
-		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
-			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+		pmde = READ_ONCE(*pvmw->pmd);
 
-			spin_unlock(ptl);
+		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+			pmde = *pvmw->pmd;
+			if (likely(pmd_trans_huge(pmde))) {
+				if (pvmw->flags & PVMW_MIGRATION)
+					return not_found(pvmw);
+				if (pmd_page(pmde) != page)
+					return not_found(pvmw);
+				return true;
+			}
+			if (!pmd_present(pmde)) {
+				swp_entry_t entry;
+
+				if (!thp_migration_supported() ||
+				    !(pvmw->flags & PVMW_MIGRATION))
+					return not_found(pvmw);
+				entry = pmd_to_swp_entry(pmde);
+				if (!is_migration_entry(entry) ||
+				    migration_entry_to_page(entry) != page)
+					return not_found(pvmw);
+				return true;
+			}
+			/* THP pmd was split under us: handle on pte level */
+			spin_unlock(pvmw->ptl);
+			pvmw->ptl = NULL;
+		} else if (!pmd_present(pmde)) {
+			/*
+			 * If PVMW_SYNC, take and drop THP pmd lock so that we
+			 * cannot return prematurely, while zap_huge_pmd() has
+			 * cleared *pmd but not decremented compound_mapcount().
+			 */
+			if ((pvmw->flags & PVMW_SYNC) &&
+			    PageTransCompound(page)) {
+				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+				spin_unlock(ptl);
+			}
+			return false;
 		}
-		return false;
+		if (!map_pte(pvmw))
+			goto next_pte;
 	}
-	if (!map_pte(pvmw))
-		goto next_pte;
 	while (1) {
 		unsigned long end;
 
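A side note on the hunk above: the comment carried into the new block explains why the code reads pmde = READ_ONCE(*pvmw->pmd) rather than dereferencing the pmd directly. A minimal userspace sketch of that idea, using a simplified volatile-cast stand-in for the kernel's READ_ONCE() macro (READ_ONCE_ISH() and snapshot_flag() are illustrative names, not kernel API; typeof is GNU C):

#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): the volatile
 * cast forces the compiler to emit a fresh load on every use,
 * instead of caching the value in a register. */
#define READ_ONCE_ISH(x)	(*(const volatile typeof(x) *)&(x))

static int shared_flag;	/* imagine another thread updating this */

static int snapshot_flag(void)
{
	/* A plain read here could legally be hoisted or reused as a
	 * stale value by the compiler; the volatile access cannot. */
	return READ_ONCE_ISH(shared_flag);
}

int main(void)
{
	shared_flag = 1;
	printf("flag = %d\n", snapshot_flag());
	return 0;
}

In the walk itself this matters because a concurrent THP split or zap can change the pmd underneath: the code takes one snapshot with READ_ONCE() and then reasons consistently about that snapshot, exactly as the in-line comment describes.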
