Commit 2f92447

kvaneesh authored and mpe committed
powerpc/book3s64/hash: Use the pte_t address from the caller
Don't fetch the pte value using a lockless page table walk. Instead, use the value from the caller. hash_preload() is called with the ptl lock held, so it is safe to use the pte_t address directly.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 7900757 commit 2f92447
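The change follows a general locking pattern worth noting: when the caller already holds the lock that protects an entry (here the PTL), the callee should take the entry pointer from the caller rather than re-deriving it under its own protection (here the IRQ-disabled lockless walk that the patch removes). Below is a minimal user-space sketch of that pattern; the names (struct entry, preload(), update_cache()) are illustrative stand-ins, not kernel APIs.

	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative stand-ins, not kernel types: a table of entries
	 * protected by a single lock (the analogue of the PTL). */
	struct entry { int value; };

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct entry table[16];

	/* Callee: trusts the caller's pointer. Because the caller holds
	 * table_lock, the entry cannot change underneath us, so no second
	 * lookup (the analogue of the removed find_current_mm_pte() walk)
	 * and no extra IRQ/lock dance is needed. */
	static void preload(struct entry *e)
	{
		printf("preloading value %d\n", e->value);
	}

	/* Caller: performs the one and only lookup while holding the lock,
	 * then passes the pointer down, just as hash_preload() now receives
	 * ptep from update_mmu_cache(). */
	static void update_cache(int idx)
	{
		pthread_mutex_lock(&table_lock);
		preload(&table[idx]);
		pthread_mutex_unlock(&table_lock);
	}

	int main(void)
	{
		table[3].value = 42;
		update_cache(3);
		return 0;
	}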

File tree

1 file changed: +5, -22 lines


arch/powerpc/mm/book3s64/hash_utils.c

Lines changed: 5 additions & 22 deletions
@@ -1546,14 +1546,11 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
-static void hash_preload(struct mm_struct *mm, unsigned long ea,
+static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
 			 bool is_exec, unsigned long trap)
 {
-	int hugepage_shift;
 	unsigned long vsid;
 	pgd_t *pgdir;
-	pte_t *ptep;
-	unsigned long flags;
 	int rc, ssize, update_flags = 0;
 	unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
 
@@ -1575,30 +1572,18 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
 	vsid = get_user_vsid(&mm->context, ea, ssize);
 	if (!vsid)
 		return;
-	/*
-	 * Hash doesn't like irqs. Walking linux page table with irq disabled
-	 * saves us from holding multiple locks.
-	 */
-	local_irq_save(flags);
 
-	/*
-	 * THP pages use update_mmu_cache_pmd. We don't do
-	 * hash preload there. Hence can ignore THP here
-	 */
-	ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
-	if (!ptep)
-		goto out_exit;
-
-	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
 	 * care of it once we actually try to access the page.
 	 * That way we don't have to duplicate all of the logic for segment
 	 * page size demotion here
+	 * Called with PTL held, hence can be sure the value won't change in
+	 * between.
 	 */
 	if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
-		goto out_exit;
+		return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Is that local to this CPU ? */
@@ -1623,8 +1608,6 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
 				   mm_ctx_user_psize(&mm->context),
 				   mm_ctx_user_psize(&mm->context),
 				   pte_val(*ptep));
-out_exit:
-	local_irq_restore(flags);
 }
 
 /*
@@ -1675,7 +1658,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		return;
 	}
 
-	hash_preload(vma->vm_mm, address, is_exec, trap);
+	hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
 }
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
