
Commit f2b67ef

chleroy authored and mpe committed
powerpc/hugetlb: Fix 512k hugepages on 8xx with 16k page size
Commit 55c8fc3 ("powerpc/8xx: reintroduce 16K pages with HW assistance") redefined pte_t as a struct of 4 pte_basic_t, because in 16K pages mode there are four identical entries in the page table. But the size of hugepage tables is calculated based of the size of (void *). Therefore, we end up with page tables of size 1k instead of 4k for 512k pages. As 512k hugepage tables are the same size as standard page tables, ie 4k, use the standard page tables instead of PGT_CACHE tables. Fixes: 3fb69c6 ("powerpc/8xx: Enable 512k hugepage support with HW assistance") Cc: [email protected] # v5.0+ Signed-off-by: Christophe Leroy <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/90ec56a2315be602494619ed0223bba3b0b8d619.1580997007.git.christophe.leroy@c-s.fr
1 parent d4f194e commit f2b67ef

File tree

1 file changed (+18, -11)

arch/powerpc/mm/hugetlbpage.c

Lines changed: 18 additions & 11 deletions

@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (pshift >= pdshift) {
 		cachep = PGT_CACHE(PTE_T_ORDER);
 		num_hugepd = 1 << (pshift - pdshift);
+		new = NULL;
 	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
-		cachep = PGT_CACHE(PTE_INDEX_SIZE);
+		cachep = NULL;
 		num_hugepd = 1;
+		new = pte_alloc_one(mm);
 	} else {
 		cachep = PGT_CACHE(pdshift - pshift);
 		num_hugepd = 1;
+		new = NULL;
 	}
 
-	if (!cachep) {
+	if (!cachep && !new) {
 		WARN_ONCE(1, "No page table cache created for hugetlb tables");
 		return -ENOMEM;
 	}
 
-	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (cachep)
+		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (i < num_hugepd) {
 		for (i = i - 1 ; i >= 0; i--, hpdp--)
 			*hpdp = __hugepd(0);
-		kmem_cache_free(cachep, new);
+		if (cachep)
+			kmem_cache_free(cachep, new);
+		else
+			pte_free(mm, new);
 	} else {
 		kmemleak_ignore(new);
 	}
@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else if (IS_ENABLED(CONFIG_PPC_8xx))
-		pgtable_free_tlb(tlb, hugepte,
-				 get_hugepd_cache_index(PTE_INDEX_SIZE));
+		pgtable_free_tlb(tlb, hugepte, 0);
 	else
 		pgtable_free_tlb(tlb, hugepte,
 				 get_hugepd_cache_index(pdshift - shift));
@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
-			pgtable_cache_add(PTE_INDEX_SIZE);
-		else if (pdshift > shift)
-			pgtable_cache_add(pdshift - shift);
-		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
+		if (pdshift > shift) {
+			if (!IS_ENABLED(CONFIG_PPC_8xx))
+				pgtable_cache_add(pdshift - shift);
+		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
+			   IS_ENABLED(CONFIG_PPC_8xx)) {
 			pgtable_cache_add(PTE_T_ORDER);
+		}
 
 		configured = true;
 	}
