Commit f9cb147

chleroy authored and maddy-kerneldev committed
powerpc/8xx: Reduce alignment constraint for kernel memory
8xx has three large page sizes: 8M, 512k and 16k. Too large an alignment can waste memory; on a board with only 32 Mbytes of RAM, every single byte counts, and a 512k alignment is sometimes too much.

Allow mapping kernel memory with 16k pages and reduce the constraint on kernel memory alignment.

512k and 16k pages are handled the same way, so reverse the tests so that 8M pages are the special case and the other sizes (512k and 16k) are the alternative.

Signed-off-by: Christophe Leroy <[email protected]>
Signed-off-by: Madhavan Srinivasan <[email protected]>
Link: https://patch.msgid.link/fa9927b70df13627cdf10b992ea71d6562c7760e.1746191262.git.christophe.leroy@csgroup.eu
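To put a number on the saving: alignment padding costs at most (alignment - smallest supported page) bytes, so relaxing the constraint from 512k to 16k recovers up to 512 KiB - 16 KiB = 496 KiB, about 1.5% of a 32 Mbyte board (back-of-envelope illustration, not a figure from the commit).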
1 parent 5a821e2 commit f9cb147

2 files changed: +22 -20 lines changed


arch/powerpc/Kconfig

Lines changed: 5 additions & 5 deletions
@@ -895,7 +895,7 @@ config DATA_SHIFT
 	int "Data shift" if DATA_SHIFT_BOOL
 	default 24 if STRICT_KERNEL_RWX && PPC64
 	range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
-	range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+	range 14 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
 	range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
 	default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
 	default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32

@@ -908,10 +908,10 @@ config DATA_SHIFT
 	  On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
 	  Smaller is the alignment, greater is the number of necessary DBATs.

-	  On 8xx, large pages (512kb or 8M) are used to map kernel linear
-	  memory. Aligning to 8M reduces TLB misses as only 8M pages are used
-	  in that case. If PIN_TLB is selected, it must be aligned to 8M as
-	  8M pages will be pinned.
+	  On 8xx, large pages (16kb or 512kb or 8M) are used to map kernel
+	  linear memory. Aligning to 8M reduces TLB misses as only 8M pages
+	  are used in that case. If PIN_TLB is selected, it must be aligned
+	  to 8M as 8M pages will be pinned.

 config ARCH_FORCE_MAX_ORDER
 	int "Order of maximal physically contiguous allocations"

arch/powerpc/mm/nohash/8xx.c

Lines changed: 17 additions & 15 deletions
@@ -54,20 +54,13 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 {
 	pmd_t *pmdp = pmd_off_k(va);
 	pte_t *ptep;
-
-	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
-		return -EINVAL;
+	unsigned int shift = mmu_psize_to_shift(psize);

 	if (new) {
 		if (WARN_ON(slab_is_available()))
 			return -EINVAL;

-		if (psize == MMU_PAGE_512K) {
-			ptep = early_pte_alloc_kernel(pmdp, va);
-			/* The PTE should never be already present */
-			if (WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
-				return -EINVAL;
-		} else {
+		if (psize == MMU_PAGE_8M) {
 			if (WARN_ON(!pmd_none(*pmdp) || !pmd_none(*(pmdp + 1))))
 				return -EINVAL;

@@ -78,20 +71,25 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 			pmd_populate_kernel(&init_mm, pmdp + 1, ptep);

 			ptep = (pte_t *)pmdp;
+		} else {
+			ptep = early_pte_alloc_kernel(pmdp, va);
+			/* The PTE should never be already present */
+			if (WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+				return -EINVAL;
 		}
 	} else {
-		if (psize == MMU_PAGE_512K)
-			ptep = pte_offset_kernel(pmdp, va);
-		else
+		if (psize == MMU_PAGE_8M)
 			ptep = (pte_t *)pmdp;
+		else
+			ptep = pte_offset_kernel(pmdp, va);
 	}

 	if (WARN_ON(!ptep))
 		return -ENOMEM;

 	set_huge_pte_at(&init_mm, va, ptep,
-			pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)),
-			1UL << mmu_psize_to_shift(psize));
+			arch_make_huge_pte(pfn_pte(pa >> PAGE_SHIFT, prot), shift, 0),
+			1UL << shift);

 	return 0;
 }
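The net effect of the reshuffle above: 8M is now the special case, since an 8M page spans two PMD entries and its PTE is stored in the PMD pair itself, while 512k and 16k both take the ordinary PTE path. Condensed from the lookup branch of the diff (error handling elided):

	if (psize == MMU_PAGE_8M)
		ptep = (pte_t *)pmdp;			/* PTE lives in the PMD pair */
	else
		ptep = pte_offset_kernel(pmdp, va);	/* 512k and 16k: normal PTE lookup */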
@@ -123,14 +121,18 @@ static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
 	unsigned long p = offset;
 	int err = 0;

-	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+	WARN_ON(!IS_ALIGNED(offset, SZ_16K) || !IS_ALIGNED(top, SZ_16K));

+	for (; p < ALIGN(p, SZ_512K) && p < top && !err; p += SZ_16K, v += SZ_16K)
+		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new);
 	for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
 		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
 	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
 		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
 	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
 		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+	for (; p < ALIGN_DOWN(top, SZ_16K) && p < top && !err; p += SZ_16K, v += SZ_16K)
+		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new);

 	if (!new)
 		flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
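The five loops above split a chunk into head pages, a large-page body, and tail pages, stepping up and then back down through the page sizes. A standalone userspace sketch tracing the same arithmetic (the offset and top values are made up, and the real function maps pages instead of printing):

#include <stdio.h>

#define SZ_16K	0x4000UL
#define SZ_512K	0x80000UL
#define SZ_8M	0x800000UL
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

static void map(unsigned long p, unsigned long sz)
{
	printf("map 0x%07lx..0x%07lx with %4luk pages\n", p, p + sz, sz >> 10);
}

int main(void)
{
	/* hypothetical chunk: 16k-aligned, but neither 512k- nor 8M-aligned */
	unsigned long p = 0x7C000, top = 0x1E84000;

	for (; p < ALIGN(p, SZ_512K) && p < top; p += SZ_16K)		map(p, SZ_16K);	/* head 16k */
	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K)		map(p, SZ_512K);	/* head 512k */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)	map(p, SZ_8M);	/* 8M body */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)	map(p, SZ_512K);	/* tail 512k */
	for (; p < ALIGN_DOWN(top, SZ_16K) && p < top; p += SZ_16K)	map(p, SZ_16K);	/* tail 16k */
	return 0;
}

With those values the head is one 16k page followed by fifteen 512k pages, the body is two 8M pages, and the tail is thirteen 512k pages plus a final 16k page. Before this commit the two 16k loops did not exist, so offset and top had to be 512k-aligned.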
