Skip to content

Commit 1bb31cc

Browse files
committed
Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
    arm64: mm: always map fixmap at page granularity
    arm64: mm: move fixmap code to its own file
    arm64: add FIXADDR_TOT_{START,SIZE}
    Revert "Revert "arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()""
    arm: uaccess: Remove memcpy_page_flushcache()
    mm,kfence: decouple kfence from page granularity mapping judgement
2 parents 81444b7 + 414c109 commit 1bb31cc

File tree

13 files changed

+305
-234
lines changed

13 files changed

+305
-234
lines changed

arch/arm64/include/asm/fixmap.h

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717

1818
#ifndef __ASSEMBLY__
1919
#include <linux/kernel.h>
20+
#include <linux/math.h>
2021
#include <linux/sizes.h>
2122
#include <asm/boot.h>
2223
#include <asm/page.h>
@@ -36,17 +37,13 @@ enum fixed_addresses {
3637
FIX_HOLE,
3738

3839
/*
39-
* Reserve a virtual window for the FDT that is 2 MB larger than the
40-
* maximum supported size, and put it at the top of the fixmap region.
41-
* The additional space ensures that any FDT that does not exceed
42-
* MAX_FDT_SIZE can be mapped regardless of whether it crosses any
43-
* 2 MB alignment boundaries.
44-
*
45-
* Keep this at the top so it remains 2 MB aligned.
40+
* Reserve a virtual window for the FDT that is a page bigger than the
41+
* maximum supported size. The additional space ensures that any FDT
42+
* that does not exceed MAX_FDT_SIZE can be mapped regardless of
43+
* whether it crosses any page boundary.
4644
*/
47-
#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
4845
FIX_FDT_END,
49-
FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
46+
FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,
5047

5148
FIX_EARLYCON_MEM_BASE,
5249
FIX_TEXT_POKE0,
@@ -95,12 +92,15 @@ enum fixed_addresses {
9592
__end_of_fixed_addresses
9693
};
9794

98-
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
99-
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
95+
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
96+
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
97+
#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
98+
#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE)
10099

101100
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
102101

103102
void __init early_fixmap_init(void);
103+
void __init fixmap_copy(pgd_t *pgdir);
104104

105105
#define __early_set_fixmap __set_fixmap
106106

arch/arm64/include/asm/kernel-pgtable.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,11 @@
5959
#define EARLY_KASLR (0)
6060
#endif
6161

62+
#define SPAN_NR_ENTRIES(vstart, vend, shift) \
63+
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
64+
6265
#define EARLY_ENTRIES(vstart, vend, shift, add) \
63-
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
66+
(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
6467

6568
#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
6669

arch/arm64/include/asm/kfence.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
1919
return true;
2020
}
2121

22+
#ifdef CONFIG_KFENCE
23+
extern bool kfence_early_init;
24+
static inline bool arm64_kfence_can_set_direct_map(void)
25+
{
26+
return !kfence_early_init;
27+
}
28+
#else /* CONFIG_KFENCE */
29+
static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
30+
#endif /* CONFIG_KFENCE */
31+
2232
#endif /* __ASM_KFENCE_H */

arch/arm64/include/asm/mmu.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,8 @@ extern void paging_init(void);
6565
extern void bootmem_init(void);
6666
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
6767
extern void init_mem_pgprot(void);
68+
extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
69+
phys_addr_t size, pgprot_t prot);
6870
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
6971
unsigned long virt, phys_addr_t size,
7072
pgprot_t prot, bool page_mappings_only);

arch/arm64/include/asm/uaccess.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -449,8 +449,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
449449
extern __must_check long strnlen_user(const char __user *str, long n);
450450

451451
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
452-
struct page;
453-
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
454452
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
455453

456454
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)

arch/arm64/lib/uaccess_flushcache.c

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,6 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
1919
}
2020
EXPORT_SYMBOL_GPL(memcpy_flushcache);
2121

22-
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
23-
size_t len)
24-
{
25-
memcpy_flushcache(to, page_address(page) + offset, len);
26-
}
27-
2822
unsigned long __copy_user_flushcache(void *to, const void __user *from,
2923
unsigned long n)
3024
{

arch/arm64/mm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
obj-y := dma-mapping.o extable.o fault.o init.o \
33
cache.o copypage.o flush.o \
44
ioremap.o mmap.o pgd.o mmu.o \
5-
context.o proc.o pageattr.o
5+
context.o proc.o pageattr.o fixmap.o
66
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
77
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
88
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o

arch/arm64/mm/dma-mapping.c

Lines changed: 1 addition & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -36,22 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
3636
{
3737
unsigned long start = (unsigned long)page_address(page);
3838

39-
/*
40-
* The architecture only requires a clean to the PoC here in order to
41-
* meet the requirements of the DMA API. However, some vendors (i.e.
42-
* Qualcomm) abuse the DMA API for transferring buffers from the
43-
* non-secure to the secure world, resetting the system if a non-secure
44-
* access shows up after the buffer has been transferred:
45-
*
46-
* https://lore.kernel.org/r/[email protected]
47-
*
48-
* Using clean+invalidate appears to make this issue less likely, but
49-
* the drivers themselves still need fixing as the CPU could issue a
50-
* speculative read from the buffer via the linear mapping irrespective
51-
* of the cache maintenance we use. Once the drivers are fixed, we can
52-
* relax this to a clean operation.
53-
*/
54-
dcache_clean_inval_poc(start, start + size);
39+
dcache_clean_poc(start, start + size);
5540
}
5641

5742
#ifdef CONFIG_IOMMU_DMA

arch/arm64/mm/fixmap.c

Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,203 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Fixmap manipulation code
4+
*/
5+
6+
#include <linux/bug.h>
7+
#include <linux/init.h>
8+
#include <linux/kernel.h>
9+
#include <linux/libfdt.h>
10+
#include <linux/memory.h>
11+
#include <linux/mm.h>
12+
#include <linux/sizes.h>
13+
14+
#include <asm/fixmap.h>
15+
#include <asm/kernel-pgtable.h>
16+
#include <asm/pgalloc.h>
17+
#include <asm/tlbflush.h>
18+
19+
/* Number of PTE tables needed to map [FIXADDR_TOT_START, FIXADDR_TOP). */
#define NR_BM_PTE_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
/* Number of PMD tables needed for the same span. */
#define NR_BM_PMD_TABLES \
	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)

/* The whole fixmap region must fit within a single PMD table. */
static_assert(NR_BM_PMD_TABLES == 1);

/* Index of the backing table (at granularity 'shift') covering 'addr'. */
#define __BM_TABLE_IDX(addr, shift) \
	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))

#define BM_PTE_TABLE_IDX(addr)	__BM_TABLE_IDX(addr, PMD_SHIFT)

/*
 * Statically-allocated page tables backing the fixmap region; placed in
 * .bss (page-aligned) so they exist before any allocator is available.
 */
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
34+
35+
/* Return the PTE slot in the static bm_pte tables for fixmap address @addr. */
static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
}
39+
40+
/*
 * Hook the statically-allocated PTE table covering @addr into @pmdp if the
 * PMD entry is still empty. Uses __pmd_populate()/__pa_symbol() because this
 * runs too early for the virt_to_phys()-based p*d_populate helpers (see the
 * comment above early_fixmap_init()).
 */
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = READ_ONCE(*pmdp);
	pte_t *ptep;

	if (pmd_none(pmd)) {
		ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
		__pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
	}
}
50+
51+
/*
 * Populate an empty PUD entry with the static bm_pmd table, then walk
 * [addr, end) at PMD granularity, installing a PTE table for each PMD slot.
 */
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
					 unsigned long end)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	if (pud_none(pud))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);

	pmdp = pmd_offset_kimg(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		early_fixmap_init_pte(pmdp, addr);
	} while (pmdp++, addr = next, addr != end);
}
67+
68+
69+
/*
 * Populate an empty P4D entry with the static bm_pud table and recurse into
 * the PMD level. A pre-populated P4D entry that does not point at bm_pud is
 * tolerated only for the 16k/4-levels case (see the comment below).
 */
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
					 unsigned long end)
{
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
	}

	if (p4d_none(p4d))
		__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);

	pudp = pud_offset_kimg(p4dp, addr);
	early_fixmap_init_pmd(pudp, addr, end);
}
91+
92+
/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
/*
 * Wire the static bm_pud/bm_pmd/bm_pte tables into the kernel page tables so
 * the whole fixmap region [FIXADDR_TOT_START, FIXADDR_TOP) can be mapped at
 * page granularity via __set_fixmap().
 */
void __init early_fixmap_init(void)
{
	unsigned long addr = FIXADDR_TOT_START;
	unsigned long end = FIXADDR_TOP;

	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	early_fixmap_init_pud(p4dp, addr, end);
}
108+
109+
/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
/*
 * Map or unmap a single fixmap slot. A zero pgprot (pgprot_val(flags) == 0)
 * clears the mapping and flushes the TLB for that page; otherwise the slot is
 * pointed at @phys with the given attributes.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	/* Only real fixmap slots may be (un)mapped. */
	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}
130+
131+
/*
 * Map the flattened device tree at @dt_phys into the FIX_FDT fixmap window
 * with attributes @prot.
 *
 * Returns the virtual address of the FDT (page offset preserved) and stores
 * its total size in *size, or returns NULL if @dt_phys is unset/misaligned,
 * lacks the FDT magic, or the FDT exceeds MAX_FDT_SIZE.
 */
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	phys_addr_t dt_phys_base;
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
	offset = dt_phys % PAGE_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	/* Extend the mapping if the FDT spills past the first page. */
	if (offset + *size > PAGE_SIZE) {
		create_mapping_noalloc(dt_phys_base, dt_virt_base,
				       offset + *size, prot);
	}

	return dt_virt;
}
170+
171+
/*
 * Copy the fixmap region into a new pgdir.
 */
void __init fixmap_copy(pgd_t *pgdir)
{
	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
		/* Temporarily map the new pud table via the fixmap to write it. */
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		/* No other configuration should reach here. */
		BUG();
	}
}

0 commit comments

Comments
 (0)