Commit 658e2c5

AlexGhiti authored and palmer-dabbelt committed
riscv: Introduce structure that group all variables regarding kernel mapping
We have a lot of variables that are used to hold kernel mapping addresses, offsets between physical and virtual mappings and some others used for XIP kernels: they are all defined at different places in mm/init.c, so group them into a single structure with, for some of them, more explicit and concise names.

Signed-off-by: Alexandre Ghiti <[email protected]>
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 01112e5 commit 658e2c5
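
In short, the previously free-standing globals (kernel_virt_addr, va_pa_offset, va_kernel_pa_offset, va_kernel_xip_pa_offset, load_pa, load_sz, xiprom, xiprom_sz) become fields of a single kernel_map object. A condensed sketch of the idea follows; it omits the XIP-only fields and the surrounding #ifdefs, and the authoritative definition is in the arch/riscv/include/asm/page.h hunk below.

    /* Condensed sketch only; see the page.h diff below for the real definition. */
    struct kernel_mapping {
        unsigned long virt_addr;           /* was: kernel_virt_addr */
        uintptr_t phys_addr;               /* was: load_pa */
        uintptr_t size;                    /* was: load_sz */
        unsigned long va_pa_offset;        /* linear-mapping VA minus load PA */
        unsigned long va_kernel_pa_offset; /* kernel-mapping VA minus load PA (64-bit) */
    };
    extern struct kernel_mapping kernel_map;

    /* Call sites change mechanically, for example:
     *   before: pfn_base = PFN_DOWN(load_pa);
     *   after:  pfn_base = PFN_DOWN(kernel_map.phys_addr);
     */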

File tree

8 files changed, +75 -93 lines changed


arch/riscv/include/asm/page.h

Lines changed: 30 additions & 24 deletions

@@ -79,46 +79,52 @@ typedef struct page *pgtable_t;
 #endif

 #ifdef CONFIG_MMU
-extern unsigned long va_pa_offset;
-#ifdef CONFIG_64BIT
-extern unsigned long va_kernel_pa_offset;
-#endif
-extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
-extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET (pfn_base)
 #else
-#define va_pa_offset 0
-#ifdef CONFIG_64BIT
-#define va_kernel_pa_offset 0
-#endif
-#define va_kernel_xip_pa_offset 0
 #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
 #endif /* CONFIG_MMU */

-extern unsigned long kernel_virt_addr;
+struct kernel_mapping {
+        unsigned long virt_addr;
+        uintptr_t phys_addr;
+        uintptr_t size;
+        /* Offset between linear mapping virtual address and kernel load address */
+        unsigned long va_pa_offset;
+#ifdef CONFIG_64BIT
+        /* Offset between kernel mapping virtual address and kernel load address */
+        unsigned long va_kernel_pa_offset;
+#endif
+        unsigned long va_kernel_xip_pa_offset;
+#ifdef CONFIG_XIP_KERNEL
+        uintptr_t xiprom;
+        uintptr_t xiprom_sz;
+#endif
+};
+
+extern struct kernel_mapping kernel_map;

 #ifdef CONFIG_64BIT
 #define is_kernel_mapping(x) \
-        ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+        ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 #define is_linear_mapping(x) \
-        ((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+        ((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)

-#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
+#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y) ({ \
         unsigned long _y = y; \
         (_y >= CONFIG_PHYS_RAM_BASE) ? \
-                (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
-                (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
+                (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) : \
+                (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset); \
         })
 #define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)

-#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
+#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
 #define kernel_mapping_va_to_pa(y) ({ \
         unsigned long _y = y; \
-        (_y < kernel_virt_addr + XIP_OFFSET) ? \
-                ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
-                ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
+        (_y < kernel_map.virt_addr + XIP_OFFSET) ? \
+                ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
+                ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
         })

 #define __va_to_pa_nodebug(x) ({ \

@@ -128,12 +134,12 @@ extern unsigned long kernel_virt_addr;
 })
 #else
 #define is_kernel_mapping(x) \
-        ((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+        ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 #define is_linear_mapping(x) \
         ((x) >= PAGE_OFFSET)

-#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
-#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
+#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
+#define __va_to_pa_nodebug(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
 #endif /* CONFIG_64BIT */

 #ifdef CONFIG_DEBUG_VIRTUAL
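
The translation macros above are plain offset arithmetic on kernel_map. A self-contained user-space sketch, using hypothetical example values (an sv39-style PAGE_OFFSET, a link address of 0xffffffff80000000, kernel loaded at physical 0x80200000) and ignoring the XIP special case, illustrates what linear_mapping_pa_to_va() and kernel_mapping_pa_to_va() compute; this is illustrative only, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values chosen for illustration, not from a real boot. */
        uint64_t page_offset = 0xffffffe000000000ULL; /* base of the linear mapping */
        uint64_t link_addr   = 0xffffffff80000000ULL; /* kernel_map.virt_addr */
        uint64_t load_pa     = 0x0000000080200000ULL; /* kernel_map.phys_addr */

        /* Same arithmetic as setup_vm() in mm/init.c below. */
        uint64_t va_pa_offset        = page_offset - load_pa;
        uint64_t va_kernel_pa_offset = link_addr - load_pa;

        uint64_t pa = load_pa + 0x1000; /* some physical address inside the image */

        /* linear_mapping_pa_to_va(pa) and kernel_mapping_pa_to_va(pa) reduce to: */
        printf("linear map VA: 0x%llx\n", (unsigned long long)(pa + va_pa_offset));
        printf("kernel map VA: 0x%llx\n", (unsigned long long)(pa + va_kernel_pa_offset));
        return 0;
    }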

arch/riscv/kernel/asm-offsets.c

Lines changed: 2 additions & 0 deletions

@@ -311,4 +311,6 @@ void asm_offsets(void)
          * ensures the alignment is sane.
          */
         DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+
+        OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
 }
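
The new OFFSET() entry records the byte offset of virt_addr inside struct kernel_mapping as the assembler-visible constant KERNEL_MAP_VIRT_ADDR (emitted through the generated asm-offsets header), which is what lets head.S below load that field without knowing the C layout. Roughly, and only as an illustration of the relationship rather than the real kbuild machinery:

    #include <stddef.h>

    /* Illustrative stand-in: the generated constant amounts to offsetof() on the
     * structure. Because virt_addr is the first member, the value is 0 here. */
    struct kernel_mapping_sketch {
        unsigned long virt_addr;
        unsigned long va_pa_offset;
    };

    enum { KERNEL_MAP_VIRT_ADDR_SKETCH = offsetof(struct kernel_mapping_sketch, virt_addr) };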

arch/riscv/kernel/head.S

Lines changed: 2 additions & 2 deletions

@@ -81,9 +81,9 @@ pe_head_start:
 #ifdef CONFIG_MMU
 relocate:
         /* Relocate return address */
-        la a1, kernel_virt_addr
+        la a1, kernel_map
         XIP_FIXUP_OFFSET a1
-        REG_L a1, 0(a1)
+        REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
         la a2, _start
         sub a1, a1, a2
         add ra, ra, a1
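
The fragment now reads kernel_map.virt_addr through the generated offset instead of dereferencing the old kernel_virt_addr variable; the relocation arithmetic itself is unchanged. Expressed in C purely as an illustration (the names are hypothetical, not kernel functions):

    #include <stdint.h>

    /* ra is shifted by the difference between the kernel's virtual address
     * (kernel_map.virt_addr, loaded via KERNEL_MAP_VIRT_ADDR) and the physical
     * address the code is currently running at (_start). */
    static inline uintptr_t relocate_ra(uintptr_t ra, uintptr_t virt_addr, uintptr_t start_pa)
    {
        return ra + (virt_addr - start_pa);
    }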

arch/riscv/kernel/kexec_relocate.S

Lines changed: 2 additions & 2 deletions

@@ -20,7 +20,7 @@ SYM_CODE_START(riscv_kexec_relocate)
  * s4: Pointer to the destination address for the relocation
  * s5: (const) Number of words per page
  * s6: (const) 1, used for subtraction
- * s7: (const) va_pa_offset, used when switching MMU off
+ * s7: (const) kernel_map.va_pa_offset, used when switching MMU off
  * s8: (const) Physical address of the main loop
  * s9: (debug) indirection page counter
  * s10: (debug) entry counter

@@ -159,7 +159,7 @@ SYM_CODE_START(riscv_kexec_norelocate)
  * s0: (const) Phys address to jump to
  * s1: (const) Phys address of the FDT image
  * s2: (const) The hartid of the current hart
- * s3: (const) va_pa_offset, used when switching MMU off
+ * s3: (const) kernel_map.va_pa_offset, used when switching MMU off
  */
         mv s0, a1
         mv s1, a2

arch/riscv/kernel/machine_kexec.c

Lines changed: 1 addition & 1 deletion

@@ -188,6 +188,6 @@ machine_kexec(struct kimage *image)
         /* Jump to the relocation code */
         pr_notice("Bye...\n");
         kexec_method(first_ind_entry, jump_addr, fdt_addr,
-                     this_hart_id, va_pa_offset);
+                     this_hart_id, kernel_map.va_pa_offset);
         unreachable();
 }

arch/riscv/mm/init.c

Lines changed: 36 additions & 62 deletions

@@ -30,10 +30,13 @@

 #include "../kernel/head.h"

-unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
-EXPORT_SYMBOL(kernel_virt_addr);
+struct kernel_mapping kernel_map __ro_after_init;
+EXPORT_SYMBOL(kernel_map);
+#ifdef CONFIG_XIP_KERNEL
+#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
+#endif
+
 #ifdef CONFIG_XIP_KERNEL
-#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr)))
 extern char _xiprom[], _exiprom[];
 #endif

@@ -211,25 +214,6 @@ static struct pt_alloc_ops _pt_ops __initdata;
 #define pt_ops _pt_ops
 #endif

-/* Offset between linear mapping virtual address and kernel load address */
-unsigned long va_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_pa_offset);
-#ifdef CONFIG_XIP_KERNEL
-#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset)))
-#endif
-/* Offset between kernel mapping virtual address and kernel load address */
-#ifdef CONFIG_64BIT
-unsigned long va_kernel_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_kernel_pa_offset);
-#endif
-#ifdef CONFIG_XIP_KERNEL
-#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
-#endif
-unsigned long va_kernel_xip_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_kernel_xip_pa_offset);
-#ifdef CONFIG_XIP_KERNEL
-#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
-#endif
 unsigned long pfn_base __ro_after_init;
 EXPORT_SYMBOL(pfn_base);

@@ -345,7 +329,7 @@ static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)

 static phys_addr_t __init alloc_pmd_early(uintptr_t va)
 {
-        BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
+        BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

         return (uintptr_t)early_pmd;
 }

@@ -510,36 +494,24 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif

-static uintptr_t load_pa __initdata;
-uintptr_t load_sz;
-#ifdef CONFIG_XIP_KERNEL
-#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
-#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
-#endif
-
 #ifdef CONFIG_XIP_KERNEL
-static uintptr_t xiprom __initdata;
-static uintptr_t xiprom_sz __initdata;
-#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
-#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
-
 static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
                                             __always_unused bool early)
 {
         uintptr_t va, end_va;

         /* Map the flash resident part */
-        end_va = kernel_virt_addr + xiprom_sz;
-        for (va = kernel_virt_addr; va < end_va; va += map_size)
+        end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
+        for (va = kernel_map.virt_addr; va < end_va; va += map_size)
                 create_pgd_mapping(pgdir, va,
-                                   xiprom + (va - kernel_virt_addr),
+                                   kernel_map.xiprom + (va - kernel_map.virt_addr),
                                    map_size, PAGE_KERNEL_EXEC);

         /* Map the data in RAM */
-        end_va = kernel_virt_addr + XIP_OFFSET + load_sz;
-        for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+        end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
+        for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
                 create_pgd_mapping(pgdir, va,
-                                   load_pa + (va - (kernel_virt_addr + XIP_OFFSET)),
+                                   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
                                    map_size, PAGE_KERNEL);
 }
 #else

@@ -548,10 +520,10 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
 {
         uintptr_t va, end_va;

-        end_va = kernel_virt_addr + load_sz;
-        for (va = kernel_virt_addr; va < end_va; va += map_size)
+        end_va = kernel_map.virt_addr + kernel_map.size;
+        for (va = kernel_map.virt_addr; va < end_va; va += map_size)
                 create_pgd_mapping(pgdir, va,
-                                   load_pa + (va - kernel_virt_addr),
+                                   kernel_map.phys_addr + (va - kernel_map.virt_addr),
                                    map_size,
                                    early ?
                                         PAGE_KERNEL_EXEC : pgprot_from_va(va));

@@ -566,25 +538,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         pmd_t fix_bmap_spmd, fix_bmap_epmd;
 #endif

+        kernel_map.virt_addr = KERNEL_LINK_ADDR;
+
 #ifdef CONFIG_XIP_KERNEL
-        xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
-        xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+        kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
+        kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

-        load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE;
-        load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+        kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+        kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

-        va_kernel_xip_pa_offset = kernel_virt_addr - xiprom;
+        kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
 #else
-        load_pa = (uintptr_t)(&_start);
-        load_sz = (uintptr_t)(&_end) - load_pa;
+        kernel_map.phys_addr = (uintptr_t)(&_start);
+        kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 #endif

-        va_pa_offset = PAGE_OFFSET - load_pa;
+        kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
 #ifdef CONFIG_64BIT
-        va_kernel_pa_offset = kernel_virt_addr - load_pa;
+        kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
 #endif

-        pfn_base = PFN_DOWN(load_pa);
+        pfn_base = PFN_DOWN(kernel_map.phys_addr);

         /*
          * Enforce boot alignment requirements of RV32 and

@@ -594,7 +568,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)

         /* Sanity check alignment and size */
         BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
-        BUG_ON((load_pa % map_size) != 0);
+        BUG_ON((kernel_map.phys_addr % map_size) != 0);

         pt_ops.alloc_pte = alloc_pte_early;
         pt_ops.get_pte_virt = get_pte_virt_early;

@@ -611,19 +585,19 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         create_pmd_mapping(fixmap_pmd, FIXADDR_START,
                            (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
         /* Setup trampoline PGD and PMD */
-        create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
+        create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
                            (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
 #ifdef CONFIG_XIP_KERNEL
-        create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
-                           xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
+        create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
+                           kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
 #else
-        create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
-                           load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
+        create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
+                           kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
 #endif
 #else
         /* Setup trampoline PGD */
-        create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
-                           load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
+        create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
+                           kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
 #endif

         /*
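
create_kernel_page_table() now walks kernel_map instead of the old load_pa/load_sz pair: it maps the virtual range [kernel_map.virt_addr, kernel_map.virt_addr + kernel_map.size) onto physical addresses starting at kernel_map.phys_addr, one map_size chunk at a time. A minimal user-space sketch of that loop, with example values and a hypothetical print_mapping() standing in for create_pgd_mapping():

    #include <stdint.h>
    #include <stdio.h>

    struct kernel_mapping { uint64_t virt_addr, phys_addr, size; };

    /* Hypothetical stand-in for create_pgd_mapping(); it only prints the range. */
    static void print_mapping(uint64_t va, uint64_t pa, uint64_t len)
    {
        printf("map VA 0x%llx -> PA 0x%llx (+0x%llx)\n",
               (unsigned long long)va, (unsigned long long)pa, (unsigned long long)len);
    }

    int main(void)
    {
        /* Example values only (2 MiB chunks, 6 MiB kernel image). */
        struct kernel_mapping kernel_map = {
            .virt_addr = 0xffffffff80000000ULL,
            .phys_addr = 0x80200000ULL,
            .size      = 6 << 20,
        };
        uint64_t map_size = 2 << 20;

        /* Same shape as the non-XIP create_kernel_page_table() loop above. */
        for (uint64_t va = kernel_map.virt_addr;
             va < kernel_map.virt_addr + kernel_map.size; va += map_size)
            print_mapping(va, kernel_map.phys_addr + (va - kernel_map.virt_addr), map_size);

        return 0;
    }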

arch/riscv/mm/physaddr.c

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys);

 phys_addr_t __phys_addr_symbol(unsigned long x)
 {
-        unsigned long kernel_start = (unsigned long)kernel_virt_addr;
+        unsigned long kernel_start = kernel_map.virt_addr;
         unsigned long kernel_end = (unsigned long)_end;

         /*

arch/riscv/mm/ptdump.c

Lines changed: 1 addition & 1 deletion

@@ -379,7 +379,7 @@ static int __init ptdump_init(void)
         address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
 #ifdef CONFIG_64BIT
         address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
-        address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr;
+        address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
 #endif

         kernel_ptd_info.base_addr = KERN_VIRT_START;
