Commit d8a2c0f

Merge branch 'for-next/kexec' into for-next/core
* for-next/kexec:
  arm64: trans_pgd: remove trans_pgd_map_page()
  arm64: kexec: remove cpu-reset.h
  arm64: kexec: remove the pre-kexec PoC maintenance
  arm64: kexec: keep MMU enabled during kexec relocation
  arm64: kexec: install a copy of the linear-map
  arm64: kexec: use ld script for relocation function
  arm64: kexec: relocate in EL1 mode
  arm64: kexec: configure EL2 vectors for kexec
  arm64: kexec: pass kimage as the only argument to relocation function
  arm64: kexec: Use dcache ops macros instead of open-coding
  arm64: kexec: skip relocation code for inplace kexec
  arm64: kexec: flush image and lists during kexec load time
  arm64: hibernate: abstract ttrb0 setup function
  arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors
  arm64: kernel: add helper for booted at EL2 and not VHE
2 parents: 99fe09c + 6091dd9

19 files changed: +357, -340 lines

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion

@@ -1135,7 +1135,7 @@ config CRASH_DUMP

 config TRANS_TABLE
 	def_bool y
-	depends on HIBERNATION
+	depends on HIBERNATION || KEXEC_CORE

 config XEN_DOM0
 	def_bool y

arch/arm64/include/asm/assembler.h

Lines changed: 42 additions & 7 deletions

@@ -380,19 +380,19 @@ alternative_endif

 /*
  * Macro to perform a data cache maintenance for the interval
- * [start, end)
+ * [start, end) with dcache line size explicitly provided.
  *
  *	op:		operation passed to dc instruction
  *	domain:		domain used in dsb instruciton
  *	start:		starting virtual address of the region
  *	end:		end virtual address of the region
+ *	linesz:		dcache line size
  *	fixup:		optional label to branch to on user fault
- *	Corrupts:	start, end, tmp1, tmp2
+ *	Corrupts:	start, end, tmp
  */
-	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
-	dcache_line_size \tmp1, \tmp2
-	sub	\tmp2, \tmp1, #1
-	bic	\start, \start, \tmp2
+	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
+	sub	\tmp, \linesz, #1
+	bic	\start, \start, \tmp
 .Ldcache_op\@:
 	.ifc	\op, cvau
 	__dcache_op_workaround_clean_cache \op, \start

@@ -411,14 +411,30 @@ alternative_endif
 	.endif
 	.endif
 	.endif
-	add	\start, \start, \tmp1
+	add	\start, \start, \linesz
 	cmp	\start, \end
 	b.lo	.Ldcache_op\@
 	dsb	\domain

 	_cond_extable .Ldcache_op\@, \fixup
 	.endm

+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [start, end)
+ *
+ *	op:		operation passed to dc instruction
+ *	domain:		domain used in dsb instruciton
+ *	start:		starting virtual address of the region
+ *	end:		end virtual address of the region
+ *	fixup:		optional label to branch to on user fault
+ *	Corrupts:	start, end, tmp1, tmp2
+ */
+	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
+	dcache_line_size \tmp1, \tmp2
+	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
+	.endm
+
 /*
  * Macro to perform an instruction cache maintenance for the interval
  * [start, end)

@@ -442,6 +458,25 @@ alternative_endif
 	_cond_extable .Licache_op\@, \fixup
 	.endm

+/*
+ * To prevent the possibility of old and new partial table walks being visible
+ * in the tlb, switch the ttbr to a zero page when we invalidate the old
+ * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
+ * Even switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
+	phys_to_ttbr \tmp, \zero_page
+	msr	ttbr1_el1, \tmp
+	isb
+	tlbi	vmalle1
+	dsb	nsh
+	phys_to_ttbr \tmp, \page_table
+	offset_ttbr1 \tmp, \tmp2
+	msr	ttbr1_el1, \tmp
+	isb
+	.endm
+
 /*
  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
  */
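
The new break_before_make_ttbr_switch macro encodes the architecturally required break-before-make sequence: point TTBR1_EL1 at empty tables, invalidate the TLB, then install the real tables. Below is a rough C-level illustration of the same ordering; it is a sketch only (the real switch must run from code that stays mapped across the change, which is why it is an assembly macro). write_sysreg(), isb(), dsb() and phys_to_ttbr() are existing kernel helpers, the offset_ttbr1 fixup for 52-bit VAs is omitted, and the function name is hypothetical.

/* Illustration of break_before_make_ttbr_switch's ordering; not kernel code. */
static void bbm_ttbr1_switch_sketch(phys_addr_t zero_page, phys_addr_t pgd_phys)
{
	write_sysreg(phys_to_ttbr(zero_page), ttbr1_el1); /* "break": empty tables */
	isb();
	asm volatile("tlbi vmalle1" : : : "memory");      /* drop stale walks */
	dsb(nsh);
	write_sysreg(phys_to_ttbr(pgd_phys), ttbr1_el1);  /* "make": new tables */
	isb();
}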

arch/arm64/include/asm/kexec.h

Lines changed: 12 additions & 0 deletions

@@ -90,12 +90,24 @@ static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif

+#if defined(CONFIG_KEXEC_CORE)
+void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+		      unsigned long arg0, unsigned long arg1,
+		      unsigned long arg2);
+#endif
+
 #define ARCH_HAS_KIMAGE_ARCH

 struct kimage_arch {
 	void *dtb;
 	phys_addr_t dtb_mem;
 	phys_addr_t kern_reloc;
+	phys_addr_t el2_vectors;
+	phys_addr_t ttbr0;
+	phys_addr_t ttbr1;
+	phys_addr_t zero_page;
+	unsigned long phys_offset;
+	unsigned long t0sz;
 };

 #ifdef CONFIG_KEXEC_FILE
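
With cpu_soft_restart() declared for C callers (its cpu-reset.h wrapper is deleted below), the in-place kexec path can branch straight to the new image without running the relocation loop. A hypothetical call site, loosely modelled on what machine_kexec() does after this series; the condition and argument values are illustrative, not the exact kernel code:

/* Illustrative only: an image flagged IND_DONE needs no relocation. */
if (kimage->head & IND_DONE)
	cpu_soft_restart(is_hyp_nvhe(), kimage->start,
			 kimage->arch.dtb_mem, 0, 0);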

arch/arm64/include/asm/mmu_context.h

Lines changed: 24 additions & 0 deletions

@@ -115,6 +115,30 @@ static inline void cpu_install_idmap(void)
 	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }

+/*
+ * Load our new page tables. A strict BBM approach requires that we ensure that
+ * TLBs are free of any entries that may overlap with the global mappings we are
+ * about to install.
+ *
+ * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
+ * services), while for a userspace-driven test_resume cycle it points to
+ * userspace page tables (and we must point it at a zero page ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
+ */
+static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
+{
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	__cpu_set_tcr_t0sz(t0sz);
+
+	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
+	write_sysreg(ttbr0, ttbr0_el1);
+	isb();
+}
+
 /*
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
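
cpu_install_ttbr0() is the TTBR0 counterpart of the TTBR1 machinery above, abstracted out of hibernate so kexec can reuse it. A minimal sketch of the pairing under this series, using the struct kimage_arch fields added earlier; reloc_code is a hypothetical page holding the relocation routine, and the call sites are illustrative:

/* At kexec load time (sketch): build an identity mapping of the relocation
 * page and record its TTBR0 value and T0SZ in the image. */
rc = trans_pgd_idmap_page(&info, &kimage->arch.ttbr0,
			  &kimage->arch.t0sz, reloc_code);

/* At kexec time (sketch): install it so relocation can run with the MMU on. */
cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);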

arch/arm64/include/asm/sections.h

Lines changed: 1 addition & 0 deletions

@@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];

 #endif /* __ASM_SECTIONS_H */
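
These markers bracket the relocation routine that the series' ld script change places in its own section, so load-time code can treat it as ordinary data. A minimal sketch of how such markers are consumed; reloc_code is a hypothetical destination buffer, not a symbol from this series:

/* Sketch: size and copy the self-contained relocation routine. */
unsigned long reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;

memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);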

arch/arm64/include/asm/trans_pgd.h

Lines changed: 8 additions & 6 deletions

@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */

 /*
- * Copyright (c) 2020, Microsoft Corporation.
- * Pavel Tatashin <[email protected]>
+ * Copyright (c) 2021, Microsoft Corporation.
+ * Pasha Tatashin <[email protected]>
  */

 #ifndef _ASM_TRANS_TABLE_H

@@ -15,7 +15,7 @@
 /*
  * trans_alloc_page
  *	- Allocator that should return exactly one zeroed page, if this
- *	  allocator fails, trans_pgd_create_copy() and trans_pgd_map_page()
+ *	  allocator fails, trans_pgd_create_copy() and trans_pgd_idmap_page()
  *	  return -ENOMEM error.
  *
  * trans_alloc_arg

@@ -30,10 +30,12 @@ struct trans_pgd_info {
 int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **trans_pgd,
			  unsigned long start, unsigned long end);

-int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
-		       void *page, unsigned long dst_addr, pgprot_t pgprot);
-
 int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
			 unsigned long *t0sz, void *page);

+int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
+			       phys_addr_t *el2_vectors);
+
+extern char trans_pgd_stub_vectors[];
+
 #endif /* _ASM_TRANS_TABLE_H */
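
Putting the reworked API together: trans_pgd_create_copy() builds a throwaway copy of the linear map so relocation can run with the MMU enabled, drawing pages from the caller-supplied allocator. A load-time sketch; kexec_page_alloc is a hypothetical allocator callback, and error handling is abbreviated:

/* Sketch of load-time use of the trans_pgd API; not verbatim kernel code. */
struct trans_pgd_info info = {
	.trans_alloc_page = kexec_page_alloc,	/* hypothetical callback */
	.trans_alloc_arg  = kimage,
};
pgd_t *trans_pgd = NULL;
int rc;

rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END);
if (rc)
	return rc;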

arch/arm64/include/asm/virt.h

Lines changed: 7 additions & 0 deletions

@@ -67,6 +67,8 @@
  */
 extern u32 __boot_cpu_mode[2];

+#define ARM64_VECTOR_TABLE_LEN	SZ_2K
+
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);

@@ -128,6 +130,11 @@ static __always_inline bool is_protected_kvm_enabled(void)
 	return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
 }

+static inline bool is_hyp_nvhe(void)
+{
+	return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
+}
+
 #endif /* __ASSEMBLY__ */

 #endif /* ! __ASM__VIRT_H */
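
is_hyp_nvhe() names the one configuration where kexec has to manage EL2 itself: the kernel booted at EL2 but runs at EL1 (non-VHE), so the EL2 stub vectors need their own ARM64_VECTOR_TABLE_LEN-sized copy mapped for the relocation window. Continuing the load-time sketch above (illustrative only):

/* Sketch: only a non-VHE kernel that owns EL2 needs the vector copy. */
if (is_hyp_nvhe()) {
	rc = trans_pgd_copy_el2_vectors(&info, &kimage->arch.el2_vectors);
	if (rc)
		return rc;
}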

arch/arm64/kernel/asm-offsets.c

Lines changed: 11 additions & 0 deletions

@@ -9,6 +9,7 @@

 #include <linux/arm_sdei.h>
 #include <linux/sched.h>
+#include <linux/kexec.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>

@@ -170,6 +171,16 @@ int main(void)
   DEFINE(PTRAUTH_KERNEL_KEY_APIA,	offsetof(struct ptrauth_keys_kernel, apia));
 #endif
   BLANK();
+#endif
+#ifdef CONFIG_KEXEC_CORE
+  DEFINE(KIMAGE_ARCH_DTB_MEM,		offsetof(struct kimage, arch.dtb_mem));
+  DEFINE(KIMAGE_ARCH_EL2_VECTORS,	offsetof(struct kimage, arch.el2_vectors));
+  DEFINE(KIMAGE_ARCH_ZERO_PAGE,		offsetof(struct kimage, arch.zero_page));
+  DEFINE(KIMAGE_ARCH_PHYS_OFFSET,	offsetof(struct kimage, arch.phys_offset));
+  DEFINE(KIMAGE_ARCH_TTBR1,		offsetof(struct kimage, arch.ttbr1));
+  DEFINE(KIMAGE_HEAD,			offsetof(struct kimage, head));
+  DEFINE(KIMAGE_START,			offsetof(struct kimage, start));
+  BLANK();
 #endif
   return 0;
 }
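
These constants exist so that the relocation assembly can index struct kimage without any C: asm-offsets.c is compiled to assembly, and a Kbuild script turns each DEFINE() marker in that output into a #define in the generated asm-offsets.h. Roughly how DEFINE() works, per include/linux/kbuild.h:

/* DEFINE() embeds a compile-time constant in the assembler output, where a
 * Kbuild script rewrites it as "#define KIMAGE_HEAD <offset>" and so on. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))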

arch/arm64/kernel/cpu-reset.S

Lines changed: 3 additions & 4 deletions

@@ -16,8 +16,7 @@
 .pushsection	.idmap.text, "awx"

 /*
- * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
- * cpu_soft_restart.
+ * cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2)
  *
  * @el2_switch: Flag to indicate a switch to EL2 is needed.
  * @entry: Location to jump to for soft reset.

@@ -29,7 +28,7 @@
  * branch to what would be the reset vector. It must be executed with the
  * flat identity mapping.
  */
-SYM_CODE_START(__cpu_soft_restart)
+SYM_CODE_START(cpu_soft_restart)
	mov_q	x12, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	/*

@@ -48,6 +47,6 @@ SYM_CODE_START(__cpu_soft_restart)
	mov	x1, x3				// arg1
	mov	x2, x4				// arg2
	br	x8
-SYM_CODE_END(__cpu_soft_restart)
+SYM_CODE_END(cpu_soft_restart)

 .popsection

arch/arm64/kernel/cpu-reset.h

Lines changed: 0 additions & 32 deletions
This file was deleted.
