Skip to content

Commit 2ced0f3

Browse files
ardbiesheuvel authored and ctmarinas committed
arm64: head: Switch endianness before populating the ID map
Ensure that the endianness used for populating the ID map matches the endianness that the running kernel will be using, as this is no longer guaranteed now that create_idmap() is invoked before init_kernel_el(). Note that doing so is only safe if the MMU is off, as switching the endianness with the MMU on results in the active ID map to become invalid. So also clear the M bit when toggling the EE bit in SCTLR, and mark the MMU as disabled at boot. Note that the same issue has resulted in preserve_boot_args() recording the contents of registers X0 ... X3 in the wrong byte order, although this is arguably a very minor concern. Fixes: 32b135a ("arm64: head: avoid cache invalidation when entering with the MMU on") Reported-by: Nathan Chancellor <[email protected]> Signed-off-by: Ard Biesheuvel <[email protected]> Tested-by: Nathan Chancellor <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Catalin Marinas <[email protected]>
1 parent 6178617 commit 2ced0f3

File tree

2 files changed

+24
-2
lines changed

2 files changed

+24
-2
lines changed

arch/arm64/include/asm/sysreg.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -575,6 +575,7 @@
 #define SCTLR_ELx_DSSBS		(BIT(44))
 #define SCTLR_ELx_ATA		(BIT(43))
 
+#define SCTLR_ELx_EE_SHIFT	25
 #define SCTLR_ELx_ENIA_SHIFT	31
 
 #define SCTLR_ELx_ITFSB		(BIT(37))
@@ -583,7 +584,7 @@
 #define SCTLR_ELx_LSMAOE	(BIT(29))
 #define SCTLR_ELx_nTLSMD	(BIT(28))
 #define SCTLR_ELx_ENDA		(BIT(27))
-#define SCTLR_ELx_EE		(BIT(25))
+#define SCTLR_ELx_EE		(BIT(SCTLR_ELx_EE_SHIFT))
 #define SCTLR_ELx_EIS		(BIT(22))
 #define SCTLR_ELx_IESB		(BIT(21))
 #define SCTLR_ELx_TSCXT		(BIT(20))

arch/arm64/kernel/head.S

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,10 +129,31 @@ SYM_CODE_START_LOCAL(record_mmu_state)
 	mrs	x19, sctlr_el1
 	b.ne	0f
 	mrs	x19, sctlr_el2
-0:	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
+0:
+CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
 	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
 	csel	x19, xzr, x19, eq		// clear x19 if Z
 	ret
+
+	/*
+	 * Set the correct endianness early so all memory accesses issued
+	 * before init_kernel_el() occur in the correct byte order. Note that
+	 * this means the MMU must be disabled, or the active ID map will end
+	 * up getting interpreted with the wrong byte order.
+	 */
+1:	eor	x19, x19, #SCTLR_ELx_EE
+	bic	x19, x19, #SCTLR_ELx_M
+	b.ne	2f
+	pre_disable_mmu_workaround
+	msr	sctlr_el2, x19
+	b	3f
+	pre_disable_mmu_workaround
+2:	msr	sctlr_el1, x19
+3:	isb
+	mov	x19, xzr
+	ret
 SYM_CODE_END(record_mmu_state)
137158

138159
/*

0 commit comments

Comments
 (0)