
Commit 806dc82

Merge branch 'for-next/asm-cleanups' into for-next/core
* for-next/asm-cleanups:
  : Various asm clean-ups (alignment, mov_q vs ldr, .idmap)
  arm64: move kimage_vaddr to .rodata
  arm64: use mov_q instead of literal ldr
2 parents: 0829a07 + 6cf9a2d
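
Context for the recurring change in this merge: "ldr xN, =CONST" is a pseudo-instruction that loads the constant from a literal pool the assembler emits near the code, while the kernel's mov_q macro (defined in arch/arm64/include/asm/assembler.h) materialises the constant in the register with a movz/movk sequence, so no pool entry and no data load are needed. A minimal sketch of the worst-case expansion for a made-up 64-bit constant; the real macro checks which 16-bit chunks the value actually needs and emits fewer instructions when it can:

	// roughly what "mov_q x13, 0x1122334455667788" expands to
	movz	x13, #0x1122, lsl #48	// set bits [63:48], zero the rest
	movk	x13, #0x3344, lsl #32	// keep other bits, insert [47:32]
	movk	x13, #0x5566, lsl #16	// insert bits [31:16]
	movk	x13, #0x7788		// insert bits [15:0]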

6 files changed: +15, -17 lines

arch/arm64/kernel/cpu-reset.S

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 ENTRY(__cpu_soft_restart)
 	/* Clear sctlr_el1 flags. */
 	mrs	x12, sctlr_el1
-	ldr	x13, =SCTLR_ELx_FLAGS
+	mov_q	x13, SCTLR_ELx_FLAGS
 	bic	x12, x12, x13
 	pre_disable_mmu_workaround
 	msr	sctlr_el1, x12

arch/arm64/kernel/head.S

Lines changed: 7 additions & 5 deletions
@@ -457,17 +457,19 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	b	start_kernel
 SYM_FUNC_END(__primary_switched)

+	.pushsection ".rodata", "a"
+SYM_DATA_START(kimage_vaddr)
+	.quad	_text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
+EXPORT_SYMBOL(kimage_vaddr)
+	.popsection
+
 /*
  * end early head section, begin head code that is also used for
  * hotplug and needs to have the same protections as the text region
  */
 	.section ".idmap.text","awx"

-SYM_DATA_START(kimage_vaddr)
-	.quad	_text - TEXT_OFFSET
-SYM_DATA_END(kimage_vaddr)
-EXPORT_SYMBOL(kimage_vaddr)
-
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
  * sane before dropping to EL1.
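
The kimage_vaddr move is about section permissions rather than the value: .idmap.text is declared with flags "awx" (allocatable, writable, executable), so a data word placed there sat in a writable, executable mapping, whereas .rodata with flags "a" is read-only and never executed. The .pushsection/.popsection pair saves and restores the section being assembled into, so the surrounding early-boot code is unaffected. A minimal sketch of the idiom, using a hypothetical example_word label:

	.pushsection ".rodata", "a"	// "a": allocatable, read-only data
example_word:				// hypothetical label, for illustration
	.quad	0x1234			// this word now lands in .rodata
	.popsection			// resume the previous section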

arch/arm64/kernel/hyp-stub.S

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ el1_sync:
 	beq	9f				// Nothing to reset!

 	/* Someone called kvm_call_hyp() against the hyp-stub... */
-	ldr	x0, =HVC_STUB_ERR
+	mov_q	x0, HVC_STUB_ERR
 	eret

 9:	mov	x0, xzr

arch/arm64/kernel/relocate_kernel.S

Lines changed: 1 addition & 3 deletions
@@ -41,7 +41,7 @@ ENTRY(arm64_relocate_new_kernel)
 	cmp	x0, #CurrentEL_EL2
 	b.ne	1f
 	mrs	x0, sctlr_el2
-	ldr	x1, =SCTLR_ELx_FLAGS
+	mov_q	x1, SCTLR_ELx_FLAGS
 	bic	x0, x0, x1
 	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0

@@ -113,8 +113,6 @@ ENTRY(arm64_relocate_new_kernel)

 ENDPROC(arm64_relocate_new_kernel)

-.ltorg
-
 .align 3	/* To keep the 64-bit values below naturally aligned. */

 .Lcopy_end:
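
The .ltorg deletion here (and the one in hyp-init.S below) falls straight out of the conversion: .ltorg tells the assembler to dump any pending literal-pool entries at that point, and once the last "ldr xN, =CONST" in the file is gone there is nothing left to dump. A sketch of the old pattern, with an arbitrary constant:

	ldr	x0, =0x123456789abcdef0	// pc-relative load from a literal pool
	ret
	.ltorg				// pooled constants are emitted here,
					// safely out of the instruction stream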

arch/arm64/kvm/hyp-init.S

Lines changed: 4 additions & 6 deletions
@@ -60,7 +60,7 @@ alternative_else_nop_endif
 	msr	ttbr0_el2, x4

 	mrs	x4, tcr_el1
-	ldr	x5, =TCR_EL2_MASK
+	mov_q	x5, TCR_EL2_MASK
 	and	x4, x4, x5
 	mov	x5, #TCR_EL2_RES1
 	orr	x4, x4, x5

@@ -102,7 +102,7 @@ alternative_else_nop_endif
 	 * as well as the EE bit on BE. Drop the A flag since the compiler
 	 * is allowed to generate unaligned accesses.
 	 */
-	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
 CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb

@@ -142,7 +142,7 @@ reset:
 	 * case we coming via HVC_SOFT_RESTART.
 	 */
 	mrs	x5, sctlr_el2
-	ldr	x6, =SCTLR_ELx_FLAGS
+	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
 	pre_disable_mmu_workaround
 	msr	sctlr_el2, x5

@@ -155,11 +155,9 @@ reset:
 	eret

 1:	/* Bad stub call */
-	ldr	x0, =HVC_STUB_ERR
+	mov_q	x0, HVC_STUB_ERR
 	eret

 SYM_CODE_END(__kvm_handle_stub_hvc)

-	.ltorg
-
 	.popsection
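
One constraint worth noting for the composed constants above: mov_q picks its movz/movk chunks with assembly-time .if tests, so its argument must be an absolute expression. Flag combinations such as (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) fold to a single value and qualify; a relocatable symbol address generally does not, and such loads still need "ldr =" or an adrp/add pair. A hedged sketch:

	mov_q	x4, (0xbad << 4) | 0xf	// fine: folds to one absolute value
	ldr	x4, =some_symbol	// hypothetical symbol: its address is
					// only known at link time, so a literal
					// pool (or adrp/add) is still required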

arch/arm64/mm/proc.S

Lines changed: 1 addition & 1 deletion
@@ -411,7 +411,7 @@ SYM_FUNC_START(__cpu_setup)
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
 	 */
-	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
 	tcr_clear_errata_bits x10, x9, x5
