
Commit b18f729

clementleger authored and palmer-dabbelt committed
riscv: use ".L" local labels in assembly when applicable
For the sake of coherency, use ".L" local labels in assembly where applicable. This also avoids confusing kprobes: the size of a function is computed by checking where the next visible symbol is located, so an intermediate visible label can make a function appear much shorter than it actually is, causing kprobes to fail when applied at the specified offset.

Signed-off-by: Clément Léger <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 57a4542 commit b18f729
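As a minimal sketch of the rationale (not part of this commit; the function and label names below are made up for illustration), a label without the ".L" prefix is emitted into the object's symbol table, so a next-symbol-based size calculation treats the function as ending at that label, while a ".L" local label stays assembler-internal:

	.text
	.globl my_func
my_func:
	beqz a0, branch_target      /* visible label: becomes a symbol, splits my_func for symbol-based sizing */
	li a0, 1
branch_target:
	ret

	.globl my_func_fixed
my_func_fixed:
	beqz a0, .Lbranch_target    /* ".L" local label: not emitted into the symbol table */
	li a0, 1
.Lbranch_target:
	ret

Running nm on the resulting object should list branch_target but not .Lbranch_target, which is why the next-visible-symbol heuristic no longer truncates the enclosing function.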

File tree: 4 files changed, +44 −44 lines

arch/riscv/kernel/entry.S

Lines changed: 3 additions & 3 deletions
@@ -26,9 +26,9 @@ SYM_CODE_START(handle_exception)
 	 * register will contain 0, and we should continue on the current TP.
 	 */
 	csrrw tp, CSR_SCRATCH, tp
-	bnez tp, _save_context
+	bnez tp, .Lsave_context
 
-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
 	csrr tp, CSR_SCRATCH
 	REG_S sp, TASK_TI_KERNEL_SP(tp)
 
@@ -40,7 +40,7 @@ _restore_kernel_tpsp:
 	REG_L sp, TASK_TI_KERNEL_SP(tp)
 #endif
 
-_save_context:
+.Lsave_context:
 	REG_S sp, TASK_TI_USER_SP(tp)
 	REG_L sp, TASK_TI_KERNEL_SP(tp)
 	addi sp, sp, -(PT_SIZE_ON_STACK)

arch/riscv/kernel/head.S

Lines changed: 9 additions & 9 deletions
@@ -164,12 +164,12 @@ secondary_start_sbi:
 	XIP_FIXUP_OFFSET a0
 	call relocate_enable_mmu
 #endif
-	call setup_trap_vector
+	call .Lsetup_trap_vector
 	tail smp_callin
 #endif /* CONFIG_SMP */
 
 	.align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
 	/* Set trap vector to exception handler */
 	la a0, handle_exception
 	csrw CSR_TVEC, a0
@@ -206,15 +206,15 @@ ENTRY(_start_kernel)
 	 * not implement PMPs, so we set up a quick trap handler to just skip
 	 * touching the PMPs on any trap.
 	 */
-	la a0, pmp_done
+	la a0, .Lpmp_done
 	csrw CSR_TVEC, a0
 
 	li a0, -1
 	csrw CSR_PMPADDR0, a0
 	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
 	csrw CSR_PMPCFG0, a0
 	.align 2
-pmp_done:
+.Lpmp_done:
 
 	/*
 	 * The hartid in a0 is expected later on, and we have no firmware
@@ -275,12 +275,12 @@ pmp_done:
 	/* Clear BSS for flat non-ELF images */
 	la a3, __bss_start
 	la a4, __bss_stop
-	ble a4, a3, clear_bss_done
-clear_bss:
+	ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
 	REG_S zero, (a3)
 	add a3, a3, RISCV_SZPTR
-	blt a3, a4, clear_bss
-clear_bss_done:
+	blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
 #endif
 	la a2, boot_cpu_hartid
 	XIP_FIXUP_OFFSET a2
@@ -305,7 +305,7 @@ clear_bss_done:
 	call relocate_enable_mmu
 #endif /* CONFIG_MMU */
 
-	call setup_trap_vector
+	call .Lsetup_trap_vector
 	/* Restore C environment */
 	la tp, init_task
 	la sp, init_thread_union + THREAD_SIZE

arch/riscv/kernel/mcount.S

Lines changed: 5 additions & 5 deletions
@@ -85,24 +85,24 @@ ENTRY(MCOUNT_NAME)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	la t0, ftrace_graph_return
 	REG_L t1, 0(t0)
-	bne t1, t4, do_ftrace_graph_caller
+	bne t1, t4, .Ldo_ftrace_graph_caller
 
 	la t3, ftrace_graph_entry
 	REG_L t2, 0(t3)
 	la t6, ftrace_graph_entry_stub
-	bne t2, t6, do_ftrace_graph_caller
+	bne t2, t6, .Ldo_ftrace_graph_caller
 #endif
 	la t3, ftrace_trace_function
 	REG_L t5, 0(t3)
-	bne t5, t4, do_trace
+	bne t5, t4, .Ldo_trace
 	ret
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/*
 	 * A pseudo representation for the function graph tracer:
 	 * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
 	 */
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
 	addi a0, s0, -SZREG
 	mv a1, ra
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
 	 * A pseudo representation for the function tracer:
 	 * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
 	 */
-do_trace:
+.Ldo_trace:
 	REG_L a1, -SZREG(s0)
 	mv a0, ra
 
arch/riscv/lib/memmove.S

Lines changed: 27 additions & 27 deletions
@@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 
 	/* Return if nothing to do */
-	beq a0, a1, return_from_memmove
-	beqz a2, return_from_memmove
+	beq a0, a1, .Lreturn_from_memmove
+	beqz a2, .Lreturn_from_memmove
 
 	/*
 	 * Register Uses
@@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
 	 * small enough not to bother.
 	 */
 	andi t0, a2, -(2 * SZREG)
-	beqz t0, byte_copy
+	beqz t0, .Lbyte_copy
 
 	/*
 	 * Now solve for t5 and t6.
@@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 	xor t0, a0, a1
 	andi t1, t0, (SZREG - 1)
-	beqz t1, coaligned_copy
+	beqz t1, .Lcoaligned_copy
 	/* Fall through to misaligned fixup copy */
 
-misaligned_fixup_copy:
-	bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+	bltu a1, a0, .Lmisaligned_fixup_copy_reverse
 
-misaligned_fixup_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward
 
 	andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
 	mv t3, t6 /* Fix the dest pointer in case the loop was broken */
 
 	add a1, t3, a5 /* Restore the src pointer */
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-misaligned_fixup_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse
 
 	andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
 	mv t4, t5 /* Fix the dest pointer in case the loop was broken */
 
 	add a4, t4, a5 /* Restore the src pointer */
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 	/*
 	 * Simple copy loops for SZREG co-aligned memory locations.
 	 * These also make calls to do byte copies for any unaligned
 	 * data at their terminations.
 	 */
-coaligned_copy:
-	bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+	bltu a1, a0, .Lcoaligned_copy_reverse
 
-coaligned_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward
 
 1:
 	REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +235,10 @@ coaligned_copy_forward:
 	REG_S t1, (-1 * SZREG)(t3)
 	bne t3, t6, 1b
 
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-coaligned_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse
 
 1:
 	REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +247,7 @@ coaligned_copy_reverse:
 	REG_S t1, ( 0 * SZREG)(t4)
 	bne t4, t5, 1b
 
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 	/*
 	 * These are basically sub-functions within the function. They
@@ -258,7 +258,7 @@ coaligned_copy_reverse:
 	 * up from where they were left and we avoid code duplication
 	 * without any overhead except the call in and return jumps.
 	 */
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
 	beq t3, t5, 2f
 1:
 	lb t1, 0(a1)
@@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
 2:
 	jalr zero, 0x0(t0) /* Return to multibyte copy loop */
 
-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
 	beq t4, t6, 2f
 1:
 	lb t1, -1(a4)
@@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
 	 * These will byte copy until they reach the end of data to copy.
 	 * At that point, they will call to return from memmove.
 	 */
-byte_copy:
-	bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+	bltu a1, a0, .Lbyte_copy_reverse
 
-byte_copy_forward:
+.Lbyte_copy_forward:
 	beq t3, t4, 2f
 1:
 	lb t1, 0(a1)
@@ -299,7 +299,7 @@ byte_copy_forward:
 2:
 	ret
 
-byte_copy_reverse:
+.Lbyte_copy_reverse:
 	beq t4, t3, 2f
 1:
 	lb t1, -1(a4)
@@ -309,7 +309,7 @@ byte_copy_reverse:
 	bne t4, t3, 1b
 2:
 
-return_from_memmove:
+.Lreturn_from_memmove:
 	ret
 
 SYM_FUNC_END(memmove)
