Skip to content

Commit 76329c6

Browse files
clementleger authored and palmer-dabbelt committed
riscv: Use SYM_*() assembly macros instead of deprecated ones
ENTRY()/END()/WEAK() macros are deprecated and we should make use of the new SYM_*() macros [1] for better annotation of symbols. Replace the deprecated ones with the new ones and fix wrong usage of END()/ENDPROC() to correctly describe the symbols. [1] https://docs.kernel.org/core-api/asm-annotations.html Signed-off-by: Clément Léger <[email protected]> Reviewed-by: Andrew Jones <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent b18f729 commit 76329c6

File tree

17 files changed

+60
-74
lines changed

17 files changed

+60
-74
lines changed

arch/riscv/kernel/copy-unaligned.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
1010
/* Performs a memcpy without aligning buffers, using word loads and stores. */
1111
/* Note: The size is truncated to a multiple of 8 * SZREG */
12-
ENTRY(__riscv_copy_words_unaligned)
12+
SYM_FUNC_START(__riscv_copy_words_unaligned)
1313
andi a4, a2, ~((8*SZREG)-1)
1414
beqz a4, 2f
1515
add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
3636

3737
2:
3838
ret
39-
END(__riscv_copy_words_unaligned)
39+
SYM_FUNC_END(__riscv_copy_words_unaligned)
4040

4141
/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
4242
/* Performs a memcpy without aligning buffers, using only byte accesses. */
4343
/* Note: The size is truncated to a multiple of 8 */
44-
ENTRY(__riscv_copy_bytes_unaligned)
44+
SYM_FUNC_START(__riscv_copy_bytes_unaligned)
4545
andi a4, a2, ~(8-1)
4646
beqz a4, 2f
4747
add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
6868

6969
2:
7070
ret
71-
END(__riscv_copy_bytes_unaligned)
71+
SYM_FUNC_END(__riscv_copy_bytes_unaligned)

arch/riscv/kernel/fpu.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
#include <asm/csr.h>
2020
#include <asm/asm-offsets.h>
2121

22-
ENTRY(__fstate_save)
22+
SYM_FUNC_START(__fstate_save)
2323
li a2, TASK_THREAD_F0
2424
add a0, a0, a2
2525
li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
6060
sw t0, TASK_THREAD_FCSR_F0(a0)
6161
csrc CSR_STATUS, t1
6262
ret
63-
ENDPROC(__fstate_save)
63+
SYM_FUNC_END(__fstate_save)
6464

65-
ENTRY(__fstate_restore)
65+
SYM_FUNC_START(__fstate_restore)
6666
li a2, TASK_THREAD_F0
6767
add a0, a0, a2
6868
li t1, SR_FS
@@ -103,7 +103,7 @@ ENTRY(__fstate_restore)
103103
fscsr t0
104104
csrc CSR_STATUS, t1
105105
ret
106-
ENDPROC(__fstate_restore)
106+
SYM_FUNC_END(__fstate_restore)
107107

108108
#define get_f32(which) fmv.x.s a0, which; j 2f
109109
#define put_f32(which) fmv.s.x which, a1; j 2f

arch/riscv/kernel/head.S

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
#include "efi-header.S"
2020

2121
__HEAD
22-
ENTRY(_start)
22+
SYM_CODE_START(_start)
2323
/*
2424
* Image header expected by Linux boot-loaders. The image header data
2525
* structure is described in asm/image.h.
@@ -187,9 +187,9 @@ secondary_start_sbi:
187187
wfi
188188
j .Lsecondary_park
189189

190-
END(_start)
190+
SYM_CODE_END(_start)
191191

192-
ENTRY(_start_kernel)
192+
SYM_CODE_START(_start_kernel)
193193
/* Mask all interrupts */
194194
csrw CSR_IE, zero
195195
csrw CSR_IP, zero
@@ -348,10 +348,10 @@ ENTRY(_start_kernel)
348348
tail .Lsecondary_start_common
349349
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
350350

351-
END(_start_kernel)
351+
SYM_CODE_END(_start_kernel)
352352

353353
#ifdef CONFIG_RISCV_M_MODE
354-
ENTRY(reset_regs)
354+
SYM_CODE_START_LOCAL(reset_regs)
355355
li sp, 0
356356
li gp, 0
357357
li tp, 0
@@ -449,5 +449,5 @@ ENTRY(reset_regs)
449449
.Lreset_regs_done_vector:
450450
#endif /* CONFIG_RISCV_ISA_V */
451451
ret
452-
END(reset_regs)
452+
SYM_CODE_END(reset_regs)
453453
#endif /* CONFIG_RISCV_M_MODE */

arch/riscv/kernel/hibernate-asm.S

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
*
2222
* Always returns 0
2323
*/
24-
ENTRY(__hibernate_cpu_resume)
24+
SYM_FUNC_START(__hibernate_cpu_resume)
2525
/* switch to hibernated image's page table. */
2626
csrw CSR_SATP, s0
2727
sfence.vma
@@ -34,31 +34,31 @@ ENTRY(__hibernate_cpu_resume)
3434
mv a0, zero
3535

3636
ret
37-
END(__hibernate_cpu_resume)
37+
SYM_FUNC_END(__hibernate_cpu_resume)
3838

3939
/*
4040
* Prepare to restore the image.
4141
* a0: satp of saved page tables.
4242
* a1: satp of temporary page tables.
4343
* a2: cpu_resume.
4444
*/
45-
ENTRY(hibernate_restore_image)
45+
SYM_FUNC_START(hibernate_restore_image)
4646
mv s0, a0
4747
mv s1, a1
4848
mv s2, a2
4949
REG_L s4, restore_pblist
5050
REG_L a1, relocated_restore_code
5151

5252
jr a1
53-
END(hibernate_restore_image)
53+
SYM_FUNC_END(hibernate_restore_image)
5454

5555
/*
5656
* The below code will be executed from a 'safe' page.
5757
* It first switches to the temporary page table, then starts to copy the pages
5858
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
5959
* to restore the CPU context.
6060
*/
61-
ENTRY(hibernate_core_restore_code)
61+
SYM_FUNC_START(hibernate_core_restore_code)
6262
/* switch to temp page table. */
6363
csrw satp, s1
6464
sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
7373
bnez s4, .Lcopy
7474

7575
jr s2
76-
END(hibernate_core_restore_code)
76+
SYM_FUNC_END(hibernate_core_restore_code)

arch/riscv/kernel/mcount-dyn.S

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@
8282
.endm
8383
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
8484

85-
ENTRY(ftrace_caller)
85+
SYM_FUNC_START(ftrace_caller)
8686
SAVE_ABI
8787

8888
addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
9191
mv a1, ra
9292
mv a3, sp
9393

94-
ftrace_call:
95-
.global ftrace_call
94+
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
9695
call ftrace_stub
9796

9897
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
102101
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
103102
mv a2, s0
104103
#endif
105-
ftrace_graph_call:
106-
.global ftrace_graph_call
104+
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
107105
call ftrace_stub
108106
#endif
109107
RESTORE_ABI
110108
jr t0
111-
ENDPROC(ftrace_caller)
109+
SYM_FUNC_END(ftrace_caller)
112110

113111
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
114-
ENTRY(ftrace_regs_caller)
112+
SYM_FUNC_START(ftrace_regs_caller)
115113
SAVE_ALL
116114

117115
addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
120118
mv a1, ra
121119
mv a3, sp
122120

123-
ftrace_regs_call:
124-
.global ftrace_regs_call
121+
SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
125122
call ftrace_stub
126123

127124
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
131128
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
132129
mv a2, s0
133130
#endif
134-
ftrace_graph_regs_call:
135-
.global ftrace_graph_regs_call
131+
SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
136132
call ftrace_stub
137133
#endif
138134

139135
RESTORE_ALL
140136
jr t0
141-
ENDPROC(ftrace_regs_caller)
137+
SYM_FUNC_END(ftrace_regs_caller)
142138
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

arch/riscv/kernel/mcount.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
6161
ret
6262
SYM_FUNC_END(ftrace_stub_graph)
6363

64-
ENTRY(return_to_handler)
64+
SYM_FUNC_START(return_to_handler)
6565
/*
6666
* On implementing the frame point test, the ideal way is to compare the
6767
* s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,11 +76,11 @@ ENTRY(return_to_handler)
7676
mv a2, a0
7777
RESTORE_RET_ABI_STATE
7878
jalr a2
79-
ENDPROC(return_to_handler)
79+
SYM_FUNC_END(return_to_handler)
8080
#endif
8181

8282
#ifndef CONFIG_DYNAMIC_FTRACE
83-
ENTRY(MCOUNT_NAME)
83+
SYM_FUNC_START(MCOUNT_NAME)
8484
la t4, ftrace_stub
8585
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
8686
la t0, ftrace_graph_return
@@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME)
126126
jalr t5
127127
RESTORE_ABI_STATE
128128
ret
129-
ENDPROC(MCOUNT_NAME)
129+
SYM_FUNC_END(MCOUNT_NAME)
130130
#endif
131131
EXPORT_SYMBOL(MCOUNT_NAME)

arch/riscv/kernel/probes/rethook_trampoline.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@
7575
REG_L x31, PT_T6(sp)
7676
.endm
7777

78-
ENTRY(arch_rethook_trampoline)
78+
SYM_CODE_START(arch_rethook_trampoline)
7979
addi sp, sp, -(PT_SIZE_ON_STACK)
8080
save_all_base_regs
8181

@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
9090
addi sp, sp, PT_SIZE_ON_STACK
9191

9292
ret
93-
ENDPROC(arch_rethook_trampoline)
93+
SYM_CODE_END(arch_rethook_trampoline)

arch/riscv/kernel/suspend_entry.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
.altmacro
1717
.option norelax
1818

19-
ENTRY(__cpu_suspend_enter)
19+
SYM_FUNC_START(__cpu_suspend_enter)
2020
/* Save registers (except A0 and T0-T6) */
2121
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
2222
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter)
5757

5858
/* Return to C code */
5959
ret
60-
END(__cpu_suspend_enter)
60+
SYM_FUNC_END(__cpu_suspend_enter)
6161

6262
SYM_TYPED_FUNC_START(__cpu_resume_enter)
6363
/* Load the global pointer */

arch/riscv/kernel/vdso/flush_icache.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
.text
1010
/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
11-
ENTRY(__vdso_flush_icache)
11+
SYM_FUNC_START(__vdso_flush_icache)
1212
.cfi_startproc
1313
#ifdef CONFIG_SMP
1414
li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
1919
#endif
2020
ret
2121
.cfi_endproc
22-
ENDPROC(__vdso_flush_icache)
22+
SYM_FUNC_END(__vdso_flush_icache)

arch/riscv/kernel/vdso/getcpu.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,11 @@
88

99
.text
1010
/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
11-
ENTRY(__vdso_getcpu)
11+
SYM_FUNC_START(__vdso_getcpu)
1212
.cfi_startproc
1313
/* For now, just do the syscall. */
1414
li a7, __NR_getcpu
1515
ecall
1616
ret
1717
.cfi_endproc
18-
ENDPROC(__vdso_getcpu)
18+
SYM_FUNC_END(__vdso_getcpu)

0 commit comments

Comments
 (0)