Skip to content

Commit 78762b0

Browse files
Jiri Slaby authored and suryasaimadhu committed
x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*
All these are functions which are invoked from elsewhere but they are not typical C functions. So annotate them using the new SYM_CODE_START. All these were not balanced with any END, so mark their ends by SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Boris Ostrovsky <[email protected]> [xen bits]
Reviewed-by: Rafael J. Wysocki <[email protected]> [hibernate]
Cc: Andy Lutomirski <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Len Brown <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: Pavel Machek <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Pingfan Liu <[email protected]>
Cc: Stefano Stabellini <[email protected]>
Cc: "Steven Rostedt (VMware)" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: x86-ml <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
1 parent 6dcc562 commit 78762b0

File tree

7 files changed

+22
-13
lines changed

7 files changed

+22
-13
lines changed

arch/x86/entry/entry_32.S

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
847847
* Xen doesn't set %esp to be precisely what the normal SYSENTER
848848
* entry point expects, so fix it up before using the normal path.
849849
*/
850-
ENTRY(xen_sysenter_target)
850+
SYM_CODE_START(xen_sysenter_target)
851851
addl $5*4, %esp /* remove xen-provided frame */
852852
jmp .Lsysenter_past_esp
853+
SYM_CODE_END(xen_sysenter_target)
853854
#endif
854855

855856
/*

arch/x86/kernel/acpi/wakeup_32.S

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -9,8 +9,7 @@
99
.code32
1010
ALIGN
1111

12-
ENTRY(wakeup_pmode_return)
13-
wakeup_pmode_return:
12+
SYM_CODE_START(wakeup_pmode_return)
1413
movw $__KERNEL_DS, %ax
1514
movw %ax, %ss
1615
movw %ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
3938
# jump to place where we left off
4039
movl saved_eip, %eax
4140
jmp *%eax
41+
SYM_CODE_END(wakeup_pmode_return)
4242

4343
bogus_magic:
4444
jmp bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
7272
popfl
7373
ret
7474

75-
ENTRY(do_suspend_lowlevel)
75+
SYM_CODE_START(do_suspend_lowlevel)
7676
call save_processor_state
7777
call save_registers
7878
pushl $3
@@ -87,6 +87,7 @@ ret_point:
8787
call restore_registers
8888
call restore_processor_state
8989
ret
90+
SYM_CODE_END(do_suspend_lowlevel)
9091

9192
.data
9293
ALIGN

arch/x86/kernel/ftrace_32.S

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
8989
ret
9090
END(ftrace_caller)
9191

92-
ENTRY(ftrace_regs_caller)
92+
SYM_CODE_START(ftrace_regs_caller)
9393
/*
9494
* We're here from an mcount/fentry CALL, and the stack frame looks like:
9595
*
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
163163
popl %eax
164164

165165
jmp .Lftrace_ret
166+
SYM_CODE_END(ftrace_regs_caller)
166167

167168
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
168169
ENTRY(ftrace_graph_caller)

arch/x86/kernel/head_32.S

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
6464
* can.
6565
*/
6666
__HEAD
67-
ENTRY(startup_32)
67+
SYM_CODE_START(startup_32)
6868
movl pa(initial_stack),%ecx
6969

7070
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
172172
#else
173173
jmp .Ldefault_entry
174174
#endif /* CONFIG_PARAVIRT */
175+
SYM_CODE_END(startup_32)
175176

176177
#ifdef CONFIG_HOTPLUG_CPU
177178
/*

arch/x86/power/hibernate_asm_32.S

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
3535
ret
3636
ENDPROC(swsusp_arch_suspend)
3737

38-
ENTRY(restore_image)
38+
SYM_CODE_START(restore_image)
3939
/* prepare to jump to the image kernel */
4040
movl restore_jump_address, %ebx
4141
movl restore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
4545
/* jump to relocated restore code */
4646
movl relocated_restore_code, %eax
4747
jmpl *%eax
48+
SYM_CODE_END(restore_image)
4849

4950
/* code below has been relocated to a safe page */
50-
ENTRY(core_restore_code)
51+
SYM_CODE_START(core_restore_code)
5152
movl temp_pgt, %eax
5253
movl %eax, %cr3
5354

@@ -77,6 +78,7 @@ copy_loop:
7778

7879
done:
7980
jmpl *%ebx
81+
SYM_CODE_END(core_restore_code)
8082

8183
/* code below belongs to the image kernel */
8284
.align PAGE_SIZE

arch/x86/realmode/rm/trampoline_32.S

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@
2929
.code16
3030

3131
.balign PAGE_SIZE
32-
ENTRY(trampoline_start)
32+
SYM_CODE_START(trampoline_start)
3333
wbinvd # Needed for NUMA-Q should be harmless for others
3434

3535
LJMPW_RM(1f)
@@ -54,11 +54,13 @@ ENTRY(trampoline_start)
5454
lmsw %dx # into protected mode
5555

5656
ljmpl $__BOOT_CS, $pa_startup_32
57+
SYM_CODE_END(trampoline_start)
5758

5859
.section ".text32","ax"
5960
.code32
60-
ENTRY(startup_32) # note: also used from wakeup_asm.S
61+
SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S
6162
jmp *%eax
63+
SYM_CODE_END(startup_32)
6264

6365
.bss
6466
.balign 8

arch/x86/xen/xen-asm_32.S

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -56,7 +56,7 @@
5656
_ASM_EXTABLE(1b,2b)
5757
.endm
5858

59-
ENTRY(xen_iret)
59+
SYM_CODE_START(xen_iret)
6060
/* test eflags for special cases */
6161
testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
6262
jnz hyper_iret
@@ -122,6 +122,7 @@ xen_iret_end_crit:
122122
hyper_iret:
123123
/* put this out of line since its very rarely used */
124124
jmp hypercall_page + __HYPERVISOR_iret * 32
125+
SYM_CODE_END(xen_iret)
125126

126127
.globl xen_iret_start_crit, xen_iret_end_crit
127128

@@ -165,7 +166,7 @@ hyper_iret:
165166
* SAVE_ALL state before going on, since it's usermode state which we
166167
* eventually need to restore.
167168
*/
168-
ENTRY(xen_iret_crit_fixup)
169+
SYM_CODE_START(xen_iret_crit_fixup)
169170
/*
170171
* Paranoia: Make sure we're really coming from kernel space.
171172
* One could imagine a case where userspace jumps into the
@@ -204,4 +205,4 @@ ENTRY(xen_iret_crit_fixup)
204205

205206
lea 4(%edi), %esp /* point esp to new frame */
206207
2: jmp xen_do_upcall
207-
208+
SYM_CODE_END(xen_iret_crit_fixup)

0 commit comments

Comments
 (0)