Skip to content

Commit ef1e031

Browse files
Jiri Slaby authored and suryasaimadhu committed
x86/asm: Make some functions local
There are a couple of assembly functions which are invoked only locally in the file they are defined. In C, they are marked "static". In assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL (and switch their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or CODE is used, depends on whether ENDPROC or END was used for a particular function before. Signed-off-by: Jiri Slaby <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Andy Shevchenko <[email protected]> Cc: Ard Biesheuvel <[email protected]> Cc: Boris Ostrovsky <[email protected]> Cc: Darren Hart <[email protected]> Cc: Greg Kroah-Hartman <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Juergen Gross <[email protected]> Cc: [email protected] Cc: linux-efi <[email protected]> Cc: [email protected] Cc: Matt Fleming <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: [email protected] Cc: Stefano Stabellini <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: x86-ml <[email protected]> Cc: [email protected] Link: https://lkml.kernel.org/r/[email protected]
1 parent b4edca1 commit ef1e031

File tree

7 files changed

+35
-34
lines changed

7 files changed

+35
-34
lines changed

arch/x86/boot/compressed/efi_thunk_64.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -99,20 +99,20 @@ ENTRY(efi64_thunk)
9999
ret
100100
ENDPROC(efi64_thunk)
101101

102-
ENTRY(efi_exit32)
102+
SYM_FUNC_START_LOCAL(efi_exit32)
103103
movq func_rt_ptr(%rip), %rax
104104
push %rax
105105
mov %rdi, %rax
106106
ret
107-
ENDPROC(efi_exit32)
107+
SYM_FUNC_END(efi_exit32)
108108

109109
.code32
110110
/*
111111
* EFI service pointer must be in %edi.
112112
*
113113
* The stack should represent the 32-bit calling convention.
114114
*/
115-
ENTRY(efi_enter32)
115+
SYM_FUNC_START_LOCAL(efi_enter32)
116116
movl $__KERNEL_DS, %eax
117117
movl %eax, %ds
118118
movl %eax, %es
@@ -172,7 +172,7 @@ ENTRY(efi_enter32)
172172
btsl $X86_CR0_PG_BIT, %eax
173173
movl %eax, %cr0
174174
lret
175-
ENDPROC(efi_enter32)
175+
SYM_FUNC_END(efi_enter32)
176176

177177
.data
178178
.balign 8

arch/x86/entry/entry_64.S

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
11011101
* existing activation in its critical region -- if so, we pop the current
11021102
* activation and restart the handler using the previous one.
11031103
*/
1104-
ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
1104+
/* do_hypervisor_callback(struct *pt_regs) */
1105+
SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
11051106

11061107
/*
11071108
* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
11191120
call xen_maybe_preempt_hcall
11201121
#endif
11211122
jmp error_exit
1122-
END(xen_do_hypervisor_callback)
1123+
SYM_CODE_END(xen_do_hypervisor_callback)
11231124

11241125
/*
11251126
* Hypervisor uses this for application faults while it executes.
@@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
12141215
* Use slow, but surefire "are we in kernel?" check.
12151216
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
12161217
*/
1217-
ENTRY(paranoid_entry)
1218+
SYM_CODE_START_LOCAL(paranoid_entry)
12181219
UNWIND_HINT_FUNC
12191220
cld
12201221
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1248,7 +1249,7 @@ ENTRY(paranoid_entry)
12481249
FENCE_SWAPGS_KERNEL_ENTRY
12491250

12501251
ret
1251-
END(paranoid_entry)
1252+
SYM_CODE_END(paranoid_entry)
12521253

12531254
/*
12541255
* "Paranoid" exit path from exception stack. This is invoked
@@ -1262,7 +1263,7 @@ END(paranoid_entry)
12621263
*
12631264
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
12641265
*/
1265-
ENTRY(paranoid_exit)
1266+
SYM_CODE_START_LOCAL(paranoid_exit)
12661267
UNWIND_HINT_REGS
12671268
DISABLE_INTERRUPTS(CLBR_ANY)
12681269
TRACE_IRQS_OFF_DEBUG
@@ -1279,12 +1280,12 @@ ENTRY(paranoid_exit)
12791280
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
12801281
.Lparanoid_exit_restore:
12811282
jmp restore_regs_and_return_to_kernel
1282-
END(paranoid_exit)
1283+
SYM_CODE_END(paranoid_exit)
12831284

12841285
/*
12851286
* Save all registers in pt_regs, and switch GS if needed.
12861287
*/
1287-
ENTRY(error_entry)
1288+
SYM_CODE_START_LOCAL(error_entry)
12881289
UNWIND_HINT_FUNC
12891290
cld
12901291
PUSH_AND_CLEAR_REGS save_ret=1
@@ -1364,16 +1365,16 @@ ENTRY(error_entry)
13641365
call fixup_bad_iret
13651366
mov %rax, %rsp
13661367
jmp .Lerror_entry_from_usermode_after_swapgs
1367-
END(error_entry)
1368+
SYM_CODE_END(error_entry)
13681369

1369-
ENTRY(error_exit)
1370+
SYM_CODE_START_LOCAL(error_exit)
13701371
UNWIND_HINT_REGS
13711372
DISABLE_INTERRUPTS(CLBR_ANY)
13721373
TRACE_IRQS_OFF
13731374
testb $3, CS(%rsp)
13741375
jz retint_kernel
13751376
jmp .Lretint_user
1376-
END(error_exit)
1377+
SYM_CODE_END(error_exit)
13771378

13781379
/*
13791380
* Runs on exception stack. Xen PV does not go through this path at all,

arch/x86/lib/copy_page_64.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ ENTRY(copy_page)
2121
ENDPROC(copy_page)
2222
EXPORT_SYMBOL(copy_page)
2323

24-
ENTRY(copy_page_regs)
24+
SYM_FUNC_START_LOCAL(copy_page_regs)
2525
subq $2*8, %rsp
2626
movq %rbx, (%rsp)
2727
movq %r12, 1*8(%rsp)
@@ -86,4 +86,4 @@ ENTRY(copy_page_regs)
8686
movq 1*8(%rsp), %r12
8787
addq $2*8, %rsp
8888
ret
89-
ENDPROC(copy_page_regs)
89+
SYM_FUNC_END(copy_page_regs)

arch/x86/lib/memcpy_64.S

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
* rax original destination
3030
*/
3131
SYM_FUNC_START_ALIAS(__memcpy)
32-
ENTRY(memcpy)
32+
SYM_FUNC_START_LOCAL(memcpy)
3333
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
3434
"jmp memcpy_erms", X86_FEATURE_ERMS
3535

@@ -41,7 +41,7 @@ ENTRY(memcpy)
4141
movl %edx, %ecx
4242
rep movsb
4343
ret
44-
ENDPROC(memcpy)
44+
SYM_FUNC_END(memcpy)
4545
SYM_FUNC_END_ALIAS(__memcpy)
4646
EXPORT_SYMBOL(memcpy)
4747
EXPORT_SYMBOL(__memcpy)
@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
5050
* memcpy_erms() - enhanced fast string memcpy. This is faster and
5151
* simpler than memcpy. Use memcpy_erms when possible.
5252
*/
53-
ENTRY(memcpy_erms)
53+
SYM_FUNC_START_LOCAL(memcpy_erms)
5454
movq %rdi, %rax
5555
movq %rdx, %rcx
5656
rep movsb
5757
ret
58-
ENDPROC(memcpy_erms)
58+
SYM_FUNC_END(memcpy_erms)
5959

60-
ENTRY(memcpy_orig)
60+
SYM_FUNC_START_LOCAL(memcpy_orig)
6161
movq %rdi, %rax
6262

6363
cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
182182

183183
.Lend:
184184
retq
185-
ENDPROC(memcpy_orig)
185+
SYM_FUNC_END(memcpy_orig)
186186

187187
#ifndef CONFIG_UML
188188

arch/x86/lib/memset_64.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,16 +59,16 @@ EXPORT_SYMBOL(__memset)
5959
*
6060
* rax original destination
6161
*/
62-
ENTRY(memset_erms)
62+
SYM_FUNC_START_LOCAL(memset_erms)
6363
movq %rdi,%r9
6464
movb %sil,%al
6565
movq %rdx,%rcx
6666
rep stosb
6767
movq %r9,%rax
6868
ret
69-
ENDPROC(memset_erms)
69+
SYM_FUNC_END(memset_erms)
7070

71-
ENTRY(memset_orig)
71+
SYM_FUNC_START_LOCAL(memset_orig)
7272
movq %rdi,%r10
7373

7474
/* expand byte value */
@@ -139,4 +139,4 @@ ENTRY(memset_orig)
139139
subq %r8,%rdx
140140
jmp .Lafter_bad_alignment
141141
.Lfinal:
142-
ENDPROC(memset_orig)
142+
SYM_FUNC_END(memset_orig)

arch/x86/platform/efi/efi_thunk_64.S

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ ENDPROC(efi64_thunk)
6767
*
6868
* This function must be invoked with a 1:1 mapped stack.
6969
*/
70-
ENTRY(__efi64_thunk)
70+
SYM_FUNC_START_LOCAL(__efi64_thunk)
7171
movl %ds, %eax
7272
push %rax
7373
movl %es, %eax
@@ -114,22 +114,22 @@ ENTRY(__efi64_thunk)
114114
or %rcx, %rax
115115
1:
116116
ret
117-
ENDPROC(__efi64_thunk)
117+
SYM_FUNC_END(__efi64_thunk)
118118

119-
ENTRY(efi_exit32)
119+
SYM_FUNC_START_LOCAL(efi_exit32)
120120
movq func_rt_ptr(%rip), %rax
121121
push %rax
122122
mov %rdi, %rax
123123
ret
124-
ENDPROC(efi_exit32)
124+
SYM_FUNC_END(efi_exit32)
125125

126126
.code32
127127
/*
128128
* EFI service pointer must be in %edi.
129129
*
130130
* The stack should represent the 32-bit calling convention.
131131
*/
132-
ENTRY(efi_enter32)
132+
SYM_FUNC_START_LOCAL(efi_enter32)
133133
movl $__KERNEL_DS, %eax
134134
movl %eax, %ds
135135
movl %eax, %es
@@ -145,7 +145,7 @@ ENTRY(efi_enter32)
145145
pushl %eax
146146

147147
lret
148-
ENDPROC(efi_enter32)
148+
SYM_FUNC_END(efi_enter32)
149149

150150
.data
151151
.balign 8

arch/x86/platform/pvh/head.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@
5050
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
5151
#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
5252

53-
ENTRY(pvh_start_xen)
53+
SYM_CODE_START_LOCAL(pvh_start_xen)
5454
cld
5555

5656
lgdt (_pa(gdt))
@@ -146,7 +146,7 @@ ENTRY(pvh_start_xen)
146146

147147
ljmp $PVH_CS_SEL, $_pa(startup_32)
148148
#endif
149-
END(pvh_start_xen)
149+
SYM_CODE_END(pvh_start_xen)
150150

151151
.section ".init.data","aw"
152152
.balign 8

0 commit comments

Comments (0)