@@ -98,7 +98,7 @@ SECTION_FUNC(TEXT, z_arm_pendsv)
 #ifdef CONFIG_FPU_SHARING
 	/* Assess whether switched-out thread had been using the FP registers. */
 	tst lr, #_EXC_RETURN_FTYPE_Msk
-	bne out_fp_endif
+	bne .L_out_fp_endif

 	/* FP context active: set FP state and store callee-saved registers.
 	 * Note: if Lazy FP stacking is enabled, storing the callee-saved
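For context (not part of the patch): on these cores, EXC_RETURN.FType is bit 4 of the value held in lr during exception handling, and it reads 0 when exception entry stacked an extended (FP) frame. The tst/bne pair above therefore skips the FP save whenever the switched-out thread had no live FP context. A minimal sketch of the idiom, assuming _EXC_RETURN_FTYPE_Msk is (1 << 4) and using a hypothetical skip label:

	tst lr, #_EXC_RETURN_FTYPE_Msk	/* Z=1 iff FType is clear (FP frame stacked) */
	bne .L_skip_fp_save		/* FType set: standard frame, nothing to save */
	vstmia r0, {s16-s31}		/* FType clear: spill callee-saved FP regs */
.L_skip_fp_save: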
@@ -108,7 +108,7 @@ SECTION_FUNC(TEXT, z_arm_pendsv)
 	add r0, r2, #_thread_offset_to_preempt_float
 	vstmia r0, {s16-s31}

-out_fp_endif:
+.L_out_fp_endif:
 	/* At this point FPCCR.LSPACT is guaranteed to be cleared,
 	 * regardless of whether the thread has an active FP context.
 	 */
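The rename itself leans on a GNU assembler convention: symbols beginning with .L are local labels, resolved at assembly time and omitted from the object file's symbol table, so they stop showing up in nm output and debugger backtraces as if they were functions. A standalone sketch of the difference (not part of the patch):

out_fp_endif:			/* ordinary label: emitted to the symbol table */
	nop
.L_out_fp_endif:		/* .L prefix: assembler-local, invisible to nm */
	nop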
@@ -204,9 +204,9 @@ out_fp_endif:
 	 * were enabled before irq_lock was called.
 	 */
 	cmp r0, #0
-	bne _thread_irq_disabled
+	bne .L_thread_irq_disabled
 	cpsie i
-_thread_irq_disabled:
+.L_thread_irq_disabled:

 #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
 	/* Re-program dynamic memory map */
@@ -259,25 +259,25 @@ _thread_irq_disabled:
 #ifdef CONFIG_FPU_SHARING
 	/* Assess whether switched-in thread had been using the FP registers. */
 	tst lr, #_EXC_RETURN_FTYPE_Msk
-	beq in_fp_active
+	beq .L_in_fp_active
 	/* FP context inactive for swapped-in thread:
 	 * - reset FPSCR to 0
 	 * - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
 	 *   from pendSV)
 	 */
 	movs.n r3, #0
 	vmsr fpscr, r3
-	b in_fp_endif
+	b .L_in_fp_endif

-in_fp_active:
+.L_in_fp_active:
 	/* FP context active:
 	 * - clear EXC_RETURN.F_Type
 	 * - FPSCR and caller-saved registers will be restored automatically
 	 * - restore callee-saved FP registers
 	 */
 	add r0, r2, #_thread_offset_to_preempt_float
 	vldmia r0, {s16-s31}
-in_fp_endif:
+.L_in_fp_endif:
 	/* Clear CONTROL.FPCA that may have been set by FP instructions */
 	mrs r3, CONTROL
 	bic r3, #_CONTROL_FPCA_Msk
@@ -361,12 +361,12 @@ SECTION_FUNC(TEXT, z_arm_svc)
 	movs r0, #_EXC_RETURN_SPSEL_Msk
 	mov r1, lr
 	tst r1, r0
-	beq _stack_frame_msp
+	beq .L_stack_frame_msp
 	mrs r0, PSP
-	bne _stack_frame_endif
-_stack_frame_msp:
+	bne .L_stack_frame_endif
+.L_stack_frame_msp:
 	mrs r0, MSP
-_stack_frame_endif:
+.L_stack_frame_endif:
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	tst lr, #_EXC_RETURN_SPSEL_Msk /* did we come from thread mode? */
 	ite eq	/* if zero (equal), came from handler mode */
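Both variants select the stack pointer that holds the exception frame: EXC_RETURN.SPSEL (bit 2 of lr) is set when the frame was pushed to the process stack. Baseline cores lack IT blocks, hence the branch ladder above; Mainline folds the same choice into two conditionally executed instructions. A sketch of the Mainline idiom, assuming the same mask definition:

	tst lr, #_EXC_RETURN_SPSEL_Msk	/* Z=1: frame lives on MSP */
	ite eq				/* predicate the next two instructions */
	mrseq r0, MSP			/* equal: read the main stack pointer */
	mrsne r0, PSP			/* not equal: read the process stack pointer */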
@@ -399,7 +399,7 @@ _stack_frame_endif:
 	mrs r2, CONTROL

 	cmp r1, #3
-	beq _do_syscall
+	beq .L_do_syscall

 	/*
 	 * check that we are privileged before invoking other SVCs
@@ -411,12 +411,12 @@ _stack_frame_endif:
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	tst r2, #0x1
 #endif
-	bne _oops
+	bne .L_oops

 #endif /* CONFIG_USERSPACE */

 	cmp r1, #2
-	beq _oops
+	beq .L_oops

 #if defined(CONFIG_IRQ_OFFLOAD)
 	push {r0, lr}
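For reference: CONTROL.nPRIV is bit 0 of the CONTROL register and reads 1 when thread mode is unprivileged, so the tst r2, #0x1 / bne .L_oops pair above faults any SVC other than the syscall entry when issued by an unprivileged caller. A minimal sketch, assuming r2 already holds CONTROL as in the surrounding code:

	mrs r2, CONTROL		/* CONTROL.nPRIV is bit 0 */
	tst r2, #0x1		/* Z=0 iff the caller is unprivileged */
	bne .L_oops		/* unprivileged: escalate to a kernel oops */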
@@ -434,7 +434,7 @@ _stack_frame_endif:

 #endif

-_oops:
+.L_oops:
 	push {r0, lr}
 #if defined(CONFIG_EXTRA_EXCEPTION_INFO)
 #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@@ -484,7 +484,7 @@ _oops:
 	 * r6 - call_id
 	 * r8 - saved link register
 	 */
-_do_syscall:
+.L_do_syscall:
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	movs r3, #24
 	ldr r1, [r0, r3]	/* grab address of PC from stack frame */
@@ -510,15 +510,15 @@ _do_syscall:
 	/* The supplied syscall_id must be lower than the limit
 	 * (Requires unsigned integer comparison)
 	 */
-	blo valid_syscall_id
+	blo .L_valid_syscall_id

 	/* bad syscall id.  Set arg1 to bad id and set call_id to SYSCALL_BAD */
 	str r6, [r0]
 	ldr r6, =K_SYSCALL_BAD

 	/* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */

-valid_syscall_id:
+.L_valid_syscall_id:
 	ldr r0, =_kernel
 	ldr r0, [r0, #_kernel_offset_to_current]
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
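The blo (branch if unsigned lower) after the preceding cmp makes this a single-branch bounds check: any ID at or above the limit, including values that would look negative under a signed compare, falls through into the K_SYSCALL_BAD path. A sketch of the pattern, assuming the limit was loaded into r3 beforehand (register choice is illustrative):

	cmp r6, r3			/* r6 = syscall_id, r3 = table limit */
	blo .L_valid_syscall_id		/* unsigned r6 < limit: ID is in range */
	ldr r6, =K_SYSCALL_BAD		/* out of range: substitute the bad-syscall ID */
.L_valid_syscall_id: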