@@ -275,7 +275,7 @@ ENDPROC(preserve_boot_args)
  * - first few MB of the kernel linear mapping to jump to once the MMU has
  *   been enabled
  */
-__create_page_tables:
+SYM_FUNC_START_LOCAL(__create_page_tables)
 	mov	x28, lr
 
 	/*
@@ -403,15 +403,15 @@ __create_page_tables:
 	bl	__inval_dcache_area
 
 	ret	x28
-ENDPROC(__create_page_tables)
+SYM_FUNC_END(__create_page_tables)
 	.ltorg
 
 /*
  * The following fragment of code is executed with the MMU enabled.
  *
  *   x0 = __PHYS_OFFSET
  */
-__primary_switched:
+SYM_FUNC_START_LOCAL(__primary_switched)
 	adrp	x4, init_thread_union
 	add	sp, x4, #THREAD_SIZE
 	adr_l	x5, init_task
@@ -456,7 +456,7 @@ __primary_switched:
 	mov	x29, #0
 	mov	x30, #0
 	b	start_kernel
-ENDPROC(__primary_switched)
+SYM_FUNC_END(__primary_switched)
 
 /*
  * end early head section, begin head code that is also used for
@@ -475,7 +475,7 @@ EXPORT_SYMBOL(kimage_vaddr)
  * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
  * booted in EL1 or EL2 respectively.
  */
-ENTRY(el2_setup)
+SYM_FUNC_START(el2_setup)
 	msr	SPsel, #1			// We want to use SP_EL{1,2}
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
@@ -636,13 +636,13 @@ install_el2_stub:
 	msr	elr_el2, lr
 	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
 	eret
-ENDPROC(el2_setup)
+SYM_FUNC_END(el2_setup)
 
 /*
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in w0. See arch/arm64/include/asm/virt.h for more info.
  */
-set_cpu_boot_mode_flag:
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 	adr_l	x1, __boot_cpu_mode
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -651,7 +651,7 @@ set_cpu_boot_mode_flag:
 	dmb	sy
 	dc	ivac, x1			// Invalidate potentially stale cache line
 	ret
-ENDPROC(set_cpu_boot_mode_flag)
+SYM_FUNC_END(set_cpu_boot_mode_flag)
 
 /*
  * These values are written with the MMU off, but read with the MMU on.
@@ -683,7 +683,7 @@ ENTRY(__early_cpu_boot_status)
  * This provides a "holding pen" for platforms to hold all secondary
  * cores are held until we're ready for them to initialise.
  */
-ENTRY(secondary_holding_pen)
+SYM_FUNC_START(secondary_holding_pen)
 	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	mrs	x0, mpidr_el1
@@ -695,19 +695,19 @@ pen:	ldr	x4, [x3]
 	b.eq	secondary_startup
 	wfe
 	b	pen
-ENDPROC(secondary_holding_pen)
+SYM_FUNC_END(secondary_holding_pen)
 
 /*
  * Secondary entry point that jumps straight into the kernel. Only to
  * be used where CPUs are brought online dynamically by the kernel.
  */
-ENTRY(secondary_entry)
+SYM_FUNC_START(secondary_entry)
 	bl	el2_setup			// Drop to EL1
 	bl	set_cpu_boot_mode_flag
 	b	secondary_startup
-ENDPROC(secondary_entry)
+SYM_FUNC_END(secondary_entry)
 
-secondary_startup:
+SYM_FUNC_START_LOCAL(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
@@ -717,9 +717,9 @@ secondary_startup:
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
-ENDPROC(secondary_startup)
+SYM_FUNC_END(secondary_startup)
 
-__secondary_switched:
+SYM_FUNC_START_LOCAL(__secondary_switched)
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
@@ -734,13 +734,13 @@ __secondary_switched:
 	mov	x29, #0
 	mov	x30, #0
 	b	secondary_start_kernel
-ENDPROC(__secondary_switched)
+SYM_FUNC_END(__secondary_switched)
 
-__secondary_too_slow:
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
 	wfe
 	wfi
 	b	__secondary_too_slow
-ENDPROC(__secondary_too_slow)
+SYM_FUNC_END(__secondary_too_slow)
 
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -772,7 +772,7 @@ ENDPROC(__secondary_too_slow)
  * Checks if the selected granule size is supported by the CPU.
  * If it isn't, park the CPU
  */
-ENTRY(__enable_mmu)
+SYM_FUNC_START(__enable_mmu)
 	mrs	x2, ID_AA64MMFR0_EL1
 	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -796,9 +796,9 @@ ENTRY(__enable_mmu)
 	dsb	nsh
 	isb
 	ret
-ENDPROC(__enable_mmu)
+SYM_FUNC_END(__enable_mmu)
 
-ENTRY(__cpu_secondary_check52bitva)
+SYM_FUNC_START(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
 	ldr_l	x0, vabits_actual
 	cmp	x0, #52
@@ -816,20 +816,20 @@ ENTRY(__cpu_secondary_check52bitva)
 
 #endif
 2:	ret
-ENDPROC(__cpu_secondary_check52bitva)
+SYM_FUNC_END(__cpu_secondary_check52bitva)
 
-__no_granule_support:
+SYM_FUNC_START_LOCAL(__no_granule_support)
 	/* Indicate that this CPU can't boot and is stuck in the kernel */
 	update_early_cpu_boot_status \
 		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
 1:
 	wfe
 	wfi
 	b	1b
-ENDPROC(__no_granule_support)
+SYM_FUNC_END(__no_granule_support)
 
 #ifdef CONFIG_RELOCATABLE
-__relocate_kernel:
+SYM_FUNC_START_LOCAL(__relocate_kernel)
 	/*
 	 * Iterate over each entry in the relocation table, and apply the
 	 * relocations in place.
@@ -931,10 +931,10 @@ __relocate_kernel:
 #endif
 	ret
 
-ENDPROC(__relocate_kernel)
+SYM_FUNC_END(__relocate_kernel)
 #endif
 
-__primary_switch:
+SYM_FUNC_START_LOCAL(__primary_switch)
 #ifdef CONFIG_RANDOMIZE_BASE
 	mov	x19, x0				// preserve new SCTLR_EL1 value
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
@@ -977,4 +977,4 @@ __primary_switch:
 	ldr	x8, =__primary_switched
 	adrp	x0, __PHYS_OFFSET
 	br	x8
-ENDPROC(__primary_switch)
+SYM_FUNC_END(__primary_switch)
0 commit comments