1010 * Authors: Alexander Graf <[email protected]> 1111 */
1212
13+ #include <linux/linkage.h>
1314#include <asm/ppc_asm.h>
1415#include <asm/code-patching-asm.h>
1516#include <asm/kvm_asm.h>
@@ -2358,7 +2359,7 @@ hmi_realmode:
23582359 * This routine calls kvmppc_read_intr, a C function, if an external
23592360 * interrupt is pending.
23602361 */
2361- kvmppc_check_wake_reason:
2362+ SYM_FUNC_START_LOCAL(kvmppc_check_wake_reason)
23622363 mfspr r6, SPRN_SRR1
23632364BEGIN_FTR_SECTION
23642365 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
@@ -2427,14 +2428,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
24272428 addi r1, r1, PPC_MIN_STKFRM
24282429 mtlr r0
24292430 blr
2431+ SYM_FUNC_END(kvmppc_check_wake_reason)
24302432
24312433/*
24322434 * Save away FP, VMX and VSX registers.
24332435 * r3 = vcpu pointer
24342436 * N.B. r30 and r31 are volatile across this function,
24352437 * thus it is not callable from C.
24362438 */
2437- kvmppc_save_fp:
2439+ SYM_FUNC_START_LOCAL(kvmppc_save_fp)
24382440 mflr r30
24392441 mr r31,r3
24402442 mfmsr r5
@@ -2462,14 +2464,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
24622464 stw r6,VCPU_VRSAVE(r31)
24632465 mtlr r30
24642466 blr
2467+ SYM_FUNC_END(kvmppc_save_fp)
24652468
24662469/*
24672470 * Load up FP, VMX and VSX registers
24682471 * r4 = vcpu pointer
24692472 * N.B. r30 and r31 are volatile across this function,
24702473 * thus it is not callable from C.
24712474 */
2472- kvmppc_load_fp:
2475+ SYM_FUNC_START_LOCAL(kvmppc_load_fp)
24732476 mflr r30
24742477 mr r31,r4
24752478 mfmsr r9
@@ -2498,6 +2501,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
24982501 mtlr r30
24992502 mr r4,r31
25002503 blr
2504+ SYM_FUNC_END(kvmppc_load_fp)
25012505
25022506#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
25032507/*
@@ -2746,7 +2750,7 @@ kvmppc_bad_host_intr:
27462750 * r9 has a vcpu pointer (in)
27472751 * r0 is used as a scratch register
27482752 */
2749- kvmppc_msr_interrupt:
2753+ SYM_FUNC_START_LOCAL(kvmppc_msr_interrupt)
27502754 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
27512755 cmpwi r0, 2 /* Check if we are in transactional state.. */
27522756 ld r11, VCPU_INTR_MSR(r9)
@@ -2755,13 +2759,14 @@ kvmppc_msr_interrupt:
27552759 li r0, 1
275627601: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
27572761 blr
2762+ SYM_FUNC_END(kvmppc_msr_interrupt)
27582763
27592764/*
27602765 * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
27612766 *
27622767 * Load up guest PMU state. R3 points to the vcpu struct.
27632768 */
2764- kvmhv_load_guest_pmu:
2769+ SYM_FUNC_START_LOCAL(kvmhv_load_guest_pmu)
27652770 mr r4, r3
27662771 mflr r0
27672772 li r3, 1
@@ -2811,13 +2816,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
28112816 isync
28122817 mtlr r0
28132818 blr
2819+ SYM_FUNC_END(kvmhv_load_guest_pmu)
28142820
28152821/*
28162822 * void kvmhv_load_host_pmu(void)
28172823 *
28182824 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
28192825 */
2820- kvmhv_load_host_pmu:
2826+ SYM_FUNC_START_LOCAL(kvmhv_load_host_pmu)
28212827 mflr r0
28222828 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
28232829 cmpwi r4, 0
@@ -2859,14 +2865,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
28592865 isync
28602866 mtlr r0
2861286723: blr
2868+ SYM_FUNC_END(kvmhv_load_host_pmu)
28622869
28632870/*
28642871 * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
28652872 *
28662873 * Save guest PMU state into the vcpu struct.
28672874 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
28682875 */
2869- kvmhv_save_guest_pmu:
2876+ SYM_FUNC_START_LOCAL(kvmhv_save_guest_pmu)
28702877 mr r9, r3
28712878 mr r8, r4
28722879BEGIN_FTR_SECTION
@@ -2942,6 +2949,7 @@ BEGIN_FTR_SECTION
29422949 mtspr SPRN_MMCRS, r4
29432950END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2944295122: blr
2952+ SYM_FUNC_END(kvmhv_save_guest_pmu)
29452953
29462954/*
29472955 * This works around a hardware bug on POWER8E processors, where
0 commit comments