@@ -738,8 +738,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	if (unlikely(apic->apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		static_call(kvm_x86_hwapic_irr_update)(apic->vcpu,
-						       apic_find_highest_irr(apic));
+		kvm_x86_call(hwapic_irr_update)(apic->vcpu,
+						apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -765,7 +765,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call(kvm_x86_hwapic_isr_update)(vec);
+		kvm_x86_call(hwapic_isr_update)(vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -810,7 +810,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
@@ -946,7 +946,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
 	if (kvm_x86_ops.sync_pir_to_irr)
-		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
+		highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1338,8 +1338,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 						       apic->regs + APIC_TMR);
 		}
 
-		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
-						       trig_mode, vector);
+		kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
+						trig_mode, vector);
 		break;
 
 	case APIC_DM_REMRD:
@@ -2105,7 +2105,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
 {
 	WARN_ON(preemptible());
 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
+	kvm_x86_call(cancel_hv_timer)(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
 }
 
@@ -2122,7 +2122,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
 	if (!ktimer->tscdeadline)
 		return false;
 
-	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
+	if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
 		return false;
 
 	ktimer->hv_timer_in_use = true;
@@ -2577,7 +2577,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
 		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
+		kvm_x86_call(set_virtual_apic_mode)(vcpu);
 	}
 
 	apic->base_address = apic->vcpu->arch.apic_base &
@@ -2687,7 +2687,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u64 msr_val;
 	int i;
 
-	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
+	kvm_x86_call(apicv_pre_state_restore)(vcpu);
 
 	if (!init_event) {
 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
@@ -2742,9 +2742,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (apic->apicv_active) {
-		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
-		static_call(kvm_x86_hwapic_isr_update)(-1);
+		kvm_x86_call(apicv_post_state_restore)(vcpu);
+		kvm_x86_call(hwapic_irr_update)(vcpu, -1);
+		kvm_x86_call(hwapic_isr_update)(-1);
 	}
 
 	vcpu->arch.apic_arb_prio = 0;
@@ -2840,7 +2840,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	vcpu->arch.apic = apic;
 
 	if (kvm_x86_ops.alloc_apic_backing_page)
-		apic->regs = static_call(kvm_x86_alloc_apic_backing_page)(vcpu);
+		apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
 	else
 		apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!apic->regs) {
@@ -3019,7 +3019,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	int r;
 
-	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
+	kvm_x86_call(apicv_pre_state_restore)(vcpu);
 
 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
 	/* set SPIV separately to get count of SW disabled APICs right */
@@ -3046,9 +3046,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
 	kvm_apic_update_apicv(vcpu);
 	if (apic->apicv_active) {
-		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
-		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		kvm_x86_call(apicv_post_state_restore)(vcpu);
+		kvm_x86_call(hwapic_irr_update)(vcpu,
+						apic_find_highest_irr(apic));
+		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))
@@ -3336,7 +3337,8 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 			/* evaluate pending_events before reading the vector */
 			smp_rmb();
 			sipi_vector = apic->sipi_vector;
-			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
+			kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
+							       sipi_vector);
 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		}
 	}