 
 #ifdef CONFIG_KVM_INTEL_TDX
 static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt));
-#endif
 
 static void vt_disable_virtualization_cpu(void)
 {
@@ -880,6 +879,13 @@ static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 	return 0;
 }
 
+#define vt_op(name) vt_##name
+#define vt_op_tdx_only(name) vt_##name
+#else /* CONFIG_KVM_INTEL_TDX */
+#define vt_op(name) vmx_##name
+#define vt_op_tdx_only(name) NULL
+#endif /* CONFIG_KVM_INTEL_TDX */
+
 #define VMX_REQUIRED_APICV_INHIBITS	\
 	(BIT(APICV_INHIBIT_REASON_DISABLED) |	\
 	 BIT(APICV_INHIBIT_REASON_ABSENT) |	\
@@ -897,152 +903,152 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.hardware_unsetup = vmx_hardware_unsetup,
 
 	.enable_virtualization_cpu = vmx_enable_virtualization_cpu,
-	.disable_virtualization_cpu = vt_disable_virtualization_cpu,
+	.disable_virtualization_cpu = vt_op(disable_virtualization_cpu),
 	.emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
 
-	.has_emulated_msr = vt_has_emulated_msr,
+	.has_emulated_msr = vt_op(has_emulated_msr),
 
 	.vm_size = sizeof(struct kvm_vmx),
 
-	.vm_init = vt_vm_init,
-	.vm_pre_destroy = vt_vm_pre_destroy,
-	.vm_destroy = vt_vm_destroy,
+	.vm_init = vt_op(vm_init),
+	.vm_destroy = vt_op(vm_destroy),
+	.vm_pre_destroy = vt_op_tdx_only(vm_pre_destroy),
 
-	.vcpu_precreate = vt_vcpu_precreate,
-	.vcpu_create = vt_vcpu_create,
-	.vcpu_free = vt_vcpu_free,
-	.vcpu_reset = vt_vcpu_reset,
+	.vcpu_precreate = vt_op(vcpu_precreate),
+	.vcpu_create = vt_op(vcpu_create),
+	.vcpu_free = vt_op(vcpu_free),
+	.vcpu_reset = vt_op(vcpu_reset),
 
-	.prepare_switch_to_guest = vt_prepare_switch_to_guest,
-	.vcpu_load = vt_vcpu_load,
-	.vcpu_put = vt_vcpu_put,
+	.prepare_switch_to_guest = vt_op(prepare_switch_to_guest),
+	.vcpu_load = vt_op(vcpu_load),
+	.vcpu_put = vt_op(vcpu_put),
 
-	.update_exception_bitmap = vt_update_exception_bitmap,
+	.update_exception_bitmap = vt_op(update_exception_bitmap),
 	.get_feature_msr = vmx_get_feature_msr,
-	.get_msr = vt_get_msr,
-	.set_msr = vt_set_msr,
-
-	.get_segment_base = vt_get_segment_base,
-	.get_segment = vt_get_segment,
-	.set_segment = vt_set_segment,
-	.get_cpl = vt_get_cpl,
-	.get_cpl_no_cache = vt_get_cpl_no_cache,
-	.get_cs_db_l_bits = vt_get_cs_db_l_bits,
-	.is_valid_cr0 = vt_is_valid_cr0,
-	.set_cr0 = vt_set_cr0,
-	.is_valid_cr4 = vt_is_valid_cr4,
-	.set_cr4 = vt_set_cr4,
-	.set_efer = vt_set_efer,
-	.get_idt = vt_get_idt,
-	.set_idt = vt_set_idt,
-	.get_gdt = vt_get_gdt,
-	.set_gdt = vt_set_gdt,
-	.set_dr6 = vt_set_dr6,
-	.set_dr7 = vt_set_dr7,
-	.sync_dirty_debug_regs = vt_sync_dirty_debug_regs,
-	.cache_reg = vt_cache_reg,
-	.get_rflags = vt_get_rflags,
-	.set_rflags = vt_set_rflags,
-	.get_if_flag = vt_get_if_flag,
-
-	.flush_tlb_all = vt_flush_tlb_all,
-	.flush_tlb_current = vt_flush_tlb_current,
-	.flush_tlb_gva = vt_flush_tlb_gva,
-	.flush_tlb_guest = vt_flush_tlb_guest,
-
-	.vcpu_pre_run = vt_vcpu_pre_run,
-	.vcpu_run = vt_vcpu_run,
-	.handle_exit = vt_handle_exit,
+	.get_msr = vt_op(get_msr),
+	.set_msr = vt_op(set_msr),
+
+	.get_segment_base = vt_op(get_segment_base),
+	.get_segment = vt_op(get_segment),
+	.set_segment = vt_op(set_segment),
+	.get_cpl = vt_op(get_cpl),
+	.get_cpl_no_cache = vt_op(get_cpl_no_cache),
+	.get_cs_db_l_bits = vt_op(get_cs_db_l_bits),
+	.is_valid_cr0 = vt_op(is_valid_cr0),
+	.set_cr0 = vt_op(set_cr0),
+	.is_valid_cr4 = vt_op(is_valid_cr4),
+	.set_cr4 = vt_op(set_cr4),
+	.set_efer = vt_op(set_efer),
+	.get_idt = vt_op(get_idt),
+	.set_idt = vt_op(set_idt),
+	.get_gdt = vt_op(get_gdt),
+	.set_gdt = vt_op(set_gdt),
+	.set_dr6 = vt_op(set_dr6),
+	.set_dr7 = vt_op(set_dr7),
+	.sync_dirty_debug_regs = vt_op(sync_dirty_debug_regs),
+	.cache_reg = vt_op(cache_reg),
+	.get_rflags = vt_op(get_rflags),
+	.set_rflags = vt_op(set_rflags),
+	.get_if_flag = vt_op(get_if_flag),
+
+	.flush_tlb_all = vt_op(flush_tlb_all),
+	.flush_tlb_current = vt_op(flush_tlb_current),
+	.flush_tlb_gva = vt_op(flush_tlb_gva),
+	.flush_tlb_guest = vt_op(flush_tlb_guest),
+
+	.vcpu_pre_run = vt_op(vcpu_pre_run),
+	.vcpu_run = vt_op(vcpu_run),
+	.handle_exit = vt_op(handle_exit),
 	.skip_emulated_instruction = vmx_skip_emulated_instruction,
 	.update_emulated_instruction = vmx_update_emulated_instruction,
-	.set_interrupt_shadow = vt_set_interrupt_shadow,
-	.get_interrupt_shadow = vt_get_interrupt_shadow,
-	.patch_hypercall = vt_patch_hypercall,
-	.inject_irq = vt_inject_irq,
-	.inject_nmi = vt_inject_nmi,
-	.inject_exception = vt_inject_exception,
-	.cancel_injection = vt_cancel_injection,
-	.interrupt_allowed = vt_interrupt_allowed,
-	.nmi_allowed = vt_nmi_allowed,
-	.get_nmi_mask = vt_get_nmi_mask,
-	.set_nmi_mask = vt_set_nmi_mask,
-	.enable_nmi_window = vt_enable_nmi_window,
-	.enable_irq_window = vt_enable_irq_window,
-	.update_cr8_intercept = vt_update_cr8_intercept,
+	.set_interrupt_shadow = vt_op(set_interrupt_shadow),
+	.get_interrupt_shadow = vt_op(get_interrupt_shadow),
+	.patch_hypercall = vt_op(patch_hypercall),
+	.inject_irq = vt_op(inject_irq),
+	.inject_nmi = vt_op(inject_nmi),
+	.inject_exception = vt_op(inject_exception),
+	.cancel_injection = vt_op(cancel_injection),
+	.interrupt_allowed = vt_op(interrupt_allowed),
+	.nmi_allowed = vt_op(nmi_allowed),
+	.get_nmi_mask = vt_op(get_nmi_mask),
+	.set_nmi_mask = vt_op(set_nmi_mask),
+	.enable_nmi_window = vt_op(enable_nmi_window),
+	.enable_irq_window = vt_op(enable_irq_window),
+	.update_cr8_intercept = vt_op(update_cr8_intercept),
 
 	.x2apic_icr_is_split = false,
-	.set_virtual_apic_mode = vt_set_virtual_apic_mode,
-	.set_apic_access_page_addr = vt_set_apic_access_page_addr,
-	.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
-	.load_eoi_exitmap = vt_load_eoi_exitmap,
+	.set_virtual_apic_mode = vt_op(set_virtual_apic_mode),
+	.set_apic_access_page_addr = vt_op(set_apic_access_page_addr),
+	.refresh_apicv_exec_ctrl = vt_op(refresh_apicv_exec_ctrl),
+	.load_eoi_exitmap = vt_op(load_eoi_exitmap),
 	.apicv_pre_state_restore = pi_apicv_pre_state_restore,
 	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
-	.hwapic_isr_update = vt_hwapic_isr_update,
-	.sync_pir_to_irr = vt_sync_pir_to_irr,
-	.deliver_interrupt = vt_deliver_interrupt,
+	.hwapic_isr_update = vt_op(hwapic_isr_update),
+	.sync_pir_to_irr = vt_op(sync_pir_to_irr),
+	.deliver_interrupt = vt_op(deliver_interrupt),
 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
 
-	.set_tss_addr = vt_set_tss_addr,
-	.set_identity_map_addr = vt_set_identity_map_addr,
+	.set_tss_addr = vt_op(set_tss_addr),
+	.set_identity_map_addr = vt_op(set_identity_map_addr),
 	.get_mt_mask = vmx_get_mt_mask,
 
-	.get_exit_info = vt_get_exit_info,
-	.get_entry_info = vt_get_entry_info,
+	.get_exit_info = vt_op(get_exit_info),
+	.get_entry_info = vt_op(get_entry_info),
 
-	.vcpu_after_set_cpuid = vt_vcpu_after_set_cpuid,
+	.vcpu_after_set_cpuid = vt_op(vcpu_after_set_cpuid),
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-	.get_l2_tsc_offset = vt_get_l2_tsc_offset,
-	.get_l2_tsc_multiplier = vt_get_l2_tsc_multiplier,
-	.write_tsc_offset = vt_write_tsc_offset,
-	.write_tsc_multiplier = vt_write_tsc_multiplier,
+	.get_l2_tsc_offset = vt_op(get_l2_tsc_offset),
+	.get_l2_tsc_multiplier = vt_op(get_l2_tsc_multiplier),
+	.write_tsc_offset = vt_op(write_tsc_offset),
+	.write_tsc_multiplier = vt_op(write_tsc_multiplier),
 
-	.load_mmu_pgd = vt_load_mmu_pgd,
+	.load_mmu_pgd = vt_op(load_mmu_pgd),
 
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.update_cpu_dirty_logging = vt_update_cpu_dirty_logging,
+	.update_cpu_dirty_logging = vt_op(update_cpu_dirty_logging),
 
 	.nested_ops = &vmx_nested_ops,
 
 	.pi_update_irte = vmx_pi_update_irte,
 	.pi_start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
-	.set_hv_timer = vt_set_hv_timer,
-	.cancel_hv_timer = vt_cancel_hv_timer,
+	.set_hv_timer = vt_op(set_hv_timer),
+	.cancel_hv_timer = vt_op(cancel_hv_timer),
 #endif
 
-	.setup_mce = vt_setup_mce,
+	.setup_mce = vt_op(setup_mce),
 
 #ifdef CONFIG_KVM_SMM
-	.smi_allowed = vt_smi_allowed,
-	.enter_smm = vt_enter_smm,
-	.leave_smm = vt_leave_smm,
-	.enable_smi_window = vt_enable_smi_window,
+	.smi_allowed = vt_op(smi_allowed),
+	.enter_smm = vt_op(enter_smm),
+	.leave_smm = vt_op(leave_smm),
+	.enable_smi_window = vt_op(enable_smi_window),
 #endif
 
-	.check_emulate_instruction = vt_check_emulate_instruction,
-	.apic_init_signal_blocked = vt_apic_init_signal_blocked,
+	.check_emulate_instruction = vt_op(check_emulate_instruction),
+	.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
 	.migrate_timers = vmx_migrate_timers,
 
-	.msr_filter_changed = vt_msr_filter_changed,
-	.complete_emulated_msr = vt_complete_emulated_msr,
+	.msr_filter_changed = vt_op(msr_filter_changed),
+	.complete_emulated_msr = vt_op(complete_emulated_msr),
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 
 	.get_untagged_addr = vmx_get_untagged_addr,
 
-	.mem_enc_ioctl = vt_mem_enc_ioctl,
-	.vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
+	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
+	.vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
 
-	.private_max_mapping_level = vt_gmem_private_max_mapping_level
+	.private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
-	.hardware_setup = vt_hardware_setup,
+	.hardware_setup = vt_op(hardware_setup),
 	.handle_intel_pt_intr = NULL,
 
 	.runtime_ops = &vt_x86_ops,
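The new vt_op()/vt_op_tdx_only() macros let the single vt_x86_ops initializer resolve at compile time: with CONFIG_KVM_INTEL_TDX enabled they expand to the TDX-aware vt_* wrappers, and without it they expand to the plain vmx_* implementations, or to NULL for hooks that only exist with TDX. The following is a minimal, self-contained sketch of the same token-pasting pattern; the CONFIG_TDX toggle, the ops struct, and the handlers are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-in for CONFIG_KVM_INTEL_TDX; set to 0 for a TDX-less build. */
#define CONFIG_TDX 1

struct ops {
	void (*vcpu_run)(void);
	void (*mem_enc_ioctl)(void);	/* hook that only exists with TDX */
};

/* Baseline VMX handler, always present. */
static void vmx_vcpu_run(void)
{
	puts("vmx_vcpu_run");
}

#if CONFIG_TDX
/* Wrapper that would pick the VMX or TDX path per VM at runtime. */
static void vt_vcpu_run(void)
{
	vmx_vcpu_run();		/* ...or the TDX path, depending on the VM */
}

static void vt_mem_enc_ioctl(void)
{
	puts("vt_mem_enc_ioctl (TDX-only path)");
}

#define vt_op(name)		vt_##name
#define vt_op_tdx_only(name)	vt_##name
#else
#define vt_op(name)		vmx_##name
#define vt_op_tdx_only(name)	NULL
#endif

/* One initializer serves both configurations. */
static const struct ops vt_ops = {
	.vcpu_run	= vt_op(vcpu_run),
	.mem_enc_ioctl	= vt_op_tdx_only(mem_enc_ioctl),
};

int main(void)
{
	vt_ops.vcpu_run();
	if (vt_ops.mem_enc_ioctl)	/* NULL when TDX support is compiled out */
		vt_ops.mem_enc_ioctl();
	return 0;
}

With the toggle set to 0, the wrapper layer is never compiled and the vmx_* handler is wired into the table directly, while the TDX-only slot becomes NULL and must be checked before use; this mirrors what the #else branch of the patch does for CONFIG_KVM_INTEL_TDX=n builds.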