
Commit 907092b

stellarhopper authored and sean-jc committed
KVM: VMX: Clean up and macrofy x86_ops
Eliminate a lot of stub definitions by using macros to define the TDX vs
non-TDX versions of various x86_ops. Moving the x86_ops wrappers under
CONFIG_KVM_INTEL_TDX also allows nearly all of vmx/main.c to go under a
single #ifdef, eliminating trampolines in the generated code, and almost
all of the stubs.

For example, with CONFIG_KVM_INTEL_TDX=n, before this cleanup,
vt_refresh_apicv_exec_ctrl() would produce:

0000000000036490 <vt_refresh_apicv_exec_ctrl>:
   36490: f3 0f 1e fa           endbr64
   36494: e8 00 00 00 00        call   36499 <vt_refresh_apicv_exec_ctrl+0x9>
                36495: R_X86_64_PLT32  __fentry__-0x4
   36499: e9 00 00 00 00        jmp    3649e <vt_refresh_apicv_exec_ctrl+0xe>
                3649a: R_X86_64_PLT32  vmx_refresh_apicv_exec_ctrl-0x4
   3649e: 66 90                 xchg   %ax,%ax

After this patch, this is completely eliminated.

Based on a patch by Sean Christopherson <[email protected]>
Link: https://lore.kernel.org/kvm/[email protected]/
Cc: Sean Christopherson <[email protected]>
Cc: Rick Edgecombe <[email protected]>
Signed-off-by: Vishal Verma <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 1a81d9d commit 907092b

2 files changed, +101 -160 lines
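The core of the change is the vt_op()/vt_op_tdx_only() token-pasting pair added to main.c (second hunk below). As a rough standalone illustration of the pattern, the sketch below compiles with or without -DCONFIG_KVM_INTEL_TDX; the struct, function bodies, and main() are invented for this example, and only the two macro definitions mirror the commit:

/*
 * Standalone sketch of the vt_op()/vt_op_tdx_only() pattern. Everything
 * except the two macros is invented for illustration.
 */
#include <stdio.h>

static void vmx_flush_tlb_all(void) { puts("vmx: flush TLB"); }

#ifdef CONFIG_KVM_INTEL_TDX
/* TDX build: the vt_* wrappers dispatch to the VMX or TDX path per VM. */
static void vt_flush_tlb_all(void) { vmx_flush_tlb_all(); /* or TDX path */ }
static void vt_mem_enc_ioctl(void) { puts("vt: TDX mem_enc ioctl"); }
#define vt_op(name)		vt_##name
#define vt_op_tdx_only(name)	vt_##name
#else
/* Non-TDX build: bind vmx_* directly; TDX-only ops become NULL. */
#define vt_op(name)		vmx_##name
#define vt_op_tdx_only(name)	NULL
#endif

struct ops_sketch {
	void (*flush_tlb_all)(void);
	void (*mem_enc_ioctl)(void);	/* TDX-only; NULL when compiled out */
};

static const struct ops_sketch ops = {
	.flush_tlb_all = vt_op(flush_tlb_all),
	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
};

int main(void)
{
	ops.flush_tlb_all();		/* binds vmx_* directly when TDX=n */
	if (ops.mem_enc_ioctl)		/* NULL check for the optional op */
		ops.mem_enc_ioctl();
	return 0;
}

Because each slot is bound at compile time, the non-TDX build references the vmx_* symbol directly, which is exactly why the vt_* trampoline shown in the commit message's disassembly disappears.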

arch/x86/kvm/vmx/main.c

Lines changed: 101 additions & 95 deletions
@@ -12,7 +12,6 @@
 
 #ifdef CONFIG_KVM_INTEL_TDX
 static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt));
-#endif
 
 static void vt_disable_virtualization_cpu(void)
 {
@@ -880,6 +879,13 @@ static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 	return 0;
 }
 
+#define vt_op(name) vt_##name
+#define vt_op_tdx_only(name) vt_##name
+#else /* CONFIG_KVM_INTEL_TDX */
+#define vt_op(name) vmx_##name
+#define vt_op_tdx_only(name) NULL
+#endif /* CONFIG_KVM_INTEL_TDX */
+
 #define VMX_REQUIRED_APICV_INHIBITS	\
 	(BIT(APICV_INHIBIT_REASON_DISABLED) |	\
	 BIT(APICV_INHIBIT_REASON_ABSENT) |	\
@@ -897,152 +903,152 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.hardware_unsetup = vmx_hardware_unsetup,
 
 	.enable_virtualization_cpu = vmx_enable_virtualization_cpu,
-	.disable_virtualization_cpu = vt_disable_virtualization_cpu,
+	.disable_virtualization_cpu = vt_op(disable_virtualization_cpu),
 	.emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
 
-	.has_emulated_msr = vt_has_emulated_msr,
+	.has_emulated_msr = vt_op(has_emulated_msr),
 
 	.vm_size = sizeof(struct kvm_vmx),
 
-	.vm_init = vt_vm_init,
-	.vm_pre_destroy = vt_vm_pre_destroy,
-	.vm_destroy = vt_vm_destroy,
+	.vm_init = vt_op(vm_init),
+	.vm_destroy = vt_op(vm_destroy),
+	.vm_pre_destroy = vt_op_tdx_only(vm_pre_destroy),
 
-	.vcpu_precreate = vt_vcpu_precreate,
-	.vcpu_create = vt_vcpu_create,
-	.vcpu_free = vt_vcpu_free,
-	.vcpu_reset = vt_vcpu_reset,
+	.vcpu_precreate = vt_op(vcpu_precreate),
+	.vcpu_create = vt_op(vcpu_create),
+	.vcpu_free = vt_op(vcpu_free),
+	.vcpu_reset = vt_op(vcpu_reset),
 
-	.prepare_switch_to_guest = vt_prepare_switch_to_guest,
-	.vcpu_load = vt_vcpu_load,
-	.vcpu_put = vt_vcpu_put,
+	.prepare_switch_to_guest = vt_op(prepare_switch_to_guest),
+	.vcpu_load = vt_op(vcpu_load),
+	.vcpu_put = vt_op(vcpu_put),
 
-	.update_exception_bitmap = vt_update_exception_bitmap,
+	.update_exception_bitmap = vt_op(update_exception_bitmap),
 	.get_feature_msr = vmx_get_feature_msr,
-	.get_msr = vt_get_msr,
-	.set_msr = vt_set_msr,
-
-	.get_segment_base = vt_get_segment_base,
-	.get_segment = vt_get_segment,
-	.set_segment = vt_set_segment,
-	.get_cpl = vt_get_cpl,
-	.get_cpl_no_cache = vt_get_cpl_no_cache,
-	.get_cs_db_l_bits = vt_get_cs_db_l_bits,
-	.is_valid_cr0 = vt_is_valid_cr0,
-	.set_cr0 = vt_set_cr0,
-	.is_valid_cr4 = vt_is_valid_cr4,
-	.set_cr4 = vt_set_cr4,
-	.set_efer = vt_set_efer,
-	.get_idt = vt_get_idt,
-	.set_idt = vt_set_idt,
-	.get_gdt = vt_get_gdt,
-	.set_gdt = vt_set_gdt,
-	.set_dr6 = vt_set_dr6,
-	.set_dr7 = vt_set_dr7,
-	.sync_dirty_debug_regs = vt_sync_dirty_debug_regs,
-	.cache_reg = vt_cache_reg,
-	.get_rflags = vt_get_rflags,
-	.set_rflags = vt_set_rflags,
-	.get_if_flag = vt_get_if_flag,
-
-	.flush_tlb_all = vt_flush_tlb_all,
-	.flush_tlb_current = vt_flush_tlb_current,
-	.flush_tlb_gva = vt_flush_tlb_gva,
-	.flush_tlb_guest = vt_flush_tlb_guest,
-
-	.vcpu_pre_run = vt_vcpu_pre_run,
-	.vcpu_run = vt_vcpu_run,
-	.handle_exit = vt_handle_exit,
+	.get_msr = vt_op(get_msr),
+	.set_msr = vt_op(set_msr),
+
+	.get_segment_base = vt_op(get_segment_base),
+	.get_segment = vt_op(get_segment),
+	.set_segment = vt_op(set_segment),
+	.get_cpl = vt_op(get_cpl),
+	.get_cpl_no_cache = vt_op(get_cpl_no_cache),
+	.get_cs_db_l_bits = vt_op(get_cs_db_l_bits),
+	.is_valid_cr0 = vt_op(is_valid_cr0),
+	.set_cr0 = vt_op(set_cr0),
+	.is_valid_cr4 = vt_op(is_valid_cr4),
+	.set_cr4 = vt_op(set_cr4),
+	.set_efer = vt_op(set_efer),
+	.get_idt = vt_op(get_idt),
+	.set_idt = vt_op(set_idt),
+	.get_gdt = vt_op(get_gdt),
+	.set_gdt = vt_op(set_gdt),
+	.set_dr6 = vt_op(set_dr6),
+	.set_dr7 = vt_op(set_dr7),
+	.sync_dirty_debug_regs = vt_op(sync_dirty_debug_regs),
+	.cache_reg = vt_op(cache_reg),
+	.get_rflags = vt_op(get_rflags),
+	.set_rflags = vt_op(set_rflags),
+	.get_if_flag = vt_op(get_if_flag),
+
+	.flush_tlb_all = vt_op(flush_tlb_all),
+	.flush_tlb_current = vt_op(flush_tlb_current),
+	.flush_tlb_gva = vt_op(flush_tlb_gva),
+	.flush_tlb_guest = vt_op(flush_tlb_guest),
+
+	.vcpu_pre_run = vt_op(vcpu_pre_run),
+	.vcpu_run = vt_op(vcpu_run),
+	.handle_exit = vt_op(handle_exit),
 	.skip_emulated_instruction = vmx_skip_emulated_instruction,
 	.update_emulated_instruction = vmx_update_emulated_instruction,
-	.set_interrupt_shadow = vt_set_interrupt_shadow,
-	.get_interrupt_shadow = vt_get_interrupt_shadow,
-	.patch_hypercall = vt_patch_hypercall,
-	.inject_irq = vt_inject_irq,
-	.inject_nmi = vt_inject_nmi,
-	.inject_exception = vt_inject_exception,
-	.cancel_injection = vt_cancel_injection,
-	.interrupt_allowed = vt_interrupt_allowed,
-	.nmi_allowed = vt_nmi_allowed,
-	.get_nmi_mask = vt_get_nmi_mask,
-	.set_nmi_mask = vt_set_nmi_mask,
-	.enable_nmi_window = vt_enable_nmi_window,
-	.enable_irq_window = vt_enable_irq_window,
-	.update_cr8_intercept = vt_update_cr8_intercept,
+	.set_interrupt_shadow = vt_op(set_interrupt_shadow),
+	.get_interrupt_shadow = vt_op(get_interrupt_shadow),
+	.patch_hypercall = vt_op(patch_hypercall),
+	.inject_irq = vt_op(inject_irq),
+	.inject_nmi = vt_op(inject_nmi),
+	.inject_exception = vt_op(inject_exception),
+	.cancel_injection = vt_op(cancel_injection),
+	.interrupt_allowed = vt_op(interrupt_allowed),
+	.nmi_allowed = vt_op(nmi_allowed),
+	.get_nmi_mask = vt_op(get_nmi_mask),
+	.set_nmi_mask = vt_op(set_nmi_mask),
+	.enable_nmi_window = vt_op(enable_nmi_window),
+	.enable_irq_window = vt_op(enable_irq_window),
+	.update_cr8_intercept = vt_op(update_cr8_intercept),
 
 	.x2apic_icr_is_split = false,
-	.set_virtual_apic_mode = vt_set_virtual_apic_mode,
-	.set_apic_access_page_addr = vt_set_apic_access_page_addr,
-	.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
-	.load_eoi_exitmap = vt_load_eoi_exitmap,
+	.set_virtual_apic_mode = vt_op(set_virtual_apic_mode),
+	.set_apic_access_page_addr = vt_op(set_apic_access_page_addr),
+	.refresh_apicv_exec_ctrl = vt_op(refresh_apicv_exec_ctrl),
+	.load_eoi_exitmap = vt_op(load_eoi_exitmap),
 	.apicv_pre_state_restore = pi_apicv_pre_state_restore,
 	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
-	.hwapic_isr_update = vt_hwapic_isr_update,
-	.sync_pir_to_irr = vt_sync_pir_to_irr,
-	.deliver_interrupt = vt_deliver_interrupt,
+	.hwapic_isr_update = vt_op(hwapic_isr_update),
+	.sync_pir_to_irr = vt_op(sync_pir_to_irr),
+	.deliver_interrupt = vt_op(deliver_interrupt),
 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
 
-	.set_tss_addr = vt_set_tss_addr,
-	.set_identity_map_addr = vt_set_identity_map_addr,
+	.set_tss_addr = vt_op(set_tss_addr),
+	.set_identity_map_addr = vt_op(set_identity_map_addr),
 	.get_mt_mask = vmx_get_mt_mask,
 
-	.get_exit_info = vt_get_exit_info,
-	.get_entry_info = vt_get_entry_info,
+	.get_exit_info = vt_op(get_exit_info),
+	.get_entry_info = vt_op(get_entry_info),
 
-	.vcpu_after_set_cpuid = vt_vcpu_after_set_cpuid,
+	.vcpu_after_set_cpuid = vt_op(vcpu_after_set_cpuid),
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-	.get_l2_tsc_offset = vt_get_l2_tsc_offset,
-	.get_l2_tsc_multiplier = vt_get_l2_tsc_multiplier,
-	.write_tsc_offset = vt_write_tsc_offset,
-	.write_tsc_multiplier = vt_write_tsc_multiplier,
+	.get_l2_tsc_offset = vt_op(get_l2_tsc_offset),
+	.get_l2_tsc_multiplier = vt_op(get_l2_tsc_multiplier),
+	.write_tsc_offset = vt_op(write_tsc_offset),
+	.write_tsc_multiplier = vt_op(write_tsc_multiplier),
 
-	.load_mmu_pgd = vt_load_mmu_pgd,
+	.load_mmu_pgd = vt_op(load_mmu_pgd),
 
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.update_cpu_dirty_logging = vt_update_cpu_dirty_logging,
+	.update_cpu_dirty_logging = vt_op(update_cpu_dirty_logging),
 
 	.nested_ops = &vmx_nested_ops,
 
 	.pi_update_irte = vmx_pi_update_irte,
 	.pi_start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
-	.set_hv_timer = vt_set_hv_timer,
-	.cancel_hv_timer = vt_cancel_hv_timer,
+	.set_hv_timer = vt_op(set_hv_timer),
+	.cancel_hv_timer = vt_op(cancel_hv_timer),
 #endif
 
-	.setup_mce = vt_setup_mce,
+	.setup_mce = vt_op(setup_mce),
 
 #ifdef CONFIG_KVM_SMM
-	.smi_allowed = vt_smi_allowed,
-	.enter_smm = vt_enter_smm,
-	.leave_smm = vt_leave_smm,
-	.enable_smi_window = vt_enable_smi_window,
+	.smi_allowed = vt_op(smi_allowed),
+	.enter_smm = vt_op(enter_smm),
+	.leave_smm = vt_op(leave_smm),
+	.enable_smi_window = vt_op(enable_smi_window),
 #endif
 
-	.check_emulate_instruction = vt_check_emulate_instruction,
-	.apic_init_signal_blocked = vt_apic_init_signal_blocked,
+	.check_emulate_instruction = vt_op(check_emulate_instruction),
+	.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
 	.migrate_timers = vmx_migrate_timers,
 
-	.msr_filter_changed = vt_msr_filter_changed,
-	.complete_emulated_msr = vt_complete_emulated_msr,
+	.msr_filter_changed = vt_op(msr_filter_changed),
+	.complete_emulated_msr = vt_op(complete_emulated_msr),
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 
 	.get_untagged_addr = vmx_get_untagged_addr,
 
-	.mem_enc_ioctl = vt_mem_enc_ioctl,
-	.vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
+	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
+	.vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
 
-	.private_max_mapping_level = vt_gmem_private_max_mapping_level
+	.private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
-	.hardware_setup = vt_hardware_setup,
+	.hardware_setup = vt_op(hardware_setup),
 	.handle_intel_pt_intr = NULL,
 
 	.runtime_ops = &vt_x86_ops,
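For a concrete reading of the table above, here is how the two macro forms expand for each configuration (a hand expansion that follows directly from the definitions added earlier in this file):

/* CONFIG_KVM_INTEL_TDX=y */
.vcpu_run      = vt_vcpu_run,        /* vt_op(vcpu_run) */
.mem_enc_ioctl = vt_mem_enc_ioctl,   /* vt_op_tdx_only(mem_enc_ioctl) */

/* CONFIG_KVM_INTEL_TDX=n */
.vcpu_run      = vmx_vcpu_run,       /* vt_op(vcpu_run) */
.mem_enc_ioctl = NULL,               /* vt_op_tdx_only(mem_enc_ioctl) */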

arch/x86/kvm/vmx/x86_ops.h

Lines changed: 0 additions & 65 deletions
@@ -164,71 +164,6 @@ void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
 void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
 void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
 int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
-#else
-static inline void tdx_disable_virtualization_cpu(void) {}
-static inline int tdx_vm_init(struct kvm *kvm) { return -EOPNOTSUPP; }
-static inline void tdx_mmu_release_hkid(struct kvm *kvm) {}
-static inline void tdx_vm_destroy(struct kvm *kvm) {}
-static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
-
-static inline int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
-static inline void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
-static inline void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
-static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
-static inline int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
-static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
-{
-	return EXIT_FASTPATH_NONE;
-}
-static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
-static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
-static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
-static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
-				  enum exit_fastpath_completion fastpath) { return 0; }
-
-static inline void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
-					 int trig_mode, int vector) {}
-static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
-static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
-				     u64 *info2, u32 *intr_info, u32 *error_code) {}
-static inline bool tdx_has_emulated_msr(u32 index) { return false; }
-static inline int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
-static inline int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
-
-static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
-
-static inline int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
-					    enum pg_level level,
-					    void *private_spt)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
-					    enum pg_level level,
-					    void *private_spt)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
-					    enum pg_level level,
-					    kvm_pfn_t pfn)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
-					       enum pg_level level,
-					       kvm_pfn_t pfn)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void tdx_flush_tlb_current(struct kvm_vcpu *vcpu) {}
-static inline void tdx_flush_tlb_all(struct kvm_vcpu *vcpu) {}
-static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
-static inline int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) { return 0; }
 #endif
 
 #endif /* __KVM_X86_VMX_X86_OPS_H */
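With CONFIG_KVM_INTEL_TDX=n, every ops slot now binds to either a vmx_* function or NULL, so the 65 lines of static inline stubs deleted above have no remaining callers. Callers of the TDX-only slots are expected to treat a NULL entry as "not supported"; below is a minimal, hypothetical sketch of that convention (KVM's real call sites go through its static-call machinery and are not part of this diff):

/*
 * Hypothetical illustration of the NULL-means-unsupported convention;
 * the helper name is invented and exists only to show why a NULL slot
 * in the ops table is safe.
 */
static int sketch_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
{
	if (!vt_x86_ops.mem_enc_ioctl)
		return -ENOTTY;	/* CONFIG_KVM_INTEL_TDX=n: slot is NULL */
	return vt_x86_ops.mem_enc_ioctl(kvm, argp);
}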
