@@ -869,6 +869,12 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
869
869
870
870
eb = (1u << PF_VECTOR ) | (1u << UD_VECTOR ) | (1u << MC_VECTOR ) |
871
871
(1u << DB_VECTOR ) | (1u << AC_VECTOR );
872
+ /*
873
+ * #VE isn't used for VMX. To test against unexpected changes
874
+ * related to #VE for VMX, intercept unexpected #VE and warn on it.
875
+ */
876
+ if (IS_ENABLED (CONFIG_KVM_INTEL_PROVE_VE ))
877
+ eb |= 1u << VE_VECTOR ;
872
878
/*
873
879
* Guest access to VMware backdoor ports could legitimately
874
880
* trigger #GP because of TSS I/O permission bitmap.
@@ -2602,6 +2608,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2602
2608
& _cpu_based_2nd_exec_control ))
2603
2609
return - EIO ;
2604
2610
}
2611
+ if (!IS_ENABLED (CONFIG_KVM_INTEL_PROVE_VE ))
2612
+ _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE ;
2613
+
2605
2614
#ifndef CONFIG_X86_64
2606
2615
if (!(_cpu_based_2nd_exec_control &
2607
2616
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES ))
@@ -2626,6 +2635,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2626
2635
return - EIO ;
2627
2636
2628
2637
vmx_cap -> ept = 0 ;
2638
+ _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE ;
2629
2639
}
2630
2640
if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID ) &&
2631
2641
vmx_cap -> vpid ) {
@@ -4588,6 +4598,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4588
4598
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID ;
4589
4599
if (!enable_ept ) {
4590
4600
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT ;
4601
+ exec_control &= ~SECONDARY_EXEC_EPT_VIOLATION_VE ;
4591
4602
enable_unrestricted_guest = 0 ;
4592
4603
}
4593
4604
if (!enable_unrestricted_guest )
@@ -4711,8 +4722,12 @@ static void init_vmcs(struct vcpu_vmx *vmx)
4711
4722
4712
4723
exec_controls_set (vmx , vmx_exec_control (vmx ));
4713
4724
4714
- if (cpu_has_secondary_exec_ctrls ())
4725
+ if (cpu_has_secondary_exec_ctrls ()) {
4715
4726
secondary_exec_controls_set (vmx , vmx_secondary_exec_control (vmx ));
4727
+ if (vmx -> ve_info )
4728
+ vmcs_write64 (VE_INFORMATION_ADDRESS ,
4729
+ __pa (vmx -> ve_info ));
4730
+ }
4716
4731
4717
4732
if (cpu_has_tertiary_exec_ctrls ())
4718
4733
tertiary_exec_controls_set (vmx , vmx_tertiary_exec_control (vmx ));
@@ -5200,6 +5215,9 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5200
5215
if (is_invalid_opcode (intr_info ))
5201
5216
return handle_ud (vcpu );
5202
5217
5218
+ if (KVM_BUG_ON (is_ve_fault (intr_info ), vcpu -> kvm ))
5219
+ return - EIO ;
5220
+
5203
5221
error_code = 0 ;
5204
5222
if (intr_info & INTR_INFO_DELIVER_CODE_MASK )
5205
5223
error_code = vmcs_read32 (VM_EXIT_INTR_ERROR_CODE );
@@ -6409,8 +6427,22 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
6409
6427
pr_err ("Virtual processor ID = 0x%04x\n" ,
6410
6428
vmcs_read16 (VIRTUAL_PROCESSOR_ID ));
6411
6429
if (secondary_exec_control & SECONDARY_EXEC_EPT_VIOLATION_VE ) {
6412
- pr_err ("VE info address = 0x%016llx\n" ,
6413
- vmcs_read64 (VE_INFORMATION_ADDRESS ));
6430
+ struct vmx_ve_information * ve_info = vmx -> ve_info ;
6431
+ u64 ve_info_pa = vmcs_read64 (VE_INFORMATION_ADDRESS );
6432
+
6433
+ /*
6434
+ * If KVM is dumping the VMCS, then something has gone wrong
6435
+ * already. Dereferencing an address from the VMCS, which could
6436
+ * very well be corrupted, is a terrible idea. The virtual
6437
+ * address is known so use it.
6438
+ */
6439
+ pr_err ("VE info address = 0x%016llx%s\n" , ve_info_pa ,
6440
+ ve_info_pa == __pa (ve_info ) ? "" : "(corrupted!)" );
6441
+ pr_err ("ve_info: 0x%08x 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%04x\n" ,
6442
+ ve_info -> exit_reason , ve_info -> delivery ,
6443
+ ve_info -> exit_qualification ,
6444
+ ve_info -> guest_linear_address ,
6445
+ ve_info -> guest_physical_address , ve_info -> eptp_index );
6414
6446
}
6415
6447
}
6416
6448
@@ -7466,6 +7498,7 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7466
7498
free_vpid (vmx -> vpid );
7467
7499
nested_vmx_free_vcpu (vcpu );
7468
7500
free_loaded_vmcs (vmx -> loaded_vmcs );
7501
+ free_page ((unsigned long )vmx -> ve_info );
7469
7502
}
7470
7503
7471
7504
int vmx_vcpu_create (struct kvm_vcpu * vcpu )
@@ -7559,6 +7592,20 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7559
7592
goto free_vmcs ;
7560
7593
}
7561
7594
7595
+ err = - ENOMEM ;
7596
+ if (vmcs_config .cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_EPT_VIOLATION_VE ) {
7597
+ struct page * page ;
7598
+
7599
+ BUILD_BUG_ON (sizeof (* vmx -> ve_info ) > PAGE_SIZE );
7600
+
7601
+ /* ve_info must be page aligned. */
7602
+ page = alloc_page (GFP_KERNEL_ACCOUNT | __GFP_ZERO );
7603
+ if (!page )
7604
+ goto free_vmcs ;
7605
+
7606
+ vmx -> ve_info = page_to_virt (page );
7607
+ }
7608
+
7562
7609
if (vmx_can_use_ipiv (vcpu ))
7563
7610
WRITE_ONCE (to_kvm_vmx (vcpu -> kvm )-> pid_table [vcpu -> vcpu_id ],
7564
7611
__pa (& vmx -> pi_desc ) | PID_TABLE_ENTRY_VALID );
0 commit comments