@@ -161,7 +161,7 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
 
 /*
  * List of MSRs that can be directly passed to the guest.
- * In addition to these x2apic and PT MSRs are handled specially.
+ * In addition to these x2apic, PT and LBR MSRs are handled specially.
  */
 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
 	MSR_IA32_SPEC_CTRL,
@@ -669,25 +669,14 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 	return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
-static int possible_passthrough_msr_slot(u32 msr)
+static int vmx_get_passthrough_msr_slot(u32 msr)
 {
-	u32 i;
-
-	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
-		if (vmx_possible_passthrough_msrs[i] == msr)
-			return i;
-
-	return -ENOENT;
-}
-
-static bool is_valid_passthrough_msr(u32 msr)
-{
-	bool r;
+	int i;
 
 	switch (msr) {
 	case 0x800 ... 0x8ff:
 		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
-		return true;
+		return -ENOENT;
 	case MSR_IA32_RTIT_STATUS:
 	case MSR_IA32_RTIT_OUTPUT_BASE:
 	case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -702,14 +691,16 @@ static bool is_valid_passthrough_msr(u32 msr)
 	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-		return true;
+		return -ENOENT;
 	}
 
-	r = possible_passthrough_msr_slot(msr) != -ENOENT;
-
-	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
+		if (vmx_possible_passthrough_msrs[i] == msr)
+			return i;
+	}
 
-	return r;
+	WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+	return -ENOENT;
 }
 
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
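
The two old helpers collapse into one: special-cased MSRs (the x2APIC, PT and LBR ranges) now return -ENOENT without warning, a hit in vmx_possible_passthrough_msrs[] returns its slot index, and only a truly unknown MSR trips the WARN. Below is a minimal userspace sketch of that three-way pattern; possible_msrs[], get_passthrough_slot() and the MSR values are hypothetical stand-ins, not kernel code:

/*
 * Sketch only: possible_msrs[] and the 0x800 ... 0x8ff range are
 * illustrative stand-ins. Case ranges are a GCC/Clang extension,
 * as used throughout kernel code.
 */
#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const unsigned int possible_msrs[] = { 0x48, 0x10b, 0xd90 };

static int get_passthrough_slot(unsigned int msr)
{
	size_t i;

	switch (msr) {
	case 0x800 ... 0x8ff:	/* special-cased: valid, but owns no slot */
		return -ENOENT;
	}

	for (i = 0; i < ARRAY_SIZE(possible_msrs); i++) {
		if (possible_msrs[i] == msr)
			return i;
	}

	/* stand-in for the kernel's WARN() */
	fprintf(stderr, "Invalid MSR %#x, please adapt possible_msrs[]\n", msr);
	return -ENOENT;
}

int main(void)
{
	printf("%d %d %d\n",
	       get_passthrough_slot(0x10b),	/* 1: slot index */
	       get_passthrough_slot(0x810),	/* -ENOENT, silently */
	       get_passthrough_slot(0x123));	/* -ENOENT, after warning */
	return 0;
}
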
@@ -3963,6 +3954,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -3972,16 +3964,13 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filters change.
-	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				clear_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				clear_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	 */
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			clear_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			clear_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if ((type & MSR_TYPE_R) &&
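
The shadow bitmap records the intercept state KVM wants for each possible passthrough MSR, so vmx_msr_filter_changed() can replay it against a new userspace MSR filter. With the combined helper, the caller only needs the idx >= 0 check: a negative return covers both special-cased MSRs (tracked elsewhere) and invalid ones (already WARNed about inside the helper). The set_bit() mirror in vmx_enable_intercept_for_msr() below follows the same pattern.
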
@@ -4007,6 +3996,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	int idx;
 
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -4016,16 +4006,13 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	/*
 	 * Mark the desired intercept state in shadow bitmap, this is needed
 	 * for resync when the MSR filter changes.
-	 */
-	if (is_valid_passthrough_msr(msr)) {
-		int idx = possible_passthrough_msr_slot(msr);
-
-		if (idx != -ENOENT) {
-			if (type & MSR_TYPE_R)
-				set_bit(idx, vmx->shadow_msr_intercept.read);
-			if (type & MSR_TYPE_W)
-				set_bit(idx, vmx->shadow_msr_intercept.write);
-		}
+	 */
+	idx = vmx_get_passthrough_msr_slot(msr);
+	if (idx >= 0) {
+		if (type & MSR_TYPE_R)
+			set_bit(idx, vmx->shadow_msr_intercept.read);
+		if (type & MSR_TYPE_W)
+			set_bit(idx, vmx->shadow_msr_intercept.write);
 	}
 
 	if (type & MSR_TYPE_R)
@@ -4136,6 +4123,9 @@ static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 i;
 
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
 	/*
 	 * Redo intercept permissions for MSRs that KVM is passing through to
 	 * the guest. Disabling interception will check the new MSR filter and
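
The added guard makes vmx_msr_filter_changed() a nop on hosts without MSR-bitmap support: without MSR bitmaps every MSR access is intercepted unconditionally, so a filter change has nothing to update, and the intercept helpers this loop calls would bail out early anyway.
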
@@ -6539,7 +6529,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
 		vcpu->run->internal.data[0] = vectoring_info;
 		vcpu->run->internal.data[1] = exit_reason.full;
-		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+		vcpu->run->internal.data[2] = vmx_get_exit_qual(vcpu);
 		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
 			vcpu->run->internal.data[ndata++] =
 				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
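
vcpu->arch.exit_qualification is only cached on select exits (notably EPT violations), so it can hold a stale value by the time this error path runs. vmx_get_exit_qual() instead reads the current exit's qualification from the VMCS through KVM's register cache, so the value reported to userspace matches the exit being described.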