@@ -2087,9 +2087,29 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
+static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	u64 timer_value = 0;
+
+	u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
+			VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
+
+	if (!vmx->nested.has_preemption_timer_deadline) {
+		timer_value = vmcs12->vmx_preemption_timer_value;
+		vmx->nested.preemption_timer_deadline = timer_value +
+				l1_scaled_tsc;
+		vmx->nested.has_preemption_timer_deadline = true;
+	} else if (l1_scaled_tsc < vmx->nested.preemption_timer_deadline)
+		timer_value = vmx->nested.preemption_timer_deadline -
+				l1_scaled_tsc;
+	return timer_value;
+}
+
+static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
+				       u64 preemption_timeout)
 {
-	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
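For intuition: the new vmx_calc_preemption_timer_value() helper pins an absolute deadline in L1's scaled-TSC domain the first time the timer is programmed for this nested entry, and on any later re-entry (notably the first entry on a migration target, where the deadline arrives via the new nested-state field) converts that deadline back into remaining ticks, clamping at zero once the deadline has passed. A standalone toy model of that arithmetic, with hypothetical names that are not part of the patch:

	/* toy model of the deadline bookkeeping above; not kernel code */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct toy_timer {
		bool has_deadline;
		uint64_t deadline;	/* absolute, in scaled-TSC ticks */
	};

	static uint64_t toy_calc_timer_value(struct toy_timer *t,
					     uint64_t now, uint64_t programmed)
	{
		if (!t->has_deadline) {	/* first entry: pin the deadline */
			t->deadline = now + programmed;
			t->has_deadline = true;
			return programmed;
		}
		/* re-entry: return the ticks left, 0 if already expired */
		return now < t->deadline ? t->deadline - now : 0;
	}

	int main(void)
	{
		struct toy_timer t = { 0 };

		/* L1 programs 50 ticks while its scaled TSC reads 1000,
		 * so the deadline is pinned at 1050. */
		printf("%llu\n", (unsigned long long)
		       toy_calc_timer_value(&t, 1000, 50));	/* prints 50 */
		/* After migration the scaled TSC reads 1020: 30 ticks left. */
		printf("%llu\n", (unsigned long long)
		       toy_calc_timer_value(&t, 1020, 50));	/* prints 30 */
		return 0;
	}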
@@ -3348,8 +3368,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	 * the timer.
 	 */
 	vmx->nested.preemption_timer_expired = false;
-	if (nested_cpu_has_preemption_timer(vmcs12))
-		vmx_start_preemption_timer(vcpu);
+	if (nested_cpu_has_preemption_timer(vmcs12)) {
+		u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
+		vmx_start_preemption_timer(vcpu, timer_value);
+	}
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -3457,6 +3479,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * the nested entry.
 	 */
 	vmx->nested.nested_run_pending = 1;
+	vmx->nested.has_preemption_timer_deadline = false;
 	status = nested_vmx_enter_non_root_mode(vcpu, true);
 	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
 		goto vmentry_failed;
@@ -3957,9 +3980,10 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
 
 	if (nested_cpu_has_preemption_timer(vmcs12) &&
-	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
-		vmcs12->vmx_preemption_timer_value =
-			vmx_get_preemption_timer_value(vcpu);
+	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
+	    !vmx->nested.nested_run_pending)
+		vmcs12->vmx_preemption_timer_value =
+			vmx_get_preemption_timer_value(vcpu);
 
 	/*
 	 * In some cases (usually, nested EPT), L2 is allowed to change its
@@ -5891,8 +5915,10 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 		.flags = 0,
 		.format = KVM_STATE_NESTED_FORMAT_VMX,
 		.size = sizeof(kvm_state),
+		.hdr.vmx.flags = 0,
 		.hdr.vmx.vmxon_pa = -1ull,
 		.hdr.vmx.vmcs12_pa = -1ull,
+		.hdr.vmx.preemption_timer_deadline = 0,
 	};
 	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
 		&user_kvm_nested_state->data.vmx[0];
@@ -5934,6 +5960,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 
 		if (vmx->nested.mtf_pending)
 			kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
+
+		if (nested_cpu_has_preemption_timer(vmcs12) &&
+		    vmx->nested.has_preemption_timer_deadline) {
+			kvm_state.hdr.vmx.flags |=
+				KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
+			kvm_state.hdr.vmx.preemption_timer_deadline =
+				vmx->nested.preemption_timer_deadline;
+		}
 	}
 }
 
@@ -5979,7 +6013,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}
-
 out:
 	return kvm_state.size;
 }
@@ -6141,6 +6174,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 			goto error_guest_mode;
 	}
 
+	if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
+		vmx->nested.has_preemption_timer_deadline = true;
+		vmx->nested.preemption_timer_deadline =
+			kvm_state->hdr.vmx.preemption_timer_deadline;
+	}
+
 	if (nested_vmx_check_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_host_state(vcpu, vmcs12) ||
 	    nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
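With the new hdr.vmx.flags and hdr.vmx.preemption_timer_deadline fields, a VMM can carry the deadline across live migration through the existing nested-state ioctls. A rough sketch of that save/restore flow, assuming src_vcpu_fd and dst_vcpu_fd are open vCPU file descriptors, the destination vCPU is otherwise configured identically, and the uAPI headers include this patch:

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	/* vmcs12 plus shadow vmcs12 fit comfortably in 8 KiB of data. */
	#define NESTED_STATE_BUF_SIZE (sizeof(struct kvm_nested_state) + 8192)

	static int migrate_nested_state(int src_vcpu_fd, int dst_vcpu_fd)
	{
		struct kvm_nested_state *state = calloc(1, NESTED_STATE_BUF_SIZE);
		int ret = -1;

		if (!state)
			return -1;
		state->size = NESTED_STATE_BUF_SIZE;
		if (ioctl(src_vcpu_fd, KVM_GET_NESTED_STATE, state) < 0)
			goto out;
		if (state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
			printf("deadline travels with the state: %llu\n",
			       (unsigned long long)
			       state->hdr.vmx.preemption_timer_deadline);
		/* Restoring the state re-arms has_preemption_timer_deadline on
		 * the destination, so the first nested entry there resumes the
		 * timer from the saved deadline instead of restarting it. */
		ret = ioctl(dst_vcpu_fd, KVM_SET_NESTED_STATE, state);
	out:
		free(state);
		return ret;
	}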