@@ -2542,15 +2542,15 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
         kvm_vcpu_write_tsc_offset(vcpu, offset);
         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-        spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
         if (!matched) {
                 kvm->arch.nr_vcpus_matched_tsc = 0;
         } else if (!already_matched) {
                 kvm->arch.nr_vcpus_matched_tsc++;
         }
 
         kvm_track_tsc_matching(vcpu);
-        spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2780,9 +2780,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
         kvm_make_mclock_inprogress_request(kvm);
 
         /* no guest entries from this point */
-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         pvclock_update_vm_gtod_copy(kvm);
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
         kvm_for_each_vcpu(i, vcpu, kvm)
                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2800,15 +2800,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
         unsigned long flags;
         u64 ret;
 
-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         if (!ka->use_master_clock) {
-                spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+                raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
                 return get_kvmclock_base_ns() + ka->kvmclock_offset;
         }
 
         hv_clock.tsc_timestamp = ka->master_cycle_now;
         hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
         /* both __this_cpu_read() and rdtsc() should be on the same cpu */
         get_cpu();
@@ -2902,13 +2902,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
          * If the host uses TSC clock, then passthrough TSC as stable
          * to the guest.
          */
-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         use_master_clock = ka->use_master_clock;
         if (use_master_clock) {
                 host_tsc = ka->master_cycle_now;
                 kernel_ns = ka->master_kernel_ns;
         }
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
         /* Keep irq disabled to prevent changes to the clock */
         local_irq_save(flags);
@@ -6100,13 +6100,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
                  * is slightly ahead) here we risk going negative on unsigned
                  * 'system_time' when 'user_ns.clock' is very small.
                  */
-                spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+                raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
                 if (kvm->arch.use_master_clock)
                         now_ns = ka->master_kernel_ns;
                 else
                         now_ns = get_kvmclock_base_ns();
                 ka->kvmclock_offset = user_ns.clock - now_ns;
-                spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+                raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
 
                 kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
                 break;
@@ -8156,9 +8156,9 @@ static void kvm_hyperv_tsc_notifier(void)
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 struct kvm_arch *ka = &kvm->arch;
 
-                spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+                raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
                 pvclock_update_vm_gtod_copy(kvm);
-                spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+                raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
                 kvm_for_each_vcpu(cpu, vcpu, kvm)
                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -8800,9 +8800,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 
         kvm_run->cr8 = kvm_get_cr8(vcpu);
         kvm_run->apic_base = kvm_get_apic_base(vcpu);
+
+        /*
+         * The call to kvm_ready_for_interrupt_injection() may end up in
+         * kvm_xen_has_interrupt() which may require the srcu lock to be
+         * held, to protect against changes in the vcpu_info address.
+         */
+        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
         kvm_run->ready_for_interrupt_injection =
                 pic_in_kernel(vcpu->kvm) ||
                 kvm_vcpu_ready_for_interrupt_injection(vcpu);
+        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
         if (is_smm(vcpu))
                 kvm_run->flags |= KVM_RUN_X86_SMM;
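
The comment added in the hunk above carries the rationale for the new read-side section: kvm_vcpu_ready_for_interrupt_injection() can reach kvm_xen_has_interrupt(), which touches state protected by kvm->srcu. As a minimal, self-contained sketch of that SRCU read-side pattern (the names below are illustrative, not taken from the patch), the reader brackets the access with srcu_read_lock()/srcu_read_unlock() and hands the returned index back to the unlock:

/* Sketch only: illustrative names, not the actual KVM code. */
#include <linux/srcu.h>

DEFINE_SRCU(example_srcu);

static int example_reader(const int *example_state)
{
        int idx, val;

        idx = srcu_read_lock(&example_srcu);    /* returns an index... */
        val = *example_state;                   /* access SRCU-protected data */
        srcu_read_unlock(&example_srcu, idx);   /* ...which must be passed back */

        return val;
}

Unlike a plain rcu_read_lock() section, an SRCU read-side critical section is allowed to sleep, which is what makes it suitable to hold across helper calls that may end up touching guest memory.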
@@ -11199,7 +11207,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
         mutex_init(&kvm->arch.apic_map_lock);
-        spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+        raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
         kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
         pvclock_update_vm_gtod_copy(kvm);
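
One piece the hunks above do not show is the declaration side: raw_spin_lock_init() and the raw_spin_lock_irqsave() calls only compile if pvclock_gtod_sync_lock is itself a raw_spinlock_t, so the field definition in struct kvm_arch (declared in arch/x86/include/asm/kvm_host.h) changes along with the call sites. The distinction being relied on is that a raw_spinlock_t always busy-waits with preemption (and, in the irqsave variants, interrupts) disabled, whereas an ordinary spinlock_t becomes a sleeping lock on PREEMPT_RT; converting a lock to raw is the usual fix when it must be taken somewhere a sleeping lock is not allowed, though the call path motivating this particular conversion is not shown in the diff. A minimal, self-contained sketch of the resulting pattern, with made-up names standing in for the kvm_arch field:

/* Sketch only: illustrative names, not the actual KVM structures. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_clock {
        raw_spinlock_t sync_lock;       /* stand-in for pvclock_gtod_sync_lock */
        u64 master_kernel_ns;
};

static void example_init(struct example_clock *c)
{
        raw_spin_lock_init(&c->sync_lock);
}

static u64 example_read(struct example_clock *c)
{
        unsigned long flags;
        u64 ns;

        /* Never sleeps, even on PREEMPT_RT, so it is safe in atomic context. */
        raw_spin_lock_irqsave(&c->sync_lock, flags);
        ns = c->master_kernel_ns;
        raw_spin_unlock_irqrestore(&c->sync_lock, flags);

        return ns;
}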