@@ -84,14 +84,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
 
 static u64 timer_get_offset(struct arch_timer_context *ctxt)
 {
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
+	if (ctxt->offset.vm_offset)
+		return *ctxt->offset.vm_offset;
 
-	switch (arch_timer_ctx_index(ctxt)) {
-	case TIMER_VTIMER:
-		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
-	default:
-		return 0;
-	}
+	return 0;
 }
 
 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
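
timer_get_offset() no longer special-cases the vtimer: each context carries an optional pointer into per-VM state, and a NULL pointer means an architecturally zero offset (the ptimer case). The types behind ctxt->offset.vm_offset are not part of this excerpt; a minimal sketch of what they presumably look like, with the field names taken from the diff but the exact layout and comments assumed:

	/* Sketch only -- field names from the diff, layout assumed. */
	struct arch_timer_offset {
		/*
		 * If set, points at an offset word held in per-VM state;
		 * if NULL, the context has a fixed zero offset.
		 */
		u64	*vm_offset;
	};

	struct arch_timer_vm_data {
		/* Offset applied to the virtual timer/counter (CNTVOFF) */
		u64	voffset;
	};
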
@@ -128,15 +124,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 
 static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
 {
-	struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-	switch (arch_timer_ctx_index(ctxt)) {
-	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
-		break;
-	default:
+	if (!ctxt->offset.vm_offset) {
 		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+		return;
 	}
+
+	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
 }
 
 u64 kvm_phys_timer_read(void)
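
With the per-vCPU update loop and its mutex removed later in this diff, the shared offset word can be stored to while other vCPUs load it concurrently, so the writer uses WRITE_ONCE() to keep the compiler from tearing the 64-bit store; the reader in timer_get_offset() is a plain aligned load. The general shape of that lockless pattern, as a standalone sketch (names hypothetical, not from the patch):

	static u64 shared_offset;

	static void offset_store(u64 v)
	{
		WRITE_ONCE(shared_offset, v);	/* one untorn store */
	}

	static u64 offset_load(void)
	{
		return READ_ONCE(shared_offset);	/* one untorn load */
	}

A context without a backing vm_offset (the ptimer) only ever accepts a zero offset; anything else trips the WARN().
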
@@ -765,36 +758,18 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-/* Make the updates of cntvoff for all vtimer contexts atomic */
-static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
-{
-	unsigned long i;
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_vcpu *tmp;
-
-	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(i, tmp, kvm)
-		timer_set_offset(vcpu_vtimer(tmp), cntvoff);
-
-	/*
-	 * When called from the vcpu create path, the CPU being created is not
-	 * included in the loop above, so we just set it here as well.
-	 */
-	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
-	mutex_unlock(&kvm->lock);
-}
-
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	vtimer->vcpu = vcpu;
+	vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
 	ptimer->vcpu = vcpu;
 
 	/* Synchronize cntvoff across all vtimers of a VM. */
-	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+	timer_set_offset(vtimer, kvm_phys_timer_read());
 	timer_set_offset(ptimer, 0);
 
 	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
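
Pointing every vCPU's vtimer context at the same &vcpu->kvm->arch.timer_data.voffset is what makes update_vtimer_cntvoff() removable: a single timer_set_offset() on any vCPU is visible through every other context, so there is no per-vCPU loop left to run and nothing for kvm->lock to serialize. A hypothetical assertion of the invariant this establishes (not in the patch; assumes kvm, kvm_for_each_vcpu() and vcpu_vtimer() in scope):

	unsigned long i;
	struct kvm_vcpu *tmp;

	/* Every vtimer context of this VM aliases the one voffset word. */
	kvm_for_each_vcpu(i, tmp, kvm)
		WARN_ON(vcpu_vtimer(tmp)->offset.vm_offset !=
			&kvm->arch.timer_data.voffset);
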
@@ -840,7 +815,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 		break;
 	case KVM_REG_ARM_TIMER_CNT:
 		timer = vcpu_vtimer(vcpu);
-		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
+		timer_set_offset(timer, kvm_phys_timer_read() - value);
 		break;
 	case KVM_REG_ARM_TIMER_CVAL:
 		timer = vcpu_vtimer(vcpu);
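
The KVM_REG_ARM_TIMER_CNT case follows from the architecture: the guest observes CNTVCT = CNTPCT - CNTVOFF, so making the virtual counter read `value` right now means storing an offset of CNTPCT - value. Worked through with small numbers: if kvm_phys_timer_read() returns 1000 and userspace writes value = 100, the offset becomes 900 and the guest sees 1000 - 900 = 100, counting forward from there.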