@@ -51,6 +51,93 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                              struct arch_timer_context *timer,
                              enum kvm_arch_timer_regs treg);
 
+u32 timer_get_ctl(struct arch_timer_context *ctxt)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch (arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+        case TIMER_PTIMER:
+                return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+        default:
+                WARN_ON(1);
+                return 0;
+        }
+}
+
+u64 timer_get_cval(struct arch_timer_context *ctxt)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch (arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+        case TIMER_PTIMER:
+                return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+        default:
+                WARN_ON(1);
+                return 0;
+        }
+}
+
+static u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch (arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+        default:
+                return 0;
+        }
+}
+
+static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch (arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+                break;
+        case TIMER_PTIMER:
+                __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+                break;
+        default:
+                WARN_ON(1);
+        }
+}
+
+static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch (arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+                break;
+        case TIMER_PTIMER:
+                __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+                break;
+        default:
+                WARN_ON(1);
+        }
+}
+
+static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch (arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+                break;
+        default:
+                WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+        }
+}
+
 u64 kvm_phys_timer_read(void)
 {
         return timecounter->cc->read(timecounter->cc);
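The new accessors route all timer register state through the vCPU's sys_regs file via __vcpu_sys_reg(), keyed by the context index, instead of the old cnt_ctl/cnt_cval/cntvoff fields on the context itself. A minimal userspace sketch of that indirection (not kernel code; the struct layout, enum values and names below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the layout: a per-vCPU register file indexed
 * by an enum, with per-timer contexts holding a backpointer. */
enum sys_reg { CNTV_CTL, CNTV_CVAL, CNTP_CTL, CNTP_CVAL, CNTVOFF, NR_REGS };
enum timer_index { TIMER_VTIMER, TIMER_PTIMER };

struct vcpu {
        uint64_t sys_regs[NR_REGS];
};

struct timer_context {
        struct vcpu *vcpu;
        enum timer_index index;
};

/* The accessor hides which sys_regs slot backs a given timer context. */
static uint64_t timer_get_cval(struct timer_context *ctxt)
{
        return ctxt->vcpu->sys_regs[ctxt->index == TIMER_VTIMER ?
                                    CNTV_CVAL : CNTP_CVAL];
}

static void timer_set_cval(struct timer_context *ctxt, uint64_t cval)
{
        ctxt->vcpu->sys_regs[ctxt->index == TIMER_VTIMER ?
                             CNTV_CVAL : CNTP_CVAL] = cval;
}

int main(void)
{
        struct vcpu v = { { 0 } };
        struct timer_context vtimer = { .vcpu = &v, .index = TIMER_VTIMER };

        timer_set_cval(&vtimer, 12345);
        /* Callers never touch sys_regs[] directly, so the backing
         * storage can move again without touching any call site. */
        printf("cval = %llu\n", (unsigned long long)timer_get_cval(&vtimer));
        return 0;
}

The payoff shows in the rest of the diff: every call site is converted to go through an accessor, so the mechanical replacements below are the whole change.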
@@ -124,8 +211,8 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
         u64 cval, now;
 
-        cval = timer_ctx->cnt_cval;
-        now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+        cval = timer_get_cval(timer_ctx);
+        now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
 
         if (now < cval) {
                 u64 ns;
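kvm_timer_compute_delta() works in guest time: the guest-visible count is the physical count minus the per-timer offset, and the result is the number of nanoseconds until that count reaches the compare value. A self-contained sketch of the same arithmetic (the fixed counter frequency is an assumption for illustration; the kernel converts via its timecounter instead):

#include <stdint.h>
#include <stdio.h>

#define COUNTER_HZ   62500000ULL    /* illustrative frequency */
#define NSEC_PER_SEC 1000000000ULL

/* Mirrors the hunk above: subtract the offset to get the guest count,
 * then express the remaining ticks until CVAL in nanoseconds. */
static uint64_t compute_delta_ns(uint64_t phys_count, uint64_t offset,
                                 uint64_t cval)
{
        uint64_t now = phys_count - offset;

        if (now < cval)
                return (cval - now) * NSEC_PER_SEC / COUNTER_HZ;
        return 0;       /* already expired */
}

int main(void)
{
        /* 62500000 ticks ahead of "now" is exactly one second here. */
        printf("%llu ns\n", (unsigned long long)
               compute_delta_ns(1000000, 500000, 500000 + 62500000));
        return 0;
}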
@@ -144,8 +231,8 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 {
         WARN_ON(timer_ctx && timer_ctx->loaded);
         return timer_ctx &&
-               !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-               (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+               ((timer_get_ctl(timer_ctx) &
+                 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
 }
 
 /*
@@ -256,8 +343,8 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
         if (!kvm_timer_irq_can_fire(timer_ctx))
                 return false;
 
-        cval = timer_ctx->cnt_cval;
-        now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+        cval = timer_get_cval(timer_ctx);
+        now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
 
         return cval <= now;
 }
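The kvm_timer_irq_can_fire() rewrite two hunks up folds the old pair of tests, IT_MASK clear and ENABLE set, into a single masked compare. A quick standalone check that the two forms agree on all four bit combinations (bit positions as in the architected CNT*_CTL layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_ENABLE  (1U << 0)
#define CTRL_IT_MASK (1U << 1)

int main(void)
{
        /* "(ctl & (IT_MASK|ENABLE)) == ENABLE" asks both questions at
         * once: ENABLE set, IT_MASK clear. Verify it matches the
         * two-test original for every combination of the two bits. */
        for (uint32_t ctl = 0; ctl < 4; ctl++) {
                int old = !(ctl & CTRL_IT_MASK) && (ctl & CTRL_ENABLE);
                int new = (ctl & (CTRL_IT_MASK | CTRL_ENABLE)) == CTRL_ENABLE;

                assert(old == new);
                printf("ctl=%u can_fire=%d\n", ctl, new);
        }
        return 0;
}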
@@ -350,17 +437,17 @@ static void timer_save_state(struct arch_timer_context *ctx)
 
         switch (index) {
         case TIMER_VTIMER:
-                ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
-                ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
+                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
+                timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
 
                 /* Disable the timer */
                 write_sysreg_el0(0, SYS_CNTV_CTL);
                 isb();
 
                 break;
         case TIMER_PTIMER:
-                ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
-                ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
+                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
+                timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
 
                 /* Disable the timer */
                 write_sysreg_el0(0, SYS_CNTP_CTL);
@@ -429,14 +516,14 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 
         switch (index) {
         case TIMER_VTIMER:
-                write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
+                write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
                 isb();
-                write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
+                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
                 break;
         case TIMER_PTIMER:
-                write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
+                write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
                 isb();
-                write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
+                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
                 break;
         case NR_KVM_TIMERS:
                 BUG();
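timer_save_state() captures CTL/CVAL and then disables the hardware timer; timer_restore_state() programs CVAL first, issues an isb(), and only then writes CTL. A plausible reading of that ordering, sketched against a toy register file (hypothetical types; the real code additionally needs the barriers shown in the hunks):

#include <stdint.h>
#include <stdio.h>

struct hw_timer {       /* toy stand-in for the hardware registers */
        uint32_t ctl;
        uint64_t cval;
};

struct saved_state {
        uint32_t ctl;
        uint64_t cval;
};

#define CTRL_ENABLE (1U << 0)

static void timer_save(struct saved_state *s, struct hw_timer *hw)
{
        /* Capture the state first, then disable the hardware timer so
         * it cannot fire while the vCPU is not running. */
        s->ctl = hw->ctl;
        s->cval = hw->cval;
        hw->ctl = 0;
}

static void timer_restore(struct hw_timer *hw, const struct saved_state *s)
{
        /* Program the compare value before re-enabling via CTL:
         * enabling first could briefly arm the timer against whatever
         * stale CVAL the hardware still holds. The isb() in the
         * kernel version orders the two writes. */
        hw->cval = s->cval;
        hw->ctl = s->ctl;
}

int main(void)
{
        struct hw_timer hw = { .ctl = CTRL_ENABLE, .cval = 42 };
        struct saved_state s;

        timer_save(&s, &hw);
        printf("after save: ctl=%u (disabled)\n", hw.ctl);
        timer_restore(&hw, &s);
        printf("after restore: ctl=%u cval=%llu\n",
               hw.ctl, (unsigned long long)hw.cval);
        return 0;
}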
@@ -528,7 +615,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
                 kvm_timer_vcpu_load_nogic(vcpu);
         }
 
-        set_cntvoff(map.direct_vtimer->cntvoff);
+        set_cntvoff(timer_get_offset(map.direct_vtimer));
 
         kvm_timer_unblocking(vcpu);
 
@@ -639,8 +726,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
          * resets the timer to be disabled and unmasked and is compliant with
          * the ARMv7 architecture.
          */
-        vcpu_vtimer(vcpu)->cnt_ctl = 0;
-        vcpu_ptimer(vcpu)->cnt_ctl = 0;
+        timer_set_ctl(vcpu_vtimer(vcpu), 0);
+        timer_set_ctl(vcpu_ptimer(vcpu), 0);
 
         if (timer->enabled) {
                 kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
@@ -668,13 +755,13 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
 
         mutex_lock(&kvm->lock);
         kvm_for_each_vcpu(i, tmp, kvm)
-                vcpu_vtimer(tmp)->cntvoff = cntvoff;
+                timer_set_offset(vcpu_vtimer(tmp), cntvoff);
 
         /*
          * When called from the vcpu create path, the CPU being created is not
         * included in the loop above, so we just set it here as well.
          */
-        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+        timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
         mutex_unlock(&kvm->lock);
 }
 
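update_vtimer_cntvoff() applies one offset to every vtimer in the VM because the architecture defines CNTVCT = CNTPCT - CNTVOFF; a shared offset is what makes all vCPUs agree on virtual time. A small illustration (plain C, invented values):

#include <stdint.h>
#include <stdio.h>

#define NR_VCPUS 4

/* CNTVCT = CNTPCT - CNTVOFF: the virtual count is the physical count
 * shifted down by a per-VM constant. */
static uint64_t virt_count(uint64_t phys_count, uint64_t cntvoff)
{
        return phys_count - cntvoff;
}

int main(void)
{
        uint64_t cntvoff[NR_VCPUS];
        uint64_t host_count = 123456789;  /* pretend CNTPCT at VM creation */
        int i;

        /* Mirrors the loop above: one offset, applied to every vCPU,
         * so all guest CPUs observe the same virtual time. */
        for (i = 0; i < NR_VCPUS; i++)
                cntvoff[i] = host_count;

        /* Later, with the physical counter further along... */
        for (i = 0; i < NR_VCPUS; i++)
                printf("vcpu%d sees CNTVCT=%llu\n", i,
                       (unsigned long long)virt_count(host_count + 1000,
                                                      cntvoff[i]));
        return 0;
}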
@@ -684,9 +771,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
+        vtimer->vcpu = vcpu;
+        ptimer->vcpu = vcpu;
+
         /* Synchronize cntvoff across all vtimers of a VM. */
         update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
-        ptimer->cntvoff = 0;
+        timer_set_offset(ptimer, 0);
 
         hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
         timer->bg_timer.function = kvm_bg_timer_expire;
@@ -704,9 +794,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 
         vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
         ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
-
-        vtimer->vcpu = vcpu;
-        ptimer->vcpu = vcpu;
 }
 
 static void kvm_timer_init_interrupt(void *info)
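The two hunks above are a matched pair: the vcpu backpointer assignments move from the end of kvm_timer_vcpu_init() to before update_vtimer_cntvoff(), because that call now reaches timer_set_offset(), which dereferences ctxt->vcpu. A sketch of the hazard being avoided (hypothetical names, exaggerated failure mode):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vcpu { uint64_t cntvoff; };

struct timer_context {
        struct vcpu *vcpu;      /* backpointer used by every accessor */
};

static void timer_set_offset(struct timer_context *ctxt, uint64_t offset)
{
        /* Dereferences the backpointer: a NULL ctxt->vcpu here is the
         * crash the reordering in the hunks above prevents. */
        if (!ctxt->vcpu) {
                fprintf(stderr, "accessor called before vcpu was set!\n");
                exit(1);
        }
        ctxt->vcpu->cntvoff = offset;
}

int main(void)
{
        struct vcpu v = { 0 };
        struct timer_context vtimer = { 0 };

        /* Correct order: wire up the backpointer first... */
        vtimer.vcpu = &v;
        /* ...and only then call accessors that rely on it. */
        timer_set_offset(&vtimer, 42);
        printf("cntvoff=%llu\n", (unsigned long long)v.cntvoff);
        return 0;
}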
@@ -756,10 +843,12 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
          * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
          * regardless of ENABLE bit for our implementation convenience.
          */
+        u32 ctl = timer_get_ctl(timer);
+
         if (!kvm_timer_compute_delta(timer))
-                return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
-        else
-                return timer->cnt_ctl;
+                ctl |= ARCH_TIMER_CTRL_IT_STAT;
+
+        return ctl;
 }
 
 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
@@ -795,20 +884,20 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 
         switch (treg) {
         case TIMER_REG_TVAL:
-                val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
-                val &= lower_32_bits(val);
+                val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
+                val = lower_32_bits(val);
                 break;
 
         case TIMER_REG_CTL:
                 val = read_timer_ctl(timer);
                 break;
 
         case TIMER_REG_CVAL:
-                val = timer->cnt_cval;
+                val = timer_get_cval(timer);
                 break;
 
         case TIMER_REG_CNT:
-                val = kvm_phys_timer_read() - timer->cntvoff;
+                val = kvm_phys_timer_read() - timer_get_offset(timer);
                 break;
 
         default:
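In the read and write paths, TVAL is a signed 32-bit downcounter view of the 64-bit CVAL: reading yields the low 32 bits of cval - now, and writing sets cval = now + (s32)val. (The old `val &= lower_32_bits(val)` computed the same truncation as the new `val = lower_32_bits(val)`, just more obscurely.) A standalone sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v)
{
        return (uint32_t)v;
}

/* TVAL is a signed 32-bit "ticks until fire" view of the 64-bit CVAL. */
static uint32_t tval_read(uint64_t cval, uint64_t now)
{
        return lower_32_bits(cval - now);
}

static uint64_t tval_write(uint64_t now, uint32_t val)
{
        /* The (int32_t) cast sign-extends, so writing a negative TVAL
         * yields a CVAL in the past, i.e. an already-expired timer. */
        return now + (int32_t)val;
}

int main(void)
{
        uint64_t now = 1000000;

        /* Arm the timer 500 ticks in the future... */
        uint64_t cval = tval_write(now, 500);
        printf("cval=%llu tval=%d\n", (unsigned long long)cval,
               (int32_t)tval_read(cval, now));

        /* ...and observe TVAL go negative once the counter passes CVAL. */
        printf("tval after expiry: %d\n",
               (int32_t)tval_read(cval, now + 800));
        return 0;
}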
@@ -842,15 +931,15 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
         switch (treg) {
         case TIMER_REG_TVAL:
-                timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
+                timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
                 break;
 
         case TIMER_REG_CTL:
-                timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
+                timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
                 break;
 
         case TIMER_REG_CVAL:
-                timer->cnt_cval = val;
+                timer_set_cval(timer, val);
                 break;
 
         default: