@@ -152,6 +152,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
152
152
return val ;
153
153
}
154
154
155
/*
 * Out-of-line wrapper around __arch_counter_get_cntpct_stable() so its
 * address can be assigned to the arch_timer_read_counter function pointer
 * (see arch_counter_register()); presumably the inner helper is inline —
 * TODO confirm against the arch header.
 */
static u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}
159
+
160
/*
 * Out-of-line wrapper around __arch_counter_get_cntpct() usable as an
 * arch_timer_read_counter function-pointer target (see
 * arch_counter_register()).
 */
static u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}
164
+
165
/*
 * Out-of-line wrapper around __arch_counter_get_cntvct_stable() so its
 * address can be assigned to the arch_timer_read_counter function pointer
 * (see arch_counter_register()).
 */
static u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}
169
+
170
/*
 * Out-of-line wrapper around __arch_counter_get_cntvct() usable as an
 * arch_timer_read_counter function-pointer target (see
 * arch_counter_register()).
 */
static u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}
174
+
155
175
/*
156
176
* Default to cp15 based access because arm64 uses this function for
157
177
* sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -365,6 +385,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
365
385
DEFINE_PER_CPU (const struct arch_timer_erratum_workaround * , timer_unstable_counter_workaround );
366
386
EXPORT_SYMBOL_GPL (timer_unstable_counter_workaround );
367
387
388
/*
 * Set to 1 (never cleared) once any enabled erratum workaround provides a
 * read_cntvct_el0 or read_cntpct_el0 override; read via
 * arch_timer_counter_has_wa() to select the _stable counter accessors.
 */
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
368
389
369
390
static void erratum_set_next_event_tval_generic (const int access , unsigned long evt ,
370
391
struct clock_event_device * clk )
@@ -535,6 +556,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
535
556
per_cpu (timer_unstable_counter_workaround , i ) = wa ;
536
557
}
537
558
559
+ if (wa -> read_cntvct_el0 || wa -> read_cntpct_el0 )
560
+ atomic_set (& timer_unstable_counter_workaround_in_use , 1 );
561
+
538
562
/*
539
563
* Don't use the vdso fastpath if errata require using the
540
564
* out-of-line counter accessor. We may change our mind pretty
@@ -591,9 +615,15 @@ static bool arch_timer_this_cpu_has_cntvct_wa(void)
591
615
{
592
616
return has_erratum_handler (read_cntvct_el0 );
593
617
}
618
+
619
/*
 * True once any CPU has enabled a workaround that overrides the
 * CNTVCT/CNTPCT read accessors; arch_counter_register() uses this to pick
 * the _stable counter read path.
 */
static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
594
623
#else
595
624
#define arch_timer_check_ool_workaround (t ,a ) do { } while(0)
596
625
#define arch_timer_this_cpu_has_cntvct_wa () ({false;})
626
/* No OOL workaround support compiled in: the stable path is never needed. */
#define arch_timer_counter_has_wa()			({false;})
597
627
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
598
628
599
629
static __always_inline irqreturn_t timer_handler (const int access ,
@@ -942,12 +972,22 @@ static void __init arch_counter_register(unsigned type)
942
972
943
973
/* Register the CP15 based counter if we have one */
944
974
if (type & ARCH_TIMER_TYPE_CP15 ) {
975
+ u64 (* rd )(void );
976
+
945
977
if ((IS_ENABLED (CONFIG_ARM64 ) && !is_hyp_mode_available ()) ||
946
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI )
947
- arch_timer_read_counter = arch_counter_get_cntvct ;
948
- else
949
- arch_timer_read_counter = arch_counter_get_cntpct ;
978
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI ) {
979
+ if (arch_timer_counter_has_wa ())
980
+ rd = arch_counter_get_cntvct_stable ;
981
+ else
982
+ rd = arch_counter_get_cntvct ;
983
+ } else {
984
+ if (arch_timer_counter_has_wa ())
985
+ rd = arch_counter_get_cntpct_stable ;
986
+ else
987
+ rd = arch_counter_get_cntpct ;
988
+ }
950
989
990
+ arch_timer_read_counter = rd ;
951
991
clocksource_counter .archdata .vdso_direct = vdso_default ;
952
992
} else {
953
993
arch_timer_read_counter = arch_counter_get_cntvct_mem ;
0 commit comments