
Commit 0ea4153

Marc Zyngier authored and wildea01 committed
clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters
Instead of always going via arch_counter_get_cntvct_stable to access the counter workaround, let's have arch_timer_read_counter point to the right method.

For that, we need to track whether any CPU in the system has a workaround for the counter. This is done by having an atomic variable tracking this.

Acked-by: Mark Rutland <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent a862fc2 commit 0ea4153
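
To make the shape of the change concrete, here is a minimal, self-contained userspace sketch (plain C11; read_counter_raw, read_counter_stable and counter_register are illustrative stand-ins, not kernel code) of the pattern the patch adopts: an atomic flag records whether any CPU needs a counter workaround, and a single read-function pointer is selected once at registration time, so the hot read path carries no extra per-read check.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for timer_unstable_counter_workaround_in_use. */
static atomic_int counter_workaround_in_use;

/* Stand-in for a direct counter register read (e.g. CNTVCT_EL0). */
static uint64_t read_counter_raw(void)
{
        static uint64_t fake_counter;
        return ++fake_counter;
}

/* Stand-in for the erratum-aware, out-of-line accessor. */
static uint64_t read_counter_stable(void)
{
        return read_counter_raw();
}

/* Equivalent of arch_timer_read_counter: set once, called everywhere after. */
static uint64_t (*read_counter)(void) = read_counter_raw;

static void counter_register(void)
{
        /* Pick the accessor once, instead of checking on every read. */
        if (atomic_load(&counter_workaround_in_use))
                read_counter = read_counter_stable;
        else
                read_counter = read_counter_raw;
}

int main(void)
{
        /* A CPU probe that detects an affected counter would set the flag. */
        atomic_store(&counter_workaround_in_use, 1);
        counter_register();
        printf("counter = %llu\n", (unsigned long long)read_counter());
        return 0;
}

The patch does the equivalent in arch_counter_register(): it consults arch_timer_counter_has_wa() once and stores the chosen accessor in arch_timer_read_counter.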

3 files changed: +70 −8 lines

arch/arm/include/asm/arch_timer.h
12 additions, 2 deletions

@@ -83,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void)
 	return val;
 }

-static inline u64 arch_counter_get_cntpct(void)
+static inline u64 __arch_counter_get_cntpct(void)
 {
 	u64 cval;

@@ -92,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void)
 	return cval;
 }

-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+	return __arch_counter_get_cntpct();
+}
+
+static inline u64 __arch_counter_get_cntvct(void)
 {
 	u64 cval;

@@ -101,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void)
 	return cval;
 }

+static inline u64 __arch_counter_get_cntvct_stable(void)
+{
+	return __arch_counter_get_cntvct();
+}
+
 static inline u32 arch_timer_get_cntkctl(void)
 {
 	u32 cntkctl;

arch/arm64/include/asm/arch_timer.h
14 additions, 2 deletions

@@ -174,18 +174,30 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	isb();
 }

-static inline u64 arch_counter_get_cntpct(void)
+static inline u64 __arch_counter_get_cntpct_stable(void)
 {
 	isb();
 	return arch_timer_reg_read_stable(cntpct_el0);
 }

-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntpct(void)
+{
+	isb();
+	return read_sysreg(cntpct_el0);
+}
+
+static inline u64 __arch_counter_get_cntvct_stable(void)
 {
 	isb();
 	return arch_timer_reg_read_stable(cntvct_el0);
 }

+static inline u64 __arch_counter_get_cntvct(void)
+{
+	isb();
+	return read_sysreg(cntvct_el0);
+}
+
 static inline int arch_timer_arch_init(void)
 {
 	return 0;
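
In the arm64 hunk above, the _stable accessors read through arch_timer_reg_read_stable() while the new double-underscore raw accessors use read_sysreg() directly. Purely as a rough illustration (an assumption pieced together from the per-CPU workaround pointer visible in drivers/clocksource/arm_arch_timer.c, not code from this patch; the helper name is hypothetical and details such as preemption handling are omitted), the stable path amounts to something like:

/* Hypothetical, simplified sketch of the erratum-aware read path; this is
 * not the actual arch_timer_reg_read_stable() implementation. */
static inline u64 sketch_counter_get_cntvct_stable(void)
{
        const struct arch_timer_erratum_workaround *wa;

        /* Per-CPU descriptor installed by arch_timer_enable_workaround(). */
        wa = __this_cpu_read(timer_unstable_counter_workaround);
        if (wa && wa->read_cntvct_el0)
                return wa->read_cntvct_el0();   /* out-of-line, erratum-safe read */

        return read_sysreg(cntvct_el0);         /* normal direct register read */
}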

drivers/clocksource/arm_arch_timer.c
44 additions, 4 deletions

@@ -152,6 +152,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
 	return val;
 }

+static u64 arch_counter_get_cntpct_stable(void)
+{
+	return __arch_counter_get_cntpct_stable();
+}
+
+static u64 arch_counter_get_cntpct(void)
+{
+	return __arch_counter_get_cntpct();
+}
+
+static u64 arch_counter_get_cntvct_stable(void)
+{
+	return __arch_counter_get_cntvct_stable();
+}
+
+static u64 arch_counter_get_cntvct(void)
+{
+	return __arch_counter_get_cntvct();
+}
+
 /*
  * Default to cp15 based access because arm64 uses this function for
  * sched_clock() before DT is probed and the cp15 method is guaranteed

@@ -365,6 +385,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

+static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

 static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)

@@ -535,6 +556,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
 		per_cpu(timer_unstable_counter_workaround, i) = wa;
 	}

+	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+		atomic_set(&timer_unstable_counter_workaround_in_use, 1);
+
 	/*
 	 * Don't use the vdso fastpath if errata require using the
 	 * out-of-line counter accessor. We may change our mind pretty

@@ -591,9 +615,15 @@ static bool arch_timer_this_cpu_has_cntvct_wa(void)
 {
 	return has_erratum_handler(read_cntvct_el0);
 }
+
+static bool arch_timer_counter_has_wa(void)
+{
+	return atomic_read(&timer_unstable_counter_workaround_in_use);
+}
 #else
 #define arch_timer_check_ool_workaround(t,a)	do { } while(0)
 #define arch_timer_this_cpu_has_cntvct_wa()	({false;})
+#define arch_timer_counter_has_wa()		({false;})
 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

 static __always_inline irqreturn_t timer_handler(const int access,

@@ -942,12 +972,22 @@ static void __init arch_counter_register(unsigned type)

 	/* Register the CP15 based counter if we have one */
 	if (type & ARCH_TIMER_TYPE_CP15) {
+		u64 (*rd)(void);
+
 		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
-		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
-			arch_timer_read_counter = arch_counter_get_cntvct;
-		else
-			arch_timer_read_counter = arch_counter_get_cntpct;
+		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+			if (arch_timer_counter_has_wa())
+				rd = arch_counter_get_cntvct_stable;
+			else
+				rd = arch_counter_get_cntvct;
+		} else {
+			if (arch_timer_counter_has_wa())
+				rd = arch_counter_get_cntpct_stable;
+			else
+				rd = arch_counter_get_cntpct;
+		}

+		arch_timer_read_counter = rd;
 		clocksource_counter.archdata.vdso_direct = vdso_default;
 	} else {
 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
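
Switching a single function pointer is sufficient because counter reads in this driver funnel through arch_timer_read_counter. As a sketch for context only (this callback is not part of the diff, so treat the exact body as an assumption), the clocksource read path is essentially a thin wrapper:

/* Sketch: the clocksource ->read callback is assumed to simply defer to
 * arch_timer_read_counter(), so selecting the pointer once at registration
 * covers every subsequent clocksource read. */
static u64 arch_counter_read(struct clocksource *cs)
{
        return arch_timer_read_counter();
}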
