/*
 * Copyright (c) 2016-2017 Nordic Semiconductor ASA
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
1615
1716#define RTC NRF_RTC1
1817
19- /*
20- * Compare values must be set to at least 2 greater than the current
21- * counter value to ensure that the compare fires. Compare values are
22- * generally determined by reading the counter, then performing some
23- * calculations to convert a relative delay to an absolute delay.
24- * Assume that the counter will not increment more than twice during
25- * these calculations, allowing for a final check that can replace a
26- * too-low compare with a value that will guarantee fire.
27- */
28- #define MIN_DELAY 4
29-
18+ #define COUNTER_MAX 0x00ffffff
3019#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \
3120 / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
32- #if CYC_PER_TICK < MIN_DELAY
33- #error Cycles per tick is too small
34- #endif
21+ #define MAX_TICKS ((COUNTER_MAX - CYC_PER_TICK) / CYC_PER_TICK)
22+
23+ #define MIN_DELAY 32
3524
36- #define COUNTER_MAX 0x00ffffffU
37- #define MAX_TICKS ((COUNTER_MAX - MIN_DELAY) / CYC_PER_TICK)
38- #define MAX_DELAY (MAX_TICKS * CYC_PER_TICK)
25+ static struct k_spinlock lock ;
3926
4027static u32_t last_count ;
4128
42- static inline u32_t counter_sub (u32_t a , u32_t b )
29+ static u32_t counter_sub (u32_t a , u32_t b )
4330{
4431 return (a - b ) & COUNTER_MAX ;
4532}
4633
47- static inline void set_comparator (u32_t cyc )
34+ static void set_comparator (u32_t cyc )
4835{
49- nrf_rtc_cc_set (RTC , 0 , cyc );
36+ nrf_rtc_cc_set (RTC , 0 , cyc & COUNTER_MAX );
5037}
5138
52- static inline u32_t counter (void )
39+ static u32_t counter (void )
5340{
5441 return nrf_rtc_counter_get (RTC );
5542}
@@ -67,7 +54,7 @@ void rtc1_nrf_isr(void *arg)
6754 ARG_UNUSED (arg );
6855 RTC -> EVENTS_COMPARE [0 ] = 0 ;
6956
70- u32_t key = irq_lock ( );
57+ k_spinlock_key_t key = k_spin_lock ( & lock );
7158 u32_t t = counter ();
7259 u32_t dticks = counter_sub (t , last_count ) / CYC_PER_TICK ;
7360
@@ -82,7 +69,7 @@ void rtc1_nrf_isr(void *arg)
8269 set_comparator (next );
8370 }
8471
85- irq_unlock ( key );
72+ k_spin_unlock ( & lock , key );
8673 z_clock_announce (IS_ENABLED (CONFIG_TICKLESS_KERNEL ) ? dticks : 1 );
8774}
8875
@@ -130,53 +117,21 @@ void z_clock_set_timeout(s32_t ticks, bool idle)
130117 ticks = (ticks == K_FOREVER ) ? MAX_TICKS : ticks ;
131118 ticks = MAX (MIN (ticks - 1 , (s32_t )MAX_TICKS ), 0 );
132119
133- /*
134- * Get the requested delay in tick-aligned cycles. Increase
135- * by one tick to round up so we don't timeout early due to
136- * cycles elapsed since the last tick. Cap at the maximum
137- * tick-aligned delta.
138- */
139- u32_t cyc = MIN ((1 + ticks ) * CYC_PER_TICK , MAX_DELAY );
140-
141- u32_t key = irq_lock ();
142- u32_t d = counter_sub (counter (), last_count );
143-
144- /*
145- * We've already accounted for anything less than a full tick,
146- * and assumed we meet the minimum delay for the tick. If
147- * that's not true, we have to adjust, which may involve a
148- * rare and expensive integer division.
149- */
150- if (d > (CYC_PER_TICK - MIN_DELAY )) {
151- if (d >= CYC_PER_TICK ) {
152- /*
153- * We're late by at least one tick. Adjust
154- * the compare offset for the missed ones, and
155- * reduce d to be the portion since the last
156- * (unseen) tick.
157- */
158- u32_t missed_ticks = d / CYC_PER_TICK ;
159- u32_t missed_cycles = missed_ticks * CYC_PER_TICK ;
160- cyc += missed_cycles ;
161- d -= missed_cycles ;
162- }
163- if (d > (CYC_PER_TICK - MIN_DELAY )) {
164- /*
165- * We're (now) within the tick, but too close
166- * to meet the minimum delay required to
167- * guarantee compare firing. Step up to the
168- * next tick.
169- */
170- cyc += CYC_PER_TICK ;
171- }
172- if (cyc > MAX_DELAY ) {
173- /* Don't adjust beyond the counter range. */
174- cyc = MAX_DELAY ;
175- }
120+ k_spinlock_key_t key = k_spin_lock (& lock );
121+ u32_t cyc , t = counter ();
122+
123+ /* Round up to next tick boundary */
124+ cyc = ticks * CYC_PER_TICK + counter_sub (t , last_count );
125+ cyc += (CYC_PER_TICK - 1 );
126+ cyc = (cyc / CYC_PER_TICK ) * CYC_PER_TICK ;
127+ cyc += last_count ;
128+
129+ if (counter_sub (cyc , t ) < MIN_DELAY ) {
130+ cyc += CYC_PER_TICK ;
176131 }
177- set_comparator (last_count + cyc );
178132
179- irq_unlock (key );
133+ set_comparator (cyc );
134+ k_spin_unlock (& lock , key );
180135#endif
181136}
182137
@@ -186,18 +141,18 @@ u32_t z_clock_elapsed(void)
186141 return 0 ;
187142 }
188143
189- u32_t key = irq_lock ( );
144+ k_spinlock_key_t key = k_spin_lock ( & lock );
190145 u32_t ret = counter_sub (counter (), last_count ) / CYC_PER_TICK ;
191146
192- irq_unlock ( key );
147+ k_spin_unlock ( & lock , key );
193148 return ret ;
194149}
195150
196151u32_t z_timer_cycle_get_32 (void )
197152{
198- u32_t key = irq_lock ( );
153+ k_spinlock_key_t key = k_spin_lock ( & lock );
199154 u32_t ret = counter_sub (counter (), last_count ) + last_count ;
200155
201- irq_unlock ( key );
156+ k_spin_unlock ( & lock , key );
202157 return ret ;
203158}
0 commit comments