@@ -47,16 +47,19 @@ BUILD_ASSERT_MSG(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 32768,
 #define CYCLES_PER_TICK \
 	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
 
+/* Mask off bits[31:28] of 32-bit count */
+#define TIMER_MAX 0x0FFFFFFFUL
 
-#define CPT1000 ((CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * 1000UL) \
-		 / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
+#define TIMER_COUNT_MASK 0x0FFFFFFFUL
 
-#define CPT_FRACT (CPT1000 - (CYCLES_PER_TICK * 1000UL))
+#define TIMER_STOPPED 0xF0000000UL
 
+/* Adjust cycle count programmed into timer for HW restart latency */
+#define TIMER_ADJUST_LIMIT 2
+#define TIMER_ADJUST_CYCLES 1
 
 /* max number of ticks we can load into the timer in one shot */
-
-#define MAX_TICKS (0x7FFFFFFFU / CYCLES_PER_TICK)
+#define MAX_TICKS (TIMER_MAX / CYCLES_PER_TICK)
 
 /*
  * The spinlock protects all access to the RTMR registers, as well as
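For a sense of scale, here is what these macros evaluate to with the 32768 Hz clock required by the BUILD_ASSERT_MSG above and an assumed (not mandated by this patch) tick rate of 100 Hz; the small program below only mirrors the arithmetic.

#include <assert.h>

/* Assumed example configuration: 32768 Hz is required by the driver,
 * 100 ticks per second is just an illustrative kernel setting.
 */
#define HW_CYCLES_PER_SEC 32768u
#define TICKS_PER_SEC     100u

#define CYCLES_PER_TICK (HW_CYCLES_PER_SEC / TICKS_PER_SEC)
#define TIMER_MAX       0x0FFFFFFFu              /* driver keeps counts in 28 bits */
#define MAX_TICKS       (TIMER_MAX / CYCLES_PER_TICK)

int main(void)
{
	assert(CYCLES_PER_TICK == 327u);             /* ~30.5 us * 327 is roughly one 10 ms tick */
	assert(MAX_TICKS == 820903u);                /* longest one-shot timeout */
	assert(MAX_TICKS / TICKS_PER_SEC == 8209u);  /* roughly 2.3 hours */
	return 0;
}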
@@ -68,25 +71,43 @@ BUILD_ASSERT_MSG(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 32768,
  */
 
 static struct k_spinlock lock;
-static u64_t total_cycles;
+static u32_t total_cycles;
 static u32_t cached_icr = CYCLES_PER_TICK;
 
+static void timer_restart(u32_t countdown)
+{
+	RTMR_REGS->CTRL = 0U;
+	RTMR_REGS->CTRL = MCHP_RTMR_CTRL_BLK_EN;
+	RTMR_REGS->PRLD = countdown;
+	RTMR_REGS->CTRL = TIMER_START_VAL;
+}
+
 /*
- * Restart XEC RTOS timer with new count down value.
- * This timer requires its control register to be cleared, the new
- * preload value written twice, and timer started.
+ * Read the RTOS timer counter, handling the case where the timer
+ * has been reloaded within one 32KHz clock of reading its count register.
+ * The RTOS timer hardware must synchronize the control register write,
+ * performed in the AHB clock domain, with the 32KHz clock domain of its
+ * internal logic. This synchronization takes anywhere from nearly zero time
+ * up to one 32KHz clock, depending upon where within the 32KHz period the
+ * 48MHz AHB register write lands. We detect that the timer is in this load
+ * state by checking the read-only count register and the START bit in the
+ * control register: if the count register is 0 and the START bit is set,
+ * the timer has been started and is still moving the preload register value
+ * into the count register.
  */
-static INLINE void timer_restart(u32_t val)
+static INLINE u32_t timer_count(void)
 {
-	RTMR_REGS->CTRL = 0;
-	RTMR_REGS->PRLD = val;
-	RTMR_REGS->PRLD = val;
-	RTMR_REGS->CTRL = TIMER_START_VAL;
+	u32_t ccr = RTMR_REGS->CNT;
+
+	if ((ccr == 0) && (RTMR_REGS->CTRL & MCHP_RTMR_CTRL_START)) {
+		ccr = cached_icr;
+	}
+
+	return ccr;
 }
 
 #ifdef CONFIG_TICKLESS_KERNEL
 
-static u64_t last_announcement;	/* last time we called z_clock_announce() */
+static u32_t last_announcement;	/* last time we called z_clock_announce() */
 
 /*
  * Request a timeout n Zephyr ticks in the future from now.
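The value timer_count() returns feeds subtractions of the form cached_icr - count, so reporting the raw 0 seen during the control-register synchronization window would look like a full preload's worth of elapsed cycles. A stand-alone model of that corner case (names here are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>

static uint32_t cached_icr = 4096u;  /* last value programmed into the preload */

/* Mirrors timer_count(): 'cnt' stands in for RTMR_REGS->CNT and 'started'
 * for the START bit in the control register.
 */
static uint32_t timer_count_model(uint32_t cnt, int started)
{
	if ((cnt == 0u) && started) {
		return cached_icr;   /* still loading: no cycles consumed yet */
	}
	return cnt;
}

int main(void)
{
	/* Normal read: 100 cycles have elapsed since the last restart. */
	assert(cached_icr - timer_count_model(3996u, 1) == 100u);

	/* Read during the load window: without the substitution the caller
	 * would see 4096 elapsed cycles instead of 0.
	 */
	assert(cached_icr - timer_count_model(0u, 1) == 0u);
	return 0;
}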
@@ -109,6 +130,16 @@ void z_clock_set_timeout(s32_t n, bool idle)
 	u32_t full_cycles;	/* full_ticks represented as cycles */
 	u32_t partial_cycles;	/* number of cycles to first tick boundary */
 
+	if (idle && (n == K_FOREVER)) {
+		/*
+		 * We are not in a locked section. Are writes to two
+		 * global objects safe from pre-emption?
+		 */
+		RTMR_REGS->CTRL = 0U;	/* stop timer */
+		cached_icr = TIMER_STOPPED;
+		return;
+	}
+
 	if (n < 1) {
 		full_ticks = 0;
 	} else if ((n == K_FOREVER) || (n > MAX_TICKS)) {
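This idle/K_FOREVER branch pairs with the z_clock_idle_exit() added near the end of the patch: TIMER_STOPPED cannot be a legal 28-bit preload, so cached_icr doubles as a "timer is off" flag. A compressed, host-side sketch of that round trip (all names below are stand-ins, not driver symbols):

#include <assert.h>
#include <stdint.h>

#define TIMER_STOPPED   0xF0000000u
#define CYCLES_PER_TICK 327u      /* assumed: 32768 Hz / 100 ticks per second */

static uint32_t cached_icr = CYCLES_PER_TICK;
static uint32_t hw_preload;       /* stands in for RTMR_REGS->PRLD */
static int hw_running;            /* stands in for the START bit */

static void set_timeout_forever(void)  /* mirrors the idle && K_FOREVER branch */
{
	hw_running = 0;
	cached_icr = TIMER_STOPPED;
}

static void idle_exit(void)            /* mirrors z_clock_idle_exit() */
{
	if (cached_icr == TIMER_STOPPED) {
		cached_icr = CYCLES_PER_TICK;
		hw_preload = cached_icr;
		hw_running = 1;
	}
}

int main(void)
{
	set_timeout_forever();
	assert(hw_running == 0 && cached_icr == TIMER_STOPPED);

	idle_exit();
	assert(hw_running == 1 && hw_preload == CYCLES_PER_TICK);
	return 0;
}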
@@ -117,53 +148,61 @@ void z_clock_set_timeout(s32_t n, bool idle)
 		full_ticks = n - 1;
 	}
 
-	/*
-	 * RTMR frequency is fixed at 32KHz resulting in truncation errors.
-	 * Tune the denominator taking into account delay in the caller and
-	 * this routine.
-	 */
-	full_cycles = (full_ticks * CYCLES_PER_TICK)
-		+ ((full_ticks * CPT_FRACT) / 1000UL);
-
-	/*
-	 * There's a wee race condition here. The timer may expire while
-	 * we're busy reprogramming it; an interrupt will be queued at the
-	 * NVIC and the ISR will be called too early, roughly right
-	 * after we unlock, and not because the count we just programmed has
-	 * counted down. We can detect this situation only by using one-shot
-	 * mode. The counter will be 0 for a "real" interrupt and non-zero
-	 * if we have restarted the timer here.
-	 */
+	full_cycles = full_ticks * CYCLES_PER_TICK;
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	ccr = RTMR_REGS->CNT;
-	total_cycles += (cached_icr - ccr);
+	ccr = timer_count();
+
+	/* turn off to clear any pending interrupt status */
+	RTMR_REGS->CTRL = 0U;
+	GIRQ23_REGS->SRC = MCHP_RTMR_GIRQ_VAL;
+	NVIC_ClearPendingIRQ(RTMR_IRQn);
+
+	temp = total_cycles;
+	temp += (cached_icr - ccr);
+	temp &= TIMER_COUNT_MASK;
+	total_cycles = temp;
+
 	partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK);
-	temp = full_cycles + partial_cycles;
+	cached_icr = full_cycles + partial_cycles;
+	/* adjust for up to one 32KHz cycle startup time */
+	temp = cached_icr;
+	if (temp > TIMER_ADJUST_LIMIT) {
+		temp -= TIMER_ADJUST_CYCLES;
+	}
 
 	timer_restart(temp);
-	cached_icr = temp;
 
 	k_spin_unlock(&lock, key);
 }
 
 /*
  * Return the number of Zephyr ticks elapsed from last call to
- * z_clock_announce in the ISR.
+ * z_clock_announce in the ISR. The caller casts u32_t to s32_t.
+ * We must make sure bit[31] is 0 in the return value.
  */
 u32_t z_clock_elapsed(void)
 {
 	u32_t ccr;
 	u32_t ticks;
+	s32_t elapsed;
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	ccr = RTMR_REGS->CNT;
-	ticks = total_cycles - last_announcement;
+	ccr = timer_count();
+
+	/* It may not look efficient but the compiler does a good job */
+	elapsed = (s32_t)total_cycles - (s32_t)last_announcement;
+	if (elapsed < 0) {
+		elapsed = -1 * elapsed;
+	}
+	ticks = (u32_t)elapsed;
 	ticks += cached_icr - ccr;
-	k_spin_unlock(&lock, key);
 	ticks /= CYCLES_PER_TICK;
+	ticks &= TIMER_COUNT_MASK;
+
+	k_spin_unlock(&lock, key);
 
 	return ticks;
 }
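The partial_cycles term is what keeps announced ticks aligned: the preload is padded so that when the timer fires, total_cycles lands exactly on a tick boundary. The arithmetic in isolation, using an assumed 327 cycles per tick:

#include <assert.h>
#include <stdint.h>

#define CYCLES_PER_TICK  327u         /* assumed: 32768 Hz / 100 ticks per second */
#define TIMER_COUNT_MASK 0x0FFFFFFFu

int main(void)
{
	/* Say the accumulated count sits 100 cycles past a tick boundary when
	 * a 5-tick timeout is requested (full_ticks = 4, as in the code above).
	 */
	uint32_t total_cycles = 10u * CYCLES_PER_TICK + 100u;
	uint32_t full_cycles = 4u * CYCLES_PER_TICK;
	uint32_t partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK);
	uint32_t icr = full_cycles + partial_cycles;

	/* After icr more cycles the count is a whole number of ticks again... */
	assert(((total_cycles + icr) & TIMER_COUNT_MASK) % CYCLES_PER_TICK == 0u);
	/* ...and exactly 5 tick boundaries past the point of the request. */
	assert((total_cycles + icr) / CYCLES_PER_TICK == 15u);
	return 0;
}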
@@ -172,36 +211,28 @@ static void xec_rtos_timer_isr(void *arg)
 {
 	ARG_UNUSED(arg);
 
-	u32_t cycles, preload;
+	u32_t cycles;
 	s32_t ticks;
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	/*
-	 * Clear RTOS timer interrupt RW/1C status in GIRQ23 source register.
-	 * NVIC will clear its pending bit on ISR exit.
-	 */
 	GIRQ23_REGS->SRC = MCHP_RTMR_GIRQ_VAL;
-
-	/*
-	 * If we get here and the RTMR count registers isn't zero, then
-	 * this interrupt is stale: it was queued while z_clock_set_timeout()
-	 * was setting a new counter. Just ignore it. See above for more info.
-	 */
-	if (RTMR_REGS->CNT != 0) {
-		k_spin_unlock(&lock, key);
-		return;
-	}
-
-	/* restart the timer */
-	preload = MAX_TICKS * CYCLES_PER_TICK;
-	timer_restart(preload);
+	/* Restart the timer as early as possible to minimize drift... */
+	timer_restart(MAX_TICKS * CYCLES_PER_TICK);
 
 	cycles = cached_icr;
-	cached_icr = preload;
+	cached_icr = MAX_TICKS * CYCLES_PER_TICK;
+
 	total_cycles += cycles;
-	ticks = (total_cycles - last_announcement) / CYCLES_PER_TICK;
+	total_cycles &= TIMER_COUNT_MASK;
+
+	/* handle wrap by using (power of 2) - 1 mask */
+	ticks = total_cycles - last_announcement;
+	ticks &= TIMER_COUNT_MASK;
+	ticks /= CYCLES_PER_TICK;
+
 	last_announcement = total_cycles;
+
 	k_spin_unlock(&lock, key);
 	z_clock_announce(ticks);
 }
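On the "(power of 2) - 1 mask" comment: because total_cycles and last_announcement are both kept modulo 2^28, the masked subtraction stays correct across a wrap. A hypothetical wrap case, with an assumed 327 cycles per tick:

#include <assert.h>
#include <stdint.h>

#define CYCLES_PER_TICK  327u         /* assumed example value */
#define TIMER_COUNT_MASK 0x0FFFFFFFu  /* counts are kept modulo 2^28 */

int main(void)
{
	/* Last announcement 2 ticks before the 2^28 wrap, current count
	 * 3 ticks after it: plain subtraction underflows, the mask fixes it.
	 */
	uint32_t last_announcement = (TIMER_COUNT_MASK + 1u) - 2u * CYCLES_PER_TICK;
	uint32_t total_cycles = (3u * CYCLES_PER_TICK) & TIMER_COUNT_MASK;

	uint32_t ticks = (total_cycles - last_announcement) & TIMER_COUNT_MASK;
	ticks /= CYCLES_PER_TICK;

	assert(ticks == 5u);
	return 0;
}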
@@ -216,14 +247,13 @@ static void xec_rtos_timer_isr(void *arg)
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	/*
-	 * Clear RTOS timer interrupt status RW/1C status in GIRQ23 register.
-	 * NVIC will clear its pending bit on ISR exit.
-	 */
 	GIRQ23_REGS->SRC = MCHP_RTMR_GIRQ_VAL;
-
-	total_cycles += CYCLES_PER_TICK;
+	/* Restart the timer as early as possible to minimize drift... */
 	timer_restart(cached_icr);
+
+	u32_t temp = total_cycles + CYCLES_PER_TICK;
+
+	total_cycles = temp & TIMER_COUNT_MASK;
 	k_spin_unlock(&lock, key);
 
 	z_clock_announce(1);
@@ -237,11 +267,14 @@ u32_t z_clock_elapsed(void)
 #endif /* CONFIG_TICKLESS_KERNEL */
 
 /*
- * Return an increasing hardware cycle count.
- * Implementation: We return current total number of cycles elapsed
- * from first start of the timer. The return value is 32-bits
- * resulting in only the lower 32-bits of the total count
- * being returned.
+ * Warning: the RTOS timer resolution is 30.5 us.
+ * This function is called by two code paths:
+ * 1. Kernel call to k_cycle_get_32() -> z_arch_k_cycle_get_32() -> here.
+ *    The kernel casts the return value to (int) and uses it uncast in
+ *    math expressions with int types, storing the result in an int.
+ * 2. If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined,
+ *    z_impl_k_busy_wait() calls here. This code path uses the value as u32_t.
+ *
  */
 u32_t z_timer_cycle_get_32(void)
 {
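The 30.5 us in the warning is simply the period of the fixed 32768 Hz source; the truncated integer checks below spell that out (pure arithmetic, no driver calls):

#include <assert.h>

#define HW_CYCLES_PER_SEC 32768u

int main(void)
{
	/* One cycle is 1000000 / 32768 = 30.51... microseconds. */
	assert(1000000u / HW_CYCLES_PER_SEC == 30u);
	assert((1000000u * 100u) / HW_CYCLES_PER_SEC == 3051u);  /* 30.51 us */

	/* Only ~32 cycles elapse per millisecond, which is why microsecond
	 * delays need the separate busy-wait timer below.
	 */
	assert(HW_CYCLES_PER_SEC / 1000u == 32u);
	return 0;
}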
@@ -250,30 +283,96 @@ u32_t z_timer_cycle_get_32(void)
 
 	k_spinlock_key_t key = k_spin_lock(&lock);
 
-	ccr = RTMR_REGS->CNT;
-	ret = total_cycles + (cached_icr - ccr);
+	ccr = timer_count();
+	ret = (total_cycles + (cached_icr - ccr)) & TIMER_COUNT_MASK;
+
 	k_spin_unlock(&lock, key);
 
 	return ret;
 }
 
+void z_clock_idle_exit(void)
+{
+	if (cached_icr == TIMER_STOPPED) {
+		cached_icr = CYCLES_PER_TICK;
+		timer_restart(cached_icr);
+	}
+}
+
+void sys_clock_disable(void)
+{
+	RTMR_REGS->CTRL = 0U;
+}
+
 int z_clock_driver_init(struct device *device)
 {
 	ARG_UNUSED(device);
 
 	mchp_pcr_periph_slp_ctrl(PCR_RTMR, MCHP_PCR_SLEEP_DIS);
 
+#ifdef CONFIG_TICKLESS_KERNEL
+	cached_icr = MAX_TICKS;
+#endif
+
 	RTMR_REGS->CTRL = 0U;
 	GIRQ23_REGS->SRC = MCHP_RTMR_GIRQ_VAL;
 	NVIC_ClearPendingIRQ(RTMR_IRQn);
 
-	timer_restart(cached_icr);
+	IRQ_CONNECT(RTMR_IRQn,
+		    DT_INST_0_MICROCHIP_XEC_RTOS_TIMER_IRQ_0_PRIORITY,
+		    xec_rtos_timer_isr, 0, 0);
 
-	IRQ_CONNECT(RTMR_IRQn, 1, xec_rtos_timer_isr, 0, 0);
 	GIRQ23_REGS->EN_SET = MCHP_RTMR_GIRQ_VAL;
-
-	RTMR_REGS->CTRL = TIMER_START_VAL;
 	irq_enable(RTMR_IRQn);
 
+#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
+	u32_t btmr_ctrl = B32TMR0_REGS->CTRL = (MCHP_BTMR_CTRL_ENABLE
+						| MCHP_BTMR_CTRL_AUTO_RESTART
+						| MCHP_BTMR_CTRL_COUNT_UP
+						| (47UL << MCHP_BTMR_CTRL_PRESCALE_POS));
+	B32TMR0_REGS->CTRL = MCHP_BTMR_CTRL_SOFT_RESET;
+	B32TMR0_REGS->CTRL = btmr_ctrl;
+	B32TMR0_REGS->PRLD = 0xFFFFFFFFUL;
+	btmr_ctrl |= MCHP_BTMR_CTRL_START;
+
+	timer_restart(cached_icr);
+	/* wait for Hibernation timer to load count register from preload */
+	while (RTMR_REGS->CNT == 0)
+		;
+	B32TMR0_REGS->CTRL = btmr_ctrl;
+#else
+	timer_restart(cached_icr);
+#endif
+
 	return 0;
 }
+
+#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
+
+/*
+ * We implement custom busy wait using a MEC1501 basic timer running on
+ * the 48MHz clock domain. This code is here for future power management
+ * save/restore of the timer context.
+ */
+
+/*
+ * 32-bit basic timer 0 configured for 1MHz count up, auto-reload,
+ * and no interrupt generation.
+ */
+void z_arch_busy_wait(u32_t usec_to_wait)
+{
+	if (usec_to_wait == 0) {
+		return;
+	}
+
+	u32_t start = B32TMR0_REGS->CNT;
+
+	for (;;) {
+		u32_t curr = B32TMR0_REGS->CNT;
+
+		if ((curr - start) >= usec_to_wait) {
+			break;
+		}
+	}
+}
+#endif
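On the basic-timer setup in z_clock_driver_init(): the 1MHz figure implies the usual divide-by-(prescale + 1) arrangement, 48000000 / (47 + 1), so B32TMR0 counts once per microsecond; that divider interpretation is inferred from the comment, not checked against the datasheet. The unsigned (curr - start) comparison also stays correct across a 32-bit rollover, modelled below with stand-in names:

#include <assert.h>
#include <stdint.h>

/* Assumed from the "1MHz count up" comment: counter frequency is the 48MHz
 * clock divided by (prescale + 1), i.e. 48000000 / (47 + 1) = 1000000.
 */
#define AHB_HZ   48000000u
#define PRESCALE 47u
#define COUNT_HZ (AHB_HZ / (PRESCALE + 1u))

/* Model of the z_arch_busy_wait() loop condition with a wrapping counter. */
static int wait_elapsed(uint32_t start, uint32_t curr, uint32_t usec)
{
	return (curr - start) >= usec;   /* modulo-2^32 arithmetic */
}

int main(void)
{
	assert(COUNT_HZ == 1000000u);    /* one count per microsecond */

	/* A 100 us wait that straddles the 32-bit counter wrap still terminates. */
	uint32_t start = 0xFFFFFFF0u;
	assert(!wait_elapsed(start, 0x0000003Fu, 100u)); /* 79 us elapsed */
	assert(wait_elapsed(start, 0x00000054u, 100u));  /* 100 us elapsed */
	return 0;
}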