 #define COUNTER_SPAN (GRTC_SYSCOUNTERL_VALUE_Msk | ((uint64_t)GRTC_SYSCOUNTERH_VALUE_Msk << 32))
 #define MAX_ABS_TICKS (COUNTER_SPAN / CYC_PER_TICK)
 
-#define MAX_TICKS \
-	(((COUNTER_SPAN / CYC_PER_TICK) > INT_MAX) ? INT_MAX : (COUNTER_SPAN / CYC_PER_TICK))
-
-#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
+/* To allow use of CCADD we need to limit max cycles to 31 bits. */
+#define MAX_CYCLES BIT_MASK(31)
+#define MAX_TICKS (MAX_CYCLES / CYC_PER_TICK)
 
 #define LFCLK_FREQUENCY_HZ DT_PROP(LFCLK_NODE, clock_frequency)
 
+/* Threshold used to determine if there is a risk of an unexpected GRTC COMPARE event
+ * coming from the previous CC value.
+ */
+#define LATENCY_THR_TICKS 200
+
 #if defined(CONFIG_TEST)
 const int32_t z_sys_timer_irq_for_test = DT_IRQN(GRTC_NODE);
 #endif
 
 static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context);
 
-static struct k_spinlock lock;
 static uint64_t last_count; /* Time (SYSCOUNTER value) @last sys_clock_announce() */
+static uint32_t last_elapsed;
+static uint64_t cc_value; /* Value that is expected to be in CC register. */
+static uint64_t expired_cc; /* CC value of the compare event that last expired. */
 static atomic_t int_mask;
 static uint8_t ext_channels_allocated;
 static nrfx_grtc_channel_t system_clock_channel_data = {
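A quick numeric illustration of the 31-bit cap introduced above. This standalone sketch uses assumed clock rates (1 MHz SYSCOUNTER tick source and a 10 kHz kernel tick), not values taken from this driver, purely to show the order of magnitude that MAX_CYCLES/MAX_TICKS now allow for a single relative (CCADD-style) compare write.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for CYC_PER_TICK, MAX_CYCLES and MAX_TICKS. */
#define EX_SYS_CLOCK_HZ   1000000u            /* assumed 1 MHz SYSCOUNTER rate */
#define EX_TICKS_PER_SEC  10000u              /* assumed kernel tick rate */
#define EX_CYC_PER_TICK   (EX_SYS_CLOCK_HZ / EX_TICKS_PER_SEC)
#define EX_MAX_CYCLES     ((1u << 31) - 1u)   /* same value as BIT_MASK(31) */
#define EX_MAX_TICKS      (EX_MAX_CYCLES / EX_CYC_PER_TICK)

int main(void)
{
	/* With these assumed rates: 100 cycles per tick and ~21.4 million ticks,
	 * i.e. roughly 35 minutes as the longest single timeout that still fits
	 * in one 31-bit relative compare value.
	 */
	printf("cyc/tick=%u max_ticks=%u\n", EX_CYC_PER_TICK, EX_MAX_TICKS);
	return 0;
}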
@@ -145,17 +151,13 @@ static void compare_int_unlock(int32_t chan, bool key)
 static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context)
 {
 	ARG_UNUSED(id);
+	ARG_UNUSED(cc_val);
 	ARG_UNUSED(p_context);
-	uint64_t dticks;
-	uint64_t now = counter();
-
-	if (unlikely(now < cc_val)) {
-		return;
-	}
+	uint32_t dticks;
 
 	dticks = counter_sub(cc_val, last_count) / CYC_PER_TICK;
-
-	last_count += dticks * CYC_PER_TICK;
+	last_count += (dticks * CYC_PER_TICK);
+	expired_cc = cc_val;
 
 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
 		/* protection is not needed because we are in the GRTC interrupt
@@ -164,6 +166,7 @@ static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context)
 		system_timeout_set_abs(last_count + CYC_PER_TICK);
 	}
 
+	last_elapsed = 0;
 	sys_clock_announce((int32_t)dticks);
 }
 
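For readers following the handler above, here is a self-contained sketch of the announce arithmetic, with a mask-based wraparound subtraction standing in for the driver's counter_sub(). The 52-bit span and the 100-cycles-per-tick value are assumptions for illustration only.

#include <stdint.h>

#define EX_COUNTER_SPAN  ((UINT64_C(1) << 52) - 1)  /* assumed SYSCOUNTER width */
#define EX_CYC_PER_TICK  100u                       /* assumed cycles per kernel tick */

/* Wraparound-safe difference, analogous to the driver's counter_sub(). */
static uint64_t ex_counter_sub(uint64_t a, uint64_t b)
{
	return (a - b) & EX_COUNTER_SPAN;
}

/* Mirrors the handler: announce the whole ticks elapsed between last_count and the
 * CC value that fired, then advance last_count by exactly that many ticks so any
 * sub-tick remainder is carried over instead of being lost.
 */
static uint32_t ex_handler_ticks(uint64_t *last_count, uint64_t cc_val)
{
	uint32_t dticks = (uint32_t)(ex_counter_sub(cc_val, *last_count) / EX_CYC_PER_TICK);

	*last_count += (uint64_t)dticks * EX_CYC_PER_TICK;
	return dticks; /* the value handed to sys_clock_announce() */
}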
@@ -368,6 +371,7 @@ int z_nrf_grtc_wakeup_prepare(uint64_t wake_time_us)
 	/* Minimum time that ensures valid execution of system-off procedure. */
 	uint32_t minimum_latency_us;
 	uint32_t chan;
+	struct k_spinlock lock;
 	int ret;
 
 	nrfx_grtc_sleep_configuration_get(&sleep_cfg);
@@ -424,20 +428,12 @@ int z_nrf_grtc_wakeup_prepare(uint64_t wake_time_us)
 
 uint32_t sys_clock_cycle_get_32(void)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
-	uint32_t ret = (uint32_t)counter();
-
-	k_spin_unlock(&lock, key);
-	return ret;
+	return (uint32_t)counter();
 }
 
 uint64_t sys_clock_cycle_get_64(void)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
-	uint64_t ret = counter();
-
-	k_spin_unlock(&lock, key);
-	return ret;
+	return counter();
 }
 
 uint32_t sys_clock_elapsed(void)
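Since sys_clock_cycle_get_32()/sys_clock_cycle_get_64() are now plain counter() reads with no spinlock, here is a caller-side sketch of how they are typically consumed through the kernel API. The helper name is hypothetical, and k_cycle_get_64() assumes the 64-bit cycle counter option is enabled on the platform.

#include <zephyr/kernel.h>

/* Hypothetical helper: measure how long an operation takes using the 64-bit
 * cycle counter, which now boils down to a lock-free SYSCOUNTER read.
 */
static uint64_t ex_measure_us(void (*op)(void))
{
	uint64_t start = k_cycle_get_64();

	op();

	return k_cyc_to_us_floor64(k_cycle_get_64() - start);
}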
@@ -446,7 +442,9 @@ uint32_t sys_clock_elapsed(void)
 		return 0;
 	}
 
-	return (uint32_t)(counter_sub(counter(), last_count) / CYC_PER_TICK);
+	last_elapsed = (uint32_t)counter_sub(counter(), last_count);
+
+	return last_elapsed / CYC_PER_TICK;
 }
 
 static int sys_clock_driver_init(void)
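A compact model of the bookkeeping introduced in sys_clock_elapsed() above: the raw cycle delta is cached in last_elapsed so that the timeout-setting path can later reconstruct "now" as last_count + last_elapsed without another SYSCOUNTER read. All names below are illustrative stand-ins.

#include <stdint.h>

#define EX_CYC_PER_TICK 100u          /* assumed cycles per kernel tick */

static uint64_t ex_last_count;        /* cycles at the last announcement */
static uint32_t ex_last_elapsed;      /* raw cycles since the last announcement */

/* Analogous to sys_clock_elapsed(): return whole elapsed ticks and cache the raw
 * cycle delta for the next visit to the timeout-setting path.
 */
static uint32_t ex_elapsed(uint64_t now_cycles)
{
	ex_last_elapsed = (uint32_t)(now_cycles - ex_last_count);

	return ex_last_elapsed / EX_CYC_PER_TICK;
}

/* The "current time" used later when programming an absolute compare value. */
static uint64_t ex_now(void)
{
	return ex_last_count + ex_last_elapsed;
}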
@@ -485,6 +483,9 @@ static int sys_clock_driver_init(void)
 	}
 #endif /* CONFIG_NRF_GRTC_START_SYSCOUNTER */
 
+	nrfx_grtc_channel_callback_set(system_clock_channel_data.channel,
+				       sys_clock_timeout_handler, NULL);
+
 	int_mask = NRFX_GRTC_CONFIG_ALLOWED_CC_CHANNELS_MASK;
 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
 		system_timeout_set_relative(CYC_PER_TICK);
@@ -539,22 +540,47 @@ void sys_clock_set_timeout(int32_t ticks, bool idle)
 {
 	ARG_UNUSED(idle);
 
+	if (ticks == 0) {
+		return;
+	}
+
 	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
 		return;
 	}
 
-	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : MIN(MAX_TICKS, MAX(ticks, 0));
+	uint32_t cyc;
+	uint32_t ch = system_clock_channel_data.channel;
 
-	uint64_t delta_time = ticks * CYC_PER_TICK;
+	if ((uint32_t)ticks > MAX_TICKS) {
+		cyc = MAX_CYCLES;
+	} else {
+		cyc = ticks * CYC_PER_TICK;
+	}
 
-	uint64_t target_time = counter() + delta_time;
+	if (cc_value == expired_cc) {
+		cc_value += cyc;
+		nrfx_grtc_syscounter_cc_rel_set(ch, cyc, NRFX_GRTC_CC_RELATIVE_COMPARE);
+	} else {
+		bool safe_setting = false;
+		int64_t prev_cc_val = cc_value;
 
-	/* Rounded down target_time to the tick boundary
-	 * (but not less than one tick after the last)
-	 */
-	target_time = MAX((target_time - last_count)/CYC_PER_TICK, 1)*CYC_PER_TICK + last_count;
+		cc_value = last_count + last_elapsed + cyc;
+
+		/* In case of a timeout abort it may happen that the new CC is set to a
+		 * value later than the previous CC. If the previous CC value is not far
+		 * in the future, there is a risk that the COMPARE event for that previous
+		 * CC value will still be triggered. If there is such a risk, a safe
+		 * procedure must be applied, which is more time consuming but ensures
+		 * that there will be no spurious event.
+		 */
+		if (prev_cc_val < cc_value) {
+			int64_t now = last_count + last_elapsed;
 
-	system_timeout_set_abs(target_time);
+			safe_setting = (prev_cc_val - now) < LATENCY_THR_TICKS;
+		}
+
+		nrfx_grtc_syscounter_cc_abs_set(ch, cc_value, safe_setting);
+	}
 }
 
 #if defined(CONFIG_NRF_GRTC_TIMER_APP_DEFINED_INIT)
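Pulling the new sys_clock_set_timeout() strategy together, the sketch below models the decision between a relative (CCADD-style) write and an absolute write with the optional "safe" mode. Every name is an illustrative stand-in; the threshold mirrors LATENCY_THR_TICKS from this change, and cyc is assumed to be the already-clamped cycle count.

#include <stdbool.h>
#include <stdint.h>

#define EX_LATENCY_THR 200  /* mirrors LATENCY_THR_TICKS */

struct ex_timer_state {
	uint64_t last_count;   /* cycles at the last announcement */
	uint32_t last_elapsed; /* cached raw cycles since the last announcement */
	uint64_t cc_value;     /* CC value believed to be programmed */
	uint64_t expired_cc;   /* CC value of the compare that last fired */
};

enum ex_cc_write { EX_CC_RELATIVE, EX_CC_ABSOLUTE, EX_CC_ABSOLUTE_SAFE };

/* Decide how the next compare value is written, mirroring the branch structure of
 * sys_clock_set_timeout() above.
 */
static enum ex_cc_write ex_plan_cc_write(struct ex_timer_state *s, uint32_t cyc, uint64_t *cc_out)
{
	if (s->cc_value == s->expired_cc) {
		/* The previously programmed CC has already fired, so a cheap relative
		 * add (CCADD) from the current counter value cannot race with it.
		 */
		s->cc_value += cyc;
		*cc_out = s->cc_value;
		return EX_CC_RELATIVE;
	}

	bool safe = false;
	int64_t prev_cc = (int64_t)s->cc_value;
	int64_t now = (int64_t)(s->last_count + s->last_elapsed);

	s->cc_value = s->last_count + s->last_elapsed + cyc;
	*cc_out = s->cc_value;

	/* Only when the new CC lands after the old one can the still-armed old CC fire
	 * first; if that old CC is within the latency threshold of "now", request the
	 * slower write that guarantees no spurious COMPARE event.
	 */
	if (prev_cc < (int64_t)s->cc_value) {
		safe = (prev_cc - now) < EX_LATENCY_THR;
	}

	return safe ? EX_CC_ABSOLUTE_SAFE : EX_CC_ABSOLUTE;
}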