#define COUNTER_SPAN (GRTC_SYSCOUNTERL_VALUE_Msk | ((uint64_t)GRTC_SYSCOUNTERH_VALUE_Msk << 32))
#define MAX_ABS_TICKS (COUNTER_SPAN / CYC_PER_TICK)

- #define MAX_TICKS \
- 	(((COUNTER_SPAN / CYC_PER_TICK) > INT_MAX) ? INT_MAX : (COUNTER_SPAN / CYC_PER_TICK))
- 
- #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
+ /* To allow use of CCADD, we need to limit max cycles to 31 bits. */
+ #define MAX_CYCLES BIT_MASK(31)
+ #define MAX_TICKS (MAX_CYCLES / CYC_PER_TICK)

#define LFCLK_FREQUENCY_HZ DT_PROP(LFCLK_NODE, clock_frequency)

+ /* Threshold used to determine if there is a risk of an unexpected GRTC COMPARE event
+  * coming from a previously programmed CC value.
+  */
+ #define LATENCY_THR_TICKS 200
+
#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = DT_IRQN(GRTC_NODE);
#endif

static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context);

- static struct k_spinlock lock;
static uint64_t last_count; /* Time (SYSCOUNTER value) @last sys_clock_announce() */
+ static uint32_t last_elapsed; /* Cycles since last_count captured by the latest sys_clock_elapsed() */
+ static uint64_t cc_value; /* Value that is expected to be in CC register. */
+ static uint64_t expired_cc; /* CC value of the most recently handled COMPARE event. */
static atomic_t int_mask;
static uint8_t ext_channels_allocated;
+ static bool in_announce; /* Set while sys_clock_announce() is called from the compare handler. */
static nrfx_grtc_channel_t system_clock_channel_data = {
	.handler = sys_clock_timeout_handler,
	.p_context = NULL,
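For context on the new limit: the patch caps a single programmed timeout at 31 bits worth of cycles so the relative CCADD path can be used. A quick back-of-the-envelope check of what that means in time (a standalone sketch; the 1 MHz SYSCOUNTER rate and the 10000 ticks/s kernel configuration are illustrative assumptions, not values taken from this patch):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: neither rate is asserted by the patch itself. */
#define ASSUMED_CYC_PER_SEC   1000000ULL
#define ASSUMED_TICKS_PER_SEC 10000ULL
#define CYC_PER_TICK (ASSUMED_CYC_PER_SEC / ASSUMED_TICKS_PER_SEC)

#define BIT_MASK(n) ((1ULL << (n)) - 1ULL)
#define MAX_CYCLES  BIT_MASK(31)
#define MAX_TICKS   (MAX_CYCLES / CYC_PER_TICK)

int main(void)
{
	/* Even limited to 31 bits, one compare still covers ~2147 s (~35 min)
	 * at 1 MHz, far beyond anything the kernel programs in a single shot.
	 */
	printf("CYC_PER_TICK=%llu MAX_CYCLES=%llu MAX_TICKS=%llu (~%llu s)\n",
	       (unsigned long long)CYC_PER_TICK,
	       (unsigned long long)MAX_CYCLES,
	       (unsigned long long)MAX_TICKS,
	       (unsigned long long)(MAX_CYCLES / ASSUMED_CYC_PER_SEC));
	return 0;
}
```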
@@ -145,17 +152,13 @@ static void compare_int_unlock(int32_t chan, bool key)
static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context)
{
	ARG_UNUSED(id);
+ 	ARG_UNUSED(cc_val);
	ARG_UNUSED(p_context);
	uint64_t dticks;
- 	uint64_t now = counter();
-
- 	if (unlikely(now < cc_val)) {
- 		return;
- 	}
-
	dticks = counter_sub(cc_val, last_count) / CYC_PER_TICK;

- 	last_count += dticks * CYC_PER_TICK;
+ 	last_count += (dticks * CYC_PER_TICK);
+ 	expired_cc = cc_val;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* protection is not needed because we are in the GRTC interrupt
@@ -164,7 +167,10 @@ static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_conte
		system_timeout_set_abs(last_count + CYC_PER_TICK);
	}

+ 	last_elapsed = 0;
+ 	in_announce = true;
	sys_clock_announce((int32_t)dticks);
+ 	in_announce = false;
}

int32_t z_nrf_grtc_timer_chan_alloc(void)
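The handler drops the extra counter() read and the early return, and keeps advancing last_count by whole ticks derived from the CC value that fired, so last_count stays on the tick grid even when the interrupt is serviced late. A minimal sketch of that bookkeeping (counter_sub() is not part of this diff; the masked wrap-safe subtraction, the 52-bit span, and the 100-cycle tick below are assumptions for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define CYC_PER_TICK 100u                   /* illustrative tick length  */
#define SPAN_MASK ((UINT64_C(1) << 52) - 1) /* stand-in for COUNTER_SPAN */

/* Assumed shape of counter_sub(): wrap-safe difference of two counter samples. */
static uint64_t counter_sub(uint64_t val, uint64_t old)
{
	return (val - old) & SPAN_MASK;
}

int main(void)
{
	uint64_t last_count = 1000; /* counter value at the previous announce */
	uint64_t cc_val = 1350;     /* CC that just fired, 3.5 ticks later    */

	/* Same arithmetic as the handler: whole ticks since the last announce... */
	uint64_t dticks = counter_sub(cc_val, last_count) / CYC_PER_TICK;

	/* ...and last_count advances by exactly that many ticks, staying aligned. */
	last_count += (dticks * CYC_PER_TICK);

	printf("dticks=%llu, new last_count=%llu\n",
	       (unsigned long long)dticks, (unsigned long long)last_count);
	return 0;
}
```

Announcing 3 ticks and moving last_count to 1300 leaves the residual 50 cycles to be reported with the next compare rather than being silently dropped.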
@@ -424,20 +430,12 @@ int z_nrf_grtc_wakeup_prepare(uint64_t wake_time_us)

uint32_t sys_clock_cycle_get_32(void)
{
- 	k_spinlock_key_t key = k_spin_lock(&lock);
- 	uint32_t ret = (uint32_t)counter();
-
- 	k_spin_unlock(&lock, key);
- 	return ret;
+ 	return (uint32_t)counter();
}

uint64_t sys_clock_cycle_get_64(void)
{
- 	k_spinlock_key_t key = k_spin_lock(&lock);
- 	uint64_t ret = counter();
-
- 	k_spin_unlock(&lock, key);
- 	return ret;
+ 	return counter();
}

uint32_t sys_clock_elapsed(void)
@@ -446,7 +444,13 @@ uint32_t sys_clock_elapsed(void)
		return 0;
	}

- 	return (uint32_t)(counter_sub(counter(), last_count) / CYC_PER_TICK);
+ 	if (in_announce) {
+ 		return 0;
+ 	}
+
+ 	last_elapsed = (uint32_t)counter_sub(counter(), last_count);
+
+ 	return last_elapsed / CYC_PER_TICK;
}

static int sys_clock_driver_init(void)
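Caching the raw cycle delta in last_elapsed lets sys_clock_set_timeout() below anchor the next absolute CC at the same point in time the kernel just observed via sys_clock_elapsed(), rather than at a second counter read. A minimal standalone sketch of that arithmetic (variable names mirror the driver; CYC_PER_TICK and the numbers are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define CYC_PER_TICK 100u /* illustrative tick length in GRTC cycles */

int main(void)
{
	uint64_t last_count = 5000;  /* counter value at the last announce            */
	uint32_t last_elapsed = 730; /* cached by the most recent sys_clock_elapsed() */
	int32_t ticks = 3;           /* timeout requested by the kernel               */

	uint32_t cyc = (uint32_t)ticks * CYC_PER_TICK;

	/* Mirrors the else-branch in sys_clock_set_timeout(): the compare value is
	 * measured from the instant the kernel last saw, keeping the CC consistent
	 * with the tick arithmetic the kernel performed on that elapsed value.
	 */
	uint64_t cc_value = last_count + last_elapsed + cyc;

	printf("cc_value=%llu\n", (unsigned long long)cc_value); /* 6030 */
	return 0;
}
```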
@@ -485,6 +489,9 @@ static int sys_clock_driver_init(void)
	}
#endif /* CONFIG_NRF_GRTC_START_SYSCOUNTER */

+ 	nrfx_grtc_channel_callback_set(system_clock_channel_data.channel,
+ 				       sys_clock_timeout_handler, NULL);
+
	int_mask = NRFX_GRTC_CONFIG_ALLOWED_CC_CHANNELS_MASK;
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		system_timeout_set_relative(CYC_PER_TICK);
@@ -543,18 +550,40 @@ void sys_clock_set_timeout(int32_t ticks, bool idle)
		return;
	}

- 	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : MIN(MAX_TICKS, MAX(ticks, 0));
+ 	uint32_t cyc;
+ 	uint32_t ch = system_clock_channel_data.channel;

- 	uint64_t delta_time = ticks * CYC_PER_TICK;
+ 	if ((uint32_t)ticks > MAX_TICKS) {
+ 		cyc = MAX_CYCLES;
+ 	} else {
+ 		cyc = ticks * CYC_PER_TICK;
+ 	}

- 	uint64_t target_time = counter() + delta_time;
+ 	if (in_announce && (cc_value == expired_cc)) {
+ 		cc_value += cyc;
+ 		nrfx_grtc_syscounter_cc_rel_set(ch, cyc, NRFX_GRTC_CC_RELATIVE_COMPARE);
+ 		in_announce = false;
+ 	} else {
+ 		bool safe_setting = false;
+ 		int64_t prev_cc_val = cc_value;

- 	/* Rounded down target_time to the tick boundary
- 	 * (but not less than one tick after the last)
- 	 */
- 	target_time = MAX((target_time - last_count) / CYC_PER_TICK, 1) * CYC_PER_TICK + last_count;
+ 		cc_value = last_count + last_elapsed + cyc;
+
+ 		/* In case of a timeout abort it may happen that the CC is being set to a
+ 		 * value later than the previous CC. If the previous CC value is not far
+ 		 * in the future, there is a risk that a COMPARE event will be triggered
+ 		 * for that previous CC value. If there is such a risk, the safe procedure
+ 		 * must be applied, which is more time consuming but ensures that there
+ 		 * will be no spurious event.
+ 		 */
+ 		if (prev_cc_val < cc_value) {
+ 			int64_t now = last_count + last_elapsed;

- 	system_timeout_set_abs(target_time);
+ 			safe_setting = (prev_cc_val - now) < LATENCY_THR_TICKS;
+ 		}
+
+ 		nrfx_grtc_syscounter_cc_abs_set(ch, cc_value, safe_setting);
+ 	}
}

#if defined(CONFIG_NRF_GRTC_TIMER_APP_DEFINED_INIT)
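The safe_setting condition can be read as: pay for the slower, synchronized CC write only when the stale compare value still sitting in the register could fire before the new one takes effect. A standalone sketch of that decision with a few worked cases (only LATENCY_THR_TICKS and the comparison are taken from the patch; the helper name and the numbers are made up):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LATENCY_THR_TICKS 200 /* same threshold as in the patch */

/* Decide whether the "safe" (slower, race-free) CC update is needed when an
 * already-programmed compare value prev_cc is replaced by new_cc.
 */
static bool needs_safe_setting(int64_t now, int64_t prev_cc, int64_t new_cc)
{
	/* Only the case where the new CC lies beyond the previous one is risky:
	 * the old value is still latched and may match before the new one lands.
	 */
	if (prev_cc < new_cc) {
		/* Risky only if the previous CC is about to expire (or already has). */
		return (prev_cc - now) < LATENCY_THR_TICKS;
	}
	return false;
}

int main(void)
{
	int64_t now = 100000;

	/* Previous CC only 50 cycles away, new one far out: take the safe path. */
	printf("%d\n", needs_safe_setting(now, now + 50, now + 100000));   /* 1 */

	/* Previous CC comfortably far away: the fast write is fine. */
	printf("%d\n", needs_safe_setting(now, now + 5000, now + 100000)); /* 0 */

	/* New CC is earlier than the previous one: no stale-match risk. */
	printf("%d\n", needs_safe_setting(now, now + 5000, now + 300));    /* 0 */
	return 0;
}
```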