#define COUNTER_SPAN (GRTC_SYSCOUNTERL_VALUE_Msk | ((uint64_t)GRTC_SYSCOUNTERH_VALUE_Msk << 32))
#define MAX_ABS_TICKS (COUNTER_SPAN / CYC_PER_TICK)

- #define MAX_TICKS \
- 	(((COUNTER_SPAN / CYC_PER_TICK) > INT_MAX) ? INT_MAX : (COUNTER_SPAN / CYC_PER_TICK))
-
- #define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
+ /* To allow use of CCADD we need to limit max cycles to 31 bits. */
+ #define MAX_REL_CYCLES BIT_MASK(31)
+ #define MAX_REL_TICKS (MAX_REL_CYCLES / CYC_PER_TICK)
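+ /* Example, assuming a 1 MHz SYSCOUNTER and 1000 ticks per second (CYC_PER_TICK == 1000):
+  * MAX_REL_CYCLES is 2^31 - 1 cycles, so MAX_REL_TICKS comes out to about 2.1 million
+  * ticks, i.e. roughly 35 minutes of relative timeout range.
+  */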

#if DT_NODE_HAS_STATUS_OKAY(LFCLK_NODE)
#define LFCLK_FREQUENCY_HZ DT_PROP(LFCLK_NODE, clock_frequency)
#else
#define LFCLK_FREQUENCY_HZ CONFIG_CLOCK_CONTROL_NRF_K32SRC_FREQUENCY
#endif

+ /* Threshold used to determine if there is a risk of an unexpected GRTC COMPARE event
+  * coming from a previous CC value.
+  */
+ #define LATENCY_THR_TICKS 200
+
#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = DT_IRQN(GRTC_NODE);
#endif

static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context);

- static struct k_spinlock lock;

static uint64_t last_count; /* Time (SYSCOUNTER value) @last sys_clock_announce() */
+ static uint32_t last_elapsed;
+ static uint64_t cc_value; /* Value that is expected to be in CC register. */
+ static uint64_t expired_cc; /* Last CC value for which the compare event expired. */
static atomic_t int_mask;
static uint8_t ext_channels_allocated;
static uint64_t grtc_start_value;
@@ -150,17 +156,13 @@ static void compare_int_unlock(int32_t chan, bool key)
static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_context)
{
	ARG_UNUSED(id);
+ 	ARG_UNUSED(cc_val);
	ARG_UNUSED(p_context);
- 	uint64_t dticks;
- 	uint64_t now = counter();
-
- 	if (unlikely(now < cc_val)) {
- 		return;
- 	}
+ 	uint32_t dticks;

	dticks = counter_sub(cc_val, last_count) / CYC_PER_TICK;
-
- 	last_count += dticks * CYC_PER_TICK;
+ 	last_count += (dticks * CYC_PER_TICK);
+ 	expired_cc = cc_val;
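+ 	/* last_count advances by a whole number of ticks so announcements stay tick-aligned,
+ 	 * and expired_cc records the CC value that has just fired; sys_clock_set_timeout()
+ 	 * compares it against cc_value to detect that the previous compare already expired.
+ 	 */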

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* protection is not needed because we are in the GRTC interrupt
@@ -169,6 +171,7 @@ static void sys_clock_timeout_handler(int32_t id, uint64_t cc_val, void *p_conte
		system_timeout_set_abs(last_count + CYC_PER_TICK);
	}

+ 	last_elapsed = 0;
	sys_clock_announce((int32_t)dticks);
}

@@ -372,6 +375,7 @@ uint64_t z_nrf_grtc_timer_startup_value_get(void)
int z_nrf_grtc_wakeup_prepare(uint64_t wake_time_us)
{
	nrfx_err_t err_code;
+ 	static struct k_spinlock lock;
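+ 	/* Local lock, used only by this function. */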
	static uint8_t systemoff_channel;
	uint64_t now = counter();
	nrfx_grtc_sleep_config_t sleep_cfg;
@@ -434,20 +438,12 @@ int z_nrf_grtc_wakeup_prepare(uint64_t wake_time_us)

uint32_t sys_clock_cycle_get_32(void)
{
- 	k_spinlock_key_t key = k_spin_lock(&lock);
- 	uint32_t ret = (uint32_t)counter();
-
- 	k_spin_unlock(&lock, key);
- 	return ret;
+ 	return (uint32_t)counter();
}

uint64_t sys_clock_cycle_get_64(void)
{
- 	k_spinlock_key_t key = k_spin_lock(&lock);
- 	uint64_t ret = counter();
-
- 	k_spin_unlock(&lock, key);
- 	return ret;
+ 	return counter();
}

uint32_t sys_clock_elapsed(void)
@@ -456,7 +452,9 @@ uint32_t sys_clock_elapsed(void)
		return 0;
	}

- 	return (uint32_t)(counter_sub(counter(), last_count) / CYC_PER_TICK);
+ 	last_elapsed = (uint32_t)counter_sub(counter(), last_count);
+
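+ 	/* Cache the raw cycle delta; sys_clock_set_timeout() reuses last_count + last_elapsed
+ 	 * as the current time so it does not have to read the SYSCOUNTER again.
+ 	 */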
+ 	return last_elapsed / CYC_PER_TICK;
}

#if !defined(CONFIG_GEN_SW_ISR_TABLE)
@@ -512,6 +510,10 @@ static int sys_clock_driver_init(void)
	last_count = (counter() / CYC_PER_TICK) * CYC_PER_TICK;
	grtc_start_value = last_count;
+ 	expired_cc = UINT64_MAX;
+ 	nrfx_grtc_channel_callback_set(system_clock_channel_data.channel,
+ 				       sys_clock_timeout_handler, NULL);
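+ 	/* The handler is registered once here; sys_clock_set_timeout() afterwards only
+ 	 * programs new CC values for the channel.
+ 	 */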
+
	int_mask = NRFX_GRTC_CONFIG_ALLOWED_CC_CHANNELS_MASK;
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		system_timeout_set_relative(CYC_PER_TICK);
@@ -570,18 +572,48 @@ void sys_clock_set_timeout(int32_t ticks, bool idle)
		return;
	}

- 	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : MIN(MAX_TICKS, MAX(ticks, 0));
+ 	uint32_t ch = system_clock_channel_data.channel;
+
+ 	if ((cc_value == expired_cc) && (ticks < MAX_REL_TICKS)) {
+ 		uint32_t cyc = ticks * CYC_PER_TICK;
+
+ 		if (cyc == 0) {
+ 			/* GRTC will expire anyway, since the hardware ensures that a value in
+ 			 * the past triggers an event, but we still need to advance cc_value:
+ 			 * the fast-path condition above expects cc_value to change after each
+ 			 * call to this function.
+ 			 */
+ 			cyc = 1;
+ 		}

- 	uint64_t delta_time = ticks * CYC_PER_TICK;
+ 		/* First timeout set after the previous expiration and the timeout is short,
+ 		 * so the fast path using the relative CC configuration can be used.
+ 		 */
+ 		cc_value += cyc;
+ 		nrfx_grtc_syscounter_cc_rel_set(ch, cyc, NRFX_GRTC_CC_RELATIVE_COMPARE);
+ 		return;
+ 	}

- 	uint64_t target_time = counter() + delta_time;
+ 	uint64_t cyc = (uint64_t)ticks * CYC_PER_TICK;
+ 	bool safe_setting = false;
+ 	int64_t prev_cc_val = cc_value;

- 	/* Rounded down target_time to the tick boundary
- 	 * (but not less than one tick after the last)
+ 	cc_value = last_count + last_elapsed + cyc;
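+ 	/* New absolute CC value: the driver's view of the current time (last announce point
+ 	 * plus the last measured elapsed cycles) plus the requested timeout in cycles.
+ 	 */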
+
+ 	/* In the case of a timeout abort it may happen that the CC register is being set to
+ 	 * a value later than the previous CC value. If the previous CC value is not far in
+ 	 * the future, there is a risk that a COMPARE event will still be triggered for that
+ 	 * previous value. If there is such a risk, a safe procedure must be applied; it is
+ 	 * more time consuming but ensures that there will be no spurious event.
	 */
- 	target_time = MAX((target_time - last_count) / CYC_PER_TICK, 1) * CYC_PER_TICK + last_count;
+ 	if (prev_cc_val < cc_value) {
+ 		int64_t now = last_count + last_elapsed;
+
+ 		safe_setting = (prev_cc_val - now) < LATENCY_THR_TICKS;
+ 	}

- 	system_timeout_set_abs(target_time);
+ 	nrfx_grtc_syscounter_cc_abs_set(ch, cc_value, safe_setting);
}

#if defined(CONFIG_NRF_GRTC_TIMER_APP_DEFINED_INIT)