@@ -33,8 +33,14 @@ static kcb_t kernel_state = {
 };
 kcb_t *kcb = &kernel_state;
 
-/* Deferred timer work flag to reduce interrupt latency */
-static volatile bool timer_work_pending = false;
+/* Timer work management for reduced latency */
+static volatile uint32_t timer_work_pending = 0;    /* timer work types */
+static volatile uint32_t timer_work_generation = 0; /* counter for coalescing */
+
+/* Timer work types for prioritized processing */
+#define TIMER_WORK_TICK_HANDLER (1U << 0) /* Standard timer callbacks */
+#define TIMER_WORK_DELAY_UPDATE (1U << 1) /* Task delay processing */
+#define TIMER_WORK_CRITICAL     (1U << 2) /* High-priority timer work */
 
 #if CONFIG_STACK_PROTECTION
 /* Stack canary checking frequency - check every N context switches */
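The flag is now a read-modify-write bitmask shared between interrupt and task context rather than a single boolean store. A minimal producer-side sketch, assuming hypothetical `hal_interrupt_save()`/`hal_interrupt_restore()` masking primitives (they are not part of this change):

```c
/* Sketch only: mark deferred timer work from any context.
 * hal_interrupt_save()/hal_interrupt_restore() are assumed primitives,
 * not part of this change. */
static inline void timer_work_mark(uint32_t work_bits)
{
    uint32_t flags = hal_interrupt_save();  /* assumed: mask interrupts */
    timer_work_pending |= work_bits;        /* coalesce with already-pending work */
    timer_work_generation++;                /* note that new work has arrived */
    hal_interrupt_restore(flags);           /* assumed: restore previous state */
}
```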
@@ -112,6 +118,66 @@ static void task_stack_check(void)
 }
 #endif /* CONFIG_STACK_PROTECTION */
 
+/* Delay update with early exit and batching */
+static list_node_t *delay_update_batch(list_node_t *node, void *arg)
+{
+    uint32_t *ready_count = (uint32_t *) arg;
+    if (unlikely(!node || !node->data))
+        return NULL;
+
+    tcb_t *t = node->data;
+
+    /* Skip non-blocked tasks (common case) */
+    if (likely(t->state != TASK_BLOCKED))
+        return NULL;
+
+    /* Decrement the remaining delay and wake the task when it expires */
+    if (t->delay > 0) {
+        if (--t->delay == 0) {
+            t->state = TASK_READY;
+            (*ready_count)++;
+        }
+    }
+    return NULL;
+}
+
+/* Timer work processing with coalescing and prioritization */
+static inline void process_timer_work(uint32_t work_mask)
+{
+    if (unlikely(!work_mask))
+        return;
+
+    /* Process high-priority timer work first */
+    if (work_mask & TIMER_WORK_CRITICAL) {
+        /* Handle critical timer callbacks immediately */
+        _timer_tick_handler();
+    } else if (work_mask & TIMER_WORK_TICK_HANDLER) {
+        /* Handle standard timer callbacks */
+        _timer_tick_handler();
+    }
+
+    /* Delay updates are handled separately in the scheduler */
+}
+
+/* Fast timer work processing for yield points */
+static inline void process_deferred_timer_work(void)
+{
+    uint32_t work = timer_work_pending;
+    if (likely(!work))
+        return;
+
+    /* Clear pending work; the generation counter detects work arriving during processing */
+    uint32_t current_gen = timer_work_generation;
+    timer_work_pending = 0;
+
+    process_timer_work(work);
+
+    /* Check whether new work arrived while processing */
+    if (unlikely(timer_work_generation != current_gen)) {
+        ; /* New work arrived; it will be handled on the next yield */
+    }
+}
+
 /* Updates task delay counters and unblocks tasks when delays expire */
 static list_node_t *delay_update(list_node_t *node, void *arg)
 {
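One caveat worth noting: in `process_deferred_timer_work()` the window between reading `timer_work_pending` and clearing it is only detected (not closed) by the generation check, so a work bit raised by an interrupt in that window is cleared and only re-raised by a later tick. A stricter snapshot-and-clear, sketched under the same assumed masking primitives as above:

```c
/* Sketch only: take all pending work atomically with interrupts masked. */
static inline uint32_t timer_work_take(void)
{
    uint32_t flags = hal_interrupt_save();  /* assumed HAL primitive */
    uint32_t work = timer_work_pending;     /* snapshot pending work bits */
    timer_work_pending = 0;                 /* clear while still masked */
    hal_interrupt_restore(flags);
    return work;
}
```

With such a helper, `process_deferred_timer_work()` would reduce to `process_timer_work(timer_work_take());` and the lost-update window disappears.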
@@ -298,7 +364,11 @@ static int32_t noop_rtsched(void)
 void dispatcher(void)
 {
     kcb->ticks++;
-    timer_work_pending = true;
+
+    /* Set timer work with a generation increment for coalescing */
+    timer_work_pending |= TIMER_WORK_TICK_HANDLER;
+    timer_work_generation++;
+
     _dispatch();
 }
 
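Only `TIMER_WORK_TICK_HANDLER` is raised in this change; `TIMER_WORK_CRITICAL` and `TIMER_WORK_DELAY_UPDATE` are defined for later use. As an illustration only, a hypothetical producer of the high-priority path would look like:

```c
/* Sketch only: hypothetical caller requesting the critical branch
 * of process_timer_work(); nothing in this change raises this bit. */
static inline void request_critical_timer_work(void)
{
    timer_work_pending |= TIMER_WORK_CRITICAL;
    timer_work_generation++;
}
```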
@@ -321,7 +391,9 @@ void dispatch(void)
     task_stack_check();
 #endif
 
-    list_foreach(kcb->tasks, delay_update, NULL);
+    /* Batch-process task delays for better efficiency */
+    uint32_t ready_count = 0;
+    list_foreach(kcb->tasks, delay_update_batch, &ready_count);
 
     /* Hook for real-time scheduler - if it selects a task, use it */
     if (kcb->rt_sched() < 0)
@@ -340,10 +412,7 @@ void yield(void)
         return;
 
     /* Process deferred timer work during yield */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     /* HAL context switching is used for preemptive scheduling. */
     if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
@@ -514,10 +583,7 @@ void mo_task_yield(void)
 void mo_task_delay(uint16_t ticks)
 {
     /* Process deferred timer work before sleeping */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     if (!ticks)
         return;
@@ -663,10 +729,7 @@ int32_t mo_task_idref(void *task_entry)
 void mo_task_wfi(void)
 {
     /* Process deferred timer work before waiting */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     if (!kcb->preemptive)
         return;
@@ -698,16 +761,12 @@ void _sched_block(queue_t *wait_q)
         panic(ERR_SEM_OPERATION);
 
     /* Process deferred timer work before blocking */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     tcb_t *self = kcb->task_current->data;
 
-    if (queue_enqueue(wait_q, self) != 0) {
+    if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
-    }
 
     self->state = TASK_BLOCKED;
     _yield();