Commit c0fafca

Implement work coalescing and generation-based tracking
This commit replaces the simple timer-work flag with a prioritized, bitmask-based work system to reduce interrupt latency: the tick path only records pending work, and the coalesced work is drained at natural yield points.
1 parent cbf5378 commit c0fafca
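The mechanism this commit introduces can be read in isolation: the tick path only ORs a bit into a pending-work mask and bumps a generation counter, while voluntary yield points snapshot the mask, clear it, and run each kind of work at most once. The following is a minimal, self-contained sketch of that pattern; the names (request_work, drain_deferred_work, handle_tick) and the host-side main() are illustrative stand-ins, not code from kernel/task.c.

#include <stdint.h>
#include <stdio.h>

/* Work-type bits, mirroring the flags this commit adds to kernel/task.c */
#define WORK_TICK     (1U << 0)
#define WORK_CRITICAL (1U << 2)

static volatile uint32_t work_pending;    /* bitmask of deferred work */
static volatile uint32_t work_generation; /* bumped on every new request */

/* Producer side: what the tick path does - cheap, no callbacks run here */
static void request_work(uint32_t mask)
{
    work_pending |= mask; /* coalesces with whatever is already pending */
    work_generation++;    /* lets the consumer notice late arrivals */
}

/* Stand-in for _timer_tick_handler() */
static void handle_tick(void)
{
    puts("tick work processed");
}

/* Consumer side: what a yield point does */
static void drain_deferred_work(void)
{
    uint32_t work = work_pending;
    if (!work)
        return;

    uint32_t gen = work_generation; /* snapshot before clearing */
    work_pending = 0;

    /* Critical work takes priority, but each drain runs the handler once */
    if (work & (WORK_CRITICAL | WORK_TICK))
        handle_tick();

    if (work_generation != gen)
        puts("new work arrived while draining; next yield handles it");
}

int main(void)
{
    request_work(WORK_TICK);
    request_work(WORK_TICK); /* coalesced with the request above */
    drain_deferred_work();   /* prints "tick work processed" */
    drain_deferred_work();   /* nothing pending, returns immediately */
    return 0;
}

Compiled on a host, this prints "tick work processed" exactly once: two tick requests issued before the drain coalesce into a single handler invocation.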

File tree

1 file changed (+81, -22)
kernel/task.c

Lines changed: 81 additions & 22 deletions
@@ -33,8 +33,14 @@ static kcb_t kernel_state = {
 };
 kcb_t *kcb = &kernel_state;
 
-/* Deferred timer work flag to reduce interrupt latency */
-static volatile bool timer_work_pending = false;
+/* Timer work management for reduced interrupt latency */
+static volatile uint32_t timer_work_pending = 0;    /* Bitmask of pending timer work types */
+static volatile uint32_t timer_work_generation = 0; /* Generation counter for coalescing */
+
+/* Timer work types for prioritized processing */
+#define TIMER_WORK_TICK_HANDLER (1U << 0) /* Standard timer callbacks */
+#define TIMER_WORK_DELAY_UPDATE (1U << 1) /* Task delay processing */
+#define TIMER_WORK_CRITICAL     (1U << 2) /* High-priority timer work */
 
 #if CONFIG_STACK_PROTECTION
 /* Stack canary checking frequency - check every N context switches */
@@ -112,6 +118,66 @@ static void task_stack_check(void)
 }
 #endif /* CONFIG_STACK_PROTECTION */
 
+/* Delay update with early exit and batch counting of woken tasks */
+static list_node_t *delay_update_batch(list_node_t *node, void *arg)
+{
+    uint32_t *ready_count = (uint32_t *) arg;
+    if (unlikely(!node || !node->data))
+        return NULL;
+
+    tcb_t *t = node->data;
+
+    /* Skip non-blocked tasks (common case) */
+    if (likely(t->state != TASK_BLOCKED))
+        return NULL;
+
+    /* Decrement the delay and wake the task once it expires */
+    if (t->delay > 0) {
+        if (--t->delay == 0) {
+            t->state = TASK_READY;
+            (*ready_count)++;
+        }
+    }
+    return NULL;
+}
+
+/* Timer work processing with coalescing and prioritization */
+static inline void process_timer_work(uint32_t work_mask)
+{
+    if (unlikely(!work_mask))
+        return;
+
+    /* Process high-priority timer work first */
+    if (work_mask & TIMER_WORK_CRITICAL) {
+        /* Handle critical timer callbacks immediately */
+        _timer_tick_handler();
+    } else if (work_mask & TIMER_WORK_TICK_HANDLER) {
+        /* Handle standard timer callbacks */
+        _timer_tick_handler();
+    }
+
+    /* Delay updates are handled separately in the scheduler */
+}
+
+/* Fast timer work processing for yield points */
+static inline void process_deferred_timer_work(void)
+{
+    uint32_t work = timer_work_pending;
+    if (likely(!work))
+        return;
+
+    /* Snapshot the generation, then clear pending work before processing */
+    uint32_t current_gen = timer_work_generation;
+    timer_work_pending = 0;
+
+    process_timer_work(work);
+
+    /* Check if new work arrived while processing */
+    if (unlikely(timer_work_generation != current_gen)) {
+        ; /* New work arrived; it will be processed on the next yield */
+    }
+}
+
 /* Updates task delay counters and unblocks tasks when delays expire */
 static list_node_t *delay_update(list_node_t *node, void *arg)
 {
@@ -298,7 +364,11 @@ static int32_t noop_rtsched(void)
 void dispatcher(void)
 {
     kcb->ticks++;
-    timer_work_pending = true;
+
+    /* Set timer work with generation increment for coalescing */
+    timer_work_pending |= TIMER_WORK_TICK_HANDLER;
+    timer_work_generation++;
+
     _dispatch();
 }
 
@@ -321,7 +391,9 @@ void dispatch(void)
     task_stack_check();
 #endif
 
-    list_foreach(kcb->tasks, delay_update, NULL);
+    /* Batch process task delays for better efficiency */
+    uint32_t ready_count = 0;
+    list_foreach(kcb->tasks, delay_update_batch, &ready_count);
 
     /* Hook for real-time scheduler - if it selects a task, use it */
     if (kcb->rt_sched() < 0)
@@ -340,10 +412,7 @@ void yield(void)
         return;
 
     /* Process deferred timer work during yield */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     /* HAL context switching is used for preemptive scheduling. */
     if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
@@ -514,10 +583,7 @@ void mo_task_yield(void)
 void mo_task_delay(uint16_t ticks)
 {
     /* Process deferred timer work before sleeping */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     if (!ticks)
         return;
@@ -663,10 +729,7 @@ int32_t mo_task_idref(void *task_entry)
 void mo_task_wfi(void)
 {
     /* Process deferred timer work before waiting */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     if (!kcb->preemptive)
         return;
@@ -698,16 +761,12 @@ void _sched_block(queue_t *wait_q)
         panic(ERR_SEM_OPERATION);
 
     /* Process deferred timer work before blocking */
-    if (timer_work_pending) {
-        timer_work_pending = false;
-        _timer_tick_handler();
-    }
+    process_deferred_timer_work();
 
     tcb_t *self = kcb->task_current->data;
 
-    if (queue_enqueue(wait_q, self) != 0) {
+    if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
-    }
 
     self->state = TASK_BLOCKED;
     _yield();
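The other half of the change, delay_update_batch() above, walks the task list once per dispatch, decrementing delays and counting the tasks that wake up. Below is a minimal host-side sketch of that per-task logic; the tcb_t, task-state values, and delay_tick() here are simplified stand-ins for illustration, not the kernel's real definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's task states and TCB
 * (assumptions for illustration, not the real kernel/task.c types). */
enum { TASK_READY, TASK_BLOCKED };

typedef struct {
    int state;
    uint32_t delay; /* remaining ticks while blocked */
} tcb_t;

/* Same shape as the body of delay_update_batch(): skip non-blocked tasks,
 * decrement the delay, and wake the task when it reaches zero. */
static void delay_tick(tcb_t *t, uint32_t *ready_count)
{
    if (t->state != TASK_BLOCKED)
        return;
    if (t->delay > 0 && --t->delay == 0) {
        t->state = TASK_READY;
        (*ready_count)++;
    }
}

int main(void)
{
    tcb_t tasks[] = {
        {TASK_BLOCKED, 1}, {TASK_BLOCKED, 3}, {TASK_READY, 0},
    };
    uint32_t ready = 0;

    /* One pass over all tasks, as dispatch() now does via list_foreach() */
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        delay_tick(&tasks[i], &ready);

    printf("tasks woken this tick: %u\n", (unsigned) ready); /* prints 1 */
    return 0;
}

Running this wakes exactly one task on the simulated tick (the first task's delay expires, the second still has two ticks to go), so it prints "tasks woken this tick: 1".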
