@@ -23,7 +23,7 @@ void _timer_tick_handler(void);
  */
 static kcb_t kernel_state = {
     .tasks = NULL,
-    .task_current = NULL,
+    .task_current = {},
     .rt_sched = noop_rtsched,
     .timer_list = NULL, /* Managed by timer.c, but stored here. */
     .next_tid = 1, /* Start from 1 to avoid confusion with invalid ID 0 */
@@ -85,10 +85,10 @@ static void task_stack_check(void)
     if (!should_check)
         return;
 
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_STACK_CHECK);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (unlikely(!is_valid_task(self)))
         panic(ERR_STACK_CHECK);
 
@@ -205,7 +205,7 @@ void _yield(void) __attribute__((weak, alias("yield")));
 /* Scheduler with hint-based ready task search */
 static list_node_t *find_next_ready_task(void)
 {
-    if (unlikely(!kcb->task_current))
+    if (unlikely(!get_task_current()))
         return NULL;
 
     list_node_t *node;
@@ -225,7 +225,7 @@ static list_node_t *find_next_ready_task(void)
         }
     }
 
-    node = kcb->task_current;
+    node = get_task_current();
     while (itcnt++ < SCHED_IMAX) {
         node = list_cnext(kcb->tasks, node);
         if (unlikely(!node || !node->data))
@@ -258,11 +258,11 @@ static list_node_t *find_next_ready_task(void)
 /* Scheduler with reduced overhead */
 static uint16_t schedule_next_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Mark the previously running task as ready for the next cycle. */
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
@@ -273,7 +273,7 @@ static uint16_t schedule_next_task(void)
     }
 
     /* Update scheduler state */
-    kcb->task_current = next_node;
+    set_task_current(next_node);
     tcb_t *next_task = next_node->data;
     next_task->state = TASK_RUNNING;
 
@@ -297,11 +297,11 @@ void dispatcher(void)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Return from longjmp: context is restored, continue task execution. */
-    if (setjmp(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (setjmp(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
     task_stack_check();
@@ -313,16 +313,16 @@ void dispatch(void)
     }
 
     hal_interrupt_tick();
-    longjmp(((tcb_t *) kcb->task_current->data)->context, 1);
+    longjmp(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Cooperative context switch */
 void yield(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return;
 
-    if (setjmp(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (setjmp(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
     task_stack_check();
@@ -332,7 +332,7 @@ void yield(void)
     list_foreach(kcb->tasks, delay_update, NULL);
 
     schedule_next_task();
-    longjmp(((tcb_t *) kcb->task_current->data)->context, 1);
+    longjmp(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Stack initialization with minimal overhead */
@@ -411,8 +411,8 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++;
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    if (!get_task_current())
+        set_task_current(node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -483,12 +483,12 @@ void mo_task_delay(uint16_t ticks)
         return;
 
     spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) {
         spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     self->delay = ticks;
     self->state = TASK_BLOCKED;
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
@@ -516,7 +516,7 @@ int32_t mo_task_suspend(uint16_t id)
     }
 
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (get_task_current() == node);
 
     /* Clear ready hint if suspending that task */
     if (kcb->last_ready_hint == node)
@@ -603,9 +603,9 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
 
 uint16_t mo_task_id(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return 0;
-    return ((tcb_t *) kcb->task_current->data)->id;
+    return ((tcb_t *) get_task_current()->data)->id;
 }
 
 int32_t mo_task_idref(void *task_entry)
@@ -647,11 +647,11 @@ uint64_t mo_uptime(void)
 
 void _sched_block(queue_t *wait_q)
 {
-    if (unlikely(!wait_q || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!wait_q || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         panic(ERR_SEM_OPERATION);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     if (queue_enqueue(wait_q, self) != 0) {
         panic(ERR_SEM_OPERATION);
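
The get_task_current()/set_task_current() definitions are not part of the hunks shown above. The sketch below is only an assumption of what they might look like, inferred from the `{}` aggregate initializer given to task_current in the first hunk; the per-hart array, the MAX_HARTS bound, and the hal_cpu_id() helper are hypothetical names, not code taken from this commit.

/* Hypothetical sketch only -- the real accessor definitions live outside
 * these hunks. Assumes kcb->task_current was turned into a per-hart array
 * (which would explain the `{}` initializer) and that a hal_cpu_id()-style
 * helper returning the current hart index exists.
 */
static inline list_node_t *get_task_current(void)
{
    return kcb->task_current[hal_cpu_id()];
}

static inline void set_task_current(list_node_t *node)
{
    kcb->task_current[hal_cpu_id()] = node;
}

Whatever the exact definitions are, routing every access through a single pair of accessors means the scheduler, dispatcher, and task-management paths no longer care how the "current task" is stored, so the storage can change (single pointer, per-hart array, or otherwise) without touching these call sites again.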