@@ -19,7 +19,7 @@ void _timer_tick_handler(void);
 /* Kernel-wide control block (KCB) */
 static kcb_t kernel_state = {
     .tasks = NULL,
-    .task_current = NULL,
+    .task_current = {},
     .rt_sched = noop_rtsched,
     .timer_list = NULL, /* Managed by timer.c, but stored here. */
     .next_tid = 1, /* Start from 1 to avoid confusion with invalid ID 0 */
@@ -145,10 +145,10 @@ static void task_stack_check(void)
     if (!should_check)
         return;
 
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_STACK_CHECK);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (unlikely(!is_valid_task(self)))
         panic(ERR_STACK_CHECK);
 
@@ -366,10 +366,10 @@ void sched_dequeue_task(tcb_t *task)
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         return;
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
 
     /* Decrement time slice */
     if (current_task->time_slice > 0)
@@ -414,17 +414,17 @@ void sched_wakeup_task(tcb_t *task)
  */
 uint16_t sched_select_next_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
 
     /* Mark current task as ready if it was running */
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
     /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
+    list_node_t *start_node = get_task_current();
     list_node_t *node = start_node;
     int iterations = 0; /* Safety counter to prevent infinite loops */
 
@@ -441,7 +441,7 @@ uint16_t sched_select_next_task(void)
             continue;
 
         /* Found a ready task */
-        kcb->task_current = node;
+        set_task_current(node);
         task->state = TASK_RUNNING;
         task->time_slice = get_priority_timeslice(task->prio_level);
 
@@ -478,14 +478,14 @@ void dispatcher(void)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Save current context using dedicated HAL routine that handles both
      * execution context and processor state for context switching.
      * Returns immediately if this is the restore path.
      */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -505,20 +505,20 @@ void dispatch(void)
     hal_interrupt_tick();
 
     /* Restore next task context */
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Cooperative context switch */
 void yield(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return;
 
     /* Process deferred timer work during yield */
     process_deferred_timer_work();
 
     /* HAL context switching is used for preemptive scheduling. */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -530,7 +530,7 @@ void yield(void)
     list_foreach(kcb->tasks, delay_update, NULL);
 
     sched_select_next_task(); /* Use O(1) priority scheduler */
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Stack initialization with minimal overhead */
@@ -618,8 +618,8 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    if (!get_task_current())
+        set_task_current(node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -691,12 +691,12 @@ void mo_task_delay(uint16_t ticks)
         return;
 
     spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) {
         spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Set delay and blocked state - scheduler will skip blocked tasks */
     self->delay = ticks;
@@ -726,7 +726,7 @@ int32_t mo_task_suspend(uint16_t id)
     }
 
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (get_task_current() == node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -813,9 +813,9 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
 
 uint16_t mo_task_id(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return 0;
-    return ((tcb_t *) kcb->task_current->data)->id;
+    return ((tcb_t *) get_task_current()->data)->id;
 }
 
 int32_t mo_task_idref(void *task_entry)
@@ -860,14 +860,14 @@ uint64_t mo_uptime(void)
 
 void _sched_block(queue_t *wait_q)
 {
-    if (unlikely(!wait_q || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!wait_q || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         panic(ERR_SEM_OPERATION);
 
     /* Process deferred timer work before blocking */
     process_deferred_timer_work();
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
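
The get_task_current()/set_task_current() accessors that every hunk above switches to are not defined anywhere in this diff. A minimal sketch of what such a pair could look like, assuming the KCB's task_current field became a per-hart array (that array form and the hal_cpu_id() helper are illustrative assumptions, not the kernel's actual definitions; the sketch also relies on the kernel's existing kcb and list_node_t declarations):

/* Illustrative sketch only: assumes task_current is now an array indexed by
 * the current hart and that a hal_cpu_id() helper exists. Neither is shown
 * in this diff. */
static inline list_node_t *get_task_current(void)
{
    return kcb->task_current[hal_cpu_id()];
}

static inline void set_task_current(list_node_t *node)
{
    kcb->task_current[hal_cpu_id()] = node;
}

Whatever the real definitions are, funneling every read and write of the current-task node through one accessor pair keeps the call sites identical regardless of how the field is stored, which is why the {} zero-initializer in the first hunk can change the field's shape without touching the rest of the scheduler.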