Commit 005eed5
Use per-hart current task pointer in KCB
Previously, a single global pointer tracked the currently running task, which worked only for single-core systems. To support SMP, change the Kernel Control Block (KCB) to maintain an array of current-task pointers, one per hart. Add get_task_current() and set_task_current() helpers that retrieve and update the current task for the executing hart, and convert kernel and HAL code to use them instead of the single global pointer, ensuring correct task tracking on each hart.
1 parent 623125f commit 005eed5
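The core pattern throughout the commit is to index kernel state by the hart ID read from RISC-V's mhartid CSR, so each core tracks its own current task and only ever touches its own slot. A minimal standalone sketch of the idea (the read_csr() macro below is the usual RISC-V idiom and is assumed to match the kernel's own definition; names other than MAX_HARTS are illustrative, not the kernel's API):

    #include <stdint.h>

    /* Typical machine-mode CSR read; assumed equivalent to the kernel's
     * own read_csr() macro. */
    #define read_csr(reg)                              \
        ({                                             \
            uint32_t v_;                               \
            asm volatile("csrr %0, " #reg : "=r"(v_)); \
            v_;                                        \
        })

    #define MAX_HARTS 8

    /* One slot per hart. A hart only reads or writes its own slot, so
     * these particular accesses need no cross-hart synchronization. */
    static void *current_task[MAX_HARTS];

    static inline void *hart_current(void) /* illustrative name */
    {
        return current_task[read_csr(mhartid)];
    }

    static inline void hart_set_current(void *t) /* illustrative name */
    {
        current_task[read_csr(mhartid)] = t;
    }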

File tree: 5 files changed, +57 -40 lines

arch/riscv/hal.c

Lines changed: 1 addition & 1 deletion

@@ -320,7 +320,7 @@ void hal_timer_disable(void)
  */
 void hal_interrupt_tick(void)
 {
-    tcb_t *task = kcb->task_current->data;
+    tcb_t *task = get_task_current()->data;
     if (unlikely(!task))
         hal_panic(); /* Fatal error - invalid task state */

include/sys/task.h

Lines changed: 17 additions & 1 deletion

@@ -85,6 +85,8 @@ typedef struct tcb {
     void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */
 } tcb_t;
 
+#define MAX_HARTS 8
+
 /* Kernel Control Block (KCB)
  *
  * Singleton structure holding global kernel state, including task lists,
@@ -93,7 +95,7 @@ typedef struct tcb {
 typedef struct {
     /* Task Management */
     list_t *tasks; /* Master list of all tasks (nodes contain tcb_t) */
-    list_node_t *task_current; /* Node of currently running task */
+    list_node_t *task_current[MAX_HARTS]; /* Per-hart node of currently running task */
     jmp_buf context; /* Saved context of main kernel thread before scheduling */
     uint16_t next_tid; /* Monotonically increasing ID for next new task */
     uint16_t task_count; /* Cached count of active tasks for quick access */
@@ -123,6 +125,20 @@ extern kcb_t *kcb;
 
 /* Core Kernel and Task Management API */
 
+static inline list_node_t *get_task_current(void)
+{
+    const uint32_t mhartid = read_csr(mhartid);
+
+    return kcb->task_current[mhartid];
+}
+
+static inline void set_task_current(list_node_t *task)
+{
+    const uint32_t mhartid = read_csr(mhartid);
+
+    kcb->task_current[mhartid] = task;
+}
+
 /* System Control Functions */
 
 /* Prints a fatal error message and halts the system */
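With the helpers in place, every call site follows the same defensive pattern; the lines below are taken directly from the hunks that follow, not new API surface:

    /* Canonical access pattern used throughout kernel/task.c and
     * kernel/mutex.c after this change: validate the per-hart node,
     * then dereference it. */
    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
        panic(ERR_NO_TASKS);

    tcb_t *self = get_task_current()->data;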

kernel/main.c

Lines changed: 5 additions & 4 deletions

@@ -56,10 +56,10 @@ int32_t main(int32_t hartid)
     mo_task_spawn(idle_task, DEFAULT_STACK_SIZE);
 
     /* Verify that the application created at least one task.
-     * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never
+     * If 'get_task_current()' is still NULL, it means mo_task_spawn was never
      * successfully called.
      */
-    if (!kcb->task_current)
+    if (!get_task_current())
         panic(ERR_NO_TASKS);
 
     /* Save the kernel's context. This is a formality to establish a base
@@ -70,10 +70,11 @@ int32_t main(int32_t hartid)
     spin_lock(&finish_lock);
 
     /* Launch the first task.
-     * 'kcb->task_current' was set by the first call to mo_task_spawn.
+     * 'get_task_current()' was set by the first call to mo_task_spawn.
      * This function transfers control and does not return.
      */
-    tcb_t *first_task = kcb->task_current->data;
+
+    tcb_t *first_task = get_task_current()->data;
     if (!first_task)
         panic(ERR_NO_TASKS);

kernel/mutex.c

Lines changed: 9 additions & 9 deletions

@@ -49,11 +49,11 @@ static inline void cond_invalidate(cond_t *c)
  */
 static bool remove_self_from_waiters(list_t *waiters)
 {
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!waiters || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         return false;
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Search for and remove self from waiters list */
     list_node_t *curr = waiters->head->next;
@@ -71,11 +71,11 @@ static bool remove_self_from_waiters(list_t *waiters)
 /* Atomic block operation with enhanced error checking */
 static void mutex_block_atomic(list_t *waiters)
 {
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!waiters || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         panic(ERR_SEM_OPERATION);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Add to waiters list */
     if (unlikely(!list_pushback(waiters, self)))
@@ -223,7 +223,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
     }
 
     /* Slow path: must block with timeout using delay mechanism */
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (unlikely(!list_pushback(m->waiters, self))) {
         spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
@@ -384,7 +384,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
     if (unlikely(!mo_mutex_owned_by_current(m)))
         return ERR_NOT_OWNER;
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Atomically add to wait list */
     spin_lock_irqsave(&mutex_lock, &mutex_flags);
@@ -426,7 +426,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         return ERR_TIMEOUT;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Atomically add to wait list with timeout */
     spin_lock_irqsave(&mutex_lock, &mutex_flags);

kernel/task.c

Lines changed: 25 additions & 25 deletions

@@ -19,7 +19,7 @@ void _timer_tick_handler(void);
 /* Kernel-wide control block (KCB) */
 static kcb_t kernel_state = {
     .tasks = NULL,
-    .task_current = NULL,
+    .task_current = {NULL},
     .rt_sched = noop_rtsched,
     .timer_list = NULL, /* Managed by timer.c, but stored here. */
     .next_tid = 1, /* Start from 1 to avoid confusion with invalid ID 0 */
@@ -145,10 +145,10 @@ static void task_stack_check(void)
     if (!should_check)
         return;
 
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_STACK_CHECK);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (unlikely(!is_valid_task(self)))
         panic(ERR_STACK_CHECK);
 
@@ -366,10 +366,10 @@ void sched_dequeue_task(tcb_t *task)
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         return;
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
 
     /* Decrement time slice */
     if (current_task->time_slice > 0)
@@ -414,17 +414,17 @@ void sched_wakeup_task(tcb_t *task)
  */
 uint16_t sched_select_next_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
 
     /* Mark current task as ready if it was running */
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
     /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
+    list_node_t *start_node = get_task_current();
     list_node_t *node = start_node;
     int iterations = 0; /* Safety counter to prevent infinite loops */
 
@@ -441,7 +441,7 @@ uint16_t sched_select_next_task(void)
             continue;
 
         /* Found a ready task */
-        kcb->task_current = node;
+        set_task_current(node);
         task->state = TASK_RUNNING;
         task->time_slice = get_priority_timeslice(task->prio_level);
 
@@ -478,14 +478,14 @@ void dispatcher(void)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Save current context using dedicated HAL routine that handles both
      * execution context and processor state for context switching.
      * Returns immediately if this is the restore path.
      */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -505,20 +505,20 @@ void dispatch(void)
     hal_interrupt_tick();
 
     /* Restore next task context */
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Cooperative context switch */
 void yield(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return;
 
     /* Process deferred timer work during yield */
     process_deferred_timer_work();
 
     /* HAL context switching is used for preemptive scheduling. */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -530,7 +530,7 @@ void yield(void)
     list_foreach(kcb->tasks, delay_update, NULL);
 
     sched_select_next_task(); /* Use O(1) priority scheduler */
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Stack initialization with minimal overhead */
@@ -618,8 +618,8 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    if (!get_task_current())
+        set_task_current(node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -691,12 +691,12 @@ void mo_task_delay(uint16_t ticks)
         return;
 
     spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) {
         spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
        return;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Set delay and blocked state - scheduler will skip blocked tasks */
     self->delay = ticks;
@@ -726,7 +726,7 @@ int32_t mo_task_suspend(uint16_t id)
     }
 
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (get_task_current() == node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -813,9 +813,9 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
 
 uint16_t mo_task_id(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return 0;
-    return ((tcb_t *) kcb->task_current->data)->id;
+    return ((tcb_t *) get_task_current()->data)->id;
 }
 
 int32_t mo_task_idref(void *task_entry)
@@ -860,14 +860,14 @@ uint64_t mo_uptime(void)
 
 void _sched_block(queue_t *wait_q)
 {
-    if (unlikely(!wait_q || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!wait_q || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         panic(ERR_SEM_OPERATION);
 
     /* Process deferred timer work before blocking */
     process_deferred_timer_work();
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
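One detail worth noting in the kernel_state initializer at the top of this file: in C, a braced initializer zero-fills every array element it does not name, so all MAX_HARTS slots of task_current start out NULL and the "first spawn sets the current task" check in mo_task_spawn behaves the same on any hart. A two-line illustration of the rule:

    list_node_t *task_current[MAX_HARTS] = {NULL}; /* slots 1..7 are implicitly NULL too */
    /* equivalent to {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL} when MAX_HARTS == 8 */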
