Commit 2c92431

Use per-hart current task pointer in KCB
Previously, a single global pointer tracked the currently running task, which worked only for single-core systems. To support SMP, change the Kernel Control Block (KCB) to maintain an array of current-task pointers, one per hart. Add get_task_current() and set_task_current() helpers to retrieve and update the current task for the executing hart, and convert kernel and HAL code to use them instead of the single global pointer, ensuring correct task tracking on each hart.
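In essence, each hart indexes the current-task array with its own mhartid, so the slot is private to the executing hart and the accessors need no locking. A minimal standalone sketch of the pattern (illustrative only; MAX_HARTS mirrors this commit, while the hart_id()/demo_* names are hypothetical and not part of the tree):

#include <stdint.h>

#define MAX_HARTS 8

/* One current-task slot per hart; hart N only ever touches slot N,
 * so these accesses need no lock. */
static void *task_current[MAX_HARTS];

/* Read the hart ID from the mhartid CSR (M-mode RISC-V). */
static inline uint32_t hart_id(void)
{
    uint32_t id;
    asm volatile("csrr %0, mhartid" : "=r"(id));
    return id;
}

static inline void *demo_get_task_current(void)
{
    return task_current[hart_id()];
}

static inline void demo_set_task_current(void *task)
{
    task_current[hart_id()] = task;
}

State that is genuinely shared across harts (the master task list, spawn bookkeeping) still goes through kcb->kcb_lock, as the hunks below show.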
1 parent 5248c14 commit 2c92431

File tree

5 files changed: +52 −35 lines


arch/riscv/hal.c

Lines changed: 1 addition & 1 deletion
@@ -325,7 +325,7 @@ void hal_timer_disable(void)
  */
 void hal_interrupt_tick(void)
 {
-    tcb_t *task = kcb->task_current->data;
+    tcb_t *task = get_task_current()->data;
     if (unlikely(!task))
         hal_panic(); /* Fatal error - invalid task state */

include/sys/task.h

Lines changed: 17 additions & 1 deletion
@@ -61,6 +61,8 @@ typedef struct {
     void *rt_prio; /* Opaque pointer for a custom real-time scheduler hook. */
 } tcb_t;
 
+#define MAX_HARTS 8
+
 /* Kernel Control Block (KCB)
  *
  * A singleton structure that holds the global state of the kernel, including
@@ -69,7 +71,7 @@ typedef struct {
 typedef struct {
     /* Task Management */
     list_t *tasks; /* The master list of all tasks (nodes contain tcb_t). */
-    list_node_t *task_current; /* Node of the currently running task. */
+    list_node_t *task_current[MAX_HARTS]; /* Per-hart node of the running task. */
     /* Saved context of the main kernel thread before scheduling starts. */
     jmp_buf context;
     uint16_t next_tid; /* Monotonically increasing ID for the next new task. */
@@ -113,6 +115,20 @@ extern kcb_t *kcb;
 
 /* Core Kernel and Task Management API */
 
+static inline list_node_t *get_task_current(void)
+{
+    const uint32_t mhartid = read_csr(mhartid);
+
+    return kcb->task_current[mhartid];
+}
+
+static inline void set_task_current(list_node_t *task)
+{
+    const uint32_t mhartid = read_csr(mhartid);
+
+    kcb->task_current[mhartid] = task;
+}
+
 /* Prints a fatal error message and halts the system. */
 void panic(int32_t ecode);
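The new helpers assume a read_csr() macro is in scope when task.h is compiled; its real definition lives elsewhere in the tree. For reference, a typical RISC-V form (a sketch assuming GCC/Clang statement expressions, not necessarily the project's exact macro):

/* Illustrative read_csr(); #reg stringizes the CSR name directly
 * into the csrr instruction. */
#define read_csr(reg)                                \
    ({                                               \
        uint32_t __v;                                \
        asm volatile("csrr %0, " #reg : "=r"(__v));  \
        __v;                                         \
    })

Note that mhartid is a machine-mode CSR, so these accessors presume the kernel executes in M-mode.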

kernel/main.c

Lines changed: 5 additions & 4 deletions
@@ -56,10 +56,10 @@ int32_t main(int32_t hartid)
     mo_task_spawn(idle_task, DEFAULT_STACK_SIZE);
 
     /* Verify that the application created at least one task.
-     * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never
+     * If get_task_current() still returns NULL, mo_task_spawn was never
      * successfully called.
      */
-    if (!kcb->task_current)
+    if (!get_task_current())
         panic(ERR_NO_TASKS);
 
     /* Save the kernel's context. This is a formality to establish a base
@@ -70,10 +70,11 @@ int32_t main(int32_t hartid)
     spin_lock(&finish_lock);
 
     /* Launch the first task.
-     * 'kcb->task_current' was set by the first call to mo_task_spawn.
+     * The current task was set by the first call to mo_task_spawn.
      * This function transfers control and does not return.
      */
-    tcb_t *first_task = kcb->task_current->data;
+
+    tcb_t *first_task = get_task_current()->data;
     if (!first_task)
         panic(ERR_NO_TASKS);

kernel/mutex.c

Lines changed: 5 additions & 5 deletions
@@ -47,10 +47,10 @@ static list_node_t *find_node_by_data(list_t *list, void *data)
  */
 static void mutex_block_atomic(list_t *waiters)
 {
-    if (!waiters || !kcb || !kcb->task_current || !kcb->task_current->data)
+    if (!waiters || !kcb || !get_task_current() || !get_task_current()->data)
         panic(ERR_SEM_OPERATION);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     /* Add to waiters list */
     if (!list_pushback(waiters, self))
@@ -198,7 +198,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
     }
 
     /* Must block with timeout */
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (!list_pushback(m->waiters, self)) {
         spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
@@ -353,7 +353,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
 
     /* Atomically add to wait list and block */
     spin_lock_irqsave(&mutex_lock, &mutex_flags);
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (!list_pushback(c->waiters, self)) {
         spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
@@ -399,7 +399,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
 
     /* Atomically add to wait list */
     spin_lock_irqsave(&mutex_lock, &mutex_flags);
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (!list_pushback(c->waiters, self)) {
         spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);

kernel/task.c

Lines changed: 24 additions & 24 deletions
@@ -23,7 +23,7 @@ void _timer_tick_handler(void);
  */
 static kcb_t kernel_state = {
     .tasks = NULL,
-    .task_current = NULL,
+    .task_current = {NULL},
     .rt_sched = noop_rtsched,
     .timer_list = NULL, /* Managed by timer.c, but stored here. */
     .next_tid = 1, /* Start from 1 to avoid confusion with invalid ID 0 */
@@ -85,10 +85,10 @@ static void task_stack_check(void)
     if (!should_check)
         return;
 
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_STACK_CHECK);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     if (unlikely(!is_valid_task(self)))
         panic(ERR_STACK_CHECK);
 
@@ -205,7 +205,7 @@ void _yield(void) __attribute__((weak, alias("yield")));
 /* Scheduler with hint-based ready task search */
 static list_node_t *find_next_ready_task(void)
 {
-    if (unlikely(!kcb->task_current))
+    if (unlikely(!get_task_current()))
         return NULL;
 
     list_node_t *node;
@@ -225,7 +225,7 @@ static list_node_t *find_next_ready_task(void)
         }
     }
 
-    node = kcb->task_current;
+    node = get_task_current();
     while (itcnt++ < SCHED_IMAX) {
         node = list_cnext(kcb->tasks, node);
         if (unlikely(!node || !node->data))
@@ -258,11 +258,11 @@ static list_node_t *find_next_ready_task(void)
 /* Scheduler with reduced overhead */
 static uint16_t schedule_next_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Mark the previously running task as ready for the next cycle. */
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = get_task_current()->data;
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
@@ -273,7 +273,7 @@ static uint16_t schedule_next_task(void)
     }
 
     /* Update scheduler state */
-    kcb->task_current = next_node;
+    set_task_current(next_node);
     tcb_t *next_task = next_node->data;
     next_task->state = TASK_RUNNING;
 
@@ -297,11 +297,11 @@ void dispatcher(void)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         panic(ERR_NO_TASKS);
 
     /* Return from longjmp: context is restored, continue task execution. */
-    if (setjmp(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (setjmp(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
     task_stack_check();
@@ -313,16 +313,16 @@ void dispatch(void)
     }
 
     hal_interrupt_tick();
-    longjmp(((tcb_t *) kcb->task_current->data)->context, 1);
+    longjmp(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Cooperative context switch */
 void yield(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return;
 
-    if (setjmp(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (setjmp(((tcb_t *) get_task_current()->data)->context) != 0)
         return;
 
     task_stack_check();
@@ -332,7 +332,7 @@ void yield(void)
     list_foreach(kcb->tasks, delay_update, NULL);
 
     schedule_next_task();
-    longjmp(((tcb_t *) kcb->task_current->data)->context, 1);
+    longjmp(((tcb_t *) get_task_current()->data)->context, 1);
 }
 
 /* Stack initialization with minimal overhead */
@@ -411,8 +411,8 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++;
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    if (!get_task_current())
+        set_task_current(node);
 
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
@@ -483,12 +483,12 @@ void mo_task_delay(uint16_t ticks)
         return;
 
     spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data)) {
         spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
     self->delay = ticks;
     self->state = TASK_BLOCKED;
     spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
@@ -516,7 +516,7 @@ int32_t mo_task_suspend(uint16_t id)
     }
 
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (get_task_current() == node);
 
     /* Clear ready hint if suspending that task */
     if (kcb->last_ready_hint == node)
@@ -603,9 +603,9 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
 
 uint16_t mo_task_id(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !get_task_current() || !get_task_current()->data))
         return 0;
-    return ((tcb_t *) kcb->task_current->data)->id;
+    return ((tcb_t *) get_task_current()->data)->id;
 }
 
 int32_t mo_task_idref(void *task_entry)
@@ -647,11 +647,11 @@ uint64_t mo_uptime(void)
 
 void _sched_block(queue_t *wait_q)
 {
-    if (unlikely(!wait_q || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!wait_q || !kcb || !get_task_current() ||
+                 !get_task_current()->data))
         panic(ERR_SEM_OPERATION);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = get_task_current()->data;
 
     if (queue_enqueue(wait_q, self) != 0) {
         panic(ERR_SEM_OPERATION);
