Skip to content

Commit 9683192

Browse files
committed
Replace interrupt masking with spinlock in task management for SMP support
The original task management code used CRITICAL_ENTER() / CRITICAL_LEAVE() and NOSCHED_ENTER() / NOSCHED_LEAVE() to protect critical sections by disabling interrupts, which was sufficient for single-core systems. To support SMP, these macros are replaced with a spinlock based on RV32A atomic instructions. This ensures that multiple harts can safely access and modify shared task data such as ready queues, priority values, and task control blocks. This change is essential for enabling multi-hart task scheduling without introducing race conditions in the kernel task subsystem.
1 parent 30330c2 commit 9683192

File tree

kernel/task.c

1 file changed: +33 additions, −30 deletions

kernel/task.c

Lines changed: 33 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
*/
77

88
#include <hal.h>
9+
#include <spinlock.h>
910
#include <lib/queue.h>
1011
#include <sys/task.h>
1112

@@ -106,6 +107,8 @@ static inline uint8_t extract_priority_level(uint16_t prio)
106107
return 4; /* Default to normal priority */
107108
}
108109
}
110+
static spinlock_t task_lock = SPINLOCK_INITIALIZER;
111+
static uint32_t task_flags = 0;
109112

110113
static inline bool is_valid_task(tcb_t *task)
111114
{
@@ -591,12 +594,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
591594
}
592595

593596
/* Minimize critical section duration */
594-
CRITICAL_ENTER();
597+
spin_lock_irqsave(&task_lock, &task_flags);
595598

596599
if (!kcb->tasks) {
597600
kcb->tasks = list_create();
598601
if (!kcb->tasks) {
599-
CRITICAL_LEAVE();
602+
spin_unlock_irqrestore(&task_lock, task_flags);
600603
free(tcb->stack);
601604
free(tcb);
602605
panic(ERR_KCB_ALLOC);
@@ -605,7 +608,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
605608

606609
list_node_t *node = list_pushback(kcb->tasks, tcb);
607610
if (!node) {
608-
CRITICAL_LEAVE();
611+
spin_unlock_irqrestore(&task_lock, task_flags);
609612
free(tcb->stack);
610613
free(tcb);
611614
panic(ERR_TCB_ALLOC);
@@ -618,7 +621,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
618621
if (!kcb->task_current)
619622
kcb->task_current = node;
620623

621-
CRITICAL_LEAVE();
624+
spin_unlock_irqrestore(&task_lock, task_flags);
622625

623626
/* Initialize execution context outside critical section. */
624627
hal_context_init(&tcb->context, (size_t) tcb->stack, new_stack_size,
@@ -640,16 +643,16 @@ int32_t mo_task_cancel(uint16_t id)
640643
if (id == 0 || id == mo_task_id())
641644
return ERR_TASK_CANT_REMOVE;
642645

643-
CRITICAL_ENTER();
646+
spin_lock_irqsave(&task_lock, &task_flags);
644647
list_node_t *node = find_task_node_by_id(id);
645648
if (!node) {
646-
CRITICAL_LEAVE();
649+
spin_unlock_irqrestore(&task_lock, task_flags);
647650
return ERR_TASK_NOT_FOUND;
648651
}
649652

650653
tcb_t *tcb = node->data;
651654
if (!tcb || tcb->state == TASK_RUNNING) {
652-
CRITICAL_LEAVE();
655+
spin_unlock_irqrestore(&task_lock, task_flags);
653656
return ERR_TASK_CANT_REMOVE;
654657
}
655658

@@ -665,7 +668,7 @@ int32_t mo_task_cancel(uint16_t id)
665668
}
666669
}
667670

668-
CRITICAL_LEAVE();
671+
spin_unlock_irqrestore(&task_lock, task_flags);
669672

670673
/* Free memory outside critical section */
671674
free(tcb->stack);
@@ -687,9 +690,9 @@ void mo_task_delay(uint16_t ticks)
687690
if (!ticks)
688691
return;
689692

690-
NOSCHED_ENTER();
693+
spin_lock_irqsave(&task_lock, &task_flags);
691694
if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
692-
NOSCHED_LEAVE();
695+
spin_unlock_irqrestore(&task_lock, task_flags);
693696
return;
694697
}
695698

@@ -698,7 +701,7 @@ void mo_task_delay(uint16_t ticks)
698701
/* Set delay and blocked state - scheduler will skip blocked tasks */
699702
self->delay = ticks;
700703
self->state = TASK_BLOCKED;
701-
NOSCHED_LEAVE();
704+
spin_unlock_irqrestore(&task_lock, task_flags);
702705

703706
mo_task_yield();
704707
}
@@ -708,24 +711,24 @@ int32_t mo_task_suspend(uint16_t id)
708711
if (id == 0)
709712
return ERR_TASK_NOT_FOUND;
710713

711-
CRITICAL_ENTER();
714+
spin_lock_irqsave(&task_lock, &task_flags);
712715
list_node_t *node = find_task_node_by_id(id);
713716
if (!node) {
714-
CRITICAL_LEAVE();
717+
spin_unlock_irqrestore(&task_lock, task_flags);
715718
return ERR_TASK_NOT_FOUND;
716719
}
717720

718721
tcb_t *task = node->data;
719722
if (!task || (task->state != TASK_READY && task->state != TASK_RUNNING &&
720723
task->state != TASK_BLOCKED)) {
721-
CRITICAL_LEAVE();
724+
spin_unlock_irqrestore(&task_lock, task_flags);
722725
return ERR_TASK_CANT_SUSPEND;
723726
}
724727

725728
task->state = TASK_SUSPENDED;
726729
bool is_current = (kcb->task_current == node);
727730

728-
CRITICAL_LEAVE();
731+
spin_unlock_irqrestore(&task_lock, task_flags);
729732

730733
if (is_current)
731734
mo_task_yield();
@@ -738,23 +741,22 @@ int32_t mo_task_resume(uint16_t id)
738741
if (id == 0)
739742
return ERR_TASK_NOT_FOUND;
740743

741-
CRITICAL_ENTER();
744+
spin_lock_irqsave(&task_lock, &task_flags);
742745
list_node_t *node = find_task_node_by_id(id);
743746
if (!node) {
744-
CRITICAL_LEAVE();
747+
spin_unlock_irqrestore(&task_lock, task_flags);
745748
return ERR_TASK_NOT_FOUND;
746749
}
747750

748751
tcb_t *task = node->data;
749752
if (!task || task->state != TASK_SUSPENDED) {
750-
CRITICAL_LEAVE();
753+
spin_unlock_irqrestore(&task_lock, task_flags);
751754
return ERR_TASK_CANT_RESUME;
752755
}
753756

754757
/* mark as ready - scheduler will find it */
755758
task->state = TASK_READY;
756-
757-
CRITICAL_LEAVE();
759+
spin_unlock_irqrestore(&task_lock, task_flags);
758760
return ERR_OK;
759761
}
760762

@@ -763,16 +765,16 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
763765
if (id == 0 || !is_valid_priority(priority))
764766
return ERR_TASK_INVALID_PRIO;
765767

766-
CRITICAL_ENTER();
768+
spin_lock_irqsave(&task_lock, &task_flags);
767769
list_node_t *node = find_task_node_by_id(id);
768770
if (!node) {
769-
CRITICAL_LEAVE();
771+
spin_unlock_irqrestore(&task_lock, task_flags);
770772
return ERR_TASK_NOT_FOUND;
771773
}
772774

773775
tcb_t *task = node->data;
774776
if (!task) {
775-
CRITICAL_LEAVE();
777+
spin_unlock_irqrestore(&task_lock, task_flags);
776778
return ERR_TASK_NOT_FOUND;
777779
}
778780

@@ -781,7 +783,8 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
781783
task->prio_level = extract_priority_level(priority);
782784
task->time_slice = get_priority_timeslice(task->prio_level);
783785

784-
CRITICAL_LEAVE();
786+
spin_unlock_irqrestore(&task_lock, task_flags);
787+
785788
return ERR_OK;
786789
}
787790

@@ -790,21 +793,21 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
790793
if (id == 0)
791794
return ERR_TASK_NOT_FOUND;
792795

793-
CRITICAL_ENTER();
796+
spin_lock_irqsave(&task_lock, &task_flags);
794797
list_node_t *node = find_task_node_by_id(id);
795798
if (!node) {
796-
CRITICAL_LEAVE();
799+
spin_unlock_irqrestore(&task_lock, task_flags);
797800
return ERR_TASK_NOT_FOUND;
798801
}
799802

800803
tcb_t *task = node->data;
801804
if (!task) {
802-
CRITICAL_LEAVE();
805+
spin_unlock_irqrestore(&task_lock, task_flags);
803806
return ERR_TASK_NOT_FOUND;
804807
}
805808

806809
task->rt_prio = priority;
807-
CRITICAL_LEAVE();
810+
spin_unlock_irqrestore(&task_lock, task_flags);
808811
return ERR_OK;
809812
}
810813

@@ -820,9 +823,9 @@ int32_t mo_task_idref(void *task_entry)
820823
if (!task_entry || !kcb->tasks)
821824
return ERR_TASK_NOT_FOUND;
822825

823-
CRITICAL_ENTER();
826+
spin_lock_irqsave(&task_lock, &task_flags);
824827
list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry);
825-
CRITICAL_LEAVE();
828+
spin_unlock_irqrestore(&task_lock, task_flags);
826829

827830
return node ? ((tcb_t *) node->data)->id : ERR_TASK_NOT_FOUND;
828831
}

0 commit comments

Comments (0)