Commit 84dfbc9
Move task_lock spinlock into kcb struct
The task_lock spinlock existed primarily to protect the Kernel Control Block (kcb) and its internal data structures. Move the spinlock into the kcb_t struct as kcb_lock so that the state and the synchronization primitive guarding it live together. All uses of the standalone task_lock are replaced by accesses to kcb->kcb_lock, improving code clarity and the encapsulation of the kernel's core control block.
1 parent 55e1c2c commit 84dfbc9
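In sketch form, the change looks as follows (a condensed illustration using the names from the diff below; the struct body is elided and the surrounding context is not the full source):

    /* Before: a file-scope lock in kernel/task.c, physically separate
     * from the Kernel Control Block it guards. */
    static spinlock_t task_lock = SPINLOCK_INITIALIZER;

    /* After: the lock is a member of kcb_t itself (include/sys/task.h). */
    typedef struct {
        /* ... task and timer management fields ... */
        spinlock_t kcb_lock;
    } kcb_t;

    /* Call sites now reach the lock through the global kcb pointer: */
    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
    /* ... manipulate kcb->tasks, kcb->task_current, ... */
    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);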

2 files changed: +34 −30 lines
include/sys/task.h

Lines changed: 4 additions & 0 deletions
@@ -13,6 +13,7 @@
  */
 
 #include <hal.h>
+#include <spinlock.h>
 #include <lib/list.h>
 #include <lib/queue.h>
 
@@ -104,6 +105,9 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list;      /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+    /* Timers */
+
+    spinlock_t kcb_lock;
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */
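Because kcb_lock is now a member of kcb_t and is declared in this public header, other kernel code that includes it can guard its own accesses to kcb fields with the same lock instead of defining a private one. A minimal sketch under that assumption (the helper, its local flags variable, and the <sys/task.h> include path are hypothetical, not part of this commit):

    #include <spinlock.h>
    #include <sys/task.h> /* hypothetical include path for this header */

    /* Hypothetical helper: snapshot the global tick counter while holding
     * the same lock that kernel/task.c uses for the rest of the kcb. */
    static uint32_t kcb_ticks_snapshot(void)
    {
        uint32_t flags = 0; /* saved IRQ state, local to this caller */
        spin_lock_irqsave(&kcb->kcb_lock, &flags);
        uint32_t t = kcb->ticks;
        spin_unlock_irqrestore(&kcb->kcb_lock, flags);
        return t;
    }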

kernel/task.c

Lines changed: 30 additions & 30 deletions
@@ -26,6 +26,7 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .kcb_lock = SPINLOCK_INITIALIZER,
 };
 kcb_t *kcb = &kernel_state;
 
@@ -107,7 +108,6 @@ static inline uint8_t extract_priority_level(uint16_t prio)
         return 4; /* Default to normal priority */
     }
 }
-static spinlock_t task_lock = SPINLOCK_INITIALIZER;
 static uint32_t task_flags = 0;
 
 static inline bool is_valid_task(tcb_t *task)
@@ -594,12 +594,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     }
 
     /* Minimize critical section duration */
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
 
     if (!kcb->tasks) {
         kcb->tasks = list_create();
         if (!kcb->tasks) {
-            spin_unlock_irqrestore(&task_lock, task_flags);
+            spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
             free(tcb->stack);
             free(tcb);
             panic(ERR_KCB_ALLOC);
@@ -608,7 +608,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     list_node_t *node = list_pushback(kcb->tasks, tcb);
     if (!node) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         free(tcb->stack);
         free(tcb);
         panic(ERR_TCB_ALLOC);
@@ -621,7 +621,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     if (!kcb->task_current)
         kcb->task_current = node;
 
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
     /* Initialize execution context outside critical section. */
     hal_context_init(&tcb->context, (size_t) tcb->stack, new_stack_size,
@@ -643,16 +643,16 @@ int32_t mo_task_cancel(uint16_t id)
     if (id == 0 || id == mo_task_id())
         return ERR_TASK_CANT_REMOVE;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *tcb = node->data;
     if (!tcb || tcb->state == TASK_RUNNING) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_CANT_REMOVE;
     }
 
@@ -668,7 +668,7 @@ int32_t mo_task_cancel(uint16_t id)
         }
     }
 
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
     /* Free memory outside critical section */
     free(tcb->stack);
@@ -690,9 +690,9 @@ void mo_task_delay(uint16_t ticks)
     if (!ticks)
         return;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return;
     }
 
@@ -701,7 +701,7 @@ void mo_task_delay(uint16_t ticks)
     /* Set delay and blocked state - scheduler will skip blocked tasks */
     self->delay = ticks;
     self->state = TASK_BLOCKED;
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
     mo_task_yield();
 }
@@ -711,24 +711,24 @@ int32_t mo_task_suspend(uint16_t id)
     if (id == 0)
        return ERR_TASK_NOT_FOUND;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
        return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task || (task->state != TASK_READY && task->state != TASK_RUNNING &&
                   task->state != TASK_BLOCKED)) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_CANT_SUSPEND;
     }
 
     task->state = TASK_SUSPENDED;
     bool is_current = (kcb->task_current == node);
 
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
     if (is_current)
         mo_task_yield();
@@ -741,22 +741,22 @@ int32_t mo_task_resume(uint16_t id)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task || task->state != TASK_SUSPENDED) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_CANT_RESUME;
     }
 
     /* mark as ready - scheduler will find it */
     task->state = TASK_READY;
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
     return ERR_OK;
 }
 
@@ -765,16 +765,16 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
     if (id == 0 || !is_valid_priority(priority))
         return ERR_TASK_INVALID_PRIO;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
@@ -783,7 +783,7 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
     task->prio_level = extract_priority_level(priority);
     task->time_slice = get_priority_timeslice(task->prio_level);
 
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
     return ERR_OK;
 }
@@ -793,21 +793,21 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task) {
-        spin_unlock_irqrestore(&task_lock, task_flags);
+        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     task->rt_prio = priority;
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
     return ERR_OK;
 }
 
@@ -823,9 +823,9 @@ int32_t mo_task_idref(void *task_entry)
     if (!task_entry || !kcb->tasks)
         return ERR_TASK_NOT_FOUND;
 
-    spin_lock_irqsave(&task_lock, &task_flags);
+    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);
     list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry);
-    spin_unlock_irqrestore(&task_lock, task_flags);
+    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
 
     return node ? ((tcb_t *) node->data)->id : ERR_TASK_NOT_FOUND;
 }
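Every hunk above follows the same discipline: acquire kcb->kcb_lock with interrupts saved, do the minimum bookkeeping on kcb's lists and TCB fields, release the lock on every early-return path, and keep slow operations such as free() or a yield outside the critical section. A condensed sketch of that pattern, abbreviated from mo_task_cancel above (the detach loop is elided):

    spin_lock_irqsave(&kcb->kcb_lock, &task_flags);

    list_node_t *node = find_task_node_by_id(id);
    if (!node) {
        /* Unlock before every early return. */
        spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);
        return ERR_TASK_NOT_FOUND;
    }

    tcb_t *tcb = node->data;
    /* ... detach the task from kcb->tasks ... */

    spin_unlock_irqrestore(&kcb->kcb_lock, task_flags);

    /* Free memory outside critical section */
    free(tcb->stack);
    free(tcb);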
