@@ -6,6 +6,7 @@
  */
 
 #include <hal.h>
+#include <spinlock.h>
 #include <lib/queue.h>
 #include <sys/task.h>
 
@@ -106,6 +107,8 @@ static inline uint8_t extract_priority_level(uint16_t prio)
         return 4; /* Default to normal priority */
     }
 }
+static spinlock_t task_lock = SPINLOCK_INITIALIZER;
+static uint32_t task_flags = 0;
 
 static inline bool is_valid_task(tcb_t *task)
 {
@@ -591,12 +594,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     }
 
     /* Minimize critical section duration */
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
 
     if (!kcb->tasks) {
         kcb->tasks = list_create();
         if (!kcb->tasks) {
-            CRITICAL_LEAVE();
+            spin_unlock_irqrestore(&task_lock, task_flags);
             free(tcb->stack);
             free(tcb);
             panic(ERR_KCB_ALLOC);
@@ -605,7 +608,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     list_node_t *node = list_pushback(kcb->tasks, tcb);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         free(tcb->stack);
         free(tcb);
         panic(ERR_TCB_ALLOC);
@@ -618,7 +621,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     if (!kcb->task_current)
         kcb->task_current = node;
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     /* Initialize execution context outside critical section. */
     hal_context_init(&tcb->context, (size_t) tcb->stack, new_stack_size,
@@ -640,16 +643,16 @@ int32_t mo_task_cancel(uint16_t id)
     if (id == 0 || id == mo_task_id())
         return ERR_TASK_CANT_REMOVE;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *tcb = node->data;
     if (!tcb || tcb->state == TASK_RUNNING) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_CANT_REMOVE;
     }
 
@@ -665,7 +668,7 @@ int32_t mo_task_cancel(uint16_t id)
         }
     }
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     /* Free memory outside critical section */
     free(tcb->stack);
@@ -687,9 +690,9 @@ void mo_task_delay(uint16_t ticks)
     if (!ticks)
         return;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return;
     }
 
@@ -698,7 +701,7 @@ void mo_task_delay(uint16_t ticks)
     /* Set delay and blocked state - scheduler will skip blocked tasks */
     self->delay = ticks;
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     mo_task_yield();
 }
@@ -708,24 +711,24 @@ int32_t mo_task_suspend(uint16_t id)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task || (task->state != TASK_READY && task->state != TASK_RUNNING &&
                   task->state != TASK_BLOCKED)) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_CANT_SUSPEND;
     }
 
     task->state = TASK_SUSPENDED;
     bool is_current = (kcb->task_current == node);
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     if (is_current)
         mo_task_yield();
@@ -738,23 +741,22 @@ int32_t mo_task_resume(uint16_t id)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task || task->state != TASK_SUSPENDED) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_CANT_RESUME;
     }
 
     /* mark as ready - scheduler will find it */
     task->state = TASK_READY;
-
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
     return ERR_OK;
 }
 
@@ -763,16 +765,16 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
     if (id == 0 || !is_valid_priority(priority))
         return ERR_TASK_INVALID_PRIO;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
@@ -781,7 +783,8 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
     task->prio_level = extract_priority_level(priority);
     task->time_slice = get_priority_timeslice(task->prio_level);
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
+
     return ERR_OK;
 }
 
@@ -790,21 +793,21 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     task->rt_prio = priority;
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
     return ERR_OK;
 }
 
@@ -820,9 +823,9 @@ int32_t mo_task_idref(void *task_entry)
     if (!task_entry || !kcb->tasks)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry);
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     return node ? ((tcb_t *) node->data)->id : ERR_TASK_NOT_FOUND;
 }
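
For context, the commit depends on a <spinlock.h> interface that is not part of this diff. Below is a minimal sketch of what such an API might look like on a single-hart build, where the lock degenerates to interrupt masking; the spinlock_t layout and the hal_irq_save()/hal_irq_restore() hooks are assumptions for illustration, not the project's actual header.

/* Hypothetical sketch of the spinlock API this diff assumes; the real
 * <spinlock.h> is not shown in the commit. */
#include <stdint.h>

typedef struct {
    volatile uint32_t locked; /* 0 = free, 1 = held */
} spinlock_t;

#define SPINLOCK_INITIALIZER {0}

/* Assumed HAL hooks: save-and-disable / restore the interrupt state. */
uint32_t hal_irq_save(void);
void hal_irq_restore(uint32_t state);

/* Disable interrupts, stash the previous state in *flags, then acquire. */
static inline void spin_lock_irqsave(spinlock_t *lock, uint32_t *flags)
{
    *flags = hal_irq_save();
    while (__atomic_exchange_n(&lock->locked, 1u, __ATOMIC_ACQUIRE))
        ; /* busy-wait; only ever contended on SMP */
}

/* Release the lock, then restore the saved interrupt state. */
static inline void spin_unlock_irqrestore(spinlock_t *lock, uint32_t flags)
{
    __atomic_store_n(&lock->locked, 0u, __ATOMIC_RELEASE);
    hal_irq_restore(flags);
}

One design detail worth noting: the diff stores the saved IRQ state in a single file-scope task_flags shared by every caller. With interrupts disabled for the whole critical section this works on one core, but a stack-local uint32_t flags at each call site is the more defensive pattern, since a shared static would be clobbered if the lock were ever taken from two contexts.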