
Commit fcb0ee8

Add task termination infrastructure
Implement a deferred cleanup mechanism to safely terminate tasks that encounter unrecoverable faults. Tasks cannot immediately free their own resources since they execute on the stack being freed. The solution uses a two-phase approach: when termination is requested, the task is marked as suspended with a zombie flag indicating pending cleanup, then yields to another task. The scheduler detects zombie tasks during task selection and safely frees their resources from a different task's context. This infrastructure enables graceful handling of PMP violations and other unrecoverable task faults without panicking the entire system.
1 parent 0b17a78 commit fcb0ee8
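
A rough sketch of the intended use (the handler name and fault argument below are illustrative, not part of this commit; only task_terminate_current() and TASK_FLAG_ZOMBIE are introduced here): a trap handler for an unrecoverable fault hands the task to the deferred-cleanup path instead of panicking the kernel.

    #include <stdint.h>
    #include <sys/task.h>

    /* Hypothetical fault path: the faulting task is suspended, flagged
     * TASK_FLAG_ZOMBIE, and its stack and TCB are freed later by the
     * scheduler from another task's context. */
    static void handle_unrecoverable_fault(uint32_t fault_addr)
    {
        (void) fault_addr;        /* fault details would be reported here */
        task_terminate_current(); /* switches away; never returns */
    }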

2 files changed: 110 additions, 5 deletions

include/sys/task.h

Lines changed: 14 additions & 1 deletion
@@ -44,6 +44,9 @@ enum task_states {
    TASK_SUSPENDED /* Task paused/excluded from scheduling until resumed */
};

+/* Task Flags */
+#define TASK_FLAG_ZOMBIE 0x01 /* Task terminated, awaiting cleanup */
+
/* Priority Level Constants for Priority-Aware Time Slicing */
#define TASK_PRIORITY_LEVELS 8  /* Number of priority levels (0-7) */
#define TASK_HIGHEST_PRIORITY 0 /* Highest priority level */
@@ -83,7 +86,7 @@ typedef struct tcb {
    uint16_t delay; /* Ticks remaining for task in TASK_BLOCKED state */
    uint16_t id;    /* Unique task ID, assigned by kernel upon creation */
    uint8_t state;  /* Current lifecycle state (e.g., TASK_READY) */
-   uint8_t flags;  /* Task flags for future extensions (reserved) */
+   uint8_t flags;  /* Task flags (TASK_FLAG_ZOMBIE for deferred cleanup) */

    /* Real-time Scheduling Support */
    void *rt_prio;  /* Opaque pointer for custom real-time scheduler hook */
@@ -281,6 +284,16 @@ uint64_t mo_uptime(void);
 */
void _sched_block(queue_t *wait_q);

+/* Terminates the currently running task due to an unrecoverable fault.
+ *
+ * Marks the current task as suspended and sets the zombie flag for deferred
+ * cleanup. Forces an immediate context switch to another task. The marked
+ * task's resources will be freed by the scheduler after the switch completes.
+ *
+ * This function does not return - execution continues in another task.
+ */
+void task_terminate_current(void) __attribute__((noreturn));
+
/* Application Entry Point */

/* The main entry point for the user application.
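
Because flags is a plain bitmask, other parts of the kernel can test for pending cleanup directly; a minimal sketch (the helper below is illustrative, not an accessor added by this commit):

    #include <stdbool.h>
    #include <sys/task.h>

    /* Illustrative helper: true if a task has been terminated but its
     * resources have not yet been reclaimed by the scheduler. */
    static inline bool task_is_zombie(const tcb_t *task)
    {
        return task && (task->flags & TASK_FLAG_ZOMBIE);
    }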

kernel/task.c

Lines changed: 96 additions & 4 deletions
@@ -395,6 +395,48 @@ void sched_wakeup_task(tcb_t *task)
    }
}

+/* Helper to clean up zombie task resources from safe context */
+static void cleanup_zombie_task(tcb_t *zombie)
+{
+    if (!zombie || !(zombie->flags & TASK_FLAG_ZOMBIE))
+        return;
+
+    /* Find and remove task node from list */
+    CRITICAL_ENTER();
+
+    list_node_t *node = NULL;
+    list_node_t *iter = kcb->tasks->head;
+    while (iter) {
+        if (iter->data == zombie) {
+            node = iter;
+            break;
+        }
+        iter = iter->next;
+    }
+
+    if (node) {
+        list_remove(kcb->tasks, node);
+        kcb->task_count--;
+
+        /* Clear from cache */
+        for (int i = 0; i < TASK_CACHE_SIZE; i++) {
+            if (task_cache[i].task == zombie) {
+                task_cache[i].id = 0;
+                task_cache[i].task = NULL;
+            }
+        }
+    }
+
+    CRITICAL_LEAVE();
+
+    /* Free resources outside critical section */
+    if (zombie->mspace)
+        mo_memspace_destroy(zombie->mspace);
+
+    free(zombie->stack);
+    free(zombie);
+}
+
/* Efficient Round-Robin Task Selection with O(n) Complexity
 *
 * Selects the next ready task using circular traversal of the master task list.
@@ -417,6 +459,16 @@ uint16_t sched_select_next_task(void)

    tcb_t *current_task = kcb->task_current->data;

+    /* Clean up current task if it's a zombie before proceeding */
+    if (current_task->flags & TASK_FLAG_ZOMBIE) {
+        cleanup_zombie_task(current_task);
+        /* After cleanup, move to first real task to start fresh search */
+        kcb->task_current = kcb->tasks->head->next;
+        if (!kcb->task_current || !kcb->task_current->data)
+            panic(ERR_NO_TASKS);
+        current_task = kcb->task_current->data;
+    }
+
    /* Mark current task as ready if it was running */
    if (current_task->state == TASK_RUNNING)
        current_task->state = TASK_READY;
@@ -434,6 +486,16 @@ uint16_t sched_select_next_task(void)

        tcb_t *task = node->data;

+        /* Clean up zombie tasks during scheduling */
+        if (task->flags & TASK_FLAG_ZOMBIE) {
+            list_node_t *next_node = list_cnext(kcb->tasks, node);
+            cleanup_zombie_task(task);
+            node = next_node ? next_node : kcb->tasks->head;
+            if (!node || !node->data)
+                continue;
+            task = node->data;
+        }
+
        /* Skip non-ready tasks */
        if (task->state != TASK_READY)
            continue;
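
The traversal above relies on one ordering rule: the successor is captured with list_cnext() before cleanup_zombie_task() frees the node the walk is standing on, and iteration resumes from that saved pointer (wrapping to the head when needed). The same pattern, reduced to a standalone sketch with a generic node type (no kernel APIs assumed):

    #include <stdlib.h>

    struct node {
        struct node *next;
        int dead; /* stands in for the zombie flag */
    };

    /* Walk a NULL-terminated list and drop dead nodes. As in the scheduler
     * above, the successor is saved *before* the current node is freed, and
     * the walk resumes from that saved pointer. */
    static struct node *drop_dead_nodes(struct node *head)
    {
        struct node *prev = NULL;
        struct node *cur = head;

        while (cur) {
            struct node *next = cur->next; /* capture before free() */
            if (cur->dead) {
                if (prev)
                    prev->next = next;
                else
                    head = next;
                free(cur);
            } else {
                prev = cur;
            }
            cur = next;
        }
        return head;
    }
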
@@ -458,6 +520,7 @@ static int32_t noop_rtsched(void)
    return -1;
}

+
/* The main entry point from the system tick interrupt. */
void dispatcher(void)
{
@@ -496,8 +559,9 @@ void dispatch(void)
    uint32_t ready_count = 0;
    list_foreach(kcb->tasks, delay_update_batch, &ready_count);

-    /* Save old task before scheduler modifies task_current */
-    memspace_t *old_mspace = ((tcb_t *) kcb->task_current->data)->mspace;
+    /* Save old task's memory space for PMP context switching */
+    tcb_t *old_task = (tcb_t *) kcb->task_current->data;
+    memspace_t *old_mspace = old_task->mspace;

    /* Hook for real-time scheduler - if it selects a task, use it */
    if (kcb->rt_sched() < 0)
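
In both dispatch() and yield() the ordering matters: the outgoing task's memory space must be captured before the scheduler advances kcb->task_current, otherwise the "old" space read afterwards would already belong to the incoming task. A condensed sketch of the pattern (pmp_switch() is a placeholder for whatever the HAL actually exposes, not an API from this commit):

    /* Capture first ... */
    tcb_t *old_task = (tcb_t *) kcb->task_current->data;
    memspace_t *old_mspace = old_task->mspace;

    /* ... then let the scheduler move kcb->task_current ... */
    sched_select_next_task();

    /* ... and only now compare outgoing vs. incoming memory spaces. */
    tcb_t *new_task = (tcb_t *) kcb->task_current->data;
    if (old_mspace != new_task->mspace)
        pmp_switch(old_mspace, new_task->mspace); /* hypothetical HAL hook */
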
@@ -534,8 +598,9 @@ void yield(void)
    if (!kcb->preemptive)
        list_foreach(kcb->tasks, delay_update, NULL);

-    /* Save old task before scheduler modifies task_current */
-    memspace_t *old_mspace = ((tcb_t *) kcb->task_current->data)->mspace;
+    /* Save old task's memory space for PMP context switching */
+    tcb_t *old_task = (tcb_t *) kcb->task_current->data;
+    memspace_t *old_mspace = old_task->mspace;

    sched_select_next_task(); /* Use O(1) priority scheduler */

@@ -714,6 +779,33 @@ int32_t mo_task_cancel(uint16_t id)
    return ERR_OK;
}

+void task_terminate_current(void)
+{
+    NOSCHED_ENTER();
+
+    /* Verify we have a current task */
+    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+        NOSCHED_LEAVE();
+        panic(ERR_NO_TASKS);
+    }
+
+    tcb_t *self = kcb->task_current->data;
+
+    /* Mark as suspended to prevent re-scheduling */
+    self->state = TASK_SUSPENDED;
+
+    /* Set zombie flag for deferred cleanup */
+    self->flags |= TASK_FLAG_ZOMBIE;
+
+    NOSCHED_LEAVE();
+
+    /* Force immediate context switch - never returns */
+    _dispatch();
+
+    /* Unreachable */
+    __builtin_unreachable();
+}
+
void mo_task_yield(void)
{
    _yield();
