diff --git a/include/sys/task.h b/include/sys/task.h
index 33d0b60..b6e2af9 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -59,6 +59,12 @@ enum task_states {
 #define TASK_TIMESLICE_LOW 10 /* Low priority: longer slice */
 #define TASK_TIMESLICE_IDLE 15 /* Idle tasks: longest slice */
 
+/* Bitmap operations */
+#define bitmap_check(prio) (kcb->harts->ready_bitmap & (1U << (prio)))
+#define bitmap_set(prio) (kcb->harts->ready_bitmap |= (1U << (prio)))
+#define bitmap_clean(prio) (kcb->harts->ready_bitmap &= ~(1U << (prio)))
+
+
 /* Task Control Block (TCB)
  *
  * Contains all essential information about a single task, including saved
@@ -84,6 +90,28 @@ typedef struct tcb {
     void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */
 } tcb_t;
 
+/* Per-hart scheduler state */
+typedef struct sched {
+    uint32_t ready_bitmap; /* 8-bit priority bitmap */
+    list_t
+        *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */
+    uint16_t queue_counts[TASK_PRIORITY_LEVELS]; /* O(1) size tracking */
+
+    /* Weighted Round-Robin State per Priority Level */
+    list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
+    uint32_t
+        quantum_cycles[TASK_PRIORITY_LEVELS]; /* Scheduling cycles per level */
+
+    /* Performance Optimization */
+    uint8_t last_selected_prio; /* Cache hint for next selection */
+    uint32_t local_switches; /* Context switch count */
+
+    /* Hart-Specific Data */
+    tcb_t *current_task; /* Currently running task */
+    uint8_t hart_id; /* RISC-V hart identifier */
+
+} sched_t;
+
 /* Kernel Control Block (KCB)
  *
  * Singleton structure holding global kernel state, including task lists,
@@ -104,6 +132,9 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list; /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+
+    /* Per-hart scheduler management */
+    sched_t *harts;
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */
diff --git a/kernel/task.c b/kernel/task.c
index 59ffdae..1e8531a 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -15,6 +15,18 @@ static int32_t noop_rtsched(void);
 
 void _timer_tick_handler(void);
 
+/* Hart 0 scheduler instance */
+static sched_t hart0_sched = {
+    .ready_bitmap = 0,
+    .ready_queues = {NULL},
+    .rr_cursors = {NULL},
+    .quantum_cycles = {0},
+    .last_selected_prio = 0,
+    .local_switches = 0,
+    .current_task = NULL,
+    .hart_id = 0,
+};
+
 /* Kernel-wide control block (KCB) */
 static kcb_t kernel_state = {
     .tasks = NULL,
@@ -25,6 +37,7 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .harts = &hart0_sched,
 };
 kcb_t *kcb = &kernel_state;
 
@@ -345,7 +358,14 @@ static void sched_enqueue_task(tcb_t *task)
     task->time_slice = get_priority_timeslice(task->prio_level);
     task->state = TASK_READY;
 
-    /* Task selection is handled directly through the master task list */
+    /* Push the task onto its ready queue and set its priority's bitmap bit */
+    CRITICAL_ENTER();
+    if (!kcb->harts->ready_queues[task->prio_level])
+        kcb->harts->ready_queues[task->prio_level] = list_create();
+
+    list_pushback(kcb->harts->ready_queues[task->prio_level], task);
+    bitmap_set(task->prio_level);
+    CRITICAL_LEAVE();
 }
 
 /* Remove task from ready queues - state-based approach for compatibility */
@@ -355,9 +375,20 @@ void sched_dequeue_task(tcb_t *task)
         return;
 
    /* For tasks that need to be removed from ready state (suspended/cancelled),
-     * we rely on the state change. The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
+     * we rely on the state change. The scheduler also removes the task from
+     * its priority level's ready queue and clears the bitmap bit when that
+     * queue becomes empty.
+     *
+     * The task state itself is updated by `mo_task_suspend` or
+     * `mo_task_cancel`.
      */
+    CRITICAL_ENTER();
+    list_node_t *node = find_task_node_by_id(task->id);
+    list_remove(kcb->harts->ready_queues[task->prio_level], node);
+
+    if (!kcb->harts->ready_queues[task->prio_level]->length)
+        bitmap_clean(task->prio_level);
+    CRITICAL_LEAVE();
 }
 
 /* Handle time slice expiration for current task */
@@ -418,33 +449,29 @@ uint16_t sched_select_next_task(void)
 
     /* Mark current task as ready if it was running */
     if (current_task->state == TASK_RUNNING)
-        current_task->state = TASK_READY;
+        sched_enqueue_task(current_task);
 
     /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
-    list_node_t *node = start_node;
-    int iterations = 0; /* Safety counter to prevent infinite loops */
-
-    do {
-        /* Move to next task (circular) */
-        node = list_cnext(kcb->tasks, node);
-        if (!node || !node->data)
-            continue;
 
-        tcb_t *task = node->data;
+    /* Find the highest-priority level with ready tasks (lowest set bit) */
+    uint32_t bitmap = kcb->harts->ready_bitmap;
+    int highest_prio_level = 0;
+    for (; !(bitmap & 1U); highest_prio_level++, bitmap >>= 1)
+        ;
 
-        /* Skip non-ready tasks */
-        if (task->state != TASK_READY)
-            continue;
-
-        /* Found a ready task */
-        kcb->task_current = node;
-        task->state = TASK_RUNNING;
-        task->time_slice = get_priority_timeslice(task->prio_level);
+    /* Pop the front task of that queue and mark it TASK_RUNNING */
+    list_node_t *node =
+        kcb->harts->ready_queues[highest_prio_level]->head->next;
+    list_pop(kcb->harts->ready_queues[highest_prio_level]);
+    ((tcb_t *) node->data)->state = TASK_RUNNING;
+    kcb->task_current = node;
 
-        return task->id;
+    /* Clear the bitmap bit if the popped queue is now empty */
+    if (kcb->harts->ready_queues[highest_prio_level]->length == 0)
+        bitmap_clean(highest_prio_level);
 
-    } while (node != start_node && ++iterations < SCHED_IMAX);
+    if (node)
+        return ((tcb_t *) node->data)->id;
 
     /* No ready tasks found - this should not happen in normal operation */
     panic(ERR_NO_TASKS);
@@ -603,6 +630,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
         }
     }
 
+
     list_node_t *node = list_pushback(kcb->tasks, tcb);
     if (!node) {
         CRITICAL_LEAVE();
@@ -615,8 +643,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
-    if (!kcb->task_current)
+    /* If this is the first task, mark it TASK_RUNNING directly and do not
+     * place it on a ready queue */
+    if (!kcb->task_current) {
         kcb->task_current = node;
+        tcb->state = TASK_RUNNING;
+    }
 
     CRITICAL_LEAVE();
 
@@ -630,8 +662,10 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     /* Add to cache and mark ready */
     cache_task(tcb->id, tcb);
-    sched_enqueue_task(tcb);
 
+    /* Only enqueue tasks that are still in TASK_STOPPED state */
+    if (tcb->state == TASK_STOPPED)
+        sched_enqueue_task(tcb);
     return tcb->id;
 }
 
@@ -721,7 +755,6 @@ int32_t mo_task_suspend(uint16_t id)
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_SUSPEND;
     }
-    task->state = TASK_SUSPENDED;
 
     bool is_current = (kcb->task_current == node);
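For reference, the selection logic added to sched_select_next_task() reduces to a find-first-set over ready_bitmap: the lowest set bit names the highest-priority level with ready tasks. The following minimal, standalone sketch is not part of the patch; PRIO_LEVELS, the local ready_bitmap, and the __builtin_ctz shortcut are illustrative assumptions, and only the bitmap_set/bitmap_clean semantics mirror the code above.

#include <stdint.h>
#include <stdio.h>

#define PRIO_LEVELS 8 /* illustrative stand-in for TASK_PRIORITY_LEVELS */

static uint32_t ready_bitmap; /* bit N set => ready queue N is non-empty */

static void bitmap_set(unsigned prio)   { ready_bitmap |= (1U << prio); }
static void bitmap_clean(unsigned prio) { ready_bitmap &= ~(1U << prio); }

/* Level 0 is the highest priority, so the lowest set bit wins.  The patch
 * uses a shift-and-test loop; a count-trailing-zeros intrinsic gives the
 * same answer in constant time on targets that support it. */
static int highest_ready_prio(void)
{
    if (!ready_bitmap)
        return -1; /* no ready tasks: the kernel would panic or idle here */
    return __builtin_ctz(ready_bitmap);
}

int main(void)
{
    bitmap_set(3);
    bitmap_set(5);
    printf("pick %d\n", highest_ready_prio()); /* prints 3 */
    bitmap_clean(3);
    printf("pick %d\n", highest_ready_prio()); /* prints 5 */
    return 0;
}

Note that the patch's linear scan omits an empty-bitmap check; it appears to rely on sched_select_next_task() re-enqueuing the running task before the scan, so at least one bit is set whenever the scan runs.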