From 69421e4b85c9be9c5e9ef164229222c07dca3b65 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 17 Sep 2025 15:30:25 +0800
Subject: [PATCH 1/4] Add data structures for O(1) scheduler

This commit extends the core scheduler data structures to support the new
O(1) scheduler design.

New fields in tcb_t:
- rq_node: embedded list node for ready-queue membership used during task
  state transitions. This avoids redundant malloc/free for per-enqueue/dequeue
  nodes by tying the node's lifetime to the task control block.

New fields in kcb_t:
- ready_bitmap: 8-bit bitmap tracking which priority levels have runnable
  tasks.
- ready_queues[]: per-priority ready queues for O(1) task selection.
- queue_counts[]: per-priority runnable task counters used for bookkeeping
  and consistency checks.
- rr_cursors[]: round-robin cursor per priority level to support fair
  selection within the same priority.

These additions are structural only and prepare the scheduler for O(1)
ready-queue operations; they do not change behavior yet.
---
 include/sys/task.h | 13 +++++++++++++
 kernel/task.c      |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/include/sys/task.h b/include/sys/task.h
index 33d0b601..1b4ff9a5 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -82,6 +82,10 @@ typedef struct tcb {
 
     /* Real-time Scheduling Support */
     void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */
+
+    /* State transition support */
+    /* Ready queue membership node (only one per task) */
+    list_node_t rq_node;
 } tcb_t;
 
 /* Kernel Control Block (KCB)
@@ -104,6 +108,15 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list;      /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+
+    /* Scheduling attributes */
+    uint8_t ready_bitmap; /* 8-bit priority bitmap */
+    list_t
+        *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */
+    uint16_t queue_counts[TASK_PRIORITY_LEVELS]; /* O(1) size tracking */
+
+    /* Weighted Round-Robin State per Priority Level */
+    list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */
diff --git a/kernel/task.c b/kernel/task.c
index 59ffdae5..f63b4ed6 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -25,6 +25,9 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .ready_bitmap = 0,
+    .ready_queues = {NULL},
+    .rr_cursors = {NULL},
 };
 kcb_t *kcb = &kernel_state;
 

From d2dcce231ee4491c7f72b13fdc3055aad4498088 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:21:31 +0800
Subject: [PATCH 2/4] Add three macros for ready queue bitmap operations

When a task is enqueued into or dequeued from a ready queue, the bitmap that
tracks which ready queues hold runnable tasks must be updated. These three
macros will be used by sched_enqueue_task() and sched_dequeue_task() to
improve readability and keep the bitmap consistent with the queues.
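For context, a minimal sketch of how such a bitmap is typically consumed once
the macros below are in place: the lowest set bit identifies the
highest-priority non-empty ready queue, assuming lower numeric priority values
mean higher priority (the timeslice table in patch 4 lists priority 7 as idle)
and TASK_PRIORITY_LEVELS <= 8. The helper name is hypothetical and does not
appear anywhere in this series.

    #include <stdint.h>

    /* Hypothetical helper, for illustration only: map the ready bitmap to the
     * highest-priority level that currently has runnable tasks. */
    static inline int highest_ready_priority(uint8_t ready_bitmap)
    {
        if (!ready_bitmap)
            return -1; /* no priority level has runnable tasks */
        /* Lowest set bit = numerically smallest level = highest priority. */
        return __builtin_ctz(ready_bitmap);
    }

On most targets this lowers to a single instruction or a short branch-free
sequence, so the lookup stays constant-time no matter how many tasks exist.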
---
 kernel/task.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index f63b4ed6..ba8e9ea7 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -31,6 +31,11 @@ static kcb_t kernel_state = {
 };
 kcb_t *kcb = &kernel_state;
 
+/* Ready-queue bitmap operations */
+#define BITMAP_CHECK(prio) (kcb->ready_bitmap & (1U << (prio)))
+#define BITMAP_SET(prio) (kcb->ready_bitmap |= (1U << (prio)))
+#define BITMAP_CLEAN(prio) (kcb->ready_bitmap &= ~(1U << (prio)))
+
 /* timer work management for reduced latency */
 static volatile uint32_t timer_work_pending = 0;    /* timer work types */
 static volatile uint32_t timer_work_generation = 0; /* counter for coalescing */

From 5d75ddfb5faf622f380c9e86a3b8a16527e56b2c Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 19 Nov 2025 22:50:48 +0800
Subject: [PATCH 3/4] Add intrusive node helpers for scheduler ready queues

This commit introduces two helper functions for intrusive list usage, where
each task embeds its own list node instead of relying on per-operation
malloc/free.

The new APIs allow the scheduler to manipulate ready-queue nodes directly:

- list_pushback_node(): append an existing node to the end of the list
  (before the tail sentinel) without allocating memory.
- list_remove_node(): remove a node from the list without freeing it,
  allowing the caller to control the node's lifetime.

These helpers will be used by the upcoming O(1) scheduler enqueue/dequeue
paths, which require embedded list nodes stored in tcb_t.
---
 include/lib/list.h | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c83..ce791a2b 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data)
     return node;
 }
 
+/* Append a caller-owned node to the list without allocating */
+static inline void list_pushback_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || target->next))
+        return;
+
+    target->next = list->tail;
+
+    /* Insert before tail sentinel */
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail)
+        prev = prev->next;
+
+    prev->next = target;
+    list->length++;
+    return;
+}
+
 static inline void *list_pop(list_t *list)
 {
     if (unlikely(list_is_empty(list)))
@@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target)
     return data;
 }
 
+/* Remove a node from the list without freeing it */
+static inline void list_remove_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || list_is_empty(list)))
+        return;
+
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail && prev->next != target)
+        prev = prev->next;
+
+    if (unlikely(prev->next != target))
+        return; /* node not found */
+
+    prev->next = target->next;
+    target->next = NULL;
+    list->length--;
+    return;
+}
+
 /* Iteration */
 
 /* Callback should return non-NULL to stop early, NULL to continue */

From 9ca3f90c8be21af2b7942f2a1a20c28852b2407f Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:39:29 +0800
Subject: [PATCH 4/4] Refactor dequeue/enqueue logic for O(1) scheduler

This commit refactors sched_enqueue_task() and sched_dequeue_task() to use
the per-priority ready queues and the embedded rq_node stored in tcb_t,
instead of relying only on task state inspection.

Tasks are now explicitly added to and removed from the appropriate ready
queue, and queue_counts, rr_cursors, and the ready_bitmap are updated
accordingly.
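One consequence of the intrusive design above is that list_pushback_node() and
list_remove_node() never touch node->data, so a caller holding only a
ready-queue node needs some way to recover the owning task. The series itself
always passes the tcb_t around, so no such helper appears in the diff below;
the following offsetof-based sketch (hypothetical name, not part of the
patches) shows how one could look if it were needed.

    #include <stddef.h>

    /* Hypothetical helper: recover the task that embeds a given ready-queue
     * node. Relies on rq_node being a member of tcb_t, as added in patch 1. */
    static inline tcb_t *tcb_from_rq_node(list_node_t *node)
    {
        return (tcb_t *) ((char *) node - offsetof(tcb_t, rq_node));
    }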
---
 kernel/task.c | 96 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 71 insertions(+), 25 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index ba8e9ea7..e405c833 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -76,7 +76,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = {
     TASK_TIMESLICE_IDLE /* Priority 7: Idle */
 };
 
-/* Mark task as ready (state-based) */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task);
 
 /* Utility and Validation Functions */
@@ -343,29 +343,67 @@ void _yield(void) __attribute__((weak, alias("yield")));
  * practical performance with strong guarantees for fairness and reliability.
  */
 
-/* Add task to ready state - simple state-based approach */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
+    uint8_t prio_level = task->prio_level;
+
     /* Ensure task has appropriate time slice for its priority */
-    task->time_slice = get_priority_timeslice(task->prio_level);
+    task->time_slice = get_priority_timeslice(prio_level);
     task->state = TASK_READY;
 
-    /* Task selection is handled directly through the master task list */
+    list_t **rq = &kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    if (!*rq)
+        *rq = list_create();
+
+    list_pushback_node(*rq, &task->rq_node);
+
+    /* Update task count in ready queue */
+    kcb->queue_counts[prio_level]++;
+
+    /* Set the cursor on first insertion */
+    if (!*cursor)
+        *cursor = &task->rq_node;
+
+    /* Advance the cursor to the new node when it rests on the running task */
+    if (kcb->task_current && *cursor == &((tcb_t *) kcb->task_current->data)->rq_node)
+        *cursor = &task->rq_node;
+
+    BITMAP_SET(prio_level);
+    return;
 }
 
-/* Remove task from ready queues - state-based approach for compatibility */
-void sched_dequeue_task(tcb_t *task)
+/* Remove task from its per-priority ready queue */
+static __attribute__((unused)) void sched_dequeue_task(tcb_t *task)
{
-    if (unlikely(!task))
+    if (unlikely(!task || !kcb->ready_queues[task->prio_level]))
         return;
 
-    /* For tasks that need to be removed from ready state (suspended/cancelled),
-     * we rely on the state change. The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
-     */
+    uint8_t prio_level = task->prio_level;
+
+    /* A task leaving the READY/RUNNING state must also be removed from the
+     * corresponding ready queue. */
+    list_t *rq = kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    /* Move the cursor off the node being removed so it is not left
+     * pointing at a detached node. */
+    if (&task->rq_node == *cursor)
+        *cursor = list_cnext(rq, *cursor);
+
+    /* Detach the embedded node from the ready queue */
+    list_remove_node(rq, &task->rq_node);
+
+    /* Update task count in ready queue */
+    if (!--kcb->queue_counts[prio_level]) {
+        *cursor = NULL;
+        BITMAP_CLEAN(prio_level);
+    }
+    return;
 }
 
 /* Handle time slice expiration for current task */
@@ -386,20 +424,15 @@ void sched_tick_current_task(void)
     }
 }
 
-/* Task wakeup - simple state transition approach */
+/* Task wakeup and enqueue into ready queue */
 void sched_wakeup_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* Mark task as ready - scheduler will find it during round-robin traversal
-     */
-    if (task->state != TASK_READY) {
-        task->state = TASK_READY;
-        /* Ensure task has time slice */
-        if (task->time_slice == 0)
-            task->time_slice = get_priority_timeslice(task->prio_level);
-    }
+    /* Enqueue task into ready queue */
+    if (task->state != TASK_READY && task->state != TASK_RUNNING)
+        sched_enqueue_task(task);
 }
 
 /* Efficient Round-Robin Task Selection with O(n) Complexity
@@ -673,6 +706,10 @@ int32_t mo_task_cancel(uint16_t id)
         }
     }
 
+    /* Remove the cancelled task from its ready queue */
+    if (tcb->state == TASK_READY)
+        sched_dequeue_task(tcb);
+
     CRITICAL_LEAVE();
 
     /* Free memory outside critical section */
@@ -703,7 +740,9 @@ void mo_task_delay(uint16_t ticks)
 
     tcb_t *self = kcb->task_current->data;
 
-    /* Set delay and blocked state - scheduler will skip blocked tasks */
+    /* Set delay and blocked state, then dequeue from the ready queue */
+    sched_dequeue_task(self);
+
     self->delay = ticks;
     self->state = TASK_BLOCKED;
     NOSCHED_LEAVE();
@@ -730,8 +769,13 @@ int32_t mo_task_suspend(uint16_t id)
         return ERR_TASK_CANT_SUSPEND;
     }
 
+    /* Remove the task from its ready queue if it is currently enqueued
+     * (TASK_READY or TASK_RUNNING). */
+    if (task->state == TASK_READY || task->state == TASK_RUNNING)
+        sched_dequeue_task(task);
+
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (kcb->task_current->data == task);
 
     CRITICAL_LEAVE();
 
@@ -758,9 +802,8 @@ int32_t mo_task_resume(uint16_t id)
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_RESUME;
     }
-
-    /* mark as ready - scheduler will find it */
-    task->state = TASK_READY;
+    /* Enqueue the resumed task into its ready queue */
+    sched_enqueue_task(task);
 
     CRITICAL_LEAVE();
     return ERR_OK;
@@ -874,6 +917,9 @@ void _sched_block(queue_t *wait_q)
 
     tcb_t *self = kcb->task_current->data;
 
+    /* Remove the blocking task from its ready queue */
+    sched_dequeue_task(self);
+
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
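Taken together, the series wires up the enqueue/dequeue bookkeeping but
deliberately leaves task selection on the existing round-robin scan (patch 1
notes the additions "do not change behavior yet"). Purely as an illustration of
where this is heading, a follow-up selection path could combine the bitmap
lookup, the per-priority queue, and the round-robin cursor roughly as follows.
This sketch is not part of the series: tcb_from_rq_node() is the hypothetical
helper sketched earlier, and list_cnext() is assumed to be the existing
circular-successor helper that sched_dequeue_task() already calls above.

    /* Illustrative sketch only -- not part of this patch series. */
    static tcb_t *sched_pick_next_ready(void)
    {
        if (!kcb->ready_bitmap)
            return NULL; /* no runnable task at any priority level */

        /* Highest-priority non-empty queue: lowest set bit of the bitmap. */
        uint8_t prio = (uint8_t) __builtin_ctz(kcb->ready_bitmap);
        list_t *rq = kcb->ready_queues[prio];
        list_node_t **cursor = &kcb->rr_cursors[prio];

        /* Take the task under the cursor, then advance it for fairness. */
        list_node_t *node = *cursor;
        *cursor = list_cnext(rq, node);

        return tcb_from_rq_node(node);
    }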