From 43f9499a077c4073cba85cedae42b5e35ec8c2b4 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 17 Sep 2025 15:30:25 +0800
Subject: [PATCH 01/18] Add data structures for O(1) scheduler

This commit extends the core scheduler data structures to support the
new O(1) scheduler design.

Adds in tcb_t:
- rq_node: embedded list node for ready-queue membership used during
  task state transitions. This avoids redundant malloc/free for
  per-enqueue/dequeue nodes by tying the node's lifetime to the task
  control block.

Adds in kcb_t:
- ready_bitmap: 8-bit bitmap tracking which priority levels have
  runnable tasks.
- ready_queues[]: per-priority ready queues for O(1) task selection.
- queue_counts[]: per-priority runnable task counters used for
  bookkeeping and consistency checks.
- rr_cursors[]: round-robin cursor per priority level to support fair
  selection within the same priority.

These additions are structural only and prepare the scheduler for O(1)
ready-queue operations; they do not change behavior yet.
---
 include/sys/task.h | 13 +++++++++++++
 kernel/task.c      |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/include/sys/task.h b/include/sys/task.h
index 0d3aaa4..2351131 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -86,6 +86,10 @@ typedef struct tcb {
 
     /* Stack Protection */
     uint32_t canary; /* Random stack canary for overflow detection */
+
+    /* State transition support */
+    /* Ready queue membership node (only one per task) */
+    list_node_t rq_node;
 } tcb_t;
 
 /* Kernel Control Block (KCB)
@@ -108,6 +112,15 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list;      /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+
+    /* Scheduling attributes */
+    uint8_t ready_bitmap; /* 8-bit priority bitmap */
+    list_t
+        *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */
+    uint16_t queue_counts[TASK_PRIORITY_LEVELS]; /* O(1) size tracking */
+
+    /* Weighted Round-Robin State per Priority Level */
+    list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */
diff --git a/kernel/task.c b/kernel/task.c
index 7365873..7d65ba6 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -26,6 +26,9 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .ready_bitmap = 0,
+    .ready_queues = {NULL},
+    .rr_cursors = {NULL},
 };
 
 kcb_t *kcb = &kernel_state;

From 4f29f875fe9074cbeb32c513f36fd833264d0905 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:21:31 +0800
Subject: [PATCH 02/18] Add ready queue bitmap helper functions

When a task is enqueued into or dequeued from the ready queue, the
bitmap that indicates the ready queue state must be updated. These two
helper functions can be used by related functions whenever bitmap
operations are required.
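As a usage sketch (bit i of kcb->ready_bitmap corresponds to priority
level i; the actual call sites land in the enqueue/dequeue patches
later in this series):

    /* enqueue path: level i now holds at least one runnable task */
    bitmap_set(task->prio_level);

    /* dequeue path: clear the bit only when the level drains */
    if (!--kcb->queue_counts[task->prio_level])
        bitmap_clean(task->prio_level);

A non-zero bitmap therefore guarantees that at least one ready queue is
populated, which is what later enables the O(1) priority lookup.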
---
 kernel/task.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 7d65ba6..befa40c 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -38,6 +38,18 @@ kcb_t *kcb = &kernel_state;
  */
 volatile bool scheduler_started = false;
 
+/* Bitmap functions */
+
+static inline void bitmap_set(uint8_t prio_level)
+{
+    kcb->ready_bitmap |= (1U << prio_level);
+}
+
+static inline void bitmap_clean(uint8_t prio_level)
+{
+    kcb->ready_bitmap &= ~(1U << prio_level);
+}
+
 /* timer work management for reduced latency */
 static volatile uint32_t timer_work_pending = 0;    /* timer work types */
 static volatile uint32_t timer_work_generation = 0; /* counter for coalescing */

From 3a1f6823df5a9ddb242fc151a93927c209b5b52c Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 19 Nov 2025 22:50:48 +0800
Subject: [PATCH 03/18] Add list helpers for scheduler ready queues

Previously, list_pushback() and list_remove() were the only list APIs
available for manipulating task lists. However, both functions perform
malloc/free internally, which is unnecessary and inefficient during
task state transitions.

An earlier commit introduced an intrusive ready-queue membership node
in tcb_t. To support this design and improve efficiency, this commit
adds two helper functions for intrusive list manipulation, eliminating
the malloc/free overhead during task lifecycle transitions.

- list_pushback_node(): append an existing node to the end of the list
  without allocating memory.
- list_remove_node(): remove a node from the list without freeing it.

Both helpers operate in O(n) time, since they locate the insertion or
removal point by linear search.
---
 include/lib/list.h | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c8..ce791a2 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data)
     return node;
 }
 
+/* Pushback list node into list */
+static inline void list_pushback_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || target->next))
+        return;
+
+    target->next = list->tail;
+
+    /* Insert before tail sentinel */
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail)
+        prev = prev->next;
+
+    prev->next = target;
+    list->length++;
+    return;
+}
+
 static inline void *list_pop(list_t *list)
 {
     if (unlikely(list_is_empty(list)))
@@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target)
     return data;
 }
 
+/* Remove a node from list without freeing */
+static inline void list_remove_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || list_is_empty(list)))
+        return;
+
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail && prev->next != target)
+        prev = prev->next;
+
+    if (unlikely(prev->next != target))
+        return; /* node not found */
+
+    prev->next = target->next;
+    target->next = NULL;
+    list->length--;
+    return;
+}
+
 /* Iteration */
 
 /* Callback should return non-NULL to stop early, NULL to continue */

From ddd1205de5599fa3150e1f0e73a54d72a9284afa Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 12:16:41 +0800
Subject: [PATCH 04/18] Refactor enqueuing logic for the new scheduler

This commit refactors sched_enqueue_task() to support the data
structures of the new scheduler. The RR cursor must be advanced when it
refers to the running task, which only happens when the ready queue
holds a single task.
The RR cursor will then point to the newly enqueued task, keeping the
scheduler state consistent.
---
 kernel/task.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index befa40c..1e7e37c 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -86,7 +86,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = {
     TASK_TIMESLICE_IDLE /* Priority 7: Idle */
 };
 
-/* Mark task as ready (state-based) */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task);
 
 /* Utility and Validation Functions */
@@ -369,17 +369,39 @@ void _yield(void) __attribute__((weak, alias("yield")));
  * practical performance with strong guarantees for fairness and reliability.
  */
 
-/* Add task to ready state - simple state-based approach */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
+    uint8_t prio_level = task->prio_level;
+
     /* Ensure task has appropriate time slice for its priority */
-    task->time_slice = get_priority_timeslice(task->prio_level);
+    task->time_slice = get_priority_timeslice(prio_level);
     task->state = TASK_READY;
 
-    /* Task selection is handled directly through the master task list */
+    list_t **rq = &kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    if (!*rq)
+        *rq = list_create();
+
+    list_pushback_node(*rq, &task->rq_node);
+
+    /* Update task count in ready queue */
+    kcb->queue_counts[prio_level]++;
+
+    /* Set up the first rr_cursor */
+    if (!*cursor)
+        *cursor = &task->rq_node;
+
+    /* Advance cursor when it refers to the running task */
+    if (*cursor == kcb->task_current)
+        *cursor = &task->rq_node;
+
+    bitmap_set(task->prio_level);
+    return;
 }
 
 /* Remove task from ready queues - state-based approach for compatibility */

From fbfdfde13b8f037806928cbdd1fb2247cd4a0897 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 12:30:04 +0800
Subject: [PATCH 05/18] Refactor dequeuing logic for the new scheduler

This commit refactors sched_dequeue_task() to support the data
structures of the new scheduler.

- Set the RR cursor to NULL when no task remains in the ready queue.
- Circularly advance the RR cursor if it currently points to the task
  being dequeued, so the cursor never points to an unlinked node.
---
 kernel/task.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index 1e7e37c..b7a6567 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -404,16 +404,32 @@ static void sched_enqueue_task(tcb_t *task)
     return;
 }
 
-/* Remove task from ready queues - state-based approach for compatibility */
+/* Remove task from ready queue */
 void sched_dequeue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* For tasks that need to be removed from ready state (suspended/cancelled),
-     * we rely on the state change. The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
-     */
+    uint8_t prio_level = task->prio_level;
+
+    /* A task leaving the ready/running state must be removed from its
+     * corresponding ready queue. */
+    list_t *rq = kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    /* Safely move cursor to next task node. */
+    if (&task->rq_node == *cursor)
+        *cursor = list_cnext(rq, *cursor);
+
+    /* Remove ready queue node */
+    list_remove_node(rq, &task->rq_node);
+
+    /* Update task count in ready queue */
+    if (!--kcb->queue_counts[prio_level]) {
+        *cursor = NULL;
+        bitmap_clean(task->prio_level);
+    }
+    return;
 }
 
 /* Handle time slice expiration for current task */

From 4355ce802b548d51a7b3210c8491f37ecf97c5f3 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 13:30:24 +0800
Subject: [PATCH 06/18] Refactor task state–related operation APIs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit refactors all task operation APIs that are related to task
state transitions to support the new scheduler. The refactored
sched_enqueue_task() and sched_dequeue_task() routines are now invoked
directly inside these operations.

Enqueue and dequeue actions are performed only when a state transition
crosses the boundary between the following groups:

  {TASK_RUNNING, TASK_READY} ↔ {other states}

The sections below describe the detailed changes for each API:

- sched_wakeup_task(): Add TASK_RUNNING to the state-group complement,
  avoiding re-enqueueing a task that is already running.
- mo_task_cancel(): Cancel all tasks except TASK_RUNNING. If the task
  is in TASK_READY, sched_dequeue_task() is invoked before
  cancellation.
- mo_task_delay(): Transition from TASK_RUNNING to TASK_BLOCKED; call
  sched_dequeue_task() accordingly.
- mo_task_suspend(): This API can be called for both TASK_RUNNING and
  TASK_READY tasks. Both cases require invoking sched_dequeue_task()
  before transitioning to TASK_SUSPENDED.
- mo_task_resume(): Transition from TASK_SUSPENDED to TASK_READY; call
  sched_enqueue_task().
- _sched_block(): Invoked only when a TASK_RUNNING task calls
  mutex-related APIs; performs the TASK_RUNNING → TASK_BLOCKED
  transition.
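In sketch form, the rule shared by all of these call sites (the
is_runnable() predicate is illustrative only; the patch inlines the
check at each site):

    /* A task is linked into a ready queue only in these two states. */
    static inline bool is_runnable(uint8_t state)
    {
        return state == TASK_RUNNING || state == TASK_READY;
    }

    /* e.g. suspend: leaving the runnable group forces a dequeue */
    if (is_runnable(task->state))
        sched_dequeue_task(task);
    task->state = TASK_SUSPENDED;

Transitions that stay inside one group, such as TASK_RUNNING ->
TASK_READY during a context switch, touch neither the queue nor the
bitmap.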
---
 kernel/task.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index b7a6567..514717d 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -454,20 +454,15 @@ void sched_tick_current_task(void)
     }
 }
 
-/* Task wakeup - simple state transition approach */
+/* Task wakeup and enqueue into ready queue */
 void sched_wakeup_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* Mark task as ready - scheduler will find it during round-robin traversal
-     */
-    if (task->state != TASK_READY) {
-        task->state = TASK_READY;
-        /* Ensure task has time slice */
-        if (task->time_slice == 0)
-            task->time_slice = get_priority_timeslice(task->prio_level);
-    }
+    /* Enqueue task into ready queue */
+    if (task->state != TASK_READY && task->state != TASK_RUNNING)
+        sched_enqueue_task(task);
 }
 
 /* Efficient Round-Robin Task Selection with O(n) Complexity
@@ -879,6 +874,10 @@ int32_t mo_task_cancel(uint16_t id)
         }
     }
 
+    /* Remove from ready queue */
+    if (tcb->state == TASK_READY)
+        sched_dequeue_task(tcb);
+
     CRITICAL_LEAVE();
 
     /* Free memory outside critical section */
@@ -908,7 +907,9 @@ void mo_task_delay(uint16_t ticks)
 
     tcb_t *self = kcb->task_current->data;
 
-    /* Set delay and blocked state - scheduler will skip blocked tasks */
+    /* Set delay and blocked state, dequeue from ready queue */
+    sched_dequeue_task(self);
+
     self->delay = ticks;
     self->state = TASK_BLOCKED;
     NOSCHED_LEAVE();
@@ -935,6 +936,11 @@ int32_t mo_task_suspend(uint16_t id)
         return ERR_TASK_CANT_SUSPEND;
     }
 
+    /* Remove task node from ready queue if task is in ready queue
+     * (TASK_RUNNING/TASK_READY). */
+    if (task->state == TASK_READY || task->state == TASK_RUNNING)
+        sched_dequeue_task(task);
+
     task->state = TASK_SUSPENDED;
 
     bool is_current = (kcb->task_current == node);
@@ -963,9 +969,8 @@ int32_t mo_task_resume(uint16_t id)
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_RESUME;
     }
-
-    /* mark as ready - scheduler will find it */
-    task->state = TASK_READY;
+    /* Enqueue resumed task into ready queue */
+    sched_enqueue_task(task);
 
     CRITICAL_LEAVE();
     return ERR_OK;
@@ -1087,6 +1092,9 @@ void _sched_block(queue_t *wait_q)
 
     tcb_t *self = kcb->task_current->data;
 
+    /* Remove node from ready queue */
+    sched_dequeue_task(self);
+
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
 

From 09f45dded93b210a81189164b68cf9ceda932d00 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 13:32:44 +0800
Subject: [PATCH 07/18] Fix current-task check in mo_task_suspend()

The previous implementation compared kcb->task_current directly with
the task's list node, which became incorrect after introducing the
embedded ready-queue list-node structure.

This commit updates the condition to compare the underlying task object
instead:

    kcb->task_current->data == task

This ensures mo_task_suspend() correctly detects when the suspended
task is the currently running one.
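For reference, the pointer layout that made the old check fail (both
nodes exist per task once patch 01 applies):

    list_node_t *node = kcb->task_current; /* master task-list node */
    tcb_t *task = node->data;              /* the task object       */
    /* &task->rq_node != node, so comparing node pointers that may
     * come from different lists no longer identifies the task */

Comparing the underlying tcb_t pointers works no matter which list the
node at hand belongs to.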
---
 kernel/task.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/task.c b/kernel/task.c
index 514717d..ba7f8e5 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -942,7 +942,7 @@ int32_t mo_task_suspend(uint16_t id)
 
     task->state = TASK_SUSPENDED;
 
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (kcb->task_current->data == task);
 
     CRITICAL_LEAVE();
 

From 10e1e89b527539972d15bedf9d43c8099236a3d9 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 16:54:02 +0800
Subject: [PATCH 08/18] Add enqueuing path in semaphore and mutex

This commit adds the missing enqueuing path for awakened tasks during
semaphore signaling and mutex unlocking, ensuring that tasks are
correctly inserted into the ready queue under the new scheduler design.
---
 include/sys/task.h | 3 +++
 kernel/mutex.c     | 2 +-
 kernel/semaphore.c | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/include/sys/task.h b/include/sys/task.h
index 2351131..3001f26 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -312,3 +312,6 @@ void _sched_block(queue_t *wait_q);
  * Returns 'true' to enable preemptive scheduling, or 'false' for cooperative
  */
 int32_t app_main(void);
+
+/* Wake up and enqueue task into ready queue */
+void sched_wakeup_task(tcb_t *);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 52a16a7..39a21d3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -283,7 +283,7 @@ int32_t mo_mutex_unlock(mutex_t *m)
         /* Validate task state before waking */
         if (likely(next_owner->state == TASK_BLOCKED)) {
             m->owner_tid = next_owner->id;
-            next_owner->state = TASK_READY;
+            sched_wakeup_task(next_owner);
             /* Clear any pending timeout since we're granting ownership */
             next_owner->delay = 0;
         } else {
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 640e372..fbc3271 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s)
     if (likely(awakened_task)) {
         /* Validate awakened task state consistency */
         if (likely(awakened_task->state == TASK_BLOCKED)) {
-            awakened_task->state = TASK_READY;
+            sched_wakeup_task(awakened_task);
             should_yield = true;
         } else {
             /* Task state inconsistency - this should not happen */

From 55bf840e01a9f63617061e1587739b3fc9b9c798 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sat, 22 Nov 2025 11:03:57 +0800
Subject: [PATCH 09/18] Add atomic block helper for mutex lock

Previously, mutex_block_atomic() only updated the task state. Under the
new scheduler design, the blocked task must also be removed from the
ready queue.

The existing helper _sched_block() does not match the mutex path
because it operates on queue_t instead of list_t and also processes
deferred timer work, which mutex locking does not require.

This commit introduces _sched_block_mutex(), a helper that supports the
list-based waiter structure and skips deferred timer handling. It will
be used by the mutex lock APIs in a later change.
---
 include/sys/task.h | 3 +++
 kernel/task.c      | 19 +++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/include/sys/task.h b/include/sys/task.h
index 3001f26..7fc6ef9 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -301,6 +301,9 @@ uint64_t mo_uptime(void);
  */
 void _sched_block(queue_t *wait_q);
 
+/* Support mutex data structure */
+void _sched_block_mutex(list_t *waiters);
+
 /* Application Entry Point */
 
 /* The main entry point for the user application.
diff --git a/kernel/task.c b/kernel/task.c
index ba7f8e5..ce57624 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -1102,3 +1102,22 @@ void _sched_block(queue_t *wait_q)
     self->state = TASK_BLOCKED;
     _yield();
 }
+
+void _sched_block_mutex(list_t *waiters)
+{
+    if (unlikely(!waiters || !kcb || !kcb->task_current ||
+                 !kcb->task_current->data))
+        panic(ERR_SEM_OPERATION);
+
+    tcb_t *self = kcb->task_current->data;
+
+    /* Remove node from ready queue */
+    sched_dequeue_task(self);
+
+    if (unlikely(!list_pushback(waiters, self)))
+        panic(ERR_SEM_OPERATION);
+
+    /* Set blocked state - scheduler will skip blocked tasks */
+    self->state = TASK_BLOCKED;
+    _yield();
+}

From 5fe4ed19e8293add60b876ec6a0945048cddf236 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sat, 22 Nov 2025 11:13:12 +0800
Subject: [PATCH 10/18] Replace mutex_block_atomic by _sched_block_mutex

This commit replaces mutex_block_atomic() with _sched_block_mutex() to
align mutex blocking behavior with the new scheduler design. Blocked
tasks are now properly dequeued from the ready queue, and no deferred
timer processing is performed.
---
 kernel/mutex.c | 20 +-------------------
 1 file changed, 1 insertion(+), 19 deletions(-)

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 39a21d3..293eed1 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -64,24 +64,6 @@ static bool remove_self_from_waiters(list_t *waiters)
     return false;
 }
 
-/* Atomic block operation with enhanced error checking */
-static void mutex_block_atomic(list_t *waiters)
-{
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
-        panic(ERR_SEM_OPERATION);
-
-    tcb_t *self = kcb->task_current->data;
-
-    /* Add to waiters list */
-    if (unlikely(!list_pushback(waiters, self)))
-        panic(ERR_SEM_OPERATION);
-
-    /* Block and yield atomically */
-    self->state = TASK_BLOCKED;
-    _yield(); /* This releases NOSCHED when we context switch */
-}
-
 int32_t mo_mutex_init(mutex_t *m)
 {
     if (unlikely(!m))
@@ -162,7 +144,7 @@ int32_t mo_mutex_lock(mutex_t *m)
     }
 
     /* Slow path: mutex is owned, must block atomically */
-    mutex_block_atomic(m->waiters);
+    _sched_block_mutex(m->waiters);
 
     /* When we return here, we've been woken by mo_mutex_unlock()
      * and ownership has been transferred to us. */

From 543d24f92eab241debc623b5d9aea8cfb0bee916 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 17:35:19 +0800
Subject: [PATCH 11/18] Add sched_migrate_task() helper

This commit introduces a new helper, sched_migrate_task(), which
migrates a task between ready queues of different priority levels and
will be used in mo_task_priority() in a later commit for readability.
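The intended call site, in sketch form (the actual wiring lands in the
next commit):

    /* inside mo_task_priority(), for a currently runnable task */
    if (task->state == TASK_RUNNING || task->state == TASK_READY)
        sched_migrate_task(task, priority);

The helper concentrates the dequeue/re-enqueue pair and the
prio/prio_level bookkeeping in one place instead of spreading it across
callers.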
---
 kernel/task.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index ce57624..ffba944 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -432,6 +432,27 @@ void sched_dequeue_task(tcb_t *task)
     return;
 }
 
+/* Task migration from origin to new priority ready queue */
+static void sched_migrate_task(tcb_t *task, int16_t priority)
+{
+    if (unlikely(!task || !is_valid_priority(priority)))
+        return;
+
+    if (task->prio == priority)
+        return;
+
+    /* Remove task node from origin ready queue */
+    sched_dequeue_task(task);
+
+    /* Update new properties */
+    task->prio = priority;
+    task->prio_level = extract_priority_level(priority);
+
+    /* Enqueue task node into new priority ready queue */
+    sched_enqueue_task(task);
+    return;
+}
+
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {

From 0bc7918c6fbbbfdbfb5c39b3054a23ceedc47197 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 21 Nov 2025 17:36:50 +0800
Subject: [PATCH 12/18] Refactor priority-change path to use
 sched_migrate_task()

This commit applies the sched_migrate_task() helper introduced in the
previous commit, which handles migration of a task to the correct ready
queue when its priority changes. If the task is already in a ready
queue, the helper dequeues it from the old priority level, enqueues it
into the new one, and updates all related bookkeeping.

In addition, if a TASK_RUNNING task changes its priority, it now yields
immediately. This ensures that the scheduler always executes tasks in
strict priority order, preventing a running task from continuing to run
at an outdated priority level.
---
 kernel/task.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index ffba944..5fc24e9 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -1015,12 +1015,22 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
         return ERR_TASK_NOT_FOUND;
     }
 
+    bool is_current = (kcb->task_current->data == task);
+
+    /* Migrate runnable task to its new priority ready queue */
+    if (task->state == TASK_RUNNING || task->state == TASK_READY)
+        sched_migrate_task(task, priority);
+
     /* Update priority and level */
     task->prio = priority;
     task->prio_level = extract_priority_level(priority);
     task->time_slice = get_priority_timeslice(task->prio_level);
 
     CRITICAL_LEAVE();
+
+    if (is_current)
+        mo_task_yield();
+
     return ERR_OK;
 }

From b897d04af4b415c35480232af0ddb97776b551e4 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Tue, 21 Oct 2025 16:33:35 +0800
Subject: [PATCH 13/18] Add idle task and initialization API

This commit adds the system idle task and its initialization routine,
idle_task_init(). The idle task serves as the default execution context
when no runnable tasks exist. It never enters any ready queue and
always uses the fixed priority TASK_PRIO_IDLE.

Introducing a dedicated idle task ensures consistent scheduler entry
during startup and strict ordering for user tasks, and allows priority
adjustments before user tasks run for the first time.
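The resulting fallback in the selection path, in sketch form (the
sched_switch_to_idle() helper and the bitmap check are added by later
commits in this series):

    /* No bit set: every ready queue is empty, so run the idle task */
    if (unlikely(!kcb->ready_bitmap))
        return sched_switch_to_idle()->id;

Because the idle task lives outside the ready queues, the empty-bitmap
case requires no list traversal at all.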
---
 include/sys/task.h | 17 +++++++++++++
 kernel/task.c      | 60 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/include/sys/task.h b/include/sys/task.h
index 7fc6ef9..f85db84 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -121,6 +121,9 @@ typedef struct {
 
     /* Weighted Round-Robin State per Priority Level */
     list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
+
+    /* System idle task */
+    list_node_t task_idle;
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */
@@ -316,5 +319,19 @@ void _sched_block_mutex(list_t *waiters);
  */
 int32_t app_main(void);
 
+/* Initialize the idle task
+ *
+ * This function creates and initializes the idle task structure.
+ * It should be called once during system startup.
+ *
+ * The idle task is a permanent system task that runs when no other
+ * ready tasks exist. It is never enqueued into any ready queue and
+ * cannot be suspended, canceled, or priority modified.
+ *
+ * Only one idle task exists per hart. Its priority is fixed to the
+ * lowest level and its time slice is zero.
+ */
+void idle_task_init(void);
+
 /* Wake up and enqueue task into ready queue */
 void sched_wakeup_task(tcb_t *);
diff --git a/kernel/task.c b/kernel/task.c
index 5fc24e9..7c5eda6 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -486,6 +486,20 @@ void sched_wakeup_task(tcb_t *task)
         sched_enqueue_task(task);
 }
 
+/* System idle task; executed when no ready task exists in any ready queue */
+static void sched_idle(void)
+{
+    if (!kcb->preemptive)
+        /* Cooperative mode idle */
+        while (1)
+            mo_task_yield();
+
+    /* Preemptive mode idle */
+    while (1)
+        mo_task_wfi();
+}
+
+
 /* Efficient Round-Robin Task Selection with O(n) Complexity
  *
  * Selects the next ready task using circular traversal of the master task list.
@@ -779,6 +793,52 @@ static bool init_task_stack(tcb_t *tcb, size_t stack_size)
     return true;
 }
 
+/* Initialize idle task */
+void idle_task_init(void)
+{
+    /* Ensure proper alignment */
+    size_t stack_size = DEFAULT_STACK_SIZE;
+    stack_size = (stack_size + 0xF) & ~0xFU;
+
+    /* Allocate and initialize TCB */
+    tcb_t *idle = malloc(sizeof(tcb_t));
+    if (!idle)
+        panic(ERR_TCB_ALLOC);
+
+    idle->entry = &sched_idle;
+    idle->delay = 0;
+    idle->rt_prio = NULL;
+    idle->state = TASK_READY;
+    idle->flags = 0;
+
+    /* Set idle task priority */
+    idle->prio = TASK_PRIO_IDLE;
+    idle->prio_level = 0;
+    idle->time_slice = 0;
+
+    /* Set idle task id and task count */
+    idle->id = kcb->next_tid++;
+    kcb->task_count++;
+
+    /* Initialize stack */
+    if (!init_task_stack(idle, stack_size)) {
+        free(idle);
+        panic(ERR_STACK_ALLOC);
+    }
+
+    /* Bind idle task into kcb */
+    kcb->task_idle.data = idle;
+
+    /* Initialize idle task execution context */
+    hal_context_init(&idle->context, (size_t) idle->stack, stack_size,
+                     (size_t) &sched_idle);
+
+    printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, &sched_idle,
+           idle->stack, (unsigned int) stack_size);
+
+    return;
+}
+
 /* Task Management API */
 
 int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)

From 1faf13c96a86c2492d8978af192dc3d5bcf35cbf Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 02:04:47 +0800
Subject: [PATCH 14/18] Refactor launch sequence in main() for scheduler
 initialization

This change sets up the scheduler state during system startup by
assigning kcb->task_current to kcb->task_idle and dispatching to the
idle task as the first execution context.
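In sketch form, the launch sequence this change installs (condensed
from the diff below):

    idle_task_init();                    /* create and bind the idle TCB */
    kcb->task_current = &kcb->task_idle; /* valid context before tasks   */

    setjmp(kcb->context);                /* establish base context       */

    tcb_t *idle = kcb->task_current->data;
    idle->state = TASK_RUNNING;
    scheduler_started = true;            /* enable timer IRQ handling    */
    hal_dispatch_init(idle->context);    /* transfers control; no return */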
This commit also keeps the scheduling entry path consistent between
startup and runtime.
---
 kernel/main.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/kernel/main.c b/kernel/main.c
index ce0dc08..bebb0cd 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -31,6 +31,13 @@ int32_t main(void)
     uint32_t entropy = read_csr(mcycle) ^ (uint32_t) _read_us();
     srand(entropy);
 
+    /* Initialize idle task */
+    /* Initialize the first current task as idle sentinel node.
+     * This ensures a valid entry point before any real task runs.
+     */
+    idle_task_init();
+    kcb->task_current = &kcb->task_idle;
+
     /* Initialize deferred logging system.
      * Must be done after heap init but before app_main() to ensure
      * application tasks can use thread-safe printf.
@@ -47,32 +54,23 @@ int32_t main(void)
     printf("Scheduler mode: %s\n",
            kcb->preemptive ? "Preemptive" : "Cooperative");
 
-    /* Verify that the application created at least one task.
-     * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never
-     * successfully called.
-     */
-    if (!kcb->task_current)
-        panic(ERR_NO_TASKS);
-
     /* Save the kernel's context. This is a formality to establish a base
      * execution context before launching the first real task.
      */
     setjmp(kcb->context);
 
-    /* Launch the first task.
-     * 'kcb->task_current' was set by the first call to mo_task_spawn.
-     * This function transfers control and does not return.
+    /* Launch the first task (idle task); the scheduler will then select the
+     * highest priority task. This function transfers control and does not return.
      */
-    tcb_t *first_task = kcb->task_current->data;
-    if (!first_task)
-        panic(ERR_NO_TASKS);
+    tcb_t *idle = kcb->task_current->data;
+    idle->state = TASK_RUNNING;
 
     /* Mark scheduler as started - enables timer IRQ in NOSCHED_LEAVE.
      * Must be set before hal_dispatch_init() which enables preemption.
      */
     scheduler_started = true;
 
-    hal_dispatch_init(first_task->context);
+    hal_dispatch_init(idle->context);
 
     /* This line should be unreachable. */
     panic(ERR_UNKNOWN);

From 5c980da65a31636655695e2cbf58e1f60a8300bb Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 02:09:33 +0800
Subject: [PATCH 15/18] Refactor mo_task_spawn() for the new scheduler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit refactors mo_task_spawn() to align with the new O(1)
scheduler design. The task control block (tcb_t) embeds its list node,
which is bound during task creation.

The enqueue operation is moved inside the critical section to guarantee
a consistent enqueue during task creation.

The “first task assignment” logic is removed because the first task is
now the system idle task, as introduced in the previous commit.
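The structural core of the change, in sketch form (field names as
introduced in patch 01):

    tcb->rq_node.data = tcb; /* intrusive node points back at its task */
    sched_enqueue_task(tcb); /* enqueue while still inside CRITICAL     */

Because the node is embedded in tcb_t, enqueueing at spawn time
performs no extra allocation and cannot fail midway through task
creation.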
---
 kernel/task.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index 7c5eda6..8105e32 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -899,8 +899,11 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    /* Bind ready queue node */
+    tcb->rq_node.data = tcb;
+
+    /* Push node to ready queue */
+    sched_enqueue_task(tcb);
 
     CRITICAL_LEAVE();
 
@@ -920,7 +923,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     /* Add to cache and mark ready */
     cache_task(tcb->id, tcb);
-    sched_enqueue_task(tcb);
 
     return tcb->id;
 }

From f3bef28154a3b8fd02a9c73e773e837aeb4ba09d Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 00:42:35 +0800
Subject: [PATCH 16/18] Add sched_switch_to_idle() helper

When all ready queues are empty, the scheduler should switch to idle
mode and wait for incoming interrupts. This commit introduces a
dedicated helper to handle that transition, centralizing the logic and
improving readability of the scheduler's path to idle.
---
 kernel/task.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 8105e32..b8c3dfe 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -499,6 +499,14 @@ static void sched_idle(void)
         mo_task_wfi();
 }
 
+/* Switch to the idle task and return its TCB */
+static inline tcb_t *sched_switch_to_idle(void)
+{
+    kcb->task_current = &kcb->task_idle;
+    tcb_t *idle = kcb->task_idle.data;
+    idle->state = TASK_RUNNING;
+    return idle;
+}
 
 /* Efficient Round-Robin Task Selection with O(n) Complexity
  *

From a96606a1322bf7e9a320cff098ac17e5a00cd81b Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 26 Oct 2025 15:04:46 +0800
Subject: [PATCH 17/18] Add De Bruijn helper for O(1) priority lookup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit introduces a 32-entry De Bruijn lookup table to support
constant-time bitmap index computation. This mechanism will be used in
later commits to replace iterative bit-scanning when selecting the next
runnable priority.

The helper itself does not change any scheduling behavior yet, but lays
the groundwork for the new O(1) scheduler’s priority computation path.
---
 kernel/task.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index b8c3dfe..2309fcf 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -356,6 +356,23 @@ void panic(int32_t ecode)
     hal_panic();
 }
 
+/* RISC-V optimized priority finding using De Bruijn sequence */
+static const uint8_t debruijn_lut[32] = {
+    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
+
+/* O(1) priority selection optimized for RISC-V */
+static inline uint8_t find_highest_ready_priority(uint32_t bitmap)
+{
+    /* Isolate rightmost set bit (highest priority) */
+    uint32_t isolated = bitmap & (-bitmap);
+
+    /* De Bruijn multiplication for O(1) bit position finding */
+    uint32_t hash = (isolated * 0x077CB531U) >> 27;
+
+    return debruijn_lut[hash & 0x1F];
+}
+
 /* Weak aliases for context switching functions.
*/ void dispatch(void); void yield(void); From fc2a929672b328c15289835f987fc33e55cdf049 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:08:38 +0800 Subject: [PATCH 18/18] Refactor scheduler to RR cursor-based O(1) design Previously, the scheduler performed an O(N) scan of the global task list (kcb->tasks) to locate the next TASK_READY task. This resulted in non-deterministic selection latency and unstable round-robin rotation under heavy load or frequent task state transitions. This change introduces a strict O(1) scheduler based on per-priority ready queues and round-robin (RR) cursors. Each priority level maintains its own ready queue and cursor, enabling constant-time selection of the next runnable task while preserving fairness within the same priority. Additionally, when all tasks are non-runnable, the scheduler now switches directly to the system idle task after bitmap lookup, ensuring consistent control flow and eliminating unnecessary scanning paths. --- include/sys/task.h | 2 -- kernel/task.c | 83 ++++++++++++++++++---------------------------- 2 files changed, 33 insertions(+), 52 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index f85db84..3916f26 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -130,8 +130,6 @@ typedef struct { extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ diff --git a/kernel/task.c b/kernel/task.c index 2309fcf..c83d7f4 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -525,20 +525,20 @@ static inline tcb_t *sched_switch_to_idle(void) return idle; } -/* Efficient Round-Robin Task Selection with O(n) Complexity +/* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * - * Selects the next ready task using circular traversal of the master task list. + * Selects the next ready task by advancing the per-priority round-robin + * cursor (rr_cursor) circularly using list API list_cnext(). * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. * * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. 
 */
 uint16_t sched_select_next_task(void)
 {
@@ -551,53 +551,36 @@ uint16_t sched_select_next_task(void)
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
-    /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
-    list_node_t *node = start_node;
-    int iterations = 0; /* Safety counter to prevent infinite loops */
+    /* Read the ready bitmap */
+    uint32_t bitmap = kcb->ready_bitmap;
 
-    do {
-        /* Move to next task (circular) */
-        node = list_cnext(kcb->tasks, node);
-        if (!node || !node->data)
-            continue;
+    /* No ready queue available in the bitmap - all tasks are blocked; the
+     * scheduler switches to the system idle task and waits for the next
+     * timer interrupt. */
+    if (unlikely(!bitmap))
+        return sched_switch_to_idle()->id;
 
-        tcb_t *task = node->data;
+    /* Find top priority ready queue */
+    uint8_t top_prio_level = find_highest_ready_priority(bitmap);
 
-        /* Skip non-ready tasks */
-        if (task->state != TASK_READY)
-            continue;
+    list_node_t **cursor = &kcb->rr_cursors[top_prio_level];
+    list_t *rq = kcb->ready_queues[top_prio_level];
+    if (unlikely(!rq || !*cursor))
+        panic(ERR_NO_TASKS);
 
-        /* Found a ready task */
-        kcb->task_current = node;
-        task->state = TASK_RUNNING;
-        task->time_slice = get_priority_timeslice(task->prio_level);
+    /* Update next task with top priority cursor */
+    kcb->task_current = *cursor;
 
-        return task->id;
+    /* Advance top priority cursor to next task node */
+    *cursor = list_cnext(rq, *cursor);
 
-    } while (node != start_node && ++iterations < SCHED_IMAX);
+    /* Update new task properties */
+    tcb_t *new_task = kcb->task_current->data;
+    new_task->time_slice = get_priority_timeslice(new_task->prio_level);
+    new_task->state = TASK_RUNNING;
 
-    /* No ready tasks found in preemptive mode - all tasks are blocked.
-     * This is normal for periodic RT tasks waiting for their next period.
-     * We CANNOT return a BLOCKED task as that would cause it to run.
-     * Instead, find ANY task (even blocked) as a placeholder, then wait for
-     * interrupt.
-     */
-    if (kcb->preemptive) {
-        /* Select any task as placeholder (dispatcher won't actually switch to
-         * it if blocked) */
-        list_node_t *any_node = list_next(kcb->tasks->head);
-        while (any_node && any_node != kcb->tasks->tail) {
-            if (any_node->data) {
-                kcb->task_current = any_node;
-                tcb_t *any_task = any_node->data;
-                return any_task->id;
-            }
-            any_node = list_next(any_node);
-        }
-        /* No tasks at all - this is a real error */
-        panic(ERR_NO_TASKS);
-    }
+    if (kcb->task_current)
+        return new_task->id;
 
     /* In cooperative mode, having no ready tasks is an error */
     panic(ERR_NO_TASKS);