From 69421e4b85c9be9c5e9ef164229222c07dca3b65 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 17 Sep 2025 15:30:25 +0800
Subject: [PATCH 01/19] Add data structures for O(1) scheduler

This commit extends the core scheduler data structures to support the
new O(1) scheduler design.

New fields in tcb_t:
- rq_node: embedded list node for ready-queue membership used during
  task state transitions. This avoids redundant malloc/free for
  per-enqueue/dequeue nodes by tying the node's lifetime to the task
  control block.

New fields in kcb_t:
- ready_bitmap: 8-bit bitmap tracking which priority levels have
  runnable tasks.
- ready_queues[]: per-priority ready queues for O(1) task selection.
- queue_counts[]: per-priority runnable task counters used for
  bookkeeping and consistency checks.
- rr_cursors[]: round-robin cursor per priority level to support fair
  selection within the same priority.

These additions are structural only and prepare the scheduler for O(1)
ready-queue operations; they do not change behavior yet.
---
 include/sys/task.h | 13 +++++++++++++
 kernel/task.c      |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/include/sys/task.h b/include/sys/task.h
index 33d0b601..1b4ff9a5 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -82,6 +82,10 @@ typedef struct tcb {
 
     /* Real-time Scheduling Support */
     void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */
+
+    /* State transition support */
+    /* Ready queue membership node (only one per task) */
+    list_node_t rq_node;
 } tcb_t;
 
 /* Kernel Control Block (KCB)
@@ -104,6 +108,15 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list;      /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+
+    /* Scheduling attributes */
+    uint8_t ready_bitmap; /* 8-bit priority bitmap */
+    list_t
+        *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */
+    uint16_t queue_counts[TASK_PRIORITY_LEVELS]; /* O(1) size tracking */
+
+    /* Weighted Round-Robin State per Priority Level */
+    list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */

diff --git a/kernel/task.c b/kernel/task.c
index 59ffdae5..f63b4ed6 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -25,6 +25,9 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .ready_bitmap = 0,
+    .ready_queues = {NULL},
+    .rr_cursors = {NULL},
 };
 
 kcb_t *kcb = &kernel_state;

From d2dcce231ee4491c7f72b13fdc3055aad4498088 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:21:31 +0800
Subject: [PATCH 02/19] Add three macros for ready queue bitmap operation

When a task is enqueued into or dequeued from the ready queue, the
bitmap that indicates the ready queue state should be updated. These
three macros are used on the sched_enqueue_task() and
sched_dequeue_task() paths to improve readability and maintain
consistency.
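For illustration, the intended invariant is that bit N of
kcb->ready_bitmap is set exactly when ready_queues[N] is non-empty.
A minimal sketch of the call sites (hypothetical here; the real wiring
lands in the enqueue/dequeue refactor in PATCH 04):

    /* after inserting a task into ready_queues[prio] */
    BITMAP_SET(prio);

    /* after removing the last task from ready_queues[prio] */
    if (kcb->queue_counts[prio] == 0)
        BITMAP_CLEAN(prio);

    /* O(1) query: does priority level 'prio' have runnable tasks? */
    if (BITMAP_CHECK(prio)) {
        /* ... */
    }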
---
 kernel/task.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index f63b4ed6..ba8e9ea7 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -31,6 +31,11 @@ static kcb_t kernel_state = {
 };
 
 kcb_t *kcb = &kernel_state;
 
+/* Bitmap operations */
+#define BITMAP_CHECK(prio) (kcb->ready_bitmap & (1U << (prio)))
+#define BITMAP_SET(prio) (kcb->ready_bitmap |= (1U << (prio)))
+#define BITMAP_CLEAN(prio) (kcb->ready_bitmap &= ~(1U << (prio)))
+
 /* timer work management for reduced latency */
 static volatile uint32_t timer_work_pending = 0; /* timer work types */
 static volatile uint32_t timer_work_generation = 0; /* counter for coalescing */

From 5d75ddfb5faf622f380c9e86a3b8a16527e56b2c Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 19 Nov 2025 22:50:48 +0800
Subject: [PATCH 03/19] Add intrusive node helpers for scheduler ready queues

This commit introduces two helper functions for intrusive list usage,
where each task embeds its own list node instead of relying on
per-operation malloc/free. The new APIs allow the scheduler to
manipulate ready-queue nodes directly:

- list_pushback_node(): append an existing node to the end of the list
  (before the tail sentinel) without allocating memory.
- list_remove_node(): remove a node from the list without freeing it,
  allowing the caller to control the node's lifetime.

These helpers will be used by the upcoming O(1) scheduler
enqueue/dequeue paths, which require embedded list nodes stored in
tcb_t.
---
 include/lib/list.h | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c83..ce791a2b 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -100,6 +100,24 @@ static inline list_node_t *list_pushback(list_t *list, void *data)
     return node;
 }
 
+/* Append an existing node to the end of a list without allocating */
+static inline void list_pushback_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || target->next))
+        return;
+
+    target->next = list->tail;
+
+    /* Insert before tail sentinel */
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail)
+        prev = prev->next;
+
+    prev->next = target;
+    list->length++;
+    return;
+}
+
 static inline void *list_pop(list_t *list)
 {
     if (unlikely(list_is_empty(list)))
@@ -134,6 +152,25 @@ static inline void *list_remove(list_t *list, list_node_t *target)
     return data;
 }
 
+/* Remove a node from a list without freeing it */
+static inline void list_remove_node(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || list_is_empty(list)))
+        return;
+
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail && prev->next != target)
+        prev = prev->next;
+
+    if (unlikely(prev->next != target))
+        return; /* node not found */
+
+    prev->next = target->next;
+    target->next = NULL;
+    list->length--;
+    return;
+}
+
 /* Iteration */
 
 /* Callback should return non-NULL to stop early, NULL to continue */

From fb20c5a79d66ae0b93d9290b0e599fd5e3634072 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:39:29 +0800
Subject: [PATCH 04/19] Refactor dequeue/enqueue logic for O(1) scheduler

This commit refactors sched_enqueue_task() and sched_dequeue_task() to
use the per-priority ready queues and the embedded rq_node stored in
tcb_t, instead of relying only on task state inspection. Tasks are now
explicitly added to and removed from the appropriate ready queue, and
queue_counts, rr_cursors, and the ready_bitmap are updated accordingly.
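A minimal sketch of the resulting flow (illustrative only; it assumes
the rq_node.data binding performed at spawn time later in this series):

    tcb_t *task = /* ... */;
    task->rq_node.data = task; /* bind the embedded node to its TCB once */

    /* enqueue into the per-priority queue: no per-operation malloc */
    list_pushback_node(kcb->ready_queues[task->prio_level], &task->rq_node);

    /* dequeue: no free; the node's lifetime follows the TCB */
    list_remove_node(kcb->ready_queues[task->prio_level], &task->rq_node);

Because the node is embedded, state transitions become pointer
manipulation plus counter/bitmap bookkeeping, with no allocator calls
on the scheduling hot path.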
---
 kernel/task.c | 95 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 70 insertions(+), 25 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index ba8e9ea7..38c6bef9 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -76,7 +76,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = {
     TASK_TIMESLICE_IDLE /* Priority 7: Idle */
 };
 
-/* Mark task as ready (state-based) */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task);
 
 /* Utility and Validation Functions */
@@ -343,29 +343,67 @@ void _yield(void) __attribute__((weak, alias("yield")));
  * practical performance with strong guarantees for fairness and reliability.
  */
 
-/* Add task to ready state - simple state-based approach */
+/* Enqueue task into ready queue */
 static void sched_enqueue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
+    uint8_t prio_level = task->prio_level;
+
     /* Ensure task has appropriate time slice for its priority */
-    task->time_slice = get_priority_timeslice(task->prio_level);
+    task->time_slice = get_priority_timeslice(prio_level);
     task->state = TASK_READY;
 
-    /* Task selection is handled directly through the master task list */
+    list_t **rq = &kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    if (!*rq)
+        *rq = list_create();
+
+    list_pushback_node(*rq, &task->rq_node);
+
+    /* Update task count in ready queue */
+    kcb->queue_counts[prio_level]++;
+
+    /* Set up the first rr_cursor */
+    if (!*cursor)
+        *cursor = &task->rq_node;
+
+    /* Advance the cursor when it points at the running task */
+    if (*cursor == kcb->task_current)
+        *cursor = &task->rq_node;
+
+    BITMAP_SET(task->prio_level);
+    return;
 }
 
-/* Remove task from ready queues - state-based approach for compatibility */
-void sched_dequeue_task(tcb_t *task)
+/* Remove task from its ready queue */
+static void sched_dequeue_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* For tasks that need to be removed from ready state (suspended/cancelled),
-     * we rely on the state change. The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
-     */
+    uint8_t prio_level = task->prio_level;
+
+    /* A task leaving the ready/running state must be removed from its
+     * corresponding ready queue. */
+    list_t *rq = kcb->ready_queues[prio_level];
+    list_node_t **cursor = &kcb->rr_cursors[prio_level];
+
+    /* Safely move the cursor to the next task node.
*/ + if (&task->rq_node == *cursor) + *cursor = list_cnext(rq, *cursor); + + /* Remove ready queue node */ + list_remove_node(rq, &task->rq_node); + + /* Update task count in ready queue */ + if (!--kcb->queue_counts[prio_level]) { + *cursor = NULL; + BITMAP_CLEAN(task->prio_level); + } + return; } /* Handle time slice expiration for current task */ @@ -386,20 +424,15 @@ void sched_tick_current_task(void) } } -/* Task wakeup - simple state transition approach */ +/* Task wakeup and enqueue into ready queue */ void sched_wakeup_task(tcb_t *task) { if (unlikely(!task)) return; - /* Mark task as ready - scheduler will find it during round-robin traversal - */ - if (task->state != TASK_READY) { - task->state = TASK_READY; - /* Ensure task has time slice */ - if (task->time_slice == 0) - task->time_slice = get_priority_timeslice(task->prio_level); - } + /* Enqueue task into ready queue */ + if (task->state != TASK_READY && task->state != TASK_RUNNING) + sched_enqueue_task(task); } /* Efficient Round-Robin Task Selection with O(n) Complexity @@ -673,12 +706,15 @@ int32_t mo_task_cancel(uint16_t id) } } + /* Remove from ready queue */ + if (tcb->state == TASK_READY) + sched_dequeue_task(tcb); + CRITICAL_LEAVE(); /* Free memory outside critical section */ free(tcb->stack); free(tcb); - free(node); return ERR_OK; } @@ -703,7 +739,9 @@ void mo_task_delay(uint16_t ticks) tcb_t *self = kcb->task_current->data; - /* Set delay and blocked state - scheduler will skip blocked tasks */ + /* Set delay and blocked state, dequeue from ready queue */ + sched_dequeue_task(self); + self->delay = ticks; self->state = TASK_BLOCKED; NOSCHED_LEAVE(); @@ -730,8 +768,13 @@ int32_t mo_task_suspend(uint16_t id) return ERR_TASK_CANT_SUSPEND; } + /* Remove task node from ready queue if task is in ready queue + * (TASK_RUNNING/TASK_READY).*/ + if (task->state == TASK_READY || task->state == TASK_RUNNING) + sched_dequeue_task(task); + task->state = TASK_SUSPENDED; - bool is_current = (kcb->task_current == node); + bool is_current = (kcb->task_current->data == task); CRITICAL_LEAVE(); @@ -758,9 +801,8 @@ int32_t mo_task_resume(uint16_t id) CRITICAL_LEAVE(); return ERR_TASK_CANT_RESUME; } - - /* mark as ready - scheduler will find it */ - task->state = TASK_READY; + /* Enqueue resumed task into ready queue */ + sched_enqueue_task(task); CRITICAL_LEAVE(); return ERR_OK; @@ -874,6 +916,9 @@ void _sched_block(queue_t *wait_q) tcb_t *self = kcb->task_current->data; + /* Remove node from ready queue */ + sched_dequeue_task(self); + if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION); From 0e1a10f27817a494ab1b137e05e463a7eb3f156c Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Tue, 21 Oct 2025 16:50:12 +0800 Subject: [PATCH 05/19] Add sched_migrate_task() helper This commit introduces a new API, sched_migrate_task(), which enables migration of a task between ready queues of different priority levels. The function safely removes the task from its current ready queue and enqueues it into the target queue, updating the corresponding RR cursor and ready bitmap to maintain scheduler consistency. This helper will be used in mo_task_priority() and other task management routines that adjust task priority dynamically. Future improvement: The current enqueue path allocates a new list node for each task insertion based on its TCB pointer. 
In the future, this can be optimized by directly transferring or
reusing the existing list node between ready queues, eliminating the
need for additional malloc() and free() operations during priority
migrations.
---
 kernel/task.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 38c6bef9..0d596db9 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -406,6 +406,27 @@ static void sched_dequeue_task(tcb_t *task)
     return;
 }
 
+/* Task migration from origin to new priority ready queue */
+static void sched_migrate_task(tcb_t *task, int16_t priority)
+{
+    if (unlikely(!task || !is_valid_priority(priority)))
+        return;
+
+    if (task->prio == priority)
+        return;
+
+    /* Remove task node from origin ready queue */
+    sched_dequeue_task(task);
+
+    /* Update new properties */
+    task->prio = priority;
+    task->prio_level = extract_priority_level(priority);
+
+    /* Enqueue task node into new priority ready queue */
+    sched_enqueue_task(task);
+    return;
+}
+
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {

From 0da3eb0c8ffa4b070394630a39be3ae998db3e9f Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 22 Oct 2025 13:23:40 +0800
Subject: [PATCH 06/19] Use sched_migrate_task() in mo_task_priority()

This change refactors the priority update process in mo_task_priority()
to include early-return checks and proper task migration handling.

- Early-return conditions:
  * Prevent modification of the idle task.
  * Disallow assigning TASK_PRIO_IDLE to non-idle tasks.
  The idle task is created by idle_task_init() during system startup
  and must retain its fixed priority.

- Task migration:
  If the priority-changed task resides in a ready queue (TASK_READY or
  TASK_RUNNING), sched_migrate_task() is called to move it to the queue
  corresponding to the new priority.

- Running task behavior:
  When the current running task changes its own priority, it yields the
  CPU so the scheduler can dispatch the next highest-priority task.
---
 kernel/task.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 0d596db9..2d390ed7 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -847,12 +847,22 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
         return ERR_TASK_NOT_FOUND;
     }
 
+    bool is_current = (kcb->task_current->data == task);
+
+    /* Migrate task between ready queues if it is ready/running */
+    if (task->state == TASK_RUNNING || task->state == TASK_READY)
+        sched_migrate_task(task, priority);
+
     /* Update priority and level */
     task->prio = priority;
     task->prio_level = extract_priority_level(priority);
     task->time_slice = get_priority_timeslice(task->prio_level);
 
     CRITICAL_LEAVE();
+
+    if (is_current)
+        mo_task_yield();
+
     return ERR_OK;
 }
 

From 86e79904638c66ace0c951b842fdae395ad1c871 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Tue, 21 Oct 2025 16:33:35 +0800
Subject: [PATCH 07/19] Add idle task and initialization API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit introduces the system idle task and its initialization API
(idle_task_init()). The idle task serves as the default execution
context when no other runnable tasks exist in the system. The
sched_idle() function supports both preemptive and cooperative modes.

In kcb_t, a list node named task_idle is added to record the idle task
sentinel. The idle task never enters any ready queue and its priority
level cannot be changed.

When idle_task_init() is called, the idle task is initialized as the
first execution context.
This eliminates the need for additional APIs in main() to set up the initial high-priority task during system launch. This design allows task priorities to be adjusted safely during app_main(), while keeping the scheduler’s entry point consistent. --- include/sys/task.h | 17 +++++++++++++ kernel/task.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index 1b4ff9a5..6e2d477d 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -117,6 +117,9 @@ typedef struct { /* Weighted Round-Robin State per Priority Level */ list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ + + /* System idle task */ + list_node_t task_idle; } kcb_t; /* Global pointer to the singleton Kernel Control Block */ @@ -300,3 +303,17 @@ void _sched_block(queue_t *wait_q); * Returns 'true' to enable preemptive scheduling, or 'false' for cooperative */ int32_t app_main(void); + +/* Initialize the idle task + * + * This function statically creates and initializes the idle task structure. + * It should be called once during system startup. + * + * The idle task is a permanent system task that runs when no other + * ready tasks exist. It is never enqueued into any ready queue and + * cannot be suspended, canceled, or priority modified. + * + * Only one idle task exists per hart. Its priority is fixed to the + * lowest level and its time slice is zero. + */ +void idle_task_init(void); \ No newline at end of file diff --git a/kernel/task.c b/kernel/task.c index 2d390ed7..b62ec36d 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -456,6 +456,20 @@ void sched_wakeup_task(tcb_t *task) sched_enqueue_task(task); } +/* System idle task, it will be executed when no ready tasks in ready queue */ +static void sched_idle(void) +{ + if (!kcb->preemptive) + /* Cooperative mode idle */ + while (1) + mo_task_yield(); + + /* Preemptive mode idle */ + while (1) + mo_task_wfi(); +} + + /* Efficient Round-Robin Task Selection with O(n) Complexity * * Selects the next ready task using circular traversal of the master task list. 
@@ -617,6 +631,55 @@ static bool init_task_stack(tcb_t *tcb, size_t stack_size)
     return true;
 }
 
+/* Initialize idle task */
+void idle_task_init(void)
+{
+    /* Ensure proper alignment */
+    size_t stack_size = DEFAULT_STACK_SIZE;
+    stack_size = (stack_size + 0xF) & ~0xFU;
+
+    /* Allocate and initialize TCB */
+    tcb_t *idle = malloc(sizeof(tcb_t));
+    if (!idle)
+        panic(ERR_TCB_ALLOC);
+
+    idle->entry = &sched_idle;
+    idle->delay = 0;
+    idle->rt_prio = NULL;
+    idle->state = TASK_READY;
+    idle->flags = 0;
+
+    /* Set idle task priority */
+    idle->prio = TASK_PRIO_IDLE;
+    idle->prio_level = 0;
+    idle->time_slice = 0;
+
+    /* Set idle task id and task count */
+    idle->id = kcb->next_tid++;
+    kcb->task_count++;
+
+    /* Initialize stack */
+    if (!init_task_stack(idle, stack_size)) {
+        free(idle);
+        panic(ERR_STACK_ALLOC);
+    }
+
+    /* Bind idle task into kcb */
+    kcb->task_idle.data = idle;
+
+    /* Initialize idle task execution context */
+    hal_context_init(&idle->context, (size_t) idle->stack, stack_size,
+                     (size_t) &sched_idle);
+
+    printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, &sched_idle,
+           idle->stack, (unsigned int) stack_size);
+
+    if (!kcb->task_current)
+        kcb->task_current = kcb->harts->task_idle;
+
+    return;
+}
+
 /* Task Management API */
 
 int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)

From c432f0cd83be8b9bd94e6305edffa506447cc6d0 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 00:42:35 +0800
Subject: [PATCH 08/19] Add sched_switch_to_idle() helper

When all ready queues are empty, the scheduler should switch to idle
mode and wait for incoming interrupts. This commit introduces a
dedicated helper to handle that transition, centralizing the logic and
improving readability of the scheduler path to idle.
---
 kernel/task.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index b62ec36d..e6bdd276 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -469,6 +469,14 @@ static void sched_idle(void)
         mo_task_wfi();
 }
 
+/* Switch to the idle task and return its TCB */
+static inline tcb_t *sched_switch_to_idle(void)
+{
+    kcb->task_current = &kcb->task_idle;
+    tcb_t *idle = kcb->task_idle.data;
+    idle->state = TASK_RUNNING;
+    return idle;
+}
 
 /* Efficient Round-Robin Task Selection with O(n) Complexity
  *

From 0b17e13a58a1ebd1d057f974d414dfadb3fb3434 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 26 Oct 2025 15:04:46 +0800
Subject: [PATCH 09/19] Add De Bruijn LUT for future O(1) priority selection

Prepare for O(1) bitmap index lookup by adding a 32-entry De Bruijn
sequence table. The table will be used in later commits to replace
iterative bit scanning.

No functional change in this patch.
---
 kernel/task.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index e6bdd276..43ab5151 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -330,6 +330,11 @@ void panic(int32_t ecode)
     hal_panic();
 }
 
+/* RISC-V optimized priority finding using De Bruijn sequence */
+static const uint8_t debruijn_lut[32] = {
+    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
+
 /* Weak aliases for context switching functions.
*/ void dispatch(void); void yield(void); From 9868e7d29856181da657c13006d9c6635a7cbe54 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 26 Oct 2025 15:17:41 +0800 Subject: [PATCH 10/19] Implement De Bruijn-based top priority helper Implement the helper function that uses a De Bruijn multiply-and-LUT approach to compute the index of the least-significant set bit in O(1) time complexity. This helper is not yet wired into the scheduler logic; integration will follow in a later commit. No functional change in this patch. --- kernel/task.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 43ab5151..447c2cd6 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -335,6 +335,18 @@ static const uint8_t debruijn_lut[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; +/* O(1) priority selection optimized for RISC-V */ +static inline uint8_t find_highest_ready_priority(uint32_t bitmap) +{ + /* Isolate rightmost set bit (highest priority) */ + uint32_t isolated = bitmap & (-bitmap); + + /* De Bruijn multiplication for O(1) bit position finding */ + uint32_t hash = (isolated * 0x077CB531U) >> 27; + + return debruijn_lut[hash & 0x1F]; +} + /* Weak aliases for context switching functions. */ void dispatch(void); void yield(void); From 75c93a9ec82ce6697a8345a1d1812cc97f9e2c4f Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 10:20:09 +0800 Subject: [PATCH 11/19] Make sched_wakeup_task() globally visible Previously, sched_wakeup_task() was limited to internal use within the scheduler module. This change makes it globally visible so that it can be reused in semaphore.c for task wake-up operations. --- include/sys/task.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/sys/task.h b/include/sys/task.h index 6e2d477d..377af2a1 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -316,4 +316,7 @@ int32_t app_main(void); * Only one idle task exists per hart. Its priority is fixed to the * lowest level and its time slice is zero. */ -void idle_task_init(void); \ No newline at end of file +void idle_task_init(void); + +/* Wake up and enqueue task into ready queue */ +void sched_wakeup_task(tcb_t *); \ No newline at end of file From 2844df32482c7338d9e74349ead2a7cc1087f3f5 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 10:25:06 +0800 Subject: [PATCH 12/19] Add sched_wakeup_task() in mo_sem_signal() Previously, mo_sem_signal() only changed the awakened task state to TASK_READY when a semaphore signal was triggered. In the new scheduler design, which selects runnable tasks from ready queues, the awakened task must also be enqueued for scheduling. This change invokes sched_wakeup_task() to perform the enqueue operation, ensuring the awakened task is properly inserted into the ready queue. 
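In outline, the signal path becomes the following (simplified sketch;
the wait-queue handling and yield decision around it are unchanged from
the existing function):

    tcb_t *awakened_task = /* dequeued from the semaphore wait queue */;
    if (likely(awakened_task->state == TASK_BLOCKED)) {
        sched_wakeup_task(awakened_task); /* TASK_READY + ready-queue insert */
        should_yield = true;
    }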
--- kernel/semaphore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372d..fbc3271c 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s) if (likely(awakened_task)) { /* Validate awakened task state consistency */ if (likely(awakened_task->state == TASK_BLOCKED)) { - awakened_task->state = TASK_READY; + sched_wakeup_task(awakened_task); should_yield = true; } else { /* Task state inconsistency - this should not happen */ From 72d3d91e3b6b8a6db0b5c1e849ccb6f13b31c829 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:07:58 +0800 Subject: [PATCH 13/19] Refactor mo_task_spawn() for O(1) scheduler support Previously, mo_task_spawn() only created a task and appended it to the global task list (kcb->tasks), assigning the first task directly from the global list node. This change adds a call to sched_enqueue_task() within the critical section to enqueue the task into the ready queue and safely initialize its scheduling attributes. The first task assignment is now aligned with the RR cursor mechanism to ensure consistency with the O(1) scheduler. --- kernel/task.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 447c2cd6..1f8b779f 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -765,8 +765,15 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) tcb->id = kcb->next_tid++; kcb->task_count++; /* Cached count of active tasks for quick access */ - if (!kcb->task_current) - kcb->task_current = node; + /* Binding ready queue node */ + tcb->rq_node.data = tcb; + + /* Push node to ready queue */ + sched_enqueue_task(tcb); + if (!kcb->task_current) { + kcb->task_current = kcb->harts->rr_cursors[tcb->prio_level]; + tcb->state = TASK_RUNNING; + } CRITICAL_LEAVE(); @@ -780,7 +787,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Add to cache and mark ready */ cache_task(tcb->id, tcb); - sched_enqueue_task(tcb); return tcb->id; } From 009b1678cd62db2920f91a375bb98b064e700f5a Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 22 Oct 2025 22:08:38 +0800 Subject: [PATCH 14/19] Refactor scheduler to RR cursor-based O(1) design Previously, the scheduler iterated through the global task list (kcb->tasks) to find the next TASK_READY task, resulting in O(N) selection time. This approach limited scalability and caused inconsistent task rotation under heavy load. The new scheduling process: 1. Check the ready bitmap and find the highest priority level. 2. Select the RR cursor node from the corresponding ready queue. 3. Advance the selected cursor node circularly. Why RR cursor instead of pop/enqueue rotation: - Fewer operations on the ready queue: compared to the pop/enqueue approach, which requires two function calls per switch, the RR cursor method only advances one pointer per scheduling cycle. - Cache friendly: always accesses the same cursor node, improving cache locality on hot paths. - Cycle deterministic: RR cursor design allows deterministic task rotation and enables potential future extensions such as cycle accounting or fairness-based algorithms. This change introduces a fully O(1) scheduler design based on per-priority ready queues and round-robin (RR) cursors. Each ready queue maintains its own cursor, allowing the scheduler to select the next runnable task in constant time. 
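For example, with three runnable tasks A, B, and C at the same priority
and the cursor initially at A:

    ready queue (circular): A -> B -> C -> (wraps to A)
    switch 1: run A, cursor advances to B
    switch 2: run B, cursor advances to C
    switch 3: run C, cursor wraps back to A

Each switch touches only the ready bitmap, one cursor pointer, and the
selected TCB, independent of the total number of tasks in the system.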
--- include/sys/task.h | 2 -- kernel/task.c | 61 ++++++++++++++++++++++++---------------------- 2 files changed, 32 insertions(+), 31 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index 377af2a1..6cbbcc1f 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -126,8 +126,6 @@ typedef struct { extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ diff --git a/kernel/task.c b/kernel/task.c index 1f8b779f..92fdac4b 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -495,20 +495,20 @@ static inline tcb_t *sched_switch_to_idle(void) return idle; } -/* Efficient Round-Robin Task Selection with O(n) Complexity +/* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * - * Selects the next ready task using circular traversal of the master task list. + * Selects the next ready task by advancing the per-priority round-robin + * cursor (rr_cursor) circularly using list API list_cnext(). * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. * * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. 
 */
 uint16_t sched_select_next_task(void)
 {
@@ -521,31 +521,34 @@ uint16_t sched_select_next_task(void)
     if (current_task->state == TASK_RUNNING)
         current_task->state = TASK_READY;
 
-    /* Round-robin search: find next ready task in the master task list */
-    list_node_t *start_node = kcb->task_current;
-    list_node_t *node = start_node;
-    int iterations = 0; /* Safety counter to prevent infinite loops */
+    /* Check the ready bitmap */
+    uint32_t bitmap = kcb->ready_bitmap;
+    if (unlikely(!bitmap))
+        panic(ERR_NO_TASKS);
 
-    do {
-        /* Move to next task (circular) */
-        node = list_cnext(kcb->tasks, node);
-        if (!node || !node->data)
-            continue;
+    /* Find top priority ready queue */
+    int top_prio_level = 0;
+    for (; !(bitmap & 1U); top_prio_level++, bitmap >>= 1)
+        ;
 
-        tcb_t *task = node->data;
+    list_node_t **cursor = &kcb->rr_cursors[top_prio_level];
+    list_t *rq = kcb->ready_queues[top_prio_level];
+    if (unlikely(!rq || !*cursor))
+        panic(ERR_NO_TASKS);
 
-        /* Skip non-ready tasks */
-        if (task->state != TASK_READY)
-            continue;
+    /* Update next task with top priority cursor */
+    kcb->task_current = *cursor;
 
-        /* Found a ready task */
-        kcb->task_current = node;
-        task->state = TASK_RUNNING;
-        task->time_slice = get_priority_timeslice(task->prio_level);
+    /* Advance top priority cursor to next task node */
+    *cursor = list_cnext(rq, *cursor);
 
-        return task->id;
+    /* Update new task properties */
+    tcb_t *new_task = kcb->task_current->data;
+    new_task->time_slice = get_priority_timeslice(new_task->prio_level);
+    new_task->state = TASK_RUNNING;
 
-    } while (node != start_node && ++iterations < SCHED_IMAX);
+    if (kcb->task_current)
+        return new_task->id;
 
     /* No ready tasks found - this should not happen in normal operation */
     panic(ERR_NO_TASKS);

From 22f7dedc8d306576328621f7330b584449fe31c4 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 01:00:02 +0800
Subject: [PATCH 15/19] Add sched_switch_to_idle() helper in the scheduler

Previously, when all ready queues were empty, the scheduler would
trigger a kernel panic. This condition should instead transition into
the idle task rather than panic. The new sched_switch_to_idle() helper
centralizes this logic, making the path to idle clearer and more
readable.
---
 kernel/task.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/task.c b/kernel/task.c
index 92fdac4b..9911075e 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -524,7 +524,7 @@ uint16_t sched_select_next_task(void)
     /* Check the ready bitmap */
     uint32_t bitmap = kcb->ready_bitmap;
     if (unlikely(!bitmap))
-        panic(ERR_NO_TASKS);
+        return sched_switch_to_idle()->id;
 
     /* Find top priority ready queue */

From 37e694613b7672a988a8204afff1486ceacfb02f Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 26 Oct 2025 15:22:56 +0800
Subject: [PATCH 16/19] Use De Bruijn-based top priority helper in scheduler

Replace the iterative bitmap scanning with the De Bruijn multiply+LUT
method via the new helper. This change makes top-priority selection
constant-time and deterministic.
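As a worked example (values follow from the LUT added in PATCH 09):
suppose priority levels 3 and 5 have runnable tasks, so bitmap = 0x28.
Then:

    isolated = 0x28 & (-0x28)             /* = 0x08, lowest set bit   */
    hash = (0x08 * 0x077CB531U) >> 27     /* = 0x07                   */
    debruijn_lut[0x07]                    /* = 3, top ready priority  */

One AND, one multiply, one shift, and one table load replace the
data-dependent scan loop, so the selection latency is the same no
matter which bits are set.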
---
 kernel/task.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index 9911075e..ae9146b9 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -527,9 +527,7 @@ uint16_t sched_select_next_task(void)
         return sched_switch_to_idle()->id;
 
     /* Find top priority ready queue */
-    int top_prio_level = 0;
-    for (; !(bitmap & 1U); top_prio_level++, bitmap >>= 1)
-        ;
+    uint8_t top_prio_level = find_highest_ready_priority(bitmap);
 
     list_node_t **cursor = &kcb->rr_cursors[top_prio_level];
     list_t *rq = kcb->ready_queues[top_prio_level];

From b3fadccd50c027c7e9c072f1fae7488747b838ec Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 02:06:08 +0800
Subject: [PATCH 17/19] Add idle_task_init() call in main()

The idle task is now initialized in main() during system startup. This
ensures that the scheduler always has a valid execution context before
any user or application tasks are created. Initializing the idle task
early guarantees a safe fallback path when no runnable tasks exist and
keeps the scheduler entry point consistent.
---
 kernel/main.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel/main.c b/kernel/main.c
index efa46ff7..7b63df35 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -23,6 +23,9 @@ int32_t main(void)
     printf("Heap initialized, %u bytes available\n",
            (unsigned int) (size_t) &_heap_size);
 
+    /* Initialize idle task */
+    idle_task_init();
+
     /* Call the application's main entry point to create initial tasks. */
     kcb->preemptive = (bool) app_main();
     printf("Scheduler mode: %s\n",

From 654c0491103838b36314cef46e271367fbaf4eff Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 02:04:47 +0800
Subject: [PATCH 18/19] Refactor launch sequence in main() for scheduler initialization

This change sets up the scheduler state during system startup by
assigning kcb->task_current to kcb->harts->task_idle and dispatching to
the idle task as the first execution context. This commit also keeps
the scheduling entry path consistent between startup and runtime.
---
 kernel/main.c | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/kernel/main.c b/kernel/main.c
index 7b63df35..3baccd52 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -23,35 +23,29 @@ int32_t main(void)
     printf("Heap initialized, %u bytes available\n",
            (unsigned int) (size_t) &_heap_size);
 
-    /* Initialize idle task */
+    /* Initialize the first current task as the idle sentinel node.
+     * This ensures a valid entry point before any real task runs.
+     */
     idle_task_init();
+    kcb->task_current = kcb->harts->task_idle;
 
     /* Call the application's main entry point to create initial tasks. */
     kcb->preemptive = (bool) app_main();
     printf("Scheduler mode: %s\n",
            kcb->preemptive ? "Preemptive" : "Cooperative");
 
-    /* Verify that the application created at least one task.
-     * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never
-     * successfully called.
-     */
-    if (!kcb->task_current)
-        panic(ERR_NO_TASKS);
-
     /* Save the kernel's context. This is a formality to establish a base
      * execution context before launching the first real task. */
     setjmp(kcb->context);
 
-    /* Launch the first task.
-     * 'kcb->task_current' was set by the first call to mo_task_spawn.
-     * This function transfers control and does not return.
+    /* Launch the first task (the idle task); the scheduler will then
+     * select the highest-priority ready task. This function transfers
+     * control and does not return.
*/ - tcb_t *first_task = kcb->task_current->data; - if (!first_task) - panic(ERR_NO_TASKS); + tcb_t *idle = kcb->task_current->data; + idle->state = TASK_RUNNING; - hal_dispatch_init(first_task->context); + hal_dispatch_init(idle->context); /* This line should be unreachable. */ panic(ERR_UNKNOWN); From 0d8c856e7e272b480a2661e2de6a2ac5cbaab8aa Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 02:09:33 +0800 Subject: [PATCH 19/19] Remove first-task binding from task initialization Previously, both mo_task_spawn() and idle_task_init() implicitly bound their created tasks to kcb->task_current as the first execution context. This behavior caused ambiguity with the scheduler, which is now responsible for determining the active task during system startup. This change removes the initial binding logic from both functions, allowing the startup process (main()) to explicitly assign kcb->task_current (typically to the idle task) during launch. This ensures a single, centralized initialization flow and improves the separation between task creation and scheduling control. --- kernel/main.c | 2 +- kernel/task.c | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/kernel/main.c b/kernel/main.c index 3baccd52..2da73bf3 100644 --- a/kernel/main.c +++ b/kernel/main.c @@ -27,7 +27,7 @@ int32_t main(void) * This ensures a valid entry point before any real task runs. */ idle_task_init(); - kcb->task_current = kcb->harts->task_idle; + kcb->task_current = &kcb->task_idle; /* Call the application's main entry point to create initial tasks. */ kcb->preemptive = (bool) app_main(); diff --git a/kernel/task.c b/kernel/task.c index ae9146b9..0f0881d1 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -700,9 +700,6 @@ void idle_task_init(void) printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, &sched_idle, idle->stack, (unsigned int) stack_size); - if (!kcb->task_current) - kcb->task_current = kcb->harts->task_idle; - return; } @@ -771,10 +768,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req) /* Push node to ready queue */ sched_enqueue_task(tcb); - if (!kcb->task_current) { - kcb->task_current = kcb->harts->rr_cursors[tcb->prio_level]; - tcb->state = TASK_RUNNING; - } CRITICAL_LEAVE();
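With the full series applied, the startup sequence reduces to the
following flow (a sketch of the resulting call order, per the main.c
diffs above):

    main()
        idle_task_init();                    /* create idle TCB, bind kcb->task_idle */
        kcb->task_current = &kcb->task_idle;
        kcb->preemptive = (bool) app_main(); /* tasks spawn and enqueue themselves */
        setjmp(kcb->context);                /* establish base context */
        hal_dispatch_init(idle->context);    /* launch idle; the scheduler then
                                                selects the highest-priority
                                                ready task */

Task creation and scheduling control stay separated: mo_task_spawn()
only enqueues, idle_task_init() only constructs the idle context, and
main() alone decides the first execution context.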