kernel: support memory mapped stacks #70810

Merged · 13 commits · merged Apr 10, 2024 · Changes shown from 1 commit
7 changes: 7 additions & 0 deletions kernel/Kconfig
@@ -197,6 +197,13 @@ config THREAD_ABORT_HOOK
help
Used by portability layers to modify locally managed status mask.

config THREAD_ABORT_NEED_CLEANUP
bool
help
This option enables the bits needed to clean up the current thread
when k_thread_abort(_current) is called, as that cleanup cannot
run on the aborting thread's own stack.

config THREAD_CUSTOM_DATA
bool "Thread custom data"
help
29 changes: 29 additions & 0 deletions kernel/include/kernel_internal.h
@@ -297,6 +297,35 @@ int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

#if defined(CONFIG_THREAD_ABORT_NEED_CLEANUP)
/**
* Perform cleanup at the end of k_thread_abort().
*
* This performs the additional cleanup steps at the end of
* k_thread_abort() that require the thread to no longer be running.
* If the target thread is not the current running thread, the cleanup
* steps are performed immediately. However, if the target thread is
* the current running thread (e.g. k_thread_abort(_current)), the
* cleanup steps are deferred and finished later in another context.
*
* @param thread Pointer to thread to be cleaned up.
*/
void k_thread_abort_cleanup(struct k_thread *thread);
Contributor commented:
Good example here: if your stack creation is a library level thing, you can just provide a top level wrapper or whatever and clean it up on exit. No need to muck with thread lifecycle stuff in the kernel.
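A minimal sketch of that suggestion, assuming a hypothetical library that memory-maps its own stacks. Every name here (my_lib_thread_ctx, my_lib_mark_stack_for_unmap(), my_lib_entry_wrapper()) is illustrative and not part of this PR:

#include <zephyr/kernel.h>

/* Hypothetical library bookkeeping; illustrative names only. */
void my_lib_mark_stack_for_unmap(void *stack_region);

struct my_lib_thread_ctx {
	k_thread_entry_t user_entry; /* the user's real entry point */
	void *stack_region;          /* the mapping to release */
};

static void my_lib_entry_wrapper(void *p1, void *p2, void *p3)
{
	struct my_lib_thread_ctx *ctx = p1;

	/* Run the user's code on the library-managed stack. */
	ctx->user_entry(p2, p3, NULL);

	/* User code has returned, so the library can queue its stack
	 * mapping for release without any kernel lifecycle hooks.
	 * (It cannot unmap here, since it is still running on it.)
	 */
	my_lib_mark_stack_for_unmap(ctx->stack_region);
}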


/**
* Check if thread is the same as the one waiting for cleanup.
*
* This is used to guard against reusing the same thread object
* before its previous cleanup has finished. If the thread matches
* the one pending cleanup, the pending cleanup is performed first
* so that the thread object can be safely reused. This should
* mainly be called during thread creation.
*
* @param thread Pointer to thread to be checked.
*/
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef __cplusplus
}
#endif
1 change: 1 addition & 0 deletions kernel/include/ksched.h
@@ -59,6 +59,7 @@ void z_sched_abort(struct k_thread *thread);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_ready_thread_locked(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);
11 changes: 11 additions & 0 deletions kernel/sched.c
@@ -627,6 +627,13 @@ static void ready_thread(struct k_thread *thread)
}
}

/* The caller must already be holding _sched_spinlock. */
void z_ready_thread_locked(struct k_thread *thread)
{
if (!thread_active_elsewhere(thread)) {
ready_thread(thread);
}
}
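For context: z_ready_thread() just below takes _sched_spinlock itself, so this new _locked variant exists for callers that already hold the lock. A minimal sketch of the intended calling pattern, with a hypothetical surrounding function:

static void wake_waiter_while_locked(struct k_thread *waiter)
{
	K_SPINLOCK(&_sched_spinlock) {
		/* ... other updates to scheduler state ... */

		/* Lock already held: use the _locked variant.
		 * Calling z_ready_thread() here would attempt to
		 * take _sched_spinlock a second time.
		 */
		z_ready_thread_locked(waiter);
	}
}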

void z_ready_thread(struct k_thread *thread)
{
K_SPINLOCK(&_sched_spinlock) {
@@ -1641,6 +1648,10 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
k_object_uninit(thread->stack_obj);
k_object_uninit(thread);
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
k_thread_abort_cleanup(thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
}
}

79 changes: 79 additions & 0 deletions kernel/thread.c
@@ -473,6 +473,10 @@ char *z_setup_new_thread(struct k_thread *new_thread,

Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
k_thread_abort_cleanup_check_reuse(new_thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef CONFIG_OBJ_CORE_THREAD
k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
@@ -939,3 +943,78 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)

return 0;
}

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
/** Pointer to thread which needs to be cleaned up. */
static struct k_thread *thread_to_cleanup;

/** Spinlock for thread abort cleanup. */
static struct k_spinlock thread_cleanup_lock;

void defer_thread_cleanup(struct k_thread *thread)
{
/* Note when adding new deferred cleanup steps:
* - The thread object may have been overwritten by the time
* the actual cleanup is being done (e.g. thread object
* allocated on a stack). So stash any necessary data here
* that will be used in the actual cleanup steps.
*/
thread_to_cleanup = thread;
}

void do_thread_cleanup(struct k_thread *thread)
{
/* Note when adding new actual cleanup steps:
* - The thread object may have been overwritten when this is
* called. So avoid using any data from the thread object.
*/
ARG_UNUSED(thread);
}
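To make the stash-the-data rule above concrete, a deferred cleanup step that unmaps a memory-mapped stack might look roughly like this. It assumes CONFIG_THREAD_STACK_INFO for thread->stack_info, and stashed_stack_addr, stashed_stack_size, and arch_unmap_stack() are hypothetical, not code from this PR:

/* Hypothetical unmap primitive; not part of this PR. */
void arch_unmap_stack(void *addr, size_t size);

/* Copies stashed at defer time, while the thread object is valid. */
static void *stashed_stack_addr;
static size_t stashed_stack_size;

static void defer_thread_cleanup_sketch(struct k_thread *thread)
{
	/* Stash now: by cleanup time the thread object may have
	 * been reused or overwritten.
	 */
	stashed_stack_addr = (void *)thread->stack_info.start;
	stashed_stack_size = thread->stack_info.size;
	thread_to_cleanup = thread;
}

static void do_thread_cleanup_sketch(struct k_thread *thread)
{
	/* Use only the stashed copies, never the thread object. */
	ARG_UNUSED(thread);
	arch_unmap_stack(stashed_stack_addr, stashed_stack_size);
}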

void k_thread_abort_cleanup(struct k_thread *thread)
{
K_SPINLOCK(&thread_cleanup_lock) {
if (thread_to_cleanup != NULL) {
/* Finish the pending one first. */
do_thread_cleanup(thread_to_cleanup);
thread_to_cleanup = NULL;
}

if (thread == _current) {
/* Cleanup must be deferred for the current running thread, as
* the cleanup itself might result in an exception. The actual
* cleanup will be done the next time k_thread_abort() is
* called, or at thread creation if the same thread object is
* being reused. This ensures the cleanup code no longer needs
* this thread's stack. It is not exactly ideal, as the stack
* may remain memory mapped for a while. However, it is a
* simple solution that avoids (a) working around the scheduler
* lock during k_thread_abort(), (b) creating another thread to
* perform the cleanup, and (c) requiring architecture support
* (e.g. via an exception).
*/
defer_thread_cleanup(thread);
} else {
/* Not the current running thread, so we are safe to do
* cleanups.
*/
do_thread_cleanup(thread);
}
}
}
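From an application's point of view, the two branches compose like this rough walkthrough (illustrative, not a test from this PR):

void self_abort_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* Branch 1: aborting ourselves only *defers* the cleanup,
	 * since we are still executing on this thread's stack.
	 */
	k_thread_abort(k_current_get());
}

void reaper(struct k_thread *victim)
{
	/* Branch 2: aborting another thread first finishes any
	 * cleanup still pending from a prior self-abort, then
	 * cleans up the victim immediately.
	 */
	k_thread_abort(victim);
}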

void k_thread_abort_cleanup_check_reuse(struct k_thread *thread)
{
K_SPINLOCK(&thread_cleanup_lock) {
/* This guards against reuse of the same thread object by making
* sure any pending cleanup for it is finished before the thread
* object can be reused.
*/
if (thread_to_cleanup == thread) {
do_thread_cleanup(thread_to_cleanup);
thread_to_cleanup = NULL;
}
}
}

#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */