Skip to content
Permalink
Browse files

linker generated list: provide an iterator to simplify list access

Given that the section name and boundary symbols can be inferred from
the struct object name, it makes sense to create an iterator that
abstracts away the access details and reduces the possibility of
mistakes.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
  • Loading branch information...
Nicolas Pitre authored and andrewboie committed Jun 3, 2019
1 parent b1d3742 commit aa9228854fc8ee8593680cb11a2bcb7a405ec440
@@ -33,9 +33,9 @@

SECTION_DATA_PROLOGUE(_static_thread_area,,SUBALIGN(4))
{
_static_thread_data_list_start = .;
__static_thread_data_list_start = .;
KEEP(*(SORT_BY_NAME(".__static_thread_data.static.*")))
_static_thread_data_list_end = .;
__static_thread_data_list_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

#ifdef CONFIG_USERSPACE
@@ -38,9 +38,9 @@
*/
SECTION_PROLOGUE(object_access,,)
{
__object_access_start = .;
__k_object_assignment_list_start = .;
KEEP(*(".__k_object_assignment.*"))
__object_access_end = .;
__k_object_assignment_list_end = .;
} GROUP_LINK_IN(ROMABLE_REGION)
#endif

@@ -172,4 +172,20 @@
Z_DECL_ALIGN(struct struct_type) name \
__in_section(_##struct_type, static, name) __used

/*
 * Iterator for structure instances gathered by Z_STRUCT_SECTION_ITERABLE().
 * The linker must provide a _<struct_type>_list_start symbol and a
 * _<struct_type>_list_end symbol to mark the start and the end of the
 * list of struct objects to iterate over.
 *
 * The extern declarations make the linker-generated boundary symbols
 * visible at the point of use, so callers need no separate declaration.
 * The loop condition is a GCC statement expression: on every pass it
 * first runs __ASSERT to verify the iterator has not walked past the
 * section end (which would indicate a malformed section/list), then
 * yields the actual termination test.
 */
#define Z_STRUCT_SECTION_FOREACH(struct_type, iterator) \
extern struct struct_type _CONCAT(_##struct_type, _list_start)[]; \
extern struct struct_type _CONCAT(_##struct_type, _list_end)[]; \
for (struct struct_type *iterator = \
_CONCAT(_##struct_type, _list_start); \
({ __ASSERT(iterator <= _CONCAT(_##struct_type, _list_end), \
"unexpected list end location"); \
iterator < _CONCAT(_##struct_type, _list_end); }); \
iterator++)

#endif /* ZEPHYR_INCLUDE_TOOLCHAIN_COMMON_H_ */
@@ -43,9 +43,6 @@ static inline void mbox_async_free(struct k_mbox_async *async)

#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */

extern struct k_mbox _k_mbox_list_start[];
extern struct k_mbox _k_mbox_list_end[];

#ifdef CONFIG_OBJECT_TRACING
struct k_mbox *_trace_list_k_mbox;
#endif /* CONFIG_OBJECT_TRACING */
@@ -87,9 +84,7 @@ static int init_mbox_module(struct device *dev)
/* Complete initialization of statically defined mailboxes. */

#ifdef CONFIG_OBJECT_TRACING
struct k_mbox *mbox;

for (mbox = _k_mbox_list_start; mbox < _k_mbox_list_end; mbox++) {
Z_STRUCT_SECTION_FOREACH(k_mbox, mbox) {
SYS_TRACING_OBJ_INIT(k_mbox, mbox);
}
#endif /* CONFIG_OBJECT_TRACING */
@@ -14,9 +14,6 @@
#include <ksched.h>
#include <init.h>

extern struct k_mem_slab _k_mem_slab_list_start[];
extern struct k_mem_slab _k_mem_slab_list_end[];

static struct k_spinlock lock;

#ifdef CONFIG_OBJECT_TRACING
@@ -57,11 +54,7 @@ static int init_mem_slab_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_mem_slab *slab;

for (slab = _k_mem_slab_list_start;
slab < _k_mem_slab_list_end;
slab++) {
Z_STRUCT_SECTION_FOREACH(k_mem_slab, slab) {
create_free_list(slab);
SYS_TRACING_OBJ_INIT(k_mem_slab, slab);
z_object_init(slab);
@@ -13,19 +13,17 @@
#include <misc/math_extras.h>
#include <stdbool.h>

/* Linker-defined symbols bound the static pool structs */
extern struct k_mem_pool _k_mem_pool_list_start[];
extern struct k_mem_pool _k_mem_pool_list_end[];

static struct k_spinlock lock;

/*
 * Return the statically defined memory pool with the given index.
 *
 * All static k_mem_pool instances are placed by the linker into one
 * contiguous array starting at _k_mem_pool_list_start, so an id is
 * simply an offset into that array.
 */
static struct k_mem_pool *get_pool(int id)
{
	extern struct k_mem_pool _k_mem_pool_list_start[];

	return _k_mem_pool_list_start + id;
}

/*
 * Map a static memory pool pointer back to its index within the
 * linker-generated pool array (the inverse of get_pool()).
 */
static int pool_id(struct k_mem_pool *pool)
{
	extern struct k_mem_pool _k_mem_pool_list_start[];

	return pool - _k_mem_pool_list_start;
}

@@ -38,9 +36,8 @@ static void k_mem_pool_init(struct k_mem_pool *p)
int init_static_pools(struct device *unused)
{
ARG_UNUSED(unused);
struct k_mem_pool *p;

for (p = _k_mem_pool_list_start; p < _k_mem_pool_list_end; p++) {
Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
k_mem_pool_init(p);
}

@@ -23,9 +23,6 @@
#include <syscall_handler.h>
#include <kernel_internal.h>

extern struct k_msgq _k_msgq_list_start[];
extern struct k_msgq _k_msgq_list_end[];

#ifdef CONFIG_OBJECT_TRACING

struct k_msgq *_trace_list_k_msgq;
@@ -37,9 +34,7 @@ static int init_msgq_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_msgq *msgq;

for (msgq = _k_msgq_list_start; msgq < _k_msgq_list_end; msgq++) {
Z_STRUCT_SECTION_FOREACH(k_msgq, msgq) {
SYS_TRACING_OBJ_INIT(k_msgq, msgq);
}
return 0;
@@ -38,9 +38,6 @@
#include <syscall_handler.h>
#include <tracing.h>

extern struct k_mutex _k_mutex_list_start[];
extern struct k_mutex _k_mutex_list_end[];

/* We use a global spinlock here because some of the synchronization
* is protecting things like owner thread priorities which aren't
* "part of" a single k_mutex. Should move those bits of the API
@@ -59,9 +56,7 @@ static int init_mutex_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_mutex *mutex;

for (mutex = _k_mutex_list_start; mutex < _k_mutex_list_end; mutex++) {
Z_STRUCT_SECTION_FOREACH(k_mutex, mutex) {
SYS_TRACING_OBJ_INIT(k_mutex, mutex);
}
return 0;
@@ -37,9 +37,6 @@ struct k_pipe_async {
struct k_pipe_desc desc; /* Pipe message descriptor */
};

extern struct k_pipe _k_pipe_list_start[];
extern struct k_pipe _k_pipe_list_end[];

#ifdef CONFIG_OBJECT_TRACING
struct k_pipe *_trace_list_k_pipe;
#endif /* CONFIG_OBJECT_TRACING */
@@ -118,9 +115,7 @@ static int init_pipes_module(struct device *dev)
/* Complete initialization of statically defined pipes. */

#ifdef CONFIG_OBJECT_TRACING
struct k_pipe *pipe;

for (pipe = _k_pipe_list_start; pipe < _k_pipe_list_end; pipe++) {
Z_STRUCT_SECTION_FOREACH(k_pipe, pipe) {
SYS_TRACING_OBJ_INIT(k_pipe, pipe);
}
#endif /* CONFIG_OBJECT_TRACING */
@@ -23,9 +23,6 @@
#include <syscall_handler.h>
#include <kernel_internal.h>

extern struct k_queue _k_queue_list_start[];
extern struct k_queue _k_queue_list_end[];

struct alloc_node {
sys_sfnode_t node;
void *data;
@@ -70,9 +67,7 @@ static int init_queue_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_queue *queue;

for (queue = _k_queue_list_start; queue < _k_queue_list_end; queue++) {
Z_STRUCT_SECTION_FOREACH(k_queue, queue) {
SYS_TRACING_OBJ_INIT(k_queue, queue);
}
return 0;
@@ -29,9 +29,6 @@
#include <syscall_handler.h>
#include <tracing.h>

extern struct k_sem _k_sem_list_start[];
extern struct k_sem _k_sem_list_end[];

/* We use a system-wide lock to synchronize semaphores, which has
* unfortunate performance impact vs. using a per-object lock
* (semaphores are *very* widely used). But per-object locks require
@@ -52,9 +49,7 @@ static int init_sem_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_sem *sem;

for (sem = _k_sem_list_start; sem < _k_sem_list_end; sem++) {
Z_STRUCT_SECTION_FOREACH(k_sem, sem) {
SYS_TRACING_OBJ_INIT(k_sem, sem);
}
return 0;
@@ -20,9 +20,6 @@
#include <syscall_handler.h>
#include <kernel_internal.h>

extern struct k_stack _k_stack_list_start[];
extern struct k_stack _k_stack_list_end[];

#ifdef CONFIG_OBJECT_TRACING

struct k_stack *_trace_list_k_stack;
@@ -34,9 +31,7 @@ static int init_stack_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_stack *stack;

for (stack = _k_stack_list_start; stack < _k_stack_list_end; stack++) {
Z_STRUCT_SECTION_FOREACH(k_stack, stack) {
SYS_TRACING_OBJ_INIT(k_stack, stack);
}
return 0;
@@ -32,16 +32,10 @@
#include <tracing.h>
#include <stdbool.h>

extern struct _static_thread_data _static_thread_data_list_start[];
extern struct _static_thread_data _static_thread_data_list_end[];

static struct k_spinlock lock;

#define _FOREACH_STATIC_THREAD(thread_data) \
for (struct _static_thread_data *thread_data = \
_static_thread_data_list_start; \
thread_data < _static_thread_data_list_end; \
thread_data++)
Z_STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
@@ -614,16 +608,10 @@ void z_thread_single_abort(struct k_thread *thread)

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE
extern char __object_access_start[];
extern char __object_access_end[];

static void grant_static_access(void)
{
struct _k_object_assignment *pos;

for (pos = (struct _k_object_assignment *)__object_access_start;
pos < (struct _k_object_assignment *)__object_access_end;
pos++) {
Z_STRUCT_SECTION_FOREACH(_k_object_assignment, pos) {
for (int i = 0; pos->objects[i] != NULL; i++) {
k_object_access_grant(pos->objects[i],
pos->thread);
@@ -12,9 +12,6 @@
#include <stdbool.h>
#include <spinlock.h>

extern struct k_timer _k_timer_list_start[];
extern struct k_timer _k_timer_list_end[];

static struct k_spinlock lock;

#ifdef CONFIG_OBJECT_TRACING
@@ -28,9 +25,7 @@ static int init_timer_module(struct device *dev)
{
ARG_UNUSED(dev);

struct k_timer *timer;

for (timer = _k_timer_list_start; timer < _k_timer_list_end; timer++) {
Z_STRUCT_SECTION_FOREACH(k_timer, timer) {
SYS_TRACING_OBJ_INIT(k_timer, timer);
}
return 0;

0 comments on commit aa92288

Please sign in to comment.
You can’t perform that action at this time.