Skip to content
Permalink
Browse files

cleanup: include/: move misc/mutex.h to sys/mutex.h

Move misc/mutex.h to sys/mutex.h and
create a shim at the old path for backward compatibility.

No functional changes to the headers.
A warning in the shim can be controlled with CONFIG_COMPAT_INCLUDES.

Related to #16539

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
  • Loading branch information...
nashif committed Jun 26, 2019
1 parent 08ee8b0 commit 0c9e280547c30c8b640aa4c93116d5e9d48d6fff
@@ -3,147 +3,13 @@
*
* SPDX-License-Identifier: Apache-2.0
*/

#ifndef ZEPHYR_INCLUDE_MISC_MUTEX_H_
#define ZEPHYR_INCLUDE_MISC_MUTEX_H_

/*
* sys_mutex behaves almost exactly like k_mutex, with the added advantage
* that a sys_mutex instance can reside in user memory.
*
* Further enhancements will support locking/unlocking uncontended sys_mutexes
* with simple atomic ops instead of syscalls, similar to Linux's
* FUTEX_LOCK_PI and FUTEX_UNLOCK_PI
*/

#ifdef CONFIG_USERSPACE
#include <sys/atomic.h>
#include <zephyr/types.h>

/* A mutex object that may reside in user memory (see header comment). */
struct sys_mutex {
/* Currently unused, but will be used to store state for fast mutexes
* that can be locked/unlocked with atomic ops if there is no
* contention
*/
atomic_t val;
};

/* Statically define a sys_mutex; no runtime initialization is needed
* (kernel-side data structures are set up at boot).
*/
#define SYS_MUTEX_DEFINE(name) \
struct sys_mutex name

/**
 * @brief Initialize a mutex.
 *
 * This routine initializes a mutex object, prior to its first use.
 *
 * Upon completion, the mutex is available and does not have an owner.
 *
 * This routine is only necessary to call when userspace is disabled
 * and the mutex was not created with SYS_MUTEX_DEFINE().
 *
 * With CONFIG_USERSPACE enabled (this branch) the call is a no-op:
 * kernel-side data structures are initialized at boot.
 *
 * @param mutex Address of the mutex.
 *
 * @return N/A
 */
static inline void sys_mutex_init(struct sys_mutex *mutex)
{
ARG_UNUSED(mutex);

/* Nothing to do, kernel-side data structures are initialized at
* boot
*/
}

/* Syscalls backing sys_mutex_lock()/sys_mutex_unlock(); the handlers
* live kernel-side and are not visible in this header.
*/
__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout);

__syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);

/**
 * @brief Lock a mutex.
 *
 * This routine locks @a mutex. If the mutex is locked by another thread,
 * the calling thread waits until the mutex becomes available or until
 * a timeout occurs.
 *
 * A thread is permitted to lock a mutex it has already locked. The operation
 * completes immediately and the lock count is increased by 1.
 *
 * @param mutex Address of the mutex, which may reside in user memory
 * @param timeout Waiting period to lock the mutex (in milliseconds),
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Mutex locked.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EACCES Caller has no access to provided mutex address
 * @retval -EINVAL Provided mutex not recognized by the kernel
 */
static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
{
/* For now, make the syscall unconditionally; a future fast path may
* take an uncontended lock with atomic ops instead (see header comment).
*/
return z_sys_mutex_kernel_lock(mutex, timeout);
}

/**
 * @brief Unlock a mutex.
 *
 * This routine unlocks @a mutex. The mutex must already be locked by the
 * calling thread.
 *
 * The mutex cannot be claimed by another thread until it has been unlocked by
 * the calling thread as many times as it was previously locked by that
 * thread.
 *
 * @param mutex Address of the mutex, which may reside in user memory
 * @retval 0 Mutex unlocked.
 * @retval -EACCES Caller has no access to provided mutex address
 * @retval -EINVAL Provided mutex not recognized by the kernel or mutex wasn't
 *                 locked
 * @retval -EPERM Caller does not own the mutex
 */
static inline int sys_mutex_unlock(struct sys_mutex *mutex)
{
/* For now, make the syscall unconditionally */
return z_sys_mutex_kernel_unlock(mutex);
}

#include <syscalls/mutex.h>

#else
#include <kernel.h>
#include <kernel_structs.h>

/* Without CONFIG_USERSPACE, sys_mutex is a thin wrapper around k_mutex. */
struct sys_mutex {
struct k_mutex kernel_mutex;
};

/* Statically define and initialize a sys_mutex via the k_mutex
* static initializer.
*/
#define SYS_MUTEX_DEFINE(name) \
struct sys_mutex name = { \
.kernel_mutex = _K_MUTEX_INITIALIZER(name.kernel_mutex) \
}

/* Initialize a sys_mutex by initializing the wrapped k_mutex. */
static inline void sys_mutex_init(struct sys_mutex *mutex)
{
k_mutex_init(&mutex->kernel_mutex);
}

/* Lock: delegate directly to k_mutex_lock(); same return values. */
static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
{
return k_mutex_lock(&mutex->kernel_mutex, timeout);
}

/* NOTE(review): this span is a rendered diff whose +/- markers were lost.
* The function body below is the OLD misc/mutex.h content removed by this
* commit; the #ifndef CONFIG_COMPAT_INCLUDES / #warning lines and the
* trailing #include <sys/mutex.h> appear to be the NEW shim content added
* in its place -- they do not belong inside sys_mutex_unlock(). Confirm
* against the actual commit before relying on this text.
*/
static inline int sys_mutex_unlock(struct sys_mutex *mutex)
{
/* Cannot unlock a mutex that is not locked */
if (mutex->kernel_mutex.lock_count == 0) {
return -EINVAL;
}

/* Only the owning thread may unlock */
if (mutex->kernel_mutex.owner != _current) {
return -EPERM;
}
/* Shim addition (misplaced by the diff rendering): warn when the legacy
* include path is used, unless CONFIG_COMPAT_INCLUDES suppresses it.
*/
#ifndef CONFIG_COMPAT_INCLUDES
#warning "This header file has moved, include <sys/mutex.h> instead."
#endif

k_mutex_unlock(&mutex->kernel_mutex);
return 0;
}
/* Shim addition: forward to the new header location. */
#include <sys/mutex.h>

#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_MISC_MUTEX_H_ */
@@ -27,7 +27,7 @@
#include <zephyr.h>
#include <zephyr/types.h>
#include <net/tls_credentials.h>
#include <misc/mutex.h>
#include <sys/mutex.h>

#ifdef __cplusplus
extern "C" {
@@ -9,7 +9,7 @@

#include <kernel.h>
#include <sys/mempool_base.h>
#include <misc/mutex.h>
#include <sys/mutex.h>

struct sys_mem_pool {
struct sys_mem_pool_base base;
@@ -0,0 +1,149 @@
/*
* Copyright (c) 2019 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/

#ifndef ZEPHYR_INCLUDE_SYS_MUTEX_H_
#define ZEPHYR_INCLUDE_SYS_MUTEX_H_

/*
* sys_mutex behaves almost exactly like k_mutex, with the added advantage
* that a sys_mutex instance can reside in user memory.
*
* Further enhancements will support locking/unlocking uncontended sys_mutexes
* with simple atomic ops instead of syscalls, similar to Linux's
* FUTEX_LOCK_PI and FUTEX_UNLOCK_PI
*/

#ifdef CONFIG_USERSPACE
#include <sys/atomic.h>
#include <zephyr/types.h>

/* A mutex object that may reside in user memory (see header comment). */
struct sys_mutex {
/* Currently unused, but will be used to store state for fast mutexes
* that can be locked/unlocked with atomic ops if there is no
* contention
*/
atomic_t val;
};

/* Statically define a sys_mutex; no runtime initialization is needed
* (kernel-side data structures are set up at boot).
*/
#define SYS_MUTEX_DEFINE(name) \
struct sys_mutex name

/**
 * @brief Initialize a mutex.
 *
 * This routine initializes a mutex object, prior to its first use.
 *
 * Upon completion, the mutex is available and does not have an owner.
 *
 * This routine is only necessary to call when userspace is disabled
 * and the mutex was not created with SYS_MUTEX_DEFINE().
 *
 * With CONFIG_USERSPACE enabled (this branch) the call is a no-op:
 * kernel-side data structures are initialized at boot.
 *
 * @param mutex Address of the mutex.
 *
 * @return N/A
 */
static inline void sys_mutex_init(struct sys_mutex *mutex)
{
ARG_UNUSED(mutex);

/* Nothing to do, kernel-side data structures are initialized at
* boot
*/
}

/* Syscalls backing sys_mutex_lock()/sys_mutex_unlock(); the handlers
* live kernel-side and are not visible in this header.
*/
__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout);

__syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);

/**
 * @brief Lock a mutex.
 *
 * This routine locks @a mutex. If the mutex is locked by another thread,
 * the calling thread waits until the mutex becomes available or until
 * a timeout occurs.
 *
 * A thread is permitted to lock a mutex it has already locked. The operation
 * completes immediately and the lock count is increased by 1.
 *
 * @param mutex Address of the mutex, which may reside in user memory
 * @param timeout Waiting period to lock the mutex (in milliseconds),
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Mutex locked.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EACCES Caller has no access to provided mutex address
 * @retval -EINVAL Provided mutex not recognized by the kernel
 */
static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
{
/* For now, make the syscall unconditionally; a future fast path may
* take an uncontended lock with atomic ops instead (see header comment).
*/
return z_sys_mutex_kernel_lock(mutex, timeout);
}

/**
 * @brief Unlock a mutex.
 *
 * This routine unlocks @a mutex. The mutex must already be locked by the
 * calling thread.
 *
 * The mutex cannot be claimed by another thread until it has been unlocked by
 * the calling thread as many times as it was previously locked by that
 * thread.
 *
 * @param mutex Address of the mutex, which may reside in user memory
 * @retval 0 Mutex unlocked.
 * @retval -EACCES Caller has no access to provided mutex address
 * @retval -EINVAL Provided mutex not recognized by the kernel or mutex wasn't
 *                 locked
 * @retval -EPERM Caller does not own the mutex
 */
static inline int sys_mutex_unlock(struct sys_mutex *mutex)
{
/* For now, make the syscall unconditionally */
return z_sys_mutex_kernel_unlock(mutex);
}

#include <syscalls/mutex.h>

#else
#include <kernel.h>
#include <kernel_structs.h>

/* Without CONFIG_USERSPACE, sys_mutex is a thin wrapper around k_mutex. */
struct sys_mutex {
struct k_mutex kernel_mutex;
};

/* Statically define and initialize a sys_mutex via the k_mutex
* static initializer.
*/
#define SYS_MUTEX_DEFINE(name) \
struct sys_mutex name = { \
.kernel_mutex = _K_MUTEX_INITIALIZER(name.kernel_mutex) \
}

/* Initialize a sys_mutex by initializing the wrapped k_mutex. */
static inline void sys_mutex_init(struct sys_mutex *mutex)
{
k_mutex_init(&mutex->kernel_mutex);
}

/* Lock: delegate directly to k_mutex_lock(); same return values. */
static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
{
return k_mutex_lock(&mutex->kernel_mutex, timeout);
}

static inline int sys_mutex_unlock(struct sys_mutex *mutex)
{
if (mutex->kernel_mutex.lock_count == 0) {
return -EINVAL;
}

if (mutex->kernel_mutex.owner != _current) {
return -EPERM;
}

k_mutex_unlock(&mutex->kernel_mutex);
return 0;
}

#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_SYS_MUTEX_H_ */
@@ -20,7 +20,7 @@
#include <stdbool.h>
#include <app_memory/app_memdomain.h>
#include <sys/libc-hooks.h>
#include <misc/mutex.h>
#include <sys/mutex.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
@@ -5,7 +5,7 @@
*/

#include <kernel.h>
#include <misc/mutex.h>
#include <sys/mutex.h>
#include <syscall_handler.h>
#include <kernel_structs.h>

@@ -20,7 +20,7 @@

#include <stddef.h>
#include <kernel.h>
#include <misc/mutex.h>
#include <sys/mutex.h>

#include <net/net_core.h>

@@ -48,7 +48,7 @@
#include <tc_util.h>
#include <zephyr.h>
#include <ztest.h>
#include <misc/mutex.h>
#include <sys/mutex.h>

#define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)

@@ -17,7 +17,7 @@

#include <tc_util.h>
#include <zephyr.h>
#include <misc/mutex.h>
#include <sys/mutex.h>

static int tc_rc = TC_PASS; /* test case return code */

@@ -9,7 +9,7 @@ LOG_MODULE_REGISTER(net_test, CONFIG_NET_SOCKETS_LOG_LEVEL);

#include <stdio.h>
#include <ztest_assert.h>
#include <misc/mutex.h>
#include <sys/mutex.h>
#include <net/socket.h>
#include <net/dns_resolve.h>
#include <net/buf.h>

0 comments on commit 0c9e280

Please sign in to comment.
You can’t perform that action at this time.