
arch/x86: merge asm_inline_gcc.h with asm_inline.h

This pattern exists in both the include/arch/x86 and arch/x86/include
trees. This indirection is historic and unnecessary, as all supported
toolchains for x86 support gas/gcc-style inline assembly.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
Charles E. Youse authored and nashif committed Jun 27, 2019
1 parent da735b9 commit 8a8e6a1e529038fb11c65127f7b28def80788862
Showing with 221 additions and 276 deletions.
  1. +105 −9 arch/x86/include/asm_inline.h
  2. +0 −124 arch/x86/include/asm_inline_gcc.h
  3. +116 −7 include/arch/x86/asm_inline.h
  4. +0 −136 include/arch/x86/asm_inline_gcc.h
arch/x86/include/asm_inline.h
@@ -1,22 +1,118 @@
/* Inline assembler kernel functions and macros */

/*
 * Copyright (c) 2015, Wind River Systems, Inc.
 * Copyright (c) 2019, Intel Corp.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_ASM_INLINE_H_
#define ZEPHYR_ARCH_X86_INCLUDE_ASM_INLINE_H_

#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE

#ifdef CONFIG_LAZY_FP_SHARING

/**
 *
 * @brief Disallow use of floating point capabilities
 *
 * This routine sets CR0[TS] to 1, which disallows the use of FP
 * instructions by the currently executing thread.
 *
 * @return N/A
 */
static inline void z_FpAccessDisable(void)
{
	void *tempReg;

	__asm__ volatile(
		"movl %%cr0, %0;\n\t"
		"orl $0x8, %0;\n\t"
		"movl %0, %%cr0;\n\t"
		: "=r"(tempReg)
		:
		: "memory");
}
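/*
 * Illustrative counterpart (a sketch, not part of this commit): CR0[TS]
 * is cleared with the CLTS instruction, typically from the #NM
 * (device-not-available) handler before restoring the incoming thread's
 * FP context. The function name below is hypothetical.
 */
static inline void z_FpAccessEnable(void)
{
	__asm__ volatile("clts;\n\t");
}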


/**
 *
 * @brief Save x87 FPU (x87/MMX) context information
 *
 * This routine saves the system's "live" x87/MMX context into the
 * specified area, using FNSAVE. It is used for threads that do not use
 * SSE. Invoked by FpCtxSave(struct k_thread *thread).
 *
 * @return N/A
 */
static inline void z_do_fp_regs_save(void *preemp_float_reg)
{
	__asm__ volatile("fnsave (%0);\n\t"
			 :
			 : "r"(preemp_float_reg)
			 : "memory");
}

#ifdef CONFIG_SSE
/**
 *
 * @brief Save x87 FPU and SSE context information
 *
 * This routine saves the system's "live" x87/MMX/SSEx context into the
 * specified area, using FXSAVE. It is used for threads that use SSE.
 * Invoked by FpCtxSave(struct k_thread *thread).
 *
 * @return N/A
 */
static inline void z_do_fp_and_sse_regs_save(void *preemp_float_reg)
{
	__asm__ volatile("fxsave (%0);\n\t"
			 :
			 : "r"(preemp_float_reg)
			 : "memory");
}
#endif /* CONFIG_SSE */
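/*
 * A minimal sketch (hypothetical name, not part of this commit; assumes
 * u8_t and Zephyr's __aligned() macro are in scope): FXSAVE stores a
 * 512-byte image that must be 16-byte aligned, while FNSAVE stores a
 * 108-byte image with no alignment constraint, so a buffer like this
 * satisfies both instructions:
 */
static u8_t z_example_fp_save_area[512] __aligned(16);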

/**
 *
 * @brief Initialize floating point register context information.
 *
 * This routine initializes the system's "live" floating point registers.
 *
 * @return N/A
 */
static inline void z_do_fp_regs_init(void)
{
	__asm__ volatile("fninit\n\t");
}

#ifdef CONFIG_SSE
/**
 *
 * @brief Initialize SSE register context information.
 *
 * This routine initializes the system's "live" SSE registers.
 *
 * @return N/A
 */
static inline void z_do_sse_regs_init(void)
{
	__asm__ volatile("ldmxcsr _sse_mxcsr_default_value\n\t");
}
#endif /* CONFIG_SSE */
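/*
 * Note (assumption, not from this commit): _sse_mxcsr_default_value is
 * an external symbol defined elsewhere in the arch code; LDMXCSR loads
 * it into MXCSR. For reference, the architectural reset value of MXCSR
 * is 0x1f80 (all SIMD exceptions masked, round-to-nearest).
 */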

#endif /* CONFIG_LAZY_FP_SHARING */

#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_ARCH_X86_INCLUDE_ASM_INLINE_H_ */

arch/x86/include/asm_inline_gcc.h
This file was deleted.

include/arch/x86/asm_inline.h
@@ -1,8 +1,6 @@
/* Intel x86 inline assembler functions and macros for public functions */

/*
 * Copyright (c) 2015, Wind River Systems, Inc.
 * Copyright (c) 2019, Intel Corp.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

@@ -14,10 +12,121 @@
 * Include kernel.h instead
 */

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stddef.h>

/**
 *
 * @internal
 *
 * @brief Disable all interrupts on the CPU
 *
 * GCC assembly internals of irq_lock(). See irq_lock() for a complete
 * description.
 *
 * @return An architecture-dependent lock-out key representing the
 * "interrupt disable state" prior to the call.
 */
static ALWAYS_INLINE unsigned int _do_irq_lock(void)
{
	unsigned int key;

	__asm__ volatile (
		"pushfl;\n\t"
		"cli;\n\t"
		"popl %0;\n\t"
		: "=g" (key)
		:
		: "memory"
		);

	return key;
}


/**
 *
 * @internal
 *
 * @brief Enable all interrupts on the CPU (inline)
 *
 * GCC assembly internals of irq_unlock(). See irq_unlock() for a
 * complete description.
 *
 * @return N/A
 */
static ALWAYS_INLINE void z_do_irq_unlock(void)
{
	__asm__ volatile (
		"sti;\n\t"
		: : : "memory"
		);
}
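/*
 * Sketch of the usual pairing (hypothetical function, not part of this
 * header): the key returned by _do_irq_lock() is the pre-lock EFLAGS,
 * so interrupts are re-enabled only if EFLAGS.IF (bit 9, mask 0x200)
 * was set when the lock was taken.
 */
static ALWAYS_INLINE void z_example_critical_section(void)
{
	unsigned int key = _do_irq_lock();

	/* ... critical section, interrupts disabled ... */

	if ((key & 0x200) != 0) {
		z_do_irq_unlock();
	}
}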


/**
 * @brief Read the 64-bit timestamp counter, ensuring serialization
 */
static inline u64_t z_tsc_read(void)
{
	union {
		struct {
			u32_t lo;
			u32_t hi;
		};
		u64_t value;
	} rv;

	/* rdtsc & cpuid clobber eax, ebx, ecx and edx registers */
	__asm__ volatile (/* serialize */
		"xorl %%eax,%%eax;\n\t"
		"cpuid;\n\t"
		:
		:
		: "%eax", "%ebx", "%ecx", "%edx"
		);

	/*
	 * We cannot use "=A", since this would use %rax on x86_64 and
	 * return only the lower 32 bits of the TSC
	 */
	__asm__ volatile ("rdtsc" : "=a" (rv.lo), "=d" (rv.hi));

	return rv.value;
}
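/*
 * Illustrative use (hypothetical function, not part of this header):
 * the serializing CPUID in z_tsc_read() keeps earlier instructions from
 * drifting past the RDTSC, so back-to-back reads bound a measured
 * section's cycle count.
 */
static inline u64_t z_example_measure_cycles(void (*fn)(void))
{
	u64_t start = z_tsc_read();

	fn();	/* code under measurement */

	return z_tsc_read() - start;
}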


/**
 *
 * @brief Get a 32-bit CPU timestamp counter
 *
 * @return a 32-bit number
 */
static ALWAYS_INLINE u32_t z_do_read_cpu_timestamp32(void)
{
	u32_t rv;

	__asm__ volatile("rdtsc" : "=a"(rv) : : "%edx");

	return rv;
}
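/*
 * Note: only EAX is read here, so the value is the low 32 bits of the
 * TSC; deltas remain valid as long as the counter wraps at most once
 * between samples.
 */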



#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_X86_ASM_INLINE_H_ */

include/arch/x86/asm_inline_gcc.h
This file was deleted.