diff --git a/.gitignore b/.gitignore
index f4c769df4a2..5ef035d17b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,5 +25,3 @@
/build2
/build_vc2019-64
/build_vc2019-32
-.kdev4
-*.kdev4
\ No newline at end of file
diff --git a/libraries/asmjit/CMakeLists.txt b/libraries/asmjit/CMakeLists.txt
index 118570c3a51..6b7636ebe98 100644
--- a/libraries/asmjit/CMakeLists.txt
+++ b/libraries/asmjit/CMakeLists.txt
@@ -1,22 +1,17 @@
cmake_minimum_required(VERSION 2.8.7)
+
#make_release_only()
-project(asmjit CXX)
+project(asmjit CXX)
set(ASMJITNAME asmjit)
-add_definitions(-DASMJIT_STATIC)
-if(CMAKE_VERSION VERSION_LESS "3.1")
- if("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang|AppleClang)$")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
- endif()
-else()
- set(CMAKE_CXX_STANDARD 11)
-endif()
+add_definitions(-DASMJIT_BUILD_EMBED)
+add_definitions(-DASMJIT_STATIC)
if(MSVC)
- set(CMAKE_DEBUG_POSTFIX "d")
- add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE)
+ set(CMAKE_DEBUG_POSTFIX "d")
+ add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE)
endif()
if(APPLE)
@@ -26,127 +21,92 @@ endif()
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-set(ASMJIT_SRCS
+set(ASMJIT_PUBLIC_HDRS
+ asmjit/arm.h
asmjit/asmjit.h
-
- asmjit/core.h
- asmjit/core/build.h
- asmjit/core/arch.cpp
- asmjit/core/arch.h
- asmjit/core/assembler.cpp
- asmjit/core/assembler.h
- asmjit/core/builder.cpp
- asmjit/core/builder.h
- asmjit/core/callconv.cpp
- asmjit/core/callconv.h
- asmjit/core/codebufferwriter_p.h
- asmjit/core/codeholder.cpp
- asmjit/core/codeholder.h
- asmjit/core/compiler.cpp
- asmjit/core/compiler.h
- asmjit/core/constpool.cpp
- asmjit/core/constpool.h
- asmjit/core/cpuinfo.cpp
- asmjit/core/cpuinfo.h
- asmjit/core/datatypes.h
- asmjit/core/emitter.cpp
- asmjit/core/emitter.h
- asmjit/core/features.h
- asmjit/core/func.cpp
- asmjit/core/func.h
- asmjit/core/globals.cpp
- asmjit/core/globals.h
- asmjit/core/inst.cpp
- asmjit/core/inst.h
- asmjit/core/jitallocator.cpp
- asmjit/core/jitallocator.h
- asmjit/core/jitruntime.cpp
- asmjit/core/jitruntime.h
- asmjit/core/logging.cpp
- asmjit/core/logging.h
- asmjit/core/misc_p.h
- asmjit/core/operand.cpp
- asmjit/core/operand.h
- asmjit/core/osutils.cpp
- asmjit/core/osutils.h
- asmjit/core/raassignment_p.h
- asmjit/core/rabuilders_p.h
- asmjit/core/radefs_p.h
- asmjit/core/ralocal.cpp
- asmjit/core/ralocal_p.h
- asmjit/core/rapass.cpp
- asmjit/core/rapass_p.h
- asmjit/core/rastack.cpp
- asmjit/core/rastack_p.h
- asmjit/core/string.cpp
- asmjit/core/string.h
- asmjit/core/support.cpp
- asmjit/core/support.h
- asmjit/core/target.cpp
- asmjit/core/target.h
- asmjit/core/type.cpp
- asmjit/core/type.h
- asmjit/core/virtmem.cpp
- asmjit/core/virtmem.h
- asmjit/core/zone.cpp
- asmjit/core/zone.h
- asmjit/core/zonehash.cpp
- asmjit/core/zonehash.h
- asmjit/core/zonelist.cpp
- asmjit/core/zonelist.h
- asmjit/core/zonestack.cpp
- asmjit/core/zonestack.h
- asmjit/core/zonestring.h
- asmjit/core/zonetree.cpp
- asmjit/core/zonetree.h
- asmjit/core/zonevector.cpp
- asmjit/core/zonevector.h
-
- asmjit/x86.h
- asmjit/x86/x86assembler.cpp
- asmjit/x86/x86assembler.h
- asmjit/x86/x86builder.cpp
- asmjit/x86/x86builder.h
- asmjit/x86/x86callconv.cpp
- asmjit/x86/x86callconv_p.h
- asmjit/x86/x86compiler.cpp
- asmjit/x86/x86compiler.h
- asmjit/x86/x86emitter.h
- asmjit/x86/x86features.cpp
- asmjit/x86/x86features.h
- asmjit/x86/x86globals.h
- asmjit/x86/x86instapi.cpp
- asmjit/x86/x86instapi_p.h
- asmjit/x86/x86instdb.cpp
- asmjit/x86/x86instdb.h
- asmjit/x86/x86instdb_p.h
- asmjit/x86/x86internal.cpp
- asmjit/x86/x86internal_p.h
- asmjit/x86/x86logging.cpp
- asmjit/x86/x86logging_p.h
- asmjit/x86/x86opcode_p.h
- asmjit/x86/x86operand.cpp
- asmjit/x86/x86operand.h
- asmjit/x86/x86rapass.cpp
- asmjit/x86/x86rapass_p.h
+ asmjit/asmjit_apibegin.h
+ asmjit/asmjit_apiend.h
+ asmjit/asmjit_build.h
+ asmjit/base.h
+ asmjit/base/arch.h
+ asmjit/base/assembler.h
+ asmjit/base/codebuilder.h
+ asmjit/base/codecompiler.h
+ asmjit/base/codeemitter.h
+ asmjit/base/codeholder.h
+ asmjit/base/constpool.h
+ asmjit/base/cpuinfo.h
+ asmjit/base/func.h
+ asmjit/base/globals.h
+ asmjit/base/inst.h
+ asmjit/base/logging.h
+ asmjit/base/misc_p.h
+ asmjit/base/operand.h
+ asmjit/base/osutils.h
+ asmjit/base/regalloc_p.h
+ asmjit/base/runtime.h
+ asmjit/base/simdtypes.h
+ asmjit/base/string.h
+ asmjit/base/utils.h
+ asmjit/base/vmem.h
+ asmjit/base/zone.h
+ asmjit/x86.h
+ asmjit/x86/x86assembler.h
+ asmjit/x86/x86builder.h
+ asmjit/x86/x86compiler.h
+ asmjit/x86/x86emitter.h
+ asmjit/x86/x86globals.h
+ asmjit/x86/x86inst.h
+ asmjit/x86/x86instimpl_p.h
+ asmjit/x86/x86internal_p.h
+ asmjit/x86/x86logging_p.h
+ asmjit/x86/x86misc.h
+ asmjit/x86/x86operand.h
+ asmjit/x86/x86regalloc_p.h
+)
+set(ASMJIT_SRCS
+ asmjit/base/arch.cpp
+ asmjit/base/assembler.cpp
+ asmjit/base/codebuilder.cpp
+ asmjit/base/codecompiler.cpp
+ asmjit/base/codeemitter.cpp
+ asmjit/base/codeholder.cpp
+ asmjit/base/constpool.cpp
+ asmjit/base/cpuinfo.cpp
+ asmjit/base/func.cpp
+ asmjit/base/globals.cpp
+ asmjit/base/inst.cpp
+ asmjit/base/logging.cpp
+ asmjit/base/operand.cpp
+ asmjit/base/osutils.cpp
+ asmjit/base/regalloc.cpp
+ asmjit/base/runtime.cpp
+ asmjit/base/string.cpp
+ asmjit/base/utils.cpp
+ asmjit/base/vmem.cpp
+ asmjit/base/zone.cpp
+ asmjit/x86/x86assembler.cpp
+ asmjit/x86/x86builder.cpp
+ asmjit/x86/x86compiler.cpp
+ asmjit/x86/x86inst.cpp
+ asmjit/x86/x86instimpl.cpp
+ asmjit/x86/x86internal.cpp
+ asmjit/x86/x86logging.cpp
+ asmjit/x86/x86operand.cpp
+ asmjit/x86/x86operand_regs.cpp
+ asmjit/x86/x86regalloc.cpp
)
-set(ASMJIT_PUBLIC_HDRS "")
-foreach(_src_file ${ASMJIT_SRCS})
- if ("${_src_file}" MATCHES "\\.h$" AND NOT "${_src_file}" MATCHES "_p\\.h$")
- list(APPEND ASMJIT_PUBLIC_HDRS ${_src_file})
- endif()
-endforeach()
+add_library(${ASMJITNAME} STATIC ${ASMJIT_SRCS} ${ASMJIT_PUBLIC_HDRS})
-add_library(${ASMJITNAME} STATIC ${ASMJIT_SRCS})
set_target_properties(${ASMJITNAME} PROPERTIES OUTPUT_NAME asmjit)
-if(NOT SKIP_INSTALL_LIBRARIES AND NOT SKIP_INSTALL_ALL)
- install(TARGETS ${ASMJITNAME}
- RUNTIME DESTINATION bin
- ARCHIVE DESTINATION lib
- LIBRARY DESTINATION lib)
+if(NOT SKIP_INSTALL_LIBRARIES AND NOT SKIP_INSTALL_ALL )
+ install(TARGETS ${ASMJITNAME}
+ RUNTIME DESTINATION bin
+ ARCHIVE DESTINATION lib
+ LIBRARY DESTINATION lib )
endif()
-if(NOT SKIP_INSTALL_HEADERS AND NOT SKIP_INSTALL_ALL)
- install(FILES ${ASMJIT_PUBLIC_HDRS} DESTINATION include)
+if(NOT SKIP_INSTALL_HEADERS AND NOT SKIP_INSTALL_ALL )
+ install(FILES ${ASMJIT_PUBLIC_HDRS} DESTINATION include)
endif()
diff --git a/libraries/asmjit/asmjit/arm.h b/libraries/asmjit/asmjit/arm.h
new file mode 100644
index 00000000000..0a916d9cd99
--- /dev/null
+++ b/libraries/asmjit/asmjit/arm.h
@@ -0,0 +1,21 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_ARM_H
+#define _ASMJIT_ARM_H
+
+// [Dependencies]
+#include "./base.h"
+
+#include "./arm/armassembler.h"
+#include "./arm/armbuilder.h"
+#include "./arm/armcompiler.h"
+#include "./arm/arminst.h"
+#include "./arm/armoperand.h"
+
+// [Guard]
+#endif // _ASMJIT_ARM_H
diff --git a/libraries/asmjit/asmjit/asmjit.h b/libraries/asmjit/asmjit/asmjit.h
index 44281295d2e..ead90f0c257 100644
--- a/libraries/asmjit/asmjit/asmjit.h
+++ b/libraries/asmjit/asmjit/asmjit.h
@@ -1,41 +1,47 @@
// [AsmJit]
-// Machine Code Generation for C++.
+// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
+// [Guard]
#ifndef _ASMJIT_ASMJIT_H
#define _ASMJIT_ASMJIT_H
-//! \mainpage API Reference
+// ============================================================================
+// [asmjit_mainpage]
+// ============================================================================
+
+//! \mainpage
//!
-//! AsmJit C++ API reference documentation generated by Doxygen.
+//! AsmJit - Complete x86/x64 JIT and Remote Assembler for C++.
//!
//! Introduction provided by the project page at https://github.com/asmjit/asmjit.
+
+//! \defgroup asmjit_base AsmJit Base API (architecture independent)
//!
-//! \section main_groups Groups
-//!
-//! The documentation is split into the following groups:
-//!
-//! $$DOCS_GROUP_OVERVIEW$$
-//!
-//! \section main_other Other Pages
+//! \brief Backend Neutral API.
+
+//! \defgroup asmjit_x86 AsmJit X86/X64 API
//!
-//! - Class List - List of classes sorted alphabetically
-//! - AsmJit Namespace - List of symbols provided by `asmjit` namespace
+//! \brief X86/X64 Backend API.
-//! \namespace asmjit
+//! \defgroup asmjit_arm AsmJit ARM32/ARM64 API
//!
-//! Root namespace used by AsmJit.
+//! \brief ARM32/ARM64 Backend API.
-#include "./core.h"
+// [Dependencies]
+#include "./base.h"
-#ifdef ASMJIT_BUILD_X86
- #include "./x86.h"
-#endif
+// [X86/X64]
+#if defined(ASMJIT_BUILD_X86)
+#include "./x86.h"
+#endif // ASMJIT_BUILD_X86
-#ifdef ASMJIT_BUILD_ARM
- #include "./arm.h"
-#endif
+// [ARM32/ARM64]
+#if defined(ASMJIT_BUILD_ARM)
+#include "./arm.h"
+#endif // ASMJIT_BUILD_ARM
+// [Guard]
#endif // _ASMJIT_ASMJIT_H
diff --git a/libraries/asmjit/asmjit/asmjit_apibegin.h b/libraries/asmjit/asmjit/asmjit_apibegin.h
new file mode 100644
index 00000000000..58d16dbaef6
--- /dev/null
+++ b/libraries/asmjit/asmjit/asmjit_apibegin.h
@@ -0,0 +1,117 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Dependencies]
+#if !defined(_ASMJIT_BUILD_H)
+# include "./build.h"
+#endif // !_ASMJIT_BUILD_H
+
+// [Guard]
+#if !defined(ASMJIT_API_SCOPE)
+# define ASMJIT_API_SCOPE
+#else
+# error "[asmjit] api-scope is already active, previous scope not closed by asmjit_apiend.h?"
+#endif // ASMJIT_API_SCOPE
+
+// ============================================================================
+// [C++ Support]
+// ============================================================================
+
+// [NoExcept]
+#if !ASMJIT_CC_HAS_NOEXCEPT && !defined(noexcept)
+# define noexcept ASMJIT_NOEXCEPT
+# define ASMJIT_UNDEF_NOEXCEPT
+#endif // !ASMJIT_CC_HAS_NOEXCEPT && !noexcept
+
+// [NullPtr]
+#if !ASMJIT_CC_HAS_NULLPTR && !defined(nullptr)
+# define nullptr NULL
+# define ASMJIT_UNDEF_NULLPTR
+#endif // !ASMJIT_CC_HAS_NULLPTR && !nullptr
+
+// [Override]
+#if !ASMJIT_CC_HAS_OVERRIDE && !defined(override)
+# define override
+# define ASMJIT_UNDEF_OVERRIDE
+#endif // !ASMJIT_CC_HAS_OVERRIDE && !override
+
+// ============================================================================
+// [Compiler Support]
+// ============================================================================
+
+// [Clang]
+#if ASMJIT_CC_CLANG
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wc++11-extensions"
+# pragma clang diagnostic ignored "-Wconstant-logical-operand"
+# pragma clang diagnostic ignored "-Wunnamed-type-template-args"
+#endif // ASMJIT_CC_CLANG
+
+// [GCC]
+#if ASMJIT_CC_GCC
+# pragma GCC diagnostic push
+#endif // ASMJIT_CC_GCC
+
+// [MSC]
+#if ASMJIT_CC_MSC
+# pragma warning(push)
+# pragma warning(disable: 4127) // conditional expression is constant
+# pragma warning(disable: 4201) // nameless struct/union
+# pragma warning(disable: 4244) // '+=' : conversion from 'int' to 'x', possible loss of data
+# pragma warning(disable: 4251) // struct needs to have dll-interface to be used by clients of struct ...
+# pragma warning(disable: 4275) // non dll-interface struct ... used as base for dll-interface struct
+# pragma warning(disable: 4355) // this used in base member initializer list
+# pragma warning(disable: 4480) // specifying underlying type for enum
+# pragma warning(disable: 4800) // forcing value to bool 'true' or 'false'
+# if _MSC_VER < 1900
+# if !defined(vsnprintf)
+# define ASMJIT_UNDEF_VSNPRINTF
+# define vsnprintf _vsnprintf
+# endif // !vsnprintf
+# if !defined(snprintf)
+# define ASMJIT_UNDEF_SNPRINTF
+# define snprintf _snprintf
+# endif // !snprintf
+# endif
+#endif // ASMJIT_CC_MSC
+
+// ============================================================================
+// [Custom Macros]
+// ============================================================================
+
+// [ASMJIT_NON...]
+#if ASMJIT_CC_HAS_DELETE_FUNCTION
+#define ASMJIT_NONCONSTRUCTIBLE(...) \
+private: \
+ __VA_ARGS__() = delete; \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+public:
+#define ASMJIT_NONCOPYABLE(...) \
+private: \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+public:
+#else
+#define ASMJIT_NONCONSTRUCTIBLE(...) \
+private: \
+ inline __VA_ARGS__(); \
+ inline __VA_ARGS__(const __VA_ARGS__& other); \
+ inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \
+public:
+#define ASMJIT_NONCOPYABLE(...) \
+private: \
+ inline __VA_ARGS__(const __VA_ARGS__& other); \
+ inline __VA_ARGS__& operator=(const __VA_ARGS__& other); \
+public:
+#endif // ASMJIT_CC_HAS_DELETE_FUNCTION
+
+// [ASMJIT_ENUM]
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+# define ASMJIT_ENUM(NAME) enum NAME : uint32_t
+#else
+# define ASMJIT_ENUM(NAME) enum NAME
+#endif
diff --git a/libraries/asmjit/asmjit/asmjit_apiend.h b/libraries/asmjit/asmjit/asmjit_apiend.h
new file mode 100644
index 00000000000..a51630b5a24
--- /dev/null
+++ b/libraries/asmjit/asmjit/asmjit_apiend.h
@@ -0,0 +1,74 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#if defined(ASMJIT_API_SCOPE)
+# undef ASMJIT_API_SCOPE
+#else
+# error "[asmjit] api-scope not active, forgot to include asmjit_apibegin.h?"
+#endif // ASMJIT_API_SCOPE
+
+// ============================================================================
+// [C++ Support]
+// ============================================================================
+
+// [NoExcept]
+#if defined(ASMJIT_UNDEF_NOEXCEPT)
+# undef noexcept
+# undef ASMJIT_UNDEF_NOEXCEPT
+#endif // ASMJIT_UNDEF_NOEXCEPT
+
+// [NullPtr]
+#if defined(ASMJIT_UNDEF_NULLPTR)
+# undef nullptr
+# undef ASMJIT_UNDEF_NULLPTR
+#endif // ASMJIT_UNDEF_NULLPTR
+
+// [Override]
+#if defined(ASMJIT_UNDEF_OVERRIDE)
+# undef override
+# undef ASMJIT_UNDEF_OVERRIDE
+#endif // ASMJIT_UNDEF_OVERRIDE
+
+// ============================================================================
+// [Compiler Support]
+// ============================================================================
+
+// [Clang]
+#if ASMJIT_CC_CLANG
+# pragma clang diagnostic pop
+#endif // ASMJIT_CC_CLANG
+
+// [GCC]
+#if ASMJIT_CC_GCC
+# pragma GCC diagnostic pop
+#endif // ASMJIT_CC_GCC
+
+// [MSC]
+#if ASMJIT_CC_MSC
+# pragma warning(pop)
+# if _MSC_VER < 1900
+# if defined(ASMJIT_UNDEF_VSNPRINTF)
+# undef vsnprintf
+# undef ASMJIT_UNDEF_VSNPRINTF
+# endif // ASMJIT_UNDEF_VSNPRINTF
+# if defined(ASMJIT_UNDEF_SNPRINTF)
+# undef snprintf
+# undef ASMJIT_UNDEF_SNPRINTF
+# endif // ASMJIT_UNDEF_SNPRINTF
+# endif
+#endif // ASMJIT_CC_MSC
+
+// ============================================================================
+// [Custom Macros]
+// ============================================================================
+
+// [ASMJIT_NON...]
+#undef ASMJIT_NONCONSTRUCTIBLE
+#undef ASMJIT_NONCOPYABLE
+
+// [ASMJIT_ENUM]
+#undef ASMJIT_ENUM
diff --git a/libraries/asmjit/asmjit/asmjit_build.h b/libraries/asmjit/asmjit/asmjit_build.h
new file mode 100644
index 00000000000..77b151ac3a1
--- /dev/null
+++ b/libraries/asmjit/asmjit/asmjit_build.h
@@ -0,0 +1,949 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BUILD_H
+#define _ASMJIT_BUILD_H
+
+// ============================================================================
+// [asmjit::Build - Configuration]
+// ============================================================================
+
+// AsmJit is by default compiled only for a host processor for the purpose of
+// JIT code generation. Both Assembler and CodeCompiler emitters are compiled
+// by default. Preprocessor macros can be used to change the default behavior.
+
+// External Config File
+// --------------------
+//
+// Define in case your configuration is generated in an external file to be
+// included.
+
+#if defined(ASMJIT_CONFIG_FILE)
+# include ASMJIT_CONFIG_FILE
+#endif // ASMJIT_CONFIG_FILE
+
+// AsmJit Static Builds and Embedding
+// ----------------------------------
+//
+// These definitions can be used to enable static library build. Embed is used
+// when AsmJit's source code is embedded directly in another project, implies
+// static build as well.
+//
+// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_STATIC).
+// #define ASMJIT_STATIC // Define to enable static-library build.
+
+// AsmJit Build Modes
+// ------------------
+//
+// These definitions control the build mode and tracing support. The build mode
+// should be auto-detected at compile time, but it's possible to override it in
+// case that the auto-detection fails.
+//
+// Tracing is a feature that is never compiled by default and it's only used to
+// debug AsmJit itself.
+//
+// #define ASMJIT_DEBUG // Define to enable debug-mode.
+// #define ASMJIT_RELEASE // Define to enable release-mode.
+
+// AsmJit Build Backends
+// ---------------------
+//
+// These definitions control which backends to compile. If none of these is
+// defined AsmJit will use host architecture by default (for JIT code generation).
+//
+// #define ASMJIT_BUILD_X86 // Define to enable X86 and X64 code-generation.
+// #define ASMJIT_BUILD_ARM // Define to enable ARM32 and ARM64 code-generation.
+// #define ASMJIT_BUILD_HOST // Define to enable host instruction set.
+
+// AsmJit Build Features
+// ---------------------
+//
+// Flags can be defined to disable standard features. These are handy especially
+// when building AsmJit statically and some features are not needed or unwanted
+// (like CodeCompiler).
+//
+// AsmJit features are enabled by default.
+// #define ASMJIT_DISABLE_COMPILER // Disable CodeCompiler (completely).
+// #define ASMJIT_DISABLE_LOGGING // Disable logging and formatting (completely).
+// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text
+// // representation (instructions, errors, ...).
+// #define ASMJIT_DISABLE_VALIDATION // Disable Validation (completely).
+
+// Prevent compile-time errors caused by misconfiguration.
+#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGING)
+# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGING to be defined."
+#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGING
+
+// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside.
+#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE)
+# if !defined(NDEBUG)
+# define ASMJIT_DEBUG
+# else
+# define ASMJIT_RELEASE
+# endif
+#endif
+
+// ASMJIT_EMBED implies ASMJIT_STATIC.
+#if defined(ASMJIT_EMBED) && !defined(ASMJIT_STATIC)
+# define ASMJIT_STATIC
+#endif
+
+// ============================================================================
+// [asmjit::Build - VERSION]
+// ============================================================================
+
+// [@VERSION{@]
+#define ASMJIT_VERSION_MAJOR 1
+#define ASMJIT_VERSION_MINOR 0
+#define ASMJIT_VERSION_PATCH 0
+#define ASMJIT_VERSION_STRING "1.0.0"
+// [@VERSION}@]
+
+// ============================================================================
+// [asmjit::Build - WIN32]
+// ============================================================================
+
+// [@WIN32_CRT_NO_DEPRECATE{@]
+#if defined(_MSC_VER) && defined(ASMJIT_EXPORTS)
+# if !defined(_CRT_SECURE_NO_DEPRECATE)
+# define _CRT_SECURE_NO_DEPRECATE
+# endif
+# if !defined(_CRT_SECURE_NO_WARNINGS)
+# define _CRT_SECURE_NO_WARNINGS
+# endif
+#endif
+// [@WIN32_CRT_NO_DEPRECATE}@]
+
+// [@WIN32_LEAN_AND_MEAN{@]
+#if (defined(_WIN32) || defined(_WINDOWS)) && !defined(_WINDOWS_)
+# if !defined(WIN32_LEAN_AND_MEAN)
+# define WIN32_LEAN_AND_MEAN
+# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
+# endif
+# if !defined(NOMINMAX)
+# define NOMINMAX
+# define ASMJIT_UNDEF_NOMINMAX
+# endif
+# include <windows.h>
+# if defined(ASMJIT_UNDEF_NOMINMAX)
+# undef NOMINMAX
+# undef ASMJIT_UNDEF_NOMINMAX
+# endif
+# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN)
+# undef WIN32_LEAN_AND_MEAN
+# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
+# endif
+#endif
+// [@WIN32_LEAN_AND_MEAN}@]
+
+// ============================================================================
+// [asmjit::Build - OS]
+// ============================================================================
+
+// [@OS{@]
+#if defined(_WIN32) || defined(_WINDOWS)
+#define ASMJIT_OS_WINDOWS (1)
+#else
+#define ASMJIT_OS_WINDOWS (0)
+#endif
+
+#if defined(__APPLE__)
+# include <TargetConditionals.h>
+# define ASMJIT_OS_MAC (TARGET_OS_MAC)
+# define ASMJIT_OS_IOS (TARGET_OS_IPHONE)
+#else
+# define ASMJIT_OS_MAC (0)
+# define ASMJIT_OS_IOS (0)
+#endif
+
+#if defined(__ANDROID__)
+# define ASMJIT_OS_ANDROID (1)
+#else
+# define ASMJIT_OS_ANDROID (0)
+#endif
+
+#if defined(__linux__) || defined(__ANDROID__)
+# define ASMJIT_OS_LINUX (1)
+#else
+# define ASMJIT_OS_LINUX (0)
+#endif
+
+#if defined(__DragonFly__)
+# define ASMJIT_OS_DRAGONFLYBSD (1)
+#else
+# define ASMJIT_OS_DRAGONFLYBSD (0)
+#endif
+
+#if defined(__FreeBSD__)
+# define ASMJIT_OS_FREEBSD (1)
+#else
+# define ASMJIT_OS_FREEBSD (0)
+#endif
+
+#if defined(__NetBSD__)
+# define ASMJIT_OS_NETBSD (1)
+#else
+# define ASMJIT_OS_NETBSD (0)
+#endif
+
+#if defined(__OpenBSD__)
+# define ASMJIT_OS_OPENBSD (1)
+#else
+# define ASMJIT_OS_OPENBSD (0)
+#endif
+
+#if defined(__QNXNTO__)
+# define ASMJIT_OS_QNX (1)
+#else
+# define ASMJIT_OS_QNX (0)
+#endif
+
+#if defined(__sun)
+# define ASMJIT_OS_SOLARIS (1)
+#else
+# define ASMJIT_OS_SOLARIS (0)
+#endif
+
+#if defined(__CYGWIN__)
+# define ASMJIT_OS_CYGWIN (1)
+#else
+# define ASMJIT_OS_CYGWIN (0)
+#endif
+
+#define ASMJIT_OS_BSD ( \
+ ASMJIT_OS_FREEBSD || \
+ ASMJIT_OS_DRAGONFLYBSD || \
+ ASMJIT_OS_NETBSD || \
+ ASMJIT_OS_OPENBSD || \
+ ASMJIT_OS_MAC)
+#define ASMJIT_OS_POSIX (!ASMJIT_OS_WINDOWS)
+// [@OS}@]
+
+// ============================================================================
+// [asmjit::Build - ARCH]
+// ============================================================================
+
+// [@ARCH{@]
+// \def ASMJIT_ARCH_ARM32
+// True if the target architecture is a 32-bit ARM.
+//
+// \def ASMJIT_ARCH_ARM64
+// True if the target architecture is a 64-bit ARM.
+//
+// \def ASMJIT_ARCH_X86
+// True if the target architecture is a 32-bit X86/IA32
+//
+// \def ASMJIT_ARCH_X64
+// True if the target architecture is a 64-bit X64/AMD64
+//
+// \def ASMJIT_ARCH_LE
+// True if the target architecture is little endian.
+//
+// \def ASMJIT_ARCH_BE
+// True if the target architecture is big endian.
+//
+// \def ASMJIT_ARCH_64BIT
+// True if the target architecture is 64-bit.
+
+#if (defined(_M_X64 ) || defined(__x86_64) || defined(__x86_64__) || \
+ defined(_M_AMD64) || defined(__amd64 ) || defined(__amd64__ ))
+# define ASMJIT_ARCH_X64 1
+#else
+# define ASMJIT_ARCH_X64 0
+#endif
+
+#if (defined(_M_IX86 ) || defined(__X86__ ) || defined(__i386 ) || \
+ defined(__IA32__) || defined(__I86__ ) || defined(__i386__) || \
+ defined(__i486__) || defined(__i586__) || defined(__i686__))
+# define ASMJIT_ARCH_X86 (!ASMJIT_ARCH_X64)
+#else
+# define ASMJIT_ARCH_X86 0
+#endif
+
+#if defined(__aarch64__)
+# define ASMJIT_ARCH_ARM64 1
+#else
+# define ASMJIT_ARCH_ARM64 0
+#endif
+
+#if (defined(_M_ARM ) || defined(__arm ) || defined(__thumb__ ) || \
+ defined(_M_ARMT ) || defined(__arm__ ) || defined(__thumb2__))
+# define ASMJIT_ARCH_ARM32 (!ASMJIT_ARCH_ARM64)
+#else
+# define ASMJIT_ARCH_ARM32 0
+#endif
+
+#define ASMJIT_ARCH_LE ( \
+ ASMJIT_ARCH_X86 || \
+ ASMJIT_ARCH_X64 || \
+ ASMJIT_ARCH_ARM32 || \
+ ASMJIT_ARCH_ARM64 )
+#define ASMJIT_ARCH_BE (!(ASMJIT_ARCH_LE))
+#define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64)
+// [@ARCH}@]
+
+// [@ARCH_UNALIGNED_RW{@]
+// \def ASMJIT_ARCH_UNALIGNED_16
+// True if the target architecture allows unaligned 16-bit reads and writes.
+//
+// \def ASMJIT_ARCH_UNALIGNED_32
+// True if the target architecture allows unaligned 32-bit reads and writes.
+//
+// \def ASMJIT_ARCH_UNALIGNED_64
+// True if the target architecture allows unaligned 64-bit reads and writes.
+
+#define ASMJIT_ARCH_UNALIGNED_16 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
+#define ASMJIT_ARCH_UNALIGNED_32 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
+#define ASMJIT_ARCH_UNALIGNED_64 (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64)
+// [@ARCH_UNALIGNED_RW}@]
+
+// ============================================================================
+// [asmjit::Build - CC]
+// ============================================================================
+
+// [@CC{@]
+// \def ASMJIT_CC_CLANG
+// Non-zero if the detected C++ compiler is CLANG (contains normalized CLANG version).
+//
+// \def ASMJIT_CC_CODEGEAR
+// Non-zero if the detected C++ compiler is CODEGEAR or BORLAND (version not normalized).
+//
+// \def ASMJIT_CC_INTEL
+// Non-zero if the detected C++ compiler is INTEL (version not normalized).
+//
+// \def ASMJIT_CC_GCC
+// Non-zero if the detected C++ compiler is GCC (contains normalized GCC version).
+//
+// \def ASMJIT_CC_MSC
+// Non-zero if the detected C++ compiler is MSC (contains normalized MSC version).
+//
+// \def ASMJIT_CC_MINGW
+// Non-zero if the detected C++ compiler is MINGW32 (set to 32) or MINGW64 (set to 64).
+
+#define ASMJIT_CC_CLANG 0
+#define ASMJIT_CC_CODEGEAR 0
+#define ASMJIT_CC_GCC 0
+#define ASMJIT_CC_INTEL 0
+#define ASMJIT_CC_MSC 0
+
+// Intel masquerades as GCC, so check for it first.
+#if defined(__INTEL_COMPILER)
+# undef ASMJIT_CC_INTEL
+# define ASMJIT_CC_INTEL __INTEL_COMPILER
+#elif defined(__CODEGEARC__)
+# undef ASMJIT_CC_CODEGEAR
+# define ASMJIT_CC_CODEGEAR (__CODEGEARC__)
+#elif defined(__BORLANDC__)
+# undef ASMJIT_CC_CODEGEAR
+# define ASMJIT_CC_CODEGEAR (__BORLANDC__)
+#elif defined(__clang__) && defined(__clang_minor__)
+# undef ASMJIT_CC_CLANG
+# define ASMJIT_CC_CLANG (__clang_major__ * 10000000 + __clang_minor__ * 100000 + __clang_patchlevel__)
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+# undef ASMJIT_CC_GCC
+# define ASMJIT_CC_GCC (__GNUC__ * 10000000 + __GNUC_MINOR__ * 100000 + __GNUC_PATCHLEVEL__)
+#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
+# undef ASMJIT_CC_MSC
+# if _MSC_VER == _MSC_FULL_VER / 10000
+# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 10000))
+# else
+# define ASMJIT_CC_MSC (_MSC_VER * 100000 + (_MSC_FULL_VER % 100000))
+# endif
+#else
+# error "[asmjit] Unable to detect the C/C++ compiler."
+#endif
+
+#if ASMJIT_CC_INTEL && (defined(__GNUC__) || defined(__clang__))
+# define ASMJIT_CC_INTEL_COMPAT_MODE 1
+# else
+# define ASMJIT_CC_INTEL_COMPAT_MODE 0
+#endif
+
+#define ASMJIT_CC_CODEGEAR_EQ(x, y) (ASMJIT_CC_CODEGEAR == (((x) << 8) + (y)))
+#define ASMJIT_CC_CODEGEAR_GE(x, y) (ASMJIT_CC_CODEGEAR >= (((x) << 8) + (y)))
+
+#define ASMJIT_CC_CLANG_EQ(x, y, z) (ASMJIT_CC_CLANG == ((x) * 10000000 + (y) * 100000 + (z)))
+#define ASMJIT_CC_CLANG_GE(x, y, z) (ASMJIT_CC_CLANG >= ((x) * 10000000 + (y) * 100000 + (z)))
+
+#define ASMJIT_CC_GCC_EQ(x, y, z) (ASMJIT_CC_GCC == ((x) * 10000000 + (y) * 100000 + (z)))
+#define ASMJIT_CC_GCC_GE(x, y, z) (ASMJIT_CC_GCC >= ((x) * 10000000 + (y) * 100000 + (z)))
+
+#define ASMJIT_CC_INTEL_EQ(x, y) (ASMJIT_CC_INTEL == (((x) * 100) + (y)))
+#define ASMJIT_CC_INTEL_GE(x, y) (ASMJIT_CC_INTEL >= (((x) * 100) + (y)))
+
+#define ASMJIT_CC_MSC_EQ(x, y, z) (ASMJIT_CC_MSC == ((x) * 10000000 + (y) * 100000 + (z)))
+#define ASMJIT_CC_MSC_GE(x, y, z) (ASMJIT_CC_MSC >= ((x) * 10000000 + (y) * 100000 + (z)))
+
+#if defined(__MINGW64__)
+# define ASMJIT_CC_MINGW 64
+#elif defined(__MINGW32__)
+# define ASMJIT_CC_MINGW 32
+#else
+# define ASMJIT_CC_MINGW 0
+#endif
+
+#if defined(__cplusplus)
+# if __cplusplus >= 201103L
+# define ASMJIT_CC_CXX_VERSION __cplusplus
+# elif defined(__GXX_EXPERIMENTAL_CXX0X__) || ASMJIT_CC_MSC_GE(18, 0, 0) || ASMJIT_CC_INTEL_GE(14, 0)
+# define ASMJIT_CC_CXX_VERSION 201103L
+# else
+# define ASMJIT_CC_CXX_VERSION 199711L
+# endif
+#endif
+
+#if !defined(ASMJIT_CC_CXX_VERSION)
+# define ASMJIT_CC_CXX_VERSION 0
+#endif
+// [@CC}@]
+
+// [@CC_FEATURES{@]
+#if ASMJIT_CC_CLANG
+# define ASMJIT_CC_HAS_ATTRIBUTE (1)
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (__has_attribute(__aligned__))
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(__always_inline__))
+# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (__has_attribute(__noinline__))
+# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (__has_attribute(__noreturn__))
+# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (__has_attribute(__optimize__))
+# define ASMJIT_CC_HAS_BUILTIN_ASSUME (__has_builtin(__builtin_assume))
+# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned))
+# define ASMJIT_CC_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
+# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (__has_builtin(__builtin_unreachable))
+# define ASMJIT_CC_HAS_ALIGNAS (__has_extension(__cxx_alignas__))
+# define ASMJIT_CC_HAS_ALIGNOF (__has_extension(__cxx_alignof__))
+# define ASMJIT_CC_HAS_CONSTEXPR (__has_extension(__cxx_constexpr__))
+# define ASMJIT_CC_HAS_DECLTYPE (__has_extension(__cxx_decltype__))
+# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (__has_extension(__cxx_defaulted_functions__))
+# define ASMJIT_CC_HAS_DELETE_FUNCTION (__has_extension(__cxx_deleted_functions__))
+# define ASMJIT_CC_HAS_FINAL (__has_extension(__cxx_override_control__))
+# define ASMJIT_CC_HAS_INITIALIZER_LIST (__has_extension(__cxx_generalized_initializers__))
+# define ASMJIT_CC_HAS_LAMBDA (__has_extension(__cxx_lambdas__))
+# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
+# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
+# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (__has_extension(__cxx_unicode_literals__))
+# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (__has_extension(__cxx_unicode_literals__))
+# define ASMJIT_CC_HAS_NOEXCEPT (__has_extension(__cxx_noexcept__))
+# define ASMJIT_CC_HAS_NULLPTR (__has_extension(__cxx_nullptr__))
+# define ASMJIT_CC_HAS_OVERRIDE (__has_extension(__cxx_override_control__))
+# define ASMJIT_CC_HAS_RVALUE (__has_extension(__cxx_rvalue_references__))
+# define ASMJIT_CC_HAS_STATIC_ASSERT (__has_extension(__cxx_static_assert__))
+# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (__has_extension(__cxx_variadic_templates__))
+#endif
+
+#if ASMJIT_CC_CODEGEAR
+# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_CODEGEAR >= 0x0610)
+# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
+# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
+# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_CODEGEAR >= 0x0610)
+# define ASMJIT_CC_HAS_ALIGNAS (0)
+# define ASMJIT_CC_HAS_ALIGNOF (0)
+# define ASMJIT_CC_HAS_CONSTEXPR (0)
+# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_CODEGEAR >= 0x0610)
+# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (0)
+# define ASMJIT_CC_HAS_DELETE_FUNCTION (0)
+# define ASMJIT_CC_HAS_FINAL (0)
+# define ASMJIT_CC_HAS_INITIALIZER_LIST (0)
+# define ASMJIT_CC_HAS_LAMBDA (0)
+# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
+# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
+# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (0)
+# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (0)
+# define ASMJIT_CC_HAS_NOEXCEPT (0)
+# define ASMJIT_CC_HAS_NULLPTR (0)
+# define ASMJIT_CC_HAS_OVERRIDE (0)
+# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_CODEGEAR >= 0x0610)
+# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_CODEGEAR >= 0x0610)
+# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (0)
+#endif
+
+#if ASMJIT_CC_GCC
+# define ASMJIT_CC_HAS_ATTRIBUTE (1)
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_GCC_GE(2, 7, 0))
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_GCC_GE(4, 4, 0) && !ASMJIT_CC_MINGW)
+# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_GCC_GE(3, 4, 0) && !ASMJIT_CC_MINGW)
+# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_GCC_GE(2, 5, 0))
+# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_GCC_GE(4, 4, 0))
+# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
+# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (ASMJIT_CC_GCC_GE(4, 7, 0))
+# define ASMJIT_CC_HAS_BUILTIN_EXPECT (1)
+# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_GCC_GE(4, 8, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_GCC_GE(4, 4, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
+# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
+# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_GCC_GE(4, 5, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_GCC_GE(4, 6, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_GCC_GE(4, 7, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_GCC_GE(4, 3, 0) && ASMJIT_CC_CXX_VERSION >= 201103L)
+#endif
+
+#if ASMJIT_CC_INTEL
+# define ASMJIT_CC_HAS_ATTRIBUTE (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_BUILTIN_EXPECT (ASMJIT_CC_INTEL_COMPAT_MODE)
+# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
+# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
+# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
+# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (ASMJIT_CC_INTEL_COMPAT_MODE == 0)
+# define ASMJIT_CC_HAS_ASSUME (1)
+# define ASMJIT_CC_HAS_ASSUME_ALIGNED (1)
+# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_INTEL >= 1500)
+# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_INTEL >= 1500)
+# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_INTEL >= 1400)
+# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_INTEL >= 1200)
+# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_INTEL >= 1200)
+# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_INTEL >= 1200)
+# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_INTEL >= 1400)
+# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_INTEL >= 1400)
+# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_INTEL >= 1200)
+# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
+# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
+# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206))
+# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_INTEL >= 1400 || (ASMJIT_CC_INTEL_COMPAT_MODE > 0 && ASMJIT_CC_INTEL >= 1206))
+# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_INTEL >= 1400)
+# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_INTEL >= 1206)
+# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_INTEL >= 1400)
+# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_INTEL >= 1110)
+# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_INTEL >= 1110)
+# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_INTEL >= 1206)
+#endif
+
+#if ASMJIT_CC_MSC
+# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (1)
+# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (1)
+# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (1)
+# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (1)
+# define ASMJIT_CC_HAS_ASSUME (1)
+# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
+# define ASMJIT_CC_HAS_ALIGNAS (ASMJIT_CC_MSC_GE(19, 0, 0))
+# define ASMJIT_CC_HAS_ALIGNOF (ASMJIT_CC_MSC_GE(19, 0, 0))
+# define ASMJIT_CC_HAS_CONSTEXPR (ASMJIT_CC_MSC_GE(19, 0, 0))
+# define ASMJIT_CC_HAS_DECLTYPE (ASMJIT_CC_MSC_GE(16, 0, 0))
+# define ASMJIT_CC_HAS_DEFAULT_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
+# define ASMJIT_CC_HAS_DELETE_FUNCTION (ASMJIT_CC_MSC_GE(18, 0, 0))
+# define ASMJIT_CC_HAS_FINAL (ASMJIT_CC_MSC_GE(14, 0, 0))
+# define ASMJIT_CC_HAS_INITIALIZER_LIST (ASMJIT_CC_MSC_GE(18, 0, 0))
+# define ASMJIT_CC_HAS_LAMBDA (ASMJIT_CC_MSC_GE(16, 0, 0))
+# define ASMJIT_CC_HAS_NATIVE_CHAR (1)
+# if defined(_NATIVE_WCHAR_T_DEFINED)
+# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (1)
+# else
+# define ASMJIT_CC_HAS_NATIVE_WCHAR_T (0)
+# endif
+# define ASMJIT_CC_HAS_NATIVE_CHAR16_T (ASMJIT_CC_MSC_GE(19, 0, 0))
+# define ASMJIT_CC_HAS_NATIVE_CHAR32_T (ASMJIT_CC_MSC_GE(19, 0, 0))
+# define ASMJIT_CC_HAS_NOEXCEPT (ASMJIT_CC_MSC_GE(19, 0, 0))
+# define ASMJIT_CC_HAS_NULLPTR (ASMJIT_CC_MSC_GE(16, 0, 0))
+# define ASMJIT_CC_HAS_OVERRIDE (ASMJIT_CC_MSC_GE(14, 0, 0))
+# define ASMJIT_CC_HAS_RVALUE (ASMJIT_CC_MSC_GE(16, 0, 0))
+# define ASMJIT_CC_HAS_STATIC_ASSERT (ASMJIT_CC_MSC_GE(16, 0, 0))
+# define ASMJIT_CC_HAS_VARIADIC_TEMPLATES (ASMJIT_CC_MSC_GE(18, 0, 0))
+#endif
+
+// Fixup some vendor specific keywords.
+#if !defined(ASMJIT_CC_HAS_ASSUME)
+# define ASMJIT_CC_HAS_ASSUME (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_ASSUME_ALIGNED)
+# define ASMJIT_CC_HAS_ASSUME_ALIGNED (0)
+#endif
+
+// Fixup compilers that don't support '__attribute__'.
+#if !defined(ASMJIT_CC_HAS_ATTRIBUTE)
+# define ASMJIT_CC_HAS_ATTRIBUTE (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED)
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALIGNED (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_NORETURN)
+# define ASMJIT_CC_HAS_ATTRIBUTE_NORETURN (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE)
+# define ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE (0)
+#endif
+
+// Fixup compilers that don't support '__builtin?'.
+#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME)
+# define ASMJIT_CC_HAS_BUILTIN_ASSUME (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED)
+# define ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_BUILTIN_EXPECT)
+# define ASMJIT_CC_HAS_BUILTIN_EXPECT (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_BUILTIN_UNREACHABLE)
+# define ASMJIT_CC_HAS_BUILTIN_UNREACHABLE (0)
+#endif
+
+// Fixup compilers that don't support 'declspec'.
+#if !defined(ASMJIT_CC_HAS_DECLSPEC_ALIGN)
+# define ASMJIT_CC_HAS_DECLSPEC_ALIGN (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE)
+# define ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_DECLSPEC_NOINLINE)
+# define ASMJIT_CC_HAS_DECLSPEC_NOINLINE (0)
+#endif
+#if !defined(ASMJIT_CC_HAS_DECLSPEC_NORETURN)
+# define ASMJIT_CC_HAS_DECLSPEC_NORETURN (0)
+#endif
+// [@CC_FEATURES}@]
+
+// [@CC_API{@]
+// \def ASMJIT_API
+// The decorated function is asmjit API and should be exported.
+#if !defined(ASMJIT_API)
+# if defined(ASMJIT_STATIC)
+# define ASMJIT_API
+# elif ASMJIT_OS_WINDOWS
+# if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_CC_MINGW
+# if defined(ASMJIT_EXPORTS)
+# define ASMJIT_API __attribute__((__dllexport__))
+# else
+# define ASMJIT_API __attribute__((__dllimport__))
+# endif
+# else
+# if defined(ASMJIT_EXPORTS)
+# define ASMJIT_API __declspec(dllexport)
+# else
+# define ASMJIT_API __declspec(dllimport)
+# endif
+# endif
+# else
+# if ASMJIT_CC_CLANG || ASMJIT_CC_GCC_GE(4, 0, 0) || ASMJIT_CC_INTEL
+# define ASMJIT_API __attribute__((__visibility__("default")))
+# endif
+# endif
+#endif
+// [@CC_API}@]
+
+// [@CC_VARAPI{@]
+// \def ASMJIT_VARAPI
+// The decorated variable is part of asmjit API and is exported.
+#if !defined(ASMJIT_VARAPI)
+# define ASMJIT_VARAPI extern ASMJIT_API
+#endif
+// [@CC_VARAPI}@]
+
+// [@CC_VIRTAPI{@]
+// \def ASMJIT_VIRTAPI
+// The decorated class has a virtual table and is part of asmjit API.
+//
+// This is basically a workaround. When using MSVC and marking class as DLL
+// export everything gets exported, which is unwanted in most projects. MSVC
+// automatically exports typeinfo and vtable if at least one symbol of the
+// class is exported. However, GCC has some strange behavior that even if
+// one or more symbol is exported it doesn't export typeinfo unless the
+// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
+#if (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && !ASMJIT_OS_WINDOWS
+# define ASMJIT_VIRTAPI ASMJIT_API
+#else
+# define ASMJIT_VIRTAPI
+#endif
+// [@CC_VIRTAPI}@]
+
+// [@CC_INLINE{@]
+// \def ASMJIT_INLINE
+// Always inline the decorated function.
+#if ASMJIT_CC_HAS_ATTRIBUTE_ALWAYS_INLINE
+# define ASMJIT_INLINE inline __attribute__((__always_inline__))
+#elif ASMJIT_CC_HAS_DECLSPEC_FORCEINLINE
+# define ASMJIT_INLINE __forceinline
+#else
+# define ASMJIT_INLINE inline
+#endif
+// [@CC_INLINE}@]
+
+// [@CC_NOINLINE{@]
+// \def ASMJIT_NOINLINE
+// Never inline the decorated function.
+#if ASMJIT_CC_HAS_ATTRIBUTE_NOINLINE
+# define ASMJIT_NOINLINE __attribute__((__noinline__))
+#elif ASMJIT_CC_HAS_DECLSPEC_NOINLINE
+# define ASMJIT_NOINLINE __declspec(noinline)
+#else
+# define ASMJIT_NOINLINE
+#endif
+// [@CC_NOINLINE}@]
+
+// [@CC_NORETURN{@]
+// \def ASMJIT_NORETURN
+// The decorated function never returns (exit, assertion failure, etc...).
+#if ASMJIT_CC_HAS_ATTRIBUTE_NORETURN
+# define ASMJIT_NORETURN __attribute__((__noreturn__))
+#elif ASMJIT_CC_HAS_DECLSPEC_NORETURN
+# define ASMJIT_NORETURN __declspec(noreturn)
+#else
+# define ASMJIT_NORETURN
+#endif
+// [@CC_NORETURN}@]
+
+// [@CC_CDECL{@]
+// \def ASMJIT_CDECL
+// Standard C function calling convention decorator (__cdecl).
+#if ASMJIT_ARCH_X86
+# if ASMJIT_CC_HAS_ATTRIBUTE
+# define ASMJIT_CDECL __attribute__((__cdecl__))
+# else
+# define ASMJIT_CDECL __cdecl
+# endif
+#else
+# define ASMJIT_CDECL
+#endif
+// [@CC_CDECL}@]
+
+// [@CC_STDCALL{@]
+// \def ASMJIT_STDCALL
+// StdCall function calling convention decorator (__stdcall).
+#if ASMJIT_ARCH_X86
+# if ASMJIT_CC_HAS_ATTRIBUTE
+# define ASMJIT_STDCALL __attribute__((__stdcall__))
+# else
+# define ASMJIT_STDCALL __stdcall
+# endif
+#else
+# define ASMJIT_STDCALL
+#endif
+// [@CC_STDCALL}@]
+
+// [@CC_FASTCALL{@]
+// \def ASMJIT_FASTCALL
+// FastCall function calling convention decorator (__fastcall).
+#if ASMJIT_ARCH_X86
+# if ASMJIT_CC_HAS_ATTRIBUTE
+# define ASMJIT_FASTCALL __attribute__((__fastcall__))
+# else
+# define ASMJIT_FASTCALL __fastcall
+# endif
+#else
+# define ASMJIT_FASTCALL
+#endif
+// [@CC_FASTCALL}@]
+
+// [@CC_REGPARM{@]
+// \def ASMJIT_REGPARM(n)
+// A custom calling convention which passes n arguments in registers.
+#if ASMJIT_ARCH_X86 && ASMJIT_CC_HAS_ATTRIBUTE
+# define ASMJIT_REGPARM(n) __attribute__((__regparm__(n)))
+#else
+# define ASMJIT_REGPARM(n)
+#endif
+// [@CC_REGPARM}@]
+
+// [@CC_NOEXCEPT{@]
+// \def ASMJIT_NOEXCEPT
+// The decorated function never throws an exception (noexcept).
+#if ASMJIT_CC_HAS_NOEXCEPT
+# define ASMJIT_NOEXCEPT noexcept
+#else
+# define ASMJIT_NOEXCEPT
+#endif
+// [@CC_NOEXCEPT}@]
+
+// [@CC_NOP{@]
+// \def ASMJIT_NOP
+// No operation.
+#if !defined(ASMJIT_NOP)
+# define ASMJIT_NOP ((void)0)
+#endif
+// [@CC_NOP}@]
+
+// [@CC_ASSUME{@]
+// \def ASMJIT_ASSUME(exp)
+// Assume that the expression exp is always true.
+#if ASMJIT_CC_HAS_ASSUME
+# define ASMJIT_ASSUME(exp) __assume(exp)
+#elif ASMJIT_CC_HAS_BUILTIN_ASSUME
+# define ASMJIT_ASSUME(exp) __builtin_assume(exp)
+#elif ASMJIT_CC_HAS_BUILTIN_UNREACHABLE
+# define ASMJIT_ASSUME(exp) do { if (!(exp)) __builtin_unreachable(); } while (0)
+#else
+# define ASMJIT_ASSUME(exp) ((void)0)
+#endif
+// [@CC_ASSUME}@]
+
+// [@CC_ASSUME_ALIGNED{@]
+// \def ASMJIT_ASSUME_ALIGNED(p, alignment)
+// Assume that the pointer 'p' is aligned to at least 'alignment' bytes.
+#if ASMJIT_CC_HAS_ASSUME_ALIGNED
+# define ASMJIT_ASSUME_ALIGNED(p, alignment) __assume_aligned(p, alignment)
+#elif ASMJIT_CC_HAS_BUILTIN_ASSUME_ALIGNED
+# define ASMJIT_ASSUME_ALIGNED(p, alignment) p = __builtin_assume_aligned(p, alignment)
+#else
+# define ASMJIT_ASSUME_ALIGNED(p, alignment) ((void)0)
+#endif
+// [@CC_ASSUME_ALIGNED}@]
+
+// [@CC_EXPECT{@]
+// \def ASMJIT_LIKELY(exp)
+// Expression exp is likely to be true.
+//
+// \def ASMJIT_UNLIKELY(exp)
+// Expression exp is likely to be false.
+#if ASMJIT_CC_HAS_BUILTIN_EXPECT
+# define ASMJIT_LIKELY(exp) __builtin_expect(!!(exp), 1)
+# define ASMJIT_UNLIKELY(exp) __builtin_expect(!!(exp), 0)
+#else
+# define ASMJIT_LIKELY(exp) (exp)
+# define ASMJIT_UNLIKELY(exp) (exp)
+#endif
+// [@CC_EXPECT}@]
+
+// [@CC_FALLTHROUGH{@]
+// \def ASMJIT_FALLTHROUGH
+// The code falls through annotation (switch / case).
+#if ASMJIT_CC_CLANG && __cplusplus >= 201103L
+# define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
+#else
+# define ASMJIT_FALLTHROUGH (void)0
+#endif
+// [@CC_FALLTHROUGH}@]
+
+// [@CC_UNUSED{@]
+// \def ASMJIT_UNUSED(x)
+// Mark a variable x as unused.
+#define ASMJIT_UNUSED(x) (void)(x)
+// [@CC_UNUSED}@]
+
+// [@CC_OFFSET_OF{@]
+// \def ASMJIT_OFFSET_OF(x, y).
+// Get the offset of a member y of a struct x at compile-time.
+#define ASMJIT_OFFSET_OF(x, y) ((int)(intptr_t)((const char*)&((const x*)0x1)->y) - 1)
+// [@CC_OFFSET_OF}@]
+
+// [@CC_ARRAY_SIZE{@]
+// \def ASMJIT_ARRAY_SIZE(x)
+// Get the array size of x at compile-time.
+#define ASMJIT_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+// [@CC_ARRAY_SIZE}@]
+
+// ============================================================================
+// [asmjit::Build - STDTYPES]
+// ============================================================================
+
+// [@STDTYPES{@]
+#if defined(__MINGW32__) || defined(__MINGW64__)
+# include <stdio.h> // TODO(review): header name stripped in paste (MinGW workaround include) - verify vs. upstream asmjit build.h
+#endif
+#if defined(_MSC_VER) && (_MSC_VER < 1600)
+# include <limits.h>
+# if !defined(ASMJIT_SUPPRESS_STD_TYPES)
+# if (_MSC_VER < 1300)
+typedef signed char int8_t;
+typedef signed short int16_t;
+typedef signed int int32_t;
+typedef signed __int64 int64_t;
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+# else
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+# endif
+# endif
+#else
+# include <stdint.h>
+# include <limits.h>
+#endif
+// [@STDTYPES}@]
+
+// ============================================================================
+// [asmjit::Build - Dependencies]
+// ============================================================================
+
+#include <assert.h> // TODO(review): header names on these lines were stripped in paste - restored from upstream asmjit build.h, verify
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <new> // TODO(review): header names stripped in paste - asmjit uses placement new; verify second header vs. upstream
+#include <stdio.h>
+
+#if ASMJIT_OS_POSIX
+# include <pthread.h>
+#endif // ASMJIT_OS_POSIX
+
+// ============================================================================
+// [asmjit::Build - Additional]
+// ============================================================================
+
+// Build host architecture if no architecture is selected.
+#if !defined(ASMJIT_BUILD_HOST) && \
+ !defined(ASMJIT_BUILD_X86) && \
+ !defined(ASMJIT_BUILD_ARM)
+# define ASMJIT_BUILD_HOST
+#endif
+
+// Detect host architecture if building only for host.
+#if defined(ASMJIT_BUILD_HOST)
+# if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && !defined(ASMJIT_BUILD_X86)
+# define ASMJIT_BUILD_X86
+# endif // ASMJIT_ARCH_X86
+#endif // ASMJIT_BUILD_HOST
+
+#if ASMJIT_CC_MSC
+# define ASMJIT_UINT64_C(x) x##ui64
+#else
+# define ASMJIT_UINT64_C(x) x##ull
+#endif
+
+#if ASMJIT_ARCH_LE
+# define ASMJIT_PACK32_4x8(A, B, C, D) ((A) + ((B) << 8) + ((C) << 16) + ((D) << 24))
+#else
+# define ASMJIT_PACK32_4x8(A, B, C, D) ((D) + ((C) << 8) + ((B) << 16) + ((A) << 24))
+#endif
+
+// Internal macros that are only used when building AsmJit itself.
+#if defined(ASMJIT_EXPORTS)
+# if !defined(ASMJIT_DEBUG) && ASMJIT_CC_HAS_ATTRIBUTE_OPTIMIZE
+# define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
+# else
+# define ASMJIT_FAVOR_SIZE
+# endif
+#endif // ASMJIT_EXPORTS
+
+// ============================================================================
+// [asmjit::Build - Test]
+// ============================================================================
+
+// Include a unit testing package if this is a `asmjit_test` build.
+#if defined(ASMJIT_TEST)
+# include "../../test/broken.h"
+#endif // ASMJIT_TEST
+
+// [Guard]
+#endif // _ASMJIT_BUILD_H
diff --git a/libraries/asmjit/asmjit/base.h b/libraries/asmjit/asmjit/base.h
new file mode 100644
index 00000000000..70b7e82f6ba
--- /dev/null
+++ b/libraries/asmjit/asmjit/base.h
@@ -0,0 +1,34 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_H
+#define _ASMJIT_BASE_H
+
+// [Dependencies]
+#include "./base/arch.h"
+#include "./base/assembler.h"
+#include "./base/codebuilder.h"
+#include "./base/codecompiler.h"
+#include "./base/codeemitter.h"
+#include "./base/codeholder.h"
+#include "./base/constpool.h"
+#include "./base/cpuinfo.h"
+#include "./base/func.h"
+#include "./base/globals.h"
+#include "./base/inst.h"
+#include "./base/logging.h"
+#include "./base/operand.h"
+#include "./base/osutils.h"
+#include "./base/runtime.h"
+#include "./base/simdtypes.h"
+#include "./base/string.h"
+#include "./base/utils.h"
+#include "./base/vmem.h"
+#include "./base/zone.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_H
diff --git a/libraries/asmjit/asmjit/base/arch.cpp b/libraries/asmjit/asmjit/base/arch.cpp
new file mode 100644
index 00000000000..2e849c67a1d
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/arch.cpp
@@ -0,0 +1,161 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/arch.h"
+
+#if defined(ASMJIT_BUILD_X86)
+#include "../x86/x86operand.h"
+#endif // ASMJIT_BUILD_X86
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::ArchInfo]
+// ============================================================================
+
+static const uint32_t archInfoTable[] = {
+ // <-------------+---------------------+-----------------------+-------+
+ // | Type | SubType | GPInfo|
+ // <-------------+---------------------+-----------------------+-------+
+ ASMJIT_PACK32_4x8(ArchInfo::kTypeNone , ArchInfo::kSubTypeNone, 0, 0),
+ ASMJIT_PACK32_4x8(ArchInfo::kTypeX86 , ArchInfo::kSubTypeNone, 4, 8),
+ ASMJIT_PACK32_4x8(ArchInfo::kTypeX64 , ArchInfo::kSubTypeNone, 8, 16),
+ ASMJIT_PACK32_4x8(ArchInfo::kTypeX32 , ArchInfo::kSubTypeNone, 8, 16),
+ ASMJIT_PACK32_4x8(ArchInfo::kTypeA32 , ArchInfo::kSubTypeNone, 4, 16),
+ ASMJIT_PACK32_4x8(ArchInfo::kTypeA64 , ArchInfo::kSubTypeNone, 8, 32)
+};
+
+ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t type, uint32_t subType) noexcept {
+ uint32_t index = type < ASMJIT_ARRAY_SIZE(archInfoTable) ? type : uint32_t(0);
+
+ // Make sure the `archInfoTable` array is correctly indexed.
+ _signature = archInfoTable[index];
+ ASMJIT_ASSERT(_type == index);
+
+ // Even if the architecture is not known we setup its type and sub-type,
+ // however, such architecture is not really useful.
+ _type = type;
+ _subType = subType;
+}
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept {
+ uint32_t typeId = typeIdInOut;
+
+ // Zero the signature so it's clear in case that typeId is not invalid.
+ regInfo._signature = 0;
+
+#if defined(ASMJIT_BUILD_X86)
+ if (ArchInfo::isX86Family(archType)) {
+ // Passed RegType instead of TypeId?
+ if (typeId <= Reg::kRegMax)
+ typeId = x86OpData.archRegs.regTypeToTypeId[typeId];
+
+ if (ASMJIT_UNLIKELY(!TypeId::isValid(typeId)))
+ return DebugUtils::errored(kErrorInvalidTypeId);
+
+ // First normalize architecture dependent types.
+ if (TypeId::isAbstract(typeId)) {
+ if (typeId == TypeId::kIntPtr)
+ typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kI32 : TypeId::kI64;
+ else
+ typeId = (archType == ArchInfo::kTypeX86) ? TypeId::kU32 : TypeId::kU64;
+ }
+
+ // Type size helps to construct all kinds of registers. If the size is zero
+ // then the TypeId is invalid.
+ uint32_t size = TypeId::sizeOf(typeId);
+ if (ASMJIT_UNLIKELY(!size))
+ return DebugUtils::errored(kErrorInvalidTypeId);
+
+ if (ASMJIT_UNLIKELY(typeId == TypeId::kF80))
+ return DebugUtils::errored(kErrorInvalidUseOfF80);
+
+ uint32_t regType = 0;
+
+ switch (typeId) {
+ case TypeId::kI8:
+ case TypeId::kU8:
+ regType = X86Reg::kRegGpbLo;
+ break;
+
+ case TypeId::kI16:
+ case TypeId::kU16:
+ regType = X86Reg::kRegGpw;
+ break;
+
+ case TypeId::kI32:
+ case TypeId::kU32:
+ regType = X86Reg::kRegGpd;
+ break;
+
+ case TypeId::kI64:
+ case TypeId::kU64:
+ if (archType == ArchInfo::kTypeX86)
+ return DebugUtils::errored(kErrorInvalidUseOfGpq);
+
+ regType = X86Reg::kRegGpq;
+ break;
+
+ // F32 and F64 are always promoted to use vector registers.
+ case TypeId::kF32:
+ typeId = TypeId::kF32x1;
+ regType = X86Reg::kRegXmm;
+ break;
+
+ case TypeId::kF64:
+ typeId = TypeId::kF64x1;
+ regType = X86Reg::kRegXmm;
+ break;
+
+ // Mask registers {k}.
+ case TypeId::kMask8:
+ case TypeId::kMask16:
+ case TypeId::kMask32:
+ case TypeId::kMask64:
+ regType = X86Reg::kRegK;
+ break;
+
+ // MMX registers.
+ case TypeId::kMmx32:
+ case TypeId::kMmx64:
+ regType = X86Reg::kRegMm;
+ break;
+
+ // XMM|YMM|ZMM registers.
+ default:
+ if (size <= 16)
+ regType = X86Reg::kRegXmm;
+ else if (size == 32)
+ regType = X86Reg::kRegYmm;
+ else
+ regType = X86Reg::kRegZmm;
+ break;
+ }
+
+ typeIdInOut = typeId;
+ regInfo._signature = x86OpData.archRegs.regInfo[regType].getSignature();
+ return kErrorOk;
+ }
+#endif // ASMJIT_BUILD_X86
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/arch.h b/libraries/asmjit/asmjit/base/arch.h
new file mode 100644
index 00000000000..e03c6af4e0a
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/arch.h
@@ -0,0 +1,199 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_ARCH_H
+#define _ASMJIT_BASE_ARCH_H
+
+// [Dependencies]
+#include "../base/globals.h"
+#include "../base/operand.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [asmjit::ArchInfo]
+// ============================================================================
+
+class ArchInfo {
+public:
+ //! Architecture type.
+ ASMJIT_ENUM(Type) {
+ kTypeNone = 0, //!< No/Unknown architecture.
+
+ // X86 architectures.
+ kTypeX86 = 1, //!< X86 architecture (32-bit).
+ kTypeX64 = 2, //!< X64 architecture (64-bit) (AMD64).
+ kTypeX32 = 3, //!< X32 architecture (DEAD-END).
+
+ // ARM architectures.
+ kTypeA32 = 4, //!< ARM 32-bit architecture (AArch32/ARM/THUMB).
+ kTypeA64 = 5, //!< ARM 64-bit architecture (AArch64).
+
+ //! Architecture detected at compile-time (architecture of the host).
+ kTypeHost = ASMJIT_ARCH_X86 ? kTypeX86 :
+ ASMJIT_ARCH_X64 ? kTypeX64 :
+ ASMJIT_ARCH_ARM32 ? kTypeA32 :
+ ASMJIT_ARCH_ARM64 ? kTypeA64 : kTypeNone
+ };
+
+ //! Architecture sub-type or execution mode.
+ ASMJIT_ENUM(SubType) {
+ kSubTypeNone = 0, //!< Default mode (or no specific mode).
+
+ // X86 sub-types.
+ kSubTypeX86_AVX = 1, //!< Code generation uses AVX by default (VEC instructions).
+ kSubTypeX86_AVX2 = 2, //!< Code generation uses AVX2 by default (VEC instructions).
+ kSubTypeX86_AVX512 = 3, //!< Code generation uses AVX-512F by default (+32 vector regs).
+ kSubTypeX86_AVX512VL = 4, //!< Code generation uses AVX-512F-VL by default (+VL extensions).
+
+ // ARM sub-types.
+ kSubTypeA32_Thumb = 8, //!< THUMB|THUMB2 sub-type (only ARM in 32-bit mode).
+
+#if (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512VL__)
+ kSubTypeHost = kSubTypeX86_AVX512VL
+#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX512F__)
+ kSubTypeHost = kSubTypeX86_AVX512
+#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX2__)
+ kSubTypeHost = kSubTypeX86_AVX2
+#elif (ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64) && defined(__AVX__)
+ kSubTypeHost = kSubTypeX86_AVX
+#elif (ASMJIT_ARCH_ARM32) && (defined(_M_ARMT) || defined(__thumb__) || defined(__thumb2__))
+ kSubTypeHost = kSubTypeA32_Thumb
+#else
+ kSubTypeHost = 0
+#endif
+ };
+
+ // --------------------------------------------------------------------------
+ // [Utilities]
+ // --------------------------------------------------------------------------
+
+ static ASMJIT_INLINE bool isX86Family(uint32_t archType) noexcept { return archType >= kTypeX86 && archType <= kTypeX32; }
+ static ASMJIT_INLINE bool isArmFamily(uint32_t archType) noexcept { return archType >= kTypeA32 && archType <= kTypeA64; }
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE ArchInfo() noexcept : _signature(0) {}
+ ASMJIT_INLINE ArchInfo(const ArchInfo& other) noexcept : _signature(other._signature) {}
+ explicit ASMJIT_INLINE ArchInfo(uint32_t type, uint32_t subType = kSubTypeNone) noexcept { init(type, subType); }
+
+ ASMJIT_INLINE static ArchInfo host() noexcept { return ArchInfo(kTypeHost, kSubTypeHost); }
+
+ // --------------------------------------------------------------------------
+ // [Init / Reset]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE bool isInitialized() const noexcept { return _type != kTypeNone; }
+
+ ASMJIT_API void init(uint32_t type, uint32_t subType = kSubTypeNone) noexcept;
+ ASMJIT_INLINE void reset() noexcept { _signature = 0; }
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get if the architecture is 32-bit.
+ ASMJIT_INLINE bool is32Bit() const noexcept { return _gpSize == 4; }
+ //! Get if the architecture is 64-bit.
+ ASMJIT_INLINE bool is64Bit() const noexcept { return _gpSize == 8; }
+
+ //! Get architecture type, see \ref Type.
+ ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
+
+ //! Get architecture sub-type, see \ref SubType.
+ //!
+ //! X86 & X64
+ //! ---------
+ //!
+ //! Architecture subtype describe the highest instruction-set level that can
+ //! be used.
+ //!
+ //! ARM32
+ //! -----
+ //!
+ //! Architecture mode means the instruction encoding to be used when generating
+ //! machine code, thus mode can be used to force generation of THUMB and THUMB2
+ //! encoding or regular ARM encoding.
+ //!
+ //! ARM64
+ //! -----
+ //!
+ //! No meaning yet.
+ ASMJIT_INLINE uint32_t getSubType() const noexcept { return _subType; }
+
+ //! Get if the architecture is X86, X64, or X32.
+ ASMJIT_INLINE bool isX86Family() const noexcept { return isX86Family(_type); }
+ //! Get if the architecture is ARM32 or ARM64.
+ ASMJIT_INLINE bool isArmFamily() const noexcept { return isArmFamily(_type); }
+
+ //! Get a size of a general-purpose register.
+ ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _gpSize; }
+ //! Get number of general-purpose registers.
+ ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _gpCount; }
+
+ // --------------------------------------------------------------------------
+ // [Operator Overload]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE const ArchInfo& operator=(const ArchInfo& other) noexcept { _signature = other._signature; return *this; }
+ ASMJIT_INLINE bool operator==(const ArchInfo& other) const noexcept { return _signature == other._signature; }
+ ASMJIT_INLINE bool operator!=(const ArchInfo& other) const noexcept { return _signature != other._signature; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ union {
+ struct {
+ uint8_t _type; //!< Architecture type.
+ uint8_t _subType; //!< Architecture sub-type.
+ uint8_t _gpSize; //!< Default size of a general purpose register.
+ uint8_t _gpCount; //!< Count of all general purpose registers.
+ };
+ uint32_t _signature; //!< Architecture signature (32-bit int).
+ };
+};
+
+// ============================================================================
+// [asmjit::ArchRegs]
+// ============================================================================
+
+//! Information about all architecture registers.
+struct ArchRegs {
+ //! Register information and signatures indexed by \ref Reg::Type.
+ RegInfo regInfo[Reg::kRegMax + 1];
+ //! Count (maximum) of registers per \ref Reg::Type.
+ uint8_t regCount[Reg::kRegMax + 1];
+ //! Converts RegType to TypeId, see \ref TypeId::Id.
+ uint8_t regTypeToTypeId[Reg::kRegMax + 1];
+};
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+struct ArchUtils {
+ ASMJIT_API static Error typeIdToRegInfo(uint32_t archType, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept;
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_ARCH_H
diff --git a/libraries/asmjit/asmjit/base/assembler.cpp b/libraries/asmjit/asmjit/base/assembler.cpp
new file mode 100644
index 00000000000..79a26665115
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/assembler.cpp
@@ -0,0 +1,447 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/assembler.h"
+#include "../base/constpool.h"
+#include "../base/utils.h"
+#include "../base/vmem.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::Assembler - Construction / Destruction]
+// ============================================================================
+
+Assembler::Assembler() noexcept
+ : CodeEmitter(kTypeAssembler),
+ _section(nullptr),
+ _bufferData(nullptr),
+ _bufferEnd(nullptr),
+ _bufferPtr(nullptr),
+ _op4(),
+ _op5() {}
+
+Assembler::~Assembler() noexcept {
+ if (_code) sync();
+}
+
+// ============================================================================
+// [asmjit::Assembler - Events]
+// ============================================================================
+
+Error Assembler::onAttach(CodeHolder* code) noexcept {
+ // Attach to the end of the .text section.
+ _section = code->_sections[0];
+ uint8_t* p = _section->_buffer._data;
+
+ _bufferData = p;
+ _bufferEnd = p + _section->_buffer._capacity;
+ _bufferPtr = p + _section->_buffer._length;
+
+ _op4.reset();
+ _op5.reset();
+
+ return Base::onAttach(code);
+}
+
+Error Assembler::onDetach(CodeHolder* code) noexcept {
+ _section = nullptr;
+ _bufferData = nullptr;
+ _bufferEnd = nullptr;
+ _bufferPtr = nullptr;
+
+ _op4.reset();
+ _op5.reset();
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::Assembler - Code-Generation]
+// ============================================================================
+
+Error Assembler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
+ _op4 = o4;
+ _op5 = o5;
+ _options |= kOptionOp4Op5Used;
+ return _emit(instId, o0, o1, o2, o3);
+}
+
+Error Assembler::_emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) {
+ const Operand_* op = opArray;
+ switch (opCount) {
+ case 0: return _emit(instId, _none, _none, _none, _none);
+ case 1: return _emit(instId, op[0], _none, _none, _none);
+ case 2: return _emit(instId, op[0], op[1], _none, _none);
+ case 3: return _emit(instId, op[0], op[1], op[2], _none);
+ case 4: return _emit(instId, op[0], op[1], op[2], op[3]);
+
+ case 5:
+ _op4 = op[4];
+ _op5.reset();
+ _options |= kOptionOp4Op5Used;
+ return _emit(instId, op[0], op[1], op[2], op[3]);
+
+ case 6:
+ _op4 = op[4];
+ _op5 = op[5];
+ _options |= kOptionOp4Op5Used;
+ return _emit(instId, op[0], op[1], op[2], op[3]);
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+}
+
+// ============================================================================
+// [asmjit::Assembler - Sync]
+// ============================================================================
+
+void Assembler::sync() noexcept {
+ ASMJIT_ASSERT(_code != nullptr); // Only called by CodeHolder, so we must be attached.
+ ASMJIT_ASSERT(_section != nullptr); // One section must always be active, no matter what.
+ ASMJIT_ASSERT(_bufferData == _section->_buffer._data); // `_bufferStart` is a shortcut to `_section->buffer.data`.
+
+ // Update only if the current offset is greater than the section length.
+ size_t offset = (size_t)(_bufferPtr - _bufferData);
+ if (_section->getBuffer().getLength() < offset)
+ _section->_buffer._length = offset;
+}
+
+// ============================================================================
+// [asmjit::Assembler - Code-Buffer]
+// ============================================================================
+
+Error Assembler::setOffset(size_t offset) {
+ if (_lastError) return _lastError;
+
+ size_t length = std::max(_section->getBuffer().getLength(), getOffset());
+ if (ASMJIT_UNLIKELY(offset > length))
+ return setLastError(DebugUtils::errored(kErrorInvalidArgument));
+
+ // If the `Assembler` generated any code the `_bufferPtr` may be higher than
+ // the section length stored in `CodeHolder` as it doesn't update it each
+ // time it generates machine code. This is the same as calling `sync()`.
+ if (_section->_buffer._length < length)
+ _section->_buffer._length = length;
+
+ _bufferPtr = _bufferData + offset;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::Assembler - Comment]
+// ============================================================================
+
+Error Assembler::comment(const char* s, size_t len) {
+ if (_lastError) return _lastError;
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled) {
+ Logger* logger = _code->getLogger();
+ logger->log(s, len);
+ logger->log("\n", 1);
+ return kErrorOk;
+ }
+#else
+ ASMJIT_UNUSED(s);
+ ASMJIT_UNUSED(len);
+#endif
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::Assembler - Building Blocks]
+// ============================================================================
+
+Label Assembler::newLabel() {
+ uint32_t id = 0;
+ if (!_lastError) {
+ ASMJIT_ASSERT(_code != nullptr);
+ Error err = _code->newLabelId(id);
+ if (ASMJIT_UNLIKELY(err)) setLastError(err);
+ }
+ return Label(id);
+}
+
+Label Assembler::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) {
+ uint32_t id = 0;
+ if (!_lastError) {
+ ASMJIT_ASSERT(_code != nullptr);
+ Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId);
+ if (ASMJIT_UNLIKELY(err)) setLastError(err);
+ }
+ return Label(id);
+}
+
+Error Assembler::bind(const Label& label) {
+ if (_lastError) return _lastError;
+ ASMJIT_ASSERT(_code != nullptr);
+
+ LabelEntry* le = _code->getLabelEntry(label);
+ if (ASMJIT_UNLIKELY(!le))
+ return setLastError(DebugUtils::errored(kErrorInvalidLabel));
+
+ // Label can be bound only once.
+ if (ASMJIT_UNLIKELY(le->isBound()))
+ return setLastError(DebugUtils::errored(kErrorLabelAlreadyBound));
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled) {
+ StringBuilderTmp<256> sb;
+ if (le->hasName())
+ sb.setFormat("%s:", le->getName());
+ else
+ sb.setFormat("L%u:", Operand::unpackId(label.getId()));
+
+ size_t binSize = 0;
+ if (!_code->_logger->hasOption(Logger::kOptionBinaryForm))
+ binSize = Globals::kInvalidIndex;
+
+ Logging::formatLine(sb, nullptr, binSize, 0, 0, getInlineComment());
+ _code->_logger->log(sb.getData(), sb.getLength());
+ }
+#endif // !ASMJIT_DISABLE_LOGGING
+
+ Error err = kErrorOk;
+ size_t pos = getOffset();
+
+ LabelLink* link = le->_links;
+ LabelLink* prev = nullptr;
+
+ while (link) {
+ intptr_t offset = link->offset;
+ uint32_t relocId = link->relocId;
+
+ if (relocId != RelocEntry::kInvalidId) {
+ // Adjust relocation data.
+ RelocEntry* re = _code->_relocations[relocId];
+ re->_data += static_cast<uint64_t>(pos);
+ }
+ else {
+ // Not using relocId, this means that we are overwriting a real
+ // displacement in the CodeBuffer.
+ int32_t patchedValue = static_cast<int32_t>(
+ static_cast<intptr_t>(pos) - offset + link->rel);
+
+ // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
+ uint32_t size = _bufferData[offset];
+ if (size == 4)
+ Utils::writeI32u(_bufferData + offset, static_cast<int32_t>(patchedValue));
+ else if (size == 1 && Utils::isInt8(patchedValue))
+ _bufferData[offset] = static_cast<uint8_t>(patchedValue & 0xFF);
+ else
+ err = DebugUtils::errored(kErrorInvalidDisplacement);
+ }
+
+ prev = link->prev;
+ _code->_unresolvedLabelsCount--;
+ _code->_baseHeap.release(link, sizeof(LabelLink));
+
+ link = prev;
+ }
+
+ // Set as bound.
+ le->_sectionId = _section->getId();
+ le->_offset = pos;
+ le->_links = nullptr;
+ resetInlineComment();
+
+ if (err != kErrorOk)
+ return setLastError(err);
+
+ return kErrorOk;
+}
+
+Error Assembler::embed(const void* data, uint32_t size) {
+ if (_lastError) return _lastError;
+
+ if (getRemainingSpace() < size) {
+ Error err = _code->growBuffer(&_section->_buffer, size);
+ if (ASMJIT_UNLIKELY(err != kErrorOk)) return setLastError(err);
+ }
+
+ ::memcpy(_bufferPtr, data, size);
+ _bufferPtr += size;
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled)
+ _code->_logger->logBinary(data, size);
+#endif // !ASMJIT_DISABLE_LOGGING
+
+ return kErrorOk;
+}
+
+Error Assembler::embedLabel(const Label& label) {
+ if (_lastError) return _lastError;
+ ASMJIT_ASSERT(_code != nullptr);
+
+ RelocEntry* re;
+ LabelEntry* le = _code->getLabelEntry(label);
+
+ if (ASMJIT_UNLIKELY(!le))
+ return setLastError(DebugUtils::errored(kErrorInvalidLabel));
+
+ Error err;
+ uint32_t gpSize = getGpSize();
+
+ if (getRemainingSpace() < gpSize) {
+ err = _code->growBuffer(&_section->_buffer, gpSize);
+ if (ASMJIT_UNLIKELY(err)) return setLastError(err);
+ }
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled)
+ _code->_logger->logf(gpSize == 4 ? ".dd L%u\n" : ".dq L%u\n", Operand::unpackId(label.getId()));
+#endif // !ASMJIT_DISABLE_LOGGING
+
+ err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, gpSize);
+ if (ASMJIT_UNLIKELY(err)) return setLastError(err);
+
+ re->_sourceSectionId = _section->getId();
+ re->_sourceOffset = static_cast<uint64_t>(getOffset());
+
+ if (le->isBound()) {
+ re->_targetSectionId = le->getSectionId();
+ re->_data = static_cast<uint64_t>(static_cast<int64_t>(le->getOffset()));
+ }
+ else {
+ LabelLink* link = _code->newLabelLink(le, _section->getId(), getOffset(), 0);
+ if (ASMJIT_UNLIKELY(!link))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ link->relocId = re->getId();
+ }
+
+ // Emit dummy DWORD/QWORD depending on the address size.
+ ::memset(_bufferPtr, 0, gpSize);
+ _bufferPtr += gpSize;
+
+ return kErrorOk;
+}
+
+Error Assembler::embedConstPool(const Label& label, const ConstPool& pool) {
+ if (_lastError) return _lastError;
+
+ if (!isLabelValid(label))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+ ASMJIT_PROPAGATE(align(kAlignData, static_cast<uint32_t>(pool.getAlignment())));
+ ASMJIT_PROPAGATE(bind(label));
+
+ size_t size = pool.getSize();
+ if (getRemainingSpace() < size) {
+ Error err = _code->growBuffer(&_section->_buffer, size);
+ if (ASMJIT_UNLIKELY(err)) return setLastError(err);
+ }
+
+ uint8_t* p = _bufferPtr;
+ pool.fill(p);
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled)
+ _code->_logger->logBinary(p, size);
+#endif // !ASMJIT_DISABLE_LOGGING
+
+ _bufferPtr += size;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::Assembler - Emit-Helpers]
+// ============================================================================
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+void Assembler::_emitLog(
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
+ uint32_t relSize, uint32_t imLen, uint8_t* afterCursor) {
+
+ Logger* logger = _code->getLogger();
+ ASMJIT_ASSERT(logger != nullptr);
+ ASMJIT_ASSERT(options & CodeEmitter::kOptionLoggingEnabled);
+
+ StringBuilderTmp<256> sb;
+ uint32_t logOptions = logger->getOptions();
+
+ uint8_t* beforeCursor = _bufferPtr;
+ intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
+
+ sb.appendString(logger->getIndentation());
+
+ Operand_ opArray[6];
+ opArray[0].copyFrom(o0);
+ opArray[1].copyFrom(o1);
+ opArray[2].copyFrom(o2);
+ opArray[3].copyFrom(o3);
+
+ if (options & kOptionOp4Op5Used) {
+ opArray[4].copyFrom(_op4);
+ opArray[5].copyFrom(_op5);
+ }
+ else {
+ opArray[4].reset();
+ opArray[5].reset();
+ }
+
+ Logging::formatInstruction(
+ sb, logOptions,
+ this, getArchType(),
+ Inst::Detail(instId, options, _extraReg), opArray, 6);
+
+ if ((logOptions & Logger::kOptionBinaryForm) != 0)
+ Logging::formatLine(sb, _bufferPtr, emittedSize, relSize, imLen, getInlineComment());
+ else
+ Logging::formatLine(sb, nullptr, Globals::kInvalidIndex, 0, 0, getInlineComment());
+
+ logger->log(sb.getData(), sb.getLength());
+}
+
+Error Assembler::_emitFailed(
+ Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+
+ StringBuilderTmp<256> sb;
+ sb.appendString(DebugUtils::errorAsString(err));
+ sb.appendString(": ");
+
+ Operand_ opArray[6];
+ opArray[0].copyFrom(o0);
+ opArray[1].copyFrom(o1);
+ opArray[2].copyFrom(o2);
+ opArray[3].copyFrom(o3);
+
+ if (options & kOptionOp4Op5Used) {
+ opArray[4].copyFrom(_op4);
+ opArray[5].copyFrom(_op5);
+ }
+ else {
+ opArray[4].reset();
+ opArray[5].reset();
+ }
+
+ Logging::formatInstruction(
+ sb, 0,
+ this, getArchType(),
+ Inst::Detail(instId, options, _extraReg), opArray, 6);
+
+ resetOptions();
+ resetExtraReg();
+ resetInlineComment();
+ return setLastError(err, sb.getData());
+}
+#endif
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/assembler.h b/libraries/asmjit/asmjit/base/assembler.h
new file mode 100644
index 00000000000..55fbb142e3d
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/assembler.h
@@ -0,0 +1,154 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_ASSEMBLER_H
+#define _ASMJIT_BASE_ASSEMBLER_H
+
+// [Dependencies]
+#include "../base/codeemitter.h"
+#include "../base/codeholder.h"
+#include "../base/operand.h"
+#include "../base/simdtypes.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [asmjit::Assembler]
+// ============================================================================
+
+//! Base assembler.
+//!
+//! This class implements a base interface that is used by architecture
+//! specific assemblers.
+//!
+//! \sa CodeCompiler.
+class ASMJIT_VIRTAPI Assembler : public CodeEmitter {
+public:
+ ASMJIT_NONCOPYABLE(Assembler)
+ typedef CodeEmitter Base;
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `Assembler` instance.
+ ASMJIT_API Assembler() noexcept;
+ //! Destroy the `Assembler` instance.
+ ASMJIT_API virtual ~Assembler() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Events]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ // --------------------------------------------------------------------------
+ // [Code-Generation]
+ // --------------------------------------------------------------------------
+
+ using CodeEmitter::_emit;
+
+ ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override;
+ ASMJIT_API Error _emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) override;
+
+ // --------------------------------------------------------------------------
+ // [Code-Buffer]
+ // --------------------------------------------------------------------------
+
+ //! Called by \ref CodeHolder::sync().
+ ASMJIT_API virtual void sync() noexcept;
+
+ //! Get the capacity of the current CodeBuffer.
+ ASMJIT_INLINE size_t getBufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
+ //! Get the number of remaining bytes in the current CodeBuffer.
+ ASMJIT_INLINE size_t getRemainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
+
+ //! Get the current position in the CodeBuffer.
+ ASMJIT_INLINE size_t getOffset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
+ //! Set the current position in the CodeBuffer to `offset`.
+ //!
+ //! NOTE: The `offset` cannot be outside of the buffer length (even if it's
+ //! within buffer's capacity).
+ ASMJIT_API Error setOffset(size_t offset);
+
+ //! Get start of the CodeBuffer of the current section.
+ ASMJIT_INLINE uint8_t* getBufferData() const noexcept { return _bufferData; }
+ //! Get end (first invalid byte) of the current section.
+ ASMJIT_INLINE uint8_t* getBufferEnd() const noexcept { return _bufferEnd; }
+ //! Get pointer in the CodeBuffer of the current section.
+ ASMJIT_INLINE uint8_t* getBufferPtr() const noexcept { return _bufferPtr; }
+
+ // --------------------------------------------------------------------------
+ // [Code-Generation]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API Label newLabel() override;
+ ASMJIT_API Label newNamedLabel(
+ const char* name,
+ size_t nameLength = Globals::kInvalidIndex,
+ uint32_t type = Label::kTypeGlobal,
+ uint32_t parentId = 0) override;
+ ASMJIT_API Error bind(const Label& label) override;
+ ASMJIT_API Error embed(const void* data, uint32_t size) override;
+ ASMJIT_API Error embedLabel(const Label& label) override;
+ ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+ ASMJIT_API Error comment(const char* s, size_t len = Globals::kInvalidIndex) override;
+
+ // --------------------------------------------------------------------------
+ // [Emit-Helpers]
+ // --------------------------------------------------------------------------
+
+protected:
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ void _emitLog(
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
+ uint32_t relSize, uint32_t imLen, uint8_t* afterCursor);
+
+ Error _emitFailed(
+ Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
+#else
+ ASMJIT_INLINE Error _emitFailed(
+ uint32_t err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+
+ resetOptions();
+ resetInlineComment();
+ return setLastError(err);
+ }
+#endif
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+public:
+ SectionEntry* _section; //!< Current section where the assembling happens.
+ uint8_t* _bufferData; //!< Start of the CodeBuffer of the current section.
+ uint8_t* _bufferEnd; //!< End (first invalid byte) of the current section.
+ uint8_t* _bufferPtr; //!< Pointer in the CodeBuffer of the current section.
+
+ Operand_ _op4; //!< 5th operand data, used only temporarily.
+ Operand_ _op5; //!< 6th operand data, used only temporarily.
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_ASSEMBLER_H
diff --git a/libraries/asmjit/asmjit/base/codebuilder.cpp b/libraries/asmjit/asmjit/base/codebuilder.cpp
new file mode 100644
index 00000000000..1f0024833e8
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codebuilder.cpp
@@ -0,0 +1,584 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Guard]
+#include "../asmjit_build.h"
+#if !defined(ASMJIT_DISABLE_BUILDER)
+
+// [Dependencies]
+#include "../base/codebuilder.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::CodeBuilder - Construction / Destruction]
+// ============================================================================
+
+CodeBuilder::CodeBuilder() noexcept
+ : CodeEmitter(kTypeBuilder),
+ _cbBaseZone(32768 - Zone::kZoneOverhead),
+ _cbDataZone(16384 - Zone::kZoneOverhead),
+ _cbPassZone(32768 - Zone::kZoneOverhead),
+ _cbHeap(&_cbBaseZone),
+ _cbPasses(),
+ _cbLabels(),
+ _firstNode(nullptr),
+ _lastNode(nullptr),
+ _cursor(nullptr),
+ _position(0),
+ _nodeFlags(0) {}
+CodeBuilder::~CodeBuilder() noexcept {}
+
+// ============================================================================
+// [asmjit::CodeBuilder - Events]
+// ============================================================================
+
+Error CodeBuilder::onAttach(CodeHolder* code) noexcept {
+ return Base::onAttach(code);
+}
+
+Error CodeBuilder::onDetach(CodeHolder* code) noexcept {
+ _cbPasses.reset();
+ _cbLabels.reset();
+ _cbHeap.reset(&_cbBaseZone);
+
+ _cbBaseZone.reset(false);
+ _cbDataZone.reset(false);
+ _cbPassZone.reset(false);
+
+ _position = 0;
+ _nodeFlags = 0;
+
+ _firstNode = nullptr;
+ _lastNode = nullptr;
+ _cursor = nullptr;
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::CodeBuilder - Node-Factory]
+// ============================================================================
+
+Error CodeBuilder::getCBLabel(CBLabel** pOut, uint32_t id) noexcept {
+ if (_lastError) return _lastError;
+ ASMJIT_ASSERT(_code != nullptr);
+
+ size_t index = Operand::unpackId(id);
+ if (ASMJIT_UNLIKELY(index >= _code->getLabelsCount()))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+ if (index >= _cbLabels.getLength())
+ ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, index + 1));
+
+ CBLabel* node = _cbLabels[index];
+ if (!node) {
+ node = newNodeT<CBLabel>(id);
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorNoHeapMemory);
+ _cbLabels[index] = node;
+ }
+
+ *pOut = node;
+ return kErrorOk;
+}
+
+Error CodeBuilder::registerLabelNode(CBLabel* node) noexcept {
+ if (_lastError) return _lastError;
+ ASMJIT_ASSERT(_code != nullptr);
+
+ // Don't call setLastError() from here, we are noexcept and we are called
+ // by `newLabelNode()` and `newFuncNode()`, which are noexcept as well.
+ uint32_t id;
+ ASMJIT_PROPAGATE(_code->newLabelId(id));
+ size_t index = Operand::unpackId(id);
+
+ // We just added one label so it must be true.
+ ASMJIT_ASSERT(_cbLabels.getLength() < index + 1);
+ ASMJIT_PROPAGATE(_cbLabels.resize(&_cbHeap, index + 1));
+
+ _cbLabels[index] = node;
+ node->_id = id;
+ return kErrorOk;
+}
+
+CBLabel* CodeBuilder::newLabelNode() noexcept {
+ CBLabel* node = newNodeT<CBLabel>();
+ if (!node || registerLabelNode(node) != kErrorOk)
+ return nullptr;
+ return node;
+}
+
+CBAlign* CodeBuilder::newAlignNode(uint32_t mode, uint32_t alignment) noexcept {
+ return newNodeT<CBAlign>(mode, alignment);
+}
+
+CBData* CodeBuilder::newDataNode(const void* data, uint32_t size) noexcept {
+ if (size > CBData::kInlineBufferSize) {
+ void* cloned = _cbDataZone.alloc(size);
+ if (!cloned) return nullptr;
+
+ if (data) ::memcpy(cloned, data, size);
+ data = cloned;
+ }
+
+ return newNodeT<CBData>(const_cast<void*>(data), size);
+}
+
+CBConstPool* CodeBuilder::newConstPool() noexcept {
+ CBConstPool* node = newNodeT<CBConstPool>();
+ if (!node || registerLabelNode(node) != kErrorOk)
+ return nullptr;
+ return node;
+}
+
+CBComment* CodeBuilder::newCommentNode(const char* s, size_t len) noexcept {
+ if (s) {
+ if (len == Globals::kInvalidIndex) len = ::strlen(s);
+ if (len > 0) {
+ s = static_cast<char*>(_cbDataZone.dup(s, len, true));
+ if (!s) return nullptr;
+ }
+ }
+
+ return newNodeT<CBComment>(s);
+}
+
+// ============================================================================
+// [asmjit::CodeBuilder - Code-Emitter]
+// ============================================================================
+
+Label CodeBuilder::newLabel() {
+ uint32_t id = kInvalidValue;
+
+ if (!_lastError) {
+ CBLabel* node = newNodeT<CBLabel>(id);
+ if (ASMJIT_UNLIKELY(!node)) {
+ setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ }
+ else {
+ Error err = registerLabelNode(node);
+ if (ASMJIT_UNLIKELY(err))
+ setLastError(err);
+ else
+ id = node->getId();
+ }
+ }
+
+ return Label(id);
+}
+
+Label CodeBuilder::newNamedLabel(const char* name, size_t nameLength, uint32_t type, uint32_t parentId) {
+ uint32_t id = kInvalidValue;
+
+ if (!_lastError) {
+ CBLabel* node = newNodeT<CBLabel>(id);
+ if (ASMJIT_UNLIKELY(!node)) {
+ setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ }
+ else {
+ Error err = _code->newNamedLabelId(id, name, nameLength, type, parentId);
+ if (ASMJIT_UNLIKELY(err))
+ setLastError(err);
+ else
+ id = node->getId();
+ }
+ }
+
+ return Label(id);
+}
+
+Error CodeBuilder::bind(const Label& label) {
+ if (_lastError) return _lastError;
+
+ CBLabel* node;
+ Error err = getCBLabel(&node, label);
+ if (ASMJIT_UNLIKELY(err))
+ return setLastError(err);
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error CodeBuilder::align(uint32_t mode, uint32_t alignment) {
+ if (_lastError) return _lastError;
+
+ CBAlign* node = newAlignNode(mode, alignment);
+ if (ASMJIT_UNLIKELY(!node))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error CodeBuilder::embed(const void* data, uint32_t size) {
+ if (_lastError) return _lastError;
+
+ CBData* node = newDataNode(data, size);
+ if (ASMJIT_UNLIKELY(!node))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error CodeBuilder::embedLabel(const Label& label) {
+ if (_lastError) return _lastError;
+
+ CBLabelData* node = newNodeT<CBLabelData>(label.getId());
+ if (ASMJIT_UNLIKELY(!node))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error CodeBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
+ if (_lastError) return _lastError;
+
+ if (!isLabelValid(label))
+ return setLastError(DebugUtils::errored(kErrorInvalidLabel));
+
+ ASMJIT_PROPAGATE(align(kAlignData, static_cast<uint32_t>(pool.getAlignment())));
+ ASMJIT_PROPAGATE(bind(label));
+
+ CBData* node = newDataNode(nullptr, static_cast<uint32_t>(pool.getSize()));
+ if (ASMJIT_UNLIKELY(!node))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ pool.fill(node->getData());
+ addNode(node);
+ return kErrorOk;
+}
+
+Error CodeBuilder::comment(const char* s, size_t len) {
+ if (_lastError) return _lastError;
+
+ CBComment* node = newCommentNode(s, len);
+ if (ASMJIT_UNLIKELY(!node))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeBuilder - Node-Management]
+// ============================================================================
+
+CBNode* CodeBuilder::addNode(CBNode* node) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(node->_prev == nullptr);
+ ASMJIT_ASSERT(node->_next == nullptr);
+
+ if (!_cursor) {
+ if (!_firstNode) {
+ _firstNode = node;
+ _lastNode = node;
+ }
+ else {
+ node->_next = _firstNode;
+ _firstNode->_prev = node;
+ _firstNode = node;
+ }
+ }
+ else {
+ CBNode* prev = _cursor;
+ CBNode* next = _cursor->_next;
+
+ node->_prev = prev;
+ node->_next = next;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+ }
+
+ _cursor = node;
+ return node;
+}
+
+CBNode* CodeBuilder::addAfter(CBNode* node, CBNode* ref) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(ref);
+
+ ASMJIT_ASSERT(node->_prev == nullptr);
+ ASMJIT_ASSERT(node->_next == nullptr);
+
+ CBNode* prev = ref;
+ CBNode* next = ref->_next;
+
+ node->_prev = prev;
+ node->_next = next;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+
+ return node;
+}
+
+CBNode* CodeBuilder::addBefore(CBNode* node, CBNode* ref) noexcept {
+ ASMJIT_ASSERT(node != nullptr);
+ ASMJIT_ASSERT(node->_prev == nullptr);
+ ASMJIT_ASSERT(node->_next == nullptr);
+ ASMJIT_ASSERT(ref != nullptr);
+
+ CBNode* prev = ref->_prev;
+ CBNode* next = ref;
+
+ node->_prev = prev;
+ node->_next = next;
+
+ next->_prev = node;
+ if (prev)
+ prev->_next = node;
+ else
+ _firstNode = node;
+
+ return node;
+}
+
+static ASMJIT_INLINE void CodeBuilder_nodeRemoved(CodeBuilder* self, CBNode* node_) noexcept {
+ if (node_->isJmpOrJcc()) {
+ CBJump* node = static_cast<CBJump*>(node_);
+ CBLabel* label = node->getTarget();
+
+ if (label) {
+ // Disconnect.
+ CBJump** pPrev = &label->_from;
+ for (;;) {
+ ASMJIT_ASSERT(*pPrev != nullptr);
+
+ CBJump* current = *pPrev;
+ if (!current) break;
+
+ if (current == node) {
+ *pPrev = node->_jumpNext;
+ break;
+ }
+
+ pPrev = &current->_jumpNext;
+ }
+
+ label->subNumRefs();
+ }
+ }
+}
+
+CBNode* CodeBuilder::removeNode(CBNode* node) noexcept {
+ CBNode* prev = node->_prev;
+ CBNode* next = node->_next;
+
+ if (_firstNode == node)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == node)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+
+ if (_cursor == node)
+ _cursor = prev;
+ CodeBuilder_nodeRemoved(this, node);
+
+ return node;
+}
+
+void CodeBuilder::removeNodes(CBNode* first, CBNode* last) noexcept {
+ if (first == last) {
+ removeNode(first);
+ return;
+ }
+
+ CBNode* prev = first->_prev;
+ CBNode* next = last->_next;
+
+ if (_firstNode == first)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == last)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ CBNode* node = first;
+ for (;;) {
+ CBNode* next = node->getNext();
+ ASMJIT_ASSERT(next != nullptr);
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+
+ if (_cursor == node)
+ _cursor = prev;
+ CodeBuilder_nodeRemoved(this, node);
+
+ if (node == last)
+ break;
+ node = next;
+ }
+}
+
+CBNode* CodeBuilder::setCursor(CBNode* node) noexcept {
+ CBNode* old = _cursor;
+ _cursor = node;
+ return old;
+}
+
+// ============================================================================
+// [asmjit::CodeBuilder - Passes]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE CBPass* CodeBuilder::getPassByName(const char* name) const noexcept {
+ for (size_t i = 0, len = _cbPasses.getLength(); i < len; i++) {
+ CBPass* pass = _cbPasses[i];
+ if (::strcmp(pass->getName(), name) == 0)
+ return pass;
+ }
+
+ return nullptr;
+}
+
+ASMJIT_FAVOR_SIZE Error CodeBuilder::addPass(CBPass* pass) noexcept {
+ if (ASMJIT_UNLIKELY(pass == nullptr)) {
+ // Since this is directly called by `addPassT()` we treat `null` argument
+ // as out-of-memory condition. Otherwise it would be API misuse.
+ return DebugUtils::errored(kErrorNoHeapMemory);
+ }
+ else if (ASMJIT_UNLIKELY(pass->_cb)) {
+ // Kind of weird, but okay...
+ if (pass->_cb == this)
+ return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ ASMJIT_PROPAGATE(_cbPasses.append(&_cbHeap, pass));
+ pass->_cb = this;
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error CodeBuilder::deletePass(CBPass* pass) noexcept {
+ if (ASMJIT_UNLIKELY(pass == nullptr))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (pass->_cb != nullptr) {
+ if (pass->_cb != this)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ size_t index = _cbPasses.indexOf(pass);
+ ASMJIT_ASSERT(index != Globals::kInvalidIndex);
+
+ pass->_cb = nullptr;
+ _cbPasses.removeAt(index);
+ }
+
+ pass->~CBPass();
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeBuilder - Serialization]
+// ============================================================================
+
+Error CodeBuilder::serialize(CodeEmitter* dst) {
+ Error err = kErrorOk;
+ CBNode* node_ = getFirstNode();
+
+ do {
+ dst->setInlineComment(node_->getInlineComment());
+
+ switch (node_->getType()) {
+ case CBNode::kNodeAlign: {
+ CBAlign* node = static_cast<CBAlign*>(node_);
+ err = dst->align(node->getMode(), node->getAlignment());
+ break;
+ }
+
+ case CBNode::kNodeData: {
+ CBData* node = static_cast<CBData*>(node_);
+ err = dst->embed(node->getData(), node->getSize());
+ break;
+ }
+
+ case CBNode::kNodeFunc:
+ case CBNode::kNodeLabel: {
+ CBLabel* node = static_cast<CBLabel*>(node_);
+ err = dst->bind(node->getLabel());
+ break;
+ }
+
+ case CBNode::kNodeLabelData: {
+ CBLabelData* node = static_cast<CBLabelData*>(node_);
+ err = dst->embedLabel(node->getLabel());
+ break;
+ }
+
+ case CBNode::kNodeConstPool: {
+ CBConstPool* node = static_cast<CBConstPool*>(node_);
+ err = dst->embedConstPool(node->getLabel(), node->getConstPool());
+ break;
+ }
+
+ case CBNode::kNodeInst:
+ case CBNode::kNodeFuncCall: {
+ CBInst* node = node_->as<CBInst>();
+ dst->setOptions(node->getOptions());
+ dst->setExtraReg(node->getExtraReg());
+ err = dst->emitOpArray(node->getInstId(), node->getOpArray(), node->getOpCount());
+ break;
+ }
+
+ case CBNode::kNodeComment: {
+ CBComment* node = static_cast<CBComment*>(node_);
+ err = dst->comment(node->getInlineComment());
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (err) break;
+ node_ = node_->getNext();
+ } while (node_);
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::CBPass]
+// ============================================================================
+
+CBPass::CBPass(const char* name) noexcept
+ : _cb(nullptr),
+ _name(name) {}
+CBPass::~CBPass() noexcept {}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // !ASMJIT_DISABLE_BUILDER
diff --git a/libraries/asmjit/asmjit/base/codebuilder.h b/libraries/asmjit/asmjit/base/codebuilder.h
new file mode 100644
index 00000000000..231dd8449fd
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codebuilder.h
@@ -0,0 +1,915 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_CODEBUILDER_H
+#define _ASMJIT_BASE_CODEBUILDER_H
+
+#include "../asmjit_build.h"
+#if !defined(ASMJIT_DISABLE_BUILDER)
+
+// [Dependencies]
+#include "../base/assembler.h"
+#include "../base/codeholder.h"
+#include "../base/constpool.h"
+#include "../base/inst.h"
+#include "../base/operand.h"
+#include "../base/utils.h"
+#include "../base/zone.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class CBNode;
+class CBPass;
+
+class CBAlign;
+class CBComment;
+class CBConstPool;
+class CBData;
+class CBInst;
+class CBJump;
+class CBLabel;
+class CBLabelData;
+class CBSentinel;
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [asmjit::CodeBuilder]
+// ============================================================================
+
+class ASMJIT_VIRTAPI CodeBuilder : public CodeEmitter {
+public:
+  ASMJIT_NONCOPYABLE(CodeBuilder)
+  typedef CodeEmitter Base;
+
+  // --------------------------------------------------------------------------
+  // [Construction / Destruction]
+  // --------------------------------------------------------------------------
+
+  //! Create a new `CodeBuilder` instance.
+  ASMJIT_API CodeBuilder() noexcept;
+  //! Destroy the `CodeBuilder` instance.
+  ASMJIT_API virtual ~CodeBuilder() noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Events]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;
+  ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override;
+
+  // --------------------------------------------------------------------------
+  // [Accessors]
+  // --------------------------------------------------------------------------
+
+  //! Get a vector of CBPass objects that will be executed by `process()`.
+  ASMJIT_INLINE const ZoneVector<CBPass*>& getPasses() const noexcept { return _cbPasses; }
+
+  //! Get a vector of CBLabel nodes.
+  //!
+  //! NOTE: If a label of some index is not associated with `CodeBuilder` it
+  //! would be null, so always check for nulls if you iterate over the vector.
+  ASMJIT_INLINE const ZoneVector<CBLabel*>& getLabels() const noexcept { return _cbLabels; }
+
+  //! Get the first node.
+  ASMJIT_INLINE CBNode* getFirstNode() const noexcept { return _firstNode; }
+  //! Get the last node.
+  ASMJIT_INLINE CBNode* getLastNode() const noexcept { return _lastNode; }
+
+  // --------------------------------------------------------------------------
+  // [Node-Management]
+  // --------------------------------------------------------------------------
+
+  //! \internal
+  template<typename T>
+  ASMJIT_INLINE T* newNodeT() noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this); }
+
+  //! \internal
+  template<typename T, typename P0>
+  ASMJIT_INLINE T* newNodeT(P0 p0) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0); }
+
+  //! \internal
+  template<typename T, typename P0, typename P1>
+  ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1); }
+
+  //! \internal
+  template<typename T, typename P0, typename P1, typename P2>
+  ASMJIT_INLINE T* newNodeT(P0 p0, P1 p1, P2 p2) noexcept { return new(_cbHeap.alloc(sizeof(T))) T(this, p0, p1, p2); }
+
+  ASMJIT_API Error registerLabelNode(CBLabel* node) noexcept;
+  //! Get `CBLabel` by `id`.
+  ASMJIT_API Error getCBLabel(CBLabel** pOut, uint32_t id) noexcept;
+  //! Get `CBLabel` by `label`.
+  ASMJIT_INLINE Error getCBLabel(CBLabel** pOut, const Label& label) noexcept { return getCBLabel(pOut, label.getId()); }
+
+  //! Create a new \ref CBLabel node.
+  ASMJIT_API CBLabel* newLabelNode() noexcept;
+  //! Create a new \ref CBAlign node.
+  ASMJIT_API CBAlign* newAlignNode(uint32_t mode, uint32_t alignment) noexcept;
+  //! Create a new \ref CBData node.
+  ASMJIT_API CBData* newDataNode(const void* data, uint32_t size) noexcept;
+  //! Create a new \ref CBConstPool node.
+  ASMJIT_API CBConstPool* newConstPool() noexcept;
+  //! Create a new \ref CBComment node.
+  ASMJIT_API CBComment* newCommentNode(const char* s, size_t len) noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Code-Emitter]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_API virtual Label newLabel() override;
+  ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t type = Label::kTypeGlobal, uint32_t parentId = kInvalidValue) override;
+  ASMJIT_API virtual Error bind(const Label& label) override;
+  ASMJIT_API virtual Error align(uint32_t mode, uint32_t alignment) override;
+  ASMJIT_API virtual Error embed(const void* data, uint32_t size) override;
+  ASMJIT_API virtual Error embedLabel(const Label& label) override;
+  ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool) override;
+  ASMJIT_API virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) override;
+
+  // --------------------------------------------------------------------------
+  // [Node-Management]
+  // --------------------------------------------------------------------------
+
+  //! Add `node` after the current and set current to `node`.
+  ASMJIT_API CBNode* addNode(CBNode* node) noexcept;
+  //! Insert `node` after `ref`.
+  ASMJIT_API CBNode* addAfter(CBNode* node, CBNode* ref) noexcept;
+  //! Insert `node` before `ref`.
+  ASMJIT_API CBNode* addBefore(CBNode* node, CBNode* ref) noexcept;
+  //! Remove `node`.
+  ASMJIT_API CBNode* removeNode(CBNode* node) noexcept;
+  //! Remove multiple nodes.
+  ASMJIT_API void removeNodes(CBNode* first, CBNode* last) noexcept;
+
+  //! Get current node.
+  //!
+  //! \note If this method returns null it means that nothing has been
+  //! emitted yet.
+  ASMJIT_INLINE CBNode* getCursor() const noexcept { return _cursor; }
+  //! Set the current node without returning the previous node.
+  ASMJIT_INLINE void _setCursor(CBNode* node) noexcept { _cursor = node; }
+  //! Set the current node to `node` and return the previous one.
+  ASMJIT_API CBNode* setCursor(CBNode* node) noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Passes]
+  // --------------------------------------------------------------------------
+
+  template<typename T>
+  ASMJIT_INLINE T* newPassT() noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(); }
+  template<typename T, typename P0>
+  ASMJIT_INLINE T* newPassT(P0 p0) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0); }
+  template<typename T, typename P0, typename P1>
+  ASMJIT_INLINE T* newPassT(P0 p0, P1 p1) noexcept { return new(_cbBaseZone.alloc(sizeof(T))) T(p0, p1); }
+
+  template<typename T>
+  ASMJIT_INLINE Error addPassT() noexcept { return addPass(newPassT<T>()); }
+  template<typename T, typename P0>
+  ASMJIT_INLINE Error addPassT(P0 p0) noexcept { return addPass(newPassT<T>(p0)); }
+  template<typename T, typename P0, typename P1>
+  ASMJIT_INLINE Error addPassT(P0 p0, P1 p1) noexcept { return addPass(newPassT<T>(p0, p1)); }
+
+  //! Get a `CBPass` by name.
+  ASMJIT_API CBPass* getPassByName(const char* name) const noexcept;
+  //! Add `pass` to the list of passes.
+  ASMJIT_API Error addPass(CBPass* pass) noexcept;
+  //! Remove `pass` from the list of passes and delete it.
+  ASMJIT_API Error deletePass(CBPass* pass) noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Serialization]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_API virtual Error serialize(CodeEmitter* dst);
+
+  // --------------------------------------------------------------------------
+  // [Members]
+  // --------------------------------------------------------------------------
+
+  Zone _cbBaseZone;                      //!< Base zone used to allocate nodes and `CBPass`.
+  Zone _cbDataZone;                      //!< Data zone used to allocate data and names.
+  Zone _cbPassZone;                      //!< Zone passed to `CBPass::process()`.
+  ZoneHeap _cbHeap;                      //!< ZoneHeap that uses `_cbBaseZone`.
+
+  ZoneVector<CBPass*> _cbPasses;         //!< Array of `CBPass` objects.
+  ZoneVector<CBLabel*> _cbLabels;        //!< Maps label indexes to `CBLabel` nodes.
+
+  CBNode* _firstNode;                    //!< First node of the current section.
+  CBNode* _lastNode;                     //!< Last node of the current section.
+  CBNode* _cursor;                       //!< Current node (cursor).
+
+  uint32_t _position;                    //!< Flow-id assigned to each new node.
+  uint32_t _nodeFlags;                   //!< Flags assigned to each new node.
+};
+
+// ============================================================================
+// [asmjit::CBPass]
+// ============================================================================
+
+//! `CodeBuilder` pass used to code transformations, analysis, and lowering.
+class ASMJIT_VIRTAPI CBPass {
+public:
+ ASMJIT_NONCOPYABLE(CBPass);
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API CBPass(const char* name) noexcept;
+ ASMJIT_API virtual ~CBPass() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Interface]
+ // --------------------------------------------------------------------------
+
+ //! Process the code stored in CodeBuffer `cb`.
+ //!
+ //! This is the only function that is called by the `CodeBuilder` to process
+ //! the code. It passes the CodeBuilder itself (`cb`) and also a zone memory
+ //! allocator `zone`, which will be reset after the `process()` returns. The
+ //! allocator should be used for all allocations as it's fast and everything
+ //! it allocates will be released at once when `process()` returns.
+ virtual Error process(Zone* zone) noexcept = 0;
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE const CodeBuilder* cb() const noexcept { return _cb; }
+ ASMJIT_INLINE const char* getName() const noexcept { return _name; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ CodeBuilder* _cb; //!< CodeBuilder this pass is assigned to.
+ const char* _name; //!< Name of the pass.
+};
+
+// ============================================================================
+// [asmjit::CBNode]
+// ============================================================================
+
+//! Node (CodeBuilder).
+//!
+//! Every node represents a building-block used by \ref CodeBuilder. It can be
+//! instruction, data, label, comment, directive, or any other high-level
+//! representation that can be transformed to the building blocks mentioned.
+//! Every class that inherits \ref CodeBuilder can define its own nodes that it
+//! can lower to basic nodes.
+class CBNode {
+public:
+  ASMJIT_NONCOPYABLE(CBNode)
+
+  // --------------------------------------------------------------------------
+  // [Type]
+  // --------------------------------------------------------------------------
+
+  //! Type of \ref CBNode.
+  ASMJIT_ENUM(NodeType) {
+    kNodeNone = 0,                       //!< Invalid node (internal, don't use).
+
+    // [CodeBuilder]
+    kNodeInst = 1,                       //!< Node is \ref CBInst or \ref CBJump.
+    kNodeData = 2,                       //!< Node is \ref CBData.
+    kNodeAlign = 3,                      //!< Node is \ref CBAlign.
+    kNodeLabel = 4,                      //!< Node is \ref CBLabel.
+    kNodeLabelData = 5,                  //!< Node is \ref CBLabelData.
+    kNodeConstPool = 6,                  //!< Node is \ref CBConstPool.
+    kNodeComment = 7,                    //!< Node is \ref CBComment.
+    kNodeSentinel = 8,                   //!< Node is \ref CBSentinel.
+
+    // [CodeCompiler]
+    kNodeFunc = 16,                      //!< Node is \ref CCFunc (considered as \ref CBLabel by \ref CodeBuilder).
+    kNodeFuncExit = 17,                  //!< Node is \ref CCFuncRet.
+    kNodeFuncCall = 18,                  //!< Node is \ref CCFuncCall.
+    kNodePushArg = 19,                   //!< Node is \ref CCPushArg.
+    kNodeHint = 20,                      //!< Node is \ref CCHint.
+
+    // [UserDefined]
+    kNodeUser = 32                       //!< First id of a user-defined node.
+  };
+
+  // --------------------------------------------------------------------------
+  // [Flags]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_ENUM(Flags) {
+    //! The node has been translated by the CodeCompiler.
+    kFlagIsTranslated = 0x0001,
+    //! If the node can be safely removed (has no effect).
+    kFlagIsRemovable = 0x0004,
+    //! If the node is informative only and can be safely removed.
+    kFlagIsInformative = 0x0008,
+
+    //! If the `CBInst` is a jump.
+    kFlagIsJmp = 0x0010,
+    //! If the `CBInst` is a conditional jump.
+    kFlagIsJcc = 0x0020,
+
+    //! If the `CBInst` is an unconditional jump or conditional jump that is
+    //! likely to be taken.
+    kFlagIsTaken = 0x0040,
+
+    //! If the `CBNode` will return from a function.
+    //!
+    //! This flag is used by both `CBSentinel` and `CCFuncRet`.
+    kFlagIsRet = 0x0080,
+
+    //! Whether the instruction is special.
+    kFlagIsSpecial = 0x0100,
+
+    //! Whether the instruction is an FPU instruction.
+    kFlagIsFp = 0x0200
+  };
+
+  // --------------------------------------------------------------------------
+  // [Construction / Destruction]
+  // --------------------------------------------------------------------------
+
+  //! Create a new \ref CBNode - always use \ref CodeBuilder to allocate nodes.
+  ASMJIT_INLINE CBNode(CodeBuilder* cb, uint32_t type) noexcept {
+    _prev = nullptr;
+    _next = nullptr;
+    _type = static_cast<uint8_t>(type);
+    _opCount = 0;
+    _flags = static_cast<uint16_t>(cb->_nodeFlags);
+    _position = cb->_position;
+    _inlineComment = nullptr;
+    _passData = nullptr;
+  }
+  //! Destroy the `CBNode` instance (NEVER CALLED).
+  ASMJIT_INLINE ~CBNode() noexcept {}
+
+  // --------------------------------------------------------------------------
+  // [Accessors]
+  // --------------------------------------------------------------------------
+
+  template<typename T>
+  ASMJIT_INLINE T* as() noexcept { return static_cast<T*>(this); }
+  template<typename T>
+  ASMJIT_INLINE const T* as() const noexcept { return static_cast<const T*>(this); }
+
+  //! Get previous node in the compiler stream.
+  ASMJIT_INLINE CBNode* getPrev() const noexcept { return _prev; }
+  //! Get next node in the compiler stream.
+  ASMJIT_INLINE CBNode* getNext() const noexcept { return _next; }
+
+  //! Get the node type, see \ref Type.
+  ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
+  //! Get the node flags.
+  ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
+
+  //! Get whether the instruction has flag `flag`.
+  ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (static_cast<uint32_t>(_flags) & flag) != 0; }
+  //! Set node flags to `flags`.
+  ASMJIT_INLINE void setFlags(uint32_t flags) noexcept { _flags = static_cast<uint16_t>(flags); }
+  //! Add instruction `flags`.
+  ASMJIT_INLINE void orFlags(uint32_t flags) noexcept { _flags |= static_cast<uint16_t>(flags); }
+  //! And instruction `flags`.
+  ASMJIT_INLINE void andFlags(uint32_t flags) noexcept { _flags &= static_cast<uint16_t>(flags); }
+  //! Clear instruction `flags`.
+  ASMJIT_INLINE void andNotFlags(uint32_t flags) noexcept { _flags &= ~static_cast<uint16_t>(flags); }
+
+  //! Get whether the node has been translated.
+  ASMJIT_INLINE bool isTranslated() const noexcept { return hasFlag(kFlagIsTranslated); }
+
+  //! Get whether the node is removable if it's in unreachable code block.
+  ASMJIT_INLINE bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); }
+  //! Get whether the node is informative only (comment, hint).
+  ASMJIT_INLINE bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); }
+
+  //! Whether the node is `CBLabel`.
+  ASMJIT_INLINE bool isLabel() const noexcept { return _type == kNodeLabel; }
+  //! Whether the `CBInst` node is an unconditional jump.
+  ASMJIT_INLINE bool isJmp() const noexcept { return hasFlag(kFlagIsJmp); }
+  //! Whether the `CBInst` node is a conditional jump.
+  ASMJIT_INLINE bool isJcc() const noexcept { return hasFlag(kFlagIsJcc); }
+  //! Whether the `CBInst` node is a conditional/unconditional jump.
+  ASMJIT_INLINE bool isJmpOrJcc() const noexcept { return hasFlag(kFlagIsJmp | kFlagIsJcc); }
+  //! Whether the `CBInst` node is a return.
+  ASMJIT_INLINE bool isRet() const noexcept { return hasFlag(kFlagIsRet); }
+
+  //! Get whether the node is `CBInst` and the instruction is special.
+  ASMJIT_INLINE bool isSpecial() const noexcept { return hasFlag(kFlagIsSpecial); }
+  //! Get whether the node is `CBInst` and the instruction uses x87-FPU.
+  ASMJIT_INLINE bool isFp() const noexcept { return hasFlag(kFlagIsFp); }
+
+  ASMJIT_INLINE bool hasPosition() const noexcept { return _position != 0; }
+  //! Get flow index.
+  ASMJIT_INLINE uint32_t getPosition() const noexcept { return _position; }
+  //! Set flow index.
+  ASMJIT_INLINE void setPosition(uint32_t position) noexcept { _position = position; }
+
+  //! Get if the node has an inline comment.
+  ASMJIT_INLINE bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
+  //! Get an inline comment string.
+  ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; }
+  //! Set an inline comment string to `s`.
+  ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+  //! Set an inline comment string to null.
+  ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+  //! Get if the node has associated work-data.
+  ASMJIT_INLINE bool hasPassData() const noexcept { return _passData != nullptr; }
+  //! Get work-data - data used during processing & transformations.
+  template<typename T>
+  ASMJIT_INLINE T* getPassData() const noexcept { return (T*)_passData; }
+  //! Set work-data to `data`.
+  template<typename T>
+  ASMJIT_INLINE void setPassData(T* data) noexcept { _passData = (void*)data; }
+  //! Reset work-data to null.
+  ASMJIT_INLINE void resetPassData() noexcept { _passData = nullptr; }
+
+  // --------------------------------------------------------------------------
+  // [Members]
+  // --------------------------------------------------------------------------
+
+  CBNode* _prev;                         //!< Previous node.
+  CBNode* _next;                         //!< Next node.
+
+  uint8_t _type;                         //!< Node type, see \ref NodeType.
+  uint8_t _opCount;                      //!< Count of operands or zero.
+  uint16_t _flags;                       //!< Flags, different meaning for every type of the node.
+  uint32_t _position;                    //!< Flow index.
+
+  const char* _inlineComment;            //!< Inline comment or null if not used.
+  void* _passData;                       //!< Data used exclusively by the current `CBPass`.
+};
+
+// ============================================================================
+// [asmjit::CBInst]
+// ============================================================================
+
+//! Instruction (CodeBuilder).
+//!
+//! Wraps an instruction with its options and operands.
+class CBInst : public CBNode {
+public:
+  ASMJIT_NONCOPYABLE(CBInst)
+
+  // --------------------------------------------------------------------------
+  // [Construction / Destruction]
+  // --------------------------------------------------------------------------
+
+  //! Create a new `CBInst` instance.
+  ASMJIT_INLINE CBInst(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
+    : CBNode(cb, kNodeInst) {
+
+    orFlags(kFlagIsRemovable);
+    _instDetail.instId = static_cast<uint16_t>(instId);
+    _instDetail.options = options;
+
+    _opCount = static_cast<uint8_t>(opCount);
+    _opArray = opArray;
+
+    _updateMemOp();
+  }
+
+  //! Destroy the `CBInst` instance (NEVER CALLED).
+  ASMJIT_INLINE ~CBInst() noexcept {}
+
+  // --------------------------------------------------------------------------
+  // [Accessors]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_INLINE Inst::Detail& getInstDetail() noexcept { return _instDetail; }
+  ASMJIT_INLINE const Inst::Detail& getInstDetail() const noexcept { return _instDetail; }
+
+  //! Get the instruction id, see \ref Inst::Id.
+  ASMJIT_INLINE uint32_t getInstId() const noexcept { return _instDetail.instId; }
+  //! Set the instruction id to `instId`, see \ref Inst::Id.
+  ASMJIT_INLINE void setInstId(uint32_t instId) noexcept { _instDetail.instId = instId; }
+
+  //! Whether the instruction is either a jump or a conditional jump likely to be taken.
+  ASMJIT_INLINE bool isTaken() const noexcept { return hasFlag(kFlagIsTaken); }
+
+  //! Get emit options.
+  ASMJIT_INLINE uint32_t getOptions() const noexcept { return _instDetail.options; }
+  //! Set emit options.
+  ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _instDetail.options = options; }
+  //! Add emit options.
+  ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _instDetail.options |= options; }
+  //! Mask emit options.
+  ASMJIT_INLINE void andOptions(uint32_t options) noexcept { _instDetail.options &= options; }
+  //! Clear emit options.
+  ASMJIT_INLINE void delOptions(uint32_t options) noexcept { _instDetail.options &= ~options; }
+
+  //! Get if the node has an extra register operand.
+  ASMJIT_INLINE bool hasExtraReg() const noexcept { return _instDetail.hasExtraReg(); }
+  //! Get extra register operand.
+  ASMJIT_INLINE RegOnly& getExtraReg() noexcept { return _instDetail.extraReg; }
+  //! \overload
+  ASMJIT_INLINE const RegOnly& getExtraReg() const noexcept { return _instDetail.extraReg; }
+  //! Set extra register operand to `reg`.
+  ASMJIT_INLINE void setExtraReg(const Reg& reg) noexcept { _instDetail.extraReg.init(reg); }
+  //! Set extra register operand to `reg`.
+  ASMJIT_INLINE void setExtraReg(const RegOnly& reg) noexcept { _instDetail.extraReg.init(reg); }
+  //! Reset extra register operand.
+  ASMJIT_INLINE void resetExtraReg() noexcept { _instDetail.extraReg.reset(); }
+
+  //! Get operands count.
+  ASMJIT_INLINE uint32_t getOpCount() const noexcept { return _opCount; }
+  //! Get operands list.
+  ASMJIT_INLINE Operand* getOpArray() noexcept { return _opArray; }
+  //! \overload
+  ASMJIT_INLINE const Operand* getOpArray() const noexcept { return _opArray; }
+
+  //! Get whether the instruction contains a memory operand.
+  ASMJIT_INLINE bool hasMemOp() const noexcept { return _memOpIndex != 0xFF; }
+  //! Get memory operand.
+  //!
+  //! NOTE: Can only be called if the instruction has such operand,
+  //! see `hasMemOp()`.
+  ASMJIT_INLINE Mem* getMemOp() const noexcept {
+    ASMJIT_ASSERT(hasMemOp());
+    return static_cast<Mem*>(&_opArray[_memOpIndex]);
+  }
+  //! \overload
+  template<typename T>
+  ASMJIT_INLINE T* getMemOp() const noexcept {
+    ASMJIT_ASSERT(hasMemOp());
+    return static_cast<T*>(&_opArray[_memOpIndex]);
+  }
+
+  //! Set memory operand index, `0xFF` means no memory operand.
+  ASMJIT_INLINE void setMemOpIndex(uint32_t index) noexcept { _memOpIndex = static_cast<uint8_t>(index); }
+  //! Reset memory operand index to `0xFF` (no operand).
+  ASMJIT_INLINE void resetMemOpIndex() noexcept { _memOpIndex = 0xFF; }
+
+  // --------------------------------------------------------------------------
+  // [Utils]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_INLINE void _updateMemOp() noexcept {
+    Operand* opArray = getOpArray();
+    uint32_t opCount = getOpCount();
+
+    uint32_t i;
+    for (i = 0; i < opCount; i++)
+      if (opArray[i].isMem())
+        goto Update;
+    i = 0xFF;
+
+Update:
+    setMemOpIndex(i);
+  }
+
+  // --------------------------------------------------------------------------
+  // [Members]
+  // --------------------------------------------------------------------------
+
+  Inst::Detail _instDetail;              //!< Instruction id, options, and extra register.
+  uint8_t _memOpIndex;                   //!< \internal
+  uint8_t _reserved[7];                  //!< \internal
+  Operand* _opArray;                     //!< Instruction operands.
+};
+
+// ============================================================================
+// [asmjit::CBInstEx]
+// ============================================================================
+
+struct CBInstEx : public CBInst {
+ Operand _op4;
+ Operand _op5;
+};
+
+// ============================================================================
+// [asmjit::CBJump]
+// ============================================================================
+
+//! Asm jump (conditional or direct).
+//!
+//! Extension of `CBInst` node, which stores more information about the jump.
+class CBJump : public CBInst {
+public:
+ ASMJIT_NONCOPYABLE(CBJump)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE CBJump(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
+ : CBInst(cb, instId, options, opArray, opCount),
+ _target(nullptr),
+ _jumpNext(nullptr) {}
+ ASMJIT_INLINE ~CBJump() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE CBLabel* getTarget() const noexcept { return _target; }
+ ASMJIT_INLINE CBJump* getJumpNext() const noexcept { return _jumpNext; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ CBLabel* _target; //!< Target node.
+ CBJump* _jumpNext; //!< Next jump to the same target in a single linked-list.
+};
+
+// ============================================================================
+// [asmjit::CBData]
+// ============================================================================
+
+//! Asm data (CodeBuilder).
+//!
+//! Wraps `.data` directive. The node contains data that will be placed at the
+//! node's position in the assembler stream. The data is considered to be RAW;
+//! no analysis nor byte-order conversion is performed on RAW data.
+class CBData : public CBNode {
+public:
+  ASMJIT_NONCOPYABLE(CBData)
+  enum { kInlineBufferSize = static_cast<int>(64 - sizeof(CBNode) - 4) };
+
+  // --------------------------------------------------------------------------
+  // [Construction / Destruction]
+  // --------------------------------------------------------------------------
+
+  //! Create a new `CBData` instance.
+  ASMJIT_INLINE CBData(CodeBuilder* cb, void* data, uint32_t size) noexcept : CBNode(cb, kNodeData) {
+    if (size <= kInlineBufferSize) {
+      if (data) ::memcpy(_buf, data, size);
+    }
+    else {
+      _externalPtr = static_cast<uint8_t*>(data);
+    }
+    _size = size;
+  }
+
+  //! Destroy the `CBData` instance (NEVER CALLED).
+  ASMJIT_INLINE ~CBData() noexcept {}
+
+  // --------------------------------------------------------------------------
+  // [Accessors]
+  // --------------------------------------------------------------------------
+
+  //! Get size of the data.
+  uint32_t getSize() const noexcept { return _size; }
+  //! Get pointer to the data.
+  uint8_t* getData() const noexcept { return _size <= kInlineBufferSize ? const_cast<uint8_t*>(_buf) : _externalPtr; }
+
+  // --------------------------------------------------------------------------
+  // [Members]
+  // --------------------------------------------------------------------------
+
+  union {
+    struct {
+      uint8_t _buf[kInlineBufferSize];   //!< Embedded data buffer.
+      uint32_t _size;                    //!< Size of the data.
+    };
+    struct {
+      uint8_t* _externalPtr;             //!< Pointer to external data.
+    };
+  };
+};
+
+// ============================================================================
+// [asmjit::CBAlign]
+// ============================================================================
+
+//! Align directive (CodeBuilder).
+//!
+//! Wraps `.align` directive.
+class CBAlign : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CBAlign)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CBAlign` instance.
+ ASMJIT_INLINE CBAlign(CodeBuilder* cb, uint32_t mode, uint32_t alignment) noexcept
+ : CBNode(cb, kNodeAlign),
+ _mode(mode),
+ _alignment(alignment) {}
+ //! Destroy the `CBAlign` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CBAlign() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get align mode.
+ ASMJIT_INLINE uint32_t getMode() const noexcept { return _mode; }
+ //! Set align mode.
+ ASMJIT_INLINE void setMode(uint32_t mode) noexcept { _mode = mode; }
+
+ //! Get align offset in bytes.
+ ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
+ //! Set align offset in bytes to `offset`.
+ ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint32_t _mode; //!< Align mode, see \ref AlignMode.
+ uint32_t _alignment; //!< Alignment (in bytes).
+};
+
+// ============================================================================
+// [asmjit::CBLabel]
+// ============================================================================
+
+//! Label (CodeBuilder).
+class CBLabel : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CBLabel)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CBLabel` instance.
+ ASMJIT_INLINE CBLabel(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
+ : CBNode(cb, kNodeLabel),
+ _id(id),
+ _numRefs(0),
+ _from(nullptr) {}
+ //! Destroy the `CBLabel` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CBLabel() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get the label id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
+ //! Get the label as `Label` operand.
+ ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); }
+
+ //! Get first jmp instruction.
+ ASMJIT_INLINE CBJump* getFrom() const noexcept { return _from; }
+
+ //! Get number of jumps to this target.
+ ASMJIT_INLINE uint32_t getNumRefs() const noexcept { return _numRefs; }
+ //! Set number of jumps to this target.
+ ASMJIT_INLINE void setNumRefs(uint32_t i) noexcept { _numRefs = i; }
+
+ //! Add number of jumps to this target.
+ ASMJIT_INLINE void addNumRefs(uint32_t i = 1) noexcept { _numRefs += i; }
+ //! Subtract number of jumps to this target.
+ ASMJIT_INLINE void subNumRefs(uint32_t i = 1) noexcept { _numRefs -= i; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint32_t _id; //!< Label id.
+ uint32_t _numRefs; //!< Count of jumps here.
+ CBJump* _from; //!< Linked-list of nodes that can jump here.
+};
+
+// ============================================================================
+// [asmjit::CBLabelData]
+// ============================================================================
+
+class CBLabelData : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CBLabelData)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CBLabelData` instance.
+ ASMJIT_INLINE CBLabelData(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
+ : CBNode(cb, kNodeLabelData),
+ _id(id) {}
+
+ //! Destroy the `CBLabelData` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CBLabelData() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Interface]
+ // --------------------------------------------------------------------------
+
+ //! Get the label id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
+ //! Get the label as `Label` operand.
+ ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint32_t _id;
+};
+
+// ============================================================================
+// [asmjit::CBConstPool]
+// ============================================================================
+
+class CBConstPool : public CBLabel {
+public:
+ ASMJIT_NONCOPYABLE(CBConstPool)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CBConstPool` instance.
+ ASMJIT_INLINE CBConstPool(CodeBuilder* cb, uint32_t id = kInvalidValue) noexcept
+ : CBLabel(cb, id),
+ _constPool(&cb->_cbBaseZone) { _type = kNodeConstPool; }
+
+ //! Destroy the `CBConstPool` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CBConstPool() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Interface]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE ConstPool& getConstPool() noexcept { return _constPool; }
+ ASMJIT_INLINE const ConstPool& getConstPool() const noexcept { return _constPool; }
+
+ //! Get whether the constant-pool is empty.
+ ASMJIT_INLINE bool isEmpty() const noexcept { return _constPool.isEmpty(); }
+ //! Get the size of the constant-pool in bytes.
+ ASMJIT_INLINE size_t getSize() const noexcept { return _constPool.getSize(); }
+ //! Get minimum alignment.
+ ASMJIT_INLINE size_t getAlignment() const noexcept { return _constPool.getAlignment(); }
+
+ //! See \ref ConstPool::add().
+ ASMJIT_INLINE Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ return _constPool.add(data, size, dstOffset);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ ConstPool _constPool;
+};
+
+// ============================================================================
+// [asmjit::CBComment]
+// ============================================================================
+
+//! Comment (CodeBuilder).
+class CBComment : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CBComment)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CBComment` instance.
+ ASMJIT_INLINE CBComment(CodeBuilder* cb, const char* comment) noexcept : CBNode(cb, kNodeComment) {
+ orFlags(kFlagIsRemovable | kFlagIsInformative);
+ _inlineComment = comment;
+ }
+
+ //! Destroy the `CBComment` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CBComment() noexcept {}
+};
+
+// ============================================================================
+// [asmjit::CBSentinel]
+// ============================================================================
+
+//! Sentinel (CodeBuilder).
+//!
+//! Sentinel is a marker that is completely ignored by the code builder. It's
+//! used to remember a position in a code as it never gets removed by any pass.
+class CBSentinel : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CBSentinel)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CBSentinel` instance.
+ ASMJIT_INLINE CBSentinel(CodeBuilder* cb) noexcept : CBNode(cb, kNodeSentinel) {}
+ //! Destroy the `CBSentinel` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CBSentinel() noexcept {}
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // !ASMJIT_DISABLE_BUILDER
+#endif // _ASMJIT_BASE_CODEBUILDER_H
diff --git a/libraries/asmjit/asmjit/base/codecompiler.cpp b/libraries/asmjit/asmjit/base/codecompiler.cpp
new file mode 100644
index 00000000000..582e94a9056
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codecompiler.cpp
@@ -0,0 +1,573 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Guard]
+#include "../asmjit_build.h"
+#if !defined(ASMJIT_DISABLE_COMPILER)
+
+// [Dependencies]
+#include "../base/assembler.h"
+#include "../base/codecompiler.h"
+#include "../base/cpuinfo.h"
+#include "../base/logging.h"
+#include "../base/regalloc_p.h"
+#include "../base/utils.h"
+#include <stdarg.h>
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [Constants]
+// ============================================================================
+
+static const char noName[1] = { '\0' };
+
+// ============================================================================
+// [asmjit::CCFuncCall - Arg / Ret]
+// ============================================================================
+
+bool CCFuncCall::_setArg(uint32_t i, const Operand_& op) noexcept {
+ if ((i & ~kFuncArgHi) >= _funcDetail.getArgCount())
+ return false;
+
+ _args[i] = op;
+ return true;
+}
+
+bool CCFuncCall::_setRet(uint32_t i, const Operand_& op) noexcept {
+ if (i >= 2)
+ return false;
+
+ _ret[i] = op;
+ return true;
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Construction / Destruction]
+// ============================================================================
+
+CodeCompiler::CodeCompiler() noexcept
+ : CodeBuilder(),
+ _func(nullptr),
+ _vRegZone(4096 - Zone::kZoneOverhead),
+ _vRegArray(),
+ _localConstPool(nullptr),
+ _globalConstPool(nullptr) {
+
+ _type = kTypeCompiler;
+}
+CodeCompiler::~CodeCompiler() noexcept {}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Events]
+// ============================================================================
+
+Error CodeCompiler::onAttach(CodeHolder* code) noexcept {
+ return Base::onAttach(code);
+}
+
+Error CodeCompiler::onDetach(CodeHolder* code) noexcept {
+ _func = nullptr;
+
+ _localConstPool = nullptr;
+ _globalConstPool = nullptr;
+
+ _vRegArray.reset();
+ _vRegZone.reset(false);
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Node-Factory]
+// ============================================================================
+
+CCHint* CodeCompiler::newHintNode(Reg& r, uint32_t hint, uint32_t value) noexcept {
+ if (!r.isVirtReg()) return nullptr;
+
+ VirtReg* vr = getVirtReg(r);
+ return newNodeT<CCHint>(vr, hint, value);
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Func]
+// ============================================================================
+
+CCFunc* CodeCompiler::newFunc(const FuncSignature& sign) noexcept {
+ Error err;
+
+ CCFunc* func = newNodeT<CCFunc>();
+ if (!func) goto _NoMemory;
+
+ err = registerLabelNode(func);
+ if (ASMJIT_UNLIKELY(err)) {
+ // TODO: Calls setLastError, maybe rethink noexcept?
+ setLastError(err);
+ return nullptr;
+ }
+
+ // Create helper nodes.
+ func->_exitNode = newLabelNode();
+ func->_end = newNodeT<CBSentinel>();
+
+ if (!func->_exitNode || !func->_end)
+ goto _NoMemory;
+
+ // Function prototype.
+ err = func->getDetail().init(sign);
+ if (err != kErrorOk) {
+ setLastError(err);
+ return nullptr;
+ }
+
+ // If the CodeInfo guarantees higher alignment honor it.
+ if (_codeInfo.getStackAlignment() > func->_funcDetail._callConv.getNaturalStackAlignment())
+ func->_funcDetail._callConv.setNaturalStackAlignment(_codeInfo.getStackAlignment());
+
+ // Allocate space for function arguments.
+ func->_args = nullptr;
+ if (func->getArgCount() != 0) {
+ func->_args = _cbHeap.allocT<VirtReg*>(func->getArgCount() * sizeof(VirtReg*));
+ if (!func->_args) goto _NoMemory;
+
+ ::memset(func->_args, 0, func->getArgCount() * sizeof(VirtReg*));
+ }
+
+ return func;
+
+_NoMemory:
+ setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ return nullptr;
+}
+
+CCFunc* CodeCompiler::addFunc(CCFunc* func) {
+ ASMJIT_ASSERT(_func == nullptr);
+ _func = func;
+
+ addNode(func); // Function node.
+ CBNode* cursor = getCursor(); // {CURSOR}.
+ addNode(func->getExitNode()); // Function exit label.
+ addNode(func->getEnd()); // Function end marker.
+
+ _setCursor(cursor);
+ return func;
+}
+
+CCFunc* CodeCompiler::addFunc(const FuncSignature& sign) {
+ CCFunc* func = newFunc(sign);
+
+ if (!func) {
+ setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ return nullptr;
+ }
+
+ return addFunc(func);
+}
+
+CBSentinel* CodeCompiler::endFunc() {
+ CCFunc* func = getFunc();
+ if (!func) {
+ // TODO:
+ return nullptr;
+ }
+
+ // Add the local constant pool at the end of the function (if exists).
+ if (_localConstPool) {
+ setCursor(func->getEnd()->getPrev());
+ addNode(_localConstPool);
+ _localConstPool = nullptr;
+ }
+
+ // Mark as finished.
+ func->_isFinished = true;
+ _func = nullptr;
+
+ CBSentinel* end = func->getEnd();
+ setCursor(end);
+ return end;
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Ret]
+// ============================================================================
+
+CCFuncRet* CodeCompiler::newRet(const Operand_& o0, const Operand_& o1) noexcept {
+ CCFuncRet* node = newNodeT<CCFuncRet>(o0, o1);
+ if (!node) {
+ setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ return nullptr;
+ }
+ return node;
+}
+
+CCFuncRet* CodeCompiler::addRet(const Operand_& o0, const Operand_& o1) noexcept {
+ CCFuncRet* node = newRet(o0, o1);
+ if (!node) return nullptr;
+ return static_cast<CCFuncRet*>(addNode(node));
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Call]
+// ============================================================================
+
+CCFuncCall* CodeCompiler::newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
+ Error err;
+ uint32_t nArgs;
+
+ CCFuncCall* node = _cbHeap.allocT<CCFuncCall>(sizeof(CCFuncCall) + sizeof(Operand));
+ Operand* opArray = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(node) + sizeof(CCFuncCall));
+
+ if (ASMJIT_UNLIKELY(!node))
+ goto _NoMemory;
+
+ opArray[0].copyFrom(o0);
+ new (node) CCFuncCall(this, instId, 0, opArray, 1);
+
+ if ((err = node->getDetail().init(sign)) != kErrorOk) {
+ setLastError(err);
+ return nullptr;
+ }
+
+ // If there are no arguments skip the allocation.
+ if ((nArgs = sign.getArgCount()) == 0)
+ return node;
+
+ node->_args = static_cast<Operand*>(_cbHeap.alloc(nArgs * sizeof(Operand)));
+ if (!node->_args) goto _NoMemory;
+
+ ::memset(node->_args, 0, nArgs * sizeof(Operand));
+ return node;
+
+_NoMemory:
+ setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ return nullptr;
+}
+
+CCFuncCall* CodeCompiler::addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
+ CCFuncCall* node = newCall(instId, o0, sign);
+ if (!node) return nullptr;
+ return static_cast<CCFuncCall*>(addNode(node));
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Vars]
+// ============================================================================
+
+Error CodeCompiler::setArg(uint32_t argIndex, const Reg& r) {
+ CCFunc* func = getFunc();
+
+ if (!func)
+ return setLastError(DebugUtils::errored(kErrorInvalidState));
+
+ if (!isVirtRegValid(r))
+ return setLastError(DebugUtils::errored(kErrorInvalidVirtId));
+
+ VirtReg* vr = getVirtReg(r);
+ func->setArg(argIndex, vr);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Hint]
+// ============================================================================
+
+Error CodeCompiler::_hint(Reg& r, uint32_t hint, uint32_t value) {
+ if (!r.isVirtReg()) return kErrorOk;
+
+ CCHint* node = newHintNode(r, hint, value);
+ if (!node) return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeCompiler - Vars]
+// ============================================================================
+
+VirtReg* CodeCompiler::newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept {
+ size_t index = _vRegArray.getLength();
+ if (ASMJIT_UNLIKELY(index > Operand::kPackedIdCount))
+ return nullptr;
+
+ VirtReg* vreg;
+ if (_vRegArray.willGrow(&_cbHeap, 1) != kErrorOk || !(vreg = _vRegZone.allocZeroedT<VirtReg>()))
+ return nullptr;
+
+ vreg->_id = Operand::packId(static_cast<uint32_t>(index));
+ vreg->_regInfo._signature = signature;
+ vreg->_name = noName;
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (name && name[0] != '\0')
+ vreg->_name = static_cast<char*>(_cbDataZone.dup(name, ::strlen(name), true));
+#endif // !ASMJIT_DISABLE_LOGGING
+
+ vreg->_size = TypeId::sizeOf(typeId);
+ vreg->_typeId = typeId;
+ vreg->_alignment = static_cast<uint8_t>(std::min<uint32_t>(vreg->_size, 64));
+ vreg->_priority = 10;
+
+ // The following are only used by `RAPass`.
+ vreg->_raId = kInvalidValue;
+ vreg->_state = VirtReg::kStateNone;
+ vreg->_physId = Globals::kInvalidRegId;
+
+ _vRegArray.appendUnsafe(vreg);
+ return vreg;
+}
+
+Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* name) {
+ RegInfo regInfo;
+
+ Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo);
+ if (ASMJIT_UNLIKELY(err)) return setLastError(err);
+
+ VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name);
+ if (ASMJIT_UNLIKELY(!vReg)) {
+ out.reset();
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ }
+
+ out._initReg(regInfo.getSignature(), vReg->getId());
+ return kErrorOk;
+}
+
+Error CodeCompiler::_newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap) {
+ StringBuilderTmp<256> sb;
+ sb.appendFormatVA(nameFmt, ap);
+ return _newReg(out, typeId, sb.getData());
+}
+
+Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* name) {
+ RegInfo regInfo;
+ uint32_t typeId;
+
+ if (isVirtRegValid(ref)) {
+ VirtReg* vRef = getVirtReg(ref);
+ typeId = vRef->getTypeId();
+
+ // NOTE: It's possible to cast one register type to another if it's the
+ // same register kind. However, VirtReg always contains the TypeId that
+ // was used to create the register. This means that in some cases we may
+ // end up having different size of `ref` and `vRef`. In such case we
+ // adjust the TypeId to match the `ref` register type instead of the
+ // original register type, which should be the expected behavior.
+ uint32_t typeSize = TypeId::sizeOf(typeId);
+ uint32_t refSize = ref.getSize();
+
+ if (typeSize != refSize) {
+ if (TypeId::isInt(typeId)) {
+ // GP register - change TypeId to match `ref`, but keep sign of `vRef`.
+ switch (refSize) {
+ case 1: typeId = TypeId::kI8 | (typeId & 1); break;
+ case 2: typeId = TypeId::kI16 | (typeId & 1); break;
+ case 4: typeId = TypeId::kI32 | (typeId & 1); break;
+ case 8: typeId = TypeId::kI64 | (typeId & 1); break;
+ default: typeId = TypeId::kVoid; break;
+ }
+ }
+ else if (TypeId::isMmx(typeId)) {
+ // MMX register - always use 64-bit.
+ typeId = TypeId::kMmx64;
+ }
+ else if (TypeId::isMask(typeId)) {
+ // Mask register - change TypeId to match `ref` size.
+ switch (refSize) {
+ case 1: typeId = TypeId::kMask8; break;
+ case 2: typeId = TypeId::kMask16; break;
+ case 4: typeId = TypeId::kMask32; break;
+ case 8: typeId = TypeId::kMask64; break;
+ default: typeId = TypeId::kVoid; break;
+ }
+ }
+ else {
+ // VEC register - change TypeId to match `ref` size, keep vector metadata.
+ uint32_t elementTypeId = TypeId::elementOf(typeId);
+
+ switch (refSize) {
+ case 16: typeId = TypeId::_kVec128Start + (elementTypeId - TypeId::kI8); break;
+ case 32: typeId = TypeId::_kVec256Start + (elementTypeId - TypeId::kI8); break;
+ case 64: typeId = TypeId::_kVec512Start + (elementTypeId - TypeId::kI8); break;
+ default: typeId = TypeId::kVoid; break;
+ }
+ }
+
+ if (typeId == TypeId::kVoid)
+ return setLastError(DebugUtils::errored(kErrorInvalidState));
+ }
+ }
+ else {
+ typeId = ref.getType();
+ }
+
+ Error err = ArchUtils::typeIdToRegInfo(getArchType(), typeId, regInfo);
+ if (ASMJIT_UNLIKELY(err)) return setLastError(err);
+
+ VirtReg* vReg = newVirtReg(typeId, regInfo.getSignature(), name);
+ if (ASMJIT_UNLIKELY(!vReg)) {
+ out.reset();
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ }
+
+ out._initReg(regInfo.getSignature(), vReg->getId());
+ return kErrorOk;
+}
+
+Error CodeCompiler::_newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap) {
+ StringBuilderTmp<256> sb;
+ sb.appendFormatVA(nameFmt, ap);
+ return _newReg(out, ref, sb.getData());
+}
+
+Error CodeCompiler::_newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name) {
+ if (size == 0)
+ return setLastError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment == 0) alignment = 1;
+ if (!Utils::isPowerOf2(alignment))
+ return setLastError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment > 64) alignment = 64;
+
+ VirtReg* vReg = newVirtReg(0, 0, name);
+ if (ASMJIT_UNLIKELY(!vReg)) {
+ out.reset();
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+ }
+
+ vReg->_size = size;
+ vReg->_isStack = true;
+ vReg->_alignment = static_cast<uint8_t>(alignment);
+
+ // Set the memory operand to GPD/GPQ and its id to VirtReg.
+ out = Mem(Init, _nativeGpReg.getType(), vReg->getId(), Reg::kRegNone, kInvalidValue, 0, 0, Mem::kSignatureMemRegHomeFlag);
+ return kErrorOk;
+}
+
+Error CodeCompiler::_newConst(Mem& out, uint32_t scope, const void* data, size_t size) {
+ CBConstPool** pPool;
+ if (scope == kConstScopeLocal)
+ pPool = &_localConstPool;
+ else if (scope == kConstScopeGlobal)
+ pPool = &_globalConstPool;
+ else
+ return setLastError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (!*pPool && !(*pPool = newConstPool()))
+ return setLastError(DebugUtils::errored(kErrorNoHeapMemory));
+
+ CBConstPool* pool = *pPool;
+ size_t off;
+
+ Error err = pool->add(data, size, off);
+ if (ASMJIT_UNLIKELY(err)) return setLastError(err);
+
+ out = Mem(Init,
+ Label::kLabelTag, // Base type.
+ pool->getId(), // Base id.
+ 0, // Index type.
+ kInvalidValue, // Index id.
+ static_cast<int32_t>(off), // Offset.
+ static_cast<uint32_t>(size), // Size.
+ 0); // Flags.
+ return kErrorOk;
+}
+
+Error CodeCompiler::alloc(Reg& reg) {
+ if (!reg.isVirtReg()) return kErrorOk;
+ return _hint(reg, CCHint::kHintAlloc, kInvalidValue);
+}
+
+Error CodeCompiler::alloc(Reg& reg, uint32_t physId) {
+ if (!reg.isVirtReg()) return kErrorOk;
+ return _hint(reg, CCHint::kHintAlloc, physId);
+}
+
+Error CodeCompiler::alloc(Reg& reg, const Reg& physReg) {
+ if (!reg.isVirtReg()) return kErrorOk;
+ return _hint(reg, CCHint::kHintAlloc, physReg.getId());
+}
+
+Error CodeCompiler::save(Reg& reg) {
+ if (!reg.isVirtReg()) return kErrorOk;
+ return _hint(reg, CCHint::kHintSave, kInvalidValue);
+}
+
+Error CodeCompiler::spill(Reg& reg) {
+ if (!reg.isVirtReg()) return kErrorOk;
+ return _hint(reg, CCHint::kHintSpill, kInvalidValue);
+}
+
+Error CodeCompiler::unuse(Reg& reg) {
+ if (!reg.isVirtReg()) return kErrorOk;
+ return _hint(reg, CCHint::kHintUnuse, kInvalidValue);
+}
+
+uint32_t CodeCompiler::getPriority(Reg& reg) const {
+ if (!reg.isVirtReg()) return 0;
+ return getVirtRegById(reg.getId())->getPriority();
+}
+
+void CodeCompiler::setPriority(Reg& reg, uint32_t priority) {
+ if (!reg.isVirtReg()) return;
+ if (priority > 255) priority = 255;
+
+ VirtReg* vreg = getVirtRegById(reg.getId());
+ if (vreg) vreg->_priority = static_cast<uint8_t>(priority);
+}
+
+bool CodeCompiler::getSaveOnUnuse(Reg& reg) const {
+ if (!reg.isVirtReg()) return false;
+
+ VirtReg* vreg = getVirtRegById(reg.getId());
+ return static_cast<bool>(vreg->_saveOnUnuse);
+}
+
+void CodeCompiler::setSaveOnUnuse(Reg& reg, bool value) {
+ if (!reg.isVirtReg()) return;
+
+ VirtReg* vreg = getVirtRegById(reg.getId());
+ if (!vreg) return;
+
+ vreg->_saveOnUnuse = value;
+}
+
+void CodeCompiler::rename(Reg& reg, const char* fmt, ...) {
+ if (!reg.isVirtReg()) return;
+
+ VirtReg* vreg = getVirtRegById(reg.getId());
+ if (!vreg) return;
+
+ vreg->_name = noName;
+ if (fmt && fmt[0] != '\0') {
+ char buf[64];
+
+ va_list ap;
+ va_start(ap, fmt);
+
+ vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
+ buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
+
+ vreg->_name = static_cast<char*>(_cbDataZone.dup(buf, ::strlen(buf), true));
+ va_end(ap);
+ }
+}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // !ASMJIT_DISABLE_COMPILER
diff --git a/libraries/asmjit/asmjit/base/codecompiler.h b/libraries/asmjit/asmjit/base/codecompiler.h
new file mode 100644
index 00000000000..44b9644b71b
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codecompiler.h
@@ -0,0 +1,738 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_CODECOMPILER_H
+#define _ASMJIT_BASE_CODECOMPILER_H
+
+#include "../asmjit_build.h"
+#if !defined(ASMJIT_DISABLE_COMPILER)
+
+// [Dependencies]
+#include "../base/assembler.h"
+#include "../base/codebuilder.h"
+#include "../base/constpool.h"
+#include "../base/func.h"
+#include "../base/operand.h"
+#include "../base/utils.h"
+#include "../base/zone.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+struct VirtReg;
+struct TiedReg;
+struct RAState;
+struct RACell;
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [asmjit::ConstScope]
+// ============================================================================
+
+//! Scope of the constant.
+ASMJIT_ENUM(ConstScope) {
+ //! Local constant, always embedded right after the current function.
+ kConstScopeLocal = 0,
+ //! Global constant, embedded at the end of the currently compiled code.
+ kConstScopeGlobal = 1
+};
+
+// ============================================================================
+// [asmjit::VirtReg]
+// ============================================================================
+
+//! Virtual register data (CodeCompiler).
+struct VirtReg {
+ //! A state of a virtual register (used during register allocation).
+ ASMJIT_ENUM(State) {
+ kStateNone = 0, //!< Not allocated, not used.
+ kStateReg = 1, //!< Allocated in register.
+ kStateMem = 2 //!< Allocated in memory or spilled.
+ };
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get the virtual-register id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
+ //! Get virtual-register's name.
+ ASMJIT_INLINE const char* getName() const noexcept { return _name; }
+
+ //! Get a physical register type.
+ ASMJIT_INLINE uint32_t getType() const noexcept { return _regInfo.getType(); }
+ //! Get a physical register kind.
+ ASMJIT_INLINE uint32_t getKind() const noexcept { return _regInfo.getKind(); }
+ //! Get a physical register size.
+ ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regInfo.getSize(); }
+ //! Get a register signature of this virtual register.
+ ASMJIT_INLINE uint32_t getSignature() const noexcept { return _regInfo.getSignature(); }
+
+ //! Get a register's type-id, see \ref TypeId.
+ ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; }
+
+ //! Get virtual-register's size.
+ ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
+ //! Get virtual-register's alignment.
+ ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
+
+ //! Get the virtual-register priority, used by compiler to decide which variable to spill.
+ ASMJIT_INLINE uint32_t getPriority() const noexcept { return _priority; }
+ //! Set the virtual-register priority.
+ ASMJIT_INLINE void setPriority(uint32_t priority) noexcept {
+ ASMJIT_ASSERT(priority <= 0xFF);
+ _priority = static_cast<uint8_t>(priority);
+ }
+
+ //! Get variable state, only used by `RAPass`.
+ ASMJIT_INLINE uint32_t getState() const noexcept { return _state; }
+ //! Set variable state, only used by `RAPass`.
+ ASMJIT_INLINE void setState(uint32_t state) {
+ ASMJIT_ASSERT(state <= 0xFF);
+ _state = static_cast<uint8_t>(state);
+ }
+
+ //! Get register index.
+ ASMJIT_INLINE uint32_t getPhysId() const noexcept { return _physId; }
+ //! Set register index.
+ ASMJIT_INLINE void setPhysId(uint32_t physId) {
+ ASMJIT_ASSERT(physId <= Globals::kInvalidRegId);
+ _physId = static_cast<uint8_t>(physId);
+ }
+ //! Reset register index.
+ ASMJIT_INLINE void resetPhysId() {
+ _physId = static_cast<uint8_t>(Globals::kInvalidRegId);
+ }
+
+ //! Get home registers mask.
+ ASMJIT_INLINE uint32_t getHomeMask() const { return _homeMask; }
+ //! Add a home register index to the home registers mask.
+ ASMJIT_INLINE void addHomeId(uint32_t physId) { _homeMask |= Utils::mask(physId); }
+
+ ASMJIT_INLINE bool isFixed() const noexcept { return static_cast<bool>(_isFixed); }
+
+ //! Get whether the VirtReg is only memory allocated on the stack.
+ ASMJIT_INLINE bool isStack() const noexcept { return static_cast<bool>(_isStack); }
+
+ //! Get whether to save variable when it's unused (spill).
+ ASMJIT_INLINE bool saveOnUnuse() const noexcept { return static_cast<bool>(_saveOnUnuse); }
+
+ //! Get whether the variable was changed.
+ ASMJIT_INLINE bool isModified() const noexcept { return static_cast<bool>(_modified); }
+ //! Set whether the variable was changed.
+ ASMJIT_INLINE void setModified(bool modified) noexcept { _modified = modified; }
+
+ //! Get home memory offset.
+ ASMJIT_INLINE int32_t getMemOffset() const noexcept { return _memOffset; }
+ //! Set home memory offset.
+ ASMJIT_INLINE void setMemOffset(int32_t offset) noexcept { _memOffset = offset; }
+
+ //! Get home memory cell.
+ ASMJIT_INLINE RACell* getMemCell() const noexcept { return _memCell; }
+ //! Set home memory cell.
+ ASMJIT_INLINE void setMemCell(RACell* cell) noexcept { _memCell = cell; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint32_t _id; //!< Virtual register id.
+ RegInfo _regInfo; //!< Physical register info & signature.
+ const char* _name; //!< Virtual name (user provided).
+ uint32_t _size; //!< Virtual size (can be smaller than `regInfo._size`).
+ uint8_t _typeId; //!< Type-id.
+ uint8_t _alignment; //!< Register's natural alignment (for spilling).
+ uint8_t _priority; //!< Allocation priority (hint for RAPass that can be ignored).
+ uint8_t _isFixed : 1; //!< True if this is a fixed register, never reallocated.
+ uint8_t _isStack : 1; //!< True if the virtual register is only used as a stack.
+ uint8_t _isMaterialized : 1; //!< Register is constant that is easily created by a single instruction.
+ uint8_t _saveOnUnuse : 1; //!< Save on unuse (at end of the variable scope).
+
+ // -------------------------------------------------------------------------
+ // The following members are used exclusively by RAPass. They are initialized
+ // when the VirtReg is created and then changed during RAPass.
+ // -------------------------------------------------------------------------
+
+ uint32_t _raId; //!< Register allocator work-id (used by RAPass).
+ int32_t _memOffset; //!< Home memory offset.
+ uint32_t _homeMask; //!< Mask of all registers variable has been allocated to.
+
+ uint8_t _state; //!< Variable state (connected with actual `RAState)`.
+ uint8_t _physId; //!< Actual register index (only used by `RAPass)`, during translate.
+ uint8_t _modified; //!< Whether variable was changed (connected with actual `RAState)`.
+
+ RACell* _memCell; //!< Home memory cell, used by `RAPass` (initially nullptr).
+
+ //! Temporary link to TiedReg* used by the `RAPass` used in
+ //! various phases, but always set back to nullptr when finished.
+ //!
+ //! This temporary data is designed to be used by algorithms that need to
+ //! store some data into variables themselves during compilation. But it's
+ //! expected that after variable is compiled & translated the data is set
+ //! back to zero/null. Initial value is nullptr.
+ TiedReg* _tied;
+};
+
+// ============================================================================
+// [asmjit::CCHint]
+// ============================================================================
+
+//! Hint for register allocator (CodeCompiler).
+class CCHint : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CCHint)
+
+ //! Hint type.
+ ASMJIT_ENUM(Hint) {
+ //! Alloc to physical reg.
+ kHintAlloc = 0,
+ //! Spill to memory.
+ kHintSpill = 1,
+ //! Save if modified.
+ kHintSave = 2,
+ //! Save if modified and mark it as unused.
+ kHintSaveAndUnuse = 3,
+ //! Mark as unused.
+ kHintUnuse = 4
+ };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCHint` instance.
+ ASMJIT_INLINE CCHint(CodeBuilder* cb, VirtReg* vreg, uint32_t hint, uint32_t value) noexcept : CBNode(cb, kNodeHint) {
+ orFlags(kFlagIsRemovable | kFlagIsInformative);
+ _vreg = vreg;
+ _hint = hint;
+ _value = value;
+ }
+
+ //! Destroy the `CCHint` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CCHint() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get variable.
+ ASMJIT_INLINE VirtReg* getVReg() const noexcept { return _vreg; }
+
+ //! Get hint it, see \ref Hint.
+ ASMJIT_INLINE uint32_t getHint() const noexcept { return _hint; }
+ //! Set hint it, see \ref Hint.
+ ASMJIT_INLINE void setHint(uint32_t hint) noexcept { _hint = hint; }
+
+ //! Get hint value.
+ ASMJIT_INLINE uint32_t getValue() const noexcept { return _value; }
+ //! Set hint value.
+ ASMJIT_INLINE void setValue(uint32_t value) noexcept { _value = value; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ //! Variable.
+ VirtReg* _vreg;
+ //! Hint id.
+ uint32_t _hint;
+ //! Value.
+ uint32_t _value;
+};
+
+// ============================================================================
+// [asmjit::CCFunc]
+// ============================================================================
+
+//! Function entry (CodeCompiler).
+class CCFunc : public CBLabel {
+public:
+ ASMJIT_NONCOPYABLE(CCFunc)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCFunc` instance.
+ //!
+ //! Always use `CodeCompiler::addFunc()` to create \ref CCFunc.
+ ASMJIT_INLINE CCFunc(CodeBuilder* cb) noexcept
+ : CBLabel(cb),
+ _funcDetail(),
+ _frameInfo(),
+ _exitNode(nullptr),
+ _end(nullptr),
+ _args(nullptr),
+ _isFinished(false) {
+
+ _type = kNodeFunc;
+ }
+
+ //! Destroy the `CCFunc` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CCFunc() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get function exit `CBLabel`.
+ ASMJIT_INLINE CBLabel* getExitNode() const noexcept { return _exitNode; }
+ //! Get function exit label.
+ ASMJIT_INLINE Label getExitLabel() const noexcept { return _exitNode->getLabel(); }
+
+ //! Get "End of Func" sentinel.
+ ASMJIT_INLINE CBSentinel* getEnd() const noexcept { return _end; }
+
+ //! Get function declaration.
+ ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
+ //! \overload
+ ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }
+
+ //! Get function frame information.
+ ASMJIT_INLINE FuncFrameInfo& getFrameInfo() noexcept { return _frameInfo; }
+ //! \overload
+ ASMJIT_INLINE const FuncFrameInfo& getFrameInfo() const noexcept { return _frameInfo; }
+
+ //! Get arguments count.
+ ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _funcDetail.getArgCount(); }
+ //! Get returns count.
+ ASMJIT_INLINE uint32_t getRetCount() const noexcept { return _funcDetail.getRetCount(); }
+
+ //! Get arguments list (array of `VirtReg*`, one slot per argument).
+ ASMJIT_INLINE VirtReg** getArgs() const noexcept { return _args; }
+
+ //! Get argument at `i`.
+ ASMJIT_INLINE VirtReg* getArg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < getArgCount());
+ return _args[i];
+ }
+
+ //! Set argument at `i`.
+ ASMJIT_INLINE void setArg(uint32_t i, VirtReg* vreg) noexcept {
+ ASMJIT_ASSERT(i < getArgCount());
+ _args[i] = vreg;
+ }
+
+ //! Reset argument at `i` (sets the slot to null).
+ ASMJIT_INLINE void resetArg(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < getArgCount());
+ _args[i] = nullptr;
+ }
+
+ //! Get frame attributes (forwards to \ref FuncFrameInfo).
+ ASMJIT_INLINE uint32_t getAttributes() const noexcept { return _frameInfo.getAttributes(); }
+ //! Add frame attributes (forwards to \ref FuncFrameInfo).
+ ASMJIT_INLINE void addAttributes(uint32_t attrs) noexcept { _frameInfo.addAttributes(attrs); }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ FuncDetail _funcDetail; //!< Function detail.
+ FuncFrameInfo _frameInfo; //!< Function frame information.
+
+ CBLabel* _exitNode; //!< Function exit.
+ CBSentinel* _end; //!< Function end.
+
+ VirtReg** _args; //!< Arguments array as `VirtReg`.
+
+ //! Function was finished by `Compiler::endFunc()` (stored as uint8_t).
+ uint8_t _isFinished;
+};
+
+// ============================================================================
+// [asmjit::CCFuncRet]
+// ============================================================================
+
+//! Function return (CodeCompiler).
+class CCFuncRet : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CCFuncRet)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCFuncRet` instance.
+ ASMJIT_INLINE CCFuncRet(CodeBuilder* cb, const Operand_& o0, const Operand_& o1) noexcept : CBNode(cb, kNodeFuncExit) {
+ orFlags(kFlagIsRet);
+ _ret[0].copyFrom(o0);
+ _ret[1].copyFrom(o1);
+ }
+
+ //! Destroy the `CCFuncRet` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CCFuncRet() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get the first return operand.
+ ASMJIT_INLINE Operand& getFirst() noexcept { return static_cast<Operand&>(_ret[0]); }
+ //! \overload
+ ASMJIT_INLINE const Operand& getFirst() const noexcept { return static_cast<const Operand&>(_ret[0]); }
+
+ //! Get the second return operand.
+ ASMJIT_INLINE Operand& getSecond() noexcept { return static_cast<Operand&>(_ret[1]); }
+ //! \overload
+ ASMJIT_INLINE const Operand& getSecond() const noexcept { return static_cast<const Operand&>(_ret[1]); }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ //! Return operands (stored as `Operand_`, exposed as `Operand` via cast).
+ Operand_ _ret[2];
+};
+
+// ============================================================================
+// [asmjit::CCFuncCall]
+// ============================================================================
+
+//! Function call (CodeCompiler).
+class CCFuncCall : public CBInst {
+public:
+ ASMJIT_NONCOPYABLE(CCFuncCall)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCFuncCall` instance.
+ ASMJIT_INLINE CCFuncCall(CodeBuilder* cb, uint32_t instId, uint32_t options, Operand* opArray, uint32_t opCount) noexcept
+ : CBInst(cb, instId, options, opArray, opCount),
+ _funcDetail(),
+ _args(nullptr) {
+
+ _type = kNodeFuncCall;
+ _ret[0].reset();
+ _ret[1].reset();
+ orFlags(kFlagIsRemovable);
+ }
+
+ //! Destroy the `CCFuncCall` instance (NEVER CALLED).
+ ASMJIT_INLINE ~CCFuncCall() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Signature]
+ // --------------------------------------------------------------------------
+
+ //! Set function signature.
+ ASMJIT_INLINE Error setSignature(const FuncSignature& sign) noexcept {
+ return _funcDetail.init(sign);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get function declaration.
+ ASMJIT_INLINE FuncDetail& getDetail() noexcept { return _funcDetail; }
+ //! Get function declaration.
+ ASMJIT_INLINE const FuncDetail& getDetail() const noexcept { return _funcDetail; }
+
+ //! Get target operand (operand 0 of the underlying instruction).
+ ASMJIT_INLINE Operand& getTarget() noexcept { return static_cast<Operand&>(_opArray[0]); }
+ //! \overload
+ ASMJIT_INLINE const Operand& getTarget() const noexcept { return static_cast<const Operand&>(_opArray[0]); }
+
+ //! Get return at `i`.
+ ASMJIT_INLINE Operand& getRet(uint32_t i = 0) noexcept {
+ ASMJIT_ASSERT(i < 2);
+ return static_cast<Operand&>(_ret[i]);
+ }
+ //! \overload
+ ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const noexcept {
+ ASMJIT_ASSERT(i < 2);
+ return static_cast<const Operand&>(_ret[i]);
+ }
+
+ //! Get argument at `i`.
+ ASMJIT_INLINE Operand& getArg(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < kFuncArgCountLoHi);
+ return static_cast<Operand&>(_args[i]);
+ }
+ //! \overload
+ ASMJIT_INLINE const Operand& getArg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < kFuncArgCountLoHi);
+ return static_cast<const Operand&>(_args[i]);
+ }
+
+ //! Set argument at `i` to `op`.
+ ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept;
+ //! Set return at `i` to `op`.
+ ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept;
+
+ //! Set argument at `i` to `reg`.
+ ASMJIT_INLINE bool setArg(uint32_t i, const Reg& reg) noexcept { return _setArg(i, reg); }
+ //! Set argument at `i` to `imm`.
+ ASMJIT_INLINE bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); }
+
+ //! Set return at `i` to `var`.
+ ASMJIT_INLINE bool setRet(uint32_t i, const Reg& reg) noexcept { return _setRet(i, reg); }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ FuncDetail _funcDetail; //!< Function detail.
+ Operand_ _ret[2]; //!< Return.
+ Operand_* _args; //!< Arguments.
+};
+
+// ============================================================================
+// [asmjit::CCPushArg]
+// ============================================================================
+
+//! Push argument before a function call (CodeCompiler).
+class CCPushArg : public CBNode {
+public:
+ ASMJIT_NONCOPYABLE(CCPushArg)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCPushArg` instance.
+ ASMJIT_INLINE CCPushArg(CodeBuilder* cb, CCFuncCall* call, VirtReg* src, VirtReg* cvt) noexcept
+ : CBNode(cb, kNodePushArg),
+ _call(call),
+ _src(src),
+ _cvt(cvt),
+ _args(0) {
+ orFlags(kFlagIsRemovable);
+ }
+
+ //! Destroy the `CCPushArg` instance.
+ //!
+ //! NOTE(review): sibling node types document their destructors as NEVER
+ //! CALLED (zone allocation) - presumably the same holds here; confirm.
+ ASMJIT_INLINE ~CCPushArg() noexcept {}
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get the associated function-call.
+ ASMJIT_INLINE CCFuncCall* getCall() const noexcept { return _call; }
+ //! Get source variable.
+ ASMJIT_INLINE VirtReg* getSrcReg() const noexcept { return _src; }
+ //! Get conversion variable (may be null when no conversion is needed).
+ ASMJIT_INLINE VirtReg* getCvtReg() const noexcept { return _cvt; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ CCFuncCall* _call; //!< Associated `CCFuncCall`.
+ VirtReg* _src; //!< Source variable.
+ VirtReg* _cvt; //!< Temporary variable used for conversion (or null).
+ uint32_t _args; //!< Affected arguments bit-array (presumably one bit per argument index - confirm against users).
+};
+
+// ============================================================================
+// [asmjit::CodeCompiler]
+// ============================================================================
+
+//! Code emitter that uses virtual registers and performs register allocation.
+//!
+//! Compiler is a high-level code-generation tool that provides register
+//! allocation and automatic handling of function calling conventions. It was
+//! primarily designed for merging multiple parts of code into a function
+//! without worrying about registers and function calling conventions.
+//!
+//! CodeCompiler can be used, with a minimum effort, to handle 32-bit and 64-bit
+//! code at the same time.
+//!
+//! CodeCompiler is based on CodeBuilder and contains all the features it
+//! provides. It means that the code it stores can be modified (removed, added,
+//! injected) and analyzed. When the code is finalized the compiler can emit
+//! the code into an Assembler to translate the abstract representation into a
+//! machine code.
+class ASMJIT_VIRTAPI CodeCompiler : public CodeBuilder {
+public:
+ ASMJIT_NONCOPYABLE(CodeCompiler)
+ typedef CodeBuilder Base;
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CodeCompiler` instance.
+ ASMJIT_API CodeCompiler() noexcept;
+ //! Destroy the `CodeCompiler` instance.
+ ASMJIT_API virtual ~CodeCompiler() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Events]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API virtual Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API virtual Error onDetach(CodeHolder* code) noexcept override;
+
+ // --------------------------------------------------------------------------
+ // [Node-Factory]
+ // --------------------------------------------------------------------------
+
+ //! \internal
+ //!
+ //! Create a new `CCHint`.
+ ASMJIT_API CCHint* newHintNode(Reg& reg, uint32_t hint, uint32_t value) noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Func]
+ // --------------------------------------------------------------------------
+
+ //! Get the current function.
+ ASMJIT_INLINE CCFunc* getFunc() const noexcept { return _func; }
+
+ //! Create a new `CCFunc`.
+ ASMJIT_API CCFunc* newFunc(const FuncSignature& sign) noexcept;
+ //! Add a function `node` to the stream.
+ ASMJIT_API CCFunc* addFunc(CCFunc* func);
+ //! Add a new function.
+ ASMJIT_API CCFunc* addFunc(const FuncSignature& sign);
+ //! Emit a sentinel that marks the end of the current function.
+ ASMJIT_API CBSentinel* endFunc();
+
+ // --------------------------------------------------------------------------
+ // [Ret]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCFuncRet`.
+ ASMJIT_API CCFuncRet* newRet(const Operand_& o0, const Operand_& o1) noexcept;
+ //! Add a new `CCFuncRet`.
+ ASMJIT_API CCFuncRet* addRet(const Operand_& o0, const Operand_& o1) noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Call]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `CCFuncCall`.
+ ASMJIT_API CCFuncCall* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
+ //! Add a new `CCFuncCall`.
+ ASMJIT_API CCFuncCall* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Args]
+ // --------------------------------------------------------------------------
+
+ //! Set a function argument at `argIndex` to `reg`.
+ ASMJIT_API Error setArg(uint32_t argIndex, const Reg& reg);
+
+ // --------------------------------------------------------------------------
+ // [Hint]
+ // --------------------------------------------------------------------------
+
+ //! Emit a new hint (purely informational node).
+ ASMJIT_API Error _hint(Reg& reg, uint32_t hint, uint32_t value);
+
+ // --------------------------------------------------------------------------
+ // [VirtReg / Stack]
+ // --------------------------------------------------------------------------
+
+ //! Create a new virtual register representing the given `vti` and `signature`.
+ //!
+ //! This function accepts either register type representing a machine-specific
+ //! register, like `X86Reg`, or RegTag representation, which represents
+ //! machine independent register, and from the machine-specific register
+ //! is deduced.
+ ASMJIT_API VirtReg* newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept;
+
+ ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* name);
+ ASMJIT_API Error _newReg(Reg& out, uint32_t typeId, const char* nameFmt, va_list ap);
+
+ ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* name);
+ ASMJIT_API Error _newReg(Reg& out, const Reg& ref, const char* nameFmt, va_list ap);
+
+ ASMJIT_API Error _newStack(Mem& out, uint32_t size, uint32_t alignment, const char* name);
+ ASMJIT_API Error _newConst(Mem& out, uint32_t scope, const void* data, size_t size);
+
+ // --------------------------------------------------------------------------
+ // [VirtReg]
+ // --------------------------------------------------------------------------
+
+ //! Get whether the virtual register `r` is valid.
+ ASMJIT_INLINE bool isVirtRegValid(const Reg& reg) const noexcept {
+ return isVirtRegValid(reg.getId());
+ }
+ //! \overload
+ ASMJIT_INLINE bool isVirtRegValid(uint32_t id) const noexcept {
+ size_t index = Operand::unpackId(id);
+ return index < _vRegArray.getLength();
+ }
+
+ //! Get \ref VirtReg associated with the given `r`.
+ ASMJIT_INLINE VirtReg* getVirtReg(const Reg& reg) const noexcept {
+ return getVirtRegById(reg.getId());
+ }
+ //! Get \ref VirtReg associated with the given `id`.
+ ASMJIT_INLINE VirtReg* getVirtRegById(uint32_t id) const noexcept {
+ ASMJIT_ASSERT(id != kInvalidValue);
+ size_t index = Operand::unpackId(id);
+
+ ASMJIT_ASSERT(index < _vRegArray.getLength());
+ return _vRegArray[index];
+ }
+
+ //! Get an array of all virtual registers managed by CodeCompiler.
+ ASMJIT_INLINE const ZoneVector<VirtReg*>& getVirtRegArray() const noexcept { return _vRegArray; }
+
+ //! Alloc a virtual register `reg`.
+ ASMJIT_API Error alloc(Reg& reg);
+ //! Alloc a virtual register `reg` using `physId` as a register id.
+ ASMJIT_API Error alloc(Reg& reg, uint32_t physId);
+ //! Alloc a virtual register `reg` using `ref` as a register operand.
+ ASMJIT_API Error alloc(Reg& reg, const Reg& ref);
+ //! Spill a virtual register `reg`.
+ ASMJIT_API Error spill(Reg& reg);
+ //! Save a virtual register `reg` if the status is `modified` at this point.
+ ASMJIT_API Error save(Reg& reg);
+ //! Unuse a virtual register `reg`.
+ ASMJIT_API Error unuse(Reg& reg);
+
+ //! Get priority of a virtual register `reg`.
+ ASMJIT_API uint32_t getPriority(Reg& reg) const;
+ //! Set priority of variable `reg` to `priority`.
+ ASMJIT_API void setPriority(Reg& reg, uint32_t priority);
+
+ //! Get save-on-unuse `reg` property.
+ ASMJIT_API bool getSaveOnUnuse(Reg& reg) const;
+ //! Set save-on-unuse `reg` property to `value`.
+ ASMJIT_API void setSaveOnUnuse(Reg& reg, bool value);
+
+ //! Rename variable `reg` to `name`.
+ //!
+ //! NOTE: Only new name will appear in the logger.
+ ASMJIT_API void rename(Reg& reg, const char* fmt, ...);
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ CCFunc* _func; //!< Current function.
+
+ Zone _vRegZone; //!< Allocates \ref VirtReg objects.
+ ZoneVector<VirtReg*> _vRegArray; //!< Stores array of \ref VirtReg pointers.
+
+ CBConstPool* _localConstPool; //!< Local constant pool, flushed at the end of each function.
+ CBConstPool* _globalConstPool; //!< Global constant pool, flushed at the end of the compilation.
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // !ASMJIT_DISABLE_COMPILER
+#endif // _ASMJIT_BASE_CODECOMPILER_H
diff --git a/libraries/asmjit/asmjit/base/codeemitter.cpp b/libraries/asmjit/asmjit/base/codeemitter.cpp
new file mode 100644
index 00000000000..48a4c9a21c0
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codeemitter.cpp
@@ -0,0 +1,236 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/assembler.h"
+#include "../base/utils.h"
+#include "../base/vmem.h"
+
+#if defined(ASMJIT_BUILD_X86)
+#include "../x86/x86inst.h"
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+#include "../arm/arminst.h"
+#endif // ASMJIT_BUILD_ARM
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::CodeEmitter - Construction / Destruction]
+// ============================================================================
+
+// Constructs a detached CodeEmitter of the given `type` (see `Type` enum).
+// All state is null/default until `onAttach()` is called by a CodeHolder;
+// `_lastError` starts as `kErrorNotInitialized` to reflect that.
+CodeEmitter::CodeEmitter(uint32_t type) noexcept
+ : _codeInfo(),
+ _code(nullptr),
+ _nextEmitter(nullptr),
+ _type(static_cast<uint8_t>(type)),
+ _destroyed(false),
+ _finalized(false),
+ _reserved(false),
+ _lastError(kErrorNotInitialized),
+ _privateData(0),
+ _globalHints(0),
+ _globalOptions(kOptionMaybeFailureCase),
+ _options(0),
+ _extraReg(),
+ _inlineComment(nullptr),
+ _none(),
+ _nativeGpReg(),
+ _nativeGpArray(nullptr) {}
+
+CodeEmitter::~CodeEmitter() noexcept {
+ if (_code) {
+ // Detach from the CodeHolder so it drops its reference to this emitter.
+ // `_destroyed` is set before detaching - presumably `detach()` checks it
+ // to skip work on a dying emitter; NOTE(review): confirm in CodeHolder.
+ _destroyed = true;
+ _code->detach(this);
+ }
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Events]
+// ============================================================================
+
+// Attach hook: inherit the holder's code info and global knobs, then clear
+// the sticky error so the emitter is ready to emit.
+Error CodeEmitter::onAttach(CodeHolder* code) noexcept {
+ _codeInfo = code->getCodeInfo();
+ _globalHints = code->getGlobalHints();
+ _globalOptions = code->getGlobalOptions();
+
+ // A fresh attachment starts with no error recorded.
+ _lastError = kErrorOk;
+ return kErrorOk;
+}
+
+// Detach hook: return every member to its default-constructed value so the
+// emitter can later be attached to a different CodeHolder.
+Error CodeEmitter::onDetach(CodeHolder* code) noexcept {
+ // Per-instruction state.
+ _options = 0;
+ _extraReg.reset();
+ _inlineComment = nullptr;
+
+ // Architecture-dependent registers.
+ _nativeGpReg.reset();
+ _nativeGpArray = nullptr;
+
+ // Holder-derived global state.
+ _privateData = 0;
+ _globalHints = 0;
+ _globalOptions = kOptionMaybeFailureCase;
+
+ // Code info and error state - `kErrorNotInitialized` marks "not attached".
+ _codeInfo.reset();
+ _finalized = false;
+ _lastError = kErrorNotInitialized;
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Code-Generation]
+// ============================================================================
+
+// Dispatches a variable-length operand array onto the fixed-arity `_emit()`
+// overloads: 0-4 operands use the 4-operand form (padded with `_none`),
+// 5-6 operands use the 6-operand form; anything larger is an error.
+Error CodeEmitter::_emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) {
+ const Operand_* op = opArray;
+ switch (opCount) {
+ case 0: return _emit(instId, _none, _none, _none, _none);
+ case 1: return _emit(instId, op[0], _none, _none, _none);
+ case 2: return _emit(instId, op[0], op[1], _none, _none);
+ case 3: return _emit(instId, op[0], op[1], op[2], _none);
+ case 4: return _emit(instId, op[0], op[1], op[2], op[3]);
+ case 5: return _emit(instId, op[0], op[1], op[2], op[3], op[4], _none);
+ case 6: return _emit(instId, op[0], op[1], op[2], op[3], op[4], op[5]);
+
+ default:
+ // No `_emit()` overload supports more than 6 operands.
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Label Management]
+// ============================================================================
+
+// Looks up a label by `name` (optionally under `parentId`) in the attached
+// CodeHolder. When the emitter is detached there are no labels, so a label
+// with id 0 is returned.
+// NOTE(review): id 0 may not be the canonical invalid label id - confirm
+// whether `kInvalidValue` was intended for the detached case.
+Label CodeEmitter::getLabelByName(const char* name, size_t nameLength, uint32_t parentId) noexcept {
+ return Label(_code ? _code->getLabelIdByName(name, nameLength, parentId) : static_cast<uint32_t>(0));
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Finalize]
+// ============================================================================
+
+// Default finalization hook - a no-op here; subclasses that buffer work
+// (see the comment below) override it to serialize their nodes.
+Error CodeEmitter::finalize() {
+ // Finalization does nothing by default, overridden by `CodeBuilder`.
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Error Handling]
+// ============================================================================
+
+// Records `error` as the emitter's sticky error and returns it.
+//
+// Passing `kErrorOk` instead *clears* the sticky error. If the attached
+// CodeHolder has an ErrorHandler that handles the error, the error is
+// returned without being stored (logging is also skipped in that case).
+Error CodeEmitter::setLastError(Error error, const char* message) {
+ // This is fatal, CodeEmitter can't set error without being attached to `CodeHolder`.
+ ASMJIT_ASSERT(_code != nullptr);
+
+ // Special case used to reset the last error.
+ if (error == kErrorOk) {
+ _lastError = kErrorOk;
+ _globalOptions &= ~kOptionMaybeFailureCase;
+ return kErrorOk;
+ }
+
+ // Fall back to the stock message for this error code.
+ if (!message)
+ message = DebugUtils::errorAsString(error);
+
+ // Logging is skipped if the error is handled by `ErrorHandler`.
+ ErrorHandler* handler = _code->_errorHandler;
+ if (handler && handler->handleError(error, message, this))
+ return error;
+
+ // The handler->handleError() function may throw an exception or longjmp()
+ // to terminate the execution of `setLastError()`. This is the reason why
+ // we have delayed changing the `_error` member until now.
+ _lastError = error;
+ _globalOptions |= kOptionMaybeFailureCase;
+
+ return error;
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Helpers]
+// ============================================================================
+
+// A label id is valid when the emitter is attached and the unpacked index
+// falls inside the holder's label table.
+bool CodeEmitter::isLabelValid(uint32_t id) const noexcept {
+ if (!_code)
+ return false;
+ return Operand::unpackId(id) < _code->_labels.getLength();
+}
+
+// Emits a printf-style comment through the attached logger.
+//
+// Returns the sticky error immediately if one is set; otherwise returns the
+// logger's result, or `kErrorOk` when logging is disabled or compiled out.
+Error CodeEmitter::commentf(const char* fmt, ...) {
+ Error err = _lastError;
+ if (err) return err;
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled) {
+ // NOTE(review): assumes `_code->_logger` is non-null whenever
+ // `kOptionLoggingEnabled` is set - confirm CodeHolder maintains this.
+ va_list ap;
+ va_start(ap, fmt);
+ err = _code->_logger->logv(fmt, ap);
+ va_end(ap);
+ }
+#else
+ ASMJIT_UNUSED(fmt);
+#endif
+
+ return err;
+}
+
+// va_list variant of `commentf()` - same contract, caller owns `ap`.
+Error CodeEmitter::commentv(const char* fmt, va_list ap) {
+ Error err = _lastError;
+ if (err) return err;
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+ if (_globalOptions & kOptionLoggingEnabled)
+ // NOTE(review): assumes `_code->_logger` is non-null whenever
+ // `kOptionLoggingEnabled` is set - confirm CodeHolder maintains this.
+ err = _code->_logger->logv(fmt, ap);
+#else
+ ASMJIT_UNUSED(fmt);
+ ASMJIT_UNUSED(ap);
+#endif
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Emit]
+// ============================================================================
+
+// Convenience `emit()` overloads - each forwards to the virtual `_emit()`,
+// wrapping trailing `int`/`int64_t` arguments in `Imm` and padding missing
+// operands with `_none`. `OP` abbreviates `const Operand_&` only within this
+// table and is undefined right after it.
+#define OP const Operand_&
+
+Error CodeEmitter::emit(uint32_t instId) { return _emit(instId, _none, _none, _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0) { return _emit(instId, o0, _none, _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1) { return _emit(instId, o0, o1, _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2) { return _emit(instId, o0, o1, o2, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3) { return _emit(instId, o0, o1, o2, o3); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4) { return _emit(instId, o0, o1, o2, o3, o4, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, OP o5) { return _emit(instId, o0, o1, o2, o3, o4, o5); }
+
+Error CodeEmitter::emit(uint32_t instId, int o0) { return _emit(instId, Imm(o0), _none, _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, int o1) { return _emit(instId, o0, Imm(o1), _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int o2) { return _emit(instId, o0, o1, Imm(o2), _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); }
+
+Error CodeEmitter::emit(uint32_t instId, int64_t o0) { return _emit(instId, Imm(o0), _none, _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, int64_t o1) { return _emit(instId, o0, Imm(o1), _none, _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, int64_t o2) { return _emit(instId, o0, o1, Imm(o2), _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, int64_t o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }
+
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int64_t o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), _none); }
+Error CodeEmitter::emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int64_t o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); }
+
+#undef OP
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/codeemitter.h b/libraries/asmjit/asmjit/base/codeemitter.h
new file mode 100644
index 00000000000..93a2de36fff
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codeemitter.h
@@ -0,0 +1,499 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_CODEEMITTER_H
+#define _ASMJIT_BASE_CODEEMITTER_H
+
+// [Dependencies]
+#include "../base/arch.h"
+#include "../base/codeholder.h"
+#include "../base/operand.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class ConstPool;
+
+// ============================================================================
+// [asmjit::CodeEmitter]
+// ============================================================================
+
+//! Provides a base foundation to emit code - specialized by \ref Assembler and
+//! \ref CodeBuilder.
+class ASMJIT_VIRTAPI CodeEmitter {
+public:
+ //! CodeEmitter type.
+ ASMJIT_ENUM(Type) {
+ kTypeNone = 0,
+ kTypeAssembler = 1,
+ kTypeBuilder = 2,
+ kTypeCompiler = 3,
+ kTypeCount = 4
+ };
+
+ //! CodeEmitter hints - global settings that affect machine-code generation.
+ ASMJIT_ENUM(Hints) {
+ //! Emit optimized code-alignment sequences.
+ //!
+ //! Default `true`.
+ //!
+ //! X86/X64 Specific
+ //! ----------------
+ //!
+ //! Default align sequence used by X86/X64 architecture is one-byte (0x90)
+ //! opcode that is often shown by disassemblers as nop. However there are
+ //! more optimized align sequences for 2-11 bytes that may execute faster.
+ //! If this feature is enabled AsmJit will generate specialized sequences
+ //! for alignment between 2 to 11 bytes.
+ kHintOptimizedAlign = 0x00000001U,
+
+ //! Emit jump-prediction hints.
+ //!
+ //! Default `false`.
+ //!
+ //! X86/X64 Specific
+ //! ----------------
+ //!
+ //! Jump prediction is usually based on the direction of the jump. If the
+ //! jump is backward it is usually predicted as taken; and if the jump is
+ //! forward it is usually predicted as not-taken. The reason is that loops
+ //! generally use backward jumps and conditions usually use forward jumps.
+ //! However this behavior can be overridden by using instruction prefixes.
+ //! If this option is enabled these hints will be emitted.
+ //!
+ //! This feature is disabled by default, because the only processor that
+ //! used to take into consideration prediction hints was P4. Newer processors
+ //! implement heuristics for branch prediction that ignores any static hints.
+ kHintPredictedJumps = 0x00000002U
+ };
+
+ //! CodeEmitter options that are merged with instruction options.
+ ASMJIT_ENUM(Options) {
+ //! Reserved, used to check for errors in `Assembler::_emit()`. In addition,
+ //! if an emitter is in error state it will have `kOptionMaybeFailureCase`
+ //! set
+ kOptionMaybeFailureCase = 0x00000001U,
+
+ //! Perform a strict validation before the instruction is emitted.
+ kOptionStrictValidation = 0x00000002U,
+
+ //! Logging is enabled and `CodeHolder::getLogger()` should return a valid
+ //! \ref Logger pointer.
+ kOptionLoggingEnabled = 0x00000004U,
+
+ //! Mask of all internal options that are not used to represent instruction
+ //! options, but are used to instrument Assembler and CodeBuilder. These
+ //! options are internal and should not be used outside of AsmJit itself.
+ //!
+ //! NOTE: Reserved options should never appear in `CBInst` options.
+ kOptionReservedMask = 0x00000007U,
+
+ //! Used only by Assembler to mark `_op4` and `_op5` are used.
+ kOptionOp4Op5Used = 0x00000008U,
+
+ //! Prevents following a jump during compilation (CodeCompiler).
+ kOptionUnfollow = 0x00000010U,
+
+ //! Overwrite the destination operand (CodeCompiler).
+ //!
+ //! Hint that is important for register liveness analysis. It tells the
+ //! compiler that the destination operand will be overwritten now or by
+ //! adjacent instructions. CodeCompiler knows when a register is completely
+ //! overwritten by a single instruction, for example you don't have to
+ //! mark "movaps" or "pxor x, x", however, if a pair of instructions is
+ //! used and the first of them doesn't completely overwrite the content
+ //! of the destination, CodeCompiler fails to mark that register as dead.
+ //!
+ //! X86/X64 Specific
+ //! ----------------
+ //!
+ //! - All instructions that always overwrite at least the size of the
+  //!     register the virtual-register uses, for example "mov", "movq",
+ //! "movaps" don't need the overwrite option to be used - conversion,
+ //! shuffle, and other miscellaneous instructions included.
+ //!
+ //! - All instructions that clear the destination register if all operands
+ //! are the same, for example "xor x, x", "pcmpeqb x x", etc...
+ //!
+ //! - Consecutive instructions that partially overwrite the variable until
+ //! there is no old content require the `overwrite()` to be used. Some
+  //!     examples (not always the best use cases though):
+ //!
+ //! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
+ //! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
+ //! - `mov al, ?` followed by `and ax, 0xFF`
+ //! - `mov al, ?` followed by `mov ah, al`
+ //! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
+ //!
+ //! - If allocated variable is used temporarily for scalar operations. For
+ //! example if you allocate a full vector like `X86Compiler::newXmm()`
+ //! and then use that vector for scalar operations you should use
+ //! `overwrite()` directive:
+ //!
+ //! - `sqrtss x, y` - only LO element of `x` is changed, if you don't use
+ //! HI elements, use `X86Compiler.overwrite().sqrtss(x, y)`.
+ kOptionOverwrite = 0x00000020U
+ };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API CodeEmitter(uint32_t type) noexcept;
+ ASMJIT_API virtual ~CodeEmitter() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Events]
+ // --------------------------------------------------------------------------
+
+ //! Called after the \ref CodeEmitter was attached to the \ref CodeHolder.
+ virtual Error onAttach(CodeHolder* code) noexcept = 0;
+ //! Called after the \ref CodeEmitter was detached from the \ref CodeHolder.
+ virtual Error onDetach(CodeHolder* code) noexcept = 0;
+
+ // --------------------------------------------------------------------------
+ // [Code-Generation]
+ // --------------------------------------------------------------------------
+
+ //! Emit instruction having max 4 operands.
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) = 0;
+ //! Emit instruction having max 6 operands.
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) = 0;
+ //! Emit instruction having operands stored in array.
+ virtual Error _emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount);
+
+ //! Create a new label.
+ virtual Label newLabel() = 0;
+ //! Create a new named label.
+ virtual Label newNamedLabel(
+ const char* name,
+ size_t nameLength = Globals::kInvalidIndex,
+ uint32_t type = Label::kTypeGlobal,
+ uint32_t parentId = 0) = 0;
+
+ //! Get a label by name.
+ //!
+ //! Returns invalid Label in case that the name is invalid or label was not found.
+ //!
+ //! NOTE: This function doesn't trigger ErrorHandler in case the name is
+ //! invalid or no such label exist. You must always check the validity of the
+ //! \ref Label returned.
+ ASMJIT_API Label getLabelByName(
+ const char* name,
+ size_t nameLength = Globals::kInvalidIndex,
+ uint32_t parentId = 0) noexcept;
+
+ //! Bind the `label` to the current position of the current section.
+ //!
+ //! NOTE: Attempt to bind the same label multiple times will return an error.
+ virtual Error bind(const Label& label) = 0;
+
+ //! Align to the `alignment` specified.
+ //!
+ //! The sequence that is used to fill the gap between the aligned location
+ //! and the current location depends on the align `mode`, see \ref AlignMode.
+ virtual Error align(uint32_t mode, uint32_t alignment) = 0;
+
+ //! Embed raw data into the code-buffer.
+ virtual Error embed(const void* data, uint32_t size) = 0;
+
+ //! Embed absolute label address as data (4 or 8 bytes).
+ virtual Error embedLabel(const Label& label) = 0;
+
+ //! Embed a constant pool into the code-buffer in the following steps:
+ //! 1. Align by using kAlignData to the minimum `pool` alignment.
+ //! 2. Bind `label` so it's bound to an aligned location.
+ //! 3. Emit constant pool data.
+ virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
+
+ //! Emit a comment string `s` with an optional `len` parameter.
+ virtual Error comment(const char* s, size_t len = Globals::kInvalidIndex) = 0;
+
+ // --------------------------------------------------------------------------
+ // [Code-Generation Status]
+ // --------------------------------------------------------------------------
+
+ //! Get if the CodeEmitter is initialized (i.e. attached to a \ref CodeHolder).
+ ASMJIT_INLINE bool isInitialized() const noexcept { return _code != nullptr; }
+
+ ASMJIT_API virtual Error finalize();
+
+ // --------------------------------------------------------------------------
+ // [Code Information]
+ // --------------------------------------------------------------------------
+
+ //! Get information about the code, see \ref CodeInfo.
+ ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
+ //! Get \ref CodeHolder this CodeEmitter is attached to.
+ ASMJIT_INLINE CodeHolder* getCode() const noexcept { return _code; }
+
+ //! Get information about the architecture, see \ref ArchInfo.
+ ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); }
+
+ //! Get if the target architecture is 32-bit.
+ ASMJIT_INLINE bool is32Bit() const noexcept { return getArchInfo().is32Bit(); }
+ //! Get if the target architecture is 64-bit.
+ ASMJIT_INLINE bool is64Bit() const noexcept { return getArchInfo().is64Bit(); }
+
+ //! Get the target architecture type.
+ ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); }
+ //! Get the target architecture sub-type.
+ ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); }
+ //! Get the target architecture's GP register size (4 or 8 bytes).
+ ASMJIT_INLINE uint32_t getGpSize() const noexcept { return getArchInfo().getGpSize(); }
+ //! Get the number of target GP registers.
+ ASMJIT_INLINE uint32_t getGpCount() const noexcept { return getArchInfo().getGpCount(); }
+
+ // --------------------------------------------------------------------------
+ // [Code-Emitter Type]
+ // --------------------------------------------------------------------------
+
+ //! Get the type of this CodeEmitter, see \ref Type.
+ ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
+
+ ASMJIT_INLINE bool isAssembler() const noexcept { return _type == kTypeAssembler; }
+ ASMJIT_INLINE bool isCodeBuilder() const noexcept { return _type == kTypeBuilder; }
+ ASMJIT_INLINE bool isCodeCompiler() const noexcept { return _type == kTypeCompiler; }
+
+ // --------------------------------------------------------------------------
+ // [Global Information]
+ // --------------------------------------------------------------------------
+
+ //! Get global hints.
+ ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; }
+
+ //! Get global options.
+ //!
+ //! Global options are merged with instruction options before the instruction
+ //! is encoded. These options have some bits reserved that are used for error
+ //! checking, logging, and strict validation. Other options are globals that
+  //! affect each instruction, for example if VEX3 is set globally, it will affect all
+ //! instructions, even those that don't have such option set.
+ ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; }
+
+ // --------------------------------------------------------------------------
+ // [Error Handling]
+ // --------------------------------------------------------------------------
+
+ //! Get if the object is in error state.
+ //!
+ //! Error state means that it does not consume anything unless the error
+ //! state is reset by calling `resetLastError()`. Use `getLastError()` to
+ //! get the last error that put the object into the error state.
+ ASMJIT_INLINE bool isInErrorState() const noexcept { return _lastError != kErrorOk; }
+
+ //! Get the last error code.
+ ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; }
+ //! Set the last error code and propagate it through the error handler.
+ ASMJIT_API Error setLastError(Error error, const char* message = nullptr);
+ //! Clear the last error code and return `kErrorOk`.
+ ASMJIT_INLINE Error resetLastError() noexcept { return setLastError(kErrorOk); }
+
+ // --------------------------------------------------------------------------
+ // [Accessors That Affect the Next Instruction]
+ // --------------------------------------------------------------------------
+
+ //! Get options of the next instruction.
+ ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
+ //! Set options of the next instruction.
+ ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _options = options; }
+ //! Add options of the next instruction.
+ ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
+ //! Reset options of the next instruction.
+ ASMJIT_INLINE void resetOptions() noexcept { _options = 0; }
+
+ //! Get if the extra register operand is valid.
+ ASMJIT_INLINE bool hasExtraReg() const noexcept { return _extraReg.isValid(); }
+ //! Get an extra operand that will be used by the next instruction (architecture specific).
+ ASMJIT_INLINE const RegOnly& getExtraReg() const noexcept { return _extraReg; }
+ //! Set an extra operand that will be used by the next instruction (architecture specific).
+ ASMJIT_INLINE void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); }
+ //! Set an extra operand that will be used by the next instruction (architecture specific).
+ ASMJIT_INLINE void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
+ //! Reset an extra operand that will be used by the next instruction (architecture specific).
+ ASMJIT_INLINE void resetExtraReg() noexcept { _extraReg.reset(); }
+
+ //! Get annotation of the next instruction.
+ ASMJIT_INLINE const char* getInlineComment() const noexcept { return _inlineComment; }
+ //! Set annotation of the next instruction.
+ //!
+ //! NOTE: This string is set back to null by `_emit()`, but until that it has
+ //! to remain valid as `CodeEmitter` is not required to make a copy of it (and
+ //! it would be slow to do that for each instruction).
+ ASMJIT_INLINE void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Reset annotation of the next instruction to null.
+ ASMJIT_INLINE void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ // --------------------------------------------------------------------------
+ // [Helpers]
+ // --------------------------------------------------------------------------
+
+ //! Get if the `label` is valid (i.e. registered).
+ ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
+ return isLabelValid(label.getId());
+ }
+
+ //! Get if the label `id` is valid (i.e. registered).
+ ASMJIT_API bool isLabelValid(uint32_t id) const noexcept;
+
+ //! Emit a formatted string `fmt`.
+ ASMJIT_API Error commentf(const char* fmt, ...);
+ //! Emit a formatted string `fmt` (va_list version).
+ ASMJIT_API Error commentv(const char* fmt, va_list ap);
+
+ // --------------------------------------------------------------------------
+ // [Emit]
+ // --------------------------------------------------------------------------
+
+ // NOTE: These `emit()` helpers are designed to address a code-bloat generated
+ // by C++ compilers to call a function having many arguments. Each parameter to
+ // `_emit()` requires code to pass it, which means that if we default to 4
+ // operand parameters in `_emit()` and instId the C++ compiler would have to
+ // generate a virtual function call having 5 parameters, which is quite a lot.
+ // Since by default asm instructions have 2 to 3 operands it's better to
+ // introduce helpers that pass those and fill all the remaining with `_none`.
+
+ //! Emit an instruction.
+ ASMJIT_API Error emit(uint32_t instId);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
+
+ //! Emit an instruction that has a 32-bit signed immediate operand.
+ ASMJIT_API Error emit(uint32_t instId, int o0);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int o1);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int o2);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int o3);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int o4);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int o5);
+
+ //! Emit an instruction that has a 64-bit signed immediate operand.
+ ASMJIT_API Error emit(uint32_t instId, int64_t o0);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, int64_t o1);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, int64_t o2);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, int64_t o3);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, int64_t o4);
+ //! \overload
+ ASMJIT_API Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, int64_t o5);
+
+ //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, unsigned int o0) {
+    return emit(instId, static_cast<int64_t>(o0));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, unsigned int o1) {
+    return emit(instId, o0, static_cast<int64_t>(o1));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, unsigned int o2) {
+    return emit(instId, o0, o1, static_cast<int64_t>(o2));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, unsigned int o3) {
+    return emit(instId, o0, o1, o2, static_cast<int64_t>(o3));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, unsigned int o4) {
+    return emit(instId, o0, o1, o2, o3, static_cast<int64_t>(o4));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, unsigned int o5) {
+    return emit(instId, o0, o1, o2, o3, o4, static_cast<int64_t>(o5));
+  }
+
+ //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, uint64_t o0) {
+    return emit(instId, static_cast<int64_t>(o0));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, uint64_t o1) {
+    return emit(instId, o0, static_cast<int64_t>(o1));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, uint64_t o2) {
+    return emit(instId, o0, o1, static_cast<int64_t>(o2));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, uint64_t o3) {
+    return emit(instId, o0, o1, o2, static_cast<int64_t>(o3));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, uint64_t o4) {
+    return emit(instId, o0, o1, o2, o3, static_cast<int64_t>(o4));
+  }
+  //! \overload
+  ASMJIT_INLINE Error emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, uint64_t o5) {
+    return emit(instId, o0, o1, o2, o3, o4, static_cast<int64_t>(o5));
+  }
+
+ ASMJIT_INLINE Error emitOpArray(uint32_t instId, const Operand_* opArray, size_t opCount) {
+ return _emitOpArray(instId, opArray, opCount);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ CodeInfo _codeInfo; //!< Basic information about the code (matches CodeHolder::_codeInfo).
+ CodeHolder* _code; //!< CodeHolder the CodeEmitter is attached to.
+ CodeEmitter* _nextEmitter; //!< Linked list of `CodeEmitter`s attached to the same \ref CodeHolder.
+
+ uint8_t _type; //!< See CodeEmitter::Type.
+ uint8_t _destroyed; //!< Set by ~CodeEmitter() before calling `_code->detach()`.
+ uint8_t _finalized; //!< True if the CodeEmitter is finalized (CodeBuilder & CodeCompiler).
+ uint8_t _reserved; //!< \internal
+ Error _lastError; //!< Last error code.
+
+ uint32_t _privateData; //!< Internal private data used freely by any CodeEmitter.
+ uint32_t _globalHints; //!< Global hints, always in sync with CodeHolder.
+ uint32_t _globalOptions; //!< Global options, combined with `_options` before used by each instruction.
+
+ uint32_t _options; //!< Used to pass instruction options (affects the next instruction).
+ RegOnly _extraReg; //!< Extra register (op-mask {k} on AVX-512) (affects the next instruction).
+ const char* _inlineComment; //!< Inline comment of the next instruction (affects the next instruction).
+
+ Operand_ _none; //!< Used to pass unused operands to `_emit()` instead of passing null.
+ Reg _nativeGpReg; //!< Native GP register with zero id.
+ const Reg* _nativeGpArray; //!< Array of native registers indexed from zero.
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_CODEEMITTER_H
diff --git a/libraries/asmjit/asmjit/base/codeholder.cpp b/libraries/asmjit/asmjit/base/codeholder.cpp
new file mode 100644
index 00000000000..282f01289ca
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codeholder.cpp
@@ -0,0 +1,696 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/assembler.h"
+#include "../base/utils.h"
+#include "../base/vmem.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+ErrorHandler::ErrorHandler() noexcept {}
+ErrorHandler::~ErrorHandler() noexcept {}
+
+// ============================================================================
+// [asmjit::CodeHolder - Utilities]
+// ============================================================================
+
+static void CodeHolder_setGlobalOption(CodeHolder* self, uint32_t clear, uint32_t add) noexcept {
+ // Modify global options of `CodeHolder` itself.
+ self->_globalOptions = (self->_globalOptions & ~clear) | add;
+
+ // Modify all global options of all `CodeEmitter`s attached.
+ CodeEmitter* emitter = self->_emitters;
+ while (emitter) {
+ emitter->_globalOptions = (emitter->_globalOptions & ~clear) | add;
+ emitter = emitter->_nextEmitter;
+ }
+}
+
+static void CodeHolder_resetInternal(CodeHolder* self, bool releaseMemory) noexcept {
+ // Detach all `CodeEmitter`s.
+ while (self->_emitters)
+ self->detach(self->_emitters);
+
+ // Reset everything into its construction state.
+ self->_codeInfo.reset();
+ self->_globalHints = 0;
+ self->_globalOptions = 0;
+ self->_logger = nullptr;
+ self->_errorHandler = nullptr;
+
+ self->_unresolvedLabelsCount = 0;
+ self->_trampolinesSize = 0;
+
+ // Reset all sections.
+ size_t numSections = self->_sections.getLength();
+ for (size_t i = 0; i < numSections; i++) {
+ SectionEntry* section = self->_sections[i];
+ if (section->_buffer.hasData() && !section->_buffer.isExternal())
+ Internal::releaseMemory(section->_buffer._data);
+ section->_buffer._data = nullptr;
+ section->_buffer._capacity = 0;
+ }
+
+ // Reset zone allocator and all containers using it.
+ ZoneHeap* heap = &self->_baseHeap;
+
+ self->_namedLabels.reset(heap);
+ self->_relocations.reset();
+ self->_labels.reset();
+ self->_sections.reset();
+
+ heap->reset(&self->_baseZone);
+ self->_baseZone.reset(releaseMemory);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Construction / Destruction]
+// ============================================================================
+
+CodeHolder::CodeHolder() noexcept
+ : _codeInfo(),
+ _globalHints(0),
+ _globalOptions(0),
+ _emitters(nullptr),
+ _cgAsm(nullptr),
+ _logger(nullptr),
+ _errorHandler(nullptr),
+ _unresolvedLabelsCount(0),
+ _trampolinesSize(0),
+ _baseZone(16384 - Zone::kZoneOverhead),
+ _dataZone(16384 - Zone::kZoneOverhead),
+ _baseHeap(&_baseZone),
+ _namedLabels(&_baseHeap) {}
+
+CodeHolder::~CodeHolder() noexcept {
+ CodeHolder_resetInternal(this, true);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Init / Reset]
+// ============================================================================
+
+Error CodeHolder::init(const CodeInfo& info) noexcept {
+ // Cannot reinitialize if it's locked or there is one or more CodeEmitter
+ // attached.
+ if (isInitialized())
+ return DebugUtils::errored(kErrorAlreadyInitialized);
+
+  // If we are just initializing there should be no emitters attached.
+ ASMJIT_ASSERT(_emitters == nullptr);
+
+ // Create the default section and insert it to the `_sections` array.
+ Error err = _sections.willGrow(&_baseHeap);
+ if (err == kErrorOk) {
+    SectionEntry* se = _baseZone.allocZeroedT<SectionEntry>();
+ if (ASMJIT_LIKELY(se)) {
+ se->_flags = SectionEntry::kFlagExec | SectionEntry::kFlagConst;
+ se->_setDefaultName('.', 't', 'e', 'x', 't');
+ _sections.appendUnsafe(se);
+ }
+ else {
+ err = DebugUtils::errored(kErrorNoHeapMemory);
+ }
+ }
+
+ if (ASMJIT_UNLIKELY(err)) {
+ _baseZone.reset(false);
+ return err;
+ }
+ else {
+ _codeInfo = info;
+ return kErrorOk;
+ }
+}
+
+void CodeHolder::reset(bool releaseMemory) noexcept {
+ CodeHolder_resetInternal(this, releaseMemory);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Attach / Detach]
+// ============================================================================
+
+Error CodeHolder::attach(CodeEmitter* emitter) noexcept {
+ // Catch a possible misuse of the API.
+ if (!emitter)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ uint32_t type = emitter->getType();
+ if (type == CodeEmitter::kTypeNone || type >= CodeEmitter::kTypeCount)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ // This is suspicious, but don't fail if `emitter` matches.
+ if (emitter->_code != nullptr) {
+ if (emitter->_code == this) return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ // Special case - attach `Assembler`.
+ CodeEmitter** pSlot = nullptr;
+ if (type == CodeEmitter::kTypeAssembler) {
+ if (_cgAsm)
+ return DebugUtils::errored(kErrorSlotOccupied);
+    pSlot = reinterpret_cast<CodeEmitter**>(&_cgAsm);
+ }
+
+ Error err = emitter->onAttach(this);
+ if (err != kErrorOk) return err;
+
+ // Add to a single-linked list of `CodeEmitter`s.
+ emitter->_nextEmitter = _emitters;
+ _emitters = emitter;
+ if (pSlot) *pSlot = emitter;
+
+ // Establish the connection.
+ emitter->_code = this;
+ return kErrorOk;
+}
+
+Error CodeHolder::detach(CodeEmitter* emitter) noexcept {
+ if (!emitter)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (emitter->_code != this)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t type = emitter->getType();
+ Error err = kErrorOk;
+
+ // NOTE: We always detach if we were asked to, if error happens during
+ // `emitter->onDetach()` we just propagate it, but the CodeEmitter will
+ // be detached.
+ if (!emitter->_destroyed) {
+ if (type == CodeEmitter::kTypeAssembler)
+      static_cast<Assembler*>(emitter)->sync();
+ err = emitter->onDetach(this);
+ }
+
+ // Special case - detach `Assembler`.
+ if (type == CodeEmitter::kTypeAssembler)
+ _cgAsm = nullptr;
+
+ // Remove from a single-linked list of `CodeEmitter`s.
+ CodeEmitter** pPrev = &_emitters;
+ for (;;) {
+ ASMJIT_ASSERT(*pPrev != nullptr);
+ CodeEmitter* cur = *pPrev;
+
+ if (cur == emitter) {
+ *pPrev = emitter->_nextEmitter;
+ break;
+ }
+
+ pPrev = &cur->_nextEmitter;
+ }
+
+ emitter->_code = nullptr;
+ emitter->_nextEmitter = nullptr;
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Sync]
+// ============================================================================
+
+void CodeHolder::sync() noexcept {
+ if (_cgAsm) _cgAsm->sync();
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Result Information]
+// ============================================================================
+
+size_t CodeHolder::getCodeSize() const noexcept {
+ // Reflect all changes first.
+  const_cast<CodeHolder*>(this)->sync();
+
+ // TODO: Support sections.
+ return _sections[0]->_buffer._length + getTrampolinesSize();
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Logging & Error Handling]
+// ============================================================================
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+void CodeHolder::setLogger(Logger* logger) noexcept {
+ uint32_t opt = 0;
+ if (logger) opt = CodeEmitter::kOptionLoggingEnabled;
+
+ _logger = logger;
+ CodeHolder_setGlobalOption(this, CodeEmitter::kOptionLoggingEnabled, opt);
+}
+#endif // !ASMJIT_DISABLE_LOGGING
+
+Error CodeHolder::setErrorHandler(ErrorHandler* handler) noexcept {
+ _errorHandler = handler;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Sections]
+// ============================================================================
+
+static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
+ uint8_t* oldData = cb->_data;
+ uint8_t* newData;
+
+ if (oldData && !cb->isExternal())
+    newData = static_cast<uint8_t*>(Internal::reallocMemory(oldData, n));
+  else
+    newData = static_cast<uint8_t*>(Internal::allocMemory(n));
+
+ if (ASMJIT_UNLIKELY(!newData))
+ return DebugUtils::errored(kErrorNoHeapMemory);
+
+ cb->_data = newData;
+ cb->_capacity = n;
+
+ // Update the `Assembler` pointers if attached. Maybe we should introduce an
+ // event for this, but since only one Assembler can be attached at a time it
+ // should not matter how these pointers are updated.
+ Assembler* a = self->_cgAsm;
+ if (a && &a->_section->_buffer == cb) {
+ size_t offset = a->getOffset();
+
+ a->_bufferData = newData;
+ a->_bufferEnd = newData + n;
+ a->_bufferPtr = newData + offset;
+ }
+
+ return kErrorOk;
+}
+
+//! Grow `cb` so at least `n` more bytes fit, using geometric growth.
+Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
+  // This is most likely called by `Assembler` so `sync()` shouldn't be needed,
+  // however, if this is called by the user and the currently attached Assembler
+  // did generate some code we could lose that, so sync now and make sure the
+  // section length is updated.
+  if (_cgAsm) _cgAsm->sync();
+
+  // Now the length of the section must be valid. Was `IntTraits::maxValue()`
+  // - the `<size_t>` template argument was missing.
+  size_t length = cb->getLength();
+  if (ASMJIT_UNLIKELY(n > IntTraits<size_t>::maxValue() - length))
+    return DebugUtils::errored(kErrorNoHeapMemory);
+
+  // We can now check if growing the buffer is really necessary. It's unlikely
+  // that this function is called while there is still room for `n` bytes.
+  size_t capacity = cb->getCapacity();
+  size_t required = length + n;
+  if (ASMJIT_UNLIKELY(required <= capacity)) return kErrorOk;
+
+  if (cb->isFixedSize())
+    return DebugUtils::errored(kErrorCodeTooLarge);
+
+  if (capacity < 8096)
+    capacity = 8096;
+  else
+    capacity += Globals::kAllocOverhead;
+
+  do {
+    size_t old = capacity;
+    // Grow geometrically below the threshold, linearly above it. The original
+    // repeated this step twice verbatim (copy-paste duplication), quadrupling
+    // small capacities per iteration; a single step is intended.
+    if (capacity < Globals::kAllocThreshold)
+      capacity *= 2;
+    else
+      capacity += Globals::kAllocThreshold;
+
+    // Overflow - the new capacity wrapped around, request cannot be satisfied.
+    if (ASMJIT_UNLIKELY(old > capacity))
+      return DebugUtils::errored(kErrorNoHeapMemory);
+  } while (capacity - Globals::kAllocOverhead < required);
+
+  return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
+}
+
+//! Ensure `cb` has capacity for at least `n` bytes (no-op when it already does).
+Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
+  // Fast path - nothing to do when the buffer is already large enough.
+  if (n <= cb->getCapacity())
+    return kErrorOk;
+
+  if (cb->isFixedSize())
+    return DebugUtils::errored(kErrorCodeTooLarge);
+
+  // We must sync, as mentioned in `growBuffer()` as well.
+  if (_cgAsm)
+    _cgAsm->sync();
+
+  return CodeHolder_reserveInternal(this, cb, n);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Labels & Symbols]
+// ============================================================================
+
+namespace {
+
+//! \internal
+//!
+//! Lookup key adapting a (name, length, hash) triple to the interface that
+//! `_namedLabels.get()` expects when searching for a `LabelEntry` by name.
+class LabelByName {
+public:
+  // The original constructor accepted `hVal` but never stored it, leaving the
+  // `hVal` member uninitialized and the hash lookup reading garbage; it is now
+  // initialized. `static_cast(nameLength)` was also missing its `<uint32_t>`.
+  ASMJIT_INLINE LabelByName(const char* name, size_t nameLength, uint32_t hVal) noexcept
+    : name(name),
+      nameLength(static_cast<uint32_t>(nameLength)),
+      hVal(hVal) {}
+
+  //! Returns true if `entry` has exactly this name (same length, same bytes).
+  ASMJIT_INLINE bool matches(const LabelEntry* entry) const noexcept {
+    return static_cast<uint32_t>(entry->getNameLength()) == nameLength &&
+           ::memcmp(entry->getName(), name, nameLength) == 0;
+  }
+
+  const char* name;    //!< Label name to search for.
+  uint32_t nameLength; //!< Length of `name`, truncated to 32 bits.
+  uint32_t hVal;       //!< Precomputed hash of the name (and parent id, for locals).
+};
+
+// Returns a hash of `name` and fixes `nameLength` if it's `Globals::kInvalidIndex`
+// (i.e. the caller did not know the length and `name` is null-terminated).
+//
+// NOTE(review): when an explicit `nameLength` contains an embedded NUL the
+// function returns `kErrorInvalidLabelName` coerced into the uint32_t hash;
+// callers appear to rely on their own validation - confirm this is intended.
+static uint32_t CodeHolder_hashNameAndFixLen(const char* name, size_t& nameLength) noexcept {
+  uint32_t hVal = 0;
+  if (nameLength == Globals::kInvalidIndex) {
+    size_t i = 0;
+    for (;;) {
+      // Was `static_cast(name[i])` - the `<uint8_t>` argument was missing.
+      uint8_t c = static_cast<uint8_t>(name[i]);
+      if (!c) break;
+      hVal = Utils::hashRound(hVal, c);
+      i++;
+    }
+    nameLength = i;
+  }
+  else {
+    for (size_t i = 0; i < nameLength; i++) {
+      uint8_t c = static_cast<uint8_t>(name[i]);
+      if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName);
+      hVal = Utils::hashRound(hVal, c);
+    }
+  }
+  return hVal;
+}
+
+} // anonymous namespace
+
+LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept {
+ LabelLink* link = _baseHeap.allocT();
+ if (ASMJIT_UNLIKELY(!link)) return nullptr;
+
+ link->prev = le->_links;
+ le->_links = link;
+
+ link->sectionId = sectionId;
+ link->relocId = RelocEntry::kInvalidId;
+ link->offset = offset;
+ link->rel = rel;
+
+ _unresolvedLabelsCount++;
+ return link;
+}
+
+//! Create a new anonymous label and store its packed id in `idOut`
+//! (0 on failure).
+Error CodeHolder::newLabelId(uint32_t& idOut) noexcept {
+  idOut = 0;
+
+  size_t index = _labels.getLength();
+  // Running out of packed-id space is the exceptional path. This was annotated
+  // `ASMJIT_LIKELY`, contradicting the identical check in `newNamedLabelId()`.
+  if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount))
+    return DebugUtils::errored(kErrorLabelIndexOverflow);
+
+  ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap));
+  // Was `allocZeroedT()` - the `<LabelEntry>` template argument was missing.
+  LabelEntry* le = _baseHeap.allocZeroedT<LabelEntry>();
+
+  if (ASMJIT_UNLIKELY(!le))
+    return DebugUtils::errored(kErrorNoHeapMemory);
+
+  uint32_t id = Operand::packId(static_cast<uint32_t>(index));
+  le->_setId(id);
+  le->_parentId = 0;
+  le->_sectionId = SectionEntry::kInvalidId;
+  le->_offset = 0;
+
+  _labels.appendUnsafe(le);
+  idOut = id;
+  return kErrorOk;
+}
+
+//! Create a new named label (global, or local under `parentId`) and store its
+//! packed id in `idOut` (0 on failure). Duplicate names are rejected.
+Error CodeHolder::newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept {
+  idOut = 0;
+  uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength);
+
+  if (ASMJIT_UNLIKELY(nameLength == 0))
+    return DebugUtils::errored(kErrorInvalidLabelName);
+
+  if (ASMJIT_UNLIKELY(nameLength > Globals::kMaxLabelLength))
+    return DebugUtils::errored(kErrorLabelNameTooLong);
+
+  switch (type) {
+    case Label::kTypeLocal:
+      if (ASMJIT_UNLIKELY(Operand::unpackId(parentId) >= _labels.getLength()))
+        return DebugUtils::errored(kErrorInvalidParentLabel);
+
+      // Mix the parent id into the hash so identically named locals under
+      // different parents hash differently.
+      hVal ^= parentId;
+      break;
+
+    case Label::kTypeGlobal:
+      if (ASMJIT_UNLIKELY(parentId != 0))
+        return DebugUtils::errored(kErrorNonLocalLabelCantHaveParent);
+
+      break;
+
+    default:
+      return DebugUtils::errored(kErrorInvalidArgument);
+  }
+
+  // Don't allow to insert duplicates. Local labels allow duplicates that have
+  // different id, this is already accomplished by having a different hashes
+  // between the same label names having different parent labels.
+  LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal));
+  if (ASMJIT_UNLIKELY(le))
+    return DebugUtils::errored(kErrorLabelAlreadyDefined);
+
+  Error err = kErrorOk;
+  size_t index = _labels.getLength();
+
+  if (ASMJIT_UNLIKELY(index >= Operand::kPackedIdCount))
+    return DebugUtils::errored(kErrorLabelIndexOverflow);
+
+  ASMJIT_PROPAGATE(_labels.willGrow(&_baseHeap));
+  // The `<LabelEntry>`, `<uint32_t>`, `<uint8_t>` and `<char*>` template
+  // arguments below were all missing in the original (would not compile).
+  le = _baseHeap.allocZeroedT<LabelEntry>();
+
+  if (ASMJIT_UNLIKELY(!le))
+    return DebugUtils::errored(kErrorNoHeapMemory);
+
+  uint32_t id = Operand::packId(static_cast<uint32_t>(index));
+  le->_hVal = hVal;
+  le->_setId(id);
+  le->_type = static_cast<uint8_t>(type);
+  le->_parentId = 0;
+  le->_sectionId = SectionEntry::kInvalidId;
+  le->_offset = 0;
+
+  // Short names are embedded directly in the LabelEntry; longer ones are
+  // duplicated (null-terminated) into `_dataZone`.
+  if (le->_name.mustEmbed(nameLength)) {
+    le->_name.setEmbedded(name, nameLength);
+  }
+  else {
+    char* nameExternal = static_cast<char*>(_dataZone.dup(name, nameLength, true));
+    if (ASMJIT_UNLIKELY(!nameExternal))
+      return DebugUtils::errored(kErrorNoHeapMemory);
+    le->_name.setExternal(nameExternal, nameLength);
+  }
+
+  _labels.appendUnsafe(le);
+  _namedLabels.put(le);
+
+  idOut = id;
+  return err;
+}
+
+//! Look up a named label; returns its packed id or 0 if not found.
+//!
+//! NOTE(review): `parentId` is currently unused, so local labels cannot be
+//! found through this function - confirm whether that is intended.
+uint32_t CodeHolder::getLabelIdByName(const char* name, size_t nameLength, uint32_t parentId) noexcept {
+  uint32_t hVal = CodeHolder_hashNameAndFixLen(name, nameLength);
+  if (ASMJIT_UNLIKELY(!nameLength)) return 0;
+
+  LabelEntry* le = _namedLabels.get(LabelByName(name, nameLength, hVal));
+  // Was `static_cast(0)` - the `<uint32_t>` template argument was missing.
+  return le ? le->getId() : static_cast<uint32_t>(0);
+}
+
+// ============================================================================
+// [asmjit::CodeEmitter - Relocations]
+// ============================================================================
+
+//! Encode an X86/X64 ModRM byte: `m` (mod, 2 bits) | `o` (reg/opcode, 3 bits)
+//! | `rm` (r/m, 3 bits). Inputs are not masked; callers pass in-range values.
+static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
+ return (m << 6) | (o << 3) | rm;
+}
+
+//! Allocate a new zero-initialized `RelocEntry` of `type`/`size` and return
+//! it through `dst`.
+Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept {
+  ASMJIT_PROPAGATE(_relocations.willGrow(&_baseHeap));
+
+  // Relocation ids are 32-bit, so the entry count must fit.
+  size_t index = _relocations.getLength();
+  if (ASMJIT_UNLIKELY(index > size_t(0xFFFFFFFFU)))
+    return DebugUtils::errored(kErrorRelocIndexOverflow);
+
+  // Was `allocZeroedT()` / `static_cast(...)` - template arguments missing.
+  RelocEntry* re = _baseHeap.allocZeroedT<RelocEntry>();
+  if (ASMJIT_UNLIKELY(!re))
+    return DebugUtils::errored(kErrorNoHeapMemory);
+
+  re->_id = static_cast<uint32_t>(index);
+  re->_type = static_cast<uint8_t>(type);
+  re->_size = static_cast<uint8_t>(size);
+  re->_sourceSectionId = SectionEntry::kInvalidId;
+  re->_targetSectionId = SectionEntry::kInvalidId;
+  _relocations.appendUnsafe(re);
+
+  *dst = re;
+  return kErrorOk;
+}
+
+// TODO: Support multiple sections, this only relocates the first.
+// TODO: This should go to Runtime as it's responsible for relocating the
+//       code, CodeHolder should just hold it.
+//
+// Copies the generated code into `_dst`, applies all recorded relocations
+// relative to `baseAddress`, and emits 8-byte trampolines after the code for
+// +-2GB-unreachable targets. Returns the number of bytes written.
+//
+// NOTE(review): on a bad RelocEntry this returns an Error code coerced to
+// size_t rather than a byte count - callers must treat small return values
+// as failure; confirm this contract.
+size_t CodeHolder::relocate(void* _dst, uint64_t baseAddress) const noexcept {
+  SectionEntry* section = _sections[0];
+  ASMJIT_ASSERT(section != nullptr);
+
+  // All `static_cast(...)` calls in this function were missing their template
+  // argument lists (extraction damage); restored throughout.
+  uint8_t* dst = static_cast<uint8_t*>(_dst);
+  if (baseAddress == Globals::kNoBaseAddress)
+    baseAddress = static_cast<uint64_t>((uintptr_t)dst);
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+  Logger* logger = getLogger();
+#endif // ASMJIT_DISABLE_LOGGING
+
+  size_t minCodeSize = section->getBuffer().getLength(); // Minimum code size.
+  size_t maxCodeSize = getCodeSize();                    // Includes all possible trampolines.
+
+  // We will copy the exact size of the generated code. Extra code for trampolines
+  // is generated on-the-fly by the relocator (this code doesn't exist at the moment).
+  ::memcpy(dst, section->_buffer._data, minCodeSize);
+
+  // Trampoline offset from the beginning of dst/baseAddress.
+  size_t trampOffset = minCodeSize;
+
+  // Relocate all recorded locations.
+  size_t numRelocs = _relocations.getLength();
+  const RelocEntry* const* reArray = _relocations.getData();
+
+  for (size_t i = 0; i < numRelocs; i++) {
+    const RelocEntry* re = reArray[i];
+
+    // Possibly deleted or optimized out relocation entry.
+    if (re->getType() == RelocEntry::kTypeNone)
+      continue;
+
+    uint64_t ptr = re->getData();
+    size_t codeOffset = static_cast<size_t>(re->getSourceOffset());
+
+    // Make sure that the `RelocEntry` is correct, we don't want to write
+    // out of bounds in `dst`.
+    if (ASMJIT_UNLIKELY(codeOffset + re->getSize() > maxCodeSize))
+      return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+    // Whether to use trampoline, can be only used if relocation type is `kTypeTrampoline`.
+    bool useTrampoline = false;
+
+    switch (re->getType()) {
+      case RelocEntry::kTypeAbsToAbs: {
+        break;
+      }
+
+      case RelocEntry::kTypeRelToAbs: {
+        ptr += baseAddress;
+        break;
+      }
+
+      case RelocEntry::kTypeAbsToRel: {
+        ptr -= baseAddress + re->getSourceOffset() + re->getSize();
+        break;
+      }
+
+      case RelocEntry::kTypeTrampoline: {
+        if (re->getSize() != 4)
+          return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+        // Fall back to a trampoline when the target isn't reachable rel32.
+        ptr -= baseAddress + re->getSourceOffset() + re->getSize();
+        if (!Utils::isInt32(static_cast<int64_t>(ptr))) {
+          ptr = (uint64_t)trampOffset - re->getSourceOffset() - re->getSize();
+          useTrampoline = true;
+        }
+        break;
+      }
+
+      default:
+        return DebugUtils::errored(kErrorInvalidRelocEntry);
+    }
+
+    switch (re->getSize()) {
+      case 1:
+        Utils::writeU8(dst + codeOffset, static_cast<uint32_t>(ptr & 0xFFU));
+        break;
+
+      case 4:
+        Utils::writeU32u(dst + codeOffset, static_cast<uint32_t>(ptr & 0xFFFFFFFFU));
+        break;
+
+      case 8:
+        Utils::writeU64u(dst + codeOffset, ptr);
+        break;
+
+      default:
+        return DebugUtils::errored(kErrorInvalidRelocEntry);
+    }
+
+    // Handle the trampoline case.
+    if (useTrampoline) {
+      // Bytes that replace [REX, OPCODE] bytes.
+      uint32_t byte0 = 0xFF;
+      uint32_t byte1 = dst[codeOffset - 1];
+
+      if (byte1 == 0xE8) {
+        // Patch CALL/MOD byte to FF/2 (-> 0x15).
+        byte1 = x86EncodeMod(0, 2, 5);
+      }
+      else if (byte1 == 0xE9) {
+        // Patch JMP/MOD byte to FF/4 (-> 0x25).
+        byte1 = x86EncodeMod(0, 4, 5);
+      }
+      else {
+        return DebugUtils::errored(kErrorInvalidRelocEntry);
+      }
+
+      // Patch `jmp/call` instruction.
+      ASMJIT_ASSERT(codeOffset >= 2);
+      dst[codeOffset - 2] = static_cast<uint8_t>(byte0);
+      dst[codeOffset - 1] = static_cast<uint8_t>(byte1);
+
+      // Store absolute address and advance the trampoline pointer.
+      Utils::writeU64u(dst + trampOffset, re->getData());
+      trampOffset += 8;
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+      if (logger)
+        logger->logf("[reloc] dq 0x%016llX ; Trampoline\n", re->getData());
+#endif // !ASMJIT_DISABLE_LOGGING
+    }
+  }
+
+  // If there are no trampolines this is the same as `minCodeSize`.
+  return trampOffset;
+}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/codeholder.h b/libraries/asmjit/asmjit/base/codeholder.h
new file mode 100644
index 00000000000..f753ecc3786
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/codeholder.h
@@ -0,0 +1,748 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_CODEHOLDER_H
+#define _ASMJIT_BASE_CODEHOLDER_H
+
+// [Dependencies]
+#include "../base/arch.h"
+#include "../base/func.h"
+#include "../base/logging.h"
+#include "../base/operand.h"
+#include "../base/simdtypes.h"
+#include "../base/utils.h"
+#include "../base/zone.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class Assembler;
+class CodeEmitter;
+class CodeHolder;
+
+// ============================================================================
+// [asmjit::AlignMode]
+// ============================================================================
+
+//! Align mode - what kind of padding to emit when aligning.
+ASMJIT_ENUM(AlignMode) {
+ kAlignCode = 0, //!< Align executable code.
+ kAlignData = 1, //!< Align non-executable data.
+ kAlignZero = 2, //!< Align by a sequence of zeros.
+ kAlignCount //!< Count of alignment modes.
+};
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+//! Error handler can be used to override the default behavior of error handling
+//! available to all classes that inherit \ref CodeEmitter. See \ref handleError().
+class ASMJIT_VIRTAPI ErrorHandler {
+public:
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Create a new `ErrorHandler` instance.
+ ASMJIT_API ErrorHandler() noexcept;
+ //! Destroy the `ErrorHandler` instance.
+ ASMJIT_API virtual ~ErrorHandler() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Handle Error]
+ // --------------------------------------------------------------------------
+
+ //! Error handler (abstract).
+ //!
+ //! Error handler is called after an error happened and before it's propagated
+ //! to the caller. There are multiple ways how the error handler can be used:
+ //!
+ //! 1. Returning `true` or `false` from `handleError()`. If `true` is returned
+ //! it means that the error was reported and AsmJit can continue execution.
+ //! The reported error will still be propagated to the caller, but won't put the
+ //! CodeEmitter into an error state (it won't set last-error). However,
+ //! returning `false` means that the error cannot be handled - in such case
+ //! it stores the error, which can be then retrieved by using `getLastError()`.
+ //! Returning `false` is the default behavior when no error handler is present.
+ //! To put the assembler into a non-error state again a `resetLastError()` must
+ //! be called.
+ //!
+ //! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
+ //! exception-safe, but you can throw exception from your error handler if
+ //! this way is the preferred way of handling errors in your project. Throwing
+ //! an exception acts virtually as returning `true` as AsmJit won't be able
+ //! to store the error because the exception changes execution path.
+ //!
+ //! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
+ //! `CodeEmitter` to a consistent state before calling the `handleError()`
+ //! so `longjmp()` can be used without any issues to cancel the code
+ //! generation if an error occurred. There is no difference between
+ //! exceptions and longjmp() from AsmJit's perspective.
+ virtual bool handleError(Error err, const char* message, CodeEmitter* origin) = 0;
+};
+
+// ============================================================================
+// [asmjit::CodeInfo]
+// ============================================================================
+
+//! Basic information about a code (or target). It describes its architecture,
+//! code generation mode (or optimization level), and base address.
+class CodeInfo {
+public:
+  // --------------------------------------------------------------------------
+  // [Construction / Destruction]
+  // --------------------------------------------------------------------------
+
+  //! Create a default-initialized CodeInfo (no architecture, no base address).
+  ASMJIT_INLINE CodeInfo() noexcept
+    : _archInfo(),
+      _stackAlignment(0),
+      _cdeclCallConv(CallConv::kIdNone),
+      _stdCallConv(CallConv::kIdNone),
+      _fastCallConv(CallConv::kIdNone),
+      _baseAddress(Globals::kNoBaseAddress) {}
+
+  //! Create a copy of `other`.
+  ASMJIT_INLINE CodeInfo(const CodeInfo& other) noexcept { init(other); }
+
+  //! Create a CodeInfo of the given architecture and optional base address.
+  explicit ASMJIT_INLINE CodeInfo(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
+    : _archInfo(archType, archMode),
+      _packedMiscInfo(0),
+      _baseAddress(baseAddress) {}
+
+  // --------------------------------------------------------------------------
+  // [Init / Reset]
+  // --------------------------------------------------------------------------
+
+  //! Get whether an architecture has been assigned.
+  ASMJIT_INLINE bool isInitialized() const noexcept {
+    return _archInfo._type != ArchInfo::kTypeNone;
+  }
+
+  //! Copy all fields of `other` into this CodeInfo.
+  ASMJIT_INLINE void init(const CodeInfo& other) noexcept {
+    _archInfo = other._archInfo;
+    _packedMiscInfo = other._packedMiscInfo;
+    _baseAddress = other._baseAddress;
+  }
+
+  //! Initialize to `archType`/`archMode`, clearing calling-convention and
+  //! stack-alignment information.
+  ASMJIT_INLINE void init(uint32_t archType, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
+    _archInfo.init(archType, archMode);
+    _packedMiscInfo = 0;
+    _baseAddress = baseAddress;
+  }
+
+  //! Reset to the default-constructed state.
+  ASMJIT_INLINE void reset() noexcept {
+    _archInfo.reset();
+    _stackAlignment = 0;
+    _cdeclCallConv = CallConv::kIdNone;
+    _stdCallConv = CallConv::kIdNone;
+    _fastCallConv = CallConv::kIdNone;
+    _baseAddress = Globals::kNoBaseAddress;
+  }
+
+  // --------------------------------------------------------------------------
+  // [Architecture Information]
+  // --------------------------------------------------------------------------
+
+  //! Get architecture information, see \ref ArchInfo.
+  ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; }
+
+  //! Get architecture type, see \ref ArchInfo::Type.
+  ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); }
+  //! Get architecture sub-type, see \ref ArchInfo::SubType.
+  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); }
+  //! Get a size of a GP register of the architecture the code is using.
+  ASMJIT_INLINE uint32_t getGpSize() const noexcept { return _archInfo.getGpSize(); }
+  //! Get number of GP registers available of the architecture the code is using.
+  ASMJIT_INLINE uint32_t getGpCount() const noexcept { return _archInfo.getGpCount(); }
+
+  // --------------------------------------------------------------------------
+  // [High-Level Information]
+  // --------------------------------------------------------------------------
+
+  // NOTE: All four setters below were written as `static_cast(...)` without
+  // the `<uint8_t>` template argument, which does not compile; restored.
+
+  //! Get a natural stack alignment that must be honored (or 0 if not known).
+  ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; }
+  //! Set a natural stack alignment that must be honored.
+  ASMJIT_INLINE void setStackAlignment(uint8_t sa) noexcept { _stackAlignment = static_cast<uint8_t>(sa); }
+
+  //! Get the default CDECL calling convention id.
+  ASMJIT_INLINE uint32_t getCdeclCallConv() const noexcept { return _cdeclCallConv; }
+  //! Set the default CDECL calling convention id.
+  ASMJIT_INLINE void setCdeclCallConv(uint32_t cc) noexcept { _cdeclCallConv = static_cast<uint8_t>(cc); }
+
+  //! Get the default STDCALL calling convention id.
+  ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; }
+  //! Set the default STDCALL calling convention id.
+  ASMJIT_INLINE void setStdCallConv(uint32_t cc) noexcept { _stdCallConv = static_cast<uint8_t>(cc); }
+
+  //! Get the default FASTCALL calling convention id.
+  ASMJIT_INLINE uint32_t getFastCallConv() const noexcept { return _fastCallConv; }
+  //! Set the default FASTCALL calling convention id.
+  ASMJIT_INLINE void setFastCallConv(uint32_t cc) noexcept { _fastCallConv = static_cast<uint8_t>(cc); }
+
+  // --------------------------------------------------------------------------
+  // [Addressing Information]
+  // --------------------------------------------------------------------------
+
+  //! Get whether a base address is set (i.e. not `Globals::kNoBaseAddress`).
+  ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
+  //! Get the base address the code will be (or was) relocated to.
+  ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _baseAddress; }
+  //! Set the base address.
+  ASMJIT_INLINE void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
+  //! Clear the base address.
+  ASMJIT_INLINE void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
+
+  // --------------------------------------------------------------------------
+  // [Operator Overload]
+  // --------------------------------------------------------------------------
+
+  ASMJIT_INLINE CodeInfo& operator=(const CodeInfo& other) noexcept { init(other); return *this; }
+  // NOTE(review): memcmp compares the whole object representation including
+  // any padding bytes - assumed safe for this layout; confirm if members change.
+  ASMJIT_INLINE bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
+  ASMJIT_INLINE bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
+
+  // --------------------------------------------------------------------------
+  // [Members]
+  // --------------------------------------------------------------------------
+
+  ArchInfo _archInfo;              //!< Architecture information.
+
+  union {
+    struct {
+      uint8_t _stackAlignment;     //!< Natural stack alignment (ARCH+OS).
+      uint8_t _cdeclCallConv;      //!< Default CDECL calling convention.
+      uint8_t _stdCallConv;        //!< Default STDCALL calling convention.
+      uint8_t _fastCallConv;       //!< Default FASTCALL calling convention.
+    };
+    uint32_t _packedMiscInfo;      //!< \internal
+  };
+
+  uint64_t _baseAddress;           //!< Base address.
+};
+
+// ============================================================================
+// [asmjit::CodeBuffer]
+// ============================================================================
+
+//! Code or data buffer.
+//!
+//! Plain storage descriptor used by `SectionEntry`; owned and (re)allocated
+//! by `CodeHolder` unless `_isExternal` is set.
+struct CodeBuffer {
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get whether the buffer has allocated storage.
+ ASMJIT_INLINE bool hasData() const noexcept { return _data != nullptr; }
+ //! Get a pointer to the buffer's storage.
+ ASMJIT_INLINE uint8_t* getData() noexcept { return _data; }
+ //! \overload
+ ASMJIT_INLINE const uint8_t* getData() const noexcept { return _data; }
+
+ //! Get the number of bytes used.
+ ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
+ //! Get the number of bytes allocated.
+ ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
+
+ //! Get whether the storage is user-provided (never reallocated/freed here).
+ ASMJIT_INLINE bool isExternal() const noexcept { return _isExternal; }
+ //! Get whether the buffer is not allowed to grow.
+ ASMJIT_INLINE bool isFixedSize() const noexcept { return _isFixedSize; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint8_t* _data; //!< The content of the buffer (data).
+ size_t _length; //!< Number of bytes of `data` used.
+ size_t _capacity; //!< Buffer capacity (in bytes).
+ bool _isExternal; //!< True if this is external buffer.
+ bool _isFixedSize; //!< True if this buffer cannot grow.
+};
+
+// ============================================================================
+// [asmjit::SectionEntry]
+// ============================================================================
+
+//! Section entry.
+//!
+//! Describes one output section (name, flags, alignment) and owns its
+//! `CodeBuffer`.
+class SectionEntry {
+public:
+ ASMJIT_ENUM(Id) {
+ kInvalidId = 0xFFFFFFFFU //!< Invalid section id.
+ };
+
+ //! Section flags.
+ ASMJIT_ENUM(Flags) {
+ kFlagExec = 0x00000001U, //!< Executable (.text sections).
+ kFlagConst = 0x00000002U, //!< Read-only (.text and .data sections).
+ kFlagZero = 0x00000004U, //!< Zero initialized by the loader (BSS).
+ kFlagInfo = 0x00000008U, //!< Info / comment flag.
+ kFlagImplicit = 0x80000000U //!< Section created implicitly (can be deleted by the Runtime).
+ };
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get the section id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
+ //! Get the section name as a null-terminated string.
+ ASMJIT_INLINE const char* getName() const noexcept { return _name; }
+
+ //! \internal
+ //!
+ //! Set the first 8 characters of the name from packed 32-bit words;
+ //! unspecified characters default to 0 (terminator/padding).
+ ASMJIT_INLINE void _setDefaultName(
+ char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0,
+ char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept {
+ _nameAsU32[0] = Utils::pack32_4x8(c0, c1, c2, c3);
+ _nameAsU32[1] = Utils::pack32_4x8(c4, c5, c6, c7);
+ }
+
+ //! Get section flags, see `Flags`.
+ ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
+ //! Get whether `flag` is set.
+ ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ //! Add `flags` to the section flags.
+ ASMJIT_INLINE void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ //! Clear `flags` from the section flags.
+ ASMJIT_INLINE void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+ //! Get the required alignment (0 means no requirement).
+ ASMJIT_INLINE uint32_t getAlignment() const noexcept { return _alignment; }
+ //! Set the required alignment.
+ ASMJIT_INLINE void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ //! Get the number of bytes the section occupies in the file/buffer.
+ ASMJIT_INLINE size_t getPhysicalSize() const noexcept { return _buffer.getLength(); }
+
+ //! Get the in-memory size (may exceed physical size, e.g. BSS).
+ ASMJIT_INLINE size_t getVirtualSize() const noexcept { return _virtualSize; }
+ //! Set the in-memory size.
+ ASMJIT_INLINE void setVirtualSize(uint32_t size) noexcept { _virtualSize = size; }
+
+ //! Get the section's code/data buffer.
+ ASMJIT_INLINE CodeBuffer& getBuffer() noexcept { return _buffer; }
+ //! \overload
+ ASMJIT_INLINE const CodeBuffer& getBuffer() const noexcept { return _buffer; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint32_t _id; //!< Section id.
+ uint32_t _flags; //!< Section flags.
+ uint32_t _alignment; //!< Section alignment requirements (0 if no requirements).
+ uint32_t _virtualSize; //!< Virtual size of the section (zero initialized mostly).
+ union {
+ char _name[36]; //!< Section name (max 35 characters, PE allows max 8).
+ uint32_t _nameAsU32[36 / 4]; //!< Section name as `uint32_t[]` (only optimization).
+ };
+ CodeBuffer _buffer; //!< Code or data buffer.
+};
+
+// ============================================================================
+// [asmjit::LabelLink]
+// ============================================================================
+
+//! Data structure used to link labels.
+//!
+//! Each use of an unbound label adds one `LabelLink` to the label's
+//! `_links` list; the recorded location is patched when the label is bound.
+struct LabelLink {
+ LabelLink* prev; //!< Previous link (single-linked list).
+ uint32_t sectionId; //!< Section id.
+ uint32_t relocId; //!< Relocation id or RelocEntry::kInvalidId.
+ size_t offset; //!< Label offset relative to the start of the section.
+ intptr_t rel; //!< Inlined rel8/rel32.
+};
+
+// ============================================================================
+// [asmjit::LabelEntry]
+// ============================================================================
+
+//! Label entry.
+//!
+//! Contains the following properties:
+//! * Label id - This is the only thing that is set to the `Label` operand.
+//! * Label name - Optional, used mostly to create executables and libraries.
+//! * Label type - Type of the label, default `Label::kTypeAnonymous`.
+//! * Label parent id - Derived from many assemblers that allow to define a
+//! local label that falls under a global label. This allows to define
+//! many labels of the same name that have different parent (global) label.
+//! * Offset - offset of the label bound by `Assembler`.
+//! * Links - single-linked list that contains locations of code that has
+//! to be patched when the label gets bound. Every use of unbound label
+//! adds one link to `_links` list.
+//! * HVal - Hash value of label's name and optionally parentId.
+//! * HashNext - Hash-table implementation detail.
+class LabelEntry : public ZoneHashNode {
+public:
+ // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode
+ // to fill a padding that a C++ compiler targeting 64-bit CPU will add to align
+ // the structure to 64-bits.
+
+ //! Get label id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _customData; }
+ //! Set label id (internal, used only by \ref CodeHolder).
+ ASMJIT_INLINE void _setId(uint32_t id) noexcept { _customData = id; }
+
+ //! Get label type, see \ref Label::Type.
+ ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
+ //! Get label flags, returns 0 at the moment.
+ ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
+
+ //! Get if the label has a parent label (non-zero `_parentId`).
+ ASMJIT_INLINE bool hasParent() const noexcept { return _parentId != 0; }
+ //! Get label's parent id.
+ ASMJIT_INLINE uint32_t getParentId() const noexcept { return _parentId; }
+
+ //! Get label's section id where it's bound to (or `SectionEntry::kInvalidId` if it's not bound yet).
+ ASMJIT_INLINE uint32_t getSectionId() const noexcept { return _sectionId; }
+
+ //! Get if the label has name.
+ ASMJIT_INLINE bool hasName() const noexcept { return !_name.isEmpty(); }
+
+ //! Get the label's name.
+ //!
+ //! NOTE: Local labels will return their local name without their parent
+ //! part, for example ".L1".
+ ASMJIT_INLINE const char* getName() const noexcept { return _name.getData(); }
+
+ //! Get length of label's name.
+ //!
+ //! NOTE: Label name is always null terminated, so you can use `strlen()` to
+ //! get it, however, it's also cached in `LabelEntry`, so if you want to know
+ //! the length the easiest way is to use `LabelEntry::getNameLength()`.
+ ASMJIT_INLINE size_t getNameLength() const noexcept { return _name.getLength(); }
+
+ //! Get if the label is bound.
+ ASMJIT_INLINE bool isBound() const noexcept { return _sectionId != SectionEntry::kInvalidId; }
+ //! Get the label offset (only useful if the label is bound).
+ ASMJIT_INLINE intptr_t getOffset() const noexcept { return _offset; }
+
+ //! Get the hash-value of label's name and its parent label (if any).
+ //!
+ //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function
+ //! is implemented in `Utils::hashString()` and `Utils::hashRound()`.
+ ASMJIT_INLINE uint32_t getHVal() const noexcept { return _hVal; }
+
+ // ------------------------------------------------------------------------
+ // [Members]
+ // ------------------------------------------------------------------------
+
+ // Let's round the size of `LabelEntry` to 64 bytes (as ZoneHeap has 32
+ // bytes granularity anyway). This gives `_name` the remaining space, which
+ // is roughly 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
+ enum { kNameBytes = 64 - (sizeof(ZoneHashNode) + 16 + sizeof(intptr_t) + sizeof(LabelLink*)) };
+
+ uint8_t _type; //!< Label type, see Label::Type.
+ uint8_t _flags; //!< Must be zero.
+ uint16_t _reserved16; //!< Reserved.
+ uint32_t _parentId; //!< Label parent id or zero.
+ uint32_t _sectionId; //!< Section id or `SectionEntry::kInvalidId`.
+ uint32_t _reserved32; //!< Reserved.
+ intptr_t _offset; //!< Label offset.
+ LabelLink* _links; //!< Label links.
+ SmallString _name; //!< Label name.
+};
+
+// ============================================================================
+// [asmjit::RelocEntry]
+// ============================================================================
+
+//! Relocation entry.
+//!
+//! Records one location in a section's code that must be patched when the
+//! final base address is known (see `CodeHolder::relocate()`).
+struct RelocEntry {
+ ASMJIT_ENUM(Id) {
+ kInvalidId = 0xFFFFFFFFU //!< Invalid relocation id.
+ };
+
+ //! Relocation type.
+ ASMJIT_ENUM(Type) {
+ kTypeNone = 0, //!< Deleted entry (no relocation).
+ kTypeAbsToAbs = 1, //!< Relocate absolute to absolute.
+ kTypeRelToAbs = 2, //!< Relocate relative to absolute.
+ kTypeAbsToRel = 3, //!< Relocate absolute to relative.
+ kTypeTrampoline = 4 //!< Relocate absolute to relative or use trampoline.
+ };
+
+ // ------------------------------------------------------------------------
+ // [Accessors]
+ // ------------------------------------------------------------------------
+
+ //! Get the relocation id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
+
+ //! Get the relocation type, see `Type`.
+ ASMJIT_INLINE uint32_t getType() const noexcept { return _type; }
+ //! Get the number of bytes to patch (1, 2, 4 or 8).
+ ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
+
+ //! Get the id of the section the patched bytes live in.
+ ASMJIT_INLINE uint32_t getSourceSectionId() const noexcept { return _sourceSectionId; }
+ //! Get the id of the section the relocation target lives in.
+ ASMJIT_INLINE uint32_t getTargetSectionId() const noexcept { return _targetSectionId; }
+
+ //! Get the offset of the patched bytes relative to the section start.
+ ASMJIT_INLINE uint64_t getSourceOffset() const noexcept { return _sourceOffset; }
+ //! Get the relocation payload (target offset, target address, etc).
+ ASMJIT_INLINE uint64_t getData() const noexcept { return _data; }
+
+ // ------------------------------------------------------------------------
+ // [Members]
+ // ------------------------------------------------------------------------
+
+ uint32_t _id; //!< Relocation id.
+ uint8_t _type; //!< Type of the relocation.
+ uint8_t _size; //!< Size of the relocation (1, 2, 4 or 8 bytes).
+ uint8_t _reserved[2]; //!< Reserved.
+ uint32_t _sourceSectionId; //!< Source section id.
+ uint32_t _targetSectionId; //!< Destination section id.
+ uint64_t _sourceOffset; //!< Source offset (relative to start of the section).
+ uint64_t _data; //!< Relocation data (target offset, target address, etc).
+};
+
+// ============================================================================
+// [asmjit::CodeHolder]
+// ============================================================================
+
+//! Contains basic information about the target architecture plus its settings,
+//! and holds code & data (including sections, labels, and relocation information).
+//! CodeHolder can store both binary and intermediate representation of assembly,
+//! which can be generated by \ref Assembler and/or \ref CodeBuilder.
+//!
+//! NOTE: CodeHolder has ability to attach an \ref ErrorHandler, however, this
+//! error handler is not triggered by CodeHolder itself, it's only used by the
+//! attached code generators.
+class CodeHolder {
+public:
+  ASMJIT_NONCOPYABLE(CodeHolder)
+
+  // --------------------------------------------------------------------------
+  // [Construction / Destruction]
+  // --------------------------------------------------------------------------
+
+  //! Create an uninitialized CodeHolder (you must init() it before it can be used).
+  ASMJIT_API CodeHolder() noexcept;
+  //! Destroy the CodeHolder.
+  ASMJIT_API ~CodeHolder() noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Init / Reset]
+  // --------------------------------------------------------------------------
+
+  //! Get whether the CodeHolder has been initialized, see `init()`.
+  ASMJIT_INLINE bool isInitialized() const noexcept { return _codeInfo.isInitialized(); }
+
+  //! Initialize the CodeHolder to hold code described by `codeInfo`.
+  ASMJIT_API Error init(const CodeInfo& info) noexcept;
+  //! Detach all code-generators attached and reset the \ref CodeHolder.
+  ASMJIT_API void reset(bool releaseMemory = false) noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Attach / Detach]
+  // --------------------------------------------------------------------------
+
+  //! Attach a \ref CodeEmitter to this \ref CodeHolder.
+  ASMJIT_API Error attach(CodeEmitter* emitter) noexcept;
+  //! Detach a \ref CodeEmitter from this \ref CodeHolder.
+  ASMJIT_API Error detach(CodeEmitter* emitter) noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Sync]
+  // --------------------------------------------------------------------------
+
+  //! Synchronize all states of all `CodeEmitter`s associated with the CodeHolder.
+  //! This is required as some code generators don't sync every time they do
+  //! something - for example \ref Assembler generally syncs when it needs to
+  //! reallocate the \ref CodeBuffer, but not each time it encodes instruction
+  //! or directive.
+  ASMJIT_API void sync() noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Code-Information]
+  // --------------------------------------------------------------------------
+
+  //! Get code/target information, see \ref CodeInfo.
+  ASMJIT_INLINE const CodeInfo& getCodeInfo() const noexcept { return _codeInfo; }
+  //! Get architecture information, see \ref ArchInfo.
+  ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _codeInfo.getArchInfo(); }
+
+  //! Get the target's architecture type.
+  ASMJIT_INLINE uint32_t getArchType() const noexcept { return getArchInfo().getType(); }
+  //! Get the target's architecture sub-type.
+  ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return getArchInfo().getSubType(); }
+
+  //! Get if a static base-address is set.
+  ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _codeInfo.hasBaseAddress(); }
+  //! Get a static base-address (uint64_t).
+  ASMJIT_INLINE uint64_t getBaseAddress() const noexcept { return _codeInfo.getBaseAddress(); }
+
+  // --------------------------------------------------------------------------
+  // [Global Information]
+  // --------------------------------------------------------------------------
+
+  //! Get global hints, internally propagated to all `CodeEmitter`s attached.
+  ASMJIT_INLINE uint32_t getGlobalHints() const noexcept { return _globalHints; }
+  //! Get global options, internally propagated to all `CodeEmitter`s attached.
+  ASMJIT_INLINE uint32_t getGlobalOptions() const noexcept { return _globalOptions; }
+
+  // --------------------------------------------------------------------------
+  // [Result Information]
+  // --------------------------------------------------------------------------
+
+  //! Get the size code & data of all sections.
+  ASMJIT_API size_t getCodeSize() const noexcept;
+
+  //! Get size of all possible trampolines.
+  //!
+  //! Trampolines are needed to successfully generate relative jumps to absolute
+  //! addresses. This value is only non-zero if jmp of call instructions were
+  //! used with immediate operand (this means jumping or calling an absolute
+  //! address directly).
+  ASMJIT_INLINE size_t getTrampolinesSize() const noexcept { return _trampolinesSize; }
+
+  // --------------------------------------------------------------------------
+  // [Logging & Error Handling]
+  // --------------------------------------------------------------------------
+
+#if !defined(ASMJIT_DISABLE_LOGGING)
+  //! Get if a logger attached.
+  ASMJIT_INLINE bool hasLogger() const noexcept { return _logger != nullptr; }
+  //! Get the attached logger.
+  ASMJIT_INLINE Logger* getLogger() const noexcept { return _logger; }
+  //! Attach a `logger` to CodeHolder and propagate it to all attached `CodeEmitter`s.
+  ASMJIT_API void setLogger(Logger* logger) noexcept;
+  //! Reset the logger (does nothing if not attached).
+  ASMJIT_INLINE void resetLogger() noexcept { setLogger(nullptr); }
+#endif // !ASMJIT_DISABLE_LOGGING
+
+  //! Get if error-handler is attached.
+  ASMJIT_INLINE bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+  //! Get the error-handler.
+  ASMJIT_INLINE ErrorHandler* getErrorHandler() const noexcept { return _errorHandler; }
+  //! Set the error handler, will affect all attached `CodeEmitter`s.
+  ASMJIT_API Error setErrorHandler(ErrorHandler* handler) noexcept;
+  //! Reset the error handler (does nothing if not attached).
+  ASMJIT_INLINE void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+  // --------------------------------------------------------------------------
+  // [Sections]
+  // --------------------------------------------------------------------------
+
+  //! Get array of `SectionEntry*` records.
+  ASMJIT_INLINE const ZoneVector<SectionEntry*>& getSections() const noexcept { return _sections; }
+
+  //! Get a section entry of the given index.
+  ASMJIT_INLINE SectionEntry* getSectionEntry(size_t index) const noexcept { return _sections[index]; }
+
+  ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept;
+  ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Labels & Symbols]
+  // --------------------------------------------------------------------------
+
+  //! Create a new anonymous label and return its id in `idOut`.
+  //!
+  //! Returns `Error`, does not report error to \ref ErrorHandler.
+  ASMJIT_API Error newLabelId(uint32_t& idOut) noexcept;
+
+  //! Create a new named label of label-type `type`.
+  //!
+  //! Returns `Error`, does not report error to \ref ErrorHandler.
+  ASMJIT_API Error newNamedLabelId(uint32_t& idOut, const char* name, size_t nameLength, uint32_t type, uint32_t parentId) noexcept;
+
+  //! Get a label id by name.
+  ASMJIT_API uint32_t getLabelIdByName(const char* name, size_t nameLength = Globals::kInvalidIndex, uint32_t parentId = 0) noexcept;
+
+  //! Create a new label-link used to store information about yet unbound labels.
+  //!
+  //! Returns `null` if the allocation failed.
+  ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept;
+
+  //! Get array of `LabelEntry*` records.
+  ASMJIT_INLINE const ZoneVector<LabelEntry*>& getLabelEntries() const noexcept { return _labels; }
+
+  //! Get number of labels created.
+  ASMJIT_INLINE size_t getLabelsCount() const noexcept { return _labels.getLength(); }
+
+  //! Get number of label references, which are unresolved at the moment.
+  ASMJIT_INLINE size_t getUnresolvedLabelsCount() const noexcept { return _unresolvedLabelsCount; }
+
+  //! Get if the `label` is valid (i.e. created by `newLabelId()`).
+  ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
+    return isLabelValid(label.getId());
+  }
+  //! Get if the label having `id` is valid (i.e. created by `newLabelId()`).
+  ASMJIT_INLINE bool isLabelValid(uint32_t labelId) const noexcept {
+    size_t index = Operand::unpackId(labelId);
+    return index < _labels.getLength();
+  }
+
+  //! Get if the `label` is already bound.
+  //!
+  //! Returns `false` if the `label` is not valid.
+  ASMJIT_INLINE bool isLabelBound(const Label& label) const noexcept {
+    return isLabelBound(label.getId());
+  }
+  //! \overload
+  ASMJIT_INLINE bool isLabelBound(uint32_t id) const noexcept {
+    size_t index = Operand::unpackId(id);
+    return index < _labels.getLength() && _labels[index]->isBound();
+  }
+
+  //! Get a `label` offset or -1 if the label is not yet bound.
+  ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const noexcept {
+    return getLabelOffset(label.getId());
+  }
+  //! \overload
+  ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const noexcept {
+    ASMJIT_ASSERT(isLabelValid(id));
+    return _labels[Operand::unpackId(id)]->getOffset();
+  }
+
+  //! Get information about the given `label`.
+  ASMJIT_INLINE LabelEntry* getLabelEntry(const Label& label) const noexcept {
+    return getLabelEntry(label.getId());
+  }
+  //! Get information about a label having the given `id`.
+  ASMJIT_INLINE LabelEntry* getLabelEntry(uint32_t id) const noexcept {
+    size_t index = static_cast<size_t>(Operand::unpackId(id));
+    return index < _labels.getLength() ? _labels[index] : static_cast<LabelEntry*>(nullptr);
+  }
+
+  // --------------------------------------------------------------------------
+  // [Relocations]
+  // --------------------------------------------------------------------------
+
+  //! Create a new relocation entry of type `type` and size `size`.
+  //!
+  //! Additional fields can be set after the relocation entry was created.
+  ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t type, uint32_t size) noexcept;
+
+  //! Get if the code contains relocations.
+  ASMJIT_INLINE bool hasRelocations() const noexcept { return !_relocations.isEmpty(); }
+  //! Get array of `RelocEntry*` records.
+  ASMJIT_INLINE const ZoneVector<RelocEntry*>& getRelocEntries() const noexcept { return _relocations; }
+
+  ASMJIT_INLINE RelocEntry* getRelocEntry(uint32_t id) const noexcept { return _relocations[id]; }
+
+  //! Relocate the code to `baseAddress` and copy it to `dst`.
+  //!
+  //! \param dst Contains the location where the relocated code should be
+  //! copied. The pointer can be address returned by virtual memory allocator
+  //! or any other address that has sufficient space.
+  //!
+  //! \param baseAddress Base address used for relocation. `JitRuntime` always
+  //! sets the `baseAddress` to be the same as `dst`.
+  //!
+  //! \return The number bytes actually used. If the code emitter reserved
+  //! space for possible trampolines, but didn't use it, the number of bytes
+  //! used can actually be less than the expected worst case. Virtual memory
+  //! allocator can shrink the memory it allocated initially.
+  //!
+  //! A given buffer will be overwritten, to get the number of bytes required,
+  //! use `getCodeSize()`.
+  ASMJIT_API size_t relocate(void* dst, uint64_t baseAddress = Globals::kNoBaseAddress) const noexcept;
+
+  // --------------------------------------------------------------------------
+  // [Members]
+  // --------------------------------------------------------------------------
+
+  CodeInfo _codeInfo;                    //!< Basic information about the code (architecture and other info).
+
+  uint32_t _globalHints;                 //!< Global hints, propagated to all `CodeEmitter`s.
+  uint32_t _globalOptions;               //!< Global options, propagated to all `CodeEmitter`s.
+
+  CodeEmitter* _emitters;                //!< Linked-list of all attached `CodeEmitter`s.
+  Assembler* _cgAsm;                     //!< Attached \ref Assembler (only one at a time).
+
+  Logger* _logger;                       //!< Attached \ref Logger, used by all consumers.
+  ErrorHandler* _errorHandler;           //!< Attached \ref ErrorHandler.
+
+  uint32_t _unresolvedLabelsCount;       //!< Count of label references which were not resolved.
+  uint32_t _trampolinesSize;             //!< Size of all possible trampolines.
+
+  Zone _baseZone;                        //!< Base zone (used to allocate core structures).
+  Zone _dataZone;                        //!< Data zone (used to allocate extra data like label names).
+  ZoneHeap _baseHeap;                    //!< Zone allocator, used to manage internal containers.
+
+  ZoneVector<SectionEntry*> _sections;   //!< Section entries.
+  ZoneVector<LabelEntry*> _labels;       //!< Label entries (each label is stored here).
+  ZoneVector<RelocEntry*> _relocations;  //!< Relocation entries.
+  ZoneHash<LabelEntry> _namedLabels;     //!< Label name -> LabelEntry (only named labels).
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_CODEHOLDER_H
diff --git a/libraries/asmjit/asmjit/base/constpool.cpp b/libraries/asmjit/asmjit/base/constpool.cpp
new file mode 100644
index 00000000000..799abd1c2a9
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/constpool.cpp
@@ -0,0 +1,511 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/constpool.h"
+#include "../base/utils.h"
+
+#include <algorithm>
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// Binary tree code is based on Julienne Walker's "Andersson Binary Trees"
+// article and implementation. However, only three operations are implemented -
+// get, insert and traverse.
+
+// ============================================================================
+// [asmjit::ConstPool::Tree - Ops]
+// ============================================================================
+
+//! \internal
+//!
+//! AA-tree "skew" operation - removes a left horizontal link by rotating
+//! the subtree right, so the left child becomes the new subtree root.
+static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) noexcept {
+  ConstPool::Node* leftChild = node->_link[0];
+
+  // Nothing to do unless the left child exists at the same level.
+  if (node->_level == 0 || !leftChild || leftChild->_level != node->_level)
+    return node;
+
+  // Rotate right.
+  node->_link[0] = leftChild->_link[1];
+  leftChild->_link[1] = node;
+  return leftChild;
+}
+
+//! \internal
+//!
+//! AA-tree "split" operation - removes two consecutive right horizontal
+//! links by rotating left and promoting the right child one level up.
+static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) noexcept {
+  ConstPool::Node* rightChild = node->_link[1];
+
+  // Nothing to do unless the right grandchild exists at the same level.
+  if (node->_level == 0 || !rightChild || !rightChild->_link[1] || rightChild->_link[1]->_level != node->_level)
+    return node;
+
+  // Rotate left and promote the new subtree root.
+  node->_link[1] = rightChild->_link[0];
+  rightChild->_link[0] = node;
+  rightChild->_level++;
+  return rightChild;
+}
+
+//! Find a node whose constant data equals `data` (compared over `_dataSize`
+//! bytes); returns `nullptr` when no such constant exists in this tree.
+ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept {
+  size_t dataSize = _dataSize;
+
+  for (ConstPool::Node* node = _root; node; ) {
+    int cmp = ::memcmp(node->getData(), data, dataSize);
+    if (cmp == 0)
+      return node;
+    // Descend left (cmp > 0) or right (cmp < 0).
+    node = node->_link[cmp < 0];
+  }
+
+  return nullptr;
+}
+
+// Insert `newNode` into the AA-tree and rebalance the search path with
+// skew/split rotations. `newNode` must not already be present in the tree.
+void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
+  size_t dataSize = _dataSize;
+  _length++;
+
+  // Empty tree - the new node simply becomes the root.
+  if (!_root) {
+    _root = newNode;
+    return;
+  }
+
+  ConstPool::Node* node = _root;
+  // Path from root to the insertion point, replayed bottom-up to rebalance.
+  // NOTE(review): there is no overflow check on `top`; this assumes the tree
+  // height never exceeds kHeightLimit (64) - confirm against tree invariants.
+  ConstPool::Node* stack[kHeightLimit];
+
+  unsigned int top = 0;
+  unsigned int dir;
+
+  // Find a spot and save the stack.
+  for (;;) {
+    stack[top++] = node;
+    // dir == 0 descends left, dir == 1 descends right.
+    dir = ::memcmp(node->getData(), newNode->getData(), dataSize) < 0;
+
+    ConstPool::Node* link = node->_link[dir];
+    if (!link) break;
+
+    node = link;
+  }
+
+  // Link and rebalance.
+  node->_link[dir] = newNode;
+
+  while (top > 0) {
+    // Which child?
+    node = stack[--top];
+
+    if (top != 0) {
+      dir = stack[top - 1]->_link[1] == node;
+    }
+
+    // AA-tree maintenance: skew removes a left horizontal link, split
+    // removes consecutive right horizontal links.
+    node = ConstPoolTree_skewNode(node);
+    node = ConstPoolTree_splitNode(node);
+
+    // Fix the parent.
+    if (top != 0)
+      stack[top - 1]->_link[dir] = node;
+    else
+      _root = node;
+  }
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Construction / Destruction]
+// ============================================================================
+
+//! Create a constant pool that allocates its nodes and gaps from `zone`.
+ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
+//! Destroy the pool; all memory is owned by the zone, nothing to release here.
+ConstPool::~ConstPool() noexcept {}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+// Rebind the pool to `zone` and clear all trees, gap lists and counters.
+// Tree at index `i` stores constants of size (1 << i) bytes.
+void ConstPool::reset(Zone* zone) noexcept {
+  _zone = zone;
+
+  size_t dataSize = 1;
+  for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++, dataSize <<= 1) {
+    _tree[i].reset();
+    _tree[i].setDataSize(dataSize);
+    _gaps[i] = nullptr;
+  }
+
+  _gapPool = nullptr;
+  _size = 0;
+  _alignment = 0;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Ops]
+// ============================================================================
+
+//! \internal
+//!
+//! Get a `Gap` record - reuse one from the free-list when available,
+//! otherwise allocate a new one from the zone (may return `nullptr` on OOM).
+static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
+  ConstPool::Gap* gap = self->_gapPool;
+  // Template argument restored here - it was stripped during extraction;
+  // `allocT` requires the allocated type to be spelled out.
+  if (!gap) return self->_zone->allocT<ConstPool::Gap>();
+
+  self->_gapPool = gap->_next;
+  return gap;
+}
+
+//! \internal
+//!
+//! Return `gap` to the pool's intrusive free-list so ConstPool_allocGap()
+//! can recycle it instead of hitting the zone allocator again.
+static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
+  ConstPool::Gap* head = self->_gapPool;
+  gap->_next = head;
+  self->_gapPool = gap;
+}
+
+//! \internal
+//!
+//! Record an unused region `[offset, offset + length)` as reusable gaps.
+//! The region is carved greedily into the largest chunks whose size matches
+//! their alignment (16/8/4/2/1 bytes), each pushed to its size-class list.
+static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noexcept {
+  ASMJIT_ASSERT(length > 0);
+
+  while (length > 0) {
+    size_t gapIndex;
+    size_t gapLength;
+
+    if (length >= 16 && Utils::isAligned(offset, 16)) {
+      gapIndex = ConstPool::kIndex16;
+      gapLength = 16;
+    }
+    else if (length >= 8 && Utils::isAligned(offset, 8)) {
+      gapIndex = ConstPool::kIndex8;
+      gapLength = 8;
+    }
+    else if (length >= 4 && Utils::isAligned(offset, 4)) {
+      gapIndex = ConstPool::kIndex4;
+      gapLength = 4;
+    }
+    else if (length >= 2 && Utils::isAligned(offset, 2)) {
+      gapIndex = ConstPool::kIndex2;
+      gapLength = 2;
+    }
+    else {
+      gapIndex = ConstPool::kIndex1;
+      gapLength = 1;
+    }
+
+    // Allocation failure is harmless here - the gap is simply forgotten and
+    // a real allocation later will report kErrorNoHeapMemory where it matters.
+    ConstPool::Gap* gap = ConstPool_allocGap(self);
+    if (!gap) return;
+
+    gap->_offset = offset;
+    gap->_length = gapLength;
+    gap->_next = self->_gaps[gapIndex];
+    self->_gaps[gapIndex] = gap;
+
+    offset += gapLength;
+    length -= gapLength;
+  }
+}
+
+//! Add a constant of `size` bytes (1, 2, 4, 8, 16 or 32) pointed to by `data`
+//! and store its offset within the pool into `dstOffset`. Identical constants
+//! are deduplicated - adding the same bytes twice returns the same offset.
+Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
+  size_t treeIndex;
+
+  if (size == 32)
+    treeIndex = kIndex32;
+  else if (size == 16)
+    treeIndex = kIndex16;
+  else if (size == 8)
+    treeIndex = kIndex8;
+  else if (size == 4)
+    treeIndex = kIndex4;
+  else if (size == 2)
+    treeIndex = kIndex2;
+  else if (size == 1)
+    treeIndex = kIndex1;
+  else
+    return DebugUtils::errored(kErrorInvalidArgument);
+
+  // Already in the pool? Return the existing offset.
+  ConstPool::Node* node = _tree[treeIndex].get(data);
+  if (node) {
+    dstOffset = node->_offset;
+    return kErrorOk;
+  }
+
+  // Before incrementing the current offset try if there is a gap that can
+  // be used for the requested data.
+  // (Template argument of static_cast restored - stripped during extraction.)
+  size_t offset = ~static_cast<size_t>(0);
+  size_t gapIndex = treeIndex;
+
+  while (gapIndex != kIndexCount - 1) {
+    // NOTE(review): this reads `_gaps[treeIndex]` while `gapIndex` advances;
+    // only exact-size gaps are ever consumed - verify against upstream intent.
+    ConstPool::Gap* gap = _gaps[treeIndex];
+
+    // Check if there is a gap.
+    if (gap) {
+      size_t gapOffset = gap->_offset;
+      size_t gapLength = gap->_length;
+
+      // Destroy the gap for now.
+      _gaps[treeIndex] = gap->_next;
+      ConstPool_freeGap(this, gap);
+
+      offset = gapOffset;
+      ASMJIT_ASSERT(Utils::isAligned(offset, size));
+
+      gapLength -= size;
+      if (gapLength > 0)
+        ConstPool_addGap(this, gapOffset, gapLength);
+    }
+
+    gapIndex++;
+  }
+
+  if (offset == ~static_cast<size_t>(0)) {
+    // Get how many bytes have to be skipped so the address is aligned accordingly
+    // to the 'size'.
+    size_t diff = Utils::alignDiff(_size, size);
+
+    if (diff != 0) {
+      ConstPool_addGap(this, _size, diff);
+      _size += diff;
+    }
+
+    offset = _size;
+    _size += size;
+  }
+
+  // Add the initial node to the right index.
+  node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
+  if (!node) return DebugUtils::errored(kErrorNoHeapMemory);
+
+  _tree[treeIndex].put(node);
+  _alignment = std::max(_alignment, size);
+
+  dstOffset = offset;
+
+  // Now create a bunch of shared constants that are based on the data pattern.
+  // We stop at size 4, it probably doesn't make sense to split constants down
+  // to 1 byte.
+  size_t pCount = 1;
+  while (size > 4) {
+    size >>= 1;
+    pCount <<= 1;
+
+    ASMJIT_ASSERT(treeIndex != 0);
+    treeIndex--;
+
+    // Template argument of static_cast restored - stripped during extraction.
+    const uint8_t* pData = static_cast<const uint8_t*>(data);
+    for (size_t i = 0; i < pCount; i++, pData += size) {
+      node = _tree[treeIndex].get(pData);
+      if (node) continue;
+
+      node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
+      _tree[treeIndex].put(node);
+    }
+  }
+
+  return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+//! \internal
+//!
+//! Tree visitor used by `ConstPool::fill()` - copies each non-shared constant
+//! to its recorded offset inside the destination buffer.
+struct ConstPoolFill {
+  ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
+    _dst(dst),
+    _dataSize(dataSize) {}
+
+  ASMJIT_INLINE void visit(const ConstPool::Node* node) noexcept {
+    // Shared nodes are sub-patterns of a larger constant written by another
+    // tree, so they must not be copied again.
+    if (!node->_shared)
+      ::memcpy(_dst + node->_offset, node->getData(), _dataSize);
+  }
+
+  uint8_t* _dst;                         //!< Destination buffer.
+  size_t _dataSize;                      //!< Constant size of the tree currently visited.
+};
+
+//! Copy all constants into `dst`, which must be at least `getSize()` bytes.
+//! Unused gap bytes are zeroed so no garbage ever leaks into emitted code.
+void ConstPool::fill(void* dst) const noexcept {
+  // Clears possible gaps, asmjit should never emit garbage to the output.
+  ::memset(dst, 0, _size);
+
+  // Template argument of static_cast restored - stripped during extraction.
+  ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
+  for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+    // Visit the tree of each size-class; the filler's stride doubles with it.
+    _tree[i].iterate(filler);
+    filler._dataSize <<= 1;
+  }
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Test]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// Self-test exercising constant deduplication, pattern splitting, gap reuse,
+// alignment handling and reset behavior of ConstPool.
+UNIT(base_constpool) {
+  Zone zone(32384 - Zone::kZoneOverhead);
+  ConstPool pool(&zone);
+
+  uint32_t i;
+  uint32_t kCount = 1000000;
+
+  INFO("Adding %u constants to the pool.", kCount);
+  {
+    size_t prevOffset;
+    size_t curOffset;
+    uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
+
+    EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk,
+      "pool.add() - Returned error");
+    EXPECT(prevOffset == 0,
+      "pool.add() - First constant should have zero offset");
+
+    for (i = 1; i < kCount; i++) {
+      c++;
+      EXPECT(pool.add(&c, 8, curOffset) == kErrorOk,
+        "pool.add() - Returned error");
+      EXPECT(prevOffset + 8 == curOffset,
+        "pool.add() - Returned incorrect curOffset");
+      EXPECT(pool.getSize() == (i + 1) * 8,
+        "pool.getSize() - Reported incorrect size");
+      prevOffset = curOffset;
+    }
+
+    EXPECT(pool.getAlignment() == 8,
+      "pool.getAlignment() - Expected 8-byte alignment");
+  }
+
+  INFO("Retrieving %u constants from the pool.", kCount);
+  {
+    uint64_t c = ASMJIT_UINT64_C(0x0101010101010101);
+
+    for (i = 0; i < kCount; i++) {
+      size_t offset;
+      EXPECT(pool.add(&c, 8, offset) == kErrorOk,
+        "pool.add() - Returned error");
+      EXPECT(offset == i * 8,
+        "pool.add() - Should have reused constant");
+      c++;
+    }
+  }
+
+  INFO("Checking if the constants were split into 4-byte patterns");
+  {
+    uint32_t c = 0x01010101;
+    for (i = 0; i < kCount; i++) {
+      size_t offset;
+      EXPECT(pool.add(&c, 4, offset) == kErrorOk,
+        "pool.add() - Returned error");
+      EXPECT(offset == i * 8,
+        "pool.add() - Should reuse existing constant");
+      c++;
+    }
+  }
+
+  INFO("Adding 2 byte constant to misalign the current offset");
+  {
+    uint16_t c = 0xFFFF;
+    size_t offset;
+
+    EXPECT(pool.add(&c, 2, offset) == kErrorOk,
+      "pool.add() - Returned error");
+    EXPECT(offset == kCount * 8,
+      "pool.add() - Didn't return expected position");
+    EXPECT(pool.getAlignment() == 8,
+      "pool.getAlignment() - Expected 8-byte alignment");
+  }
+
+  INFO("Adding 8 byte constant to check if pool gets aligned again");
+  {
+    uint64_t c = ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF);
+    size_t offset;
+
+    EXPECT(pool.add(&c, 8, offset) == kErrorOk,
+      "pool.add() - Returned error");
+    EXPECT(offset == kCount * 8 + 8,
+      "pool.add() - Didn't return aligned offset");
+  }
+
+  INFO("Adding 2 byte constant to verify the gap is filled");
+  {
+    uint16_t c = 0xFFFE;
+    size_t offset;
+
+    EXPECT(pool.add(&c, 2, offset) == kErrorOk,
+      "pool.add() - Returned error");
+    EXPECT(offset == kCount * 8 + 2,
+      "pool.add() - Didn't fill the gap");
+    EXPECT(pool.getAlignment() == 8,
+      "pool.getAlignment() - Expected 8-byte alignment");
+  }
+
+  INFO("Checking reset functionality");
+  {
+    pool.reset(&zone);
+    zone.reset();
+
+    EXPECT(pool.getSize() == 0,
+      "pool.getSize() - Expected pool size to be zero");
+    EXPECT(pool.getAlignment() == 0,
+      "pool.getSize() - Expected pool alignment to be zero");
+  }
+
+  INFO("Checking pool alignment when combined constants are added");
+  {
+    uint8_t bytes[32] = { 0 };
+    size_t offset;
+
+    pool.add(bytes, 1, offset);
+
+    EXPECT(pool.getSize() == 1,
+      "pool.getSize() - Expected pool size to be 1 byte");
+    EXPECT(pool.getAlignment() == 1,
+      "pool.getSize() - Expected pool alignment to be 1 byte");
+    EXPECT(offset == 0,
+      "pool.getSize() - Expected offset returned to be zero");
+
+    pool.add(bytes, 2, offset);
+
+    EXPECT(pool.getSize() == 4,
+      "pool.getSize() - Expected pool size to be 4 bytes");
+    EXPECT(pool.getAlignment() == 2,
+      "pool.getSize() - Expected pool alignment to be 2 bytes");
+    EXPECT(offset == 2,
+      "pool.getSize() - Expected offset returned to be 2");
+
+    pool.add(bytes, 4, offset);
+
+    EXPECT(pool.getSize() == 8,
+      "pool.getSize() - Expected pool size to be 8 bytes");
+    EXPECT(pool.getAlignment() == 4,
+      "pool.getSize() - Expected pool alignment to be 4 bytes");
+    EXPECT(offset == 4,
+      "pool.getSize() - Expected offset returned to be 4");
+
+    pool.add(bytes, 4, offset);
+
+    // NOTE(review): the message below says "to be 8" but the assertion checks
+    // offset == 4 (the constant is deduplicated) - the message looks stale.
+    EXPECT(pool.getSize() == 8,
+      "pool.getSize() - Expected pool size to be 8 bytes");
+    EXPECT(pool.getAlignment() == 4,
+      "pool.getSize() - Expected pool alignment to be 4 bytes");
+    EXPECT(offset == 4,
+      "pool.getSize() - Expected offset returned to be 8");
+
+    pool.add(bytes, 32, offset);
+    EXPECT(pool.getSize() == 64,
+      "pool.getSize() - Expected pool size to be 64 bytes");
+    EXPECT(pool.getAlignment() == 32,
+      "pool.getSize() - Expected pool alignment to be 32 bytes");
+    EXPECT(offset == 32,
+      "pool.getSize() - Expected offset returned to be 32");
+  }
+}
+#endif // ASMJIT_TEST
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/constpool.h b/libraries/asmjit/asmjit/base/constpool.h
new file mode 100644
index 00000000000..945ea647bbc
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/constpool.h
@@ -0,0 +1,257 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_CONSTPOOL_H
+#define _ASMJIT_BASE_CONSTPOOL_H
+
+// [Dependencies]
+#include "../base/zone.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [asmjit::ConstPool]
+// ============================================================================
+
+//! Constant pool.
+//!
+//! Deduplicates constants of 1..32 bytes and assigns each an offset within a
+//! single contiguous blob, see `add()` and `fill()`.
+class ConstPool {
+public:
+  ASMJIT_NONCOPYABLE(ConstPool)
+
+  //! Size-class indexes; `_tree` and `_gaps` are indexed by these constants,
+  //! where index `i` corresponds to constants of size `1 << i` bytes.
+  enum {
+    kIndex1 = 0,
+    kIndex2 = 1,
+    kIndex4 = 2,
+    kIndex8 = 3,
+    kIndex16 = 4,
+    kIndex32 = 5,
+    kIndexCount = 6
+  };
+
+ // --------------------------------------------------------------------------
+ // [Gap]
+ // --------------------------------------------------------------------------
+
+  //! \internal
+  //!
+  //! Zone-allocated const-pool gap - a free region created by alignment
+  //! padding, kept on a per-size free-list and reused for smaller constants.
+  struct Gap {
+    Gap* _next;                          //!< Pointer to the next gap
+    size_t _offset;                      //!< Offset of the gap.
+    size_t _length;                      //!< Remaining bytes of the gap (basically a gap size).
+  };
+
+ // --------------------------------------------------------------------------
+ // [Node]
+ // --------------------------------------------------------------------------
+
+  //! \internal
+  //!
+  //! Zone-allocated const-pool node; the constant's bytes are stored
+  //! immediately after the node structure itself.
+  struct Node {
+    //! Get a pointer to the constant data stored right after this node.
+    //! (Template arguments of both casts restored - stripped during extraction.)
+    ASMJIT_INLINE void* getData() const noexcept {
+      return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
+    }
+
+    Node* _link[2];                      //!< Left/Right nodes.
+    uint32_t _level : 31;                //!< Horizontal level for balance.
+    uint32_t _shared : 1;                //!< If this constant is shared with another.
+    uint32_t _offset;                    //!< Data offset from the beginning of the pool.
+  };
+
+ // --------------------------------------------------------------------------
+ // [Tree]
+ // --------------------------------------------------------------------------
+
+  //! \internal
+  //!
+  //! Zone-allocated const-pool tree - an AA-tree keyed by the raw constant
+  //! bytes (`_dataSize` bytes per key), see `get()` / `put()`.
+  struct Tree {
+    enum {
+      //! Maximum tree height == log2(1 << 64).
+      kHeightLimit = 64
+    };
+
+    // ------------------------------------------------------------------------
+    // [Construction / Destruction]
+    // ------------------------------------------------------------------------
+
+    ASMJIT_INLINE Tree(size_t dataSize = 0) noexcept
+      : _root(nullptr),
+        _length(0),
+        _dataSize(dataSize) {}
+    ASMJIT_INLINE ~Tree() {}
+
+    // ------------------------------------------------------------------------
+    // [Reset]
+    // ------------------------------------------------------------------------
+
+    //! Forget all nodes (their memory is owned by the zone, nothing is freed).
+    ASMJIT_INLINE void reset() noexcept {
+      _root = nullptr;
+      _length = 0;
+    }
+
+    // ------------------------------------------------------------------------
+    // [Accessors]
+    // ------------------------------------------------------------------------
+
+    ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; }
+    ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
+
+    //! Set the key size compared by `get()`/`put()`; only valid on an empty tree.
+    ASMJIT_INLINE void setDataSize(size_t dataSize) noexcept {
+      ASMJIT_ASSERT(isEmpty());
+      _dataSize = dataSize;
+    }
+
+    // ------------------------------------------------------------------------
+    // [Ops]
+    // ------------------------------------------------------------------------
+
+    ASMJIT_API Node* get(const void* data) noexcept;
+    ASMJIT_API void put(Node* node) noexcept;
+
+    // ------------------------------------------------------------------------
+    // [Iterate]
+    // ------------------------------------------------------------------------
+
+    //! In-order traversal calling `visitor.visit(node)` for every node.
+    //! (The `template` parameter list was stripped during extraction and is
+    //! restored here.)
+    template<typename Visitor>
+    ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept {
+      Node* node = const_cast<Node*>(_root);
+      if (!node) return;
+
+      Node* stack[kHeightLimit];
+      size_t top = 0;
+
+      for (;;) {
+        Node* left = node->_link[0];
+        if (left != nullptr) {
+          ASMJIT_ASSERT(top != kHeightLimit);
+          stack[top++] = node;
+
+          node = left;
+          continue;
+        }
+
+Visit:
+        visitor.visit(node);
+        node = node->_link[1];
+        if (node != nullptr)
+          continue;
+
+        if (top == 0)
+          return;
+
+        node = stack[--top];
+        goto Visit;
+      }
+    }
+
+    // ------------------------------------------------------------------------
+    // [Helpers]
+    // ------------------------------------------------------------------------
+
+    //! Allocate a node (with `size` bytes of constant data copied after it)
+    //! from `zone`; returns `nullptr` on allocation failure.
+    static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
+      Node* node = zone->allocT<Node>(sizeof(Node) + size);
+      if (ASMJIT_UNLIKELY(!node)) return nullptr;
+
+      node->_link[0] = nullptr;
+      node->_link[1] = nullptr;
+      node->_level = 1;
+      node->_shared = shared;
+      node->_offset = static_cast<uint32_t>(offset);
+
+      ::memcpy(node->getData(), data, size);
+      return node;
+    }
+
+    // ------------------------------------------------------------------------
+    // [Members]
+    // ------------------------------------------------------------------------
+
+    Node* _root;                         //!< Root of the tree
+    size_t _length;                      //!< Length of the tree (count of nodes).
+    size_t _dataSize;                    //!< Size of the data.
+  };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API ConstPool(Zone* zone) noexcept;
+ ASMJIT_API ~ConstPool() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Reset]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API void reset(Zone* zone) noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Ops]
+ // --------------------------------------------------------------------------
+
+ //! Get whether the constant-pool is empty.
+ ASMJIT_INLINE bool isEmpty() const noexcept { return _size == 0; }
+ //! Get the size of the constant-pool in bytes.
+ ASMJIT_INLINE size_t getSize() const noexcept { return _size; }
+ //! Get minimum alignment.
+ ASMJIT_INLINE size_t getAlignment() const noexcept { return _alignment; }
+
+ //! Add a constant to the constant pool.
+ //!
+ //! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
+  //! The constant is added to the pool only if it doesn't exist, otherwise
+ //! cached value is returned.
+ //!
+ //! AsmJit is able to subdivide added constants, so for example if you add
+ //! 8-byte constant 0x1122334455667788 it will create the following slots:
+ //!
+ //! 8-byte: 0x1122334455667788
+ //! 4-byte: 0x11223344, 0x55667788
+ //!
+ //! The reason is that when combining MMX/SSE/AVX code some patterns are used
+ //! frequently. However, AsmJit is not able to reallocate a constant that has
+ //! been already added. For example if you try to add 4-byte constant and then
+ //! 8-byte constant having the same 4-byte pattern as the previous one, two
+ //! independent slots will be generated by the pool.
+ ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Fill]
+ // --------------------------------------------------------------------------
+
+ //! Fill the destination with the constants from the pool.
+ ASMJIT_API void fill(void* dst) const noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ Zone* _zone; //!< Zone allocator.
+ Tree _tree[kIndexCount]; //!< Tree per size.
+ Gap* _gaps[kIndexCount]; //!< Gaps per size.
+ Gap* _gapPool; //!< Gaps pool
+
+ size_t _size; //!< Size of the pool (in bytes).
+ size_t _alignment; //!< Required pool alignment.
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_CONSTPOOL_H
diff --git a/libraries/asmjit/asmjit/base/cpuinfo.cpp b/libraries/asmjit/asmjit/base/cpuinfo.cpp
new file mode 100644
index 00000000000..c8421735d2e
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/cpuinfo.cpp
@@ -0,0 +1,674 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/cpuinfo.h"
+#include "../base/utils.h"
+
+#if ASMJIT_OS_POSIX
+# include <errno.h>        // NOTE(review): header names were lost in transit; restored best guess - confirm against upstream.
+# include <sys/utsname.h>
+# include <unistd.h>       // Required by `sysconf()`.
+#endif // ASMJIT_OS_POSIX
+
+#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
+# if ASMJIT_CC_MSC_GE(14, 0, 0)
+#  include <intrin.h>      // Required by `__cpuid()` and `_xgetbv()`.
+# endif // _MSC_VER >= 1400
+#endif
+
+#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
+# if ASMJIT_OS_LINUX
+#  include <sys/auxv.h>    // Required by `getauxval()`.
+# endif
+#endif
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect ARM]
+// ============================================================================
+
+// ARM information has to be retrieved by the OS (this is how ARM was designed).
+#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
+
+#if ASMJIT_ARCH_ARM32
+//! \internal
+//!
+//! Initialize `cpuInfo` with the A32 architecture type; feature flags are
+//! filled in later by the OS-specific detectors.
+static ASMJIT_INLINE void armPopulateBaselineA32Features(CpuInfo* cpuInfo) noexcept {
+  cpuInfo->_archInfo.init(ArchInfo::kTypeA32);
+}
+#endif // ASMJIT_ARCH_ARM32
+
+#if ASMJIT_ARCH_ARM64
+//! \internal
+//!
+//! Initialize `cpuInfo` with the A64 architecture type and all features that
+//! every AArch64 implementation is guaranteed to provide.
+static ASMJIT_INLINE void armPopulateBaselineA64Features(CpuInfo* cpuInfo) noexcept {
+  cpuInfo->_archInfo.init(ArchInfo::kTypeA64);
+
+  // Thumb (including all variations) is supported on A64 (but not accessible from A64).
+  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
+
+  // A64 is based on ARMv8 and newer.
+  cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
+
+  // A64 comes with these features by default.
+  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv4);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVA);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureIDIVT);
+}
+#endif // ASMJIT_ARCH_ARM64
+
+#if ASMJIT_OS_WINDOWS
+//! \internal
+//!
+//! Detect ARM CPU features on Windows.
+//!
+//! The detection is based on `IsProcessorFeaturePresent()` API call. Features
+//! that Windows-on-ARM mandates (see comments below) are added unconditionally;
+//! optional features are probed through the PF_* mapping table.
+static ASMJIT_INLINE void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
+#if ASMJIT_ARCH_ARM32
+  armPopulateBaselineA32Features(cpuInfo);
+
+  // Windows for ARM requires at least ARMv7 with DSP extensions.
+  cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureEDSP);
+
+  // Windows for ARM requires VFPv3.
+  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv3);
+
+  // Windows for ARM requires and uses THUMB2.
+  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
+  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
+#else
+  armPopulateBaselineA64Features(cpuInfo);
+#endif
+
+  // Windows for ARM requires ASIMD.
+  cpuInfo->addFeature(CpuInfo::kArmFeatureASIMD);
+
+  // Detect additional CPU features by calling `IsProcessorFeaturePresent()`.
+  // Maps a Windows PF_* processor-feature id to an asmjit feature id.
+  struct WinPFPMapping {
+    uint32_t pfpId;
+    uint32_t featureId;
+  };
+
+  static const WinPFPMapping mapping[] = {
+    { PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFPv4     },
+    { PF_ARM_VFP_32_REGISTERS_AVAILABLE  , CpuInfo::kArmFeatureVFP_D32   },
+    { PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIVT     },
+    { PF_ARM_64BIT_LOADSTORE_ATOMIC      , CpuInfo::kArmFeatureAtomics64 }
+  };
+
+  for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++)
+    if (::IsProcessorFeaturePresent(mapping[i].pfpId))
+      cpuInfo->addFeature(mapping[i].featureId);
+}
+#endif // ASMJIT_OS_WINDOWS
+
+#if ASMJIT_OS_LINUX
+//! \internal
+//!
+//! Maps a Linux HWCAP/HWCAP2 bit-mask to an asmjit feature id.
+struct LinuxHWCapMapping {
+  uint32_t hwcapMask;
+  uint32_t featureId;
+};
+
+//! \internal
+//!
+//! Query the auxiliary vector entry `type` (AT_HWCAP or AT_HWCAP2) via
+//! `getauxval()` and add every feature whose mask bits are all set.
+static void armDetectHWCaps(CpuInfo* cpuInfo, unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept {
+  unsigned long mask = getauxval(type);
+
+  for (size_t i = 0; i < length; i++)
+    if ((mask & mapping[i].hwcapMask) == mapping[i].hwcapMask)
+      cpuInfo->addFeature(mapping[i].featureId);
+}
+
+//! \internal
+//!
+//! Detect ARM CPU features on Linux.
+//!
+//! The detection is based on `getauxval()`. The HWCAP bit values below are
+//! hardcoded copies of the kernel's uapi constants (named in the comments);
+//! confirm against `<asm/hwcap.h>` when adding entries.
+ASMJIT_FAVOR_SIZE static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
+#if ASMJIT_ARCH_ARM32
+  armPopulateBaselineA32Features(cpuInfo);
+
+  // `AT_HWCAP` provides ARMv7 (and less) related flags.
+  static const LinuxHWCapMapping hwCapMapping[] = {
+    { /* HWCAP_VFP     */ (1 <<  6), CpuInfo::kArmFeatureVFPv2   },
+    { /* HWCAP_EDSP    */ (1 <<  7), CpuInfo::kArmFeatureEDSP    },
+    { /* HWCAP_NEON    */ (1 << 12), CpuInfo::kArmFeatureASIMD   },
+    { /* HWCAP_VFPv3   */ (1 << 13), CpuInfo::kArmFeatureVFPv3   },
+    { /* HWCAP_VFPv4   */ (1 << 16), CpuInfo::kArmFeatureVFPv4   },
+    { /* HWCAP_IDIVA   */ (1 << 17), CpuInfo::kArmFeatureIDIVA   },
+    { /* HWCAP_IDIVT   */ (1 << 18), CpuInfo::kArmFeatureIDIVT   },
+    { /* HWCAP_VFPD32  */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 }
+  };
+  armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
+
+  // The kernel doesn't report implied features, so derive them here.
+  // VFPv3 implies VFPv2.
+  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3)) {
+    cpuInfo->addFeature(CpuInfo::kArmFeatureVFPv2);
+  }
+
+  // VFPv2 implies ARMv6.
+  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv2)) {
+    cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
+  }
+
+  // VFPv3 or ASIMD implies ARMv7.
+  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFPv3) ||
+      cpuInfo->hasFeature(CpuInfo::kArmFeatureASIMD)) {
+    cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
+  }
+
+  // `AT_HWCAP2` provides ARMv8+ related flags.
+  static const LinuxHWCapMapping hwCap2Mapping[] = {
+    { /* HWCAP2_AES   */ (1 << 0), CpuInfo::kArmFeatureAES    },
+    { /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL  },
+    { /* HWCAP2_SHA1  */ (1 << 2), CpuInfo::kArmFeatureSHA1   },
+    { /* HWCAP2_SHA2  */ (1 << 3), CpuInfo::kArmFeatureSHA256 },
+    { /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32  }
+  };
+  armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping));
+
+  // Any ARMv8-only extension present implies the CPU is ARMv8.
+  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES   ) ||
+      cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) ||
+      cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) ||
+      cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1  ) ||
+      cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) {
+    cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
+  }
+#else
+  armPopulateBaselineA64Features(cpuInfo);
+
+  // `AT_HWCAP` provides ARMv8+ related flags.
+  static const LinuxHWCapMapping hwCapMapping[] = {
+    { /* HWCAP_ASIMD   */ (1 << 1), CpuInfo::kArmFeatureASIMD     },
+    { /* HWCAP_AES     */ (1 << 3), CpuInfo::kArmFeatureAES       },
+    { /* HWCAP_CRC32   */ (1 << 7), CpuInfo::kArmFeatureCRC32     },
+    { /* HWCAP_PMULL   */ (1 << 4), CpuInfo::kArmFeaturePMULL     },
+    { /* HWCAP_SHA1    */ (1 << 5), CpuInfo::kArmFeatureSHA1      },
+    { /* HWCAP_SHA2    */ (1 << 6), CpuInfo::kArmFeatureSHA256    },
+    { /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 }
+  };
+  armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));
+
+  // `AT_HWCAP2` is not used at the moment.
+#endif
+}
+#endif // ASMJIT_OS_LINUX
+
+//! \internal
+//!
+//! Dispatch ARM CPU detection to the OS-specific implementation; fails to
+//! compile on OSes with no detector rather than silently reporting nothing.
+ASMJIT_FAVOR_SIZE static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
+#if ASMJIT_OS_WINDOWS
+  armDetectCpuInfoOnWindows(cpuInfo);
+#elif ASMJIT_OS_LINUX
+  armDetectCpuInfoOnLinux(cpuInfo);
+#else
+# error "[asmjit] armDetectCpuInfo() - Unsupported OS."
+#endif
+}
+#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect X86]
+// ============================================================================
+
+#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
+
+//! \internal
+//!
+//! X86 CPUID result (register values after executing `cpuid`).
+struct CpuIdResult {
+  uint32_t eax, ebx, ecx, edx;
+};
+
+//! \internal
+//!
+//! Content of XCR register, result of XGETBV instruction.
+struct XGetBVResult {
+  uint32_t eax, edx;
+};
+
+#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(15, 0, 30729) && ASMJIT_ARCH_X64
+//! \internal
+//!
+//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However,
+//! 64-bit calling convention specifies the first parameter to be passed by
+//! ECX, so we may be lucky if compiler doesn't move the register, otherwise
+//! the result would be wrong. `inEcx` is intentionally unused in the body -
+//! it only exists to land in ECX before `cpuid` executes. NOINLINE keeps the
+//! calling convention intact.
+static ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept {
+  __cpuid(reinterpret_cast<int*>(result), inEax);
+}
+#endif
+
+//! \internal
+//!
+//! Wrapper to call `cpuid` instruction with leaf `inEax` and sub-leaf `inEcx`,
+//! storing EAX/EBX/ECX/EDX into `result`. Selects per-compiler implementation:
+//! MSVC intrinsic, MSVC inline asm (x86), or GCC/Clang inline asm. The GCC
+//! inline asm saves/restores (R/E)BX manually because it may be reserved as
+//! the PIC/GOT register.
+static ASMJIT_INLINE void x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint32_t inEcx = 0) noexcept {
+#if ASMJIT_CC_MSC && ASMJIT_CC_MSC_GE(15, 0, 30729)
+  __cpuidex(reinterpret_cast<int*>(result), inEax, inEcx);
+#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X64
+  x86CallCpuIdWorkaround(inEcx, inEax, result);
+#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X86
+  uint32_t paramEax = inEax;
+  uint32_t paramEcx = inEcx;
+  uint32_t* out = reinterpret_cast<uint32_t*>(result);
+
+  __asm {
+    mov eax, paramEax
+    mov ecx, paramEcx
+    mov edi, out
+    cpuid
+    mov dword ptr[edi +  0], eax
+    mov dword ptr[edi +  4], ebx
+    mov dword ptr[edi +  8], ecx
+    mov dword ptr[edi + 12], edx
+  }
+#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X86
+  __asm__ __volatile__(
+    "mov %%ebx, %%edi\n"
+    "cpuid\n"
+    "xchg %%edi, %%ebx\n"
+      : "=a"(result->eax),
+        "=D"(result->ebx),
+        "=c"(result->ecx),
+        "=d"(result->edx)
+      : "a"(inEax),
+        "c"(inEcx));
+#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG || ASMJIT_CC_INTEL) && ASMJIT_ARCH_X64
+  __asm__ __volatile__(
+    "mov %%rbx, %%rdi\n"
+    "cpuid\n"
+    "xchg %%rdi, %%rbx\n"
+      : "=a"(result->eax),
+        "=D"(result->ebx),
+        "=c"(result->ecx),
+        "=d"(result->edx)
+      : "a"(inEax),
+        "c"(inEcx));
+#else
+# error "[asmjit] x86CallCpuid() - Unsupported compiler."
+#endif
+}
+
+//! \internal
+//!
+//! Wrapper to call `xgetbv` instruction, reading XCR register `inEcx` into
+//! `result` (EDX:EAX). Falls back to {0, 0} on compilers with no supported
+//! path, which callers treat as "no extended state enabled".
+static ASMJIT_INLINE void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
+#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+
+  uint64_t value = _xgetbv(inEcx);
+  result->eax = static_cast<uint32_t>(value & 0xFFFFFFFFU);
+  result->edx = static_cast<uint32_t>(value >> 32);
+#elif ASMJIT_CC_GCC || ASMJIT_CC_CLANG
+  uint32_t outEax;
+  uint32_t outEdx;
+
+  // Opcode bytes are emitted directly so the code assembles even when the
+  // assembler doesn't know the `xgetbv` mnemonic:
+  // __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
+  __asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
+
+  result->eax = outEax;
+  result->edx = outEdx;
+#else
+  result->eax = 0;
+  result->edx = 0;
+#endif
+}
+
+//! \internal
+//!
+//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor` ID.
+//! Compares the string as three 32-bit words instead of calling `memcmp`.
+static ASMJIT_INLINE uint32_t x86GetCpuVendorID(const char* vendorString) noexcept {
+  struct VendorData {
+    uint32_t id;
+    char text[12];
+  };
+
+  static const VendorData vendorList[] = {
+    { CpuInfo::kVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } },
+    { CpuInfo::kVendorAMD   , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } },
+    { CpuInfo::kVendorVIA   , { 'V', 'I', 'A', 0  , 'V', 'I', 'A', 0  , 'V', 'I', 'A', 0   } },
+    { CpuInfo::kVendorVIA   , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } }
+  };
+
+  uint32_t dw0 = reinterpret_cast<const uint32_t*>(vendorString)[0];
+  uint32_t dw1 = reinterpret_cast<const uint32_t*>(vendorString)[1];
+  uint32_t dw2 = reinterpret_cast<const uint32_t*>(vendorString)[2];
+
+  for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(vendorList); i++) {
+    if (dw0 == reinterpret_cast<const uint32_t*>(vendorList[i].text)[0] &&
+        dw1 == reinterpret_cast<const uint32_t*>(vendorList[i].text)[1] &&
+        dw2 == reinterpret_cast<const uint32_t*>(vendorList[i].text)[2])
+      return vendorList[i].id;
+  }
+
+  return CpuInfo::kVendorNone;
+}
+
+//! \internal
+//!
+//! Normalize a CPUID brand string in place: collapses runs of spaces into one
+//! and drops spaces adjacent to '@'. Reads via `s` while writing the compacted
+//! result via `d` into the same buffer (d never outruns s).
+static ASMJIT_INLINE void x86SimplifyBrandString(char* s) noexcept {
+  // Used to always clear the current character to ensure that the result
+  // doesn't contain garbage after the new zero terminator.
+  char* d = s;
+
+  char prev = 0;
+  char curr = s[0];
+  s[0] = '\0';
+
+  for (;;) {
+    if (curr == 0)
+      break;
+
+    // Drop this space if it follows '@' or precedes another space or '@'.
+    if (curr == ' ') {
+      if (prev == '@' || s[1] == ' ' || s[1] == '@')
+        goto L_Skip;
+    }
+
+    d[0] = curr;
+    d++;
+    prev = curr;
+
+L_Skip:
+    // Advance the read cursor, clearing each consumed source character.
+    curr = *++s;
+    s[0] = '\0';
+  }
+
+  d[0] = '\0';
+}
+
+//! \internal
+//!
+//! Detect x86/x64 CPU vendor, family/model/stepping, brand string and feature
+//! flags by walking CPUID leaves 0x0, 0x1, 0x7, 0xD and 0x80000000+. AVX and
+//! AVX-512 features are only reported when XGETBV confirms the OS enabled the
+//! corresponding XMM/YMM/ZMM state.
+ASMJIT_FAVOR_SIZE static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
+  uint32_t i, maxId;
+
+  CpuIdResult regs;
+  XGetBVResult xcr0 = { 0, 0 };
+
+  cpuInfo->_archInfo.init(ArchInfo::kTypeHost);
+  cpuInfo->addFeature(CpuInfo::kX86FeatureI486);
+
+  // --------------------------------------------------------------------------
+  // [CPUID EAX=0x0]
+  // --------------------------------------------------------------------------
+
+  // Get vendor string/id.
+  x86CallCpuId(&regs, 0x0);
+
+  maxId = regs.eax;
+  ::memcpy(cpuInfo->_vendorString + 0, &regs.ebx, 4);
+  ::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
+  ::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);
+  cpuInfo->_vendorId = x86GetCpuVendorID(cpuInfo->_vendorString);
+
+  // --------------------------------------------------------------------------
+  // [CPUID EAX=0x1]
+  // --------------------------------------------------------------------------
+
+  if (maxId >= 0x1) {
+    // Get feature flags in ECX/EDX and family/model in EAX.
+    x86CallCpuId(&regs, 0x1);
+
+    // Fill family and model fields.
+    cpuInfo->_family   = (regs.eax >> 8) & 0x0F;
+    cpuInfo->_model    = (regs.eax >> 4) & 0x0F;
+    cpuInfo->_stepping = (regs.eax     ) & 0x0F;
+
+    // Use extended family and model fields.
+    if (cpuInfo->_family == 0x0F) {
+      cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
+      cpuInfo->_model  += ((regs.eax >> 16) & 0x0F) << 4;
+    }
+
+    cpuInfo->_x86Data._processorType        = ((regs.eax >> 12) & 0x03);
+    cpuInfo->_x86Data._brandIndex           = ((regs.ebx      ) & 0xFF);
+    cpuInfo->_x86Data._flushCacheLineSize   = ((regs.ebx >>  8) & 0xFF) * 8;
+    cpuInfo->_x86Data._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF);
+
+    if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE3);
+    if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCLMULQDQ);
+    if (regs.ecx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureMONITOR);
+    if (regs.ecx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSSE3);
+    if (regs.ecx & 0x00002000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG16B);
+    if (regs.ecx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_1);
+    if (regs.ecx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_2);
+    if (regs.ecx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVBE);
+    if (regs.ecx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePOPCNT);
+    if (regs.ecx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAESNI);
+    if (regs.ecx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE);
+    if (regs.ecx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureOSXSAVE);
+    if (regs.ecx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDRAND);
+    if (regs.edx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSC);
+    if (regs.edx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSR);
+    if (regs.edx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG8B);
+    if (regs.edx & 0x00008000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMOV);
+    if (regs.edx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH);
+    if (regs.edx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX);
+    if (regs.edx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR);
+    if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
+                                        .addFeature(CpuInfo::kX86FeatureMMX2);
+    if (regs.edx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
+                                        .addFeature(CpuInfo::kX86FeatureSSE2);
+    if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT);
+
+    // Get the content of XCR0 if supported by CPU and enabled by OS
+    // (requires both XSAVE and OSXSAVE bits).
+    if ((regs.ecx & 0x0C000000U) == 0x0C000000U) {
+      x86CallXGetBV(&xcr0, 0);
+    }
+
+    // Detect AVX+.
+    if (regs.ecx & 0x10000000U) {
+      // - XCR0[2:1] == 11b
+      //   XMM & YMM states need to be enabled by OS.
+      if ((xcr0.eax & 0x00000006U) == 0x00000006U) {
+        cpuInfo->addFeature(CpuInfo::kX86FeatureAVX);
+
+        if (regs.ecx & 0x00001000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA);
+        if (regs.ecx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureF16C);
+      }
+    }
+  }
+
+  // --------------------------------------------------------------------------
+  // [CPUID EAX=0x7]
+  // --------------------------------------------------------------------------
+
+  // Detect new features if the processor supports CPUID-07.
+  bool maybeMPX = false;
+
+  if (maxId >= 0x7) {
+    x86CallCpuId(&regs, 0x7);
+
+    if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureFSGSBASE);
+    if (regs.ebx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI);
+    if (regs.ebx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureHLE);
+    if (regs.ebx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMEP);
+    if (regs.ebx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI2);
+    if (regs.ebx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureERMS);
+    if (regs.ebx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureRTM);
+    if (regs.ebx & 0x00004000U) maybeMPX = true; // MPX also needs XCR0 bits, checked at leaf 0xD.
+    if (regs.ebx & 0x00040000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDSEED);
+    if (regs.ebx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureADX);
+    if (regs.ebx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSMAP);
+    if (regs.ebx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCOMMIT);
+    if (regs.ebx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSHOPT);
+    if (regs.ebx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLWB);
+    if (regs.ebx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSHA);
+    if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1);
+
+    // TSX is supported if at least one of `HLE` and `RTM` is supported.
+    if (regs.ebx & 0x00000810U) cpuInfo->addFeature(CpuInfo::kX86FeatureTSX);
+
+    // Detect AVX2.
+    if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
+      if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2);
+    }
+
+    // Detect AVX-512+.
+    if (regs.ebx & 0x00010000U) {
+      // - XCR0[2:1] == 11b
+      //   XMM/YMM states need to be enabled by OS.
+      // - XCR0[7:5] == 111b
+      //   Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by the OS.
+      if ((xcr0.eax & 0x000000E6U) == 0x000000E6U) {
+        cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_F);
+
+        if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_DQ);
+        if (regs.ebx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_IFMA);
+        if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_PFI);
+        if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_ERI);
+        if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_CDI);
+        if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_BW);
+        if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VL);
+        if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VBMI);
+        if (regs.ecx & 0x00004000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_VPOPCNTDQ);
+        if (regs.edx & 0x00000004U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4VNNIW);
+        if (regs.edx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512_4FMAPS);
+      }
+    }
+  }
+
+  // --------------------------------------------------------------------------
+  // [CPUID EAX=0xD]
+  // --------------------------------------------------------------------------
+
+  if (maxId >= 0xD) {
+    x86CallCpuId(&regs, 0xD, 0);
+
+    // Both CPUID result and XCR0 has to be enabled to have support for MPX.
+    if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U && maybeMPX)
+      cpuInfo->addFeature(CpuInfo::kX86FeatureMPX);
+
+    x86CallCpuId(&regs, 0xD, 1);
+    if (regs.eax & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVEOPT);
+    if (regs.eax & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVEC);
+    if (regs.eax & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVES);
+  }
+
+  // --------------------------------------------------------------------------
+  // [CPUID EAX=0x80000000...maxId]
+  // --------------------------------------------------------------------------
+
+  // The highest EAX that we understand.
+  const uint32_t kHighestProcessedEAX = 0x80000008U;
+
+  // Several CPUID calls are required to get the whole brand string. It's easy
+  // to copy one DWORD at a time instead of performing a byte copy.
+  uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString);
+
+  i = maxId = 0x80000000U;
+  do {
+    x86CallCpuId(&regs, i);
+    switch (i) {
+      case 0x80000000U:
+        maxId = std::min(regs.eax, kHighestProcessedEAX);
+        break;
+
+      case 0x80000001U:
+        if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureLAHFSAHF);
+        if (regs.ecx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureLZCNT);
+        if (regs.ecx & 0x00000040U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4A);
+        if (regs.ecx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSSE);
+        if (regs.ecx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHW);
+        if (regs.ecx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureTBM);
+        if (regs.edx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureNX);
+        if (regs.edx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSROPT);
+        if (regs.edx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX2);
+        if (regs.edx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSCP);
+        if (regs.edx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW2)
+                                            .addFeature(CpuInfo::kX86FeatureMMX2);
+        if (regs.edx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW);
+
+        if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
+          if (regs.ecx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureXOP);
+          if (regs.ecx & 0x00010000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA4);
+        }
+
+        // These seem to be only supported by AMD.
+        if (cpuInfo->getVendorId() == CpuInfo::kVendorAMD) {
+          if (regs.ecx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureALTMOVCR8);
+        }
+        break;
+
+      case 0x80000002U:
+      case 0x80000003U:
+      case 0x80000004U:
+        *brand++ = regs.eax;
+        *brand++ = regs.ebx;
+        *brand++ = regs.ecx;
+        *brand++ = regs.edx;
+
+        // Go directly to the last one.
+        if (i == 0x80000004U) i = 0x80000008U - 1;
+        break;
+
+      case 0x80000008U:
+        if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLZERO);
+        break;
+    }
+  } while (++i <= maxId);
+
+  // Simplify CPU brand string by removing unnecessary spaces.
+  x86SimplifyBrandString(cpuInfo->_brandString);
+}
+#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - HWThreadsCount]
+// ============================================================================
+
+//! \internal
+//!
+//! Return the number of logical processors (hardware threads) reported by the
+//! OS. Falls back to 1 if the OS provides no way to query it or the query
+//! fails.
+static ASMJIT_INLINE uint32_t cpuDetectHWThreadsCount() noexcept {
+#if ASMJIT_OS_WINDOWS
+  SYSTEM_INFO info;
+  ::GetSystemInfo(&info);
+  return info.dwNumberOfProcessors;
+#elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN)
+  long res = ::sysconf(_SC_NPROCESSORS_ONLN);
+  if (res <= 0) return 1;
+  return static_cast<uint32_t>(res);
+#else
+  return 1;
+#endif
+}
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect]
+// ============================================================================
+
+//! Reset and re-detect all CPU information for the host architecture.
+//! Exactly one of the arch-specific detectors below is compiled in, selected
+//! by the ASMJIT_ARCH_* macros.
+ASMJIT_FAVOR_SIZE void CpuInfo::detect() noexcept {
+  reset();
+
+#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
+  armDetectCpuInfo(this);
+#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
+
+#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
+  x86DetectCpuInfo(this);
+#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
+
+  _hwThreadsCount = cpuDetectHWThreadsCount();
+}
+
+// ============================================================================
+// [asmjit::CpuInfo - GetHost]
+// ============================================================================
+
+//! \internal
+//!
+//! CpuInfo that detects itself on construction; used only for the singleton.
+struct HostCpuInfo : public CpuInfo {
+  ASMJIT_INLINE HostCpuInfo() noexcept : CpuInfo() { detect(); }
+};
+
+// Returns a lazily-initialized singleton describing the host CPU.
+// NOTE(review): relies on function-local static initialization; thread-safe
+// under C++11 magic statics - confirm for any pre-C++11 toolchain this
+// library still targets.
+const CpuInfo& CpuInfo::getHost() noexcept {
+  static HostCpuInfo host;
+  return host;
+}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/cpuinfo.h b/libraries/asmjit/asmjit/base/cpuinfo.h
new file mode 100644
index 00000000000..268d37e8dd3
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/cpuinfo.h
@@ -0,0 +1,373 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_CPUINFO_H
+#define _ASMJIT_BASE_CPUINFO_H
+
+// [Dependencies]
+#include "../base/arch.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [asmjit::CpuFeatures]
+// ============================================================================
+
+class CpuFeatures {
+public:
+ typedef uintptr_t BitWord;
+
+ enum {
+ kMaxFeatures = 128,
+    kBitWordSize = static_cast<uint32_t>(sizeof(BitWord)) * 8,
+ kNumBitWords = kMaxFeatures / kBitWordSize
+ };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE CpuFeatures() noexcept { reset(); }
+ ASMJIT_INLINE CpuFeatures(const CpuFeatures& other) noexcept { init(other); }
+
+ // --------------------------------------------------------------------------
+ // [Init / Reset]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE void init(const CpuFeatures& other) noexcept { ::memcpy(this, &other, sizeof(*this)); }
+ ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); }
+
+ // --------------------------------------------------------------------------
+ // [Ops]
+ // --------------------------------------------------------------------------
+
+ //! Get all features as `BitWord` array.
+ ASMJIT_INLINE BitWord* getBits() noexcept { return _bits; }
+ //! Get all features as `BitWord` array (const).
+ ASMJIT_INLINE const BitWord* getBits() const noexcept { return _bits; }
+
+ //! Get if feature `feature` is present.
+ ASMJIT_INLINE bool has(uint32_t feature) const noexcept {
+ ASMJIT_ASSERT(feature < kMaxFeatures);
+
+ uint32_t idx = feature / kBitWordSize;
+ uint32_t bit = feature % kBitWordSize;
+
+    return static_cast<bool>((_bits[idx] >> bit) & 0x1);
+ }
+
+ //! Get if all features as defined by `other` are present.
+ ASMJIT_INLINE bool hasAll(const CpuFeatures& other) const noexcept {
+ for (uint32_t i = 0; i < kNumBitWords; i++)
+ if ((_bits[i] & other._bits[i]) != other._bits[i])
+ return false;
+ return true;
+ }
+
+ //! Add a CPU `feature`.
+ ASMJIT_INLINE CpuFeatures& add(uint32_t feature) noexcept {
+ ASMJIT_ASSERT(feature < kMaxFeatures);
+
+ uint32_t idx = feature / kBitWordSize;
+ uint32_t bit = feature % kBitWordSize;
+
+    _bits[idx] |= static_cast<BitWord>(1) << bit;
+ return *this;
+ }
+
+ //! Remove a CPU `feature`.
+ ASMJIT_INLINE CpuFeatures& remove(uint32_t feature) noexcept {
+ ASMJIT_ASSERT(feature < kMaxFeatures);
+
+ uint32_t idx = feature / kBitWordSize;
+ uint32_t bit = feature % kBitWordSize;
+
+    _bits[idx] &= ~(static_cast<BitWord>(1) << bit);
+ return *this;
+ }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ BitWord _bits[kNumBitWords];
+};
+
+// ============================================================================
+// [asmjit::CpuInfo]
+// ============================================================================
+
+//! CPU information.
+class CpuInfo {
+public:
+ //! CPU vendor ID.
+ ASMJIT_ENUM(Vendor) {
+ kVendorNone = 0, //!< Generic or unknown.
+ kVendorIntel = 1, //!< Intel vendor.
+ kVendorAMD = 2, //!< AMD vendor.
+ kVendorVIA = 3 //!< VIA vendor.
+ };
+
+ //! ARM/ARM64 CPU features.
+ ASMJIT_ENUM(ArmFeatures) {
+ kArmFeatureV6 = 1, //!< ARMv6 instruction set.
+ kArmFeatureV7, //!< ARMv7 instruction set.
+ kArmFeatureV8, //!< ARMv8 instruction set.
+ kArmFeatureTHUMB, //!< CPU provides THUMB v1 instruction set (THUMB mode).
+ kArmFeatureTHUMB2, //!< CPU provides THUMB v2 instruction set (THUMB mode).
+ kArmFeatureVFPv2, //!< CPU provides VFPv2 instruction set.
+ kArmFeatureVFPv3, //!< CPU provides VFPv3 instruction set.
+ kArmFeatureVFPv4, //!< CPU provides VFPv4 instruction set.
+ kArmFeatureVFP_D32, //!< CPU provides 32 VFP-D (64-bit) registers.
+ kArmFeatureEDSP, //!< CPU provides EDSP extensions.
+ kArmFeatureASIMD, //!< CPU provides 'Advanced SIMD'.
+ kArmFeatureIDIVA, //!< CPU provides hardware SDIV and UDIV (ARM mode).
+ kArmFeatureIDIVT, //!< CPU provides hardware SDIV and UDIV (THUMB mode).
+ kArmFeatureAES, //!< CPU provides AES instructions (ARM64 only).
+ kArmFeatureCRC32, //!< CPU provides CRC32 instructions.
+ kArmFeaturePMULL, //!< CPU provides PMULL instructions (ARM64 only).
+ kArmFeatureSHA1, //!< CPU provides SHA1 instructions.
+ kArmFeatureSHA256, //!< CPU provides SHA256 instructions.
+ kArmFeatureAtomics64, //!< CPU provides 64-bit load/store atomics (ARM64 only).
+
+ kArmFeaturesCount //!< Count of ARM/ARM64 CPU features.
+ };
+
+ //! X86/X64 CPU features.
+ ASMJIT_ENUM(X86Features) {
+ kX86FeatureI486 = 1, //!< CPU is at least I486.
+ kX86FeatureNX, //!< CPU has Not-Execute-Bit.
+ kX86FeatureMT, //!< CPU has multi-threading.
+ kX86FeatureALTMOVCR8, //!< CPU supports `LOCK MOV CR8` (AMD CPUs).
+ kX86FeatureCMOV, //!< CPU has CMOV.
+ kX86FeatureCMPXCHG8B, //!< CPU has CMPXCHG8B.
+ kX86FeatureCMPXCHG16B, //!< CPU has CMPXCHG16B (x64).
+ kX86FeatureMSR, //!< CPU has RDMSR/WRMSR.
+ kX86FeatureRDTSC, //!< CPU has RDTSC.
+ kX86FeatureRDTSCP, //!< CPU has RDTSCP.
+ kX86FeatureCLFLUSH, //!< CPU has CLFUSH.
+ kX86FeatureCLFLUSHOPT, //!< CPU has CLFUSHOPT.
+ kX86FeatureCLWB, //!< CPU has CLWB.
+ kX86FeatureCLZERO, //!< CPU has CLZERO.
+ kX86FeaturePCOMMIT, //!< CPU has PCOMMIT.
+ kX86FeaturePREFETCHW, //!< CPU has PREFETCHW.
+ kX86FeaturePREFETCHWT1, //!< CPU has PREFETCHWT1.
+ kX86FeatureLAHFSAHF, //!< CPU has LAHF/SAHF.
+ kX86FeatureFXSR, //!< CPU has FXSAVE/FXRSTOR.
+ kX86FeatureFXSROPT, //!< CPU has FXSAVE/FXRSTOR (optimized).
+ kX86FeatureMMX, //!< CPU has MMX.
+ kX86FeatureMMX2, //!< CPU has extended MMX.
+ kX86Feature3DNOW, //!< CPU has 3DNOW.
+ kX86Feature3DNOW2, //!< CPU has 3DNOW2 (enhanced).
+ kX86FeatureGEODE, //!< CPU has GEODE extensions (few additions to 3DNOW).
+ kX86FeatureSSE, //!< CPU has SSE.
+ kX86FeatureSSE2, //!< CPU has SSE2.
+ kX86FeatureSSE3, //!< CPU has SSE3.
+ kX86FeatureSSSE3, //!< CPU has SSSE3.
+ kX86FeatureSSE4A, //!< CPU has SSE4.A.
+ kX86FeatureSSE4_1, //!< CPU has SSE4.1.
+ kX86FeatureSSE4_2, //!< CPU has SSE4.2.
+ kX86FeatureMSSE, //!< CPU has Misaligned SSE (MSSE).
+ kX86FeatureMONITOR, //!< CPU has MONITOR and MWAIT.
+ kX86FeatureMOVBE, //!< CPU has MOVBE.
+ kX86FeaturePOPCNT, //!< CPU has POPCNT.
+ kX86FeatureLZCNT, //!< CPU has LZCNT.
+ kX86FeatureAESNI, //!< CPU has AESNI.
+ kX86FeaturePCLMULQDQ, //!< CPU has PCLMULQDQ.
+ kX86FeatureRDRAND, //!< CPU has RDRAND.
+ kX86FeatureRDSEED, //!< CPU has RDSEED.
+ kX86FeatureSMAP, //!< CPU has SMAP (supervisor-mode access prevention).
+ kX86FeatureSMEP, //!< CPU has SMEP (supervisor-mode execution prevention).
+ kX86FeatureSHA, //!< CPU has SHA-1 and SHA-256.
+ kX86FeatureXSAVE, //!< CPU has XSAVE support (XSAVE/XRSTOR, XSETBV/XGETBV, and XCR).
+ kX86FeatureXSAVEC, //!< CPU has XSAVEC support (XSAVEC).
+ kX86FeatureXSAVES, //!< CPU has XSAVES support (XSAVES/XRSTORS).
+ kX86FeatureXSAVEOPT, //!< CPU has XSAVEOPT support (XSAVEOPT/XSAVEOPT64).
+ kX86FeatureOSXSAVE, //!< CPU has XSAVE enabled by OS.
+ kX86FeatureAVX, //!< CPU has AVX.
+ kX86FeatureAVX2, //!< CPU has AVX2.
+ kX86FeatureF16C, //!< CPU has F16C.
+ kX86FeatureFMA, //!< CPU has FMA.
+ kX86FeatureFMA4, //!< CPU has FMA4.
+ kX86FeatureXOP, //!< CPU has XOP.
+ kX86FeatureBMI, //!< CPU has BMI (bit manipulation instructions #1).
+ kX86FeatureBMI2, //!< CPU has BMI2 (bit manipulation instructions #2).
+ kX86FeatureADX, //!< CPU has ADX (multi-precision add-carry instruction extensions).
+ kX86FeatureTBM, //!< CPU has TBM (trailing bit manipulation).
+ kX86FeatureMPX, //!< CPU has MPX (memory protection extensions).
+ kX86FeatureHLE, //!< CPU has HLE.
+ kX86FeatureRTM, //!< CPU has RTM.
+ kX86FeatureTSX, //!< CPU has TSX.
+ kX86FeatureERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB).
+ kX86FeatureFSGSBASE, //!< CPU has FSGSBASE.
+ kX86FeatureAVX512_F, //!< CPU has AVX512-F (foundation).
+ kX86FeatureAVX512_CDI, //!< CPU has AVX512-CDI (conflict detection).
+ kX86FeatureAVX512_PFI, //!< CPU has AVX512-PFI (prefetch instructions).
+ kX86FeatureAVX512_ERI, //!< CPU has AVX512-ERI (exponential and reciprocal).
+ kX86FeatureAVX512_DQ, //!< CPU has AVX512-DQ (DWORD/QWORD).
+ kX86FeatureAVX512_BW, //!< CPU has AVX512-BW (BYTE/WORD).
+ kX86FeatureAVX512_VL, //!< CPU has AVX512-VL (vector length extensions).
+ kX86FeatureAVX512_IFMA, //!< CPU has AVX512-IFMA (integer fused-multiply-add using 52-bit precision).
+ kX86FeatureAVX512_VBMI, //!< CPU has AVX512-VBMI (vector byte manipulation).
+ kX86FeatureAVX512_VPOPCNTDQ, //!< CPU has AVX512-VPOPCNTDQ (VPOPCNT[D|Q] instructions).
+ kX86FeatureAVX512_4VNNIW, //!< CPU has AVX512-VNNIW (vector NN instructions word variable precision).
+ kX86FeatureAVX512_4FMAPS, //!< CPU has AVX512-FMAPS (FMA packed single).
+
+ kX86FeaturesCount //!< Count of X86/X64 CPU features.
+ };
+
+ // --------------------------------------------------------------------------
+ // [ArmInfo]
+ // --------------------------------------------------------------------------
+
+ struct ArmData {
+ };
+
+ // --------------------------------------------------------------------------
+ // [X86Info]
+ // --------------------------------------------------------------------------
+
+ struct X86Data {
+ uint32_t _processorType; //!< Processor type.
+ uint32_t _brandIndex; //!< Brand index.
+ uint32_t _flushCacheLineSize; //!< Flush cache line size (in bytes).
+ uint32_t _maxLogicalProcessors; //!< Maximum number of addressable IDs for logical processors.
+ };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE CpuInfo() noexcept { reset(); }
+ ASMJIT_INLINE CpuInfo(const CpuInfo& other) noexcept { init(other); }
+
+ // --------------------------------------------------------------------------
+ // [Init / Reset]
+ // --------------------------------------------------------------------------
+
+ //! Initialize CpuInfo to the given architecture, see \ArchInfo.
+ ASMJIT_INLINE void initArch(uint32_t archType, uint32_t archMode = 0) noexcept {
+ _archInfo.init(archType, archMode);
+ }
+
+ ASMJIT_INLINE void init(const CpuInfo& other) noexcept { ::memcpy(this, &other, sizeof(*this)); }
+ ASMJIT_INLINE void reset() noexcept { ::memset(this, 0, sizeof(*this)); }
+
+ // --------------------------------------------------------------------------
+ // [Detect]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API void detect() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get generic architecture information.
+ ASMJIT_INLINE const ArchInfo& getArchInfo() const noexcept { return _archInfo; }
+ //! Get CPU architecture type, see \ArchInfo::Type.
+ ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archInfo.getType(); }
+ //! Get CPU architecture sub-type, see \ArchInfo::SubType.
+ ASMJIT_INLINE uint32_t getArchSubType() const noexcept { return _archInfo.getSubType(); }
+
+ //! Get CPU vendor ID.
+ ASMJIT_INLINE uint32_t getVendorId() const noexcept { return _vendorId; }
+ //! Get CPU family ID.
+ ASMJIT_INLINE uint32_t getFamily() const noexcept { return _family; }
+ //! Get CPU model ID.
+ ASMJIT_INLINE uint32_t getModel() const noexcept { return _model; }
+ //! Get CPU stepping.
+ ASMJIT_INLINE uint32_t getStepping() const noexcept { return _stepping; }
+
+ //! Get number of hardware threads available.
+ ASMJIT_INLINE uint32_t getHwThreadsCount() const noexcept {
+ return _hwThreadsCount;
+ }
+
+ //! Get all CPU features.
+ ASMJIT_INLINE const CpuFeatures& getFeatures() const noexcept { return _features; }
+ //! Get whether CPU has a `feature`.
+ ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept { return _features.has(feature); }
+ //! Add a CPU `feature`.
+ ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) noexcept { _features.add(feature); return *this; }
+
+ //! Get CPU vendor string.
+ ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; }
+ //! Get CPU brand string.
+ ASMJIT_INLINE const char* getBrandString() const noexcept { return _brandString; }
+
+ // --------------------------------------------------------------------------
+ // [Accessors - ARM]
+ // --------------------------------------------------------------------------
+
+ // --------------------------------------------------------------------------
+ // [Accessors - X86]
+ // --------------------------------------------------------------------------
+
+ //! Get processor type.
+ ASMJIT_INLINE uint32_t getX86ProcessorType() const noexcept {
+ return _x86Data._processorType;
+ }
+
+ //! Get brand index.
+ ASMJIT_INLINE uint32_t getX86BrandIndex() const noexcept {
+ return _x86Data._brandIndex;
+ }
+
+ //! Get flush cache line size.
+ ASMJIT_INLINE uint32_t getX86FlushCacheLineSize() const noexcept {
+ return _x86Data._flushCacheLineSize;
+ }
+
+ //! Get maximum logical processors count.
+ ASMJIT_INLINE uint32_t getX86MaxLogicalProcessors() const noexcept {
+ return _x86Data._maxLogicalProcessors;
+ }
+
+ // --------------------------------------------------------------------------
+ // [Statics]
+ // --------------------------------------------------------------------------
+
+ //! Get the host CPU information.
+ ASMJIT_API static const CpuInfo& getHost() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ ArchInfo _archInfo; //!< CPU architecture information.
+ uint32_t _vendorId; //!< CPU vendor id, see \ref Vendor.
+ uint32_t _family; //!< CPU family ID.
+ uint32_t _model; //!< CPU model ID.
+ uint32_t _stepping; //!< CPU stepping.
+ uint32_t _hwThreadsCount; //!< Number of hardware threads.
+ CpuFeatures _features; //!< CPU features.
+ char _vendorString[16]; //!< CPU vendor string.
+ char _brandString[64]; //!< CPU brand string.
+
+ // Architecture specific data.
+ union {
+ ArmData _armData;
+ X86Data _x86Data;
+ };
+};
+
+//! \}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
+
+// [Guard]
+#endif // _ASMJIT_BASE_CPUINFO_H
diff --git a/libraries/asmjit/asmjit/base/func.cpp b/libraries/asmjit/asmjit/base/func.cpp
new file mode 100644
index 00000000000..52107655584
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/func.cpp
@@ -0,0 +1,186 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Export]
+#define ASMJIT_EXPORTS
+
+// [Dependencies]
+#include "../base/arch.h"
+#include "../base/func.h"
+
+#if defined(ASMJIT_BUILD_X86)
+#include "../x86/x86internal_p.h"
+#include "../x86/x86operand.h"
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+#include "../arm/arminternal_p.h"
+#include "../arm/armoperand.h"
+#endif // ASMJIT_BUILD_ARM
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+// ============================================================================
+// [asmjit::CallConv - Init / Reset]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId) noexcept {
+ reset();
+
+#if defined(ASMJIT_BUILD_X86)
+ if (CallConv::isX86Family(ccId))
+ return X86Internal::initCallConv(*this, ccId);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (CallConv::isArmFamily(ccId))
+ return ArmInternal::initCallConv(*this, ccId);
+#endif // ASMJIT_BUILD_ARM
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncDetail - Init / Reset]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& sign) {
+ uint32_t ccId = sign.getCallConv();
+ CallConv& cc = _callConv;
+
+ uint32_t argCount = sign.getArgCount();
+ if (ASMJIT_UNLIKELY(argCount > kFuncArgCount))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ASMJIT_PROPAGATE(cc.init(ccId));
+
+ uint32_t gpSize = (cc.getArchType() == ArchInfo::kTypeX86) ? 4 : 8;
+ uint32_t deabstractDelta = TypeId::deabstractDeltaOfSize(gpSize);
+
+ const uint8_t* args = sign.getArgs();
+ for (uint32_t i = 0; i < argCount; i++) {
+ Value& arg = _args[i];
+ arg.initTypeId(TypeId::deabstract(args[i], deabstractDelta));
+ }
+  _argCount = static_cast<uint8_t>(argCount);
+
+ uint32_t ret = sign.getRet();
+ if (ret != TypeId::kVoid) {
+ _rets[0].initTypeId(TypeId::deabstract(ret, deabstractDelta));
+ _retCount = 1;
+ }
+
+#if defined(ASMJIT_BUILD_X86)
+ if (CallConv::isX86Family(ccId))
+ return X86Internal::initFuncDetail(*this, sign, gpSize);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (CallConv::isArmFamily(ccId))
+ return ArmInternal::initFuncDetail(*this, sign, gpSize);
+#endif // ASMJIT_BUILD_ARM
+
+ // We should never bubble here as if `cc.init()` succeeded then there has to
+ // be an implementation for the current architecture. However, stay safe.
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncFrameLayout - Init / Reset]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncFrameLayout::init(const FuncDetail& func, const FuncFrameInfo& ffi) noexcept {
+ uint32_t ccId = func.getCallConv().getId();
+
+#if defined(ASMJIT_BUILD_X86)
+ if (CallConv::isX86Family(ccId))
+ return X86Internal::initFrameLayout(*this, func, ffi);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (CallConv::isArmFamily(ccId))
+ return ArmInternal::initFrameLayout(*this, func, ffi);
+#endif // ASMJIT_BUILD_ARM
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncArgsMapper]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncArgsMapper::updateFrameInfo(FuncFrameInfo& ffi) const noexcept {
+ const FuncDetail* func = getFuncDetail();
+ if (!func) return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t ccId = func->getCallConv().getId();
+
+#if defined(ASMJIT_BUILD_X86)
+ if (CallConv::isX86Family(ccId))
+ return X86Internal::argsToFrameInfo(*this, ffi);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (CallConv::isArmFamily(ccId))
+ return ArmInternal::argsToFrameInfo(*this, ffi);
+#endif // ASMJIT_BUILD_ARM
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+// ============================================================================
+// [asmjit::FuncUtils]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncUtils::emitProlog(CodeEmitter* emitter, const FuncFrameLayout& layout) {
+#if defined(ASMJIT_BUILD_X86)
+ if (emitter->getArchInfo().isX86Family())
+    return X86Internal::emitProlog(static_cast<X86Emitter*>(emitter), layout);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (emitter->getArchInfo().isArmFamily())
+    return ArmInternal::emitProlog(static_cast<ArmEmitter*>(emitter), layout);
+#endif // ASMJIT_BUILD_ARM
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_FAVOR_SIZE Error FuncUtils::emitEpilog(CodeEmitter* emitter, const FuncFrameLayout& layout) {
+#if defined(ASMJIT_BUILD_X86)
+ if (emitter->getArchInfo().isX86Family())
+    return X86Internal::emitEpilog(static_cast<X86Emitter*>(emitter), layout);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (emitter->getArchInfo().isArmFamily())
+    return ArmInternal::emitEpilog(static_cast<ArmEmitter*>(emitter), layout);
+#endif // ASMJIT_BUILD_ARM
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_FAVOR_SIZE Error FuncUtils::allocArgs(CodeEmitter* emitter, const FuncFrameLayout& layout, const FuncArgsMapper& args) {
+#if defined(ASMJIT_BUILD_X86)
+ if (emitter->getArchInfo().isX86Family())
+    return X86Internal::allocArgs(static_cast<X86Emitter*>(emitter), layout, args);
+#endif // ASMJIT_BUILD_X86
+
+#if defined(ASMJIT_BUILD_ARM)
+ if (emitter->getArchInfo().isArmFamily())
+    return ArmInternal::allocArgs(static_cast<ArmEmitter*>(emitter), layout, args);
+#endif // ASMJIT_BUILD_ARM
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+} // asmjit namespace
+
+// [Api-End]
+#include "../asmjit_apiend.h"
diff --git a/libraries/asmjit/asmjit/base/func.h b/libraries/asmjit/asmjit/base/func.h
new file mode 100644
index 00000000000..c9ab0529d2f
--- /dev/null
+++ b/libraries/asmjit/asmjit/base/func.h
@@ -0,0 +1,1296 @@
+// [AsmJit]
+// Complete x86/x64 JIT and Remote Assembler for C++.
+//
+// [License]
+// Zlib - See LICENSE.md file in the package.
+
+// [Guard]
+#ifndef _ASMJIT_BASE_FUNC_H
+#define _ASMJIT_BASE_FUNC_H
+
+#include "../asmjit_build.h"
+
+// [Dependencies]
+#include "../base/arch.h"
+#include "../base/operand.h"
+#include "../base/utils.h"
+
+// [Api-Begin]
+#include "../asmjit_apibegin.h"
+
+namespace asmjit {
+
+//! \addtogroup asmjit_base
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class CodeEmitter;
+
+// ============================================================================
+// [asmjit::CallConv]
+// ============================================================================
+
+//! Function calling convention.
+//!
+//! Function calling convention is a scheme that defines how function parameters
+//! are passed and how function returns its result. AsmJit defines a variety of
+//! architecture and OS specific calling conventions and also provides a compile
+//! time detection to make JIT code-generation easier.
+struct CallConv {
+ //! Calling convention id.
+ ASMJIT_ENUM(Id) {
+ //! None or invalid (can't be used).
+ kIdNone = 0,
+
+ // ------------------------------------------------------------------------
+ // [Universal]
+ // ------------------------------------------------------------------------
+
+ // TODO: To make this possible we need to know target ARCH and ABI.
+
+ /*
+
+ // Universal calling conventions are applicable to any target and are
+ // converted to target dependent conventions at runtime. The purpose of
+ // these conventions is to make using functions less target dependent.
+
+ kIdCDecl = 1,
+ kIdStdCall = 2,
+ kIdFastCall = 3,
+
+ //! AsmJit specific calling convention designed for calling functions
+ //! inside a multimedia code like that don't use many registers internally,
+ //! but are long enough to be called and not inlined. These functions are
+ //! usually used to calculate trigonometric functions, logarithms, etc...
+ kIdFastEval2 = 10,
+ kIdFastEval3 = 11,
+ kIdFastEval4 = 12,
+ */
+
+ // ------------------------------------------------------------------------
+ // [X86]
+ // ------------------------------------------------------------------------
+
+ //! X86 `__cdecl` calling convention (used by C runtime and libraries).
+ kIdX86CDecl = 16,
+ //! X86 `__stdcall` calling convention (used mostly by WinAPI).
+ kIdX86StdCall = 17,
+ //! X86 `__thiscall` calling convention (MSVC/Intel).
+ kIdX86MsThisCall = 18,
+ //! X86 `__fastcall` convention (MSVC/Intel).
+ kIdX86MsFastCall = 19,
+ //! X86 `__fastcall` convention (GCC and Clang).
+ kIdX86GccFastCall = 20,
+ //! X86 `regparm(1)` convention (GCC and Clang).
+ kIdX86GccRegParm1 = 21,
+ //! X86 `regparm(2)` convention (GCC and Clang).
+ kIdX86GccRegParm2 = 22,
+ //! X86 `regparm(3)` convention (GCC and Clang).
+ kIdX86GccRegParm3 = 23,
+
+ kIdX86FastEval2 = 29,
+ kIdX86FastEval3 = 30,
+ kIdX86FastEval4 = 31,
+
+ //! X64 calling convention defined by WIN64-ABI.
+ //!
+ //! Links:
+  //! * <https://msdn.microsoft.com/en-us/library/9b372w95.aspx>.
+ kIdX86Win64 = 32,
+ //! X64 calling convention used by Unix platforms (SYSV/AMD64-ABI).
+ kIdX86SysV64 = 33,
+
+ kIdX64FastEval2 = 45,
+ kIdX64FastEval3 = 46,
+ kIdX64FastEval4 = 47,
+
+ // ------------------------------------------------------------------------
+ // [ARM]
+ // ------------------------------------------------------------------------
+
+ //! Legacy calling convention, floating point arguments are passed via GP registers.
+ kIdArm32SoftFP = 48,
+ //! Modern calling convention, uses VFP registers to pass floating point arguments.
+ kIdArm32HardFP = 49,
+
+ // ------------------------------------------------------------------------
+ // [Internal]
+ // ------------------------------------------------------------------------
+
+ _kIdX86Start = 16, //!< \internal
+ _kIdX86End = 31, //!< \internal
+
+ _kIdX64Start = 32, //!< \internal
+ _kIdX64End = 47, //!< \internal
+
+ _kIdArmStart = 48, //!< \internal
+ _kIdArmEnd = 49, //!< \internal
+
+ // ------------------------------------------------------------------------
+ // [Host]
+ // ------------------------------------------------------------------------
+
+#if defined(ASMJIT_DOCGEN)
+ //! Default calling convention based on the current C++ compiler's settings.
+ //!
+ //! NOTE: This should be always the same as `kIdHostCDecl`, but some
+ //! compilers allow to override the default calling convention. Overriding
+ //! is not detected at the moment.
+ kIdHost = DETECTED_AT_COMPILE_TIME,
+
+ //! Default CDECL calling convention based on the current C++ compiler's settings.
+ kIdHostCDecl = DETECTED_AT_COMPILE_TIME,
+
+ //! Default STDCALL calling convention based on the current C++ compiler's settings.
+ //!
+ //! NOTE: If not defined by the host then it's the same as `kIdHostCDecl`.
+ kIdHostStdCall = DETECTED_AT_COMPILE_TIME,
+
+ //! Compatibility for `__fastcall` calling convention.
+ //!
+ //! NOTE: If not defined by the host then it's the same as `kIdHostCDecl`.
+ kIdHostFastCall = DETECTED_AT_COMPILE_TIME
+#elif ASMJIT_ARCH_X86
+ kIdHost = kIdX86CDecl,
+ kIdHostCDecl = kIdX86CDecl,
+ kIdHostStdCall = kIdX86StdCall,
+ kIdHostFastCall = ASMJIT_CC_MSC ? kIdX86MsFastCall :
+ ASMJIT_CC_GCC ? kIdX86GccFastCall :
+ ASMJIT_CC_CLANG ? kIdX86GccFastCall : kIdNone,
+ kIdHostFastEval2 = kIdX86FastEval2,
+ kIdHostFastEval3 = kIdX86FastEval3,
+ kIdHostFastEval4 = kIdX86FastEval4
+#elif ASMJIT_ARCH_X64
+ kIdHost = ASMJIT_OS_WINDOWS ? kIdX86Win64 : kIdX86SysV64,
+ kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostFastCall = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostFastEval2 = kIdX64FastEval2,
+ kIdHostFastEval3 = kIdX64FastEval3,
+ kIdHostFastEval4 = kIdX64FastEval4
+#elif ASMJIT_ARCH_ARM32
+# if defined(__SOFTFP__)
+ kIdHost = kIdArm32SoftFP,
+# else
+ kIdHost = kIdArm32HardFP,
+# endif
+ // These don't exist on ARM.
+ kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostFastCall = kIdHost // Doesn't exist, redirected to host.
+#else
+# error "[asmjit] Couldn't determine the target's calling convention."
+#endif
+ };
+
+ //! Calling convention algorithm.
+ //!
+ //! This is AsmJit specific. It basically describes how should AsmJit convert
+ //! the function arguments defined by `FuncSignature` into register ids or
+ //! stack offsets. The default algorithm is a standard algorithm that assigns
+ //! registers first, and then assigns stack. The Win64 algorithm does register
+ //! shadowing as defined by `WIN64` calling convention - it applies to 64-bit
+ //! calling conventions only.
+ ASMJIT_ENUM(Algorithm) {
+ kAlgorithmDefault = 0, //!< Default algorithm (cross-platform).
+ kAlgorithmWin64 = 1 //!< WIN64 specific algorithm.
+ };
+
+ //! Calling convention flags.
+ ASMJIT_ENUM(Flags) {
+ kFlagCalleePopsStack = 0x01, //!< Callee is responsible for cleaning up the stack.
+ kFlagPassFloatsByVec = 0x02, //!< Pass F32 and F64 arguments by VEC128 register.
+ kFlagVectorCall = 0x04, //!< This is a '__vectorcall' calling convention.
+ kFlagIndirectVecArgs = 0x08 //!< Pass vector arguments indirectly (as a pointer).
+ };
+
+ //! Internal limits of AsmJit/CallConv.
+ ASMJIT_ENUM(Limits) {
+ kMaxVRegKinds = Globals::kMaxVRegKinds,
+ kNumRegArgsPerKind = 8
+ };
+
+ //! Passed registers' order.
+ union RegOrder {
+ uint8_t id[kNumRegArgsPerKind]; //!< Passed registers, ordered.
+ uint32_t packed[(kNumRegArgsPerKind + 3) / 4];
+ };
+
+ // --------------------------------------------------------------------------
+ // [Utilities]
+ // --------------------------------------------------------------------------
+
+ static ASMJIT_INLINE bool isX86Family(uint32_t ccId) noexcept { return ccId >= _kIdX86Start && ccId <= _kIdX64End; }
+ static ASMJIT_INLINE bool isArmFamily(uint32_t ccId) noexcept { return ccId >= _kIdArmStart && ccId <= _kIdArmEnd; }
+
+ // --------------------------------------------------------------------------
+ // [Init / Reset]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_API Error init(uint32_t ccId) noexcept;
+
+ ASMJIT_INLINE void reset() noexcept {
+ ::memset(this, 0, sizeof(*this));
+ ::memset(_passedOrder, 0xFF, sizeof(_passedOrder));
+ }
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get calling convention id, see \ref Id.
+ ASMJIT_INLINE uint32_t getId() const noexcept { return _id; }
+ //! Set calling convention id, see \ref Id.
+  ASMJIT_INLINE void setId(uint32_t id) noexcept { _id = static_cast<uint8_t>(id); }
+
+ //! Get architecture type.
+ ASMJIT_INLINE uint32_t getArchType() const noexcept { return _archType; }
+ //! Set architecture type.
+  ASMJIT_INLINE void setArchType(uint32_t archType) noexcept { _archType = static_cast<uint8_t>(archType); }
+
+ //! Get calling convention algorithm, see \ref Algorithm.
+ ASMJIT_INLINE uint32_t getAlgorithm() const noexcept { return _algorithm; }
+ //! Set calling convention algorithm, see \ref Algorithm.
+  ASMJIT_INLINE void setAlgorithm(uint32_t algorithm) noexcept { _algorithm = static_cast<uint8_t>(algorithm); }
+
+ //! Get if the calling convention has the given `flag` set.
+ ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ //! Get calling convention flags, see \ref Flags.
+ ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
+  //! Set calling convention flags, see \ref Flags.
+ ASMJIT_INLINE void setFlags(uint32_t flag) noexcept { _flags = flag; };
+ //! Add calling convention flags, see \ref Flags.
+ ASMJIT_INLINE void addFlags(uint32_t flag) noexcept { _flags |= flag; };
+
+ //! Get a natural stack alignment.
+ ASMJIT_INLINE uint32_t getNaturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+
+ //! Set a natural stack alignment.
+ //!
+ //! This function can be used to override the default stack alignment in case
+ //! that you know that it's alignment is different. For example it allows to
+ //! implement custom calling conventions that guarantee higher stack alignment.
+ ASMJIT_INLINE void setNaturalStackAlignment(uint32_t value) noexcept {
+ ASMJIT_ASSERT(value < 256);
+    _naturalStackAlignment = static_cast<uint8_t>(value);
+ }
+
+ //! Get whether this calling convention specifies 'SpillZone'.
+ ASMJIT_INLINE bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+ //! Get the size of 'SpillZone'.
+ ASMJIT_INLINE uint32_t getSpillZoneSize() const noexcept { return _spillZoneSize; }
+ //! Set the size of 'SpillZone'.
+ ASMJIT_INLINE void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = static_cast<uint8_t>(size); }
+
+ //! Get whether this calling convention specifies 'RedZone'.
+ ASMJIT_INLINE bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+ //! Get the size of 'RedZone'.
+ ASMJIT_INLINE uint32_t getRedZoneSize() const noexcept { return _redZoneSize; }
+ //! Set the size of 'RedZone'.
+ ASMJIT_INLINE void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = static_cast<uint16_t>(size); }
+
+ //! Get the order in which registers of the given `kind` are used to pass
+ //! function arguments (array of register ids; 0xFF marks an unused slot).
+ ASMJIT_INLINE const uint8_t* getPassedOrder(uint32_t kind) const noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+ return _passedOrder[kind].id;
+ }
+
+ //! Get the mask of all registers of the given `kind` used to pass arguments.
+ ASMJIT_INLINE uint32_t getPassedRegs(uint32_t kind) const noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+ return _passedRegs[kind];
+ }
+
+ //! \internal
+ //!
+ //! Store the passed-order array of `kind` as two packed 32-bit words
+ //! (four 8-bit register ids each).
+ ASMJIT_INLINE void _setPassedPacked(uint32_t kind, uint32_t p0, uint32_t p1) noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+
+ _passedOrder[kind].packed[0] = p0;
+ _passedOrder[kind].packed[1] = p1;
+ }
+
+ //! Mark all registers of the given `kind` as not used for argument passing:
+ //! every order slot is set to 0xFF and the passed-regs mask is cleared.
+ ASMJIT_INLINE void setPassedToNone(uint32_t kind) noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+
+ _setPassedPacked(kind, ASMJIT_PACK32_4x8(0xFF, 0xFF, 0xFF, 0xFF),
+ ASMJIT_PACK32_4x8(0xFF, 0xFF, 0xFF, 0xFF));
+ _passedRegs[kind] = 0;
+ }
+
+ //! Set the order of registers (by id) used to pass arguments of the given
+ //! `kind`; 0xFF means "no register". Also rebuilds the passed-regs mask.
+ ASMJIT_INLINE void setPassedOrder(uint32_t kind, uint32_t a0, uint32_t a1 = 0xFF, uint32_t a2 = 0xFF, uint32_t a3 = 0xFF, uint32_t a4 = 0xFF, uint32_t a5 = 0xFF, uint32_t a6 = 0xFF, uint32_t a7 = 0xFF) noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+
+ _setPassedPacked(kind, ASMJIT_PACK32_4x8(a0, a1, a2, a3),
+ ASMJIT_PACK32_4x8(a4, a5, a6, a7));
+
+ // NOTE: This should always be called with all arguments known at compile
+ // time, so even if it looks scary it should be translated to a single
+ // instruction.
+ _passedRegs[kind] = (a0 != 0xFF ? 1U << a0 : 0U) |
+ (a1 != 0xFF ? 1U << a1 : 0U) |
+ (a2 != 0xFF ? 1U << a2 : 0U) |
+ (a3 != 0xFF ? 1U << a3 : 0U) |
+ (a4 != 0xFF ? 1U << a4 : 0U) |
+ (a5 != 0xFF ? 1U << a5 : 0U) |
+ (a6 != 0xFF ? 1U << a6 : 0U) |
+ (a7 != 0xFF ? 1U << a7 : 0U) ;
+ }
+
+ //! Get the mask of all preserved registers of the given `kind`.
+ ASMJIT_INLINE uint32_t getPreservedRegs(uint32_t kind) const noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+ return _preservedRegs[kind];
+ }
+
+
+ //! Set the mask of all preserved registers of the given `kind`.
+ ASMJIT_INLINE void setPreservedRegs(uint32_t kind, uint32_t regs) noexcept {
+ ASMJIT_ASSERT(kind < kMaxVRegKinds);
+ _preservedRegs[kind] = regs;
+ }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint8_t _id; //!< Calling convention id, see \ref Id.
+ uint8_t _archType; //!< Architecture type (see \ref ArchInfo::Type).
+ uint8_t _algorithm; //!< Calling convention algorithm.
+ uint8_t _flags; //!< Calling convention flags.
+
+ uint8_t _naturalStackAlignment; //!< Natural stack alignment as defined by OS/ABI.
+ uint8_t _spillZoneSize; //!< Spill zone size (WIN64 == 32 bytes).
+ uint16_t _redZoneSize; //!< Red zone size (AMD64 == 128 bytes).
+
+ RegOrder _passedOrder[kMaxVRegKinds]; //!< Passed registers' order, per kind.
+ uint32_t _passedRegs[kMaxVRegKinds]; //!< Mask of all passed registers, per kind.
+ uint32_t _preservedRegs[kMaxVRegKinds];//!< Mask of all preserved registers, per kind.
+};
+
+// ============================================================================
+// [asmjit::FuncArgIndex]
+// ============================================================================
+
+//! Function argument index (lo/hi).
+ASMJIT_ENUM(FuncArgIndex) {
+ //! Maximum number of function arguments supported by AsmJit.
+ kFuncArgCount = 16,
+ //! Extended maximum number of arguments (used internally).
+ kFuncArgCountLoHi = kFuncArgCount * 2,
+
+ //! Index to the LO part of function argument (default).
+ //!
+ //! This value is typically omitted and added only if there is HI argument
+ //! accessed.
+ kFuncArgLo = 0,
+
+ //! Index to the HI part of function argument.
+ //!
+ //! HI part of function argument depends on target architecture. On x86 it's
+ //! typically used to transfer 64-bit integers (they form a pair of 32-bit
+ //! integers).
+ kFuncArgHi = kFuncArgCount
+};
+
+// ============================================================================
+// [asmjit::FuncSignature]
+// ============================================================================
+
+//! Function signature.
+//!
+//! Contains information about function return type, count of arguments and
+//! their TypeIds. Function signature is a low level structure which doesn't
+//! contain platform specific or calling convention specific information.
+struct FuncSignature {
+ enum {
+ //! Doesn't have variable number of arguments (`...`).
+ kNoVarArgs = 0xFF
+ };
+
+ // --------------------------------------------------------------------------
+ // [Init / Reset]
+ // --------------------------------------------------------------------------
+
+ //! Initialize the function signature.
+ ASMJIT_INLINE void init(uint32_t ccId, uint32_t ret, const uint8_t* args, uint32_t argCount) noexcept {
+ ASMJIT_ASSERT(ccId <= 0xFF);
+ ASMJIT_ASSERT(argCount <= 0xFF);
+
+ _callConv = static_cast(ccId);
+ _argCount = static_cast(argCount);
+ _vaIndex = kNoVarArgs;
+ _ret = ret;
+ _args = args;
+ }
+
+ ASMJIT_INLINE void reset() noexcept {
+ memset(this, 0, sizeof(*this));
+ }
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Get the function's calling convention.
+ ASMJIT_INLINE uint32_t getCallConv() const noexcept { return _callConv; }
+
+ //! Get if the function has variable number of arguments (...).
+ ASMJIT_INLINE bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
+ //! Get the variable arguments (...) index, `kNoVarArgs` if none.
+ ASMJIT_INLINE uint32_t getVAIndex() const noexcept { return _vaIndex; }
+
+ //! Get the number of function arguments.
+ ASMJIT_INLINE uint32_t getArgCount() const noexcept { return _argCount; }
+
+ ASMJIT_INLINE bool hasRet() const noexcept { return _ret != TypeId::kVoid; }
+ //! Get the return value type.
+ ASMJIT_INLINE uint32_t getRet() const noexcept { return _ret; }
+
+ //! Get the type of the argument at index `i`.
+ ASMJIT_INLINE uint32_t getArg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < _argCount);
+ return _args[i];
+ }
+ //! Get the array of function arguments' types.
+ ASMJIT_INLINE const uint8_t* getArgs() const noexcept { return _args; }
+
+ // --------------------------------------------------------------------------
+ // [Members]
+ // --------------------------------------------------------------------------
+
+ uint8_t _callConv; //!< Calling convention id.
+ uint8_t _argCount; //!< Count of arguments.
+ uint8_t _vaIndex; //!< Index to a first vararg or `kNoVarArgs`.
+ uint8_t _ret; //!< TypeId of a return value.
+ const uint8_t* _args; //!< TypeIds of function arguments.
+};
+
+// ============================================================================
+// [asmjit::FuncSignatureT]
+// ============================================================================
+
+//! \internal
+#define T(TYPE) TypeIdOf::kTypeId
+
+//! Static function signature (no arguments).
+template
+class FuncSignature0 : public FuncSignature {
+public:
+ ASMJIT_INLINE FuncSignature0(uint32_t ccId = CallConv::kIdHost) noexcept {
+ init(ccId, T(RET), nullptr, 0);
+ }
+};
+
+//! Static function signature (1 argument).
+template<typename RET, typename A0>
+class FuncSignature1 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature1(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (2 arguments).
+template<typename RET, typename A0, typename A1>
+class FuncSignature2 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature2(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (3 arguments).
+template<typename RET, typename A0, typename A1, typename A2>
+class FuncSignature3 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature3(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (4 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3>
+class FuncSignature4 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature4(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (5 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3, typename A4>
+class FuncSignature5 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature5(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (6 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3, typename A4, typename A5>
+class FuncSignature6 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature6(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (7 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>
+class FuncSignature7 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature7(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (8 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>
+class FuncSignature8 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature8(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6), T(A7) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (9 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>
+class FuncSignature9 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature9(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6), T(A7), T(A8) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+//! Static function signature (10 arguments).
+template<typename RET, typename A0, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>
+class FuncSignature10 : public FuncSignature {
+public:
+  ASMJIT_INLINE FuncSignature10(uint32_t ccId = CallConv::kIdHost) noexcept {
+    static const uint8_t args[] = { T(A0), T(A1), T(A2), T(A3), T(A4), T(A5), T(A6), T(A7), T(A8), T(A9) };
+    init(ccId, T(RET), args, ASMJIT_ARRAY_SIZE(args));
+  }
+};
+
+#if ASMJIT_CC_HAS_VARIADIC_TEMPLATES
+//! Static function signature (variadic).
+template