Commit
[NFC][Clang][Headers] Update refs to ACLE in comments (#78305)
Co-authored-by: Max Iyengar <Max.Iyengar@arm.com>
vhscampos and Blue-Dot committed Jan 17, 2024
1 parent 2fe5b15 commit 837cde8
Showing 1 changed file with 38 additions and 32 deletions.
70 changes: 38 additions & 32 deletions clang/lib/Headers/arm_acle.h
@@ -4,6 +4,13 @@
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
+ * The Arm C Language Extensions specifications can be found in the following
+ * link: https://github.com/ARM-software/acle/releases
+ *
+ * The ACLE section numbers are subject to change. When consulting the
+ * specifications, it is recommended to search using section titles if
+ * the section numbers look outdated.
+ *
*===-----------------------------------------------------------------------===
*/

@@ -20,8 +27,8 @@
extern "C" {
#endif

-/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
-/* 8.3 Memory barriers */
+/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
+/* 7.3 Memory barriers */
#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
#endif
@@ -32,7 +39,7 @@ extern "C" {
#define __isb(i) __builtin_arm_isb(i)
#endif

-/* 8.4 Hints */
+/* 7.4 Hints */

#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
@@ -68,7 +75,7 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v
#define __dbg(t) __builtin_arm_dbg(t)
#endif

-/* 8.5 Swap */
+/* 7.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
uint32_t v;
@@ -78,8 +85,8 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
return v;
}

-/* 8.6 Memory prefetch intrinsics */
-/* 8.6.1 Data prefetch */
+/* 7.6 Memory prefetch intrinsics */
+/* 7.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
@@ -90,7 +97,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
__builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif

-/* 8.6.2 Instruction prefetch */
+/* 7.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
@@ -101,15 +108,15 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
__builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif

-/* 8.7 NOP */
+/* 7.7 NOP */
#if !defined(_MSC_VER) || !defined(__aarch64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
__builtin_arm_nop();
}
#endif

-/* 9 DATA-PROCESSING INTRINSICS */
-/* 9.2 Miscellaneous data-processing intrinsics */
+/* 8 DATA-PROCESSING INTRINSICS */
+/* 8.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
@@ -248,9 +255,7 @@ __rbitl(unsigned long __t) {
#endif
}

-/*
- * 9.3 16-bit multiplications
- */
+/* 8.3 16-bit multiplications */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbb(int32_t __a, int32_t __b) {
@@ -279,18 +284,18 @@ __smulwt(int32_t __a, int32_t __b) {
#endif

/*
- * 9.4 Saturating intrinsics
+ * 8.4 Saturating intrinsics
*
* FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
* intrinsics are implemented and the flag is enabled.
*/
-/* 9.4.1 Width-specified saturation intrinsics */
+/* 8.4.1 Width-specified saturation intrinsics */
#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif

-/* 9.4.2 Saturating addition and subtraction intrinsics */
+/* 8.4.2 Saturating addition and subtraction intrinsics */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
@@ -308,7 +313,7 @@ __qdbl(int32_t __t) {
}
#endif

-/* 9.4.3 Accumultating multiplications */
+/* 8.4.3 Accumultating multiplications */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
@@ -337,13 +342,13 @@ __smlawt(int32_t __a, int32_t __b, int32_t __c) {
#endif


-/* 9.5.4 Parallel 16-bit saturation */
+/* 8.5.4 Parallel 16-bit saturation */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

-/* 9.5.5 Packing and unpacking */
+/* 8.5.5 Packing and unpacking */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
@@ -368,15 +373,15 @@ __uxtb16(int8x4_t __a) {
}
#endif

-/* 9.5.6 Parallel selection */
+/* 8.5.6 Parallel selection */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
return __builtin_arm_sel(__a, __b);
}
#endif

-/* 9.5.7 Parallel 8-bit addition and subtraction */
+/* 8.5.7 Parallel 8-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
@@ -428,7 +433,7 @@ __usub8(uint8x4_t __a, uint8x4_t __b) {
}
#endif

-/* 9.5.8 Sum of 8-bit absolute differences */
+/* 8.5.8 Sum of 8-bit absolute differences */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
@@ -440,7 +445,7 @@ __usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
}
#endif

-/* 9.5.9 Parallel 16-bit addition and subtraction */
+/* 8.5.9 Parallel 16-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
@@ -540,7 +545,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) {
}
#endif

-/* 9.5.10 Parallel 16-bit multiplications */
+/* 8.5.10 Parallel 16-bit multiplications */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
@@ -607,7 +612,7 @@ __rintnf(float __a) {
}
#endif

-/* 9.7 CRC32 intrinsics */
+/* 8.8 CRC32 intrinsics */
#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \
(defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
@@ -651,6 +656,7 @@ __crc32cd(uint32_t __a, uint64_t __b) {
}
#endif

+/* 8.6 Floating-point data-processing intrinsics */
/* Armv8.3-A Javascript conversion intrinsic */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a")))
@@ -702,7 +708,7 @@ __rint64x(double __a) {
}
#endif

-/* Armv8.7-A load/store 64-byte intrinsics */
+/* 8.9 Armv8.7-A load/store 64-byte intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
typedef struct {
uint64_t val[8];
@@ -728,7 +734,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
}
#endif

-/* 10.1 Special register intrinsics */
+/* 11.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)
@@ -742,7 +748,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))

-/* Memory Tagging Extensions (MTE) Intrinsics */
+/* 10.3 Memory Tagging Extensions (MTE) Intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
@@ -751,12 +757,12 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)

-/* Memory Operations Intrinsics */
+/* 18 Memory Operations Intrinsics */
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
__builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif

-/* Coprocessor Intrinsics */
+/* 11.3 Coprocessor Intrinsics */
#if defined(__ARM_FEATURE_COPROC)

#if (__ARM_FEATURE_COPROC & 0x1)
@@ -815,7 +821,7 @@

#endif // __ARM_FEATURE_COPROC

-/* Transactional Memory Extension (TME) Intrinsics */
+/* 17 Transactional Memory Extension (TME) Intrinsics */
#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME

#define _TMFAILURE_REASON 0x00007fffu
@@ -837,7 +843,7 @@

#endif /* __ARM_FEATURE_TME */

-/* Armv8.5-A Random number generation intrinsics */
+/* 8.7 Armv8.5-A Random number generation intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndr(uint64_t *__p) {
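For orientation, here is a brief usage sketch, not part of the commit above, showing how code that includes arm_acle.h might call two of the intrinsic families whose section comments are renumbered in this diff: data prefetch (now 7.6.1) and CRC32 (now 8.8). The function name and the CRC seed are illustrative assumptions; __pld and __crc32b are the intrinsics provided by the header.

/* Illustrative only; assumes an Arm target where __ARM_FEATURE_CRC32 is defined. */
#include <arm_acle.h>
#include <stdint.h>

uint32_t acle_crc32_example(const uint8_t *buf, unsigned n) {
  uint32_t crc = 0xFFFFFFFFu;        /* conventional CRC-32 seed */
  __pld(buf);                        /* 7.6.1 data prefetch hint */
#if defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32
  for (unsigned i = 0; i != n; ++i)
    crc = __crc32b(crc, buf[i]);     /* 8.8 CRC32: accumulate one byte */
#endif
  return ~crc;                       /* final inversion */
}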
