diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index b77a091224f66d..eaee860bd3c094 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -626,9 +626,6 @@ defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad">;
 defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla">;
 defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls">;
 defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb">;
-////////////////////////////////////////////////////////////////////////////////
-// Permutations and selection
-def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;

 ////////////////////////////////////////////////////////////////////////////////
 // Shifts
@@ -856,6 +853,44 @@ def SVCVTXNT_F32 : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch6
 def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
 def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;

+////////////////////////////////////////////////////////////////////////////////
+// Permutations and selection
+
+def SVCLASTA : SInst<"svclasta[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clasta">;
+def SVCLASTA_N : SInst<"svclasta[_n_{d}]", "sPsd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clasta_n">;
+def SVCLASTB : SInst<"svclastb[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb">;
+def SVCLASTB_N : SInst<"svclastb[_n_{d}]", "sPsd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb_n">;
+def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact">;
+// SVDUP_LANE (to land in D78750)
+// SVDUPQ_LANE (to land in D78750)
+def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
+def SVLASTA : SInst<"svlasta[_{d}]", "sPd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lasta">;
+def SVLASTB : SInst<"svlastb[_{d}]", "sPd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lastb">;
+def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_rev">;
+def SVSEL : SInst<"svsel[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_sel">;
+def SVSPLICE : SInst<"svsplice[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_splice">;
+def SVTBL : SInst<"svtbl[_{d}]", "ddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
+def SVTRN1 : SInst<"svtrn1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1">;
+def SVTRN2 : SInst<"svtrn2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2">;
+def SVUNPKHI_S : SInst<"svunpkhi[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpkhi">;
+def SVUNPKHI_U : SInst<"svunpkhi[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpkhi">;
+def SVUNPKLO_S : SInst<"svunpklo[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpklo">;
+def SVUNPKLO_U : SInst<"svunpklo[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpklo">;
+def SVUZP1 : SInst<"svuzp1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1">;
+def SVUZP2 : SInst<"svuzp2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2">;
+def SVZIP1 : SInst<"svzip1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1">;
+def SVZIP2 : SInst<"svzip2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2">;
+
+def SVREV_B : SInst<"svrev_{d}", "PP", "PcPsPiPl", MergeNone, "aarch64_sve_rev">;
+def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel">;
+def SVTRN1_B : SInst<"svtrn1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn1">;
+def SVTRN2_B : SInst<"svtrn2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn2">;
+def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi">;
+def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo">;
+def SVUZP1_B : SInst<"svuzp1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp1">;
+def SVUZP2_B : SInst<"svuzp2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp2">;
+def SVZIP1_B : SInst<"svzip1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip1">;
+def SVZIP2_B : SInst<"svzip2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip2">;

 ////////////////////////////////////////////////////////////////////////////////
 // Predicate creation
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_clasta.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_clasta.c
new file mode 100644
index 00000000000000..db5f8bf6ce2f75
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_clasta.c
@@ -0,0 +1,205 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svclasta_s8(svbool_t pg, svint8_t fallback, svint8_t data)
+{
+  // CHECK-LABEL: test_svclasta_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.clasta.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %fallback, <vscale x 16 x i8> %data)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svclasta,_s8,,)(pg, fallback, data);
+}
+
+svint16_t test_svclasta_s16(svbool_t pg, svint16_t fallback, svint16_t data)
+{
+  // CHECK-LABEL: test_svclasta_s16
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.clasta.nxv8i16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x i16> %fallback, <vscale x 8 x i16> %data)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svclasta,_s16,,)(pg, fallback, data);
+}
+
+svint32_t test_svclasta_s32(svbool_t pg, svint32_t fallback, svint32_t data)
+{
+  // CHECK-LABEL: test_svclasta_s32
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.clasta.nxv4i32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x i32> %fallback, <vscale x 4 x i32> %data)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svclasta,_s32,,)(pg, fallback, data);
+}
+
+svint64_t test_svclasta_s64(svbool_t pg, svint64_t fallback, svint64_t data)
+{
+  // CHECK-LABEL: test_svclasta_s64
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.clasta.nxv2i64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x i64> %fallback, <vscale x 2 x i64> %data)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svclasta,_s64,,)(pg, fallback, data);
+}
+
+svuint8_t test_svclasta_u8(svbool_t pg, svuint8_t fallback, svuint8_t data)
+{
+  // CHECK-LABEL: test_svclasta_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.clasta.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %fallback, <vscale x 16 x i8>
%data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_u8,,)(pg, fallback, data); +} + +svuint16_t test_svclasta_u16(svbool_t pg, svuint16_t fallback, svuint16_t data) +{ + // CHECK-LABEL: test_svclasta_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clasta.nxv8i16( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_u16,,)(pg, fallback, data); +} + +svuint32_t test_svclasta_u32(svbool_t pg, svuint32_t fallback, svuint32_t data) +{ + // CHECK-LABEL: test_svclasta_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clasta.nxv4i32( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_u32,,)(pg, fallback, data); +} + +svuint64_t test_svclasta_u64(svbool_t pg, svuint64_t fallback, svuint64_t data) +{ + // CHECK-LABEL: test_svclasta_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clasta.nxv2i64( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_u64,,)(pg, fallback, data); +} + +svfloat16_t test_svclasta_f16(svbool_t pg, svfloat16_t fallback, svfloat16_t data) +{ + // CHECK-LABEL: test_svclasta_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clasta.nxv8f16( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_f16,,)(pg, fallback, data); +} + +svfloat32_t test_svclasta_f32(svbool_t pg, svfloat32_t fallback, svfloat32_t data) +{ + // CHECK-LABEL: test_svclasta_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clasta.nxv4f32( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_f32,,)(pg, fallback, data); +} + +svfloat64_t test_svclasta_f64(svbool_t pg, svfloat64_t fallback, svfloat64_t data) +{ + // CHECK-LABEL: test_svclasta_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clasta.nxv2f64( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_f64,,)(pg, fallback, data); +} + +int8_t test_svclasta_n_s8(svbool_t pg, int8_t fallback, svint8_t data) +{ + // CHECK-LABEL: test_svclasta_n_s8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clasta.n.nxv16i8( %pg, i8 %fallback, %data) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_s8,,)(pg, fallback, data); +} + +int16_t test_svclasta_n_s16(svbool_t pg, int16_t fallback, svint16_t data) +{ + // CHECK-LABEL: test_svclasta_n_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clasta.n.nxv8i16( %[[PG]], i16 %fallback, %data) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_s16,,)(pg, fallback, data); +} + +int32_t test_svclasta_n_s32(svbool_t pg, int32_t fallback, svint32_t data) +{ + // CHECK-LABEL: test_svclasta_n_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clasta.n.nxv4i32( %[[PG]], i32 %fallback, %data) + // CHECK: ret i32 %[[INTRINSIC]] + 
return SVE_ACLE_FUNC(svclasta,_n_s32,,)(pg, fallback, data); +} + +int64_t test_svclasta_n_s64(svbool_t pg, int64_t fallback, svint64_t data) +{ + // CHECK-LABEL: test_svclasta_n_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clasta.n.nxv2i64( %[[PG]], i64 %fallback, %data) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_s64,,)(pg, fallback, data); +} + +uint8_t test_svclasta_n_u8(svbool_t pg, uint8_t fallback, svuint8_t data) +{ + // CHECK-LABEL: test_svclasta_n_u8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clasta.n.nxv16i8( %pg, i8 %fallback, %data) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_u8,,)(pg, fallback, data); +} + +uint16_t test_svclasta_n_u16(svbool_t pg, uint16_t fallback, svuint16_t data) +{ + // CHECK-LABEL: test_svclasta_n_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clasta.n.nxv8i16( %[[PG]], i16 %fallback, %data) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_u16,,)(pg, fallback, data); +} + +uint32_t test_svclasta_n_u32(svbool_t pg, uint32_t fallback, svuint32_t data) +{ + // CHECK-LABEL: test_svclasta_n_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clasta.n.nxv4i32( %[[PG]], i32 %fallback, %data) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_u32,,)(pg, fallback, data); +} + +uint64_t test_svclasta_n_u64(svbool_t pg, uint64_t fallback, svuint64_t data) +{ + // CHECK-LABEL: test_svclasta_n_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clasta.n.nxv2i64( %[[PG]], i64 %fallback, %data) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_u64,,)(pg, fallback, data); +} + +float16_t test_svclasta_n_f16(svbool_t pg, float16_t fallback, svfloat16_t data) +{ + // CHECK-LABEL: test_svclasta_n_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.clasta.n.nxv8f16( %[[PG]], half %fallback, %data) + // CHECK: ret half %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_f16,,)(pg, fallback, data); +} + +float32_t test_svclasta_n_f32(svbool_t pg, float32_t fallback, svfloat32_t data) +{ + // CHECK-LABEL: test_svclasta_n_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.clasta.n.nxv4f32( %[[PG]], float %fallback, %data) + // CHECK: ret float %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_f32,,)(pg, fallback, data); +} + +float64_t test_svclasta_n_f64(svbool_t pg, float64_t fallback, svfloat64_t data) +{ + // CHECK-LABEL: test_svclasta_n_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.clasta.n.nxv2f64( %[[PG]], double %fallback, %data) + // CHECK: ret double %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclasta,_n_f64,,)(pg, fallback, data); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_clastb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_clastb.c new file mode 100644 index 00000000000000..0d25c48d22ba53 --- /dev/null +++ 
b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_clastb.c @@ -0,0 +1,205 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svclastb_s8(svbool_t pg, svint8_t fallback, svint8_t data) +{ + // CHECK-LABEL: test_svclastb_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv16i8( %pg, %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_s8,,)(pg, fallback, data); +} + +svint16_t test_svclastb_s16(svbool_t pg, svint16_t fallback, svint16_t data) +{ + // CHECK-LABEL: test_svclastb_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv8i16( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_s16,,)(pg, fallback, data); +} + +svint32_t test_svclastb_s32(svbool_t pg, svint32_t fallback, svint32_t data) +{ + // CHECK-LABEL: test_svclastb_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv4i32( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_s32,,)(pg, fallback, data); +} + +svint64_t test_svclastb_s64(svbool_t pg, svint64_t fallback, svint64_t data) +{ + // CHECK-LABEL: test_svclastb_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv2i64( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_s64,,)(pg, fallback, data); +} + +svuint8_t test_svclastb_u8(svbool_t pg, svuint8_t fallback, svuint8_t data) +{ + // CHECK-LABEL: test_svclastb_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv16i8( %pg, %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_u8,,)(pg, fallback, data); +} + +svuint16_t test_svclastb_u16(svbool_t pg, svuint16_t fallback, svuint16_t data) +{ + // CHECK-LABEL: test_svclastb_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv8i16( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_u16,,)(pg, fallback, data); +} + +svuint32_t test_svclastb_u32(svbool_t pg, svuint32_t fallback, svuint32_t data) +{ + // CHECK-LABEL: test_svclastb_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv4i32( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_u32,,)(pg, fallback, data); +} + +svuint64_t test_svclastb_u64(svbool_t pg, svuint64_t fallback, svuint64_t data) +{ + // CHECK-LABEL: test_svclastb_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call 
@llvm.aarch64.sve.clastb.nxv2i64( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_u64,,)(pg, fallback, data); +} + +svfloat16_t test_svclastb_f16(svbool_t pg, svfloat16_t fallback, svfloat16_t data) +{ + // CHECK-LABEL: test_svclastb_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv8f16( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_f16,,)(pg, fallback, data); +} + +svfloat32_t test_svclastb_f32(svbool_t pg, svfloat32_t fallback, svfloat32_t data) +{ + // CHECK-LABEL: test_svclastb_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv4f32( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_f32,,)(pg, fallback, data); +} + +svfloat64_t test_svclastb_f64(svbool_t pg, svfloat64_t fallback, svfloat64_t data) +{ + // CHECK-LABEL: test_svclastb_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.clastb.nxv2f64( %[[PG]], %fallback, %data) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_f64,,)(pg, fallback, data); +} + +int8_t test_svclastb_n_s8(svbool_t pg, int8_t fallback, svint8_t data) +{ + // CHECK-LABEL: test_svclastb_n_s8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clastb.n.nxv16i8( %pg, i8 %fallback, %data) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_s8,,)(pg, fallback, data); +} + +int16_t test_svclastb_n_s16(svbool_t pg, int16_t fallback, svint16_t data) +{ + // CHECK-LABEL: test_svclastb_n_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clastb.n.nxv8i16( %[[PG]], i16 %fallback, %data) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_s16,,)(pg, fallback, data); +} + +int32_t test_svclastb_n_s32(svbool_t pg, int32_t fallback, svint32_t data) +{ + // CHECK-LABEL: test_svclastb_n_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clastb.n.nxv4i32( %[[PG]], i32 %fallback, %data) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_s32,,)(pg, fallback, data); +} + +int64_t test_svclastb_n_s64(svbool_t pg, int64_t fallback, svint64_t data) +{ + // CHECK-LABEL: test_svclastb_n_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clastb.n.nxv2i64( %[[PG]], i64 %fallback, %data) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_s64,,)(pg, fallback, data); +} + +uint8_t test_svclastb_n_u8(svbool_t pg, uint8_t fallback, svuint8_t data) +{ + // CHECK-LABEL: test_svclastb_n_u8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.clastb.n.nxv16i8( %pg, i8 %fallback, %data) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_u8,,)(pg, fallback, data); +} + +uint16_t test_svclastb_n_u16(svbool_t pg, uint16_t fallback, svuint16_t data) +{ + // CHECK-LABEL: test_svclastb_n_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.clastb.n.nxv8i16( %[[PG]], i16 %fallback, %data) + // CHECK: ret i16 
%[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_u16,,)(pg, fallback, data); +} + +uint32_t test_svclastb_n_u32(svbool_t pg, uint32_t fallback, svuint32_t data) +{ + // CHECK-LABEL: test_svclastb_n_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.clastb.n.nxv4i32( %[[PG]], i32 %fallback, %data) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_u32,,)(pg, fallback, data); +} + +uint64_t test_svclastb_n_u64(svbool_t pg, uint64_t fallback, svuint64_t data) +{ + // CHECK-LABEL: test_svclastb_n_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.clastb.n.nxv2i64( %[[PG]], i64 %fallback, %data) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_u64,,)(pg, fallback, data); +} + +float16_t test_svclastb_n_f16(svbool_t pg, float16_t fallback, svfloat16_t data) +{ + // CHECK-LABEL: test_svclastb_n_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.clastb.n.nxv8f16( %[[PG]], half %fallback, %data) + // CHECK: ret half %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_f16,,)(pg, fallback, data); +} + +float32_t test_svclastb_n_f32(svbool_t pg, float32_t fallback, svfloat32_t data) +{ + // CHECK-LABEL: test_svclastb_n_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.clastb.n.nxv4f32( %[[PG]], float %fallback, %data) + // CHECK: ret float %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_f32,,)(pg, fallback, data); +} + +float64_t test_svclastb_n_f64(svbool_t pg, float64_t fallback, svfloat64_t data) +{ + // CHECK-LABEL: test_svclastb_n_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.clastb.n.nxv2f64( %[[PG]], double %fallback, %data) + // CHECK: ret double %[[INTRINSIC]] + return SVE_ACLE_FUNC(svclastb,_n_f64,,)(pg, fallback, data); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_compact.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_compact.c new file mode 100644 index 00000000000000..03cf3f36d8d8cd --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_compact.c @@ -0,0 +1,65 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint32_t test_svcompact_s32(svbool_t pg, svint32_t op) +{ + // CHECK-LABEL: test_svcompact_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.compact.nxv4i32( %[[PG]], %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svcompact,_s32,,)(pg, op); +} + +svint64_t test_svcompact_s64(svbool_t pg, svint64_t op) +{ + // CHECK-LABEL: test_svcompact_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.compact.nxv2i64( %[[PG]], %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svcompact,_s64,,)(pg, op); +} + +svuint32_t test_svcompact_u32(svbool_t pg, svuint32_t op) +{ + // CHECK-LABEL: test_svcompact_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.compact.nxv4i32( %[[PG]], %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svcompact,_u32,,)(pg, op); +} + +svuint64_t test_svcompact_u64(svbool_t pg, svuint64_t op) +{ + // CHECK-LABEL: test_svcompact_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.compact.nxv2i64( %[[PG]], %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svcompact,_u64,,)(pg, op); +} + +svfloat32_t test_svcompact_f32(svbool_t pg, svfloat32_t op) +{ + // CHECK-LABEL: test_svcompact_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.compact.nxv4f32( %[[PG]], %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svcompact,_f32,,)(pg, op); +} + +svfloat64_t test_svcompact_f64(svbool_t pg, svfloat64_t op) +{ + // CHECK-LABEL: test_svcompact_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.compact.nxv2f64( %[[PG]], %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svcompact,_f64,,)(pg, op); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_lasta.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_lasta.c new file mode 100644 index 00000000000000..2e99d68e51c457 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_lasta.c @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +int8_t test_svlasta_s8(svbool_t pg, svint8_t op) +{ + // CHECK-LABEL: test_svlasta_s8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lasta.nxv16i8( %pg, %op) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_s8,,)(pg, op); +} + +int16_t test_svlasta_s16(svbool_t pg, svint16_t op) +{ + // CHECK-LABEL: test_svlasta_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lasta.nxv8i16( %[[PG]], %op) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_s16,,)(pg, op); +} + +int32_t test_svlasta_s32(svbool_t pg, svint32_t op) +{ + // CHECK-LABEL: test_svlasta_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lasta.nxv4i32( %[[PG]], %op) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_s32,,)(pg, op); +} + +int64_t test_svlasta_s64(svbool_t pg, svint64_t op) +{ + // CHECK-LABEL: test_svlasta_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lasta.nxv2i64( %[[PG]], %op) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_s64,,)(pg, op); +} + +uint8_t test_svlasta_u8(svbool_t pg, svuint8_t op) +{ + // CHECK-LABEL: test_svlasta_u8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lasta.nxv16i8( %pg, %op) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_u8,,)(pg, op); +} + +uint16_t test_svlasta_u16(svbool_t pg, svuint16_t op) +{ + // CHECK-LABEL: test_svlasta_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lasta.nxv8i16( %[[PG]], %op) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_u16,,)(pg, op); +} + +uint32_t test_svlasta_u32(svbool_t pg, svuint32_t op) +{ + // CHECK-LABEL: test_svlasta_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lasta.nxv4i32( %[[PG]], %op) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_u32,,)(pg, op); +} + +uint64_t test_svlasta_u64(svbool_t pg, svuint64_t op) +{ + // CHECK-LABEL: test_svlasta_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lasta.nxv2i64( %[[PG]], %op) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_u64,,)(pg, op); +} + +float16_t test_svlasta_f16(svbool_t pg, svfloat16_t op) +{ + // CHECK-LABEL: test_svlasta_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.lasta.nxv8f16( %[[PG]], %op) + // CHECK: ret half %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_f16,,)(pg, op); +} + +float32_t test_svlasta_f32(svbool_t pg, svfloat32_t op) +{ + // CHECK-LABEL: test_svlasta_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.lasta.nxv4f32( %[[PG]], %op) + // CHECK: ret float %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_f32,,)(pg, op); +} + +float64_t test_svlasta_f64(svbool_t pg, svfloat64_t op) +{ + // CHECK-LABEL: test_svlasta_f64 + // CHECK: 
%[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.lasta.nxv2f64( %[[PG]], %op) + // CHECK: ret double %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlasta,_f64,,)(pg, op); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_lastb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_lastb.c new file mode 100644 index 00000000000000..c48fb76da7220b --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_lastb.c @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +int8_t test_svlastb_s8(svbool_t pg, svint8_t op) +{ + // CHECK-LABEL: test_svlastb_s8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lastb.nxv16i8( %pg, %op) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_s8,,)(pg, op); +} + +int16_t test_svlastb_s16(svbool_t pg, svint16_t op) +{ + // CHECK-LABEL: test_svlastb_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lastb.nxv8i16( %[[PG]], %op) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_s16,,)(pg, op); +} + +int32_t test_svlastb_s32(svbool_t pg, svint32_t op) +{ + // CHECK-LABEL: test_svlastb_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lastb.nxv4i32( %[[PG]], %op) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_s32,,)(pg, op); +} + +int64_t test_svlastb_s64(svbool_t pg, svint64_t op) +{ + // CHECK-LABEL: test_svlastb_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lastb.nxv2i64( %[[PG]], %op) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_s64,,)(pg, op); +} + +uint8_t test_svlastb_u8(svbool_t pg, svuint8_t op) +{ + // CHECK-LABEL: test_svlastb_u8 + // CHECK: %[[INTRINSIC:.*]] = call i8 @llvm.aarch64.sve.lastb.nxv16i8( %pg, %op) + // CHECK: ret i8 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_u8,,)(pg, op); +} + +uint16_t test_svlastb_u16(svbool_t pg, svuint16_t op) +{ + // CHECK-LABEL: test_svlastb_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i16 @llvm.aarch64.sve.lastb.nxv8i16( %[[PG]], %op) + // CHECK: ret i16 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_u16,,)(pg, op); +} + +uint32_t test_svlastb_u32(svbool_t pg, svuint32_t op) +{ + // CHECK-LABEL: test_svlastb_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.lastb.nxv4i32( %[[PG]], %op) + // CHECK: ret i32 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_u32,,)(pg, op); +} + +uint64_t test_svlastb_u64(svbool_t pg, svuint64_t op) +{ + // CHECK-LABEL: test_svlastb_u64 + // CHECK: %[[PG:.*]] = call 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.lastb.nxv2i64( %[[PG]], %op) + // CHECK: ret i64 %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_u64,,)(pg, op); +} + +float16_t test_svlastb_f16(svbool_t pg, svfloat16_t op) +{ + // CHECK-LABEL: test_svlastb_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call half @llvm.aarch64.sve.lastb.nxv8f16( %[[PG]], %op) + // CHECK: ret half %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_f16,,)(pg, op); +} + +float32_t test_svlastb_f32(svbool_t pg, svfloat32_t op) +{ + // CHECK-LABEL: test_svlastb_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call float @llvm.aarch64.sve.lastb.nxv4f32( %[[PG]], %op) + // CHECK: ret float %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_f32,,)(pg, op); +} + +float64_t test_svlastb_f64(svbool_t pg, svfloat64_t op) +{ + // CHECK-LABEL: test_svlastb_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call double @llvm.aarch64.sve.lastb.nxv2f64( %[[PG]], %op) + // CHECK: ret double %[[INTRINSIC]] + return SVE_ACLE_FUNC(svlastb,_f64,,)(pg, op); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_rev.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_rev.c new file mode 100644 index 00000000000000..fa275a8fab4ff4 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_rev.c @@ -0,0 +1,137 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svrev_s8(svint8_t op) +{ + // CHECK-LABEL: test_svrev_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv16i8( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_s8,,)(op); +} + +svint16_t test_svrev_s16(svint16_t op) +{ + // CHECK-LABEL: test_svrev_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv8i16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_s16,,)(op); +} + +svint32_t test_svrev_s32(svint32_t op) +{ + // CHECK-LABEL: test_svrev_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv4i32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_s32,,)(op); +} + +svint64_t test_svrev_s64(svint64_t op) +{ + // CHECK-LABEL: test_svrev_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv2i64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_s64,,)(op); +} + +svuint8_t test_svrev_u8(svuint8_t op) +{ + // CHECK-LABEL: test_svrev_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv16i8( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_u8,,)(op); +} + +svuint16_t test_svrev_u16(svuint16_t op) +{ + // CHECK-LABEL: test_svrev_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv8i16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_u16,,)(op); +} + +svuint32_t test_svrev_u32(svuint32_t op) +{ + // CHECK-LABEL: test_svrev_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv4i32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_u32,,)(op); +} + +svuint64_t test_svrev_u64(svuint64_t op) +{ + // CHECK-LABEL: test_svrev_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv2i64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_u64,,)(op); +} + +svfloat16_t test_svrev_f16(svfloat16_t op) +{ + // CHECK-LABEL: test_svrev_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv8f16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_f16,,)(op); +} + +svfloat32_t test_svrev_f32(svfloat32_t op) +{ + // CHECK-LABEL: test_svrev_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv4f32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_f32,,)(op); +} + +svfloat64_t test_svrev_f64(svfloat64_t op) +{ + // CHECK-LABEL: test_svrev_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv2f64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svrev,_f64,,)(op); +} + +svbool_t test_svrev_b8(svbool_t op) +{ + // CHECK-LABEL: test_svrev_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv16i1( %op) + // CHECK: ret %[[INTRINSIC]] + return svrev_b8(op); +} + +svbool_t test_svrev_b16(svbool_t op) +{ + // CHECK-LABEL: test_svrev_b16 + // CHECK: %[[OP:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv8i1( %[[OP]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svrev_b16(op); +} + +svbool_t test_svrev_b32(svbool_t op) +{ + // CHECK-LABEL: test_svrev_b32 + // CHECK: %[[OP:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv4i1( %[[OP]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret 
%[[CAST]] + return svrev_b32(op); +} + +svbool_t test_svrev_b64(svbool_t op) +{ + // CHECK-LABEL: test_svrev_b64 + // CHECK: %[[OP:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.rev.nxv2i1( %[[OP]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svrev_b64(op); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_sel.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_sel.c new file mode 100644 index 00000000000000..97acd393f4d03f --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_sel.c @@ -0,0 +1,116 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svsel_s8(svbool_t pg, svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsel_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv16i8( %pg, %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_s8,,)(pg, op1, op2); +} + +svint16_t test_svsel_s16(svbool_t pg, svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsel_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv8i16( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_s16,,)(pg, op1, op2); +} + +svint32_t test_svsel_s32(svbool_t pg, svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsel_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv4i32( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_s32,,)(pg, op1, op2); +} + +svint64_t test_svsel_s64(svbool_t pg, svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsel_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv2i64( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_s64,,)(pg, op1, op2); +} + +svuint8_t test_svsel_u8(svbool_t pg, svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsel_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv16i8( %pg, %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_u8,,)(pg, op1, op2); +} + +svuint16_t test_svsel_u16(svbool_t pg, svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsel_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv8i16( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_u16,,)(pg, op1, op2); +} + +svuint32_t test_svsel_u32(svbool_t pg, svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsel_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = 
call @llvm.aarch64.sve.sel.nxv4i32( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_u32,,)(pg, op1, op2); +} + +svuint64_t test_svsel_u64(svbool_t pg, svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsel_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv2i64( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_u64,,)(pg, op1, op2); +} + +svfloat16_t test_svsel_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svsel_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv8f16( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_f16,,)(pg, op1, op2); +} + +svfloat32_t test_svsel_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svsel_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv4f32( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_f32,,)(pg, op1, op2); +} + +svfloat64_t test_svsel_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svsel_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv2f64( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_f64,,)(pg, op1, op2); +} + +svbool_t test_svsel_b(svbool_t pg, svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svsel_b + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sel.nxv16i1( %pg, %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsel,_b,,)(pg, op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_splice.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_splice.c new file mode 100644 index 00000000000000..795fb704267447 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_splice.c @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svsplice_s8(svbool_t pg, svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svsplice_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv16i8( %pg, %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_s8,,)(pg, op1, op2); +} + +svint16_t test_svsplice_s16(svbool_t pg, svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svsplice_s16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv8i16( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_s16,,)(pg, op1, op2); +} + +svint32_t test_svsplice_s32(svbool_t pg, svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svsplice_s32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv4i32( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_s32,,)(pg, op1, op2); +} + +svint64_t test_svsplice_s64(svbool_t pg, svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svsplice_s64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv2i64( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_s64,,)(pg, op1, op2); +} + +svuint8_t test_svsplice_u8(svbool_t pg, svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svsplice_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv16i8( %pg, %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_u8,,)(pg, op1, op2); +} + +svuint16_t test_svsplice_u16(svbool_t pg, svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svsplice_u16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv8i16( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_u16,,)(pg, op1, op2); +} + +svuint32_t test_svsplice_u32(svbool_t pg, svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svsplice_u32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv4i32( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_u32,,)(pg, op1, op2); +} + +svuint64_t test_svsplice_u64(svbool_t pg, svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svsplice_u64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv2i64( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_u64,,)(pg, op1, op2); +} + +svfloat16_t test_svsplice_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svsplice_f16 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv8f16( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_f16,,)(pg, op1, op2); +} + +svfloat32_t test_svsplice_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svsplice_f32 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %pg) + // CHECK: 
%[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv4f32( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_f32,,)(pg, op1, op2); +} + +svfloat64_t test_svsplice_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svsplice_f64 + // CHECK: %[[PG:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %pg) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.splice.nxv2f64( %[[PG]], %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svsplice,_f64,,)(pg, op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tbl.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tbl.c new file mode 100644 index 00000000000000..a2b7ee5f7495fe --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_tbl.c @@ -0,0 +1,99 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svtbl_s8(svint8_t data, svuint8_t indices) +{ + // CHECK-LABEL: test_svtbl_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv16i8( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_s8,,)(data, indices); +} + +svint16_t test_svtbl_s16(svint16_t data, svuint16_t indices) +{ + // CHECK-LABEL: test_svtbl_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv8i16( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_s16,,)(data, indices); +} + +svint32_t test_svtbl_s32(svint32_t data, svuint32_t indices) +{ + // CHECK-LABEL: test_svtbl_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv4i32( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_s32,,)(data, indices); +} + +svint64_t test_svtbl_s64(svint64_t data, svuint64_t indices) +{ + // CHECK-LABEL: test_svtbl_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv2i64( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_s64,,)(data, indices); +} + +svuint8_t test_svtbl_u8(svuint8_t data, svuint8_t indices) +{ + // CHECK-LABEL: test_svtbl_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv16i8( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_u8,,)(data, indices); +} + +svuint16_t test_svtbl_u16(svuint16_t data, svuint16_t indices) +{ + // CHECK-LABEL: test_svtbl_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv8i16( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_u16,,)(data, indices); +} + +svuint32_t test_svtbl_u32(svuint32_t data, svuint32_t indices) +{ + // CHECK-LABEL: test_svtbl_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv4i32( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_u32,,)(data, indices); +} + +svuint64_t test_svtbl_u64(svuint64_t data, svuint64_t indices) +{ + // CHECK-LABEL: test_svtbl_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv2i64( %data, %indices) + // 
CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_u64,,)(data, indices); +} + +svfloat16_t test_svtbl_f16(svfloat16_t data, svuint16_t indices) +{ + // CHECK-LABEL: test_svtbl_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv8f16( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_f16,,)(data, indices); +} + +svfloat32_t test_svtbl_f32(svfloat32_t data, svuint32_t indices) +{ + // CHECK-LABEL: test_svtbl_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv4f32( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_f32,,)(data, indices); +} + +svfloat64_t test_svtbl_f64(svfloat64_t data, svuint64_t indices) +{ + // CHECK-LABEL: test_svtbl_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.tbl.nxv2f64( %data, %indices) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtbl,_f64,,)(data, indices); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1.c new file mode 100644 index 00000000000000..6e7cbed5350e9c --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn1.c @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svtrn1_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svtrn1_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_s8,,)(op1, op2); +} + +svint16_t test_svtrn1_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svtrn1_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_s16,,)(op1, op2); +} + +svint32_t test_svtrn1_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svtrn1_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_s32,,)(op1, op2); +} + +svint64_t test_svtrn1_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svtrn1_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_s64,,)(op1, op2); +} + +svuint8_t test_svtrn1_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svtrn1_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_u8,,)(op1, op2); +} + +svuint16_t test_svtrn1_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svtrn1_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_u16,,)(op1, op2); +} + +svuint32_t test_svtrn1_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svtrn1_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv4i32( %op1, 
%op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_u32,,)(op1, op2); +} + +svuint64_t test_svtrn1_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svtrn1_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_u64,,)(op1, op2); +} + +svfloat16_t test_svtrn1_f16(svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svtrn1_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv8f16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_f16,,)(op1, op2); +} + +svfloat32_t test_svtrn1_f32(svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svtrn1_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv4f32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_f32,,)(op1, op2); +} + +svfloat64_t test_svtrn1_f64(svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svtrn1_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv2f64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn1,_f64,,)(op1, op2); +} + +svbool_t test_svtrn1_b8(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn1_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv16i1( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return svtrn1_b8(op1, op2); +} + +svbool_t test_svtrn1_b16(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn1_b16 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv8i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svtrn1_b16(op1, op2); +} + +svbool_t test_svtrn1_b32(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn1_b32 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv4i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svtrn1_b32(op1, op2); +} + +svbool_t test_svtrn1_b64(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn1_b64 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn1.nxv2i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svtrn1_b64(op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2.c new file mode 100644 index 00000000000000..432370bb459f9a --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_trn2.c @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall 
-emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svtrn2_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svtrn2_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_s8,,)(op1, op2); +} + +svint16_t test_svtrn2_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svtrn2_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_s16,,)(op1, op2); +} + +svint32_t test_svtrn2_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svtrn2_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_s32,,)(op1, op2); +} + +svint64_t test_svtrn2_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svtrn2_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_s64,,)(op1, op2); +} + +svuint8_t test_svtrn2_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svtrn2_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_u8,,)(op1, op2); +} + +svuint16_t test_svtrn2_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svtrn2_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_u16,,)(op1, op2); +} + +svuint32_t test_svtrn2_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svtrn2_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_u32,,)(op1, op2); +} + +svuint64_t test_svtrn2_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svtrn2_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_u64,,)(op1, op2); +} + +svfloat16_t test_svtrn2_f16(svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svtrn2_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv8f16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_f16,,)(op1, op2); +} + +svfloat32_t test_svtrn2_f32(svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svtrn2_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv4f32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_f32,,)(op1, op2); +} + +svfloat64_t test_svtrn2_f64(svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svtrn2_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv2f64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svtrn2,_f64,,)(op1, op2); +} + +svbool_t test_svtrn2_b8(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn2_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv16i1( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return svtrn2_b8(op1, op2); +} + +svbool_t test_svtrn2_b16(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn2_b16 + // CHECK-DAG: %[[OP1:.*]] = call 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv8i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svtrn2_b16(op1, op2); +} + +svbool_t test_svtrn2_b32(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn2_b32 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv4i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svtrn2_b32(op1, op2); +} + +svbool_t test_svtrn2_b64(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svtrn2_b64 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.trn2.nxv2i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svtrn2_b64(op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_unpkhi.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_unpkhi.c new file mode 100644 index 00000000000000..5bab85658561fa --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_unpkhi.c @@ -0,0 +1,68 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint16_t test_svunpkhi_s16(svint8_t op) +{ + // CHECK-LABEL: test_svunpkhi_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sunpkhi.nxv8i16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpkhi,_s16,,)(op); +} + +svint32_t test_svunpkhi_s32(svint16_t op) +{ + // CHECK-LABEL: test_svunpkhi_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sunpkhi.nxv4i32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpkhi,_s32,,)(op); +} + +svint64_t test_svunpkhi_s64(svint32_t op) +{ + // CHECK-LABEL: test_svunpkhi_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sunpkhi.nxv2i64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpkhi,_s64,,)(op); +} + +svuint16_t test_svunpkhi_u16(svuint8_t op) +{ + // CHECK-LABEL: test_svunpkhi_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uunpkhi.nxv8i16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpkhi,_u16,,)(op); +} + +svuint32_t test_svunpkhi_u32(svuint16_t op) +{ + // CHECK-LABEL: test_svunpkhi_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uunpkhi.nxv4i32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpkhi,_u32,,)(op); +} + +svuint64_t test_svunpkhi_u64(svuint32_t op) +{ + // CHECK-LABEL: test_svunpkhi_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uunpkhi.nxv2i64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpkhi,_u64,,)(op); +} + +svbool_t test_svunpkhi_b(svbool_t op) +{ + // CHECK-LABEL: test_svunpkhi_b + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.punpkhi.nxv16i1( %op) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return SVE_ACLE_FUNC(svunpkhi,_b,,)(op); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_unpklo.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_unpklo.c new file mode 100644 index 00000000000000..343f61c5257f65 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_unpklo.c @@ -0,0 +1,68 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint16_t test_svunpklo_s16(svint8_t op) +{ + // CHECK-LABEL: test_svunpklo_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sunpklo.nxv8i16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpklo,_s16,,)(op); +} + +svint32_t test_svunpklo_s32(svint16_t op) +{ + // CHECK-LABEL: test_svunpklo_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sunpklo.nxv4i32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpklo,_s32,,)(op); +} + +svint64_t test_svunpklo_s64(svint32_t op) +{ + // CHECK-LABEL: test_svunpklo_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.sunpklo.nxv2i64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpklo,_s64,,)(op); +} + +svuint16_t test_svunpklo_u16(svuint8_t op) +{ + // CHECK-LABEL: test_svunpklo_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uunpklo.nxv8i16( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpklo,_u16,,)(op); +} + +svuint32_t test_svunpklo_u32(svuint16_t op) +{ + // CHECK-LABEL: test_svunpklo_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uunpklo.nxv4i32( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpklo,_u32,,)(op); +} + +svuint64_t test_svunpklo_u64(svuint32_t op) +{ + // CHECK-LABEL: test_svunpklo_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uunpklo.nxv2i64( %op) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svunpklo,_u64,,)(op); +} + +svbool_t test_svunpklo_b(svbool_t op) +{ + // CHECK-LABEL: test_svunpklo_b + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.punpklo.nxv16i1( %op) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return SVE_ACLE_FUNC(svunpklo,_b,,)(op); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1.c new file mode 100644 index 00000000000000..d95cb5ec9bf894 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp1.c @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svuzp1_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svuzp1_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_s8,,)(op1, op2); +} + +svint16_t test_svuzp1_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svuzp1_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_s16,,)(op1, op2); +} + +svint32_t test_svuzp1_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svuzp1_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_s32,,)(op1, op2); +} + +svint64_t test_svuzp1_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svuzp1_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_s64,,)(op1, op2); +} + +svuint8_t test_svuzp1_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svuzp1_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_u8,,)(op1, op2); +} + +svuint16_t test_svuzp1_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svuzp1_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_u16,,)(op1, op2); +} + +svuint32_t test_svuzp1_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svuzp1_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_u32,,)(op1, op2); +} + +svuint64_t test_svuzp1_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svuzp1_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_u64,,)(op1, op2); +} + +svfloat16_t test_svuzp1_f16(svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svuzp1_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv8f16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_f16,,)(op1, op2); +} + +svfloat32_t test_svuzp1_f32(svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svuzp1_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv4f32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_f32,,)(op1, op2); +} + +svfloat64_t test_svuzp1_f64(svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svuzp1_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv2f64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp1,_f64,,)(op1, op2); +} + +svbool_t test_svuzp1_b8(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp1_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv16i1( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return svuzp1_b8(op1, op2); +} + +svbool_t test_svuzp1_b16(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp1_b16 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call 
@llvm.aarch64.sve.uzp1.nxv8i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svuzp1_b16(op1, op2); +} + +svbool_t test_svuzp1_b32(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp1_b32 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv4i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svuzp1_b32(op1, op2); +} + +svbool_t test_svuzp1_b64(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp1_b64 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp1.nxv2i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svuzp1_b64(op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2.c new file mode 100644 index 00000000000000..b359aa68e8c06a --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_uzp2.c @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svuzp2_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svuzp2_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_s8,,)(op1, op2); +} + +svint16_t test_svuzp2_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svuzp2_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_s16,,)(op1, op2); +} + +svint32_t test_svuzp2_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svuzp2_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_s32,,)(op1, op2); +} + +svint64_t test_svuzp2_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svuzp2_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_s64,,)(op1, op2); +} + +svuint8_t test_svuzp2_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svuzp2_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_u8,,)(op1, op2); +} + +svuint16_t test_svuzp2_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svuzp2_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_u16,,)(op1, op2); +} + +svuint32_t test_svuzp2_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svuzp2_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_u32,,)(op1, op2); +} + +svuint64_t test_svuzp2_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svuzp2_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_u64,,)(op1, op2); +} + +svfloat16_t test_svuzp2_f16(svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svuzp2_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv8f16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_f16,,)(op1, op2); +} + +svfloat32_t test_svuzp2_f32(svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svuzp2_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv4f32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_f32,,)(op1, op2); +} + +svfloat64_t test_svuzp2_f64(svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svuzp2_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv2f64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svuzp2,_f64,,)(op1, op2); +} + +svbool_t test_svuzp2_b8(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp2_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv16i1( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return svuzp2_b8(op1, op2); +} + +svbool_t test_svuzp2_b16(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp2_b16 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call 
@llvm.aarch64.sve.uzp2.nxv8i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svuzp2_b16(op1, op2); +} + +svbool_t test_svuzp2_b32(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp2_b32 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv4i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svuzp2_b32(op1, op2); +} + +svbool_t test_svuzp2_b64(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svuzp2_b64 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.uzp2.nxv2i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svuzp2_b64(op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1.c new file mode 100644 index 00000000000000..3444ac9aa1abb7 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip1.c @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svzip1_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svzip1_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_s8,,)(op1, op2); +} + +svint16_t test_svzip1_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svzip1_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_s16,,)(op1, op2); +} + +svint32_t test_svzip1_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svzip1_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_s32,,)(op1, op2); +} + +svint64_t test_svzip1_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svzip1_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_s64,,)(op1, op2); +} + +svuint8_t test_svzip1_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svzip1_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_u8,,)(op1, op2); +} + +svuint16_t test_svzip1_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svzip1_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_u16,,)(op1, op2); +} + +svuint32_t test_svzip1_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svzip1_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_u32,,)(op1, op2); +} + +svuint64_t test_svzip1_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svzip1_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_u64,,)(op1, op2); +} + +svfloat16_t test_svzip1_f16(svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svzip1_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv8f16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_f16,,)(op1, op2); +} + +svfloat32_t test_svzip1_f32(svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svzip1_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv4f32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_f32,,)(op1, op2); +} + +svfloat64_t test_svzip1_f64(svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svzip1_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv2f64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip1,_f64,,)(op1, op2); +} + +svbool_t test_svzip1_b8(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip1_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv16i1( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return svzip1_b8(op1, op2); +} + +svbool_t test_svzip1_b16(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip1_b16 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call 
@llvm.aarch64.sve.zip1.nxv8i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svzip1_b16(op1, op2); +} + +svbool_t test_svzip1_b32(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip1_b32 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv4i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svzip1_b32(op1, op2); +} + +svbool_t test_svzip1_b64(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip1_b64 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip1.nxv2i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svzip1_b64(op1, op2); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2.c new file mode 100644 index 00000000000000..bba3aca2502a33 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_zip2.c @@ -0,0 +1,140 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s + +#include + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +svint8_t test_svzip2_s8(svint8_t op1, svint8_t op2) +{ + // CHECK-LABEL: test_svzip2_s8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_s8,,)(op1, op2); +} + +svint16_t test_svzip2_s16(svint16_t op1, svint16_t op2) +{ + // CHECK-LABEL: test_svzip2_s16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_s16,,)(op1, op2); +} + +svint32_t test_svzip2_s32(svint32_t op1, svint32_t op2) +{ + // CHECK-LABEL: test_svzip2_s32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_s32,,)(op1, op2); +} + +svint64_t test_svzip2_s64(svint64_t op1, svint64_t op2) +{ + // CHECK-LABEL: test_svzip2_s64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_s64,,)(op1, op2); +} + +svuint8_t test_svzip2_u8(svuint8_t op1, svuint8_t op2) +{ + // CHECK-LABEL: test_svzip2_u8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv16i8( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_u8,,)(op1, op2); +} + +svuint16_t test_svzip2_u16(svuint16_t op1, svuint16_t op2) +{ + // CHECK-LABEL: test_svzip2_u16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv8i16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_u16,,)(op1, op2); +} + +svuint32_t test_svzip2_u32(svuint32_t op1, svuint32_t op2) +{ + // CHECK-LABEL: test_svzip2_u32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv4i32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_u32,,)(op1, op2); +} + +svuint64_t test_svzip2_u64(svuint64_t op1, svuint64_t op2) +{ + // CHECK-LABEL: test_svzip2_u64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv2i64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_u64,,)(op1, op2); +} + +svfloat16_t test_svzip2_f16(svfloat16_t op1, svfloat16_t op2) +{ + // CHECK-LABEL: test_svzip2_f16 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv8f16( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_f16,,)(op1, op2); +} + +svfloat32_t test_svzip2_f32(svfloat32_t op1, svfloat32_t op2) +{ + // CHECK-LABEL: test_svzip2_f32 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv4f32( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_f32,,)(op1, op2); +} + +svfloat64_t test_svzip2_f64(svfloat64_t op1, svfloat64_t op2) +{ + // CHECK-LABEL: test_svzip2_f64 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv2f64( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return SVE_ACLE_FUNC(svzip2,_f64,,)(op1, op2); +} + +svbool_t test_svzip2_b8(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip2_b8 + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv16i1( %op1, %op2) + // CHECK: ret %[[INTRINSIC]] + return svzip2_b8(op1, op2); +} + +svbool_t test_svzip2_b16(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip2_b16 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call 
@llvm.aarch64.sve.zip2.nxv8i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svzip2_b16(op1, op2); +} + +svbool_t test_svzip2_b32(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip2_b32 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv4i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svzip2_b32(op1, op2); +} + +svbool_t test_svzip2_b64(svbool_t op1, svbool_t op2) +{ + // CHECK-LABEL: test_svzip2_b64 + // CHECK-DAG: %[[OP1:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op1) + // CHECK-DAG: %[[OP2:.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( %op2) + // CHECK: %[[INTRINSIC:.*]] = call @llvm.aarch64.sve.zip2.nxv2i1( %[[OP1]], %[[OP2]]) + // CHECK: %[[CAST:.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %[[INTRINSIC]]) + // CHECK: ret %[[CAST]] + return svzip2_b64(op1, op2); +}
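Note on the test idiom used in all of the files above: each file is compiled twice by its RUN lines, once with -DSVE_OVERLOADED_FORMS and once without, and the SVE_ACLE_FUNC macro selects the matching intrinsic spelling in each mode. A minimal sketch of the expansion, using svzip2 as the example (illustrative only, not part of the patch):

    #ifdef SVE_OVERLOADED_FORMS
    // Overloaded form: only the base name (A1) and A3 survive the paste.
    #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
    #else
    // Non-overloaded form: all four pieces are pasted together.
    #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
    #endif

    // With -DSVE_OVERLOADED_FORMS, the call in test_svzip2_s8 expands to the
    // type-overloaded ACLE name:
    //   SVE_ACLE_FUNC(svzip2,_s8,,)(op1, op2)  ->  svzip2(op1, op2)
    // Without it, the fully mangled name is used instead:
    //   SVE_ACLE_FUNC(svzip2,_s8,,)(op1, op2)  ->  svzip2_s8(op1, op2)

Both spellings lower to the same @llvm.aarch64.sve.zip2 call, which is why a single set of CHECK lines serves both RUN invocations. The predicate variants follow a second pattern worth noting: for element widths above one byte (the _b16/_b32/_b64 tests), the incoming svbool_t is narrowed with @llvm.aarch64.sve.convert.from.svbool before the permute and widened back with @llvm.aarch64.sve.convert.to.svbool afterwards, which is exactly what the CHECK-DAG/CHECK lines in those tests verify.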