diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td index d2b7b78b9970f..716c2cd68ffcc 100644 --- a/clang/include/clang/Basic/arm_sve.td +++ b/clang/include/clang/Basic/arm_sve.td @@ -984,6 +984,11 @@ let SMETargetGuard = "sme2p2" in { def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNone, "aarch64_sve_compact", [VerifyRuntimeMode]>; } +let SVETargetGuard = "sve2p2|sme2p2", SMETargetGuard = "sme2p2" in { +def SVCOMPACT_BH : SInst<"svcompact[_{d}]", "dPd", "cUcsUsmbh", MergeNone, "aarch64_sve_compact", [VerifyRuntimeMode]>; +def SVEXPAND : SInst<"svexpand[_{d}]", "dPd", "cUcsUsiUilUlmbhfd", MergeNone, "aarch64_sve_expand", [VerifyRuntimeMode]>; +} + // Note: svdup_lane is implemented using the intrinsic for TBL to represent a // splat of any possible lane. It is upto LLVM to pick a more efficient // instruction such as DUP (indexed) if the lane index fits the range of the @@ -1111,6 +1116,11 @@ def SVCNTD : SInst<"svcntd", "nv", "", MergeNone, "aarch64_sve_cntd", [IsAppendS def SVCNTP : SInst<"svcntp_{d}", "nPP", "PcPsPiPl", MergeNone, "aarch64_sve_cntp", [VerifyRuntimeMode]>; def SVLEN : SInst<"svlen[_{d}]", "nd", "csilUcUsUiUlhfdb", MergeNone, "", [VerifyRuntimeMode]>; +let SVETargetGuard = "sve2p2|sme2p2", SMETargetGuard = "sve2p2|sme2p2" in { + def SVFIRSTP : SInst<"svfirstp_{d}", "lPP", "PcPsPiPl", MergeNone, "aarch64_sve_firstp", [VerifyRuntimeMode], []>; + def SVLASTP : SInst<"svlastp_{d}", "lPP", "PcPsPiPl", MergeNone, "aarch64_sve_lastp", [VerifyRuntimeMode], []>; +} + //////////////////////////////////////////////////////////////////////////////// // Saturating scalar arithmetic @@ -2388,4 +2398,4 @@ let SVETargetGuard = "sve2,fp8fma", SMETargetGuard = "ssve-fp8fma" in { def SVFMLALLBT_LANE : SInst<"svmlallbt_lane[_f32_mf8]", "dd~~i>", "f", MergeNone, "aarch64_sve_fp8_fmlallbt_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheck0_7>]>; def SVFMLALLTB_LANE : SInst<"svmlalltb_lane[_f32_mf8]", "dd~~i>", "f", 
MergeNone, "aarch64_sve_fp8_fmlalltb_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheck0_7>]>; def SVFMLALLTT_LANE : SInst<"svmlalltt_lane[_f32_mf8]", "dd~~i>", "f", MergeNone, "aarch64_sve_fp8_fmlalltt_lane", [VerifyRuntimeMode], [ImmCheck<3, ImmCheck0_7>]>; -} +} diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_compact.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_compact.c index 4c18969e78f0c..75ee18cb134d7 100644 --- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_compact.c +++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_compact.c @@ -14,6 +14,12 @@ #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 #endif +#ifdef __ARM_FEATURE_SME +#define STREAMING __arm_streaming +#else +#define STREAMING +#endif + // CHECK-LABEL: @test_svcompact_s32( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) diff --git a/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_compact.c b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_compact.c new file mode 100644 index 0000000000000..8bee2ed1121a6 --- /dev/null +++ b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_compact.c @@ -0,0 +1,142 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: aarch64-registered-target +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s + +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve 
-target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s + +#ifdef __ARM_FEATURE_SME +#include "arm_sme.h" +#else +#include "arm_sve.h" +#endif + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#ifdef __ARM_FEATURE_SME +#define STREAMING __arm_streaming +#else +#define STREAMING +#endif + +// CHECK-LABEL: @test_svcompact_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.compact.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +// CPP-CHECK-LABEL: @_Z17test_svcompact_s8u10__SVBool_tu10__SVInt8_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.compact.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP0]] +// +svint8_t test_svcompact_s8(svbool_t pg, svint8_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_s8,,)(pg, op); +} + +// CHECK-LABEL: @test_svcompact_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret 
[[TMP1]] +// +// CPP-CHECK-LABEL: @_Z18test_svcompact_s16u10__SVBool_tu11__SVInt16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svint16_t test_svcompact_s16(svbool_t pg, svint16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_s16,,)(pg, op); +} + +// CHECK-LABEL: @test_svcompact_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.compact.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +// CPP-CHECK-LABEL: @_Z17test_svcompact_u8u10__SVBool_tu11__SVUint8_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.compact.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP0]] +// +svuint8_t test_svcompact_u8(svbool_t pg, svuint8_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_u8,,)(pg, op); +} + +// CHECK-LABEL: @test_svcompact_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z18test_svcompact_u16u10__SVBool_tu12__SVUint16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svuint16_t test_svcompact_u16(svbool_t pg, svuint16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_u16,,)(pg, op); +} + +// CHECK-LABEL: @test_svcompact_mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.compact.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +// 
CPP-CHECK-LABEL: @_Z18test_svcompact_mf8u10__SVBool_tu13__SVMfloat8_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.compact.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP0]] +// +svmfloat8_t test_svcompact_mf8(svbool_t pg, svmfloat8_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_mf8,,)(pg, op); +} + +// CHECK-LABEL: @test_svcompact_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8f16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z18test_svcompact_f16u10__SVBool_tu13__SVFloat16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8f16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svfloat16_t test_svcompact_f16(svbool_t pg, svfloat16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_f16,,)(pg, op); +} + +// CHECK-LABEL: @test_svcompact_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8bf16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z19test_svcompact_bf16u10__SVBool_tu14__SVBfloat16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.compact.nxv8bf16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svbfloat16_t test_svcompact_bf16(svbool_t pg, svbfloat16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svcompact,_bf16,,)(pg, op); +} diff --git a/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_expand.c 
b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_expand.c new file mode 100644 index 0000000000000..ece0ce795df39 --- /dev/null +++ b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_expand.c @@ -0,0 +1,243 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: aarch64-registered-target +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s + +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s +#ifdef __ARM_FEATURE_SME +#include "arm_sme.h" +#else +#include "arm_sve.h" +#endif + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 +#endif + +#ifdef __ARM_FEATURE_SME +#define STREAMING __arm_streaming +#else +#define STREAMING +#endif + +// CHECK-LABEL: @test_svexpand_s8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.expand.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +// CPP-CHECK-LABEL: @_Z16test_svexpand_s8u10__SVBool_tu10__SVInt8_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.expand.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP0]] +// +svint8_t test_svexpand_s8(svbool_t pg, svint8_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_s8,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_s16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_s16u10__SVBool_tu11__SVInt16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svint16_t test_svexpand_s16(svbool_t pg, svint16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_s16,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_u8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.expand.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +// CPP-CHECK-LABEL: @_Z16test_svexpand_u8u10__SVBool_tu11__SVUint8_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.expand.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP0]] +// +svuint8_t test_svexpand_u8(svbool_t pg, 
svuint8_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_u8,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_u16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_u16u10__SVBool_tu12__SVUint16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8i16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svuint16_t test_svexpand_u16(svbool_t pg, svuint16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_u16,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.expand.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_mf8u10__SVBool_tu13__SVMfloat8_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.expand.nxv16i8( [[PG:%.*]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP0]] +// +svmfloat8_t test_svexpand_mf8(svbool_t pg, svmfloat8_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_mf8,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8f16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_f16u10__SVBool_tu13__SVFloat16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8f16( [[TMP0]], [[OP:%.*]]) +// 
CPP-CHECK-NEXT: ret [[TMP1]] +// +svfloat16_t test_svexpand_f16(svbool_t pg, svfloat16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_f16,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_bf16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8bf16( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z18test_svexpand_bf16u10__SVBool_tu14__SVBfloat16_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv8bf16( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svbfloat16_t test_svexpand_bf16(svbool_t pg, svbfloat16_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_bf16,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_s32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv4i32( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_s32u10__SVBool_tu11__SVInt32_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv4i32( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svint32_t test_svexpand_s32(svbool_t pg, svint32_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_s32,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_s64( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv2i64( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: 
@_Z17test_svexpand_s64u10__SVBool_tu11__SVInt64_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv2i64( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svint64_t test_svexpand_s64(svbool_t pg, svint64_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_s64,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_u32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv4i32( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_u32u10__SVBool_tu12__SVUint32_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv4i32( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svuint32_t test_svexpand_u32(svbool_t pg, svuint32_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_u32,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_u64( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv2i64( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_u64u10__SVBool_tu12__SVUint64_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv2i64( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svuint64_t test_svexpand_u64(svbool_t pg, svuint64_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_u64,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_f32( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv4f32( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_f32u10__SVBool_tu13__SVFloat32_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv4f32( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svfloat32_t test_svexpand_f32(svbool_t pg, svfloat32_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_f32,,)(pg, op); +} + +// CHECK-LABEL: @test_svexpand_f64( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv2f64( [[TMP0]], [[OP:%.*]]) +// CHECK-NEXT: ret [[TMP1]] +// +// CPP-CHECK-LABEL: @_Z17test_svexpand_f64u10__SVBool_tu13__SVFloat64_t( +// CPP-CHECK-NEXT: entry: +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.expand.nxv2f64( [[TMP0]], [[OP:%.*]]) +// CPP-CHECK-NEXT: ret [[TMP1]] +// +svfloat64_t test_svexpand_f64(svbool_t pg, svfloat64_t op) STREAMING +{ + return SVE_ACLE_FUNC(svexpand,_f64,,)(pg, op); +} diff --git a/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_firstp.c b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_firstp.c new file mode 100644 index 0000000000000..1656f10a83a90 --- /dev/null +++ b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_firstp.c @@ -0,0 +1,101 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6 +// REQUIRES: aarch64-registered-target +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature 
+sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s + +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s + +#ifdef __ARM_FEATURE_SME +#include "arm_sme.h" +#else +#include "arm_sve.h" +#endif + +#ifdef __ARM_FEATURE_SME +#define STREAMING __arm_streaming +#else +#define STREAMING +#endif + +// CHECK-LABEL: define dso_local i64 @test_svfirstp_b8( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv16i1( [[PG]], [[OP]]) +// CHECK-NEXT: ret i64 [[TMP0]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z16test_svfirstp_b8u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0:[0-9]+]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv16i1( [[PG]], [[OP]]) +// CPP-CHECK-NEXT: ret i64 [[TMP0]] +// +int64_t test_svfirstp_b8(svbool_t pg, svbool_t op) STREAMING +{ + return svfirstp_b8(pg, op); +} + +// CHECK-LABEL: define dso_local i64 @test_svfirstp_b16( +// CHECK-SAME: 
[[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[OP]]) +// CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv8i1( [[TMP0]], [[TMP1]]) +// CHECK-NEXT: ret i64 [[TMP2]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z17test_svfirstp_b16u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[OP]]) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv8i1( [[TMP0]], [[TMP1]]) +// CPP-CHECK-NEXT: ret i64 [[TMP2]] +// +int64_t test_svfirstp_b16(svbool_t pg, svbool_t op) STREAMING +{ + return svfirstp_b16(pg, op); +} + +// CHECK-LABEL: define dso_local i64 @test_svfirstp_b32( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[OP]]) +// CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv4i1( [[TMP0]], [[TMP1]]) +// CHECK-NEXT: ret i64 [[TMP2]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z17test_svfirstp_b32u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[OP]]) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv4i1( [[TMP0]], [[TMP1]]) +// CPP-CHECK-NEXT: ret i64 [[TMP2]] 
+// +int64_t test_svfirstp_b32(svbool_t pg, svbool_t op) STREAMING +{ + return svfirstp_b32(pg, op); +} + +// CHECK-LABEL: define dso_local i64 @test_svfirstp_b64( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[OP]]) +// CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv2i1( [[TMP0]], [[TMP1]]) +// CHECK-NEXT: ret i64 [[TMP2]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z17test_svfirstp_b64u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[OP]]) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.firstp.nxv2i1( [[TMP0]], [[TMP1]]) +// CPP-CHECK-NEXT: ret i64 [[TMP2]] +// +int64_t test_svfirstp_b64(svbool_t pg, svbool_t op) STREAMING +{ + return svfirstp_b64(pg, op); +} diff --git a/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_lastp.c b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_lastp.c new file mode 100644 index 0000000000000..bfe82af07f20c --- /dev/null +++ b/clang/test/CodeGen/AArch64/sve2p2-intrinsics/acle_sve2p2_lastp.c @@ -0,0 +1,101 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6 +// REQUIRES: aarch64-registered-target +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | 
FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sme2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s + +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +sve2p2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s + +#ifdef __ARM_FEATURE_SME +#include "arm_sme.h" +#else +#include "arm_sve.h" +#endif + +#ifdef __ARM_FEATURE_SME +#define STREAMING __arm_streaming +#else +#define STREAMING +#endif + +// CHECK-LABEL: define dso_local i64 @test_svlastp_b8( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv16i1( [[PG]], [[OP]]) +// CHECK-NEXT: ret i64 [[TMP0]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z15test_svlastp_b8u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0:[0-9]+]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv16i1( [[PG]], [[OP]]) +// CPP-CHECK-NEXT: ret i64 [[TMP0]] +// +int64_t test_svlastp_b8(svbool_t pg, svbool_t op) STREAMING +{ + return svlastp_b8(pg, op); +} + +// CHECK-LABEL: define dso_local i64 @test_svlastp_b16( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[OP]]) +// CHECK-NEXT: [[TMP2:%.*]] = tail call 
i64 @llvm.aarch64.sve.lastp.nxv8i1( [[TMP0]], [[TMP1]]) +// CHECK-NEXT: ret i64 [[TMP2]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z16test_svlastp_b16u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[OP]]) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv8i1( [[TMP0]], [[TMP1]]) +// CPP-CHECK-NEXT: ret i64 [[TMP2]] +// +int64_t test_svlastp_b16(svbool_t pg, svbool_t op) STREAMING +{ + return svlastp_b16(pg, op); +} + +// CHECK-LABEL: define dso_local i64 @test_svlastp_b32( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[OP]]) +// CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv4i1( [[TMP0]], [[TMP1]]) +// CHECK-NEXT: ret i64 [[TMP2]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z16test_svlastp_b32u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[OP]]) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv4i1( [[TMP0]], [[TMP1]]) +// CPP-CHECK-NEXT: ret i64 [[TMP2]] +// +int64_t test_svlastp_b32(svbool_t pg, svbool_t op) STREAMING +{ + return svlastp_b32(pg, op); +} + +// CHECK-LABEL: define dso_local i64 @test_svlastp_b64( +// CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = tail call 
@llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG]]) +// CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[OP]]) +// CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv2i1( [[TMP0]], [[TMP1]]) +// CHECK-NEXT: ret i64 [[TMP2]] +// +// CPP-CHECK-LABEL: define dso_local noundef i64 @_Z16test_svlastp_b64u10__SVBool_tS_( +// CPP-CHECK-SAME: [[PG:%.*]], [[OP:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG]]) +// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[OP]]) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.aarch64.sve.lastp.nxv2i1( [[TMP0]], [[TMP1]]) +// CPP-CHECK-NEXT: ret i64 [[TMP2]] +// +int64_t test_svlastp_b64(svbool_t pg, svbool_t op) STREAMING +{ + return svlastp_b64(pg, op); +} diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td index b0269eec3347a..2a06867cb31a7 100644 --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -1930,6 +1930,8 @@ def int_aarch64_sve_cntw : AdvSIMD_SVE_CNTB_Intrinsic; def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic; def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic; +def int_aarch64_sve_firstp : AdvSIMD_SVE_CNTP_Intrinsic; +def int_aarch64_sve_lastp : AdvSIMD_SVE_CNTP_Intrinsic; // // FFR manipulation @@ -2026,6 +2028,7 @@ def int_aarch64_sve_clasta_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic; def int_aarch64_sve_clastb : AdvSIMD_Pred2VectorArg_Intrinsic; def int_aarch64_sve_clastb_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic; def int_aarch64_sve_compact : AdvSIMD_Pred1VectorArg_Intrinsic; +def int_aarch64_sve_expand : AdvSIMD_Pred1VectorArg_Intrinsic; def int_aarch64_sve_dupq_lane : AdvSIMD_SVE_DUPQ_Intrinsic; def int_aarch64_sve_dup_laneq : SVE2_1VectorArgIndexed_Intrinsic; def int_aarch64_sve_ext : 
AdvSIMD_2VectorArgIndexed_Intrinsic; diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td index 98a128e582866..2d9ab2cb033b1 100644 --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -4604,8 +4604,8 @@ let Predicates = [HasSVE2p2_or_SME2p2] in { defm UXTW_ZPzZ : sve_int_un_pred_arit_d_z<0b101, "uxtw", AArch64uxt_mt>; // SVE predicate count - defm FIRSTP_XPP : sve_int_pcount_pred_tmp<0b001, "firstp">; - defm LASTP_XPP : sve_int_pcount_pred_tmp<0b010, "lastp">; + defm FIRSTP_XPP : sve_int_pcount_pred<0b001, "firstp", int_aarch64_sve_firstp>; + defm LASTP_XPP : sve_int_pcount_pred<0b010, "lastp", int_aarch64_sve_lastp>; // SVE reverse within elements, zeroing predicate defm RBIT_ZPzZ : sve_int_perm_rev_rbit_z<"rbit", AArch64rbit_mt>; @@ -4620,7 +4620,7 @@ let Predicates = [HasSVE2p2_or_SME2p2] in { //===----------------------------------------------------------------------===// let Predicates = [HasNonStreamingSVE2p2_or_SME2p2] in { // SVE2 EXPAND - defm EXPAND_ZPZ : sve2_int_perm_expand<"expand">; + defm EXPAND_ZPZ : sve2_int_perm_expand<"expand", int_aarch64_sve_expand>; // SVE COMPACT - byte and halfword defm COMPACT_ZPZ : sve_int_perm_compact_bh<"compact", int_aarch64_sve_compact>; } diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td index 3cdd505f12116..fe5e6a9e3583a 100644 --- a/llvm/lib/Target/AArch64/SVEInstrFormats.td +++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td @@ -1173,13 +1173,6 @@ multiclass sve_int_pcount_pred opc, string asm, def : SVE_2_Op_Pat(NAME # _S)>; def : SVE_2_Op_Pat(NAME # _D)>; } - -multiclass sve_int_pcount_pred_tmp opc, string asm> { - def _B : sve_int_pcount_pred<0b00, opc, asm, PPR8>; - def _H : sve_int_pcount_pred<0b01, opc, asm, PPR16>; - def _S : sve_int_pcount_pred<0b10, opc, asm, PPR32>; - def _D : sve_int_pcount_pred<0b11, opc, asm, PPR64>; -} 
//===----------------------------------------------------------------------===// // SVE Element Count Group //===----------------------------------------------------------------------===// @@ -7678,11 +7671,20 @@ class sve2_int_perm_expand sz, string asm, let hasSideEffects = 0; } -multiclass sve2_int_perm_expand { +multiclass sve2_int_perm_expand { def _B : sve2_int_perm_expand<0b00, asm, ZPR8>; def _H : sve2_int_perm_expand<0b01, asm, ZPR16>; def _S : sve2_int_perm_expand<0b10, asm, ZPR32>; def _D : sve2_int_perm_expand<0b11, asm, ZPR64>; + + def : SVE_2_Op_Pat(NAME # _B)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; + def : SVE_2_Op_Pat(NAME # _D)>; } class sve_int_perm_rev sz8_64, bits<2> opc, string asm, diff --git a/llvm/test/CodeGen/AArch64/sve2p2-intrinsics.ll b/llvm/test/CodeGen/AArch64/sve2p2-intrinsics.ll index 6017070b114a5..e4d25407add4a 100644 --- a/llvm/test/CodeGen/AArch64/sve2p2-intrinsics.ll +++ b/llvm/test/CodeGen/AArch64/sve2p2-intrinsics.ll @@ -87,6 +87,179 @@ define @compact_bf16( %pg, %out } +; +; EXPAND +; + +define @expand_i8( %pg, %a) { +; CHECK-LABEL: expand_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.b, p0, z0.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv16i8( %pg, + %a) + ret %out +} + +define @expand_i16( %pg, %a) { +; CHECK-LABEL: expand_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.h, p0, z0.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv8i16( %pg, + %a) + ret %out +} + +define @expand_i32( %pg, %a) { +; CHECK-LABEL: expand_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.s, p0, z0.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv4i32( %pg, + %a) + ret %out +} + +define @expand_i64( %pg, %a) { +; CHECK-LABEL: expand_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.d, p0, z0.d +; CHECK-NEXT: ret + %out = call 
@llvm.aarch64.sve.expand.nxv2i64( %pg, + %a) + ret %out +} + +define @expand_f16( %pg, %a) { +; CHECK-LABEL: expand_f16: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.h, p0, z0.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv8f16( %pg, + %a) + ret %out +} + +define @expand_f32( %pg, %a) { +; CHECK-LABEL: expand_f32: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.s, p0, z0.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv4f32( %pg, + %a) + ret %out +} + +define @expand_f64( %pg, %a) { +; CHECK-LABEL: expand_f64: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.d, p0, z0.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv2f64( %pg, + %a) + ret %out +} + +define @expand_bf16( %pg, %a) { +; CHECK-LABEL: expand_bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: expand z0.h, p0, z0.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.expand.nxv8bf16( %pg, + %a) + ret %out +} + +; +; FIRSTP +; + +define i64 @firstp_b8( %pg, %a) { +; CHECK-LABEL: firstp_b8: +; CHECK: // %bb.0: +; CHECK-NEXT: firstp x0, p0, p1.b +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.firstp.nxv16i1( %pg, + %a) + ret i64 %out +} + +define i64 @firstp_b16( %pg, %a) { +; CHECK-LABEL: firstp_b16: +; CHECK: // %bb.0: +; CHECK-NEXT: firstp x0, p0, p1.h +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.firstp.nxv8i1( %pg, + %a) + ret i64 %out +} + +define i64 @firstp_b32( %pg, %a) { +; CHECK-LABEL: firstp_b32: +; CHECK: // %bb.0: +; CHECK-NEXT: firstp x0, p0, p1.s +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.firstp.nxv4i1( %pg, + %a) + ret i64 %out +} + +define i64 @firstp_b64( %pg, %a) { +; CHECK-LABEL: firstp_b64: +; CHECK: // %bb.0: +; CHECK-NEXT: firstp x0, p0, p1.d +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.firstp.nxv2i1( %pg, + %a) + ret i64 %out +} + +; +; LASTP +; + +define i64 @lastp_b8( %pg, %a) { +; CHECK-LABEL: lastp_b8: +; CHECK: // %bb.0: +; CHECK-NEXT: lastp x0, p0, p1.b +; CHECK-NEXT: ret + %out = call i64 
@llvm.aarch64.sve.lastp.nxv16i1( %pg, + %a) + ret i64 %out +} + +define i64 @lastp_b16( %pg, %a) { +; CHECK-LABEL: lastp_b16: +; CHECK: // %bb.0: +; CHECK-NEXT: lastp x0, p0, p1.h +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.lastp.nxv8i1( %pg, + %a) + ret i64 %out +} + +define i64 @lastp_b32( %pg, %a) { +; CHECK-LABEL: lastp_b32: +; CHECK: // %bb.0: +; CHECK-NEXT: lastp x0, p0, p1.s +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.lastp.nxv4i1( %pg, + %a) + ret i64 %out +} + +define i64 @lastp_b64( %pg, %a) { +; CHECK-LABEL: lastp_b64: +; CHECK: // %bb.0: +; CHECK-NEXT: lastp x0, p0, p1.d +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.lastp.nxv2i1( %pg, + %a) + ret i64 %out +} + + declare @llvm.aarch64.sve.compact.nxv16i8(, ) declare @llvm.aarch64.sve.compact.nxv8i16(, ) declare @llvm.aarch64.sve.compact.nxv4i32(, )