| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svabalb_s16(svint16_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_s16'}} | ||
| return SVE_ACLE_FUNC(svabalb,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svabalb_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_s32'}} | ||
| return SVE_ACLE_FUNC(svabalb,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svabalb_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_s64'}} | ||
| return SVE_ACLE_FUNC(svabalb,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint16_t test_svabalb_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_u16'}} | ||
| return SVE_ACLE_FUNC(svabalb,_u16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint32_t test_svabalb_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_u32'}} | ||
| return SVE_ACLE_FUNC(svabalb,_u32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint64_t test_svabalb_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_u64'}} | ||
| return SVE_ACLE_FUNC(svabalb,_u64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svabalb_n_s16(svint16_t op1, svint8_t op2, int8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svabalb,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svabalb_n_s32(svint32_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svabalb,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svabalb_n_s64(svint64_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svabalb,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint16_t test_svabalb_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svabalb,_n_u16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint32_t test_svabalb_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svabalb,_n_u32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint64_t test_svabalb_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalb_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svabalb,_n_u64,,)(op1, op2, op3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svabalt_s16(svint16_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_s16'}} | ||
| return SVE_ACLE_FUNC(svabalt,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svabalt_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_s32'}} | ||
| return SVE_ACLE_FUNC(svabalt,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svabalt_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_s64'}} | ||
| return SVE_ACLE_FUNC(svabalt,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint16_t test_svabalt_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_u16'}} | ||
| return SVE_ACLE_FUNC(svabalt,_u16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint32_t test_svabalt_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_u32'}} | ||
| return SVE_ACLE_FUNC(svabalt,_u32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint64_t test_svabalt_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_u64'}} | ||
| return SVE_ACLE_FUNC(svabalt,_u64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svabalt_n_s16(svint16_t op1, svint8_t op2, int8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svabalt,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svabalt_n_s32(svint32_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svabalt,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svabalt_n_s64(svint64_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svabalt,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint16_t test_svabalt_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svabalt,_n_u16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint32_t test_svabalt_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svabalt,_n_u32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint64_t test_svabalt_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svabalt_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabalt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svabalt,_n_u64,,)(op1, op2, op3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svabdlb_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_s16'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svabdlb_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_s32'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svabdlb_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_s64'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svabdlb_u16(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_u16'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svabdlb_u32(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_u32'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svabdlb_u64(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_u64'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svabdlb_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svabdlb_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svabdlb_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svabdlb_n_u16(svuint8_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svabdlb_n_u32(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svabdlb_n_u64(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlb_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svabdlb,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svabdlt_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_s16'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svabdlt_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_s32'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svabdlt_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_s64'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svabdlt_u16(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_u16'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svabdlt_u32(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_u32'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svabdlt_u64(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_u64'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svabdlt_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svabdlt_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svabdlt_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svabdlt_n_u16(svuint8_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svabdlt_n_u32(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svabdlt_n_u64(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svabdlt_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svabdlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svabdlt,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint8_t test_svaddhnb_s16(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_s16'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddhnb_s32(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_s32'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddhnb_s64(svint64_t op1, svint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_s64'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint8_t test_svaddhnb_u16(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_u16'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddhnb_u32(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_u32'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddhnb_u64(svuint64_t op1, svuint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_u64'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint8_t test_svaddhnb_n_s16(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddhnb_n_s32(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddhnb_n_s64(svint64_t op1, int64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint8_t test_svaddhnb_n_u16(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddhnb_n_u32(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddhnb_n_u64(svuint64_t op1, uint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnb_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svaddhnb,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint8_t test_svaddhnt_s16(svint8_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_s16'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svaddhnt_s32(svint16_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_s32'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svaddhnt_s64(svint32_t op1, svint64_t op2, svint64_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_s64'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint8_t test_svaddhnt_u16(svuint8_t op1, svuint16_t op2, svuint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_u16'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_u16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint16_t test_svaddhnt_u32(svuint16_t op1, svuint32_t op2, svuint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_u32'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_u32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint32_t test_svaddhnt_u64(svuint32_t op1, svuint64_t op2, svuint64_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_u64'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_u64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint8_t test_svaddhnt_n_s16(svint8_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svaddhnt_n_s32(svint16_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svaddhnt_n_s64(svint32_t op1, svint64_t op2, int64_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint8_t test_svaddhnt_n_u16(svuint8_t op1, svuint16_t op2, uint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_n_u16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint16_t test_svaddhnt_n_u32(svuint16_t op1, svuint32_t op2, uint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_n_u32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svuint32_t test_svaddhnt_n_u64(svuint32_t op1, svuint64_t op2, uint64_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svaddhnt_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddhnt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddhnt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svaddhnt,_n_u64,,)(op1, op2, op3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svaddlb_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_s16'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddlb_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_s32'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddlb_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_s64'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddlb_u16(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_u16'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddlb_u32(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_u32'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddlb_u64(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_u64'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddlb_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddlb_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddlb_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddlb_n_u16(svuint8_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddlb_n_u32(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddlb_n_u64(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlb_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svaddlb,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,76 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svaddlbt_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlbt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlbt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlbt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlbt_s16'}} | ||
| return SVE_ACLE_FUNC(svaddlbt,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddlbt_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlbt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlbt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlbt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlbt_s32'}} | ||
| return SVE_ACLE_FUNC(svaddlbt,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddlbt_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlbt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlbt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlbt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlbt_s64'}} | ||
| return SVE_ACLE_FUNC(svaddlbt,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddlbt_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlbt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlbt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlbt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlbt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddlbt,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddlbt_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlbt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlbt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlbt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlbt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddlbt,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddlbt_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlbt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlbt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlbt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlbt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddlbt,_n_s64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svaddlt_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_s16'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddlt_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_s32'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddlt_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_s64'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddlt_u16(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_u16'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddlt_u32(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_u32'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddlt_u64(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_u64'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddlt_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddlt_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddlt_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddlt_n_u16(svuint8_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddlt_n_u32(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddlt_n_u64(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddlt_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddlt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svaddlt,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svaddwb_s16(svint16_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_s16'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddwb_s32(svint32_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_s32'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddwb_s64(svint64_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_s64'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddwb_u16(svuint16_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_u16'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddwb_u32(svuint32_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_u32'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddwb_u64(svuint64_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_u64'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddwb_n_s16(svint16_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddwb_n_s32(svint32_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddwb_n_s64(svint64_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddwb_n_u16(svuint16_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddwb_n_u32(svuint32_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddwb_n_u64(svuint64_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwb_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svaddwb,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svaddwt_s16(svint16_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_s16'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddwt_s32(svint32_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_s32'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddwt_s64(svint64_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_s64'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddwt_u16(svuint16_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_u16'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddwt_u32(svuint32_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_u32'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddwt_u64(svuint64_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_u64'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svaddwt_n_s16(svint16_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svaddwt_n_s32(svint32_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svaddwt_n_s64(svint64_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svaddwt_n_u16(svuint16_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svaddwt_n_u32(svuint32_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svaddwt_n_u64(svuint64_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svaddwt_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svaddwt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svaddwt,_n_u64,,)(op1, op2); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,173 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint8_t test_svcadd_s8(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s8 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 90) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s8'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s8,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svint8_t test_svcadd_s8_1(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s8_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 270) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s8'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s8,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svint16_t test_svcadd_s16(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 90) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s16'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s16,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svint16_t test_svcadd_s16_1(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s16_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 270) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s16'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s16,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svint32_t test_svcadd_s32(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 90) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s32'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s32,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svint32_t test_svcadd_s32_1(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 270) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s32'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s32,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svint64_t test_svcadd_s64(svint64_t op1, svint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 90) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s64'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s64,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svint64_t test_svcadd_s64_1(svint64_t op1, svint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 270) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_s64'}} | ||
| return SVE_ACLE_FUNC(svcadd,_s64,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svuint8_t test_svcadd_u8(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u8 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 90) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u8'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u8,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svuint8_t test_svcadd_u8_1(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u8_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.cadd.x.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 270) | ||
| // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u8'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u8,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svuint16_t test_svcadd_u16(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 90) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u16'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u16,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svuint16_t test_svcadd_u16_1(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u16_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.cadd.x.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 270) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u16'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u16,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svuint32_t test_svcadd_u32(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 90) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u32'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u32,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svuint32_t test_svcadd_u32_1(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cadd.x.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 270) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u32'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u32,,)(op1, op2, 270); | ||
| } | ||
|
|
||
| svuint64_t test_svcadd_u64(svuint64_t op1, svuint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 90) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u64'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u64,,)(op1, op2, 90); | ||
| } | ||
|
|
||
| svuint64_t test_svcadd_u64_1(svuint64_t op1, svuint64_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svcadd_u64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cadd.x.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 270) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcadd'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcadd_u64'}} | ||
| return SVE_ACLE_FUNC(svcadd,_u64,,)(op1, op2, 270); | ||
| } |
// ===========================================================================
// Second test file (svcdot tests; a new 123-line file in the original diff).
// ===========================================================================
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint32_t test_svcdot_s32(svint32_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cdot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s32'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s32,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint32_t test_svcdot_s32_1(svint32_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cdot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 90) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s32'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s32,,)(op1, op2, op3, 90); | ||
| } | ||
|
|
||
| svint32_t test_svcdot_s32_2(svint32_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s32_2 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cdot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 180) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s32'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s32,,)(op1, op2, op3, 180); | ||
| } | ||
|
|
||
| svint32_t test_svcdot_s32_3(svint32_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s32_3 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cdot.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 270) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s32'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s32,,)(op1, op2, op3, 270); | ||
| } | ||
|
|
||
| svint64_t test_svcdot_s64(svint64_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cdot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s64'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s64,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint64_t test_svcdot_s64_1(svint64_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cdot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 90) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s64'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s64,,)(op1, op2, op3, 90); | ||
| } | ||
|
|
||
| svint64_t test_svcdot_s64_2(svint64_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s64_2 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cdot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 180) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s64'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s64,,)(op1, op2, op3, 180); | ||
| } | ||
|
|
||
| svint64_t test_svcdot_s64_3(svint64_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_s64_3 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cdot.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 270) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_s64'}} | ||
| return SVE_ACLE_FUNC(svcdot,_s64,,)(op1, op2, op3, 270); | ||
| } | ||
|
|
||
| svint32_t test_svcdot_lane_s32(svint32_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cdot.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 0, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svcdot_lane,_s32,,)(op1, op2, op3, 0, 0); | ||
| } | ||
|
|
||
| svint32_t test_svcdot_lane_s32_1(svint32_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.cdot.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3, i32 2, i32 90) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svcdot_lane,_s32,,)(op1, op2, op3, 2, 90); | ||
| } | ||
|
|
||
| svint64_t test_svcdot_lane_s64(svint64_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svcdot_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.cdot.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0, i32 180) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svcdot_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svcdot_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svcdot_lane,_s64,,)(op1, op2, op3, 0, 180); | ||
| } |