| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,219 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
// NOTE(review): codegen + semantic tests for the SVE2 svmlslt (multiply
// subtract long, top halves) ACLE intrinsics. The "// CHECK" comments are
// FileCheck directives and the "// overload-warning@+2" /
// "// expected-warning@+1" comments are clang -verify directives whose @+N
// offsets are relative line numbers — comment placement inside each function
// is load-bearing and must not change.
svint16_t test_svmlslt_s16(svint16_t op1, svint8_t op2, svint8_t op3)
{
  // CHECK-LABEL: test_svmlslt_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_s16'}}
  return SVE_ACLE_FUNC(svmlslt,_s16,,)(op1, op2, op3);
}

svint32_t test_svmlslt_s32(svint32_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_s32'}}
  return SVE_ACLE_FUNC(svmlslt,_s32,,)(op1, op2, op3);
}

svint64_t test_svmlslt_s64(svint64_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_s64'}}
  return SVE_ACLE_FUNC(svmlslt,_s64,,)(op1, op2, op3);
}

svuint16_t test_svmlslt_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
{
  // CHECK-LABEL: test_svmlslt_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_u16'}}
  return SVE_ACLE_FUNC(svmlslt,_u16,,)(op1, op2, op3);
}

svuint32_t test_svmlslt_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_u32'}}
  return SVE_ACLE_FUNC(svmlslt,_u32,,)(op1, op2, op3);
}

svuint64_t test_svmlslt_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_u64'}}
  return SVE_ACLE_FUNC(svmlslt,_u64,,)(op1, op2, op3);
}

// _n_ forms: the scalar third operand is splatted via sve.dup.x before the
// multiply-subtract.
svint16_t test_svmlslt_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
{
  // CHECK-LABEL: test_svmlslt_n_s16
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s16'}}
  return SVE_ACLE_FUNC(svmlslt,_n_s16,,)(op1, op2, op3);
}

svint32_t test_svmlslt_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
{
  // CHECK-LABEL: test_svmlslt_n_s32
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s32'}}
  return SVE_ACLE_FUNC(svmlslt,_n_s32,,)(op1, op2, op3);
}

svint64_t test_svmlslt_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
{
  // CHECK-LABEL: test_svmlslt_n_s64
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s64'}}
  return SVE_ACLE_FUNC(svmlslt,_n_s64,,)(op1, op2, op3);
}

svuint16_t test_svmlslt_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
{
  // CHECK-LABEL: test_svmlslt_n_u16
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u16'}}
  return SVE_ACLE_FUNC(svmlslt,_n_u16,,)(op1, op2, op3);
}

svuint32_t test_svmlslt_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_n_u32
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u32'}}
  return SVE_ACLE_FUNC(svmlslt,_n_u32,,)(op1, op2, op3);
}

svuint64_t test_svmlslt_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_n_u64
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u64'}}
  return SVE_ACLE_FUNC(svmlslt,_n_u64,,)(op1, op2, op3);
}

// _lane forms: both the minimum (0) and maximum legal lane index are tested
// per element width (7 for 16-bit source elements, 3 for 32-bit).
svint32_t test_svmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s32'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_s32,,)(op1, op2, op3, 0);
}

svint32_t test_svmlslt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_s32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s32'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_s32,,)(op1, op2, op3, 7);
}

svint64_t test_svmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s64'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_s64,,)(op1, op2, op3, 0);
}

svint64_t test_svmlslt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_s64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s64'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_s64,,)(op1, op2, op3, 3);
}

svuint32_t test_svmlslt_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u32'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_u32,,)(op1, op2, op3, 0);
}

svuint32_t test_svmlslt_lane_u32_1(svuint32_t op1, svuint16_t op2, svuint16_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_u32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u32'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_u32,,)(op1, op2, op3, 7);
}

svuint64_t test_svmlslt_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u64'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_u64,,)(op1, op2, op3, 0);
}

svuint64_t test_svmlslt_lane_u64_1(svuint64_t op1, svuint32_t op2, svuint32_t op3)
{
  // CHECK-LABEL: test_svmlslt_lane_u64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u64'}}
  return SVE_ACLE_FUNC(svmlslt_lane,_u64,,)(op1, op2, op3, 3);
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,219 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
// NOTE(review): codegen + semantic tests for the SVE2 svmullb (multiply
// long, bottom halves) ACLE intrinsics. "// CHECK" comments are FileCheck
// directives; "// overload-warning@+2" / "// expected-warning@+1" comments
// are clang -verify directives with relative (@+N) line offsets, so comment
// placement inside each function is load-bearing and must not change.
svint16_t test_svmullb_s16(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmullb_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_s16'}}
  return SVE_ACLE_FUNC(svmullb,_s16,,)(op1, op2);
}

svint32_t test_svmullb_s32(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmullb_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_s32'}}
  return SVE_ACLE_FUNC(svmullb,_s32,,)(op1, op2);
}

svint64_t test_svmullb_s64(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmullb_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_s64'}}
  return SVE_ACLE_FUNC(svmullb,_s64,,)(op1, op2);
}

svuint16_t test_svmullb_u16(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmullb_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_u16'}}
  return SVE_ACLE_FUNC(svmullb,_u16,,)(op1, op2);
}

svuint32_t test_svmullb_u32(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmullb_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_u32'}}
  return SVE_ACLE_FUNC(svmullb,_u32,,)(op1, op2);
}

svuint64_t test_svmullb_u64(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmullb_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_u64'}}
  return SVE_ACLE_FUNC(svmullb,_u64,,)(op1, op2);
}

// _n_ forms: the scalar second operand is splatted via sve.dup.x before the
// widening multiply.
svint16_t test_svmullb_n_s16(svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_svmullb_n_s16
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_s16'}}
  return SVE_ACLE_FUNC(svmullb,_n_s16,,)(op1, op2);
}

svint32_t test_svmullb_n_s32(svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_svmullb_n_s32
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_s32'}}
  return SVE_ACLE_FUNC(svmullb,_n_s32,,)(op1, op2);
}

svint64_t test_svmullb_n_s64(svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_svmullb_n_s64
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_s64'}}
  return SVE_ACLE_FUNC(svmullb,_n_s64,,)(op1, op2);
}

svuint16_t test_svmullb_n_u16(svuint8_t op1, uint8_t op2)
{
  // CHECK-LABEL: test_svmullb_n_u16
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_u16'}}
  return SVE_ACLE_FUNC(svmullb,_n_u16,,)(op1, op2);
}

svuint32_t test_svmullb_n_u32(svuint16_t op1, uint16_t op2)
{
  // CHECK-LABEL: test_svmullb_n_u32
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_u32'}}
  return SVE_ACLE_FUNC(svmullb,_n_u32,,)(op1, op2);
}

svuint64_t test_svmullb_n_u64(svuint32_t op1, uint32_t op2)
{
  // CHECK-LABEL: test_svmullb_n_u64
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_u64'}}
  return SVE_ACLE_FUNC(svmullb,_n_u64,,)(op1, op2);
}

// _lane forms: minimum (0) and maximum legal lane index are tested per
// element width (7 for 16-bit source elements, 3 for 32-bit).
svint32_t test_svmullb_lane_s32(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s32'}}
  return SVE_ACLE_FUNC(svmullb_lane,_s32,,)(op1, op2, 0);
}

svint32_t test_svmullb_lane_s32_1(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_s32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s32'}}
  return SVE_ACLE_FUNC(svmullb_lane,_s32,,)(op1, op2, 7);
}

svint64_t test_svmullb_lane_s64(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s64'}}
  return SVE_ACLE_FUNC(svmullb_lane,_s64,,)(op1, op2, 0);
}

svint64_t test_svmullb_lane_s64_1(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_s64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s64'}}
  return SVE_ACLE_FUNC(svmullb_lane,_s64,,)(op1, op2, 3);
}

svuint32_t test_svmullb_lane_u32(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u32'}}
  return SVE_ACLE_FUNC(svmullb_lane,_u32,,)(op1, op2, 0);
}

svuint32_t test_svmullb_lane_u32_1(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_u32_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u32'}}
  return SVE_ACLE_FUNC(svmullb_lane,_u32,,)(op1, op2, 7);
}

svuint64_t test_svmullb_lane_u64(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u64'}}
  return SVE_ACLE_FUNC(svmullb_lane,_u64,,)(op1, op2, 0);
}

svuint64_t test_svmullb_lane_u64_1(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmullb_lane_u64_1
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u64'}}
  return SVE_ACLE_FUNC(svmullb_lane,_u64,,)(op1, op2, 3);
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,219 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
// NOTE(review): codegen + semantic tests for the SVE2 svmullt (multiply
// long, top halves) ACLE intrinsics. "// CHECK" comments are FileCheck
// directives; "// overload-warning@+2" / "// expected-warning@+1" comments
// are clang -verify directives with relative (@+N) line offsets, so comment
// placement inside each function is load-bearing and must not change.
svint16_t test_svmullt_s16(svint8_t op1, svint8_t op2)
{
  // CHECK-LABEL: test_svmullt_s16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_s16'}}
  return SVE_ACLE_FUNC(svmullt,_s16,,)(op1, op2);
}

svint32_t test_svmullt_s32(svint16_t op1, svint16_t op2)
{
  // CHECK-LABEL: test_svmullt_s32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_s32'}}
  return SVE_ACLE_FUNC(svmullt,_s32,,)(op1, op2);
}

svint64_t test_svmullt_s64(svint32_t op1, svint32_t op2)
{
  // CHECK-LABEL: test_svmullt_s64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_s64'}}
  return SVE_ACLE_FUNC(svmullt,_s64,,)(op1, op2);
}

svuint16_t test_svmullt_u16(svuint8_t op1, svuint8_t op2)
{
  // CHECK-LABEL: test_svmullt_u16
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_u16'}}
  return SVE_ACLE_FUNC(svmullt,_u16,,)(op1, op2);
}

svuint32_t test_svmullt_u32(svuint16_t op1, svuint16_t op2)
{
  // CHECK-LABEL: test_svmullt_u32
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_u32'}}
  return SVE_ACLE_FUNC(svmullt,_u32,,)(op1, op2);
}

svuint64_t test_svmullt_u64(svuint32_t op1, svuint32_t op2)
{
  // CHECK-LABEL: test_svmullt_u64
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_u64'}}
  return SVE_ACLE_FUNC(svmullt,_u64,,)(op1, op2);
}

// _n_ forms: the scalar second operand is splatted via sve.dup.x before the
// widening multiply.
svint16_t test_svmullt_n_s16(svint8_t op1, int8_t op2)
{
  // CHECK-LABEL: test_svmullt_n_s16
  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_s16'}}
  return SVE_ACLE_FUNC(svmullt,_n_s16,,)(op1, op2);
}

svint32_t test_svmullt_n_s32(svint16_t op1, int16_t op2)
{
  // CHECK-LABEL: test_svmullt_n_s32
  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_s32'}}
  return SVE_ACLE_FUNC(svmullt,_n_s32,,)(op1, op2);
}

svint64_t test_svmullt_n_s64(svint32_t op1, int32_t op2)
{
  // CHECK-LABEL: test_svmullt_n_s64
  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_s64'}}
  return SVE_ACLE_FUNC(svmullt,_n_s64,,)(op1, op2);
}
|
|
||
| svuint16_t test_svmullt_n_u16(svuint8_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svmullt,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svmullt_n_u32(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svmullt,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svmullt_n_u64(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svmullt,_n_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svmullt_lane_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_s32,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svint32_t test_svmullt_lane_s32_1(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_s32,,)(op1, op2, 7); | ||
| } | ||
|
|
||
| svint64_t test_svmullt_lane_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_s64,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svint64_t test_svmullt_lane_s64_1(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_s64,,)(op1, op2, 3); | ||
| } | ||
|
|
||
| svuint32_t test_svmullt_lane_u32(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u32'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_u32,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svuint32_t test_svmullt_lane_u32_1(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_u32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u32'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_u32,,)(op1, op2, 7); | ||
| } | ||
|
|
||
| svuint64_t test_svmullt_lane_u64(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u64'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_u64,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svuint64_t test_svmullt_lane_u64_1(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svmullt_lane_u64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u64'}} | ||
| return SVE_ACLE_FUNC(svmullt_lane,_u64,,)(op1, op2, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,116 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svqdmlalb_s16(svint16_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalb_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalb_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svqdmlalb_n_s16(svint16_t op1, svint8_t op2, int8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalb_n_s32(svint32_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalb_n_s64(svint64_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb_lane,_s32,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalb_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb_lane,_s32,,)(op1, op2, op3, 7); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb_lane,_s64,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalb_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalb_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalb_lane,_s64,,)(op1, op2, op3, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,116 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svqdmlalt_s16(svint16_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalt_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalt_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svqdmlalt_n_s16(svint16_t op1, svint8_t op2, int8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalt_n_s32(svint32_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalt_n_s64(svint64_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt_lane,_s32,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlalt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt_lane,_s32,,)(op1, op2, op3, 7); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt_lane,_s64,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlalt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlalt_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlalt_lane,_s64,,)(op1, op2, op3, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,116 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svqdmlslb_s16(svint16_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslb_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslb_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svqdmlslb_n_s16(svint16_t op1, svint8_t op2, int8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslb_n_s32(svint32_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslb_n_s64(svint64_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb_lane,_s32,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslb_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb_lane,_s32,,)(op1, op2, op3, 7); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb_lane,_s64,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslb_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslb_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslb_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslb_lane,_s64,,)(op1, op2, op3, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,116 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svqdmlslt_s16(svint16_t op1, svint8_t op2, svint8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt,_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslt_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt,_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslt_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt,_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint16_t test_svqdmlslt_n_s16(svint16_t op1, svint8_t op2, int8_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt,_n_s16,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslt_n_s32(svint32_t op1, svint16_t op2, int16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt,_n_s32,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslt_n_s64(svint64_t op1, svint32_t op2, int32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt,_n_s64,,)(op1, op2, op3); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt_lane,_s32,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint32_t test_svqdmlslt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt_lane,_s32,,)(op1, op2, op3, 7); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt_lane,_s64,,)(op1, op2, op3, 0); | ||
| } | ||
|
|
||
| svint64_t test_svqdmlslt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3) | ||
| { | ||
| // CHECK-LABEL: test_svqdmlslt_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmlslt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmlslt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmlslt_lane,_s64,,)(op1, op2, op3, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,116 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svqdmullb_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmullb,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullb_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullb,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullb_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullb,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svqdmullb_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmullb,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullb_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullb,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullb_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullb,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullb_lane_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullb_lane,_s32,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullb_lane_s32_1(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullb_lane,_s32,,)(op1, op2, 7); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullb_lane_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullb_lane,_s64,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullb_lane_s64_1(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullb_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullb_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullb_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullb_lane,_s64,,)(op1, op2, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,116 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svqdmullt_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmullt,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullt_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullt,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullt_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullt,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svqdmullt_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svqdmullt,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullt_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullt,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullt_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullt,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullt_lane_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_lane_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullt_lane,_s32,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svint32_t test_svqdmullt_lane_s32_1(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_lane_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_lane_s32'}} | ||
| return SVE_ACLE_FUNC(svqdmullt_lane,_s32,,)(op1, op2, 7); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullt_lane_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_lane_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullt_lane,_s64,,)(op1, op2, 0); | ||
| } | ||
|
|
||
| svint64_t test_svqdmullt_lane_s64_1(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svqdmullt_lane_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svqdmullt_lane'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svqdmullt_lane_s64'}} | ||
| return SVE_ACLE_FUNC(svqdmullt_lane,_s64,,)(op1, op2, 3); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,133 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svshllb_n_s16(svint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllb.nxv8i16(<vscale x 16 x i8> %op1, i32 0) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_s16,,)(op1, 0); | ||
| } | ||
|
|
||
| svint16_t test_svshllb_n_s16_1(svint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_s16_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllb.nxv8i16(<vscale x 16 x i8> %op1, i32 7) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_s16,,)(op1, 7); | ||
| } | ||
|
|
||
| svint32_t test_svshllb_n_s32(svint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllb.nxv4i32(<vscale x 8 x i16> %op1, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_s32,,)(op1, 0); | ||
| } | ||
|
|
||
| svint32_t test_svshllb_n_s32_1(svint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllb.nxv4i32(<vscale x 8 x i16> %op1, i32 15) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_s32,,)(op1, 15); | ||
| } | ||
|
|
||
| svint64_t test_svshllb_n_s64(svint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllb.nxv2i64(<vscale x 4 x i32> %op1, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_s64,,)(op1, 0); | ||
| } | ||
|
|
||
| svint64_t test_svshllb_n_s64_1(svint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllb.nxv2i64(<vscale x 4 x i32> %op1, i32 31) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_s64,,)(op1, 31); | ||
| } | ||
|
|
||
| svuint16_t test_svshllb_n_u16(svuint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllb.nxv8i16(<vscale x 16 x i8> %op1, i32 0) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_u16,,)(op1, 0); | ||
| } | ||
|
|
||
| svuint16_t test_svshllb_n_u16_1(svuint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_u16_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllb.nxv8i16(<vscale x 16 x i8> %op1, i32 7) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_u16,,)(op1, 7); | ||
| } | ||
|
|
||
| svuint32_t test_svshllb_n_u32(svuint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllb.nxv4i32(<vscale x 8 x i16> %op1, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_u32,,)(op1, 0); | ||
| } | ||
|
|
||
| svuint32_t test_svshllb_n_u32_1(svuint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_u32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllb.nxv4i32(<vscale x 8 x i16> %op1, i32 15) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_u32,,)(op1, 15); | ||
| } | ||
|
|
||
| svuint64_t test_svshllb_n_u64(svuint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllb.nxv2i64(<vscale x 4 x i32> %op1, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_u64,,)(op1, 0); | ||
| } | ||
|
|
||
| svuint64_t test_svshllb_n_u64_1(svuint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllb_n_u64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllb.nxv2i64(<vscale x 4 x i32> %op1, i32 31) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svshllb,_n_u64,,)(op1, 31); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,133 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svshllt_n_s16(svint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllt.nxv8i16(<vscale x 16 x i8> %op1, i32 0) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_s16,,)(op1, 0); | ||
| } | ||
|
|
||
| svint16_t test_svshllt_n_s16_1(svint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_s16_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllt.nxv8i16(<vscale x 16 x i8> %op1, i32 7) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_s16'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_s16,,)(op1, 7); | ||
| } | ||
|
|
||
| svint32_t test_svshllt_n_s32(svint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllt.nxv4i32(<vscale x 8 x i16> %op1, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_s32,,)(op1, 0); | ||
| } | ||
|
|
||
| svint32_t test_svshllt_n_s32_1(svint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_s32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllt.nxv4i32(<vscale x 8 x i16> %op1, i32 15) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_s32'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_s32,,)(op1, 15); | ||
| } | ||
|
|
||
| svint64_t test_svshllt_n_s64(svint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllt.nxv2i64(<vscale x 4 x i32> %op1, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_s64,,)(op1, 0); | ||
| } | ||
|
|
||
| svint64_t test_svshllt_n_s64_1(svint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_s64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllt.nxv2i64(<vscale x 4 x i32> %op1, i32 31) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_s64'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_s64,,)(op1, 31); | ||
| } | ||
|
|
||
| svuint16_t test_svshllt_n_u16(svuint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllt.nxv8i16(<vscale x 16 x i8> %op1, i32 0) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_u16,,)(op1, 0); | ||
| } | ||
|
|
||
| svuint16_t test_svshllt_n_u16_1(svuint8_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_u16_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllt.nxv8i16(<vscale x 16 x i8> %op1, i32 7) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_u16'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_u16,,)(op1, 7); | ||
| } | ||
|
|
||
| svuint32_t test_svshllt_n_u32(svuint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllt.nxv4i32(<vscale x 8 x i16> %op1, i32 0) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_u32,,)(op1, 0); | ||
| } | ||
|
|
||
| svuint32_t test_svshllt_n_u32_1(svuint16_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_u32_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllt.nxv4i32(<vscale x 8 x i16> %op1, i32 15) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_u32'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_u32,,)(op1, 15); | ||
| } | ||
|
|
||
| svuint64_t test_svshllt_n_u64(svuint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllt.nxv2i64(<vscale x 4 x i32> %op1, i32 0) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_u64,,)(op1, 0); | ||
| } | ||
|
|
||
| svuint64_t test_svshllt_n_u64_1(svuint32_t op1) | ||
| { | ||
| // CHECK-LABEL: test_svshllt_n_u64_1 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllt.nxv2i64(<vscale x 4 x i32> %op1, i32 31) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svshllt'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svshllt_n_u64'}} | ||
| return SVE_ACLE_FUNC(svshllt,_n_u64,,)(op1, 31); | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,139 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| svint16_t test_svsublb_s16(svint8_t op1, svint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_s16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_s16'}} | ||
| return SVE_ACLE_FUNC(svsublb,_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svsublb_s32(svint16_t op1, svint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_s32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_s32'}} | ||
| return SVE_ACLE_FUNC(svsublb,_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svsublb_s64(svint32_t op1, svint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_s64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_s64'}} | ||
| return SVE_ACLE_FUNC(svsublb,_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svsublb_u16(svuint8_t op1, svuint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_u16 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_u16'}} | ||
| return SVE_ACLE_FUNC(svsublb,_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svsublb_u32(svuint16_t op1, svuint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_u32 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_u32'}} | ||
| return SVE_ACLE_FUNC(svsublb,_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svsublb_u64(svuint32_t op1, svuint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_u64 | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_u64'}} | ||
| return SVE_ACLE_FUNC(svsublb,_u64,,)(op1, op2); | ||
| } | ||
|
|
||
| svint16_t test_svsublb_n_s16(svint8_t op1, int8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_n_s16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_n_s16'}} | ||
| return SVE_ACLE_FUNC(svsublb,_n_s16,,)(op1, op2); | ||
| } | ||
|
|
||
| svint32_t test_svsublb_n_s32(svint16_t op1, int16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_n_s32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_n_s32'}} | ||
| return SVE_ACLE_FUNC(svsublb,_n_s32,,)(op1, op2); | ||
| } | ||
|
|
||
| svint64_t test_svsublb_n_s64(svint32_t op1, int32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_n_s64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_n_s64'}} | ||
| return SVE_ACLE_FUNC(svsublb,_n_s64,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint16_t test_svsublb_n_u16(svuint8_t op1, uint8_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_n_u16 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]]) | ||
| // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_n_u16'}} | ||
| return SVE_ACLE_FUNC(svsublb,_n_u16,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint32_t test_svsublb_n_u32(svuint16_t op1, uint16_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_n_u32 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]]) | ||
| // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_n_u32'}} | ||
| return SVE_ACLE_FUNC(svsublb,_n_u32,,)(op1, op2); | ||
| } | ||
|
|
||
| svuint64_t test_svsublb_n_u64(svuint32_t op1, uint32_t op2) | ||
| { | ||
| // CHECK-LABEL: test_svsublb_n_u64 | ||
| // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2) | ||
| // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]]) | ||
| // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]] | ||
| // overload-warning@+2 {{implicit declaration of function 'svsublb'}} | ||
| // expected-warning@+1 {{implicit declaration of function 'svsublb_n_u64'}} | ||
| return SVE_ACLE_FUNC(svsublb,_n_u64,,)(op1, op2); | ||
| } |