@@ -0,0 +1,91 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
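// Tests for the non-faulting, sign-extending halfword loads svldnf1sh and
// svldnf1sh_vnum. The FileCheck lines below verify that each builtin lowers to
// the llvm.aarch64.sve.ldnf1 intrinsic on a halfword container, followed by a
// sign extension to the element width of the result vector.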

#include <arm_sve.h>

svint32_t test_svldnf1sh_s32(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svldnf1sh_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldnf1sh_s32(pg, base);
}

svint64_t test_svldnf1sh_s64(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svldnf1sh_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sh_s64(pg, base);
}

svuint32_t test_svldnf1sh_u32(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svldnf1sh_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldnf1sh_u32(pg, base);
}

svuint64_t test_svldnf1sh_u64(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svldnf1sh_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sh_u64(pg, base);
}

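// In the _vnum forms below, the base pointer is expected to be cast to a
// pointer to the scalable container type and offset with a getelementptr
// indexed by vnum, so the displacement scales with the hardware vector length.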
svint32_t test_svldnf1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1sh_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldnf1sh_vnum_s32(pg, base, vnum);
}

svint64_t test_svldnf1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1sh_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sh_vnum_s64(pg, base, vnum);
}

svuint32_t test_svldnf1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1sh_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[SEXT]]
  return svldnf1sh_vnum_u32(pg, base, vnum);
}

svuint64_t test_svldnf1sh_vnum_u64(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1sh_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sh_vnum_u64(pg, base, vnum);
}
@@ -0,0 +1,47 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
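// Tests for the non-faulting, sign-extending word loads svldnf1sw and
// svldnf1sw_vnum: the load is done on a <vscale x 2 x i32> container and
// sign-extended to the 64-bit element type of the result.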

#include <arm_sve.h>

svint64_t test_svldnf1sw_s64(svbool_t pg, const int32_t *base)
{
  // CHECK-LABEL: test_svldnf1sw_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sw_s64(pg, base);
}

svuint64_t test_svldnf1sw_u64(svbool_t pg, const int32_t *base)
{
  // CHECK-LABEL: test_svldnf1sw_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %base)
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sw_u64(pg, base);
}

svint64_t test_svldnf1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1sw_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 2 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sw_vnum_s64(pg, base, vnum);
}

svuint64_t test_svldnf1sw_vnum_u64(svbool_t pg, const int32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1sw_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 2 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %[[GEP]])
  // CHECK: %[[SEXT:.*]] = sext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[SEXT]]
  return svldnf1sw_vnum_u64(pg, base, vnum);
}
@@ -0,0 +1,135 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
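// Tests for the non-faulting, zero-extending byte loads svldnf1ub and
// svldnf1ub_vnum: each builtin should load a byte container with
// llvm.aarch64.sve.ldnf1 and zero-extend it to the element width of the
// result vector.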

#include <arm_sve.h>

svint16_t test_svldnf1ub_s16(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnf1ub_s16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[ZEXT]]
  return svldnf1ub_s16(pg, base);
}

svint32_t test_svldnf1ub_s32(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnf1ub_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1ub_s32(pg, base);
}

svint64_t test_svldnf1ub_s64(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnf1ub_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1ub_s64(pg, base);
}

svuint16_t test_svldnf1ub_u16(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnf1ub_u16
  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[ZEXT]]
  return svldnf1ub_u16(pg, base);
}

svuint32_t test_svldnf1ub_u32(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnf1ub_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1ub_u32(pg, base);
}

svuint64_t test_svldnf1ub_u64(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnf1ub_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1ub_u64(pg, base);
}

svint16_t test_svldnf1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1ub_vnum_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[ZEXT]]
  return svldnf1ub_vnum_s16(pg, base, vnum);
}

svint32_t test_svldnf1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1ub_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1ub_vnum_s32(pg, base, vnum);
}

svint64_t test_svldnf1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1ub_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 2 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1ub_vnum_s64(pg, base, vnum);
}

svuint16_t test_svldnf1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1ub_vnum_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 8 x i8> %[[LOAD]] to <vscale x 8 x i16>
  // CHECK: ret <vscale x 8 x i16> %[[ZEXT]]
  return svldnf1ub_vnum_u16(pg, base, vnum);
}

svuint32_t test_svldnf1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1ub_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i8> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1ub_vnum_u32(pg, base, vnum);
}

svuint64_t test_svldnf1ub_vnum_u64(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1ub_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 2 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %[[PG]], i8* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i8> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1ub_vnum_u64(pg, base, vnum);
}
@@ -0,0 +1,91 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
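// Tests for the non-faulting, zero-extending halfword loads svldnf1uh and
// svldnf1uh_vnum: the halfword container is loaded with
// llvm.aarch64.sve.ldnf1 and zero-extended to the result element width.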

#include <arm_sve.h>

svint32_t test_svldnf1uh_s32(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svldnf1uh_s32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1uh_s32(pg, base);
}

svint64_t test_svldnf1uh_s64(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svldnf1uh_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uh_s64(pg, base);
}

svuint32_t test_svldnf1uh_u32(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svldnf1uh_u32
  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1uh_u32(pg, base);
}

svuint64_t test_svldnf1uh_u64(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svldnf1uh_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uh_u64(pg, base);
}

svint32_t test_svldnf1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1uh_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1uh_vnum_s32(pg, base, vnum);
}

svint64_t test_svldnf1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1uh_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uh_vnum_s64(pg, base, vnum);
}

svuint32_t test_svldnf1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1uh_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 4 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
  // CHECK: ret <vscale x 4 x i32> %[[ZEXT]]
  return svldnf1uh_vnum_u32(pg, base, vnum);
}

svuint64_t test_svldnf1uh_vnum_u64(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1uh_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 2 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i16> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uh_vnum_u64(pg, base, vnum);
}
@@ -0,0 +1,47 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
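// Tests for the non-faulting, zero-extending word loads svldnf1uw and
// svldnf1uw_vnum: a <vscale x 2 x i32> container is loaded and zero-extended
// to the 64-bit element type of the result.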

#include <arm_sve.h>

svint64_t test_svldnf1uw_s64(svbool_t pg, const uint32_t *base)
{
  // CHECK-LABEL: test_svldnf1uw_s64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uw_s64(pg, base);
}

svuint64_t test_svldnf1uw_u64(svbool_t pg, const uint32_t *base)
{
  // CHECK-LABEL: test_svldnf1uw_u64
  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %base)
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uw_u64(pg, base);
}

svint64_t test_svldnf1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1uw_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 2 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uw_vnum_s64(pg, base, vnum);
}

svuint64_t test_svldnf1uw_vnum_u64(svbool_t pg, const uint32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnf1uw_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 2 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %[[PG]], i32* %[[GEP]])
  // CHECK: %[[ZEXT:.*]] = zext <vscale x 2 x i32> %[[LOAD]] to <vscale x 2 x i64>
  // CHECK: ret <vscale x 2 x i64> %[[ZEXT]]
  return svldnf1uw_vnum_u64(pg, base, vnum);
}
@@ -0,0 +1,227 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
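// Tests for the non-temporal loads svldnt1 and svldnt1_vnum, in both their
// type-suffixed and overloaded forms: every call should lower to the
// llvm.aarch64.sve.ldnt1 intrinsic for the matching scalable vector type,
// with the predicate narrowed from svbool_t whenever the element is wider
// than a byte.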

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
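// For example, SVE_ACLE_FUNC(svldnt1,_s8,,)(pg, base) expands to
// svldnt1_s8(pg, base) in the default build and to the overloaded
// svldnt1(pg, base) when SVE_OVERLOADED_FORMS is defined.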

svint8_t test_svldnt1_s8(svbool_t pg, const int8_t *base)
{
  // CHECK-LABEL: test_svldnt1_s8
  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
  // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_s8,,)(pg, base);
}

svint16_t test_svldnt1_s16(svbool_t pg, const int16_t *base)
{
  // CHECK-LABEL: test_svldnt1_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %base)
  // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_s16,,)(pg, base);
}

svint32_t test_svldnt1_s32(svbool_t pg, const int32_t *base)
{
  // CHECK-LABEL: test_svldnt1_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_s32,,)(pg, base);
}

svint64_t test_svldnt1_s64(svbool_t pg, const int64_t *base)
{
  // CHECK-LABEL: test_svldnt1_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_s64,,)(pg, base);
}

svuint8_t test_svldnt1_u8(svbool_t pg, const uint8_t *base)
{
  // CHECK-LABEL: test_svldnt1_u8
  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
  // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_u8,,)(pg, base);
}

svuint16_t test_svldnt1_u16(svbool_t pg, const uint16_t *base)
{
  // CHECK-LABEL: test_svldnt1_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %base)
  // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_u16,,)(pg, base);
}

svuint32_t test_svldnt1_u32(svbool_t pg, const uint32_t *base)
{
  // CHECK-LABEL: test_svldnt1_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %base)
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_u32,,)(pg, base);
}

svuint64_t test_svldnt1_u64(svbool_t pg, const uint64_t *base)
{
  // CHECK-LABEL: test_svldnt1_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %base)
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_u64,,)(pg, base);
}

svfloat16_t test_svldnt1_f16(svbool_t pg, const float16_t *base)
{
  // CHECK-LABEL: test_svldnt1_f16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %[[PG]], half* %base)
  // CHECK: ret <vscale x 8 x half> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_f16,,)(pg, base);
}

svfloat32_t test_svldnt1_f32(svbool_t pg, const float32_t *base)
{
  // CHECK-LABEL: test_svldnt1_f32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %base)
  // CHECK: ret <vscale x 4 x float> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_f32,,)(pg, base);
}

svfloat64_t test_svldnt1_f64(svbool_t pg, const float64_t *base)
{
  // CHECK-LABEL: test_svldnt1_f64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %base)
  // CHECK: ret <vscale x 2 x double> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1,_f64,,)(pg, base);
}

svint8_t test_svldnt1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_s8
  // CHECK: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
  // CHECK: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pg, i8* %[[GEP]])
  // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_s8,,)(pg, base, vnum);
}

svint16_t test_svldnt1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_s16,,)(pg, base, vnum);
}

svint32_t test_svldnt1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %[[GEP]])
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_s32,,)(pg, base, vnum);
}

svint64_t test_svldnt1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %[[GEP]])
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_s64,,)(pg, base, vnum);
}

svuint8_t test_svldnt1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_u8
  // CHECK: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
  // CHECK: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pg, i8* %[[GEP]])
  // CHECK: ret <vscale x 16 x i8> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_u8,,)(pg, base, vnum);
}

svuint16_t test_svldnt1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %[[PG]], i16* %[[GEP]])
  // CHECK: ret <vscale x 8 x i16> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_u16,,)(pg, base, vnum);
}

svuint32_t test_svldnt1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %[[PG]], i32* %[[GEP]])
  // CHECK: ret <vscale x 4 x i32> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_u32,,)(pg, base, vnum);
}

svuint64_t test_svldnt1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %[[PG]], i64* %[[GEP]])
  // CHECK: ret <vscale x 2 x i64> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_u64,,)(pg, base, vnum);
}

svfloat16_t test_svldnt1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_f16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast half* %base to <vscale x 8 x half>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %[[PG]], half* %[[GEP]])
  // CHECK: ret <vscale x 8 x half> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_f16,,)(pg, base, vnum);
}

svfloat32_t test_svldnt1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_f32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast float* %base to <vscale x 4 x float>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %[[PG]], float* %[[GEP]])
  // CHECK: ret <vscale x 4 x float> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_f32,,)(pg, base, vnum);
}

svfloat64_t test_svldnt1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum)
{
  // CHECK-LABEL: test_svldnt1_vnum_f64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BITCAST:.*]] = bitcast double* %base to <vscale x 2 x double>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %[[BITCAST]], i64 %vnum, i64 0
  // CHECK: %[[LOAD:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %[[PG]], double* %[[GEP]])
  // CHECK: ret <vscale x 2 x double> %[[LOAD]]
  return SVE_ACLE_FUNC(svldnt1_vnum,_f64,,)(pg, base, vnum);
}
@@ -0,0 +1,216 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -emit-llvm -o - %s | FileCheck %s
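// Tests for svst1 and svst1_vnum, in both their type-suffixed and overloaded
// forms: each store should lower to the llvm.masked.store intrinsic for the
// matching scalable vector type, with the base pointer bitcast to a scalable
// vector pointer and the predicate narrowed from svbool_t where needed.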

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

void test_svst1_s8(svbool_t pg, int8_t *base, svint8_t data)
{
  // CHECK-LABEL: test_svst1_s8
  // CHECK: %[[BASE:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
  // CHECK: call void @llvm.masked.store.nxv16i8.p0nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %[[BASE]], i32 1, <vscale x 16 x i1> %pg)
  return SVE_ACLE_FUNC(svst1,_s8,,)(pg, base, data);
}

void test_svst1_s16(svbool_t pg, int16_t *base, svint16_t data)
{
  // CHECK-LABEL: test_svst1_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
  // CHECK: call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16>* %[[BASE]], i32 1, <vscale x 8 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_s16,,)(pg, base, data);
}

void test_svst1_s32(svbool_t pg, int32_t *base, svint32_t data)
{
  // CHECK-LABEL: test_svst1_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
  // CHECK: call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_s32,,)(pg, base, data);
}

void test_svst1_s64(svbool_t pg, int64_t *base, svint64_t data)
{
  // CHECK-LABEL: test_svst1_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
  // CHECK: call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_s64,,)(pg, base, data);
}

void test_svst1_u8(svbool_t pg, uint8_t *base, svuint8_t data)
{
  // CHECK-LABEL: test_svst1_u8
  // CHECK: %[[BASE:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
  // CHECK: call void @llvm.masked.store.nxv16i8.p0nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %[[BASE]], i32 1, <vscale x 16 x i1> %pg)
  return SVE_ACLE_FUNC(svst1,_u8,,)(pg, base, data);
}

void test_svst1_u16(svbool_t pg, uint16_t *base, svuint16_t data)
{
  // CHECK-LABEL: test_svst1_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
  // CHECK: call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16>* %[[BASE]], i32 1, <vscale x 8 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_u16,,)(pg, base, data);
}

void test_svst1_u32(svbool_t pg, uint32_t *base, svuint32_t data)
{
  // CHECK-LABEL: test_svst1_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
  // CHECK: call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_u32,,)(pg, base, data);
}

void test_svst1_u64(svbool_t pg, uint64_t *base, svuint64_t data)
{
  // CHECK-LABEL: test_svst1_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
  // CHECK: call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_u64,,)(pg, base, data);
}

void test_svst1_f16(svbool_t pg, float16_t *base, svfloat16_t data)
{
  // CHECK-LABEL: test_svst1_f16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast half* %base to <vscale x 8 x half>*
  // CHECK: call void @llvm.masked.store.nxv8f16.p0nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x half>* %[[BASE]], i32 1, <vscale x 8 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_f16,,)(pg, base, data);
}

void test_svst1_f32(svbool_t pg, float32_t *base, svfloat32_t data)
{
  // CHECK-LABEL: test_svst1_f32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast float* %base to <vscale x 4 x float>*
  // CHECK: call void @llvm.masked.store.nxv4f32.p0nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x float>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_f32,,)(pg, base, data);
}

void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data)
{
  // CHECK-LABEL: test_svst1_f64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast double* %base to <vscale x 2 x double>*
  // CHECK: call void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1,_f64,,)(pg, base, data);
}

void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
{
  // CHECK-LABEL: test_svst1_vnum_s8
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv16i8.p0nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %[[GEP]], i32 1, <vscale x 16 x i1> %pg)
  return SVE_ACLE_FUNC(svst1_vnum,_s8,,)(pg, base, vnum, data);
}

void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t data)
{
  // CHECK-LABEL: test_svst1_vnum_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16>* %[[GEP]], i32 1, <vscale x 8 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_s16,,)(pg, base, vnum, data);
}

void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t data)
{
  // CHECK-LABEL: test_svst1_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_s32,,)(pg, base, vnum, data);
}

void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t data)
{
  // CHECK-LABEL: test_svst1_vnum_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_s64,,)(pg, base, vnum, data);
}

void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data)
{
  // CHECK-LABEL: test_svst1_vnum_u8
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 16 x i8>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv16i8.p0nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %[[GEP]], i32 1, <vscale x 16 x i1> %pg)
  return SVE_ACLE_FUNC(svst1_vnum,_u8,,)(pg, base, vnum, data);
}

void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t data)
{
  // CHECK-LABEL: test_svst1_vnum_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 8 x i16>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16>* %[[GEP]], i32 1, <vscale x 8 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_u16,,)(pg, base, vnum, data);
}

void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t data)
{
  // CHECK-LABEL: test_svst1_vnum_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 4 x i32>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_u32,,)(pg, base, vnum, data);
}

void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t data)
{
  // CHECK-LABEL: test_svst1_vnum_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i64* %base to <vscale x 2 x i64>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_u64,,)(pg, base, vnum, data);
}

void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t data)
{
  // CHECK-LABEL: test_svst1_vnum_f16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast half* %base to <vscale x 8 x half>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv8f16.p0nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x half>* %[[GEP]], i32 1, <vscale x 8 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_f16,,)(pg, base, vnum, data);
}

void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t data)
{
  // CHECK-LABEL: test_svst1_vnum_f32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast float* %base to <vscale x 4 x float>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv4f32.p0nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x float>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_f32,,)(pg, base, vnum, data);
}

void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t data)
{
  // CHECK-LABEL: test_svst1_vnum_f64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast double* %base to <vscale x 2 x double>*
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]])
  return SVE_ACLE_FUNC(svst1_vnum,_f64,,)(pg, base, vnum, data);
}
@@ -0,0 +1,149 @@
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - -emit-llvm %s | FileCheck %s
// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - -emit-llvm %s | FileCheck %s
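// Tests for the truncating byte stores svst1b and svst1b_vnum: the data
// vector is truncated to a byte container and stored through the
// llvm.masked.store intrinsic with the narrowed predicate.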

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

void test_svst1b_s16(svbool_t pg, int8_t *base, svint16_t data)
{
  // CHECK-LABEL: test_svst1b_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  // CHECK: call void @llvm.masked.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8> %[[DATA]], <vscale x 8 x i8>* %[[BASE]], i32 1, <vscale x 8 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b,_s16,,)(pg, base, data);
}

void test_svst1b_s32(svbool_t pg, int8_t *base, svint32_t data)
{
  // CHECK-LABEL: test_svst1b_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  // CHECK: call void @llvm.masked.store.nxv4i8.p0nxv4i8(<vscale x 4 x i8> %[[DATA]], <vscale x 4 x i8>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b,_s32,,)(pg, base, data);
}

void test_svst1b_s64(svbool_t pg, int8_t *base, svint64_t data)
{
  // CHECK-LABEL: test_svst1b_s64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 2 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  // CHECK: call void @llvm.masked.store.nxv2i8.p0nxv2i8(<vscale x 2 x i8> %[[DATA]], <vscale x 2 x i8>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b,_s64,,)(pg, base, data);
}

void test_svst1b_u16(svbool_t pg, uint8_t *base, svuint16_t data)
{
  // CHECK-LABEL: test_svst1b_u16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  // CHECK: call void @llvm.masked.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8> %[[DATA]], <vscale x 8 x i8>* %[[BASE]], i32 1, <vscale x 8 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b,_u16,,)(pg, base, data);
}

void test_svst1b_u32(svbool_t pg, uint8_t *base, svuint32_t data)
{
  // CHECK-LABEL: test_svst1b_u32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  // CHECK: call void @llvm.masked.store.nxv4i8.p0nxv4i8(<vscale x 4 x i8> %[[DATA]], <vscale x 4 x i8>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b,_u32,,)(pg, base, data);
}

void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data)
{
  // CHECK-LABEL: test_svst1b_u64
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 2 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  // CHECK: call void @llvm.masked.store.nxv2i8.p0nxv2i8(<vscale x 2 x i8> %[[DATA]], <vscale x 2 x i8>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b,_u64,,)(pg, base, data);
}

void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t data)
{
  // CHECK-LABEL: test_svst1b_vnum_s16
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 8 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8> %[[DATA]], <vscale x 8 x i8>* %[[GEP]], i32 1, <vscale x 8 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b_vnum,_s16,,)(pg, base, vnum, data);
}

void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t data)
{
  // CHECK-LABEL: test_svst1b_vnum_s32
  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
  // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 4 x i8>*
  // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %[[BASE]], i64 %vnum
  // CHECK: call void @llvm.masked.store.nxv4i8.p0nxv4i8(<vscale x 4 x i8> %[[DATA]], <vscale x 4 x i8>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]])
  // CHECK: ret void
  return SVE_ACLE_FUNC(svst1b_vnum,_s32,,)(pg, base, vnum, data);
| } | ||
|
|
||
| void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1b_vnum_s64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 2 x i8>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv2i8.p0nxv2i8(<vscale x 2 x i8> %[[DATA]], <vscale x 2 x i8>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1b_vnum,_s64,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1b_vnum_u16 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 8 x i8>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8> %[[DATA]], <vscale x 8 x i8>* %[[GEP]], i32 1, <vscale x 8 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1b_vnum,_u16,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1b_vnum_u32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 4 x i8>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv4i8.p0nxv4i8(<vscale x 4 x i8> %[[DATA]], <vscale x 4 x i8>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1b_vnum,_u32,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1b_vnum_u64(svbool_t pg, uint8_t *base, int64_t vnum, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1b_vnum_u64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i8* %base to <vscale x 2 x i8>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv2i8.p0nxv2i8(<vscale x 2 x i8> %[[DATA]], <vscale x 2 x i8>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1b_vnum,_u64,,)(pg, base, vnum, data); | ||
| } |
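The truncating behaviour that the `trunc` plus `llvm.masked.store` pair checks for can be modelled lane by lane. A rough plain-C model follows; the mask array, `vl`, and the function name are hypothetical and not ACLE:

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical per-lane model of svst1b_s32: each active 32-bit lane is
// truncated to its low byte and stored to consecutive int8_t slots.
void st1b_s32_model(const int pred[], const int32_t data[], size_t vl,
                    int8_t *base) {
  for (size_t i = 0; i < vl; ++i)
    if (pred[i])
      base[i] = (int8_t)data[i];  // per-lane trunc to 8 bits
}
```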
| @@ -0,0 +1,103 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - -emit-llvm %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - -emit-llvm %s | FileCheck %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| void test_svst1h_s32(svbool_t pg, int16_t *base, svint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_s32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16> | ||
| // CHECK: call void @llvm.masked.store.nxv4i16.p0nxv4i16(<vscale x 4 x i16> %[[DATA]], <vscale x 4 x i16>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h,_s32,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svst1h_s64(svbool_t pg, int16_t *base, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_s64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16> | ||
| // CHECK: call void @llvm.masked.store.nxv2i16.p0nxv2i16(<vscale x 2 x i16> %[[DATA]], <vscale x 2 x i16>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h,_s64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svst1h_u32(svbool_t pg, uint16_t *base, svuint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_u32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16> | ||
| // CHECK: call void @llvm.masked.store.nxv4i16.p0nxv4i16(<vscale x 4 x i16> %[[DATA]], <vscale x 4 x i16>* %[[BASE]], i32 1, <vscale x 4 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h,_u32,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_u64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16> | ||
| // CHECK: call void @llvm.masked.store.nxv2i16.p0nxv2i16(<vscale x 2 x i16> %[[DATA]], <vscale x 2 x i16>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h,_u64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_vnum_s32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv4i16.p0nxv4i16(<vscale x 4 x i16> %[[DATA]], <vscale x 4 x i16>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h_vnum,_s32,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_vnum_s64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv2i16.p0nxv2i16(<vscale x 2 x i16> %[[DATA]], <vscale x 2 x i16>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h_vnum,_s64,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_vnum_u32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 4 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv4i16.p0nxv4i16(<vscale x 4 x i16> %[[DATA]], <vscale x 4 x i16>* %[[GEP]], i32 1, <vscale x 4 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h_vnum,_u32,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1h_vnum_u64(svbool_t pg, uint16_t *base, int64_t vnum, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1h_vnum_u64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i16* %base to <vscale x 2 x i16>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv2i16.p0nxv2i16(<vscale x 2 x i16> %[[DATA]], <vscale x 2 x i16>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1h_vnum,_u64,,)(pg, base, vnum, data); | ||
| } |
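svst1h follows the same pattern one element size up, and its `_vnum` forms combine the truncation with the whole-vector offset. A hedged plain-C model, with the mask array, `vl`, and names purely illustrative:

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical model of svst1h_vnum_s32: offset the base by vnum whole
// vectors of 32-bit lanes, then store the low 16 bits of each active lane.
void st1h_vnum_s32_model(const int pred[], const int32_t data[], size_t vl,
                         int16_t *base, int64_t vnum) {
  int16_t *p = base + vnum * (int64_t)vl;  // the getelementptr in the checks
  for (size_t i = 0; i < vl; ++i)
    if (pred[i])
      p[i] = (int16_t)data[i];             // per-lane trunc to 16 bits
}
```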
| @@ -0,0 +1,57 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - -emit-llvm %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - -emit-llvm %s | FileCheck %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| void test_svst1w_s64(svbool_t pg, int32_t *base, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1w_s64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 2 x i32>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32> | ||
| // CHECK: call void @llvm.masked.store.nxv2i32.p0nxv2i32(<vscale x 2 x i32> %[[DATA]], <vscale x 2 x i32>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1w,_s64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svst1w_u64(svbool_t pg, uint32_t *base, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1w_u64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 2 x i32>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32> | ||
| // CHECK: call void @llvm.masked.store.nxv2i32.p0nxv2i32(<vscale x 2 x i32> %[[DATA]], <vscale x 2 x i32>* %[[BASE]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1w,_u64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svst1w_vnum_s64(svbool_t pg, int32_t *base, int64_t vnum, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1w_vnum_s64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 2 x i32>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv2i32.p0nxv2i32(<vscale x 2 x i32> %[[DATA]], <vscale x 2 x i32>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1w_vnum,_s64,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svst1w_vnum_u64(svbool_t pg, uint32_t *base, int64_t vnum, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svst1w_vnum_u64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK-DAG: %[[BASE:.*]] = bitcast i32* %base to <vscale x 2 x i32>* | ||
| // CHECK-DAG: %[[DATA:.*]] = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32> | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %[[BASE]], i64 %vnum | ||
| // CHECK: call void @llvm.masked.store.nxv2i32.p0nxv2i32(<vscale x 2 x i32> %[[DATA]], <vscale x 2 x i32>* %[[GEP]], i32 1, <vscale x 2 x i1> %[[PG]]) | ||
| // CHECK: ret void | ||
| return SVE_ACLE_FUNC(svst1w_vnum,_u64,,)(pg, base, vnum, data); | ||
| } |
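svst1w only has 64-bit element forms because it narrows each lane to 32 bits. A minimal model under the same hypothetical lane/mask representation:

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical model of svst1w_s64: store the low 32 bits of each
// active 64-bit lane.
void st1w_s64_model(const int pred[], const int64_t data[], size_t vl,
                    int32_t *base) {
  for (size_t i = 0; i < vl; ++i)
    if (pred[i])
      base[i] = (int32_t)data[i];
}
```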
| @@ -0,0 +1,227 @@ | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
| // RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s | ||
|
|
||
| #include <arm_sve.h> | ||
|
|
||
| #ifdef SVE_OVERLOADED_FORMS | ||
| // A simple used,unused... macro, long enough to represent any SVE builtin. | ||
| #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3 | ||
| #else | ||
| #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4 | ||
| #endif | ||
|
|
||
| void test_svstnt1_s8(svbool_t pg, int8_t *base, svint8_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_s8 | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_s8,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_s16(svbool_t pg, int16_t *base, svint16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_s16 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %[[PG]], i16* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_s16,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_s32(svbool_t pg, int32_t *base, svint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_s32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %[[PG]], i32* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_s32,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_s64(svbool_t pg, int64_t *base, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_s64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %[[PG]], i64* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_s64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_u8(svbool_t pg, uint8_t *base, svuint8_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_u8 | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_u8,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_u16(svbool_t pg, uint16_t *base, svuint16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_u16 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %[[PG]], i16* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_u16,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_u32(svbool_t pg, uint32_t *base, svuint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_u32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %[[PG]], i32* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_u32,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_u64(svbool_t pg, uint64_t *base, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_u64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %[[PG]], i64* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_u64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_f16(svbool_t pg, float16_t *base, svfloat16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_f16 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %[[PG]], half* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_f16,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_f32(svbool_t pg, float32_t *base, svfloat32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_f32 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %[[PG]], float* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_f32,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_f64(svbool_t pg, float64_t *base, svfloat64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_f64 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %[[PG]], double* %base) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1,_f64,,)(pg, base, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_s8 | ||
| // CHECK: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 16 x i8>* | ||
| // CHECK: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_s8,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_s16 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 8 x i16>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %[[PG]], i16* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_s16,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_s32 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 4 x i32>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %[[PG]], i32* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_s32,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_s64 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast i64* %base to <vscale x 2 x i64>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %[[PG]], i64* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_s64,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_u8 | ||
| // CHECK: %[[BITCAST:.*]] = bitcast i8* %base to <vscale x 16 x i8>* | ||
| // CHECK: %[[GEP:.*]] = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_u8,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_u16 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast i16* %base to <vscale x 8 x i16>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %[[PG]], i16* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_u16,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_u32 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast i32* %base to <vscale x 4 x i32>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %[[PG]], i32* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_u32,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_u64 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast i64* %base to <vscale x 2 x i64>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %[[PG]], i64* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_u64,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_f16 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast half* %base to <vscale x 8 x half>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %[[PG]], half* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_f16,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_f32 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast float* %base to <vscale x 4 x float>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %[[PG]], float* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_f32,,)(pg, base, vnum, data); | ||
| } | ||
|
|
||
| void test_svstnt1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t data) | ||
| { | ||
| // CHECK-LABEL: test_svstnt1_vnum_f64 | ||
| // CHECK-DAG: %[[BITCAST:.*]] = bitcast double* %base to <vscale x 2 x double>* | ||
| // CHECK-DAG: %[[GEP:.*]] = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %[[BITCAST]], i64 %vnum, i64 0 | ||
| // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg) | ||
| // CHECK: call void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %[[PG]], double* %[[GEP]]) | ||
| // CHECK-NEXT: ret | ||
| return SVE_ACLE_FUNC(svstnt1_vnum,_f64,,)(pg, base, vnum, data); | ||
| } |
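Unlike the svst1* forms above, svstnt1 is checked against a dedicated `@llvm.aarch64.sve.stnt1.*` intrinsic rather than `llvm.masked.store`, keeping the non-temporal semantics explicit in the IR. A small usage sketch with real ACLE calls; the loop shape and function name are illustrative only, assuming `<arm_sve.h>`:

```c
#include <arm_sve.h>
#include <stdint.h>

// Illustrative streaming copy: predicate the tail with whilelt and store
// with a non-temporal hint so the destination data is not kept in cache.
void copy_nontemporal_s32(const int32_t *src, int32_t *dst, int64_t n) {
  for (int64_t i = 0; i < n; i += (int64_t)svcntw()) {
    svbool_t pg = svwhilelt_b32_s64(i, n);  // active lanes for this step
    svint32_t v = svld1_s32(pg, src + i);
    svstnt1_s32(pg, dst + i, v);            // non-temporal store
  }
}
```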