diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 02b80f3aba21c..dbba8cc96f814 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1869,6 +1869,23 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
     return Visit(E->getInit(0));
   }
 
+  if (isa<llvm::ScalableVectorType>(VType)) {
+    if (NumInitElements == 0) {
+      // C++11 value-initialization for the vector.
+      return EmitNullValue(E->getType());
+    }
+
+    if (NumInitElements == 1) {
+      Expr *InitVector = E->getInit(0);
+
+      // Initialize from another scalable vector of the same type.
+      if (InitVector->getType() == E->getType())
+        return Visit(InitVector);
+    }
+
+    llvm_unreachable("Unexpected initialization of a scalable vector!");
+  }
+
   unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
 
   // Loop over initializers collecting the Value for each, and remembering
diff --git a/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp b/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
new file mode 100644
index 0000000000000..2088e80acfc80
--- /dev/null
+++ b/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
@@ -0,0 +1,881 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -emit-llvm -o - %s | FileCheck %s
+
+// CHECK-LABEL: define dso_local void @_Z11test_localsv
+// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[S8:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[S16:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[S32:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[S64:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[U8:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[U16:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[U32:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[U64:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[F16:%.*]] = alloca <vscale x 8 x half>, align 16
+// CHECK-NEXT:    [[F32:%.*]] = alloca <vscale x 4 x float>, align 16
+// CHECK-NEXT:    [[F64:%.*]] = alloca <vscale x 2 x double>, align 16
+// CHECK-NEXT:    [[BF16:%.*]] = alloca <vscale x 8 x bfloat>, align 16
+// CHECK-NEXT:    [[S8X2:%.*]] = alloca <vscale x 32 x i8>, align 16
+// CHECK-NEXT:    [[S16X2:%.*]] = alloca <vscale x 16 x i16>, align 16
+// CHECK-NEXT:    [[S32X2:%.*]] = alloca <vscale x 8 x i32>, align 16
+// CHECK-NEXT:    [[X64X2:%.*]] = alloca <vscale x 4 x i64>, align 16
+// CHECK-NEXT:    [[U8X2:%.*]] = alloca <vscale x 32 x i8>, align 16
+// CHECK-NEXT:    [[U16X2:%.*]] = alloca <vscale x 16 x i16>, align 16
+// CHECK-NEXT:    [[U32X2:%.*]] = alloca <vscale x 8 x i32>, align 16
+// CHECK-NEXT:    [[U64X2:%.*]] = alloca <vscale x 4 x i64>, align 16
+// CHECK-NEXT:    [[F16X2:%.*]] = alloca <vscale x 16 x half>, align 16
+// CHECK-NEXT:    [[F32X2:%.*]] = alloca <vscale x 8 x float>, align 16
+// CHECK-NEXT:    [[F64X2:%.*]] = alloca <vscale x 4 x double>, align 16
+// CHECK-NEXT:    [[BF16X2:%.*]] = alloca <vscale x 16 x bfloat>, align 16
+// CHECK-NEXT:    [[S8X3:%.*]] = alloca <vscale x 48 x i8>, align 16
+// CHECK-NEXT:    [[S16X3:%.*]] = alloca <vscale x 24 x i16>, align 16
+// CHECK-NEXT:    [[S32X3:%.*]] = alloca <vscale x 12 x i32>, align 16
+// CHECK-NEXT:    [[X64X3:%.*]] = alloca <vscale x 6 x i64>, align 16
+// CHECK-NEXT:    [[U8X3:%.*]] = alloca <vscale x 48 x i8>, align 16
+// CHECK-NEXT:    [[U16X3:%.*]] = alloca <vscale x 24 x i16>, align 16
+// CHECK-NEXT:    [[U32X3:%.*]] = alloca <vscale x 12 x i32>, align 16
+// CHECK-NEXT:    [[U64X3:%.*]] = alloca <vscale x 6 x i64>, align 16
+// CHECK-NEXT:    [[F16X3:%.*]] = alloca <vscale x 24 x half>, align 16
+// CHECK-NEXT:    [[F32X3:%.*]] = alloca <vscale x 12 x float>, align 16
+// CHECK-NEXT:    [[F64X3:%.*]] = alloca <vscale x 6 x double>, align 16
+// CHECK-NEXT:    [[BF16X3:%.*]] = alloca <vscale x 24 x bfloat>, align 16
+// CHECK-NEXT:    [[S8X4:%.*]] = alloca <vscale x 64 x i8>, align 16
+// CHECK-NEXT:    [[S16X4:%.*]] = alloca <vscale x 32 x i16>, align 16
+// CHECK-NEXT:    [[S32X4:%.*]] = alloca <vscale x 16 x i32>, align 16
+// CHECK-NEXT:    [[X64X4:%.*]] = alloca <vscale x 8 x i64>, align 16
+// CHECK-NEXT:    [[U8X4:%.*]] = alloca <vscale x 64 x i8>, align 16
+// CHECK-NEXT:    [[U16X4:%.*]] = alloca <vscale x 32 x i16>, align 16
+// CHECK-NEXT:    [[U32X4:%.*]] = alloca <vscale x 16 x i32>, align 16
+// CHECK-NEXT:    [[U64X4:%.*]] = alloca <vscale x 8 x i64>, align 16
+// CHECK-NEXT:    [[F16X4:%.*]] = alloca <vscale x 32 x half>, align 16
+// CHECK-NEXT:    [[F32X4:%.*]] = alloca <vscale x 16 x float>, align 16
+// CHECK-NEXT:    [[F64X4:%.*]] = alloca <vscale x 8 x double>, align 16
+// CHECK-NEXT:    [[BF16X4:%.*]] = alloca <vscale x 32 x bfloat>, align 16
+// CHECK-NEXT:    [[B8:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[B8X2:%.*]] = alloca <vscale x 32 x i1>, align 2
+// CHECK-NEXT:    [[B8X4:%.*]] = alloca <vscale x 64 x i1>, align 2
+// CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, ptr [[S8]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> zeroinitializer, ptr [[S16]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[S32]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> zeroinitializer, ptr [[S64]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, ptr [[U8]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> zeroinitializer, ptr [[U16]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[U32]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> zeroinitializer, ptr [[U64]], align 16
+// CHECK-NEXT:    store <vscale x 8 x half> zeroinitializer, ptr [[F16]], align 16
+// CHECK-NEXT:    store <vscale x 4 x float> zeroinitializer, ptr [[F32]], align 16
+// CHECK-NEXT:    store <vscale x 2 x double> zeroinitializer, ptr [[F64]], align 16
+// CHECK-NEXT:    store <vscale x 8 x bfloat> zeroinitializer, ptr [[BF16]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i8> zeroinitializer, ptr [[S8X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i16> zeroinitializer, ptr [[S16X2]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[S32X2]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i64> zeroinitializer, ptr [[X64X2]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i8> zeroinitializer, ptr [[U8X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i16> zeroinitializer, ptr [[U16X2]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[U32X2]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i64> zeroinitializer, ptr [[U64X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x half> zeroinitializer, ptr [[F16X2]], align 16
+// CHECK-NEXT:    store <vscale x 8 x float> zeroinitializer, ptr [[F32X2]], align 16
+// CHECK-NEXT:    store <vscale x 4 x double> zeroinitializer, ptr [[F64X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x bfloat> zeroinitializer, ptr [[BF16X2]], align 16
+// CHECK-NEXT:    store <vscale x 48 x i8> zeroinitializer, ptr [[S8X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x i16> zeroinitializer, ptr [[S16X3]], align 16
+// CHECK-NEXT:    store <vscale x 12 x i32> zeroinitializer, ptr [[S32X3]], align 16
+// CHECK-NEXT:    store <vscale x 6 x i64> zeroinitializer, ptr [[X64X3]], align 16
+// CHECK-NEXT:    store <vscale x 48 x i8> zeroinitializer, ptr [[U8X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x i16> zeroinitializer, ptr [[U16X3]], align 16
+// CHECK-NEXT:    store <vscale x 12 x i32> zeroinitializer, ptr [[U32X3]], align 16
+// CHECK-NEXT:    store <vscale x 6 x i64> zeroinitializer, ptr [[U64X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x half> zeroinitializer, ptr [[F16X3]], align 16
+// CHECK-NEXT:    store <vscale x 12 x float> zeroinitializer, ptr [[F32X3]], align 16
+// CHECK-NEXT:    store <vscale x 6 x double> zeroinitializer, ptr [[F64X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x bfloat> zeroinitializer, ptr [[BF16X3]], align 16
+// CHECK-NEXT:    store <vscale x 64 x i8> zeroinitializer, ptr [[S8X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i16> zeroinitializer, ptr [[S16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[S32X4]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i64> zeroinitializer, ptr [[X64X4]], align 16
+// CHECK-NEXT:    store <vscale x 64 x i8> zeroinitializer, ptr [[U8X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i16> zeroinitializer, ptr [[U16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[U32X4]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i64> zeroinitializer, ptr [[U64X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x half> zeroinitializer, ptr [[F16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x float> zeroinitializer, ptr [[F32X4]], align 16
+// CHECK-NEXT:    store <vscale x 8 x double> zeroinitializer, ptr [[F64X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x bfloat> zeroinitializer, ptr [[BF16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> zeroinitializer, ptr [[B8]], align 2
+// CHECK-NEXT:    store <vscale x 32 x i1> zeroinitializer, ptr [[B8X2]], align 2
+// CHECK-NEXT:    store <vscale x 64 x i1> zeroinitializer, ptr [[B8X4]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_locals(void) {
+  __SVInt8_t s8{};
+  __SVInt16_t s16{};
+  __SVInt32_t s32{};
+  __SVInt64_t s64{};
+  __SVUint8_t u8{};
+  __SVUint16_t u16{};
+  __SVUint32_t u32{};
+  __SVUint64_t u64{};
+  __SVFloat16_t f16{};
+  __SVFloat32_t f32{};
+  __SVFloat64_t f64{};
+  __SVBFloat16_t bf16{};
+
+  __clang_svint8x2_t s8x2{};
+  __clang_svint16x2_t s16x2{};
+  __clang_svint32x2_t s32x2{};
+  __clang_svint64x2_t x64x2{};
+  __clang_svuint8x2_t u8x2{};
+  __clang_svuint16x2_t u16x2{};
+  __clang_svuint32x2_t u32x2{};
+  __clang_svuint64x2_t u64x2{};
+  __clang_svfloat16x2_t f16x2{};
+  __clang_svfloat32x2_t f32x2{};
+  __clang_svfloat64x2_t f64x2{};
+  __clang_svbfloat16x2_t bf16x2{};
+
+  __clang_svint8x3_t s8x3{};
+  __clang_svint16x3_t s16x3{};
+  __clang_svint32x3_t s32x3{};
+  __clang_svint64x3_t x64x3{};
+  __clang_svuint8x3_t u8x3{};
+  __clang_svuint16x3_t u16x3{};
+  __clang_svuint32x3_t u32x3{};
+  __clang_svuint64x3_t u64x3{};
+  __clang_svfloat16x3_t f16x3{};
+  __clang_svfloat32x3_t f32x3{};
+  __clang_svfloat64x3_t f64x3{};
+  __clang_svbfloat16x3_t bf16x3{};
+
+  __clang_svint8x4_t s8x4{};
+  __clang_svint16x4_t s16x4{};
+  __clang_svint32x4_t s32x4{};
+  __clang_svint64x4_t x64x4{};
+  __clang_svuint8x4_t u8x4{};
+  __clang_svuint16x4_t u16x4{};
+  __clang_svuint32x4_t u32x4{};
+  __clang_svuint64x4_t u64x4{};
+  __clang_svfloat16x4_t f16x4{};
+  __clang_svfloat32x4_t f32x4{};
+  __clang_svfloat64x4_t f64x4{};
+  __clang_svbfloat16x4_t bf16x4{};
+
+  __SVBool_t b8{};
+  __clang_svboolx2_t b8x2{};
+  __clang_svboolx4_t b8x4{};
+}
+
+// CHECK-LABEL: define dso_local void @_Z12test_copy_s8u10__SVInt8_t
+// CHECK-SAME: (<vscale x 16 x i8> [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[B:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[A]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> [[TMP0]], ptr [[B]], align 16
+// CHECK-NEXT:    ret void
+//
+void test_copy_s8(__SVInt8_t a) {
+  __SVInt8_t b{a};
+}
+
+// CHECK-LABEL: define dso_local void @_Z13test_copy_s16u11__SVInt16_t
+// CHECK-SAME: (<vscale x 8 x i16> [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[B:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[A]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> [[TMP0]], ptr [[B]], align 16
+// CHECK-NEXT:    ret void
+//
+void test_copy_s16(__SVInt16_t a) {
+  __SVInt16_t b{a};
+}
+
+// CHECK-LABEL: define dso_local void @_Z13test_copy_s32u11__SVInt32_t
+// CHECK-SAME: (<vscale x 4 x i32> [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[B:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[A]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> [[TMP0]], ptr [[B]], align 16
+// CHECK-NEXT:    ret void
+//
+void test_copy_s32(__SVInt32_t a) {
+  __SVInt32_t b{a};
+}
+
+// CHECK-LABEL: define dso_local void @_Z13test_copy_s64u11__SVInt64_t
+// CHECK-SAME: (<vscale x 2 x i64> [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[B:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[A]], ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[A_ADDR]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> [[TMP0]], ptr [[B]], align 16
+// CHECK-NEXT:    ret void
+//
+void test_copy_s64(__SVInt64_t a)
{ + __SVInt64_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z12test_copy_u8u11__SVUint8_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u8(__SVUint8_t a) { + __SVUint8_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z13test_copy_u16u12__SVUint16_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u16(__SVUint16_t a) { + __SVUint16_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z13test_copy_u32u12__SVUint32_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u32(__SVUint32_t a) { + __SVUint32_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z13test_copy_u64u12__SVUint64_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u64(__SVUint64_t a) { + __SVUint64_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z13test_copy_f16u13__SVFloat16_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f16(__SVFloat16_t a) { + __SVFloat16_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z13test_copy_f32u13__SVFloat32_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f32(__SVFloat32_t a) { + __SVFloat32_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z13test_copy_f64u13__SVFloat64_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f64(__SVFloat64_t a) { + __SVFloat64_t b{a}; +} + +// CHECK-LABEL: define dso_local void 
@_Z14test_copy_bf16u14__SVBFloat16_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_bf16(__SVBFloat16_t a) { + __SVBFloat16_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_s8x210svint8x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s8x2(__clang_svint8x2_t a) { + __clang_svint8x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s16x211svint16x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s16x2(__clang_svint16x2_t a) { + __clang_svint16x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s32x211svint32x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s32x2(__clang_svint32x2_t a) { + __clang_svint32x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s64x211svint64x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s64x2(__clang_svint64x2_t a) { + __clang_svint64x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_u8x211svuint8x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u8x2(__clang_svuint8x2_t a) { + __clang_svuint8x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u16x212svuint16x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u16x2(__clang_svuint16x2_t a) { + __clang_svuint16x2_t b{a}; +} + +// 
CHECK-LABEL: define dso_local void @_Z15test_copy_u32x212svuint32x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u32x2(__clang_svuint32x2_t a) { + __clang_svuint32x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u64x212svuint64x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u64x2(__clang_svuint64x2_t a) { + __clang_svuint64x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f16x213svfloat16x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f16x2(__clang_svfloat16x2_t a) { + __clang_svfloat16x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f32x213svfloat32x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f32x2(__clang_svfloat32x2_t a) { + __clang_svfloat32x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f64x213svfloat64x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f64x2(__clang_svfloat64x2_t a) { + __clang_svfloat64x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z16test_copy_bf16x214svbfloat16x2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_bf16x2(__clang_svbfloat16x2_t a) { + __clang_svbfloat16x2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_s8x310svint8x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void 
test_copy_s8x3(__clang_svint8x3_t a) { + __clang_svint8x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s16x311svint16x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s16x3(__clang_svint16x3_t a) { + __clang_svint16x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s32x311svint32x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s32x3(__clang_svint32x3_t a) { + __clang_svint32x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s64x311svint64x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s64x3(__clang_svint64x3_t a) { + __clang_svint64x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_u8x311svuint8x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u8x3(__clang_svuint8x3_t a) { + __clang_svuint8x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u16x312svuint16x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u16x3(__clang_svuint16x3_t a) { + __clang_svuint16x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u32x312svuint32x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u32x3(__clang_svuint32x3_t a) { + __clang_svuint32x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u64x312svuint64x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 
16 +// CHECK-NEXT: ret void +// +void test_copy_u64x3(__clang_svuint64x3_t a) { + __clang_svuint64x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f16x313svfloat16x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f16x3(__clang_svfloat16x3_t a) { + __clang_svfloat16x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f32x313svfloat32x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f32x3(__clang_svfloat32x3_t a) { + __clang_svfloat32x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f64x313svfloat64x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f64x3(__clang_svfloat64x3_t a) { + __clang_svfloat64x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z16test_copy_bf16x314svbfloat16x3_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_bf16x3(__clang_svbfloat16x3_t a) { + __clang_svbfloat16x3_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_s8x410svint8x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s8x4(__clang_svint8x4_t a) { + __clang_svint8x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s16x411svint16x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s16x4(__clang_svint16x4_t a) { + __clang_svint16x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s32x411svint32x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr 
[[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s32x4(__clang_svint32x4_t a) { + __clang_svint32x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_s64x411svint64x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_s64x4(__clang_svint64x4_t a) { + __clang_svint64x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_u8x411svuint8x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u8x4(__clang_svuint8x4_t a) { + __clang_svuint8x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u16x412svuint16x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u16x4(__clang_svuint16x4_t a) { + __clang_svuint16x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u32x412svuint32x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u32x4(__clang_svuint32x4_t a) { + __clang_svuint32x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_u64x412svuint64x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_u64x4(__clang_svuint64x4_t a) { + __clang_svuint64x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f16x413svfloat16x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f16x4(__clang_svfloat16x4_t a) { + __clang_svfloat16x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f32x413svfloat32x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], 
align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f32x4(__clang_svfloat32x4_t a) { + __clang_svfloat32x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z15test_copy_f64x413svfloat64x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_f64x4(__clang_svfloat64x4_t a) { + __clang_svfloat64x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z16test_copy_bf16x414svbfloat16x4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 16 +// CHECK-NEXT: [[B:%.*]] = alloca , align 16 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 16 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 16 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 16 +// CHECK-NEXT: ret void +// +void test_copy_bf16x4(__clang_svbfloat16x4_t a) { + __clang_svbfloat16x4_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z12test_copy_b8u10__SVBool_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 2 +// CHECK-NEXT: [[B:%.*]] = alloca , align 2 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 2 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void test_copy_b8(__SVBool_t a) { + __SVBool_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_b8x210svboolx2_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 2 +// CHECK-NEXT: [[B:%.*]] = alloca , align 2 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 2 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void test_copy_b8x2(__clang_svboolx2_t a) { + __clang_svboolx2_t b{a}; +} + +// CHECK-LABEL: define dso_local void @_Z14test_copy_b8x410svboolx4_t +// CHECK-SAME: ( [[A:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[A_ADDR:%.*]] = alloca , align 2 +// CHECK-NEXT: [[B:%.*]] = alloca , align 2 +// CHECK-NEXT: store [[A]], ptr [[A_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load , ptr [[A_ADDR]], align 2 +// CHECK-NEXT: store [[TMP0]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void test_copy_b8x4(__clang_svboolx4_t a) { + __clang_svboolx4_t b{a}; +}