diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 8e3f00cc77db42..79cf91f114ba99 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -982,6 +982,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
     setTargetDAGCombine(ISD::BUILD_VECTOR);
     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+    setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
     setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
@@ -15036,6 +15037,47 @@ static SDValue FlattenVectorShuffle(ShuffleVectorSDNode *N, SelectionDAG &DAG) {
                      Op0->getOperand(0), Op1->getOperand(0));
 }
 
+static SDValue
+PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+  SDValue Vec = N->getOperand(0);
+  SDValue SubVec = N->getOperand(1);
+  uint64_t IdxVal = N->getConstantOperandVal(2);
+  EVT VecVT = Vec.getValueType();
+  EVT SubVT = SubVec.getValueType();
+
+  // Only do this for legal fixed vector types.
+  if (!VecVT.isFixedLengthVector() ||
+      !DCI.DAG.getTargetLoweringInfo().isTypeLegal(VecVT) ||
+      !DCI.DAG.getTargetLoweringInfo().isTypeLegal(SubVT))
+    return SDValue();
+
+  // Ignore widening patterns.
+  if (IdxVal == 0 && Vec.isUndef())
+    return SDValue();
+
+  // Subvector must be half the width and an "aligned" insertion.
+  unsigned NumSubElts = SubVT.getVectorNumElements();
+  if ((SubVT.getSizeInBits() * 2) != VecVT.getSizeInBits() ||
+      (IdxVal != 0 && IdxVal != NumSubElts))
+    return SDValue();
+
+  // Fold insert_subvector -> concat_vectors
+  // insert_subvector(Vec,Sub,lo) -> concat_vectors(Sub,extract(Vec,hi))
+  // insert_subvector(Vec,Sub,hi) -> concat_vectors(extract(Vec,lo),Sub)
+  SDLoc DL(N);
+  SDValue Lo, Hi;
+  if (IdxVal == 0) {
+    Lo = SubVec;
+    Hi = DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
+                         DCI.DAG.getVectorIdxConstant(NumSubElts, DL));
+  } else {
+    Lo = DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, Vec,
+                         DCI.DAG.getVectorIdxConstant(0, DL));
+    Hi = SubVec;
+  }
+  return DCI.DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Lo, Hi);
+}
+
 // shuffle(MVETrunc(x, y)) -> VMOVN(x, y)
 static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) {
@@ -17846,6 +17888,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::EXTRACT_VECTOR_ELT:
     return PerformExtractEltCombine(N, DCI, Subtarget);
   case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DCI.DAG);
+  case ISD::INSERT_SUBVECTOR: return PerformInsertSubvectorCombine(N, DCI);
   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget);
   case ARMISD::VDUP: return PerformVDUPCombine(N, DCI.DAG, Subtarget);
diff --git a/llvm/test/CodeGen/ARM/neon-copy.ll b/llvm/test/CodeGen/ARM/neon-copy.ll
new file mode 100644
index 00000000000000..e222939328c841
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/neon-copy.ll
@@ -0,0 +1,2095 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=armv7a-linux-gnu | FileCheck %s
+
+define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
+; CHECK-LABEL: ins16bw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.8 {d17[7]}, [r0]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
+  ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
+; CHECK-LABEL: ins8hw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.16 {d17[2]}, [r0:16]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
+  ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
+; CHECK-LABEL: ins4sw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.32 {d17[0]}, [r0:32]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
+  ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
+; CHECK-LABEL: ins2dw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldm sp, {r2, r3}
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
+  ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
+; CHECK-LABEL: ins8bw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.8 d16[5], r2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
+  ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
+; CHECK-LABEL: ins4hw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.16 d16[3], r2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
+  ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
+; CHECK-LABEL: ins2sw:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r1, r2
+; CHECK-NEXT:    bx lr
+  %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
+  ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
+; CHECK-LABEL: ins16b16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmov.u8 r0, d16[2]
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    vmov.8 d17[7], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <16 x i8> %tmp1, i32 2
+  %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
+  ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
+; CHECK-LABEL: ins8h8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    vmov.16 d17[3], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+  %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
+  ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
+; CHECK-LABEL: ins4s4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmov.32 r0, d17[0]
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+  %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
+  ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
+; CHECK-LABEL: ins2d2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    ldm sp, {r0, r1}
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+  %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
+  ret <2 x i64> %tmp4
+}
+
+define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
+; CHECK-LABEL: ins4f4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d3, r2, r3
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d0, d1}, [r0]
+; CHECK-NEXT:    vmov.f32 s1, s6
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x float> %tmp1, i32 2
+  %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
+  ret <4 x float> %tmp4
+}
+
+define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
+; CHECK-LABEL: ins2df2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    ldm sp, {r0, r1}
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x double> %tmp1, i32 0
+  %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+  ret <2 x double> %tmp4
+}
+
+define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
+; CHECK-LABEL: ins8b16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u8 r0, d16[2]
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov.8 d17[7], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i8> %tmp1, i32 2
+  %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
+  ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
+; CHECK-LABEL: ins4h8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov.16 d17[3], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+  %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
+  ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
+; CHECK-LABEL: ins2s4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+  %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
+  ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
+; CHECK-LABEL: ins1d2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r12, r3
+; CHECK-NEXT:    mov lr, r2
+; CHECK-NEXT:    mov r1, r12
+; CHECK-NEXT:    vmov.32 r2, d16[0]
+; CHECK-NEXT:    mov r0, lr
+; CHECK-NEXT:    vmov.32 r3, d16[1]
+; CHECK-NEXT:    pop {r11, pc}
+  %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+  %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
+  ret <2 x i64> %tmp4
+}
+
+define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
+; CHECK-LABEL: ins2f4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d1, [sp]
+; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    vmov d0, r2, r3
+; CHECK-NEXT:    vmov.f32 s1, s5
+; CHECK-NEXT:    vmov r2, r3, d1
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x float> %tmp1, i32 1
+  %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
+  ret <4 x float> %tmp4
+}
+
+define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
+; CHECK-LABEL: ins1f2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    mov lr, r3
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r2, r3
+; CHECK-NEXT:    mov r1, lr
+; CHECK-NEXT:    mov r3, r12
+; CHECK-NEXT:    pop {r11, pc}
+  %tmp3 = extractelement <1 x double> %tmp1, i32 0
+  %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+  ret <2 x double> %tmp4
+}
+
+define <2 x double> @ins1f2_args_flipped(<2 x double> %tmp2, <1 x double> %tmp1) {
+; CHECK-LABEL: ins1f2_args_flipped:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldm sp, {r2, r3}
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <1 x double> %tmp1, i32 0
+  %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+  ret <2 x double> %tmp4
+}
+
+define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
+; CHECK-LABEL: ins16b8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u8 r0, d16[2]
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov.8 d16[7], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <16 x i8> %tmp1, i32 2
+  %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
+  ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
+; CHECK-LABEL: ins8h4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov.16 d16[3], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+  %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
+  ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
+; CHECK-LABEL: ins4s2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov.32 r0, d17[0]
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov.32 d16[1], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+  %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
+  ret <2 x i32> %tmp4
+}
+
+define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
+; CHECK-LABEL: ins2d1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+  %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
+  ret <1 x i64> %tmp4
+}
+
+define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
+; CHECK-LABEL: ins4f2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vldr d2, [sp]
+; CHECK-NEXT:    vmov.f32 s5, s2
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x float> %tmp1, i32 2
+  %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
+  ret <2 x float> %tmp4
+}
+
+define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
+; CHECK-LABEL: ins2f1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x double> %tmp1, i32 1
+  %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
+  ret <1 x double> %tmp4
+}
+
+define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
+; CHECK-LABEL: ins8b8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u8 r0, d16[2]
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov.8 d16[4], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i8> %tmp1, i32 2
+  %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
+  ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
+; CHECK-LABEL: ins4h4:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vmov.16 d16[3], r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+  %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
+  ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
+; CHECK-LABEL: ins2s2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    vmov.32 r1, d16[0]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i32> %tmp1, i32 0
+  %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
+  ret <2 x i32> %tmp4
+}
+
+define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
+; CHECK-LABEL: ins1d1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+  %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
+  ret <1 x i64> %tmp4
+}
+
+define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
+; CHECK-LABEL: ins2f2:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov.f32 s3, s0
+; CHECK-NEXT:    vmov r0, r1, d1
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x float> %tmp1, i32 0
+  %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
+  ret <2 x float> %tmp4
+}
+
+define <1 x double> @ins1df1(<1 x double> %tmp1, <1 x double> %tmp2) {
+; CHECK-LABEL: ins1df1:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <1 x double> %tmp1, i32 0
+  %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
+  ret <1 x double> %tmp4
+}
+
+define i32 @umovw16b(<16 x i8> %tmp1) {
+; CHECK-LABEL: umovw16b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u8 r0, d17[0]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+  %tmp4 = zext i8 %tmp3 to i32
+  ret i32 %tmp4
+}
+
+define i32 @umovw8h(<8 x i16> %tmp1) {
+; CHECK-LABEL: umovw8h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+  %tmp4 = zext i16 %tmp3 to i32
+  ret i32 %tmp4
+}
+
+define i32 @umovw4s(<4 x i32> %tmp1) {
+; CHECK-LABEL: umovw4s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov.32 r0, d17[0]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+  ret i32 %tmp3
+}
+
+define i64 @umovx2d(<2 x i64> %tmp1) {
+; CHECK-LABEL: umovx2d:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r1, r3
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i64> %tmp1, i32 1
+  ret i64 %tmp3
+}
+
+define i32 @umovw8b(<8 x i8> %tmp1) {
+; CHECK-LABEL: umovw8b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u8 r0, d16[7]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i8> %tmp1, i32 7
+  %tmp4 = zext i8 %tmp3 to i32
+  ret i32 %tmp4
+}
+
+define i32 @umovw4h(<4 x i16> %tmp1) {
+; CHECK-LABEL: umovw4h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+  %tmp4 = zext i16 %tmp3 to i32
+  ret i32 %tmp4
+}
+
+define i32 @umovw2s(<2 x i32> %tmp1) {
+; CHECK-LABEL: umovw2s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+  ret i32 %tmp3
+}
+
+define i64 @umovx1d(<1 x i64> %tmp1) {
+; CHECK-LABEL: umovx1d:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+  ret i64 %tmp3
+}
+
+define i32 @smovw16b(<16 x i8> %tmp1) {
+; CHECK-LABEL: smovw16b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s8 r0, d17[0]
+; CHECK-NEXT:    add r0, r0, r0
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+  %tmp4 = sext i8 %tmp3 to i32
+  %tmp5 = add i32 %tmp4, %tmp4
+  ret i32 %tmp5
+}
+
+define i32 @smovw8h(<8 x i16> %tmp1) {
+; CHECK-LABEL: smovw8h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    add r0, r0, r0
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+  %tmp4 = sext i16 %tmp3 to i32
+  %tmp5 = add i32 %tmp4, %tmp4
+  ret i32 %tmp5
+}
+
+define i64 @smovx16b(<16 x i8> %tmp1) {
+; CHECK-LABEL: smovx16b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov.s8 r0, d17[0]
+; CHECK-NEXT:    asr r1, r0, #31
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+  %tmp4 = sext i8 %tmp3 to i64
+  ret i64 %tmp4
+}
+
+define i64 @smovx8h(<8 x i16> %tmp1) {
+; CHECK-LABEL: smovx8h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    asr r1, r0, #31
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+  %tmp4 = sext i16 %tmp3 to i64
+  ret i64 %tmp4
+}
+
+define i64 @smovx4s(<4 x i32> %tmp1) {
+; CHECK-LABEL: smovx4s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    asr r1, r2, #31
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+  %tmp4 = sext i32 %tmp3 to i64
+  ret i64 %tmp4
+}
+
+define i32 @smovw8b(<8 x i8> %tmp1) {
+; CHECK-LABEL: smovw8b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s8 r0, d16[4]
+; CHECK-NEXT:    add r0, r0, r0
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i8> %tmp1, i32 4
+  %tmp4 = sext i8 %tmp3 to i32
+  %tmp5 = add i32 %tmp4, %tmp4
+  ret i32 %tmp5
+}
+
+define i32 @smovw4h(<4 x i16> %tmp1) {
+; CHECK-LABEL: smovw4h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    add r0, r0, r0
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+  %tmp4 = sext i16 %tmp3 to i32
+  %tmp5 = add i32 %tmp4, %tmp4
+  ret i32 %tmp5
+}
+
+define i32 @smovx8b(<8 x i8> %tmp1) {
+; CHECK-LABEL: smovx8b:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s8 r0, d16[6]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <8 x i8> %tmp1, i32 6
+  %tmp4 = sext i8 %tmp3 to i32
+  ret i32 %tmp4
+}
+
+define i32 @smovx4h(<4 x i16> %tmp1) {
+; CHECK-LABEL: smovx4h:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+  %tmp4 = sext i16 %tmp3 to i32
+  ret i32 %tmp4
+}
+
+define i64 @smovx2s(<2 x i32> %tmp1) {
+; CHECK-LABEL: smovx2s:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    asr r1, r0, #31
+; CHECK-NEXT:    bx lr
+  %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+  %tmp4 = sext i32 %tmp3 to i64
+  ret i64 %tmp4
+}
+
+define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
+; CHECK-LABEL: test_vcopy_lane_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vldr d18, .LCPI50_0
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI50_0:
+; CHECK-NEXT:    .byte 0 @ 0x0
+; CHECK-NEXT:    .byte 1 @ 0x1
+; CHECK-NEXT:    .byte 2 @ 0x2
+; CHECK-NEXT:    .byte 3 @ 0x3
+; CHECK-NEXT:    .byte 4 @ 0x4
+; CHECK-NEXT:    .byte 11 @ 0xb
+; CHECK-NEXT:    .byte 6 @ 0x6
+; CHECK-NEXT:    .byte 7 @ 0x7
+  %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
+  ret <8 x i8> %vset_lane
+}
+
+define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
+; CHECK-LABEL: test_vcopyq_laneq_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r2, r3
+; CHECK-NEXT:    vldr d18, .LCPI51_0
+; CHECK-NEXT:    vtbl.8 d17, {d16, d17}, d18
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI51_0:
+; CHECK-NEXT:    .byte 0 @ 0x0
+; CHECK-NEXT:    .byte 1 @ 0x1
+; CHECK-NEXT:    .byte 2 @ 0x2
+; CHECK-NEXT:    .byte 3 @ 0x3
+; CHECK-NEXT:    .byte 4 @ 0x4
+; CHECK-NEXT:    .byte 5 @ 0x5
+; CHECK-NEXT:    .byte 14 @ 0xe
+; CHECK-NEXT:    .byte 7 @ 0x7
+  %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
+  ret <16 x i8> %vset_lane
+}
+
+define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
+; CHECK-LABEL: test_vcopy_lane_swap_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vldr d18, .LCPI52_0
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI52_0:
+; CHECK-NEXT:    .byte 8 @ 0x8
+; CHECK-NEXT:    .byte 9 @ 0x9
+; CHECK-NEXT:    .byte 10 @ 0xa
+; CHECK-NEXT:    .byte 11 @ 0xb
+; CHECK-NEXT:    .byte 12 @ 0xc
+; CHECK-NEXT:    .byte 13 @ 0xd
+; CHECK-NEXT:    .byte 14 @ 0xe
+; CHECK-NEXT:    .byte 0 @ 0x0
+  %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 0>
+  ret <8 x i8> %vset_lane
+}
+
+define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
+; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vldr d16, .LCPI53_0
+; CHECK-NEXT:    vtbl.8 d2, {d1, d2}, d16
+; CHECK-NEXT:    vmov r2, r3, d3
+; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI53_0:
+; CHECK-NEXT:    .byte 7 @ 0x7
+; CHECK-NEXT:    .byte 9 @ 0x9
+; CHECK-NEXT:    .byte 10 @ 0xa
+; CHECK-NEXT:    .byte 11 @ 0xb
+; CHECK-NEXT:    .byte 12 @ 0xc
+; CHECK-NEXT:    .byte 13 @ 0xd
+; CHECK-NEXT:    .byte 14 @ 0xe
+; CHECK-NEXT:    .byte 15 @ 0xf
+  %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  ret <16 x i8> %vset_lane
+}
+
+define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.8 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <8 x i8> undef, i8 %v1, i32 0
+  %vecinit1.i = insertelement <8 x i8> %vecinit.i, i8 %v1, i32 1
+  %vecinit2.i = insertelement <8 x i8> %vecinit1.i, i8 %v1, i32 2
+  %vecinit3.i = insertelement <8 x i8> %vecinit2.i, i8 %v1, i32 3
+  %vecinit4.i = insertelement <8 x i8> %vecinit3.i, i8 %v1, i32 4
+  %vecinit5.i = insertelement <8 x i8> %vecinit4.i, i8 %v1, i32 5
+  %vecinit6.i = insertelement <8 x i8> %vecinit5.i, i8 %v1, i32 6
+  %vecinit7.i = insertelement <8 x i8> %vecinit6.i, i8 %v1, i32 7
+  ret <8 x i8> %vecinit7.i
+}
+
+define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.16 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <4 x i16> undef, i16 %v1, i32 0
+  %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %v1, i32 1
+  %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %v1, i32 2
+  %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %v1, i32 3
+  ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.32 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <2 x i32> undef, i32 %v1, i32 0
+  %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %v1, i32 1
+  ret <2 x i32> %vecinit1.i
+}
+
+define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
+; CHECK-LABEL: test_vdup_n_u64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <1 x i64> undef, i64 %v1, i32 0
+  ret <1 x i64> %vecinit.i
+}
+
+define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.8 q8, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <16 x i8> undef, i8 %v1, i32 0
+  %vecinit1.i = insertelement <16 x i8> %vecinit.i, i8 %v1, i32 1
+  %vecinit2.i = insertelement <16 x i8> %vecinit1.i, i8 %v1, i32 2
+  %vecinit3.i = insertelement <16 x i8> %vecinit2.i, i8 %v1, i32 3
+  %vecinit4.i = insertelement <16 x i8> %vecinit3.i, i8 %v1, i32 4
+  %vecinit5.i = insertelement <16 x i8> %vecinit4.i, i8 %v1, i32 5
+  %vecinit6.i = insertelement <16 x i8> %vecinit5.i, i8 %v1, i32 6
+  %vecinit7.i = insertelement <16 x i8> %vecinit6.i, i8 %v1, i32 7
+  %vecinit8.i = insertelement <16 x i8> %vecinit7.i, i8 %v1, i32 8
+  %vecinit9.i = insertelement <16 x i8> %vecinit8.i, i8 %v1, i32 9
+  %vecinit10.i = insertelement <16 x i8> %vecinit9.i, i8 %v1, i32 10
+  %vecinit11.i = insertelement <16 x i8> %vecinit10.i, i8 %v1, i32 11
+  %vecinit12.i = insertelement <16 x i8> %vecinit11.i, i8 %v1, i32 12
+  %vecinit13.i = insertelement <16 x i8> %vecinit12.i, i8 %v1, i32 13
+  %vecinit14.i = insertelement <16 x i8> %vecinit13.i, i8 %v1, i32 14
+  %vecinit15.i = insertelement <16 x i8> %vecinit14.i, i8 %v1, i32 15
+  ret <16 x i8> %vecinit15.i
+}
+
+define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.16 q8, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <8 x i16> undef, i16 %v1, i32 0
+  %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %v1, i32 1
+  %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %v1, i32 2
+  %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %v1, i32 3
+  %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %v1, i32 4
+  %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %v1, i32 5
+  %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %v1, i32 6
+  %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %v1, i32 7
+  ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <4 x i32> undef, i32 %v1, i32 0
+  %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %v1, i32 1
+  %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %v1, i32 2
+  %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %v1, i32 3
+  ret <4 x i32> %vecinit3.i
+}
+
+define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
+; CHECK-LABEL: test_vdupq_n_u64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    bx lr
+  %vecinit.i = insertelement <2 x i64> undef, i64 %v1, i32 0
+  %vecinit1.i = insertelement <2 x i64> %vecinit.i, i64 %v1, i32 1
+  ret <2 x i64> %vecinit1.i
+}
+
+define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdup_lane_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.8 d16, d16[5]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+  ret <8 x i8> %shuffle
+}
+
+define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdup_lane_s16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.16 d16, d16[2]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+  ret <4 x i16> %shuffle
+}
+
+define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdup_lane_s32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 d16, d16[1]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+  ret <2 x i32> %shuffle
+}
+
+define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.8 q8, d16[5]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+  ret <16 x i8> %shuffle
+}
+
+define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.16 q8, d16[2]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+  ret <8 x i16> %shuffle
+}
+
+define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 q8, d16[1]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x i32> %shuffle
+}
+
+define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
+; CHECK-LABEL: test_vdupq_lane_s64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vorr d17, d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <1 x i64> %v1, <1 x i64> undef, <2 x i32> zeroinitializer
+  ret <2 x i64> %shuffle
+}
+
+define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdup_laneq_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.8 d16, d16[5]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+  ret <8 x i8> %shuffle
+}
+
+define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdup_laneq_s16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.16 d16, d16[2]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+  ret <4 x i16> %shuffle
+}
+
+define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdup_laneq_s32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 d16, d16[1]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+  ret <2 x i32> %shuffle
+}
+
+define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.8 q8, d16[5]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+  ret <16 x i8> %shuffle
+}
+
+define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.16 q8, d16[2]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+  ret <8 x i16> %shuffle
+}
+
+define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 q8, d16[1]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x i32> %shuffle
+}
+
+define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
+; CHECK-LABEL: test_vdupq_laneq_s64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    bx lr
+  %shuffle = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
+  ret <2 x i64> %shuffle
+}
+
+define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
+; CHECK-LABEL: test_bitcastv8i8toi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast <8 x i8> %in to i64
+  ret i64 %res
+}
+
+define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
+; CHECK-LABEL: test_bitcastv4i16toi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast <4 x i16> %in to i64
+  ret i64 %res
+}
+
+define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
+; CHECK-LABEL: test_bitcastv2i32toi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast <2 x i32> %in to i64
+  ret i64 %res
+}
+
+define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
+; CHECK-LABEL: test_bitcastv2f32toi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast <2 x float> %in to i64
+  ret i64 %res
+}
+
+define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
+; CHECK-LABEL: test_bitcastv1i64toi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast <1 x i64> %in to i64
+  ret i64 %res
+}
+
+define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
+; CHECK-LABEL: test_bitcastv1f64toi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast <1 x double> %in to i64
+  ret i64 %res
+}
+
+define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov8i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast i64 %in to <8 x i8>
+  ret <8 x i8> %res
+}
+
+define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov4i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast i64 %in to <4 x i16>
+  ret <4 x i16> %res
+}
+
+define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast i64 %in to <2 x i32>
+  ret <2 x i32> %res
+}
+
+define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov2f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast i64 %in to <2 x float>
+  ret <2 x float> %res
+}
+
+define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov1i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast i64 %in to <1 x i64>
+  ret <1 x i64> %res
+}
+
+define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %res = bitcast i64 %in to <1 x double>
+  ret <1 x double> %res
+}
+
+define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
+; CHECK-LABEL: test_bitcastv8i8tov1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.s8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    pop {r11, pc}
+  %sub.i = sub <8 x i8> zeroinitializer, %a
+  %1 = bitcast <8 x i8> %sub.i to <1 x double>
+  %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+  ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
+; CHECK-LABEL: test_bitcastv4i16tov1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.s16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    pop {r11, pc}
+  %sub.i = sub <4 x i16> zeroinitializer, %a
+  %1 = bitcast <4 x i16> %sub.i to <1 x double>
+  %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+  ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv2i32tov1f64(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_bitcastv2i32tov1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.s32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    pop {r11, pc}
+  %sub.i = sub <2 x i32> zeroinitializer, %a
+  %1 = bitcast <2 x i32> %sub.i to <1 x double>
+  %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+  ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv1i64tov1f64(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1i64tov1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov.i32 d16, #0x0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsub.i64 d16, d16, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    pop {r11, pc}
+  %sub.i = sub <1 x i64> zeroinitializer, %a
+  %1 = bitcast <1 x i64> %sub.i to <1 x double>
+  %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+  ret <1 x i64> %vcvt.i
+}
+
+define <1 x i64> @test_bitcastv2f32tov1f64(<2 x float> %a) #0 {
+; CHECK-LABEL: test_bitcastv2f32tov1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.f32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    pop {r11, pc}
+  %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
+  %1 = bitcast <2 x float> %sub.i to <1 x double>
+  %vcvt.i = fptosi <1 x double> %1 to <1 x i64>
+  ret <1 x i64> %vcvt.i
+}
+
+define <8 x i8> @test_bitcastv1f64tov8i8(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov8i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __floatdidf
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.s8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, pc}
+  %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+  %1 = bitcast <1 x double> %vcvt.i to <8 x i8>
+  %sub.i = sub <8 x i8> zeroinitializer, %1
+  ret <8 x i8> %sub.i
+}
+
+define <4 x i16> @test_bitcastv1f64tov4i16(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov4i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __floatdidf
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.s16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, pc}
+  %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+  %1 = bitcast <1 x double> %vcvt.i to <4 x i16>
+  %sub.i = sub <4 x i16> zeroinitializer, %1
+  ret <4 x i16> %sub.i
+}
+
+define <2 x i32> @test_bitcastv1f64tov2i32(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __floatdidf
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.s32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, pc}
+  %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+  %1 = bitcast <1 x double> %vcvt.i to <2 x i32>
+  %sub.i = sub <2 x i32> zeroinitializer, %1
+  ret <2 x i32> %sub.i
+}
+
+define <1 x i64> @test_bitcastv1f64tov1i64(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov1i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __floatdidf
+; CHECK-NEXT:    vmov.i32 d16, #0x0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vsub.i64 d16, d16, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, pc}
+  %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+  %1 = bitcast <1 x double> %vcvt.i to <1 x i64>
+  %sub.i = sub <1 x i64> zeroinitializer, %1
+  ret <1 x i64> %sub.i
+}
+
+define <2 x float> @test_bitcastv1f64tov2f32(<1 x i64> %a) #0 {
+; CHECK-LABEL: test_bitcastv1f64tov2f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    bl __floatdidf
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vneg.f32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, pc}
+  %vcvt.i = sitofp <1 x i64> %a to <1 x double>
+  %1 = bitcast <1 x double> %vcvt.i to <2 x float>
+  %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %1
+  ret <2 x float> %sub.i
+}
+
+; Test insert element into an undef vector
+define <8 x i8> @scalar_to_vector.v8i8(i8 %a) {
+  %b = insertelement <8 x i8> undef, i8 %a, i32 0
+  ret <8 x i8> %b
+}
+
+define <16 x i8> @scalar_to_vector.v16i8(i8 %a) {
+  %b = insertelement <16 x i8> undef, i8 %a, i32 0
+  ret <16 x i8> %b
+}
+
+define <4 x i16> @scalar_to_vector.v4i16(i16 %a) {
+  %b = insertelement <4 x i16> undef, i16 %a, i32 0
+  ret <4 x i16> %b
+}
+
+define <8 x i16> @scalar_to_vector.v8i16(i16 %a) {
+  %b = insertelement <8 x i16> undef, i16 %a, i32 0
+  ret <8 x i16> %b
+}
+
+define <2 x i32> @scalar_to_vector.v2i32(i32 %a) {
+  %b = insertelement <2 x i32> undef, i32 %a, i32 0
+  ret <2 x i32> %b
+}
+
+define <4 x i32> @scalar_to_vector.v4i32(i32 %a) {
+  %b = insertelement <4 x i32> undef, i32 %a, i32 0
+  ret <4 x i32> %b
+}
+
+define <2 x i64> @scalar_to_vector.v2i64(i64 %a) {
+  %b = insertelement <2 x i64> undef, i64 %a, i32 0
+  ret <2 x i64> %b
+}
+
+define <8 x i8> @testDUP.v1i8(<1 x i8> %a) {
+  %b = extractelement <1 x i8> %a, i32 0
+  %c = insertelement <8 x i8> undef, i8 %b, i32 0
+  %d = insertelement <8 x i8> %c, i8 %b, i32 1
+  %e = insertelement <8 x i8> %d, i8 %b, i32 2
+  %f = insertelement <8 x i8> %e, i8 %b, i32 3
+  %g = insertelement <8 x i8> %f, i8 %b, i32 4
+  %h = insertelement <8 x i8> %g, i8 %b, i32 5
+  %i = insertelement <8 x i8> %h, i8 %b, i32 6
+  %j = insertelement <8 x i8> %i, i8 %b, i32 7
+  ret <8 x i8> %j
+}
+
+define <8 x i16> @testDUP.v1i16(<1 x i16> %a) {
+  %b = extractelement <1 x i16> %a, i32 0
+  %c = insertelement <8 x i16> undef, i16 %b, i32 0
+  %d = insertelement <8 x i16> %c, i16 %b, i32 1
+  %e = insertelement <8 x i16> %d, i16 %b, i32 2
+  %f = insertelement <8 x i16> %e, i16 %b, i32 3
+  %g = insertelement <8 x i16> %f, i16 %b, i32 4
+  %h = insertelement <8 x i16> %g, i16 %b, i32 5
+  %i = insertelement <8 x i16> %h, i16 %b, i32 6
+  %j = insertelement <8 x i16> %i, i16 %b, i32 7
+  ret <8 x i16> %j
+}
+
+define <4 x i32> @testDUP.v1i32(<1 x i32> %a) {
+  %b = extractelement <1 x i32> %a, i32 0
+  %c = insertelement <4 x i32> undef, i32 %b, i32 0
+  %d = insertelement <4 x i32> %c, i32 %b, i32 1
+  %e = insertelement <4 x i32> %d, i32 %b, i32 2
+  %f = insertelement <4 x i32> %e, i32 %b, i32 3
+  ret <4 x i32> %f
+}
+
+define <8 x i8> @getl(<16 x i8> %x) #0 {
+; CHECK-LABEL: getl:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    bx lr
+  %vecext = extractelement <16 x i8> %x, i32 0
+  %vecinit = insertelement <8 x i8> undef, i8 %vecext, i32 0
+  %vecext1 = extractelement <16 x i8> %x, i32 1
+  %vecinit2 = insertelement <8 x i8> %vecinit, i8 %vecext1, i32 1
+  %vecext3 = extractelement <16 x i8> %x, i32 2
+  %vecinit4 = insertelement <8 x i8> %vecinit2, i8 %vecext3, i32 2
+  %vecext5 = extractelement <16 x i8> %x, i32 3
+  %vecinit6 = insertelement <8 x i8> %vecinit4, i8 %vecext5, i32 3
+  %vecext7 = extractelement <16 x i8> %x, i32 4
+  %vecinit8 = insertelement <8 x i8> %vecinit6, i8 %vecext7, i32 4
+  %vecext9 = extractelement <16 x i8> %x, i32 5
+  %vecinit10 = insertelement <8 x i8> %vecinit8, i8 %vecext9, i32 5
+  %vecext11 = extractelement <16 x i8> %x, i32 6
+  %vecinit12 = insertelement <8 x i8> %vecinit10, i8 %vecext11, i32 6
+  %vecext13 = extractelement <16 x i8> %x, i32 7
+  %vecinit14 = insertelement <8 x i8> %vecinit12, i8 %vecext13, i32 7
+  ret <8 x i8> %vecinit14
+}
+
+define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
+; CHECK-LABEL: test_extracts_inserts_varidx_extract:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    push {r11}
+; CHECK-NEXT:    mov r11, sp
+; CHECK-NEXT:    sub sp, sp, #28
+; CHECK-NEXT:    bfc sp, #0, #4
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    ldr r3, [r11, #4]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r2, sp
+; CHECK-NEXT:    and r3, r3, #7
+; CHECK-NEXT:    vmov.u16 r0, d16[1]
+; CHECK-NEXT:    vmov.u16 r1, d16[2]
+; CHECK-NEXT:    lsl r3, r3, #1
+; CHECK-NEXT:    vmov.u16 r12, d16[3]
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r2:128], r3
+; CHECK-NEXT:    vld1.16 {d16[0]}, [r2:16]
+; CHECK-NEXT:    vmov.16 d16[1], r0
+; CHECK-NEXT:    vmov.16 d16[2], r1
+; CHECK-NEXT:    vmov.16 d16[3], r12
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov sp, r11
+; CHECK-NEXT:    pop {r11}
+; CHECK-NEXT:    bx lr
+  %tmp = extractelement <8 x i16> %x, i32 %idx
+  %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 0
+  %tmp3 = extractelement <8 x i16> %x, i32 1
+  %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 1
+  %tmp5 = extractelement <8 x i16> %x, i32 2
+  %tmp6 = insertelement <4 x i16> %tmp4, i16 %tmp5, i32 2
+  %tmp7 = extractelement <8 x i16> %x, i32 3
+  %tmp8 = insertelement <4 x i16> %tmp6, i16 %tmp7, i32 3
+  ret <4 x i16> %tmp8
+}
+
+define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
+; CHECK-LABEL: test_extracts_inserts_varidx_insert:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sub sp, sp, #8
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    ldr r3, [sp, #8]
+; CHECK-NEXT:    mov r2, sp
+; CHECK-NEXT:    vmov.u16 r0, d16[1]
+; CHECK-NEXT:    and r3, r3, #3
+; CHECK-NEXT:    vmov.u16 r1, d16[2]
+; CHECK-NEXT:    vmov.u16 r12, d16[3]
+; CHECK-NEXT:    orr r2, r2, r3, lsl #1
+; CHECK-NEXT:    vst1.16 {d16[0]}, [r2:16]
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov.16 d16[1], r0
+; CHECK-NEXT:    vmov.16 d16[2], r1
+; CHECK-NEXT:    vmov.16 d16[3], r12
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    add sp, sp, #8
+; CHECK-NEXT:    bx lr
+  %tmp = extractelement <8 x i16> %x, i32 0
+  %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 %idx
+  %tmp3 = extractelement <8 x i16> %x, i32 1
+  %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 1
+  %tmp5 = extractelement <8 x i16> %x, i32 2
+  %tmp6 = insertelement <4 x i16> %tmp4, i16 %tmp5, i32 2
+  %tmp7 = extractelement <8 x i16> %x, i32 3
+  %tmp8 = insertelement <4 x i16> %tmp6, i16 %tmp7, i32 3
+  ret <4 x i16> %tmp8
+}
+
+define <4 x i16> @test_dup_v2i32_v4i16(<2 x i32> %a) {
+; CHECK-LABEL: test_dup_v2i32_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    vmov.16 d16[1], r0
+; CHECK-NEXT:    vdup.16 d16, d16[1]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <2 x i32> %a, i32 1
+  %vget_lane = trunc i32 %x to i16
+  %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+  %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+  %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+  %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+  ret <4 x i16> %vecinit3.i
+}
+
+define <8 x i16> @test_dup_v4i32_v8i16(<4 x i32> %a) {
+; CHECK-LABEL: test_dup_v4i32_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 q8, r3
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <4 x i32> %a, i32 3
+  %vget_lane = trunc i32 %x to i16
+  %vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0
+  %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1
+  %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+  %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+  %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %vget_lane, i32 4
+  %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %vget_lane, i32 5
+  %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %vget_lane, i32 6
+  %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %vget_lane, i32 7
+  ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i16> @test_dup_v1i64_v4i16(<1 x i64> %a) {
+; CHECK-LABEL: test_dup_v1i64_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.16 d16[0], r0
+; CHECK-NEXT:    vdup.16 d16, d16[0]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <1 x i64> %a, i32 0
+  %vget_lane = trunc i64 %x to i16
+  %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+  %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+  %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+  %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+  ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_dup_v1i64_v2i32(<1 x i64> %a) {
+; CHECK-LABEL: test_dup_v1i64_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 d16, d16[0]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <1 x i64> %a, i32 0
+  %vget_lane = trunc i64 %x to i32
+  %vecinit.i = insertelement <2 x i32> undef, i32 %vget_lane, i32 0
+  %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %vget_lane, i32 1
+  ret <2 x i32> %vecinit1.i
+}
+
+define <8 x i16> @test_dup_v2i64_v8i16(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 q8, r2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <2 x i64> %a, i32 1
+  %vget_lane = trunc i64 %x to i16
+  %vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0
+  %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1
+  %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+  %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+  %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %vget_lane, i32 4
+  %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %vget_lane, i32 5
+  %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %vget_lane, i32 6
+  %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %vget_lane, i32 7
+  ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i32> @test_dup_v2i64_v4i32(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r1, r2
+; CHECK-NEXT:    mov r3, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <2 x i64> %a, i32 1
+  %vget_lane = trunc i64 %x to i32
+  %vecinit.i = insertelement <4 x i32> undef, i32 %vget_lane, i32 0
+  %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %vget_lane, i32 1
+  %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %vget_lane, i32 2
+  %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %vget_lane, i32 3
+  ret <4 x i32> %vecinit3.i
+}
+
+define <4 x i16> @test_dup_v4i32_v4i16(<4 x i32> %a) {
+; CHECK-LABEL: test_dup_v4i32_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 d16, r1
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <4 x i32> %a, i32 1
+  %vget_lane = trunc i32 %x to i16
+  %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+  %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+  %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+  %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+  ret <4 x i16> %vecinit3.i
+}
+
+define <4 x i16> @test_dup_v2i64_v4i16(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.16 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <2 x i64> %a, i32 0
+  %vget_lane = trunc i64 %x to i16
+  %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0
+  %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1
+  %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %vget_lane, i32 2
+  %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %vget_lane, i32 3
+  ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_dup_v2i64_v2i32(<2 x i64> %a) {
+; CHECK-LABEL: test_dup_v2i64_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vdup.32 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %x = extractelement <2 x i64> %a, i32 0
+  %vget_lane = trunc i64 %x to i32
+  %vecinit.i = insertelement <2 x i32> undef, i32 %vget_lane, i32 0
+  %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %vget_lane, i32 1
+  ret <2 x i32> %vecinit1.i
+}
+
+define <2 x i32> @test_concat_undef_v1i32(<2 x i32> %a) {
+; CHECK-LABEL: test_concat_undef_v1i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 d16, d16[0]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <2 x i32> %a, i32 0
+  %vecinit1.i = insertelement <2 x i32> undef, i32 %0, i32 1
+  ret <2 x i32> %vecinit1.i
+}
+
+define <2 x i32> @test_concat_same_v1i32_v1i32(<2 x i32> %a) {
+; CHECK-LABEL: test_concat_same_v1i32_v1i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vdup.32 d16, d16[0]
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = extractelement <2 x i32> %a, i32 0
+  %vecinit.i = insertelement <2 x i32> undef, i32 %0, i32 0
+  %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %0, i32 1
+  ret <2 x i32> %vecinit1.i
+}
+
+
+define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v16i8_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit30 = shufflevector <16 x i8> %x, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  ret <16 x i8> %vecinit30
+}
+
+define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v8i8_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <8 x i8> %x, i32 0
+  %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
+  %vecext1 = extractelement <8 x i8> %x, i32 1
+  %vecinit2 = insertelement <16 x i8> %vecinit, i8 %vecext1, i32 1
+  %vecext3 = extractelement <8 x i8> %x, i32 2
+  %vecinit4 = insertelement <16 x i8> %vecinit2, i8 %vecext3, i32 2
+  %vecext5 = extractelement <8 x i8> %x, i32 3
+  %vecinit6 = insertelement <16 x i8> %vecinit4, i8 %vecext5, i32 3
+  %vecext7 = extractelement <8 x i8> %x, i32 4
+  %vecinit8 = insertelement <16 x i8> %vecinit6, i8 %vecext7, i32 4
+  %vecext9 = extractelement <8 x i8> %x, i32 5
+  %vecinit10 = insertelement <16 x i8> %vecinit8, i8 %vecext9, i32 5
+  %vecext11 = extractelement <8 x i8> %x, i32 6
+  %vecinit12 = insertelement <16 x i8> %vecinit10, i8 %vecext11, i32 6
+  %vecext13 = extractelement <8 x i8> %x, i32 7
+  %vecinit14 = insertelement <16 x i8> %vecinit12, i8 %vecext13, i32 7
+  %vecinit30 = shufflevector <16 x i8> %vecinit14, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  ret <16 x i8> %vecinit30
+}
+
+define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v16i8_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <16 x i8> %x, i32 0
+  %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
+  %vecext1 = extractelement <16 x i8> %x, i32 1
+  %vecinit2 = insertelement <16 x i8> %vecinit, i8 %vecext1, i32 1
+  %vecext3 = extractelement <16 x i8> %x, i32 2
+  %vecinit4 = insertelement <16 x i8> %vecinit2, i8 %vecext3, i32 2
+  %vecext5 = extractelement <16 x i8> %x, i32 3
+  %vecinit6 = insertelement <16 x i8> %vecinit4, i8 %vecext5, i32 3
+  %vecext7 = extractelement <16 x i8> %x, i32 4
+  %vecinit8 = insertelement <16 x i8> %vecinit6, i8 %vecext7, i32 4
+  %vecext9 = extractelement <16 x i8> %x, i32 5
+  %vecinit10 = insertelement <16 x i8> %vecinit8, i8 %vecext9, i32 5
+  %vecext11 = extractelement <16 x i8> %x, i32 6
+  %vecinit12 = insertelement <16 x i8> %vecinit10, i8 %vecext11, i32 6
+  %vecext13 = extractelement <16 x i8> %x, i32 7
+  %vecinit14 = insertelement <16 x i8> %vecinit12, i8 %vecext13, i32 7
+  %vecext15 = extractelement <8 x i8> %y, i32 0
+  %vecinit16 = insertelement <16 x i8> %vecinit14, i8 %vecext15, i32 8
+  %vecext17 = extractelement <8 x i8> %y, i32 1
+  %vecinit18 = insertelement <16 x i8> %vecinit16, i8 %vecext17, i32 9
+  %vecext19 = extractelement <8 x i8> %y, i32 2
+  %vecinit20 = insertelement <16 x i8> %vecinit18, i8 %vecext19, i32 10
+  %vecext21 = extractelement <8 x i8> %y, i32 3
+  %vecinit22 = insertelement <16 x i8> %vecinit20, i8 %vecext21, i32 11
+  %vecext23 = extractelement <8 x i8> %y, i32 4
+  %vecinit24 = insertelement <16 x i8> %vecinit22, i8 %vecext23, i32 12
+  %vecext25 = extractelement <8 x i8> %y, i32 5
+  %vecinit26 = insertelement <16 x i8> %vecinit24, i8 %vecext25, i32 13
+  %vecext27 = extractelement <8 x i8> %y, i32 6
+  %vecinit28 = insertelement <16 x i8> %vecinit26, i8 %vecext27, i32 14
+  %vecext29 = extractelement <8 x i8> %y, i32 7
+  %vecinit30 = insertelement <16 x i8> %vecinit28, i8 %vecext29, i32 15
+  ret <16 x i8> %vecinit30
+}
+
+define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
+; CHECK-LABEL: test_concat_v16i8_v8i8_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <8 x i8> %x, i32 0
+  %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
+  %vecext1 = extractelement <8 x i8> %x, i32 1
+  %vecinit2 = insertelement <16 x i8> %vecinit, i8 %vecext1, i32 1
+  %vecext3 = extractelement <8 x i8> %x, i32 2
+  %vecinit4 = insertelement <16 x i8> %vecinit2, i8 %vecext3, i32 2
+  %vecext5 = extractelement <8 x i8> %x, i32 3
+  %vecinit6 = insertelement <16 x i8> %vecinit4, i8 %vecext5, i32 3
+  %vecext7 = extractelement <8 x i8> %x, i32 4
+  %vecinit8 = insertelement <16 x i8> %vecinit6, i8 %vecext7, i32 4
+  %vecext9 = extractelement <8 x i8> %x, i32 5
+  %vecinit10 = insertelement <16 x i8> %vecinit8, i8 %vecext9, i32 5
+  %vecext11 = extractelement <8 x i8> %x, i32 6
+  %vecinit12 = insertelement <16 x i8> %vecinit10, i8 %vecext11, i32 6
+  %vecext13 = extractelement <8 x i8> %x, i32 7
+  %vecinit14 = insertelement <16 x i8> %vecinit12, i8 %vecext13, i32 7
+  %vecext15 = extractelement <8 x i8> %y, i32 0
+  %vecinit16 = insertelement <16 x i8> %vecinit14, i8 %vecext15, i32 8
+  %vecext17 = extractelement <8 x i8> %y, i32 1
+  %vecinit18 = insertelement <16 x i8> %vecinit16, i8 %vecext17, i32 9
+  %vecext19 = extractelement <8 x i8> %y, i32 2
+  %vecinit20 = insertelement <16 x i8> %vecinit18, i8 %vecext19, i32 10
+  %vecext21 = extractelement <8 x i8> %y, i32 3
+  %vecinit22 = insertelement <16 x i8> %vecinit20, i8 %vecext21, i32 11
+  %vecext23 = extractelement <8 x i8> %y, i32 4
+  %vecinit24 = insertelement <16 x i8> %vecinit22, i8 %vecext23, i32 12
+  %vecext25 = extractelement <8 x i8> %y, i32 5
+  %vecinit26 = insertelement <16 x i8> %vecinit24, i8 %vecext25, i32 13
+  %vecext27 = extractelement <8 x i8> %y, i32 6
+  %vecinit28 = insertelement <16 x i8> %vecinit26, i8 %vecext27, i32 14
+  %vecext29 = extractelement <8 x i8> %y, i32 7
+  %vecinit30 = insertelement <16 x i8> %vecinit28, i8 %vecext29, i32 15
+
+define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v8i16_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit14 = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  ret <8 x i16> %vecinit14
+}
+
+define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v4i16_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <4 x i16> %x, i32 0
+  %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
+  %vecext1 = extractelement <4 x i16> %x, i32 1
+  %vecinit2 = insertelement <8 x i16> %vecinit, i16 %vecext1, i32 1
+  %vecext3 = extractelement <4 x i16> %x, i32 2
+  %vecinit4 = insertelement <8 x i16> %vecinit2, i16 %vecext3, i32 2
+  %vecext5 = extractelement <4 x i16> %x, i32 3
+  %vecinit6 = insertelement <8 x i16> %vecinit4, i16 %vecext5, i32 3
+  %vecinit14 = shufflevector <8 x i16> %vecinit6, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  ret <8 x i16> %vecinit14
+}
+
+define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v8i16_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <8 x i16> %x, i32 0
+  %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
+  %vecext1 = extractelement <8 x i16> %x, i32 1
+  %vecinit2 = insertelement <8 x i16> %vecinit, i16 %vecext1, i32 1
+  %vecext3 = extractelement <8 x i16> %x, i32 2
+  %vecinit4 = insertelement <8 x i16> %vecinit2, i16 %vecext3, i32 2
+  %vecext5 = extractelement <8 x i16> %x, i32 3
+  %vecinit6 = insertelement <8 x i16> %vecinit4, i16 %vecext5, i32 3
+  %vecext7 = extractelement <4 x i16> %y, i32 0
+  %vecinit8 = insertelement <8 x i16> %vecinit6, i16 %vecext7, i32 4
+  %vecext9 = extractelement <4 x i16> %y, i32 1
+  %vecinit10 = insertelement <8 x i16> %vecinit8, i16 %vecext9, i32 5
+  %vecext11 = extractelement <4 x i16> %y, i32 2
+  %vecinit12 = insertelement <8 x i16> %vecinit10, i16 %vecext11, i32 6
+  %vecext13 = extractelement <4 x i16> %y, i32 3
+  %vecinit14 = insertelement <8 x i16> %vecinit12, i16 %vecext13, i32 7
+  ret <8 x i16> %vecinit14
+}
+
+define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
+; CHECK-LABEL: test_concat_v8i16_v4i16_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <4 x i16> %x, i32 0
+  %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
+  %vecext1 = extractelement <4 x i16> %x, i32 1
+  %vecinit2 = insertelement <8 x i16> %vecinit, i16 %vecext1, i32 1
+  %vecext3 = extractelement <4 x i16> %x, i32 2
+  %vecinit4 = insertelement <8 x i16> %vecinit2, i16 %vecext3, i32 2
+  %vecext5 = extractelement <4 x i16> %x, i32 3
+  %vecinit6 = insertelement <8 x i16> %vecinit4, i16 %vecext5, i32 3
+  %vecext7 = extractelement <4 x i16> %y, i32 0
+  %vecinit8 = insertelement <8 x i16> %vecinit6, i16 %vecext7, i32 4
+  %vecext9 = extractelement <4 x i16> %y, i32 1
+  %vecinit10 = insertelement <8 x i16> %vecinit8, i16 %vecext9, i32 5
+  %vecext11 = extractelement <4 x i16> %y, i32 2
+  %vecinit12 = insertelement <8 x i16> %vecinit10, i16 %vecext11, i32 6
+  %vecext13 = extractelement <4 x i16> %y, i32 3
+  %vecinit14 = insertelement <8 x i16> %vecinit12, i16 %vecext13, i32 7
+  ret <8 x i16> %vecinit14
+}
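+
+; The same concatenations for <4 x i32>/<2 x i32> and <2 x i64>/<1 x i64>
+; sources follow. Note that the <4 x i32> cases built from a shufflevector of
+; two full q-registers currently still lower to a pair of vext.32 rotations
+; rather than a single concat-style move.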
+
+define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v4i32_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT:    vext.32 q8, q8, q8, #2
+; CHECK-NEXT:    vext.32 q8, q8, q9, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit6 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  ret <4 x i32> %vecinit6
+}
+
+define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v2i32_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vext.32 q8, q8, q8, #2
+; CHECK-NEXT:    vext.32 q8, q8, q9, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <2 x i32> %x, i32 0
+  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+  %vecext1 = extractelement <2 x i32> %x, i32 1
+  %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+  %vecinit6 = shufflevector <4 x i32> %vecinit2, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  ret <4 x i32> %vecinit6
+}
+
+define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v4i32_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov.32 r2, d16[0]
+; CHECK-NEXT:    vmov.32 r3, d16[1]
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <4 x i32> %x, i32 0
+  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
+  %vecext1 = extractelement <4 x i32> %x, i32 1
+  %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
+  %vecext3 = extractelement <2 x i32> %y, i32 0
+  %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext3, i32 2
+  %vecext5 = extractelement <2 x i32> %y, i32 1
+  %vecinit6 = insertelement <4 x i32> %vecinit4, i32 %vecext5, i32 3
+  ret <4 x i32> %vecinit6
+}
+
+define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
+; CHECK-LABEL: test_concat_v4i32_v2i32_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %vecinit6
+}
+
+define <2 x i64> @test_concat_v2i64_v2i64_v2i64(<2 x i64> %x, <2 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v2i64_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecinit2 = shufflevector <2 x i64> %x, <2 x i64> %y, <2 x i32> <i32 0, i32 2>
+  ret <2 x i64> %vecinit2
+}
+
+define <2 x i64> @test_concat_v2i64_v1i64_v2i64(<1 x i64> %x, <2 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v1i64_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <1 x i64> %x, i32 0
+  %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
+  %vecinit2 = shufflevector <2 x i64> %vecinit, <2 x i64> %y, <2 x i32> <i32 0, i32 2>
+  ret <2 x i64> %vecinit2
+}
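+
+; The <2 x i64> case above needs no code at all: %x's single element already
+; sits in r0/r1 and %y's low element in r2/r3, exactly where the <2 x i64>
+; result is returned.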
+define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr d17, [sp]
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <2 x i64> %x, i32 0
+  %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
+  %vecext1 = extractelement <1 x i64> %y, i32 0
+  %vecinit2 = insertelement <2 x i64> %vecinit, i64 %vecext1, i32 1
+  ret <2 x i64> %vecinit2
+}
+
+define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
+; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+entry:
+  %vecext = extractelement <1 x i64> %x, i32 0
+  %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
+  %vecext1 = extractelement <1 x i64> %y, i32 0
+  %vecinit2 = insertelement <2 x i64> %vecinit, i64 %vecext1, i32 1
+  ret <2 x i64> %vecinit2
+}
+
+define <4 x i16> @concat_vector_v4i16_const() {
+; CHECK-LABEL: concat_vector_v4i16_const:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 d16, #0x0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <4 x i32> zeroinitializer
+  ret <4 x i16> %r
+}
+
+define <4 x i16> @concat_vector_v4i16_const_one() {
+; CHECK-LABEL: concat_vector_v4i16_const_one:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i16 d16, #0x1
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <4 x i32> zeroinitializer
+  ret <4 x i16> %r
+}
+
+define <4 x i32> @concat_vector_v4i32_const() {
+; CHECK-LABEL: concat_vector_v4i32_const:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i32> zeroinitializer, <1 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %r
+}
+
+define <8 x i8> @concat_vector_v8i8_const() {
+; CHECK-LABEL: concat_vector_v8i8_const:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 d16, #0x0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <8 x i32> zeroinitializer
+  ret <8 x i8> %r
+}
+
+define <8 x i16> @concat_vector_v8i16_const() {
+; CHECK-LABEL: concat_vector_v8i16_const:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q8, #0x0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @concat_vector_v8i16_const_one() {
+; CHECK-LABEL: concat_vector_v8i16_const_one:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i16 q8, #0x1
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define <16 x i8> @concat_vector_v16i8_const() {
+; CHECK-LABEL: concat_vector_v16i8_const:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q8, #0x0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <16 x i32> zeroinitializer
+  ret <16 x i8> %r
+}
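+
+; Splats of a variable <1 x iN> element (the constant cases above fold to
+; vmov.iN immediates). These should lower to a single vdup; the <1 x i32>
+; case stays in GPR moves because the <4 x i32> result is returned in r0-r3.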
+define <4 x i16> @concat_vector_v4i16(<1 x i16> %a) {
+; CHECK-LABEL: concat_vector_v4i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.16 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i16> %a, <1 x i16> undef, <4 x i32> zeroinitializer
+  ret <4 x i16> %r
+}
+
+define <4 x i32> @concat_vector_v4i32(<1 x i32> %a) {
+; CHECK-LABEL: concat_vector_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i32> %a, <1 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %r
+}
+
+define <8 x i8> @concat_vector_v8i8(<1 x i8> %a) {
+; CHECK-LABEL: concat_vector_v8i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.8 d16, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i8> %a, <1 x i8> undef, <8 x i32> zeroinitializer
+  ret <8 x i8> %r
+}
+
+define <8 x i16> @concat_vector_v8i16(<1 x i16> %a) {
+; CHECK-LABEL: concat_vector_v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.16 q8, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i16> %a, <1 x i16> undef, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define <16 x i8> @concat_vector_v16i8(<1 x i8> %a) {
+; CHECK-LABEL: concat_vector_v16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.8 q8, r0
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    bx lr
+  %r = shufflevector <1 x i8> %a, <1 x i8> undef, <16 x i32> zeroinitializer
+  ret <16 x i8> %r
+}