diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst index dda367607d043..33b0152bd7b49 100644 --- a/llvm/docs/GlobalISel/GenericOpcode.rst +++ b/llvm/docs/GlobalISel/GenericOpcode.rst @@ -639,11 +639,6 @@ Concatenate two vectors and shuffle the elements according to the mask operand. The mask operand should be an IR Constant which exactly matches the corresponding mask for the IR shufflevector instruction. -G_SPLAT_VECTOR -^^^^^^^^^^^^^^^^ - -Create a vector where all elements are the scalar from the source operand. - Vector Reduction Operations --------------------------- diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index 6762b1b360d5e..1387a0a37561c 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -1063,7 +1063,8 @@ class MachineIRBuilder { /// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill /// the number of elements - MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src); + MachineInstrBuilder buildSplatVector(const DstOp &Res, + const SrcOp &Src); /// Build and insert \p Res = G_BUILD_VECTOR_TRUNC \p Op0, ... /// @@ -1098,15 +1099,6 @@ class MachineIRBuilder { MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef Mask); - /// Build and insert \p Res = G_SPLAT_VECTOR \p Val - /// - /// \pre setBasicBlock or setMI must have been called. - /// \pre \p Res must be a generic virtual register with vector type. - /// \pre \p Val must be a generic virtual register with scalar type. - /// - /// \return a MachineInstrBuilder for the newly created instruction. - MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val); - /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ... /// /// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def index 94fba491148b2..6aded2ceebe13 100644 --- a/llvm/include/llvm/Support/TargetOpcodes.def +++ b/llvm/include/llvm/Support/TargetOpcodes.def @@ -736,9 +736,6 @@ HANDLE_TARGET_OPCODE(G_EXTRACT_VECTOR_ELT) /// Generic shufflevector. HANDLE_TARGET_OPCODE(G_SHUFFLE_VECTOR) -/// Generic splatvector. -HANDLE_TARGET_OPCODE(G_SPLAT_VECTOR) - /// Generic count trailing zeroes. HANDLE_TARGET_OPCODE(G_CTTZ) diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td index d967885aa2d75..d2036e478d18f 100644 --- a/llvm/include/llvm/Target/GenericOpcodes.td +++ b/llvm/include/llvm/Target/GenericOpcodes.td @@ -1450,13 +1450,6 @@ def G_SHUFFLE_VECTOR: GenericInstruction { let hasSideEffects = false; } -// Generic splatvector. 
-def G_SPLAT_VECTOR: GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type1:$val); - let hasSideEffects = false; -} - //------------------------------------------------------------------------------ // Vector reductions //------------------------------------------------------------------------------ diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp index 1869e0d41a51f..64e2d517e3b9c 100644 --- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp @@ -309,7 +309,7 @@ MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res, // For vectors, CSE the element only for now. LLT Ty = Res.getLLTTy(*getMRI()); if (Ty.isVector()) - return buildSplatBuildVector(Res, buildConstant(Ty.getElementType(), Val)); + return buildSplatVector(Res, buildConstant(Ty.getElementType(), Val)); FoldingSetNodeID ID; GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); @@ -336,7 +336,7 @@ MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res, // For vectors, CSE the element only for now. LLT Ty = Res.getLLTTy(*getMRI()); if (Ty.isVector()) - return buildSplatBuildVector(Res, buildFConstant(Ty.getElementType(), Val)); + return buildSplatVector(Res, buildFConstant(Ty.getElementType(), Val)); FoldingSetNodeID ID; GISelInstProfileBuilder ProfBuilder(ID, *getMRI()); diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 365870f540dae..7c986dbbc2c7c 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1598,10 +1598,10 @@ bool IRTranslator::translateGetElementPtr(const User &U, // We might need to splat the base pointer into a vector if the offsets // are vectors. if (WantSplatVector && !PtrTy.isVector()) { - BaseReg = MIRBuilder - .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy), - BaseReg) - .getReg(0); + BaseReg = + MIRBuilder + .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg) + .getReg(0); PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth); PtrTy = getLLTForType(*PtrIRTy, *DL); OffsetIRTy = DL->getIndexType(PtrIRTy); @@ -1639,10 +1639,8 @@ bool IRTranslator::translateGetElementPtr(const User &U, LLT IdxTy = MRI->getType(IdxReg); if (IdxTy != OffsetTy) { if (!IdxTy.isVector() && WantSplatVector) { - IdxReg = MIRBuilder - .buildSplatBuildVector(OffsetTy.changeElementType(IdxTy), - IdxReg) - .getReg(0); + IdxReg = MIRBuilder.buildSplatVector( + OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); } IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); @@ -2999,19 +2997,6 @@ bool IRTranslator::translateExtractElement(const User &U, bool IRTranslator::translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder) { - // A ShuffleVector that has operates on scalable vectors is a splat vector - // where the value of the splat vector is the 0th element of the first - // operand, since the index mask operand is the zeroinitializer (undef and - // poison are treated as zeroinitializer here). 
- if (U.getOperand(0)->getType()->isScalableTy()) { - Value *Op0 = U.getOperand(0); - auto SplatVal = MIRBuilder.buildExtractVectorElementConstant( - LLT::scalar(Op0->getType()->getScalarSizeInBits()), - getOrCreateVReg(*Op0), 0); - MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal); - return true; - } - ArrayRef Mask; if (auto *SVI = dyn_cast(&U)) Mask = SVI->getShuffleMask(); diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 2ec47f72aca39..1d016e684c48f 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -8391,7 +8391,7 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) { // For vector types create a G_BUILD_VECTOR. if (Ty.isVector()) - Val = MIB.buildSplatBuildVector(Ty, Val).getReg(0); + Val = MIB.buildSplatVector(Ty, Val).getReg(0); return Val; } diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index a5a136e2effc6..cdd605a5221ad 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -326,7 +326,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res, auto Const = buildInstr(TargetOpcode::G_CONSTANT) .addDef(getMRI()->createGenericVirtualRegister(EltTy)) .addCImm(&Val); - return buildSplatBuildVector(Res, Const); + return buildSplatVector(Res, Const); } auto Const = buildInstr(TargetOpcode::G_CONSTANT); @@ -363,7 +363,7 @@ MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res, .addDef(getMRI()->createGenericVirtualRegister(EltTy)) .addFPImm(&Val); - return buildSplatBuildVector(Res, Const); + return buildSplatVector(Res, Const); } auto Const = buildInstr(TargetOpcode::G_FCONSTANT); @@ -711,8 +711,8 @@ MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res, return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); } -MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res, - const SrcOp &Src) { +MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res, + const SrcOp &Src) { SmallVector TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src); return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec); } @@ -742,14 +742,6 @@ MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res, return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask); } -MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res, - const SrcOp &Src) { - LLT DstTy = Res.getLLTTy(*getMRI()); - assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() && - "Expected Src to match Dst elt ty"); - return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src); -} - MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp index ecb3bd33bdfd4..1d0757c5d7f5f 100644 --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -1640,24 +1640,6 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) { break; } - - case TargetOpcode::G_SPLAT_VECTOR: { - LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); - LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); - - if (!DstTy.isScalableVector()) - report("Destination type must be a scalable vector", MI); - - if (!SrcTy.isScalar()) - report("Source type must be a scalar", MI); - - if 
(DstTy.getScalarType() != SrcTy) - report("Element type of the destination must be the same type as the " - "source type", - MI); - - break; - } case TargetOpcode::G_DYN_STACKALLOC: { const MachineOperand &DstOp = MI->getOperand(0); const MachineOperand &AllocOp = MI->getOperand(1); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 4713bd605c243..750d70c03eabd 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -20920,8 +20920,7 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const { unsigned Op = Inst.getOpcode(); if (Op == Instruction::Add || Op == Instruction::Sub || Op == Instruction::And || Op == Instruction::Or || - Op == Instruction::Xor || Op == Instruction::InsertElement || - Op == Instruction::Xor || Op == Instruction::ShuffleVector) + Op == Instruction::Xor || Op == Instruction::InsertElement) return false; if (Inst.getType()->isScalableTy()) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir index 7774158e15ec5..d87704cf45d5d 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -625,9 +625,6 @@ # DEBUG-NEXT: G_SHUFFLE_VECTOR (opcode {{[0-9]+}}): 2 type indices, 0 imm indices # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected -# DEBUG-NEXT: G_SPLAT_VECTOR (opcode 217): 2 type indices, 0 imm indices -# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_CTTZ (opcode {{[0-9]+}}): 2 type indices, 0 imm indices # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: .. 
imm index coverage check SKIPPED: user-defined predicate detected diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll deleted file mode 100644 index df7778899b0d0..0000000000000 --- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll +++ /dev/null @@ -1,1774 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4 -; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \ -; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s -; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \ -; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s - -define @shufflevector_nxv1i1_0() { - ; RV32-LABEL: name: shufflevector_nxv1i1_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv1i1_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv1i1_1() { - ; RV32-LABEL: name: shufflevector_nxv1i1_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv1i1_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv1i1_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv1i1_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v0 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv1i1_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v0 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) 
- ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv2i1_0() { - ; RV32-LABEL: name: shufflevector_nxv2i1_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv2i1_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv2i1_1() { - ; RV32-LABEL: name: shufflevector_nxv2i1_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv2i1_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv2i1_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv2i1_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v0 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv2i1_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v0 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv4i1_0() { - ; RV32-LABEL: name: shufflevector_nxv4i1_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: 
shufflevector_nxv4i1_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv4i1_1() { - ; RV32-LABEL: name: shufflevector_nxv4i1_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv4i1_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv4i1_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv4i1_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v0 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv4i1_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v0 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv8i1_0() { - ; RV32-LABEL: name: shufflevector_nxv8i1_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv8i1_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv8i1_1() { - ; RV32-LABEL: name: shufflevector_nxv8i1_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; 
RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv8i1_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv8i1_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv8i1_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v0 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv8i1_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v0 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv16i1_0() { - ; RV32-LABEL: name: shufflevector_nxv16i1_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv16i1_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv16i1_1() { - ; RV32-LABEL: name: shufflevector_nxv16i1_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv16i1_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR 
[[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv16i1_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv16i1_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v0 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v0 - ; - ; RV64-LABEL: name: shufflevector_nxv16i1_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v0 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v0 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s1) - ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v0 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv1i8_0() { - ; RV32-LABEL: name: shufflevector_nxv1i8_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv1i8_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv1i8_1() { - ; RV32-LABEL: name: shufflevector_nxv1i8_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv1i8_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv1i8_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv1i8_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; 
RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv1i8_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv2i8_0() { - ; RV32-LABEL: name: shufflevector_nxv2i8_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv2i8_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv2i8_1() { - ; RV32-LABEL: name: shufflevector_nxv2i8_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv2i8_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv2i8_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv2i8_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv2i8_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv4i8_0() { - ; RV32-LABEL: name: 
shufflevector_nxv4i8_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv4i8_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv4i8_1() { - ; RV32-LABEL: name: shufflevector_nxv4i8_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv4i8_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv4i8_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv4i8_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv4i8_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv8i8_0() { - ; RV32-LABEL: name: shufflevector_nxv8i8_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv8i8_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = 
G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv8i8_1() { - ; RV32-LABEL: name: shufflevector_nxv8i8_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv8i8_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv8i8_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv8i8_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv8i8_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv16i8_0() { - ; RV32-LABEL: name: shufflevector_nxv16i8_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8m2 - ; - ; RV64-LABEL: name: shufflevector_nxv16i8_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8m2 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv16i8_1() { - ; RV32-LABEL: name: shufflevector_nxv16i8_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = 
G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8m2 - ; - ; RV64-LABEL: name: shufflevector_nxv16i8_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8m2 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv16i8_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv16i8_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8m2 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8m2 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8m2 - ; - ; RV64-LABEL: name: shufflevector_nxv16i8_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8m2 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8m2 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s8) - ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8m2 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv1i16_0() { - ; RV32-LABEL: name: shufflevector_nxv1i16_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv1i16_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv1i16_1() { - ; RV32-LABEL: name: shufflevector_nxv1i16_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv1i16_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a 
-} - -define @shufflevector_nxv1i16_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv1i16_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv1i16_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv2i16_0() { - ; RV32-LABEL: name: shufflevector_nxv2i16_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv2i16_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv2i16_1() { - ; RV32-LABEL: name: shufflevector_nxv2i16_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv2i16_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv2i16_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv2i16_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv2i16_2 - ; RV64: bb.1 (%ir-block.0): - ; 
RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv4i16_0() { - ; RV32-LABEL: name: shufflevector_nxv4i16_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv4i16_0 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector poison, poison, poison - ret %a -} - -define @shufflevector_nxv4i16_1() { - ; RV32-LABEL: name: shufflevector_nxv4i16_1 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv4i16_1 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: [[DEF:%[0-9]+]]:_() = G_IMPLICIT_DEF - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %a = shufflevector undef, undef, undef - ret %a -} - -define @shufflevector_nxv4i16_2( %a) { - ; RV32-LABEL: name: shufflevector_nxv4i16_2 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: liveins: $v8 - ; RV32-NEXT: {{ $}} - ; RV32-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV32-NEXT: PseudoRET implicit $v8 - ; - ; RV64-LABEL: name: shufflevector_nxv4i16_2 - ; RV64: bb.1 (%ir-block.0): - ; RV64-NEXT: liveins: $v8 - ; RV64-NEXT: {{ $}} - ; RV64-NEXT: [[COPY:%[0-9]+]]:_() = COPY $v8 - ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](), [[C]](s64) - ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_() = G_SPLAT_VECTOR [[EVEC]](s16) - ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]]() - ; RV64-NEXT: PseudoRET implicit $v8 - %b = shufflevector %a , poison, zeroinitializer - ret %b -} - -define @shufflevector_nxv8i16_0() { - ; RV32-LABEL: name: shufflevector_nxv8i16_0 - ; RV32: bb.1 (%ir-block.0): - ; RV32-NEXT: [[DEF:%[0-9]+]]:_() = 
[Deleted RISC-V GlobalISel IRTranslator check lines for scalable shufflevector splats: shufflevector_nxv8i16_{0,1,2}, shufflevector_nxv16i16_{0,1,2}, shufflevector_nxv{1,2,4,8,16}i32_{0,1,2}, and shufflevector_nxv{1,2,4,8,16}i64_{0,1,2}. The _0 variants return `shufflevector poison, poison, poison`, the _1 variants `shufflevector undef, undef, undef`, and the _2 variants `shufflevector %a, poison, zeroinitializer`. Each RV32/RV64 body expected G_IMPLICIT_DEF (or a COPY of the live-in vector register), G_CONSTANT i64 0, G_EXTRACT_VECTOR_ELT of element 0, G_SPLAT_VECTOR of the extracted element, and a COPY to $v8/$v8m2/$v8m4/$v8m8 before PseudoRET; the nxv16i64 variants additionally expected G_CONCAT_VECTORS of $v8m8 and $v16m8 and G_UNMERGE_VALUES of the splat back into $v8m8 and $v16m8.]

diff --git a/llvm/test/MachineVerifier/test_g_splat_vector.mir b/llvm/test/MachineVerifier/test_g_splat_vector.mir
deleted file mode 100644
index 0d1d8a3e6dcc6..0000000000000
--- a/llvm/test/MachineVerifier/test_g_splat_vector.mir
+++ /dev/null
@@ -1,27 +0,0 @@
-# RUN: not --crash llc -o - -mtriple=arm64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
-# REQUIRES: aarch64-registered-target
----
-name: g_splat_vector
-tracksRegLiveness: true
-liveins:
-body: |
-  bb.0:
-    %0:_(s32) = G_CONSTANT i32 0
-    %1:_(<2 x s32>) = G_IMPLICIT_DEF
-    %2:_() = G_IMPLICIT_DEF
-
-    ; CHECK: Destination type must be a scalable vector
-    %3:_(s32) = G_SPLAT_VECTOR %0
-
-    ; CHECK: Destination type must be a scalable vector
-    %4:_(<2 x s32>) = G_SPLAT_VECTOR %0
-
-    ; CHECK: Source type must be a scalar
-    %5:_() = G_SPLAT_VECTOR %1
-
-    ; CHECK: Source type must be a scalar
-    %6:_() = G_SPLAT_VECTOR %2
-
-    ; CHECK: Element type of the destination must be the same type as the source type
-    %7:_() = G_SPLAT_VECTOR %0
-...
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
index 33155d2c9a964..73837279701a9 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
@@ -147,9 +147,9 @@ TEST_F(AArch64GISelMITest, LowerRotatesVector) {
   LLT S32 = LLT::scalar(32);
   LLT V4S32 = LLT::fixed_vector(4, S32);
   auto SrcTrunc = B.buildTrunc(S32, Copies[0]);
-  auto Src = B.buildSplatBuildVector(V4S32, SrcTrunc);
+  auto Src = B.buildSplatVector(V4S32, SrcTrunc);
   auto AmtTrunc = B.buildTrunc(S32, Copies[1]);
-  auto Amt = B.buildSplatBuildVector(V4S32, AmtTrunc);
+  auto Amt = B.buildSplatVector(V4S32, AmtTrunc);
   auto ROTR = B.buildInstr(TargetOpcode::G_ROTR, {V4S32}, {Src, Amt});
 
   AInfo Info(MF->getSubtarget());
diff --git a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
index 59a86fa5646f3..f52e49df0bcde 100644
--- a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
@@ -61,7 +61,7 @@ TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
   LLT v4s64 = LLT::fixed_vector(4, s64);
 
   MachineInstrBuilder FortyTwoSplat =
-      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
+      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
   int64_t Cst;
   EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
   EXPECT_EQ(Cst, 42);
@@ -625,7 +625,7 @@ TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
   LLT v4s64 = LLT::fixed_vector(4, s64);
 
   MachineInstrBuilder FortyTwoSplat =
-      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
+      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
   MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
 
   EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
@@ -655,7 +655,7 @@ TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
   LLT v4s64 = LLT::fixed_vector(4, s64);
 
   MachineInstrBuilder FortyTwoSplat =
-      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
+      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
   MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
 
   EXPECT_TRUE(