diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 61bdbfc47d947..890b9c419050b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -16,6 +16,7 @@
 #include "RISCVSubtarget.h"
 #include "RISCVTargetMachine.h"
 #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
@@ -159,9 +160,71 @@ RISCVInstructionSelector::RISCVInstructionSelector(
 
 InstructionSelector::ComplexRendererFns
 RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
-  // TODO: Also check if we are seeing the result of an AND operation which
-  // could be bypassed since we only check the lower log2(xlen) bits.
-  return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
+  if (!Root.isReg())
+    return std::nullopt;
+
+  using namespace llvm::MIPatternMatch;
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+
+  Register RootReg = Root.getReg();
+  Register ShAmtReg = RootReg;
+  const LLT ShiftLLT = MRI.getType(RootReg);
+  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
+  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
+  // Peek through zext.
+  Register ZExtSrcReg;
+  if (mi_match(ShAmtReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
+    ShAmtReg = ZExtSrcReg;
+  }
+
+  APInt AndMask;
+  Register AndSrcReg;
+  if (mi_match(ShAmtReg, MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
+    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
+    if (ShMask.isSubsetOf(AndMask)) {
+      ShAmtReg = AndSrcReg;
+    } else {
+      // SimplifyDemandedBits may have optimized the mask so try restoring any
+      // bits that are known zero.
+      KnownBits Known = KB->getKnownBits(ShAmtReg);
+      if (ShMask.isSubsetOf(AndMask | Known.Zero))
+        ShAmtReg = AndSrcReg;
+    }
+  }
+
+  APInt Imm;
+  Register Reg;
+  if (mi_match(ShAmtReg, MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
+    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
+      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
+      // to avoid the ADD.
+      ShAmtReg = Reg;
+  } else if (mi_match(ShAmtReg, MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
+    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
+      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
+      // to generate a NEG instead of a SUB of a constant.
+      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
+      return {{[=](MachineInstrBuilder &MIB) {
+        MachineIRBuilder(*MIB.getInstr())
+            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
+        MIB.addReg(ShAmtReg);
+      }}};
+    }
+    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
+      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
+      // to generate a NOT instead of a SUB of a constant.
+      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+      return {{[=](MachineInstrBuilder &MIB) {
+        MachineIRBuilder(*MIB.getInstr())
+            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
+            .addImm(-1);
+        MIB.addReg(ShAmtReg);
+      }}};
+    }
+  }
+
+  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
 }
 
 InstructionSelector::ComplexRendererFns
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
index 6731f54e055d7..b9e9f36f766fb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
@@ -18,9 +18,7 @@ body: |
     ; CHECK-NEXT: {{ $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY1]], 32
-    ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[SLLI]], 32
-    ; CHECK-NEXT: [[ROLW:%[0-9]+]]:gpr = ROLW [[COPY]], [[SRLI]]
+    ; CHECK-NEXT: [[ROLW:%[0-9]+]]:gpr = ROLW [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $x10 = COPY [[ROLW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gprb(s64) = COPY $x10
@@ -71,9 +69,7 @@ body: |
     ; CHECK-NEXT: {{ $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
-    ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY1]], 32
-    ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[SLLI]], 32
-    ; CHECK-NEXT: [[RORW:%[0-9]+]]:gpr = RORW [[COPY]], [[SRLI]]
+    ; CHECK-NEXT: [[RORW:%[0-9]+]]:gpr = RORW [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $x10 = COPY [[RORW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gprb(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv32.mir
new file mode 100644
index 0000000000000..7d6c228c80861
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv32.mir
@@ -0,0 +1,190 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -run-pass=instruction-select \
+# RUN:   -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+# This 32-bit version doesn't have tests for zext, because there is no legal type to zext from.
+---
+name:            shl
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %3:gprb(s32) = G_SHL %0, %1
+    $x10 = COPY %3(s32)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_and
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_and
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_CONSTANT i32 31
+    %3:gprb(s32) = G_AND %1, %2
+    %4:gprb(s32) = G_SHL %0, %3(s32)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_and_with_simplified_mask
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_and_with_simplified_mask
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY1]], 31
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[ANDI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_CONSTANT i32 31
+    %3:gprb(s32) = G_AND %1, %2
+    %4:gprb(s32) = G_CONSTANT i32 31
+    %5:gprb(s32) = G_AND %3, %4
+    %6:gprb(s32) = G_SHL %0, %5(s32)
+    $x10 = COPY %6(s32)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_add
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_add
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_CONSTANT i32 32
+    %3:gprb(s32) = G_ADD %1, %2
+    %4:gprb(s32) = G_SHL %0, %3(s32)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_sub
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_sub
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr = SUB $x0, [[COPY1]]
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[SUB]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_CONSTANT i32 32
+    %3:gprb(s32) = G_SUB %2, %1
+    %4:gprb(s32) = G_SHL %0, %3(s32)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_bitwise_not
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_bitwise_not
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY1]], -1
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[XORI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_CONSTANT i32 -1
+    %3:gprb(s32) = G_SUB %2, %1
+    %4:gprb(s32) = G_SHL %0, %3(s32)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_bitwise_not_2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_bitwise_not_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY1]], -1
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[XORI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s32) = COPY $x10
+    %1:gprb(s32) = COPY $x11
+    %2:gprb(s32) = G_CONSTANT i32 31
+    %3:gprb(s32) = G_SUB %2, %1
+    %4:gprb(s32) = G_SHL %0, %3(s32)
+    $x10 = COPY %4(s32)
+    PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir
new file mode 100644
index 0000000000000..1e6890098498e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/shift-rv64.mir
@@ -0,0 +1,243 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv64 -run-pass=instruction-select \
+# RUN:   -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            shl
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %3:gprb(s64) = G_SHL %0, %1
+    $x10 = COPY %3(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_zext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10
+
+    ; CHECK-LABEL: name: shl_zext
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[ADDI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s32) = G_CONSTANT i32 1
+    %2:gprb(s64) = G_ZEXT %1
+    %3:gprb(s64) = G_SHL %0, %2(s64)
+    $x10 = COPY %3(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_and
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_and
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_CONSTANT i64 63
+    %3:gprb(s64) = G_AND %1, %2
+    %4:gprb(s64) = G_SHL %0, %3(s64)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_and_with_simplified_mask
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_and_with_simplified_mask
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY1]], 62
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[ANDI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_CONSTANT i64 62
+    %3:gprb(s64) = G_AND %1, %2
+    %4:gprb(s64) = G_CONSTANT i64 62
+    %5:gprb(s64) = G_AND %3, %4
+    %6:gprb(s64) = G_SHL %0, %5(s64)
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_add
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_add
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_CONSTANT i64 64
+    %3:gprb(s64) = G_ADD %1, %2
+    %4:gprb(s64) = G_SHL %0, %3(s64)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_sub
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_sub
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[SUBW:%[0-9]+]]:gpr = SUBW $x0, [[COPY1]]
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[SUBW]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_CONSTANT i64 64
+    %3:gprb(s64) = G_SUB %2, %1
+    %4:gprb(s64) = G_SHL %0, %3(s64)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_bitwise_not
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_bitwise_not
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY1]], -1
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[XORI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_CONSTANT i64 -1
+    %3:gprb(s64) = G_SUB %2, %1
+    %4:gprb(s64) = G_SHL %0, %3(s64)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_bitwise_not_2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_bitwise_not_2
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[XORI:%[0-9]+]]:gpr = XORI [[COPY1]], -1
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[XORI]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(s64) = COPY $x11
+    %2:gprb(s64) = G_CONSTANT i64 63
+    %3:gprb(s64) = G_SUB %2, %1
+    %4:gprb(s64) = G_SHL %0, %3(s64)
+    $x10 = COPY %4(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            shl_and_zext
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x10, $x11
+
+    ; CHECK-LABEL: name: shl_and_zext
+    ; CHECK: liveins: $x10, $x11
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+    ; CHECK-NEXT: [[LW:%[0-9]+]]:gpr = LW [[COPY1]], 0 :: (load (s32))
+    ; CHECK-NEXT: [[SLL:%[0-9]+]]:gpr = SLL [[COPY]], [[LW]]
+    ; CHECK-NEXT: $x10 = COPY [[SLL]]
+    ; CHECK-NEXT: PseudoRET implicit $x10
+    %0:gprb(s64) = COPY $x10
+    %1:gprb(p0) = COPY $x11
+    %2:gprb(s32) = G_LOAD %1(p0) :: (load (s32))
+    %3:gprb(s32) = G_CONSTANT i32 63
+    %4:gprb(s32) = G_AND %2, %3
+    %5:gprb(s64) = G_ZEXT %4
+    %6:gprb(s64) = G_SHL %0, %5(s64)
+    $x10 = COPY %6(s64)
+    PseudoRET implicit $x10
+...
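Note (not part of the patch): a minimal C sketch of the source-level idiom these selectShiftMask folds target. The helper name rotl64 is made up for illustration and assumes an RV64 target where the rotate is expressed as two shifts and an OR.

    #include <stdint.h>

    /* Portable rotate-left by amt (mod 64). */
    uint64_t rotl64(uint64_t x, uint64_t amt) {
      /* The "& 63" masks only clear bits that the RISC-V shift
         instructions already ignore, so the selector can use the shift
         amount register directly, and the "64 - amt" operand is the N-X
         (N == 0 mod Size) form that now selects to a NEG (SUB/SUBW from
         x0) instead of materializing the constant 64. */
      return (x << (amt & 63)) | (x >> ((64 - amt) & 63));
    }

The shl_and, shl_sub, and shl_bitwise_not MIR tests above exercise the AND-mask, N-X, and (-1)-X forms of this idiom in isolation.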