diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 9a513891b765d..78d64ea67324f 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1119,25 +1119,26 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
     LIS->InsertMachineInstrInMaps(*MI);
     LiveInterval &LI = LIS->getInterval(AVLReg);
     SlotIndex SI = LIS->getInstructionIndex(*MI).getRegSlot();
+    const VNInfo *CurVNI = Info.getAVLVNInfo();
     // If the AVL value isn't live at MI, do a quick check to see if it's easily
     // extendable. Otherwise, we need to copy it.
-    if (LI.getVNInfoBefore(SI) != Info.getAVLVNInfo()) {
+    if (LI.getVNInfoBefore(SI) != CurVNI) {
       if (!LI.liveAt(SI) && LI.containsOneValue())
         LIS->extendToIndices(LI, SI);
       else {
         Register AVLCopyReg =
             MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);
+        MachineBasicBlock *MBB = LIS->getMBBFromIndex(CurVNI->def);
         MachineBasicBlock::iterator II;
-        if (Info.getAVLVNInfo()->isPHIDef())
-          II = LIS->getMBBFromIndex(Info.getAVLVNInfo()->def)->getFirstNonPHI();
+        if (CurVNI->isPHIDef())
+          II = MBB->getFirstNonPHI();
         else {
-          II = LIS->getInstructionFromIndex(Info.getAVLVNInfo()->def);
+          II = LIS->getInstructionFromIndex(CurVNI->def);
           II = std::next(II);
         }
         assert(II.isValid());
-        auto AVLCopy =
-            BuildMI(*II->getParent(), II, DL, TII->get(RISCV::COPY), AVLCopyReg)
-                .addReg(AVLReg);
+        auto AVLCopy = BuildMI(*MBB, II, DL, TII->get(RISCV::COPY), AVLCopyReg)
+                           .addReg(AVLReg);
         LIS->InsertMachineInstrInMaps(*AVLCopy);
         MI->getOperand(1).setReg(AVLCopyReg);
         LIS->createAndComputeVirtRegInterval(AVLCopyReg);
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 140875c4b24ad..e09fc1828fec5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -142,6 +142,10 @@
     ret void
   }
 
+  define void @avl_is_last_instr() {
+    ret void
+  }
+
   declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
 
   declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
@@ -1099,3 +1103,31 @@ body: |
     renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5, 0
     renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed %v, %outvl:gprnox0, 5, 0
     PseudoRET implicit $v8m2
+...
+---
+name: avl_is_last_instr
+tracksRegLiveness: true
+body: |
+  ; CHECK-LABEL: name: avl_is_last_instr
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $x10
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %avl:gprnox0 = COPY $x10
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY %avl
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   dead %avl:gprnox0 = ADDI %avl, -1
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   $v8 = PseudoVMV_S_X undef renamable $v8, $x0, 1, 3 /* e8 */, implicit $vl, implicit $vtype
+  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+  ; CHECK-NEXT:   $v8 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, $noreg, 3 /* e8 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
+  bb.0:
+    liveins: $x10
+    %avl:gprnox0 = COPY $x10
+
+  bb.1:
+    %vl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 192, implicit-def dead $vl, implicit-def dead $vtype
+    %avl:gprnox0 = ADDI %avl:gprnox0, -1
+    $v8 = PseudoVMV_S_X undef renamable $v8, $x0, 1, 3
+    $v8 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, %vl:gprnox0, 3, 3
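
The new avl_is_last_instr test covers the case the C++ change appears to target: the AVL-defining instruction is the last instruction in its block, so after `II = std::next(II)` the iterator is the block's `end()`. Inserting the COPY at `end()` is fine, but the removed code also recovered the insertion block by dereferencing that iterator (`*II->getParent()`), which is not valid for `end()`; the patch instead takes the block from `LIS->getMBBFromIndex(CurVNI->def)` and passes `*MBB` to `BuildMI`. The standalone sketch below illustrates the same iterator pattern with `std::list`; `insertAfter` and its arguments are hypothetical names for illustration, not LLVM API.

// Minimal standalone sketch (not LLVM code) of the pitfall addressed above:
// "insert after the defining element" via std::next() yields end() when that
// element is last, and end() must never be dereferenced.
#include <iostream>
#include <iterator>
#include <list>

// Hypothetical helper: insert Value right after the element at DefIt.
// Passing the owning container explicitly (analogous to querying the block
// from LIS->getMBBFromIndex) avoids dereferencing a possibly-end() iterator.
void insertAfter(std::list<int> &Block, std::list<int>::iterator DefIt,
                 int Value) {
  auto InsertPt = std::next(DefIt); // may equal Block.end() if DefIt is last
  // Safe: std::list::insert accepts end() as the insertion position.
  Block.insert(InsertPt, Value);
  // Unsafe alternative (analogous to the removed *II->getParent() use):
  // dereferencing InsertPt here is undefined behavior when it equals end().
}

int main() {
  std::list<int> Block{1, 2, 3};
  insertAfter(Block, std::prev(Block.end()), 99); // "def" is the last element
  for (int V : Block)
    std::cout << V << ' ';
  std::cout << '\n'; // prints: 1 2 3 99
  return 0;
}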