diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 3400b24e0abb0..ee149810ca4d6 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1459,20 +1459,6 @@ static void doUnion(DemandedFields &A, DemandedFields B) {
   A.MaskPolicy |= B.MaskPolicy;
 }
 
-static bool isNonZeroAVL(const MachineOperand &MO,
-                         const MachineRegisterInfo &MRI) {
-  if (MO.isReg()) {
-    if (MO.getReg() == RISCV::X0)
-      return true;
-    if (MachineInstr *MI = MRI.getVRegDef(MO.getReg());
-        MI && isNonZeroLoadImmediate(*MI))
-      return true;
-    return false;
-  }
-  assert(MO.isImm());
-  return 0 != MO.getImm();
-}
-
 // Return true if we can mutate PrevMI to match MI without changing any the
 // fields which would be observed.
 static bool canMutatePriorConfig(const MachineInstr &PrevMI,
@@ -1486,21 +1472,26 @@ static bool canMutatePriorConfig(const MachineInstr &PrevMI,
     if (Used.VLAny)
       return false;
 
-    // We don't bother to handle the equally zero case here as it's largely
-    // uninteresting.
     if (Used.VLZeroness) {
       if (isVLPreservingConfig(PrevMI))
         return false;
-      if (!isNonZeroAVL(MI.getOperand(1), MRI) ||
-          !isNonZeroAVL(PrevMI.getOperand(1), MRI))
+      if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
+                                                       MRI))
         return false;
     }
 
-    // TODO: Track whether the register is defined between
-    // PrevMI and MI.
-    if (MI.getOperand(1).isReg() &&
-        RISCV::X0 != MI.getOperand(1).getReg())
-      return false;
+    auto &AVL = MI.getOperand(1);
+    auto &PrevAVL = PrevMI.getOperand(1);
+    assert(MRI.isSSA());
+
+    // If the AVL is a register, we need to make sure MI's AVL dominates PrevMI.
+    // For now just check that PrevMI uses the same virtual register.
+    if (AVL.isReg() && AVL.getReg() != RISCV::X0) {
+      if (AVL.getReg().isPhysical())
+        return false;
+      if (!PrevAVL.isReg() || PrevAVL.getReg() != AVL.getReg())
+        return false;
+    }
   }
 
   if (!PrevMI.getOperand(2).isImm() || !MI.getOperand(2).isImm())
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 57760070603b2..4954827876c19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -63,9 +63,8 @@ define <32 x i32> @insertelt_v32i32_31(<32 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v32i32_31:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 31
 ; CHECK-NEXT:    ret
   %b = insertelement <32 x i32> %a, i32 %y, i32 31
@@ -101,9 +100,8 @@ define <64 x i32> @insertelt_v64i32_63(<64 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v64i32_63:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vslideup.vi v16, v24, 31
 ; CHECK-NEXT:    ret
   %b = insertelement <64 x i32> %a, i32 %y, i32 63
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index e15c5a3323cbe..7c95d81306655 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -643,9 +643,8 @@ define <vscale x 8 x float> @fp_reduction_vfmv_s_f(float %0, <vscale x 8 x floa
 define <vscale x 8 x i32> @int_reduction_vmv_s_x(i32 signext %0, <vscale x 8 x i32> %1, i64 %2) {
 ; CHECK-LABEL: int_reduction_vmv_s_x:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
 ; CHECK-NEXT:    ret
   %4 = tail call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 %0, i64 %2)
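
Illustration (not part of the patch): a minimal standalone .ll sketch, in the same style as the tests above, of the case the new same-register check enables. Both intrinsics take the same virtual-register AVL, so doLocalPostpass can now mutate the e32/m1 vsetvli for the vmv.s.x into the e32/m4 one required by the reduction, leaving a single vsetvli. The function name, RUN line, and CHECK lines are hypothetical and merely mirror the int_reduction_vmv_s_x test; they are not part of this change.

; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s

; Hypothetical reproducer: %avl is the shared AVL register of both
; intrinsics, so after this patch only the m4 vsetvli should survive.
define <vscale x 8 x i32> @same_avl_reduction(i32 signext %x, <vscale x 8 x i32> %v, i64 %avl) {
; CHECK-LABEL: same_avl_reduction:
; CHECK:       vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:  vmv.s.x v12, a0
; CHECK-NEXT:  vredsum.vs v8, v8, v12
  %s = tail call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 %x, i64 %avl)
  %r = tail call <vscale x 8 x i32> @llvm.riscv.vredsum.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %v, <vscale x 8 x i32> %s, i64 %avl)
  ret <vscale x 8 x i32> %r
}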