diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 7719158f693a6..00eece9534b08 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -918,16 +918,18 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
             SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1))
           return I;
       } else { // fshl is a rotate
-        // Avoid converting rotate into funnel shift.
-        // Only simplify if one operand is constant.
-        KnownBits LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, I);
-        if (DemandedMaskLHS.isSubsetOf(LHSKnown.Zero | LHSKnown.One)) {
+        // Avoid converting rotate into funnel shift.
+        // Only simplify if one operand is constant.
+        LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, I);
+        if (DemandedMaskLHS.isSubsetOf(LHSKnown.Zero | LHSKnown.One) &&
+            !match(I->getOperand(0), m_SpecificInt(LHSKnown.One))) {
           replaceOperand(*I, 0, Constant::getIntegerValue(VTy, LHSKnown.One));
           return I;
         }
 
-        KnownBits RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, I);
-        if (DemandedMaskRHS.isSubsetOf(RHSKnown.Zero | RHSKnown.One)) {
+        RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, I);
+        if (DemandedMaskRHS.isSubsetOf(RHSKnown.Zero | RHSKnown.One) &&
+            !match(I->getOperand(1), m_SpecificInt(RHSKnown.One))) {
           replaceOperand(*I, 1, Constant::getIntegerValue(VTy, RHSKnown.One));
           return I;
         }
diff --git a/llvm/test/Transforms/InstCombine/2023-07-13-arm-infiniteloop.ll b/llvm/test/Transforms/InstCombine/2023-07-13-arm-infiniteloop.ll
new file mode 100644
index 0000000000000..35335f648c167
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/2023-07-13-arm-infiniteloop.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+declare i1 @llvm.is.constant.i32(i32)
+
+define void @test(ptr %bpf_prog_calc_tag___trans_tmp_3, i32 %0) {
+; CHECK-LABEL: define void @test
+; CHECK-SAME: (ptr [[BPF_PROG_CALC_TAG___TRANS_TMP_3:%.*]], i32 [[TMP0:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 true, label [[IF_ELSE_I:%.*]], label [[IF_THEN_I:%.*]]
+; CHECK:       if.then.i:
+; CHECK-NEXT:    br label [[__FSWAB64_EXIT:%.*]]
+; CHECK:       if.else.i:
+; CHECK-NEXT:    br label [[__FSWAB64_EXIT]]
+; CHECK:       __fswab64.exit:
+; CHECK-NEXT:    store i32 0, ptr [[BPF_PROG_CALC_TAG___TRANS_TMP_3]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %conv = zext i32 %0 to i64
+  %1 = lshr i64 %conv, 32
+  %conv1.i = trunc i64 %1 to i32
+  %2 = call i1 @llvm.is.constant.i32(i32 %conv1.i)
+  br i1 %2, label %if.else.i, label %if.then.i
+
+if.then.i:                                        ; preds = %entry
+  %3 = load volatile i32, ptr null, align 2147483648
+  br label %__fswab64.exit
+
+if.else.i:                                        ; preds = %entry
+  %or.i = call i32 @llvm.fshl.i32(i32 %conv1.i, i32 0, i32 16)
+  br label %__fswab64.exit
+
+__fswab64.exit:                                   ; preds = %if.then.i, %if.else.i
+  %t.0.i = phi i32 [ %or.i, %if.else.i ], [ %3, %if.then.i ]
+  %shr2.i = lshr i32 %t.0.i, 1
+  store i32 %shr2.i, ptr %bpf_prog_calc_tag___trans_tmp_3, align 4
+  ret void
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)