diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index d934638c15e75..8d4a2a11d235e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1910,6 +1910,44 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
   if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
     return Res;
 
+  // Fold (add (zext nneg (add X, C1)), C2) --> (zext (add X, C1 + C2)).
+  {
+    Value *X;
+    const APInt *C1, *C2;
+    if (match(&I,
+              m_Add(m_NNegZExt(m_Add(m_Value(X), m_APInt(C1))), m_APInt(C2)))) {
+      // Require a negative inner constant C1 and a non-negative outer C2.
+      if (!C1->isNegative() || C2->isNegative())
+        return nullptr;
+
+      APInt Sum = C1->sext(C2->getBitWidth()) + *C2;
+
+      if (!Sum.isSignedIntN(C1->getBitWidth()))
+        return nullptr;
+
+      Value *Inner;
+
+      if (Sum.isZero()) {
+        Inner = X;
+      } else {
+        unsigned XWidth = X->getType()->getIntegerBitWidth();
+        APInt MinX = APInt::getSignedMinValue(XWidth);
+        if ((MinX + Sum.trunc(XWidth)).isNegative())
+          return nullptr;
+
+        auto *ZExt = cast<ZExtInst>(I.getOperand(0));
+        auto *InnerAdd = ZExt->getOperand(0);
+        if (!ZExt->hasOneUse() || !InnerAdd->hasOneUse())
+          return nullptr;
+
+        Inner = Builder.CreateAdd(
+            X, ConstantInt::get(X->getType(), Sum.trunc(C1->getBitWidth())));
+      }
+
+      return new ZExtInst(Inner, I.getType());
+    }
+  }
+
   // Re-enqueue users of the induction variable of add recurrence if we infer
   // new nuw/nsw flags.
   if (Changed) {
diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll
index e4d18e9395219..2f9cbcdf00218 100644
--- a/llvm/test/Transforms/InstCombine/zext.ll
+++ b/llvm/test/Transforms/InstCombine/zext.ll
@@ -976,3 +976,70 @@ entry:
   %res = zext nneg i2 %x to i32
   ret i32 %res
 }
+
+define i32 @zext_nneg_add_cancel(i8 %arg) {
+; CHECK-LABEL: @zext_nneg_add_cancel(
+; CHECK-NEXT:    [[ADD2:%.*]] = zext i8 [[ARG:%.*]] to i32
+; CHECK-NEXT:    ret i32 [[ADD2]]
+;
+  %add = add i8 %arg, -2
+  %zext = zext nneg i8 %add to i32
+  %add2 = add i32 %zext, 2
+  ret i32 %add2
+}
+
+define i32 @zext_nneg_add_cancel_multi_use(i8 %arg) {
+; CHECK-LABEL: @zext_nneg_add_cancel_multi_use(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[ARG:%.*]], -2
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext nneg i8 [[ADD]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[ZEXT]])
+; CHECK-NEXT:    [[ADD2:%.*]] = zext i8 [[ARG]] to i32
+; CHECK-NEXT:    ret i32 [[ADD2]]
+;
+  %add = add i8 %arg, -2
+  %zext = zext nneg i8 %add to i32
+  call void @use32(i32 %zext)
+  %add2 = add i32 %zext, 2
+  ret i32 %add2
+}
+
+define i32 @zext_nneg_no_cancel(i8 %arg) {
+; CHECK-LABEL: @zext_nneg_no_cancel(
+; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[ARG:%.*]], -1
+; CHECK-NEXT:    [[ADD2:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[ADD2]]
+;
+  %add = add i8 %arg, -2
+  %zext = zext nneg i8 %add to i32
+  %add2 = add i32 %zext, 1
+  ret i32 %add2
+}
+
+define i32 @zext_nneg_no_cancel_multi_use(i8 %arg) {
+; CHECK-LABEL: @zext_nneg_no_cancel_multi_use(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[ARG:%.*]], -2
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext nneg i8 [[ADD]] to i32
+; CHECK-NEXT:    call void @use32(i32 [[ZEXT]])
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw nsw i32 [[ZEXT]], 1
+; CHECK-NEXT:    ret i32 [[ADD2]]
+;
+  %add = add i8 %arg, -2
+  %zext = zext nneg i8 %add to i32
+  call void @use32(i32 %zext)
+  %add2 = add i32 %zext, 1
+  ret i32 %add2
+}
+
+define i32 @zext_nneg_overflow(i8 %arg) {
+; CHECK-LABEL: @zext_nneg_overflow(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[ARG:%.*]], -2
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext nneg i8 [[ADD]] to i32
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw nsw i32 [[ZEXT]], 299
+; CHECK-NEXT:    ret i32 [[ADD2]]
+;
+  %add = add i8 %arg, -2
+  %zext = zext nneg i8 %add to i32
+  %add2 = add i32 %zext, 299
+  ret i32 %add2
+}
+
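Note (illustrative, not part of the patch): the fold relies on the nneg flag on the zext to know that the narrow add of X and C1 produces a non-negative value, so the extended value is the exact sum X + C1 and the outer constant can be folded into the inner add. The combined constant is Sum = sext(C1) + C2, computed in the wide type. When Sum is zero the whole chain collapses to a plain zext of X, as in @zext_nneg_add_cancel; when Sum is non-zero but still fits the narrow type, a single narrow add remains, as in @zext_nneg_no_cancel where -2 + 1 = -1 yields add i8 %arg, -1; and when Sum does not fit, as in @zext_nneg_overflow where -2 + 299 = 297 needs more than 8 signed bits, the pattern is left untouched.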