diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index b38ee71e7bf67..26fe8e08d066d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1165,11 +1165,11 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
 
     // Look for a "splat" mul pattern - it replicates bits across each half of
     // a value, so a right shift is just a mask of the low bits:
-    // lshr i32 (mul nuw X, Pow2+1), 16 --> and X, Pow2-1
+    // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
     // TODO: Generalize to allow more than just half-width shifts?
     const APInt *MulC;
     if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC))) &&
-        ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
+        BitWidth > 2 && ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
         MulC->logBase2() == ShAmtC)
       return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
 
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 2db2c09911769..e4e869b921edd 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -399,6 +399,17 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
   ret i32 %t
 }
 
+; Negative test (but simplifies before we reach the mul_splat transform)- need more than 2 bits
+
+define i2 @mul_splat_fold_too_narrow(i2 %x) {
+; CHECK-LABEL: @mul_splat_fold_too_narrow(
+; CHECK-NEXT:    ret i2 [[X:%.*]]
+;
+  %m = mul nuw i2 %x, 2
+  %t = lshr i2 %m, 1
+  ret i2 %t
+}
+
 define i32 @negative_and_odd(i32 %x) {
 ; CHECK-LABEL: @negative_and_odd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
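
Note on the new "BitWidth > 2" guard (an illustration, not part of the patch): the fold
assumes MulC has the form 2^N + 1 with N == ShAmtC, but for a 2-bit type the old predicate
also accepted MulC == 2, since (2 - 1) is a power of two and logBase2(2) == 1 == ShAmtC.
A hypothetical i2 input that would have satisfied the old check (function name is made up
for illustration):

  define i2 @guard_illustration(i2 %x) {
    %m = mul nuw i2 %x, 2   ; MulC = 2: (MulC - 1).isPowerOf2() and MulC->logBase2() == 1
    %t = lshr i2 %m, 1      ; ShAmtC * 2 == BitWidth (1 * 2 == 2), so the old guard matched
    ret i2 %t               ; for %x == 1 this is 1, not (and %x, MulC - 2) == 0
  }

Folding this to "and i2 %x, 0" would be wrong, so requiring BitWidth > 2 sidesteps the
degenerate case. As the new test's comment notes, earlier folds already simplify the i2
pattern to %x before the mul_splat transform is reached.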