Skip to content

Commit

Permalink
[InstCombine] fold (X >>u C) << C --> X & (-1 << C)
Browse files · Browse the repository at this point in the history
We already have this fold when the lshr has one use, but it doesn't need that
restriction. We may be able to remove some code from foldShiftedShift().

Also, move the similar:
(X << C) >>u C --> X & (-1 >>u C)
...directly into visitLShr to help clean up foldShiftByConstOfShiftByConst().

That whole function seems questionable: it is called from commonShiftTransforms(),
but there is really not much "in common" if every fold has to check the opcodes of
both shifts anyway.

llvm-svn: 293215
  • Loading branch information
rotateright committed Jan 26, 2017
1 parent b67a3ce commit 50753f0
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 22 deletions.
35 changes: 17 additions & 18 deletions llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -360,21 +360,8 @@ foldShiftByConstOfShiftByConst(BinaryOperator &I, const APInt *COp1,
if (ShiftAmt1 == 0)
return nullptr; // Will be simplified in the future.

if (ShiftAmt1 == ShiftAmt2) {
// FIXME: This repeats a fold that exists in foldShiftedShift(), but we're
// not handling the related fold here:
// (X >>u C) << C --> X & (-1 << C).
// foldShiftedShift() is always called before this, but it is restricted to
// only handle cases where the ShiftOp has one use. We don't have that
// restriction here.
if (I.getOpcode() != Instruction::LShr ||
ShiftOp->getOpcode() != Instruction::Shl)
return nullptr;

// (X << C) >>u C --> X & (-1 >>u C).
APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
}
if (ShiftAmt1 == ShiftAmt2)
return nullptr;

// FIXME: Everything under here should be extended to work with vector types.

Expand Down Expand Up @@ -714,6 +701,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
const APInt *ShAmtAPInt;
if (match(Op1, m_APInt(ShAmtAPInt))) {
unsigned ShAmt = ShAmtAPInt->getZExtValue();
unsigned BitWidth = I.getType()->getScalarSizeInBits();

// shl (zext X), ShAmt --> zext (shl X, ShAmt)
// This is only valid if X would have zeros shifted out.
Expand All @@ -725,11 +713,15 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
return new ZExtInst(Builder->CreateShl(X, ShAmt), I.getType());
}

// (X >>u C) << C --> X & (-1 << C)
if (match(Op0, m_LShr(m_Value(X), m_Specific(Op1)))) {
APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
}

// If the shifted-out value is known-zero, then this is a NUW shift.
if (!I.hasNoUnsignedWrap() &&
MaskedValueIsZero(
Op0, APInt::getHighBitsSet(ShAmtAPInt->getBitWidth(), ShAmt), 0,
&I)) {
MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmt), 0, &I)) {
I.setHasNoUnsignedWrap();
return &I;
}
Expand Down Expand Up @@ -780,6 +772,13 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
return new ZExtInst(Cmp, II->getType());
}

// (X << C) >>u C --> X & (-1 >>u C)
Value *X;
if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
}

// If the shifted-out value is known-zero, then this is an exact shift.
if (!I.isExact() &&
MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
Expand Down
8 changes: 4 additions & 4 deletions llvm/test/Transforms/InstCombine/apint-shift.ll
Original file line number Diff line number Diff line change
Expand Up @@ -459,12 +459,12 @@ define <2 x i44> @shl_lshr_eq_amt_multi_use_splat_vec(<2 x i44> %A) {
ret <2 x i44> %D
}

; FIXME: Fold shl (lshr X, C), C -> and X, C' regardless of the number of uses of the lshr.
; Fold shl (lshr X, C), C -> and X, C' regardless of the number of uses of the lshr.

define i43 @lshr_shl_eq_amt_multi_use(i43 %A) {
; CHECK-LABEL: @lshr_shl_eq_amt_multi_use(
; CHECK-NEXT: [[B:%.*]] = lshr i43 %A, 23
; CHECK-NEXT: [[C:%.*]] = shl nuw i43 [[B]], 23
; CHECK-NEXT: [[C:%.*]] = and i43 %A, -8388608
; CHECK-NEXT: [[D:%.*]] = mul i43 [[B]], [[C]]
; CHECK-NEXT: ret i43 [[D]]
;
Expand All @@ -474,12 +474,12 @@ define i43 @lshr_shl_eq_amt_multi_use(i43 %A) {
ret i43 %D
}

; FIXME: Fold vector shl (lshr X, C), C -> and X, C' regardless of the number of uses of the lshr.
; Fold vector shl (lshr X, C), C -> and X, C' regardless of the number of uses of the lshr.

define <2 x i43> @lshr_shl_eq_amt_multi_use_splat_vec(<2 x i43> %A) {
; CHECK-LABEL: @lshr_shl_eq_amt_multi_use_splat_vec(
; CHECK-NEXT: [[B:%.*]] = lshr <2 x i43> %A, <i43 23, i43 23>
; CHECK-NEXT: [[C:%.*]] = shl nuw <2 x i43> [[B]], <i43 23, i43 23>
; CHECK-NEXT: [[C:%.*]] = and <2 x i43> %A, <i43 -8388608, i43 -8388608>
; CHECK-NEXT: [[D:%.*]] = mul <2 x i43> [[B]], [[C]]
; CHECK-NEXT: ret <2 x i43> [[D]]
;
Expand Down

0 comments on commit 50753f0

Please sign in to comment.