[InstCombine] Dropping redundant masking before left-shift [0/5] (PR42563)

Summary:
If we have a pattern that leaves only some low bits set and then left-shifts
those bits, and none of the bits that remain after the final shift are modified
by the mask, we can omit the mask.

There are many variants to this pattern:
a. `(x & ((1 << MaskShAmt) - 1)) << ShiftShAmt`
All these patterns can be simplified to just:
`x << ShiftShAmt`
iff:
a. `(MaskShAmt+ShiftShAmt) u>= bitwidth(x)`

alive proof:
a: https://rise4fun.com/Alive/wi9
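
For concreteness, a minimal C++ sketch of the kind of code this targets (the
function and variable names are illustrative only, not taken from the patch):

```cpp
#include <cstdint>

// Pattern (a): keep only the low `maskShAmt` bits of `x`, then shift them up.
// If maskShAmt + shiftShAmt >= 32 (the bit width), every bit the mask clears
// would be shifted out of the result anyway, so this is equivalent to
// `x << shiftShAmt` and the masking can be dropped.
uint32_t maskThenShift(uint32_t x, unsigned maskShAmt, unsigned shiftShAmt) {
  uint32_t masked = x & ((1u << maskShAmt) - 1); // (x & ((1 << MaskShAmt) - 1))
  return masked << shiftShAmt;                   //  ... << ShiftShAmt
}
```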

Indeed, not all of these patterns are canonical.
But since this fold only produces a single instruction,
I'm really interested in handling even non-canonical patterns,
since I have this general kind of pattern in hot paths,
and it is not totally outlandish for bit-twiddling code.

For now, let's start with patterns where both shift amounts are variable,
with a trivial constant "offset" between them, since I believe this is
both the simplest to handle and the most common case.
But again, there are likely other variants where we could use
ValueTracking/ConstantRange to handle more cases; a rough sketch of that
direction follows.
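
A hypothetical sketch of that ConstantRange direction, not part of this patch:
it assumes llvm::computeConstantRange() from ValueTracking and
ConstantRange::add(), and the exact signatures may differ between LLVM versions.

```cpp
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Hypothetical helper, not from the patch: instead of requiring that
// MaskShAmt + ShiftShAmt folds to a constant, derive a conservative range for
// the sum and check that even its smallest possible value covers the bit width.
static bool sumOfShAmtsCoversBitWidth(Value *MaskShAmt, Value *ShiftShAmt,
                                      unsigned BitWidth) {
  ConstantRange MaskRange = computeConstantRange(MaskShAmt);
  ConstantRange ShiftRange = computeConstantRange(ShiftShAmt);
  ConstantRange SumRange = MaskRange.add(ShiftRange); // range of the sum
  return SumRange.getUnsignedMin().uge(BitWidth);
}
```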

https://bugs.llvm.org/show_bug.cgi?id=42563

Reviewers: spatel, nikic, huihuiz, xbolva00

Reviewed By: xbolva00

Subscribers: efriedma, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D64512

llvm-svn: 366535
LebedevRI committed Jul 19, 2019
1 parent 3628d94 commit a5f0824
Showing 2 changed files with 61 additions and 11 deletions.
50 changes: 50 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -65,6 +65,53 @@ reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0,
return NewShift;
}

// If we have some pattern that leaves only some low bits set, and then performs
// left-shift of those bits, if none of the bits that are left after the final
// shift are modified by the mask, we can omit the mask.
//
// There are many variants to this pattern:
// a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
// All these patterns can be simplified to just:
// x << ShiftShAmt
// iff:
// a) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
static Instruction *
dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
const SimplifyQuery &SQ) {
assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl &&
"The input must be 'shl'!");

Value *Masked = OuterShift->getOperand(0);
Value *ShiftShAmt = OuterShift->getOperand(1);

Value *MaskShAmt;

// ((1 << MaskShAmt) - 1)
auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());

Value *X;
if (!match(Masked, m_c_And(MaskA, m_Value(X))))
return nullptr;

// Can we simplify (MaskShAmt+ShiftShAmt) ?
Value *SumOfShAmts =
SimplifyAddInst(MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false,
SQ.getWithInstruction(OuterShift));
if (!SumOfShAmts)
return nullptr; // Did not simplify.
// Is the total shift amount *not* smaller than the bit width?
// FIXME: could also rely on ConstantRange.
unsigned BitWidth = X->getType()->getScalarSizeInBits();
if (!match(SumOfShAmts, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_UGE,
APInt(BitWidth, BitWidth))))
return nullptr;
// All good, we can do this fold.

// No 'NUW'/'NSW'!
// We no longer know that we won't shift-out non-0 bits.
return BinaryOperator::Create(OuterShift->getOpcode(), X, ShiftShAmt);
}

Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
assert(Op0->getType() == Op1->getType());
@@ -629,6 +676,9 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
if (Instruction *V = commonShiftTransforms(I))
return V;

if (Instruction *V = dropRedundantMaskingOfLeftShiftInput(&I, SQ))
return V;

Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
Type *Ty = I.getType();
unsigned BitWidth = Ty->getScalarSizeInBits();
@@ -25,7 +25,7 @@ define i32 @t0_basic(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[T2]], [[T3]]
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[X]], [[T3]]
; CHECK-NEXT: ret i32 [[T4]]
;
%t0 = shl i32 1, %nbits
@@ -50,7 +50,7 @@ define i32 @t1_bigger_shift(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[T2]], [[T3]]
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[X]], [[T3]]
; CHECK-NEXT: ret i32 [[T4]]
;
%t0 = shl i32 1, %nbits
@@ -77,7 +77,7 @@ define i32 @t2_bigger_mask(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: call void @use32(i32 [[T4]])
; CHECK-NEXT: [[T5:%.*]] = shl i32 [[T3]], [[T4]]
; CHECK-NEXT: [[T5:%.*]] = shl i32 [[X]], [[T4]]
; CHECK-NEXT: ret i32 [[T5]]
;
%t0 = add i32 %nbits, 1
@@ -109,7 +109,7 @@ define <3 x i32> @t3_vec_splat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T3]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T4]])
; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[T3]], [[T4]]
; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]]
; CHECK-NEXT: ret <3 x i32> [[T5]]
;
%t0 = add <3 x i32> %nbits, <i32 0, i32 0, i32 0>
@@ -138,7 +138,7 @@ define <3 x i32> @t4_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T3]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T4]])
; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[T3]], [[T4]]
; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]]
; CHECK-NEXT: ret <3 x i32> [[T5]]
;
%t0 = add <3 x i32> %nbits, <i32 -1, i32 0, i32 1>
@@ -166,7 +166,7 @@ define <3 x i32> @t5_vec_undef(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T3]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T4]])
; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[T3]], [[T4]]
; CHECK-NEXT: [[T5:%.*]] = shl <3 x i32> [[X]], [[T4]]
; CHECK-NEXT: ret <3 x i32> [[T5]]
;
%t0 = add <3 x i32> %nbits, <i32 0, i32 undef, i32 0>
@@ -198,7 +198,7 @@ define i32 @t6_commutativity0(i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[T2]], [[T3]]
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[X]], [[T3]]
; CHECK-NEXT: ret i32 [[T4]]
;
%x = call i32 @gen32()
@@ -260,7 +260,7 @@ define i32 @t8_commutativity2(i32 %nbits0, i32 %nbits1) {
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: call void @use32(i32 [[T4]])
; CHECK-NEXT: call void @use32(i32 [[T5]])
; CHECK-NEXT: [[T6:%.*]] = shl i32 [[T4]], [[T5]]
; CHECK-NEXT: [[T6:%.*]] = shl i32 [[T1]], [[T5]]
; CHECK-NEXT: ret i32 [[T6]]
;
%t0 = shl i32 1, %nbits0
@@ -291,7 +291,7 @@ define i32 @t9_nuw(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 [[T2]], [[T3]]
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[X]], [[T3]]
; CHECK-NEXT: ret i32 [[T4]]
;
%t0 = shl i32 1, %nbits
@@ -316,7 +316,7 @@ define i32 @t10_nsw(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = shl nsw i32 [[T2]], [[T3]]
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[X]], [[T3]]
; CHECK-NEXT: ret i32 [[T4]]
;
%t0 = shl i32 1, %nbits
@@ -341,7 +341,7 @@ define i32 @t11_nuw_nsw(i32 %x, i32 %nbits) {
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: call void @use32(i32 [[T3]])
; CHECK-NEXT: [[T4:%.*]] = shl nuw nsw i32 [[T2]], [[T3]]
; CHECK-NEXT: [[T4:%.*]] = shl i32 [[X]], [[T3]]
; CHECK-NEXT: ret i32 [[T4]]
;
%t0 = shl i32 1, %nbits
