Skip to content

Commit

Permalink
[DAG] Move lshr narrowing from visitANDLike to SimplifyDemandedBits
Browse files Browse the repository at this point in the history
Inspired by some of the cases from D145468

Let SimplifyDemandedBits handle the narrowing of lshr to half width when the upper bits are not required, the narrowed shift is profitable, and the zext/trunc conversions are free.

A future patch will propose the equivalent shl narrowing combine.

Differential Revision: https://reviews.llvm.org/D146121
  • Loading branch information
RKSimon committed Jul 17, 2023
1 parent fbfff1c commit e9caa37
Show file tree
Hide file tree
Showing 23 changed files with 290 additions and 343 deletions.
49 changes: 0 additions & 49 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Expand Up @@ -6152,55 +6152,6 @@ SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
}
}

// Reduce bit extract of low half of an integer to the narrower type.
// (and (srl i64:x, K), KMask) ->
// (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask)
if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) {
if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
unsigned Size = VT.getSizeInBits();
const APInt &AndMask = CAnd->getAPIntValue();
unsigned ShiftBits = CShift->getZExtValue();

// Bail out, this node will probably disappear anyway.
if (ShiftBits == 0)
return SDValue();

unsigned MaskBits = AndMask.countr_one();
EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2);

if (AndMask.isMask() &&
// Required bits must not span the two halves of the integer and
// must fit in the half size type.
(ShiftBits + MaskBits <= Size / 2) &&
TLI.isNarrowingProfitable(VT, HalfVT) &&
TLI.isTypeDesirableForOp(ISD::AND, HalfVT) &&
TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) &&
TLI.isTruncateFree(VT, HalfVT) &&
TLI.isZExtFree(HalfVT, VT)) {
// The isNarrowingProfitable is to avoid regressions on PPC and
// AArch64 which match a few 64-bit bit insert / bit extract patterns
// on downstream users of this. Those patterns could probably be
// extended to handle extensions mixed in.

SDValue SL(N0);
assert(MaskBits <= Size);

// Extracting the highest bit of the low half.
EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout());
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT,
N0.getOperand(0));

SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT);
SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT);
SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK);
SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask);
return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And);
}
}
}
}

return SDValue();
}

Expand Down
21 changes: 21 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
Expand Up @@ -1863,6 +1863,27 @@ bool TargetLowering::SimplifyDemandedBits(
if (Op->getFlags().hasExact())
InDemandedMask.setLowBits(ShAmt);

// Narrow shift to lower half - similar to ShrinkDemandedOp.
// (srl i64:x, K) -> (i64 zero_extend (srl (i32 (trunc i64:x)), K))
if ((BitWidth % 2) == 0 && !VT.isVector() &&
((InDemandedMask.countLeadingZeros() >= (BitWidth / 2)) ||
TLO.DAG.MaskedValueIsZero(
Op0, APInt::getHighBitsSet(BitWidth, BitWidth / 2)))) {
EVT HalfVT = EVT::getIntegerVT(*TLO.DAG.getContext(), BitWidth / 2);
if (isNarrowingProfitable(VT, HalfVT) &&
isTypeDesirableForOp(ISD::SRL, HalfVT) &&
isTruncateFree(VT, HalfVT) && isZExtFree(HalfVT, VT) &&
(!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT))) {
SDValue NewOp = TLO.DAG.getNode(ISD::TRUNCATE, dl, HalfVT, Op0);
SDValue NewShiftAmt = TLO.DAG.getShiftAmountConstant(
ShAmt, HalfVT, dl, TLO.LegalTypes());
SDValue NewShift =
TLO.DAG.getNode(ISD::SRL, dl, HalfVT, NewOp, NewShiftAmt);
return TLO.CombineTo(
Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, NewShift));
}
}

// Compute the new bits that are at the top now.
if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
Depth + 1))
Expand Down
14 changes: 6 additions & 8 deletions llvm/test/CodeGen/AMDGPU/idot4s.ll
Expand Up @@ -963,21 +963,19 @@ define amdgpu_kernel void @idot4_acc16_vecMul(ptr addrspace(1) %src1,
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: buffer_load_ushort v1, off, s[0:3], 0
; GFX7-NEXT: s_waitcnt vmcnt(2)
; GFX7-NEXT: v_bfe_i32 v3, v2, 16, 8
; GFX7-NEXT: v_bfe_i32 v4, v2, 0, 8
; GFX7-NEXT: v_bfe_i32 v3, v2, 16, 8
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_bfe_i32 v7, v0, 0, 8
; GFX7-NEXT: v_ashrrev_i32_e32 v5, 24, v2
; GFX7-NEXT: v_bfe_i32 v2, v2, 8, 8
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX7-NEXT: v_bfe_i32 v6, v0, 16, 8
; GFX7-NEXT: v_bfe_i32 v7, v0, 0, 8
; GFX7-NEXT: v_ashrrev_i32_e32 v8, 24, v0
; GFX7-NEXT: v_bfe_i32 v0, v0, 8, 8
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX7-NEXT: v_alignbit_b32 v2, 0, v2, 16
; GFX7-NEXT: v_alignbit_b32 v0, 0, v0, 16
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mad_u32_u24 v1, v4, v7, v1
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
Expand Down
30 changes: 13 additions & 17 deletions llvm/test/CodeGen/AMDGPU/idot4u.ll
Expand Up @@ -1850,28 +1850,24 @@ define amdgpu_kernel void @udot4_acc16_vecMul(ptr addrspace(1) %src1,
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: buffer_load_ushort v1, off, s[0:3], 0
; GFX7-NEXT: s_waitcnt vmcnt(2)
; GFX7-NEXT: v_and_b32_e32 v3, 0xff00, v2
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v2
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_and_b32_e32 v6, 0xff00, v0
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2
; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v2
; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v0
; GFX7-NEXT: v_alignbit_b32 v2, v4, v2, 16
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; GFX7-NEXT: v_and_b32_e32 v4, 0xff, v0
; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; GFX7-NEXT: v_alignbit_b32 v0, v7, v0, 16
; GFX7-NEXT: v_alignbit_b32 v3, 0, v3, 16
; GFX7-NEXT: v_alignbit_b32 v6, 0, v6, 16
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0
; GFX7-NEXT: v_and_b32_e32 v7, 0xff, v0
; GFX7-NEXT: v_bfe_u32 v4, v2, 8, 8
; GFX7-NEXT: v_alignbit_b32 v2, v3, v2, 16
; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8
; GFX7-NEXT: v_alignbit_b32 v0, v6, v0, 16
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mad_u32_u24 v1, v5, v4, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v2
; GFX7-NEXT: v_mad_u32_u24 v1, v5, v7, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v0
; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX7-NEXT: v_mad_u32_u24 v1, v3, v6, v1
; GFX7-NEXT: v_mad_u32_u24 v1, v4, v3, v1
; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
; GFX7-NEXT: v_mad_u32_u24 v0, v7, v8, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v6, v5, v0
; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
Expand Down
60 changes: 29 additions & 31 deletions llvm/test/CodeGen/AMDGPU/idot8s.ll
Expand Up @@ -2014,48 +2014,48 @@ define amdgpu_kernel void @idot8_acc16_vecMul(ptr addrspace(1) %src1,
; GFX7-NEXT: buffer_load_ushort v1, off, s[0:3], 0
; GFX7-NEXT: s_addc_u32 s13, s13, 0
; GFX7-NEXT: s_waitcnt vmcnt(2)
; GFX7-NEXT: v_bfe_i32 v8, v2, 0, 4
; GFX7-NEXT: v_bfe_i32 v7, v2, 4, 4
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_bfe_i32 v15, v0, 0, 4
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX7-NEXT: v_bfe_i32 v14, v0, 4, 4
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX7-NEXT: v_bfe_i32 v6, v2, 8, 4
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX7-NEXT: v_bfe_i32 v13, v0, 8, 4
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mad_u32_u24 v1, v8, v15, v1
; GFX7-NEXT: v_bfe_i32 v6, v2, 0, 4
; GFX7-NEXT: v_bfe_i32 v3, v2, 24, 4
; GFX7-NEXT: v_bfe_i32 v4, v2, 20, 4
; GFX7-NEXT: v_bfe_i32 v5, v2, 16, 4
; GFX7-NEXT: v_ashrrev_i32_e32 v9, 28, v2
; GFX7-NEXT: v_bfe_i32 v2, v2, 12, 4
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_bfe_i32 v13, v0, 0, 4
; GFX7-NEXT: v_bfe_i32 v4, v2, 16, 4
; GFX7-NEXT: v_bfe_i32 v5, v2, 8, 4
; GFX7-NEXT: v_ashrrev_i32_e32 v7, 28, v2
; GFX7-NEXT: v_bfe_i32 v8, v2, 20, 4
; GFX7-NEXT: v_bfe_i32 v9, v2, 12, 4
; GFX7-NEXT: v_bfe_i32 v2, v2, 4, 4
; GFX7-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX7-NEXT: v_bfe_i32 v10, v0, 24, 4
; GFX7-NEXT: v_bfe_i32 v11, v0, 20, 4
; GFX7-NEXT: v_bfe_i32 v12, v0, 16, 4
; GFX7-NEXT: v_ashrrev_i32_e32 v16, 28, v0
; GFX7-NEXT: v_bfe_i32 v0, v0, 12, 4
; GFX7-NEXT: v_bfe_i32 v11, v0, 16, 4
; GFX7-NEXT: v_bfe_i32 v12, v0, 8, 4
; GFX7-NEXT: v_ashrrev_i32_e32 v14, 28, v0
; GFX7-NEXT: v_bfe_i32 v15, v0, 20, 4
; GFX7-NEXT: v_bfe_i32 v16, v0, 12, 4
; GFX7-NEXT: v_bfe_i32 v0, v0, 4, 4
; GFX7-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX7-NEXT: v_mad_u32_u24 v1, v7, v14, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mad_u32_u24 v1, v6, v13, v1
; GFX7-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX7-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX7-NEXT: v_mad_u32_u24 v0, v5, v12, v0
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX7-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX7-NEXT: v_mad_u32_u24 v0, v5, v12, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v9, v16, v0
; GFX7-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX7-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX7-NEXT: v_mad_u32_u24 v0, v4, v11, v0
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX7-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX7-NEXT: v_mad_u32_u24 v0, v4, v11, v0
; GFX7-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX7-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX7-NEXT: v_mad_u32_u24 v0, v8, v15, v0
; GFX7-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX7-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX7-NEXT: v_mad_u32_u24 v0, v3, v10, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v9, v16, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v7, v14, v0
; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
Expand Down Expand Up @@ -2581,12 +2581,10 @@ define amdgpu_kernel void @idot8_acc8_vecMul(ptr addrspace(1) %src1,
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mad_u32_u24 v1, v7, v14, v1
; GFX7-NEXT: v_and_b32_e32 v6, 0xff, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
; GFX7-NEXT: v_and_b32_e32 v13, 0xff, v13
; GFX7-NEXT: v_lshlrev_b32_e32 v16, 24, v16
; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
; GFX7-NEXT: v_alignbit_b32 v9, 0, v9, 24
; GFX7-NEXT: v_alignbit_b32 v16, 0, v16, 24
; GFX7-NEXT: v_and_b32_e32 v9, 0xff, v9
; GFX7-NEXT: v_and_b32_e32 v16, 0xff, v16
; GFX7-NEXT: v_mad_u32_u24 v0, v6, v13, v0
; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX7-NEXT: v_and_b32_e32 v12, 0xff, v12
Expand Down
28 changes: 12 additions & 16 deletions llvm/test/CodeGen/AMDGPU/idot8u.ll
Expand Up @@ -2444,32 +2444,28 @@ define amdgpu_kernel void @udot8_acc8_vecMul(ptr addrspace(1) %src1,
; GFX7-NEXT: buffer_load_ubyte v1, off, s[0:3], 0
; GFX7-NEXT: s_addc_u32 s13, s13, 0
; GFX7-NEXT: s_waitcnt vmcnt(2)
; GFX7-NEXT: v_and_b32_e32 v9, 15, v2
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 28, v2
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_and_b32_e32 v16, 15, v0
; GFX7-NEXT: v_bfe_u32 v4, v2, 24, 4
; GFX7-NEXT: v_bfe_u32 v5, v2, 20, 4
; GFX7-NEXT: v_bfe_u32 v6, v2, 16, 4
; GFX7-NEXT: v_bfe_u32 v7, v2, 8, 4
; GFX7-NEXT: v_bfe_u32 v8, v2, 4, 4
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 12, v2
; GFX7-NEXT: v_bfe_u32 v7, v2, 12, 4
; GFX7-NEXT: v_bfe_u32 v8, v2, 8, 4
; GFX7-NEXT: v_bfe_u32 v9, v2, 4, 4
; GFX7-NEXT: v_and_b32_e32 v2, 15, v2
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_lshrrev_b32_e32 v10, 28, v0
; GFX7-NEXT: v_bfe_u32 v11, v0, 24, 4
; GFX7-NEXT: v_bfe_u32 v12, v0, 20, 4
; GFX7-NEXT: v_bfe_u32 v13, v0, 16, 4
; GFX7-NEXT: v_bfe_u32 v14, v0, 8, 4
; GFX7-NEXT: v_bfe_u32 v15, v0, 4, 4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 12, v0
; GFX7-NEXT: v_bfe_u32 v14, v0, 12, 4
; GFX7-NEXT: v_bfe_u32 v15, v0, 8, 4
; GFX7-NEXT: v_bfe_u32 v16, v0, 4, 4
; GFX7-NEXT: v_and_b32_e32 v0, 15, v0
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_mad_u32_u24 v1, v9, v16, v1
; GFX7-NEXT: v_and_b32_e32 v2, 0xf000000, v2
; GFX7-NEXT: v_and_b32_e32 v0, 0xf000000, v0
; GFX7-NEXT: v_mad_u32_u24 v1, v8, v15, v1
; GFX7-NEXT: v_alignbit_b32 v2, s10, v2, 24
; GFX7-NEXT: v_alignbit_b32 v0, 0, v0, 24
; GFX7-NEXT: v_mad_u32_u24 v1, v7, v14, v1
; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
; GFX7-NEXT: v_mad_u32_u24 v0, v9, v16, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v8, v15, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v7, v14, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v6, v13, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v5, v12, v0
; GFX7-NEXT: v_mad_u32_u24 v0, v4, v11, v0
Expand Down

0 comments on commit e9caa37

Please sign in to comment.