[GlobalISel] Combine for eliminating redundant operand negations
Differential Revision: https://reviews.llvm.org/D111319
mbrkusanin committed Oct 8, 2021
1 parent f8453ea commit d20840c
Showing 6 changed files with 228 additions and 70 deletions.
9 changes: 9 additions & 0 deletions llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -629,6 +629,15 @@ class CombinerHelper {
/// (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);

/// Transform (fadd x, fneg(y)) -> (fsub x, y)
/// (fadd fneg(x), y) -> (fsub y, x)
/// (fsub x, fneg(y)) -> (fadd x, y)
/// (fmul fneg(x), fneg(y)) -> (fmul x, y)
/// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
/// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
/// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo);

private:
/// Given a non-indexed load or store instruction \p MI, find an offset that
/// can be usefully and legally folded into it as a post-indexing operation.
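These folds are exact in IEEE-754 arithmetic, so no fast-math flags are required: G_FNEG is a pure sign-bit flip, subtraction is addition of the negated operand, and negating both factors of a product or quotient cancels. A minimal standalone sketch of the scalar identities (not part of the patch; the fneg helper below only illustrates G_FNEG's bit-level semantics):

#include <bit>
#include <cassert>
#include <cstdint>

// Illustrative stand-in for G_FNEG: flip only the sign bit (exact for
// zero, infinity, and NaN alike).
static float fneg(float X) {
  return std::bit_cast<float>(std::bit_cast<uint32_t>(X) ^ 0x80000000u);
}

int main() {
  float X = 1.5f, Y = -2.25f;
  assert(X + fneg(Y) == X - Y);       // (fadd x, fneg(y)) -> (fsub x, y)
  assert(fneg(Y) + X == X - Y);       // (fadd fneg(y), x) -> (fsub x, y)
  assert(X - fneg(Y) == X + Y);       // (fsub x, fneg(y)) -> (fadd x, y)
  assert(fneg(X) * fneg(Y) == X * Y); // (fmul fneg(x), fneg(y)) -> (fmul x, y)
  assert(fneg(X) / fneg(Y) == X / Y); // (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  // fma/fmad follow from the fmul case: (-x) * (-y) + z == x * y + z.
  return 0;
}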
8 changes: 7 additions & 1 deletion llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -732,6 +732,12 @@ def mulh_to_lshr : GICombineRule<

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

def redundant_neg_operands: GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
(match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
[{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
undef_to_negative_one,
@@ -779,7 +785,7 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
form_bitfield_extract, constant_fold, fabs_fneg_fold,
-intdiv_combines, mulh_combines]>;
+intdiv_combines, mulh_combines, redundant_neg_operands]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
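For context, build_fn_matchinfo corresponds to CombinerHelper's BuildFnTy: a std::function that the match hook fills in and the apply hook invokes. applyBuildFnNoErase is the variant that does not erase the root instruction afterwards, which is what this rule needs because its callback mutates the root in place rather than replacing it. A simplified sketch of that plumbing (signatures condensed; the real definitions live in CombinerHelper.h/.cpp):

#include <functional>

// Sketch only: the callback type recorded by the match*() hooks.
using BuildFnTy = std::function<void(MachineIRBuilder &)>;

// Sketch only: run the recorded callback. Unlike applyBuildFn, keep MI
// alive, since the callback below rewrites MI's opcode and operands.
void CombinerHelper::applyBuildFnNoErase(MachineInstr &MI,
                                         BuildFnTy &MatchInfo) {
  MatchInfo(Builder);
}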
45 changes: 45 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4618,6 +4618,51 @@ void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
MI.eraseFromParent();
}

bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
BuildFnTy &MatchInfo) {
unsigned Opc = MI.getOpcode();
assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);

Register Dst = MI.getOperand(0).getReg();
Register X = MI.getOperand(1).getReg();
Register Y = MI.getOperand(2).getReg();
LLT Type = MRI.getType(Dst);

// fold (fadd x, fneg(y)) -> (fsub x, y)
// fold (fadd fneg(y), x) -> (fsub x, y)
// G_FADD is commutative so both cases are checked by m_GFAdd
if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
Opc = TargetOpcode::G_FSUB;
}
// fold (fsub x, fneg(y)) -> (fadd x, y)
else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
Opc = TargetOpcode::G_FADD;
}
// fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
// fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
// fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
// fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
// No opcode change needed; just strip both negations.
} else
return false;

MatchInfo = [=, &MI](MachineIRBuilder &B) {
Observer.changingInstr(MI);
MI.setDesc(B.getTII().get(Opc));
MI.getOperand(1).setReg(X);
MI.getOperand(2).setReg(Y);
Observer.changedInstr(MI);
};
return true;
}

bool CombinerHelper::tryCombine(MachineInstr &MI) {
if (tryCombineCopy(MI))
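The matcher leans on MIPatternMatch: m_GFNeg(m_Reg(Y)) succeeds only when the operand is defined by a G_FNEG and, on success, rebinds Y to the G_FNEG's source register, so the recorded callback can splice the un-negated values straight into MI. A hedged sketch of that idiom in isolation (the helper name is illustrative, not from the patch):

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"

// Illustrative helper: if Reg is defined by a G_FNEG, replace Reg with
// the negated source and report success; otherwise leave Reg untouched.
static bool stripOneFNeg(llvm::Register &Reg,
                         const llvm::MachineRegisterInfo &MRI) {
  using namespace llvm::MIPatternMatch;
  llvm::Register Src;
  if (!mi_match(Reg, MRI, m_GFNeg(m_Reg(Src))))
    return false;
  Reg = Src; // Use the value from before the negation.
  return true;
}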
137 changes: 137 additions & 0 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/combine-redundant-neg.mir
@@ -0,0 +1,137 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s

---
name: test_add_rhs
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: test_add_rhs
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[COPY1]]
; CHECK-NEXT: $vgpr0 = COPY [[FSUB]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FNEG %1
%3:_(s32) = G_FADD %0, %2
$vgpr0 = COPY %3(s32)
...
---
name: test_add_lhs
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: test_add_lhs
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY1]], [[COPY]]
; CHECK-NEXT: $vgpr0 = COPY [[FSUB]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FNEG %0
%3:_(s32) = G_FADD %2, %1
$vgpr0 = COPY %3(s32)
...
---
name: test_sub
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: test_sub
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
; CHECK-NEXT: $vgpr0 = COPY [[FADD]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FNEG %1
%3:_(s32) = G_FSUB %0, %2
$vgpr0 = COPY %3(s32)
...
---
name: test_mul
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: test_mul
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
; CHECK-NEXT: $vgpr0 = COPY [[FMUL]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FNEG %0
%3:_(s32) = G_FNEG %1
%4:_(s32) = G_FMUL %2, %3
$vgpr0 = COPY %4(s32)
...
---
name: test_div
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: test_div
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV [[COPY]], [[COPY1]]
; CHECK-NEXT: $vgpr0 = COPY [[FDIV]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FNEG %0
%3:_(s32) = G_FNEG %1
%4:_(s32) = G_FDIV %2, %3
$vgpr0 = COPY %4(s32)
...
---
name: test_fmad
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2
; CHECK-LABEL: name: test_fmad
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK-NEXT: [[FMAD:%[0-9]+]]:_(s32) = G_FMAD [[COPY]], [[COPY1]], [[COPY2]]
; CHECK-NEXT: $vgpr0 = COPY [[FMAD]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
%3:_(s32) = G_FNEG %0
%4:_(s32) = G_FNEG %1
%5:_(s32) = G_FMAD %3, %4, %2
$vgpr0 = COPY %5(s32)
...
---
name: test_fma
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2
; CHECK-LABEL: name: test_fma
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
; CHECK-NEXT: $vgpr0 = COPY [[FMA]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
%3:_(s32) = G_FNEG %0
%4:_(s32) = G_FNEG %1
%5:_(s32) = G_FMA %3, %4, %2
$vgpr0 = COPY %5(s32)
...
35 changes: 10 additions & 25 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/fma.ll
@@ -253,36 +253,21 @@ define <2 x half> @v_fma_v2f16_fneg_lhs_rhs(<2 x half> %x, <2 x half> %y, <2 x h
; GFX6-LABEL: v_fma_v2f16_fneg_lhs_rhs:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v6, 0xffff
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_and_b32_e32 v0, v0, v6
-; GFX6-NEXT: v_or_b32_e32 v0, v1, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v3
-; GFX6-NEXT: v_and_b32_e32 v2, v2, v6
-; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
-; GFX6-NEXT: s_mov_b32 s4, 0x80008000
-; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0
-; GFX6-NEXT: v_xor_b32_e32 v1, s4, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v0
-; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v0, v0
-; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
; GFX6-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX6-NEXT: v_fma_f32 v0, v0, v1, v4
+; GFX6-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX6-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT: v_fma_f32 v1, v2, v3, v5
+; GFX6-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fma_v2f16_fneg_lhs_rhs:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, 0x80008000
-; GFX8-NEXT: v_xor_b32_e32 v0, s4, v0
-; GFX8-NEXT: v_xor_b32_e32 v1, s4, v1
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v1
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v2
@@ -296,14 +281,14 @@ define <2 x half> @v_fma_v2f16_fneg_lhs_rhs(<2 x half> %x, <2 x half> %y, <2 x h
; GFX9-LABEL: v_fma_v2f16_fneg_lhs_rhs:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_pk_fma_f16 v0, v0, v1, v2 neg_lo:[1,1,0] neg_hi:[1,1,0]
+; GFX9-NEXT: v_pk_fma_f16 v0, v0, v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fma_v2f16_fneg_lhs_rhs:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_pk_fma_f16 v0, v0, v1, v2 neg_lo:[1,1,0] neg_hi:[1,1,0]
+; GFX10-NEXT: v_pk_fma_f16 v0, v0, v1, v2
; GFX10-NEXT: s_setpc_b64 s[30:31]
%x.fneg = fneg <2 x half> %x
%y.fneg = fneg <2 x half> %y
@@ -414,26 +399,26 @@ define double @v_fma_f64_fneg_all(double %x, double %y, double %z) {
; GFX6-LABEL: v_fma_f64_fneg_all:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_fma_f64 v[0:1], -v[0:1], -v[2:3], -v[4:5]
+; GFX6-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fma_f64_fneg_all:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_fma_f64 v[0:1], -v[0:1], -v[2:3], -v[4:5]
+; GFX8-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fma_f64_fneg_all:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_fma_f64 v[0:1], -v[0:1], -v[2:3], -v[4:5]
+; GFX9-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fma_f64_fneg_all:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_fma_f64 v[0:1], -v[0:1], -v[2:3], -v[4:5]
+; GFX10-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5]
; GFX10-NEXT: s_setpc_b64 s[30:31]
%neg.x = fneg double %x
%neg.y = fneg double %y
