From 5d49f665704523fece06d9621b3deeb5dc4b44e6 Mon Sep 17 00:00:00 2001
From: Michael Berg
Date: Tue, 12 Jun 2018 16:13:11 +0000
Subject: [PATCH] Utilize new SDNode flag functionality to expand current
 support for fmul

Summary: This patch originated from D46562 and is a proper subset, with some issues addressed for fmul.

Reviewers: spatel, hfinkel, wristow, arsenm

Reviewed By: spatel

Subscribers: nhaehnle, wdng

Differential Revision: https://reviews.llvm.org/D47911

llvm-svn: 334514
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  7 ++++--
 llvm/test/CodeGen/X86/fmul-combines.ll        |  2 --
 llvm/test/CodeGen/X86/fp-fold.ll              | 25 +++++++++++--------
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index da46a8474900b..0956cb519ef61 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10542,12 +10542,15 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
   if (SDValue NewSel = foldBinOpIntoSelect(N))
     return NewSel;
 
-  if (Options.UnsafeFPMath) {
+  if (Options.UnsafeFPMath ||
+      (Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
     // fold (fmul A, 0) -> 0
     if (N1CFP && N1CFP->isZero())
       return N1;
+  }
 
-    // fmul (fmul X, C1), X2 -> fmul X, C1 * C2
+  if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
+    // fmul (fmul X, C1), C2 -> fmul X, C1 * C2
     if (N0.getOpcode() == ISD::FMUL) {
       // Fold scalars or any vector constants (not just splats).
       // This fold is done in general by InstCombine, but extra fmul insts
diff --git a/llvm/test/CodeGen/X86/fmul-combines.ll b/llvm/test/CodeGen/X86/fmul-combines.ll
index 8a170251e4907..cd493ad4be7a9 100644
--- a/llvm/test/CodeGen/X86/fmul-combines.ll
+++ b/llvm/test/CodeGen/X86/fmul-combines.ll
@@ -92,7 +92,6 @@ define <4 x float> @fmul_v4f32_two_consts_no_splat_reassoc(<4 x float> %x) {
 ; CHECK-LABEL: fmul_v4f32_two_consts_no_splat_reassoc:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
-; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
   %z = fmul reassoc <4 x float> %y, <float 5.0, float 6.0, float 7.0, float 8.0>
@@ -104,7 +103,6 @@ define <4 x float> @fmul_v4f32_two_consts_no_splat_reassoc_2(<4 x float> %x) {
 ; CHECK-LABEL: fmul_v4f32_two_consts_no_splat_reassoc_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addps %xmm0, %xmm0
 ; CHECK-NEXT:    mulps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %y = fadd <4 x float> %x, %x
   %z = fmul reassoc <4 x float> %y, <float 5.0, float 6.0, float 7.0, float 8.0>
diff --git a/llvm/test/CodeGen/X86/fp-fold.ll b/llvm/test/CodeGen/X86/fp-fold.ll
index 55bb21910af28..fa4404bb1c0c1 100644
--- a/llvm/test/CodeGen/X86/fp-fold.ll
+++ b/llvm/test/CodeGen/X86/fp-fold.ll
@@ -101,18 +101,11 @@ define float @fsub_negzero_nsz(float %x) {
   ret float %r
 }
 
-; TODO: handle x*0 for fast flags the same as unsafe
 define float @fmul_zero(float %x) {
-; STRICT-LABEL: fmul_zero:
-; STRICT:       # %bb.0:
-; STRICT-NEXT:    xorps %xmm1, %xmm1
-; STRICT-NEXT:    mulss %xmm1, %xmm0
-; STRICT-NEXT:    retq
-;
-; UNSAFE-LABEL: fmul_zero:
-; UNSAFE:       # %bb.0:
-; UNSAFE-NEXT:    xorps %xmm0, %xmm0
-; UNSAFE-NEXT:    retq
+; ANY-LABEL: fmul_zero:
+; ANY:       # %bb.0:
+; ANY-NEXT:    xorps %xmm0, %xmm0
+; ANY-NEXT:    retq
   %r = fmul nnan nsz float %x, 0.0
   ret float %r
 }
@@ -124,3 +117,13 @@ define float @fmul_one(float %x) {
   %r = fmul float %x, 1.0
   ret float %r
 }
+
+define float @fmul_x_const_const(float %x) {
+; ANY-LABEL: fmul_x_const_const:
+; ANY:       # %bb.0:
+; ANY-NEXT:    mulss {{.*}}(%rip), %xmm0
+; ANY-NEXT:    retq
+  %mul = fmul reassoc float %x, 9.0
+  %r = fmul reassoc float %mul, 4.0
+  ret float %r
+}
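
As a quick illustration of what the change enables (a sketch, not part of the commit; the function names below are made up and not from the LLVM test suite), both folds now fire from instruction-level fast-math flags alone, with no -enable-unsafe-fp-math on the llc command line:

; Run with: llc -mtriple=x86_64-unknown-unknown < example.ll

; nnan+nsz allow (x * 0.0) -> 0.0. Both flags are needed: a NaN input would
; make the product NaN, and a negative x would make it -0.0, so neither
; NaNs nor signed zeros may matter before the zero can be forwarded.
define float @mul_by_zero_flags(float %x) {
  %r = fmul nnan nsz float %x, 0.0
  ret float %r
}

; reassoc allows (x * 3.0) * 5.0 -> x * 15.0, i.e. a single mulss, via the
; fmul (fmul X, C1), C2 -> fmul X, C1 * C2 combine gated above.
define float @mul_two_consts_flags(float %x) {
  %t = fmul reassoc float %x, 3.0
  %r = fmul reassoc float %t, 5.0
  ret float %r
}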