[x86] eliminate unnecessary shuffling/moves with unary scalar math ops (PR21507)

Finish the job that was abandoned in D6958 following the refactoring in
http://reviews.llvm.org/rL230221:

1. Uncomment the intrinsic def for the AVX r_Int instruction.
2. Add missing r_Int entries to the load folding tables; there are already
   tests that check these in "test/CodeGen/X86/fold-load-unops.ll", so I
   haven't added any more in this patch.
3. Add patterns to solve PR21507 ( https://llvm.org/bugs/show_bug.cgi?id=21507 ).

So instead of this:

  movaps	%xmm0, %xmm1
  rcpss	%xmm1, %xmm1
  movss	%xmm1, %xmm0

We should now get:

  rcpss	%xmm0, %xmm0

And instead of this:

  vsqrtss	%xmm0, %xmm0, %xmm1
  vblendps	$1, %xmm1, %xmm0, %xmm0 ## xmm0 = xmm1[0],xmm0[1,2,3]

We should now get:

  vsqrtss	%xmm0, %xmm0, %xmm0
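
One way such IR can arise at the source level is via the SSE scalar intrinsics. Below is a minimal C sketch (my own reproducer, not taken from the PR or this commit): clang lowers _mm_rcp_ss to the @llvm.x86.sse.rcp.ss intrinsic and _mm_move_ss to a lane-0 shufflevector, which is the DAG shape the new patterns fold:

  #include <xmmintrin.h>

  // _mm_rcp_ss approximates the reciprocal of the low lane; _mm_move_ss
  // merges that low lane back into x. Together they produce the
  // intrinsic-plus-movss pattern that now selects to a single rcpss.
  __m128 recip(__m128 x) {
    return _mm_move_ss(x, _mm_rcp_ss(x));
  }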


Differential Revision: http://reviews.llvm.org/D9504

llvm-svn: 236740
rotateright committed May 7, 2015
1 parent 44faaa7 commit a9f6d35
Showing 3 changed files with 124 additions and 10 deletions.
6 changes: 6 additions & 0 deletions llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -526,6 +526,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 },
{ X86::PTESTrr, X86::PTESTrm, TB_ALIGN_16 },
{ X86::RCPPSr, X86::RCPPSm, TB_ALIGN_16 },
{ X86::RCPSSr, X86::RCPSSm, 0 },
{ X86::RCPSSr_Int, X86::RCPSSm_Int, 0 },
{ X86::ROUNDPDr, X86::ROUNDPDm, TB_ALIGN_16 },
{ X86::ROUNDPSr, X86::ROUNDPSm, TB_ALIGN_16 },
{ X86::RSQRTPSr, X86::RSQRTPSm, TB_ALIGN_16 },
@@ -1239,9 +1241,13 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VCVTSS2SDrr, X86::VCVTSS2SDrm, 0 },
{ X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 },
{ X86::VRCPSSr, X86::VRCPSSm, 0 },
{ X86::VRCPSSr_Int, X86::VRCPSSm_Int, 0 },
{ X86::VRSQRTSSr, X86::VRSQRTSSm, 0 },
{ X86::VRSQRTSSr_Int, X86::VRSQRTSSm_Int, 0 },
{ X86::VSQRTSDr, X86::VSQRTSDm, 0 },
{ X86::VSQRTSDr_Int, X86::VSQRTSDm_Int, 0 },
{ X86::VSQRTSSr, X86::VSQRTSSm, 0 },
{ X86::VSQRTSSr_Int, X86::VSQRTSSm_Int, 0 },
{ X86::VADDPDrr, X86::VADDPDrm, 0 },
{ X86::VADDPSrr, X86::VADDPSrm, 0 },
{ X86::VADDSDrr, X86::VADDSDrm, 0 },
55 changes: 45 additions & 10 deletions llvm/lib/Target/X86/X86InstrSSE.td
@@ -3369,7 +3369,7 @@ multiclass sse_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
def : Pat<(Intr (load addr:$src)),
(vt (COPY_TO_REGCLASS(!cast<Instruction>(NAME#Suffix##m)
addr:$src), VR128))>;
def : Pat<(Intr mem_cpat:$src),
(!cast<Instruction>(NAME#Suffix##m_Int)
(vt (IMPLICIT_DEF)), mem_cpat:$src)>;
}
@@ -3390,16 +3390,15 @@ multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[], itins.rm, d>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
let isCodeGenOnly = 1 in {
// todo: uncomment when all r_Int forms will be added to X86InstrInfo.cpp
//def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
// (ins VR128:$src1, VR128:$src2),
// !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
// []>, Sched<[itins.Sched.Folded]>;
def r_Int : I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, Sched<[itins.Sched.Folded]>;
let mayLoad = 1 in
def m_Int : I<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, vec_memop:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
}

Expand All @@ -3411,13 +3410,11 @@ multiclass avx_fp_unop_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
(!cast<Instruction>("V"#NAME#Suffix##m_Int) (vt (IMPLICIT_DEF)),
mem_cpat:$src)>;

// todo: use r_Int form when it will be ready
//def : Pat<(Intr VR128:$src), (!cast<Instruction>("V"#NAME#Suffix##r_Int)
// (VT (IMPLICIT_DEF)), VR128:$src)>;
def : Pat<(Intr VR128:$src),
(vt (COPY_TO_REGCLASS(
!cast<Instruction>("V"#NAME#Suffix##r) (ScalarVT (IMPLICIT_DEF)),
(ScalarVT (COPY_TO_REGCLASS VR128:$src, RC))), VR128))>;

def : Pat<(Intr mem_cpat:$src),
(!cast<Instruction>("V"#NAME#Suffix##m_Int)
(vt (IMPLICIT_DEF)), mem_cpat:$src)>;
@@ -3540,6 +3537,44 @@ defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, SSE_RCPS>,

// There is no f64 version of the reciprocal approximation instructions.

// TODO: We should add *scalar* op patterns for these just like we have for
// the binops above. If the binop and unop patterns could all be unified
// that would be even better.

multiclass scalar_unary_math_patterns<Intrinsic Intr, string OpcPrefix,
SDNode Move, ValueType VT,
Predicate BasePredicate> {
let Predicates = [BasePredicate] in {
def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
(!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
}

// With SSE 4.1, blendi is preferred to movs*, so match that too.
let Predicates = [UseSSE41] in {
def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
(!cast<I>(OpcPrefix#r_Int) VT:$dst, VT:$src)>;
}

// Repeat for AVX versions of the instructions.
let Predicates = [HasAVX] in {
def : Pat<(VT (Move VT:$dst, (Intr VT:$src))),
(!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;

def : Pat<(VT (X86Blendi VT:$dst, (Intr VT:$src), (i8 1))),
(!cast<I>("V"#OpcPrefix#r_Int) VT:$dst, VT:$src)>;
}
}

defm : scalar_unary_math_patterns<int_x86_sse_rcp_ss, "RCPSS", X86Movss,
v4f32, UseSSE1>;
defm : scalar_unary_math_patterns<int_x86_sse_rsqrt_ss, "RSQRTSS", X86Movss,
v4f32, UseSSE1>;
defm : scalar_unary_math_patterns<int_x86_sse_sqrt_ss, "SQRTSS", X86Movss,
v4f32, UseSSE1>;
defm : scalar_unary_math_patterns<int_x86_sse2_sqrt_sd, "SQRTSD", X86Movsd,
v2f64, UseSSE2>;


//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Non-temporal stores
//===----------------------------------------------------------------------===//
73 changes: 73 additions & 0 deletions llvm/test/CodeGen/X86/sse-scalar-fp-arith-unary.ll
@@ -0,0 +1,73 @@
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2 < %s | FileCheck --check-prefix=SSE %s
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse4.1 < %s | FileCheck --check-prefix=SSE %s
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx < %s | FileCheck --check-prefix=AVX %s

; PR21507 - https://llvm.org/bugs/show_bug.cgi?id=21507
; Each function should be a single math op; no extra moves.


define <4 x float> @recip(<4 x float> %x) {
; SSE-LABEL: recip:
; SSE: # BB#0:
; SSE-NEXT: rcpss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: recip:
; AVX: # BB#0:
; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %x)
%shuf = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x float> %shuf
}

define <4 x float> @recip_square_root(<4 x float> %x) {
; SSE-LABEL: recip_square_root:
; SSE: # BB#0:
; SSE-NEXT: rsqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: recip_square_root:
; AVX: # BB#0:
; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %x)
%shuf = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x float> %shuf
}

define <4 x float> @square_root(<4 x float> %x) {
; SSE-LABEL: square_root:
; SSE: # BB#0:
; SSE-NEXT: sqrtss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: square_root:
; AVX: # BB#0:
; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x)
%shuf = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x float> %shuf
}

define <2 x double> @square_root_double(<2 x double> %x) {
; SSE-LABEL: square_root_double:
; SSE: # BB#0:
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: square_root_double:
; AVX: # BB#0:
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%y = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %x)
%shuf = shufflevector <2 x double> %y, <2 x double> %x, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuf
}

declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>)
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>)
declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>)
declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>)
