From f509d85a6dc13980000ca2eba13c34f7d62e4139 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Thu, 2 Jun 2016 16:01:15 +0000
Subject: [PATCH] [DAG] use getBitcast() to reduce code

Although this was intended to be NFC, the test case wiggle shows a change
in code scheduling/RA caused by a difference in the SDLoc() generation.
Depending on how you look at it, this is the (dis)advantage of exact
checking in regression tests.

llvm-svn: 271526
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 58 ++++++++-----------
 llvm/test/CodeGen/X86/vec_fneg.ll             |  4 +-
 2 files changed, 27 insertions(+), 35 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8e7c89c26f148..d337848349961 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7381,13 +7381,12 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
          TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
         (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
          TLI.isOperationLegal(ISD::Constant, VT)))
-      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
+      return DAG.getBitcast(VT, N0);
   }
 
   // (conv (conv x, t1), t2) -> (conv x, t2)
   if (N0.getOpcode() == ISD::BITCAST)
-    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT,
-                       N0.getOperand(0));
+    return DAG.getBitcast(VT, N0.getOperand(0));
 
   // fold (conv (load x)) -> (load (conv*)x)
   // If the resultant load doesn't need a higher alignment than the original!
@@ -7432,8 +7431,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
        (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
       N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector() &&
       !N0.getValueType().isVector()) {
-    SDValue NewConv = DAG.getNode(ISD::BITCAST, SDLoc(N0), VT,
-                                  N0.getOperand(0));
+    SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0));
     AddToWorklist(NewConv.getNode());
 
     SDLoc DL(N);
@@ -7486,8 +7484,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
     unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
     EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
     if (isTypeLegal(IntXVT)) {
-      SDValue X = DAG.getNode(ISD::BITCAST, SDLoc(N0),
-                              IntXVT, N0.getOperand(1));
+      SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
       AddToWorklist(X.getNode());
 
       // If X has a different width than the result/lhs, sext it or truncate it.
@@ -7535,8 +7532,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
                         X, DAG.getConstant(SignBit, SDLoc(X), VT));
         AddToWorklist(X.getNode());
 
-        SDValue Cst = DAG.getNode(ISD::BITCAST, SDLoc(N0),
-                                  VT, N0.getOperand(0));
+        SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
         Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
                           Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
         AddToWorklist(Cst.getNode());
@@ -7568,7 +7564,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
         return SDValue(Op.getOperand(0));
       if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
           ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
-        return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
+        return DAG.getBitcast(VT, Op);
       return SDValue();
     };
 
@@ -7625,8 +7621,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
   // we can end up with a scalar-to-vector node here.
   if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
     return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT,
-                       DAG.getNode(ISD::BITCAST, SDLoc(BV),
-                                   DstEltVT, BV->getOperand(0)));
+                       DAG.getBitcast(DstEltVT, BV->getOperand(0)));
 
   SmallVector<SDValue, 8> Ops;
   for (SDValue Op : BV->op_values()) {
@@ -7634,8 +7629,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
     // are promoted and implicitly truncated. Make that explicit here.
     if (Op.getValueType() != SrcEltVT)
       Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
-    Ops.push_back(DAG.getNode(ISD::BITCAST, SDLoc(BV),
-                              DstEltVT, Op));
+    Ops.push_back(DAG.getBitcast(DstEltVT, Op));
     AddToWorklist(Ops.back().getNode());
   }
   return DAG.getBuildVector(VT, SDLoc(BV), Ops);
@@ -9311,7 +9305,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
         Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int,
                           DAG.getConstant(SignMask, DL0, IntVT));
         AddToWorklist(Int.getNode());
-        return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Int);
+        return DAG.getBitcast(VT, Int);
       }
     }
 
@@ -9416,7 +9410,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
         Int = DAG.getNode(ISD::AND, DL, IntVT, Int,
                           DAG.getConstant(SignMask, DL, IntVT));
         AddToWorklist(Int.getNode());
-        return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Int);
+        return DAG.getBitcast(N->getValueType(0), Int);
       }
     }
 
@@ -12304,7 +12298,7 @@ SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad(
     if (ResultVT.bitsLT(VecEltVT))
       Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
     else
-      Load = DAG.getNode(ISD::BITCAST, SDLoc(EVE), ResultVT, Load);
+      Load = DAG.getBitcast(ResultVT, Load);
   }
   WorklistRemover DeadNodes(*this);
   SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
@@ -12622,7 +12616,7 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
   // The new BUILD_VECTOR node has the potential to be further optimized.
   AddToWorklist(BV.getNode());
   // Bitcast to the desired type.
-  return DAG.getNode(ISD::BITCAST, dl, VT, BV);
+  return DAG.getBitcast(VT, BV);
 }
 
 SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) {
@@ -12921,15 +12915,14 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
         if (Op.isUndef())
           Op = ScalarUndef;
         else
-          Op = DAG.getNode(ISD::BITCAST, DL, SVT, Op);
+          Op = DAG.getBitcast(SVT, Op);
       }
     }
   }
 
   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                                VT.getSizeInBits() / SVT.getSizeInBits());
-  return DAG.getNode(ISD::BITCAST, DL, VT,
-                     DAG.getBuildVector(VecVT, DL, Ops));
+  return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
 }
 
 // Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
@@ -13057,7 +13050,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
 
       SDLoc dl = SDLoc(N);
       SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NVT, Scalar);
-      return DAG.getNode(ISD::BITCAST, dl, VT, Res);
+      return DAG.getBitcast(VT, Res);
     }
   }
 
@@ -13214,11 +13207,11 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
       // otherwise => (extract_subvec V1, ExtIdx)
       if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
           ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
-        return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1));
-      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT,
-                         DAG.getNode(ISD::BITCAST, dl,
-                                     N->getOperand(0).getValueType(),
-                                     V->getOperand(0)), N->getOperand(1));
+        return DAG.getBitcast(NVT, V->getOperand(1));
+      return DAG.getNode(
+          ISD::EXTRACT_SUBVECTOR, dl, NVT,
+          DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)),
+          N->getOperand(1));
     }
   }
 
@@ -13482,7 +13475,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       // We may have jumped through bitcasts, so the type of the
      // BUILD_VECTOR may not match the type of the shuffle.
       if (V->getValueType(0) != VT)
-        NewBV = DAG.getNode(ISD::BITCAST, SDLoc(N), VT, NewBV);
+        NewBV = DAG.getBitcast(VT, NewBV);
       return NewBV;
     }
   }
@@ -13604,11 +13597,10 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       }
 
       if (LegalMask) {
-        SV0 = DAG.getNode(ISD::BITCAST, SDLoc(N), ScaleVT, SV0);
-        SV1 = DAG.getNode(ISD::BITCAST, SDLoc(N), ScaleVT, SV1);
-        return DAG.getNode(
-            ISD::BITCAST, SDLoc(N), VT,
-            DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
+        SV0 = DAG.getBitcast(ScaleVT, SV0);
+        SV1 = DAG.getBitcast(ScaleVT, SV1);
+        return DAG.getBitcast(
+            VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
       }
     }
   }
diff --git a/llvm/test/CodeGen/X86/vec_fneg.ll b/llvm/test/CodeGen/X86/vec_fneg.ll
index 2dac7f25b4061..78799ff04fe1c 100644
--- a/llvm/test/CodeGen/X86/vec_fneg.ll
+++ b/llvm/test/CodeGen/X86/vec_fneg.ll
@@ -74,9 +74,9 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
 ; X32-SSE2-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
 ; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-SSE2-NEXT:    xorl %eax, %ecx
+; X32-SSE2-NEXT:    movd %ecx, %xmm1
 ; X32-SSE2-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT:    movd %eax, %xmm1
-; X32-SSE2-NEXT:    movd %ecx, %xmm0
+; X32-SSE2-NEXT:    movd %eax, %xmm0
 ; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X32-SSE2-NEXT:    retl
 ;
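A note on the mechanism (not part of the original patch): the SDLoc
difference called out in the commit message comes from where the helper
picks up its location. Below is a minimal sketch of what
SelectionDAG::getBitcast() does, assuming the 2016-era API; the
authoritative version lives in
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp and may differ in detail:

  // Return a bitcast of V to VT, reusing V outright when no cast is
  // needed. The location is derived from the operand V itself, whereas
  // the open-coded call sites above passed SDLoc(N) or SDLoc(N0).
  SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
    if (V.getValueType() == VT)
      return V; // Identical types: the bitcast would be a no-op.
    return getNode(ISD::BITCAST, SDLoc(V), VT, V);
  }

An SDLoc bundles a DebugLoc with an IR order, and that order feeds the
source-order scheduler, so two DAGs that differ only in node locations
can schedule (and register-allocate) differently. That is why an
intended-NFC cleanup produces the movd reordering in vec_fneg.ll rather
than any functional change.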