[AArch64] Fix performPostLD1Combine to check for constant lane index.
Summary:
performPostLD1Combine in AArch64ISelLowering looks for an
insert_vector_elt of a loaded value that it can optimize into a single
LD1LANE instruction.  The code checking for the pattern did not verify
that the lane index was a constant, which could cause two problems:

- an assert when lowering the LD1LANE ISD node, since it assumes a
  constant operand

- an assert in isel if the lane index value depends on the
  post-incremented base register

Both of these issues are avoided by simply checking that the lane index
is a constant.

Fixes bug 35822.
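
For illustration, a minimal IR sketch of the kind of pattern involved,
distilled from the @f2 regression test added below (the function name
@sketch and the stored value are illustrative only, not part of the
commit); the lane index %idx is not a constant, so the combine must now
bail out instead of forming an LD1LANEPOST node:

define <4 x i32> @sketch(i32* %p, <4 x i32> %v, i32 %idx) {
  %L0 = load i32, i32* %p                                ; scalar loaded from %p
  %p1 = getelementptr i32, i32* %p, i64 1
  store i32 0, i32* %p1                                  ; access at %p+1 makes %p a post-increment candidate
  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx  ; non-constant lane index
  ret <4 x i32> %vret
}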

Reviewers: t.p.northover, javed.absar

Subscribers: rengolin, kristof.beyls, mcrosier, llvm-commits

Differential Revision: https://reviews.llvm.org/D46591

llvm-svn: 332103
geoffberry committed May 11, 2018
1 parent ae20108 commit 6046026
Showing 2 changed files with 32 additions and 1 deletion.
11 changes: 10 additions & 1 deletion llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9935,6 +9935,15 @@ static SDValue performPostLD1Combine(SDNode *N,
   if (LD->getOpcode() != ISD::LOAD)
     return SDValue();
 
+  // The vector lane must be a constant in the LD1LANE opcode.
+  SDValue Lane;
+  if (IsLaneOp) {
+    Lane = N->getOperand(2);
+    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
+    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
+      return SDValue();
+  }
+
   LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
   EVT MemVT = LoadSDN->getMemoryVT();
   // Check if memory operand is the same type as the vector element.
@@ -9991,7 +10000,7 @@ static SDValue performPostLD1Combine(SDNode *N,
     Ops.push_back(LD->getOperand(0)); // Chain
     if (IsLaneOp) {
       Ops.push_back(Vector); // The vector to be inserted
-      Ops.push_back(N->getOperand(2)); // The lane to be inserted in the vector
+      Ops.push_back(Lane); // The lane to be inserted in the vector
     }
     Ops.push_back(Addr);
     Ops.push_back(Inc);
22 changes: 22 additions & 0 deletions llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -28,6 +28,28 @@ return: ; preds = %if.then172, %cond.e
   ret void
 }
 
+; Avoid an assert/bad codegen in LD1LANEPOST lowering by not forming
+; LD1LANEPOST ISD nodes with a non-constant lane index.
+define <4 x i32> @f2(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx
+  store i32 %L1, i32 *%p
+  ret <4 x i32> %vret
+}
+
+; Check that a cycle is avoided during isel between the LD1LANEPOST instruction and the load of %L1.
+define <4 x i32> @f3(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
+  %L0 = load i32, i32* %p
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %L1 = load i32, i32* %p1
+  %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+  %vret = insertelement <4 x i32> %v, i32 %L0, i32 %L1
+  ret <4 x i32> %vret
+}
+
 ; Function Attrs: nounwind readnone
 declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1
 
