[InstCombine, ARM, AArch64] Convert table lookup to shuffle vector
Turning a table lookup intrinsic into a shufflevector instruction
can be beneficial. If the mask used for the lookup is the constant
vector {7,6,5,4,3,2,1,0}, then the back-end generates a byte-reverse
(rev64) instruction instead.
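
For illustration, a minimal C++ sketch (not part of the patch) of the kind of source
pattern this targets, assuming <arm_neon.h> and a NEON-capable target. The tbl1/vtbl1
intrinsic emitted for the reversed-index lookup is now folded into a shufflevector,
which the back-end can select as rev64, the same as the equivalent vrev64_u8 call:

  #include <arm_neon.h>

  // Table lookup with the constant mask {7,6,5,4,3,2,1,0}: every byte of the
  // result is taken from the 8-byte table in reverse order, i.e. a byte reverse.
  uint8x8_t reverse_bytes(uint8x8_t v) {            // illustrative name
    const uint8x8_t mask = {7, 6, 5, 4, 3, 2, 1, 0};
    return vtbl1_u8(v, mask);                       // equivalent to vrev64_u8(v)
  }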

Differential Revision: https://reviews.llvm.org/D46133

llvm-svn: 333550
labrinea committed May 30, 2018
1 parent 8df8b12 commit 52457d3
Showing 3 changed files with 146 additions and 0 deletions.
46 changes: 46 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1387,6 +1387,46 @@ static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
  return maxnum(Src0, Src1);
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<VectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  uint32_t Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if (Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *ShuffleMask = ConstantDataVector::get(II.getContext(),
                                              makeArrayRef(Indexes));
  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, ShuffleMask);
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
@@ -2928,6 +2968,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
    break;
  }

  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    if (Value *V = simplifyNeonTbl1(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
65 changes: 65 additions & 0 deletions llvm/test/Transforms/InstCombine/AArch64/tbl1.ll
@@ -0,0 +1,65 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-arm-none-eabi"

; Turning a table lookup intrinsic into a shuffle vector instruction
; can be beneficial. If the mask used for the lookup is the constant
; vector {7,6,5,4,3,2,1,0}, then the back-end generates rev64
; instructions instead.

define <8 x i8> @tbl1_8x8(<16 x i8> %vec) {
; CHECK-LABEL: @tbl1_8x8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[VEC:%.*]], <16 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i8> [[TMP0]]
;
entry:
%tbl1 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %vec, <8 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
ret <8 x i8> %tbl1
}

; Bail out of the optimization if a mask index is out of range.
define <8 x i8> @tbl1_8x8_out_of_range(<16 x i8> %vec) {
; CHECK-LABEL: @tbl1_8x8_out_of_range(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TBL1:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[VEC:%.*]], <8 x i8> <i8 8, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
; CHECK-NEXT: ret <8 x i8> [[TBL1]]
;
entry:
%tbl1 = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> %vec, <8 x i8> <i8 8, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
ret <8 x i8> %tbl1
}

; Bail out of the optimization if the return vector does not have 8 elements.
define <16 x i8> @tbl1_16x8(<16 x i8> %vec) {
; CHECK-LABEL: @tbl1_16x8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TBL1:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> [[VEC:%.*]], <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
; CHECK-NEXT: ret <16 x i8> [[TBL1]]
;
entry:
%tbl1 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %vec, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
ret <16 x i8> %tbl1
}

; Bail out of the optimization if the return vector elements are not of type i8.
define <8 x i16> @tbl1_8x16(<16 x i8> %vec) {
; CHECK-LABEL: @tbl1_8x16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TBL1:%.*]] = call <8 x i16> @llvm.aarch64.neon.tbl1.v8i16(<16 x i8> [[VEC:%.*]], <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
; CHECK-NEXT: ret <8 x i16> [[TBL1]]
;
entry:
%tbl1 = call <8 x i16> @llvm.aarch64.neon.tbl1.v8i16(<16 x i8> %vec, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
ret <8 x i16> %tbl1
}

; The type <8 x i16> is not a valid return type for this intrinsic,
; but we want to test that the optimization won't trigger for vector
; elements of a type other than i8.
declare <8 x i16> @llvm.aarch64.neon.tbl1.v8i16(<16 x i8>, <8 x i16>)

declare <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8>, <8 x i8>)
declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>)
35 changes: 35 additions & 0 deletions llvm/test/Transforms/InstCombine/ARM/tbl1.ll
@@ -0,0 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv8-arm-none-eabi"

; Turning a table lookup intrinsic into a shuffle vector instruction
; can be beneficial. If the mask used for the lookup is the constant
; vector {7,6,5,4,3,2,1,0}, then the back-end generates rev64
; instructions instead.

define <8 x i8> @tbl1_8x8(<8 x i8> %vec) {
; CHECK-LABEL: @tbl1_8x8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i8> [[VEC:%.*]], <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: ret <8 x i8> [[TMP0]]
;
entry:
%vtbl1 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %vec, <8 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
ret <8 x i8> %vtbl1
}

; Bail out of the optimization if a mask index is out of range.
define <8 x i8> @tbl1_8x8_out_of_range(<8 x i8> %vec) {
; CHECK-LABEL: @tbl1_8x8_out_of_range(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VTBL1:%.*]] = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> [[VEC:%.*]], <8 x i8> <i8 8, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
; CHECK-NEXT: ret <8 x i8> [[VTBL1]]
;
entry:
%vtbl1 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %vec, <8 x i8> <i8 8, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
ret <8 x i8> %vtbl1
}

declare <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8>, <8 x i8>)
