Integrate LLVM at llvm/llvm-project@186f2ac612ad
Updates LLVM usage to match
[186f2ac612ad](llvm/llvm-project@186f2ac612ad)

PiperOrigin-RevId: 380647265
Change-Id: Icc30dd2cd2d2ed5f4356e73e6000f4d71c4512a0
tensorflower-gardener committed Jun 21, 2021
1 parent e8fd647 commit 4f9f14b
Showing 11 changed files with 33 additions and 24 deletions.
@@ -1270,8 +1270,8 @@ class SliceConverter : public OpConversionPattern<OpTy> {
rewriter.create<linalg::CopyOp>(loc, linalg_op, args[1]);
rewriter.eraseOp(slice_op);
} else {
- rewriter.replaceOpWithNewOp<SubTensorOp>(slice_op, args[0], offsets,
- sizes, strides);
+ rewriter.replaceOpWithNewOp<tensor::ExtractSliceOp>(
+ slice_op, args[0], offsets, sizes, strides);
}
return success();
}
@@ -1340,9 +1340,9 @@ class DynamicSliceConverter : public OpConversionPattern<mhlo::DynamicSliceOp> {
this->typeConverter->convertType(dynamic_slice_op.getType())
.cast<RankedTensorType>();

- rewriter.replaceOpWithNewOp<SubTensorOp>(dynamic_slice_op, result_type,
- adaptor.operand(), start_indices,
- sizes, strides);
+ rewriter.replaceOpWithNewOp<tensor::ExtractSliceOp>(
+ dynamic_slice_op, result_type, adaptor.operand(), start_indices, sizes,
+ strides);
return success();
}
};
@@ -1410,7 +1410,7 @@ class DynamicUpdateSliceConverter

int64_t rank = operand_type.getRank();
SmallVector<OpFoldResult, 3> strides(rank, rewriter.getI64IntegerAttr(1));
- rewriter.replaceOpWithNewOp<SubTensorInsertOp>(
+ rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
op, adaptor.update(), adaptor.operand(), start_indices, sizes, strides);
return success();
}
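
The substantive change in this file is a rename in upstream MLIR: the standalone SubTensorOp and SubTensorInsertOp became tensor::ExtractSliceOp and tensor::InsertSliceOp in the tensor dialect, so the converters above only swap the op class they instantiate. As a hedged illustration (not code from this commit; the helper name and shapes are made up), building the renamed extract-slice op through an OpBuilder looks roughly like this:

    // Illustrative sketch: extract row `row` of a 2-D tensor as a 1 x numCols
    // slice using the op these converters now emit.
    #include "mlir/Dialect/Tensor/IR/Tensor.h"
    #include "mlir/IR/Builders.h"

    mlir::Value extractRow(mlir::OpBuilder &b, mlir::Location loc,
                           mlir::Value source2d, int64_t row, int64_t numCols) {
      // Offsets/sizes/strides are OpFoldResults, i.e. either attributes
      // (static) or SSA values (dynamic), exactly as in the converters above.
      llvm::SmallVector<mlir::OpFoldResult> offsets = {b.getIndexAttr(row),
                                                       b.getIndexAttr(0)};
      llvm::SmallVector<mlir::OpFoldResult> sizes = {b.getIndexAttr(1),
                                                     b.getIndexAttr(numCols)};
      llvm::SmallVector<mlir::OpFoldResult> strides = {b.getIndexAttr(1),
                                                       b.getIndexAttr(1)};
      // This op was spelled `SubTensorOp` before this integrate; its semantics
      // are unchanged.
      return b.create<mlir::tensor::ExtractSliceOp>(loc, source2d, offsets,
                                                    sizes, strides)
          .getResult();
    }
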
14 changes: 7 additions & 7 deletions tensorflow/compiler/mlir/hlo/tests/hlo-legalize-to-linalg.mlir
@@ -1762,7 +1762,7 @@ func @slice_whole_stride(%arg0: tensor<3x4xi32>) -> tensor<1x4xi32> {
return %0 : tensor<1x4xi32>
}
// CHECK-LABEL: func @slice_whole_stride
- // CHECK: subtensor %{{.*}}[1, 0] [1, 4] [1, 1] : tensor<3x4xi32> to tensor<1x4xi32>
+ // CHECK: tensor.extract_slice %{{.*}}[1, 0] [1, 4] [1, 1] : tensor<3x4xi32> to tensor<1x4xi32>

// -----

@@ -1775,7 +1775,7 @@ func @slice_stride_part(%arg0: tensor<3x4xi32>) -> tensor<1x2xi32> {
return %0 : tensor<1x2xi32>
}
// CHECK-LABEL: func @slice_stride_part
- // CHECK: subtensor %{{.*}}[1, 1] [1, 2] [1, 1] : tensor<3x4xi32> to tensor<1x2xi32>
+ // CHECK: tensor.extract_slice %{{.*}}[1, 1] [1, 2] [1, 1] : tensor<3x4xi32> to tensor<1x2xi32>

// -----

@@ -1804,7 +1804,7 @@ func @dynamic_slice(%arg: tensor<3x4xf32>, %start1: tensor<i64>, %start2: tensor
// CHECK: %[[COND4:.*]] = cmpi sgt, %[[T2]], %[[C0]] : i64
// CHECK: %[[CLAMPED2:.*]] = select %[[COND4]], %[[T2]], %[[C0]] : i64
// CHECK: %[[START2:.*]] = index_cast %[[CLAMPED2]] : i64 to index
- // CHECK: subtensor %[[ARG0]][%[[START1]], %[[START2]]] [1, 4] [1, 1]
+ // CHECK: tensor.extract_slice %[[ARG0]][%[[START1]], %[[START2]]] [1, 4] [1, 1]

// -----

@@ -1835,7 +1835,7 @@ func @dynamic_slice_unsigned(%arg: tensor<3x4xui32>, %start1: tensor<i64>, %star
// CHECK: %[[COND4:.*]] = cmpi sgt, %[[T2]], %[[C0]] : i64
// CHECK: %[[CLAMPED2:.*]] = select %[[COND4]], %[[T2]], %[[C0]] : i64
// CHECK: %[[START2:.*]] = index_cast %[[CLAMPED2]] : i64 to index
- // CHECK: subtensor %[[SIGNLESS_ARG0]][%[[START1]], %[[START2]]] [1, 4] [1, 1]
+ // CHECK: tensor.extract_slice %[[SIGNLESS_ARG0]][%[[START1]], %[[START2]]] [1, 4] [1, 1]

// -----

@@ -1863,7 +1863,7 @@ func @dynamic_update_slice(%target: tensor<3x3xi32>, %update: tensor<2x2xi32>, %
// CHECK: %[[COND4:.*]] = cmpi sgt, %[[T2]], %[[C0]] : i32
// CHECK: %[[CLAMPED2:.*]] = select %[[COND4]], %[[T2]], %[[C0]] : i32
// CHECK: %[[START2:.*]] = index_cast %[[CLAMPED2]] : i32 to index
- // CHECK: %[[RES:.*]] = subtensor_insert %[[ARG1]] into %[[ARG0]]
+ // CHECK: %[[RES:.*]] = tensor.insert_slice %[[ARG1]] into %[[ARG0]]
// CHECK-SAME: [%[[START1]], %[[START2]]] [2, 2] [1, 1]
// CHECK-SAME: : tensor<2x2xi32> into tensor<3x3xi32>
// CHECK: return %[[RES]] : tensor<3x3xi32>
@@ -1896,7 +1896,7 @@ func @dynamic_update_slice_unsigned(%target: tensor<3x3xui32>, %update: tensor<2
// CHECK: %[[COND4:.*]] = cmpi sgt, %[[T2]], %[[C0]] : i32
// CHECK: %[[CLAMPED2:.*]] = select %[[COND4]], %[[T2]], %[[C0]] : i32
// CHECK: %[[START2:.*]] = index_cast %[[CLAMPED2]] : i32 to index
- // CHECK: %[[SIGNLESS_RES:.*]] = subtensor_insert %[[SIGNLESS_UPDATE]] into %[[SIGNLESS_TARGET]]
+ // CHECK: %[[SIGNLESS_RES:.*]] = tensor.insert_slice %[[SIGNLESS_UPDATE]] into %[[SIGNLESS_TARGET]]
// CHECK-SAME: [%[[START1]], %[[START2]]] [2, 2] [1, 1]
// CHECK-SAME: : tensor<2x2xi32> into tensor<3x3xi32>
// CHECK: %[[RES:.*]] = unrealized_conversion_cast %[[SIGNLESS_RES]] : tensor<3x3xi32> to tensor<3x3xui32>
@@ -1930,7 +1930,7 @@ func @dynamic_update_slice_float(%target: tensor<3x3xf32>,
// CHECK: %[[COND4:.*]] = cmpi sgt, %[[T2]], %[[C0]] : i32
// CHECK: %[[CLAMPED2:.*]] = select %[[COND4]], %[[T2]], %[[C0]] : i32
// CHECK: %[[START2:.*]] = index_cast %[[CLAMPED2]] : i32 to index
- // CHECK: %[[RES:.*]] = subtensor_insert %[[ARG1]] into %[[ARG0]]
+ // CHECK: %[[RES:.*]] = tensor.insert_slice %[[ARG1]] into %[[ARG0]]
// CHECK-SAME: [%[[START1]], %[[START2]]] [2, 2] [1, 1]
// CHECK-SAME: : tensor<2x2xf32> into tensor<3x3xf32>
// CHECK: return %[[RES]] : tensor<3x3xf32>
1 change: 1 addition & 0 deletions tensorflow/compiler/mlir/lite/BUILD
@@ -492,6 +492,7 @@ tf_cc_test(
"@llvm-project//mlir:IR",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
+ "@llvm-project//mlir:TensorDialect",
],
)

5 changes: 3 additions & 2 deletions tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
@@ -25,6 +25,7 @@ limitations under the License.
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project
+ #include "mlir/Dialect/Tensor/IR/Tensor.h" // from @llvm-project
#include "mlir/IR/Attributes.h" // from @llvm-project
#include "mlir/IR/Builders.h" // from @llvm-project
#include "mlir/IR/BuiltinOps.h" // from @llvm-project
@@ -93,8 +94,8 @@ class LstmUtilsTest : public ::testing::Test {

void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
- context_->loadDialect<mlir::StandardOpsDialect, mlir::TF::TensorFlowDialect,
- TensorFlowLiteDialect>();
+ context_->loadDialect<mlir::StandardOpsDialect, tensor::TensorDialect,
+ mlir::TF::TensorFlowDialect, TensorFlowLiteDialect>();
builder_ = std::unique_ptr<mlir::Builder>(new Builder(context_.get()));
fused_lstm_func_ = createLstmCompositeFunc(builder_.get(), false, false);
fused_lstm_func_cifg_ =
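
The fixture change above follows the usual MLIRContext setup rule for unit tests: every dialect whose ops the test constructs by hand must be loaded up front, and tensor::TensorDialect is added here presumably because, at this LLVM revision, ops used by the LSTM utilities now live in the tensor dialect. A hedged sketch of that idiom (the helper name is illustrative):

    #include <memory>

    #include "mlir/Dialect/StandardOps/IR/Ops.h"
    #include "mlir/Dialect/Tensor/IR/Tensor.h"
    #include "mlir/IR/MLIRContext.h"

    // Illustrative helper: any dialect not loaded here would make a later
    // builder.create<...> of one of its ops fail at runtime in the test.
    std::unique_ptr<mlir::MLIRContext> makeTestContext() {
      auto context = std::make_unique<mlir::MLIRContext>();
      context->loadDialect<mlir::StandardOpsDialect,
                           mlir::tensor::TensorDialect>();
      return context;
    }
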
@@ -36,6 +36,10 @@ namespace {

struct FunctionalControlFlowToCFG
: public PassWrapper<FunctionalControlFlowToCFG, FunctionPass> {
+ void getDependentDialects(mlir::DialectRegistry& registry) const override {
+ registry.insert<tensor::TensorDialect>();
+ }

void runOnFunction() override;
};
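
The override added above exists because this pass now creates tensor-dialect ops while lowering functional control flow; a pass must declare every dialect whose ops it may create so the pass manager loads that dialect before the pass runs (the declarative `dependentDialects` field added to the TableGen pass definition later in this commit serves the same purpose). A hedged sketch of the idiom, with an illustrative pass name:

    #include "mlir/Dialect/Tensor/IR/Tensor.h"
    #include "mlir/IR/Dialect.h"
    #include "mlir/Pass/Pass.h"

    struct MyLoweringPass
        : public mlir::PassWrapper<MyLoweringPass, mlir::FunctionPass> {
      // Register every dialect whose ops this pass may build; without this,
      // creating such an op in a fresh context could fail at runtime.
      void getDependentDialects(mlir::DialectRegistry &registry) const override {
        registry.insert<mlir::tensor::TensorDialect>();
      }
      void runOnFunction() override {
        // ... lowering logic that may build tensor-dialect ops goes here ...
      }
    };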

@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_TF_PASS_DETAIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_TF_PASS_DETAIL_H_

+ #include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/Pass/Pass.h"

@@ -890,6 +890,7 @@ def TensorArrayOpsDecompositionPass : Pass<"tf-tensor-array-ops-decomposition",
or that can be inferred from a later write, and 3) all elements have the same
shape.
}];
+ let dependentDialects = ["tensor::TensorDialect"];

let constructor = "TF::CreateTensorArrayOpsDecompositionPass()";
}
@@ -251,10 +251,10 @@ func @tensor_reshape(%t : tensor<1x2x2xf32>) -> tensor<4xf32> {
return %result : tensor<4xf32>
}

- // CHECK-LABEL: @subtensor
+ // CHECK-LABEL: @slice
// CHECK-SAME: (%[[T:.*]]: memref<3xi32>)
- func @subtensor(%t : tensor<3xi32>) -> tensor<1xi32> {
+ func @slice(%t : tensor<3xi32>) -> tensor<1xi32> {
// CHECK: memref.subview %[[T]][0] [1] [1] : memref<3xi32> to memref<1xi32>
- %result = subtensor %t[0] [1] [1] : tensor<3xi32> to tensor<1xi32>
+ %result = tensor.extract_slice %t[0] [1] [1] : tensor<3xi32> to tensor<1xi32>
return %result : tensor<1xi32>
}
@@ -120,7 +120,7 @@ struct ComputeOpAndFuncBufferizePass
memref::MemRefDialect, StandardOpsDialect,
tensor::TensorDialect, math::MathDialect>();
target.addIllegalDialect<mhlo::MhloDialect>();
- target.addIllegalOp<SubTensorOp, SubTensorInsertOp>();
+ target.addIllegalOp<tensor::ExtractSliceOp, tensor::InsertSliceOp>();

CustomBufferizeTypeConverter converter;
// Configure bufferize pattern for functions and lhlo.
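
In the bufferization hunk above, the renamed ops are marked explicitly illegal even though the tensor dialect as a whole stays legal, so any remaining tensor.extract_slice / tensor.insert_slice must be rewritten by the bufferization patterns rather than survive to the output. A hedged, cut-down sketch of that conversion-target setup (pattern population elided; the function name is illustrative):

    #include "mlir/Dialect/MemRef/IR/MemRef.h"
    #include "mlir/Dialect/Tensor/IR/Tensor.h"
    #include "mlir/Transforms/DialectConversion.h"

    // Illustrative only: declare which ops bufferization must eliminate.
    void configureBufferizeTarget(mlir::ConversionTarget &target) {
      target.addLegalDialect<mlir::memref::MemRefDialect,
                             mlir::tensor::TensorDialect>();
      // Legal dialect, but these two ops still have to be converted; they were
      // named SubTensorOp / SubTensorInsertOp before this integrate.
      target.addIllegalOp<mlir::tensor::ExtractSliceOp,
                          mlir::tensor::InsertSliceOp>();
    }
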
4 changes: 2 additions & 2 deletions third_party/llvm/workspace.bzl
@@ -4,8 +4,8 @@ load("//third_party:repo.bzl", "tf_http_archive")

def repo(name):
"""Imports LLVM."""
- LLVM_COMMIT = "b650778dc4acbab8a5415e743604a2a0afabea3d"
- LLVM_SHA256 = "e406c4f09390dbfc534cad73122345b0b7e678968d911c1e97823457f9d7ac70"
+ LLVM_COMMIT = "186f2ac612ad3cd551dee649e3097f4284774ba0"
+ LLVM_SHA256 = "85807f6c21cc5a79584e0d80d57292e3b001e2c96a784d00a0418918fae616c9"

tf_http_archive(
name = name,
7 changes: 4 additions & 3 deletions third_party/mlir/BUILD
@@ -1541,7 +1541,6 @@ td_library(
":OpBaseTdFiles",
":SideEffectInterfacesTdFiles",
":VectorInterfacesTdFiles",
- ":ViewLikeInterfaceTdFiles",
],
)

@@ -2175,9 +2174,7 @@ cc_library(
":SideEffectInterfaces",
":StandardOpsIncGen",
":Support",
- ":TensorDialect",
":VectorInterfaces",
- ":ViewLikeInterface",
"@llvm-project//llvm:Support",
],
)
@@ -3606,6 +3603,7 @@ td_library(
":ControlFlowInterfacesTdFiles",
":OpBaseTdFiles",
":SideEffectInterfacesTdFiles",
+ ":ViewLikeInterfaceTdFiles",
],
)

@@ -3649,8 +3647,10 @@ cc_library(
":ControlFlowInterfaces",
":IR",
":SideEffectInterfaces",
+ ":StandardOps",
":Support",
":TensorOpsIncGen",
+ ":ViewLikeInterface",
"@llvm-project//llvm:Support",
],
)
@@ -6009,6 +6009,7 @@ cc_library(
":QuantOps",
":SideEffectInterfaces",
":StandardOps",
+ ":TensorDialect",
":TosaDialectIncGen",
":TosaInterfacesIncGen",
":TosaPassIncGen",
