
Merge pull request #2 from tensorflow/master
follow tf branch
zhizunbao-y committed Sep 6, 2019
2 parents: d4e50e0 + 8b330d2 · commit 6a0427d
Showing 779 changed files with 33,675 additions and 7,277 deletions.
22 changes: 11 additions & 11 deletions WORKSPACE
@@ -49,34 +49,34 @@ remote_config_workspace()
# Apple and Swift rules.
http_archive(
name = "build_bazel_rules_apple",
sha256 = "6efdde60c91724a2be7f89b0c0a64f01138a45e63ba5add2dca2645d981d23a1",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.17.2/rules_apple.0.17.2.tar.gz"],
sha256 = "a045a436b642c70fb0c10ca84ff0fd2dcbd59cc89100d597a61e8374afafb366",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.18.0/rules_apple.0.18.0.tar.gz"],
) # https://github.com/bazelbuild/rules_apple/releases
http_archive(
name = "build_bazel_rules_swift",
sha256 = "96a86afcbdab215f8363e65a10cf023b752e90b23abf02272c4fc668fcb70311",
urls = ["https://github.com/bazelbuild/rules_swift/releases/download/0.11.1/rules_swift.0.11.1.tar.gz"],
sha256 = "18cd4df4e410b0439a4935f9ca035bd979993d42372ba79e7f2d4fafe9596ef0",
urls = ["https://github.com/bazelbuild/rules_swift/releases/download/0.12.1/rules_swift.0.12.1.tar.gz"],
) # https://github.com/bazelbuild/rules_swift/releases
http_archive(
name = "build_bazel_apple_support",
sha256 = "7356dbd44dea71570a929d1d4731e870622151a5f27164d966dda97305f33471",
urls = ["https://github.com/bazelbuild/apple_support/releases/download/0.6.0/apple_support.0.6.0.tar.gz"],
sha256 = "122ebf7fe7d1c8e938af6aeaee0efe788a3a2449ece5a8d6a428cb18d6f88033",
urls = ["https://github.com/bazelbuild/apple_support/releases/download/0.7.1/apple_support.0.7.1.tar.gz"],
) # https://github.com/bazelbuild/apple_support/releases
http_archive(
name = "bazel_skylib",
sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz"],
sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel-skylib.0.9.0.tar.gz"],
) # https://github.com/bazelbuild/bazel-skylib/releases
http_archive(
name = "com_github_apple_swift_swift_protobuf",
type = "zip",
strip_prefix = "swift-protobuf-1.5.0/",
urls = ["https://github.com/apple/swift-protobuf/archive/1.5.0.zip"],
strip_prefix = "swift-protobuf-1.6.0/",
urls = ["https://github.com/apple/swift-protobuf/archive/1.6.0.zip"],
) # https://github.com/apple/swift-protobuf/releases
http_file(
name = "xctestrunner",
executable = 1,
urls = ["https://github.com/google/xctestrunner/releases/download/0.2.7/ios_test_runner.par"],
urls = ["https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par"],
) # https://github.com/google/xctestrunner/releases
# Use `swift_rules_dependencies` to fetch the toolchains. With the
# `git_repository` rules above, the following call will skip redefining them.
16 changes: 8 additions & 8 deletions arm_compiler.BUILD
@@ -3,56 +3,56 @@ package(default_visibility = ["//visibility:public"])
filegroup(
name = "gcc",
srcs = [
"bin/arm-linux-gnueabihf-gcc",
"bin/arm-rpi-linux-gnueabihf-gcc",
],
)

filegroup(
name = "ar",
srcs = [
"bin/arm-linux-gnueabihf-ar",
"bin/arm-rpi-linux-gnueabihf-ar",
],
)

filegroup(
name = "ld",
srcs = [
"bin/arm-linux-gnueabihf-ld",
"bin/arm-rpi-linux-gnueabihf-ld",
],
)

filegroup(
name = "nm",
srcs = [
"bin/arm-linux-gnueabihf-nm",
"bin/arm-rpi-linux-gnueabihf-nm",
],
)

filegroup(
name = "objcopy",
srcs = [
"bin/arm-linux-gnueabihf-objcopy",
"bin/arm-rpi-linux-gnueabihf-objcopy",
],
)

filegroup(
name = "objdump",
srcs = [
"bin/arm-linux-gnueabihf-objdump",
"bin/arm-rpi-linux-gnueabihf-objdump",
],
)

filegroup(
name = "strip",
srcs = [
"bin/arm-linux-gnueabihf-strip",
"bin/arm-rpi-linux-gnueabihf-strip",
],
)

filegroup(
name = "as",
srcs = [
"bin/arm-linux-gnueabihf-as",
"bin/arm-rpi-linux-gnueabihf-as",
],
)

19 changes: 0 additions & 19 deletions tensorflow/api_template.__init__.py
@@ -125,25 +125,6 @@ def _running_from_pip_package():
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)

-# These symbols appear because we import the python package which
-# in turn imports from tensorflow.core and tensorflow.python. They
-# must come from this module. So python adds these symbols for the
-# resolution to succeed.
-# pylint: disable=undefined-variable
-try:
-del python
-except NameError:
-pass
-try:
-del core
-except NameError:
-pass
-try:
-del compiler
-except NameError:
-pass
-# pylint: enable=undefined-variable

# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
19 changes: 0 additions & 19 deletions tensorflow/api_template_v1.__init__.py
@@ -138,22 +138,3 @@ def _running_from_pip_package():
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)

-# These symbols appear because we import the python package which
-# in turn imports from tensorflow.core and tensorflow.python. They
-# must come from this module. So python adds these symbols for the
-# resolution to succeed.
-# pylint: disable=undefined-variable
-try:
-del python
-except NameError:
-pass
-try:
-del core
-except NameError:
-pass
-try:
-del compiler
-except NameError:
-pass
-
-# pylint: enable=undefined-variable
1 change: 1 addition & 0 deletions tensorflow/c/BUILD
@@ -270,6 +270,7 @@ tf_cuda_library(
"//tensorflow/core/platform",
"@com_google_absl//absl/strings",
],
+alwayslink = 1,
)

exports_files(
2 changes: 2 additions & 0 deletions tensorflow/c/eager/BUILD
@@ -80,6 +80,7 @@ tf_cuda_library(
"//tensorflow/core/profiler/lib:profiler_session",
"//tensorflow/core:gpu_runtime",
],
+alwayslink = 1,
)

tf_cuda_library(
@@ -227,6 +228,7 @@ tf_cuda_library(
"//tensorflow/core/profiler/rpc/client:capture_profile",
"//tensorflow/core:gpu_runtime",
],
+alwayslink = 1,
)

tf_cuda_cc_test(
5 changes: 0 additions & 5 deletions tensorflow/c/eager/c_api_experimental_test.cc
@@ -84,11 +84,6 @@ void ExecuteWithProfiling(bool async) {
string profile_proto_str = profile_proto.DebugString();
if (!gpu_device_name.empty()) {
EXPECT_TRUE(HasSubstr(profile_proto_str, "/device:GPU:0"));
-// device name with "stream:all" is collected by Device Tracer.
-#ifndef TENSORFLOW_USE_ROCM
-// ROCm platform does not yet support stream level tracing
-EXPECT_TRUE(HasSubstr(profile_proto_str, "stream:all"));
-#endif
}
// "/host:CPU" is collected by TraceMe
EXPECT_TRUE(HasSubstr(profile_proto_str, "/host:CPU"));
5 changes: 5 additions & 0 deletions tensorflow/c/eager/tape.h
@@ -922,6 +922,11 @@ ForwardAccumulator<Gradient, BackwardFunction, TapeTensor>::ForwardpropFromTape(
for (const TapeTensor& output_tensor : output_tensors) {
// Ownership of `aid` transferred to CallBackwardFunction below.
Gradient* aid = vspace_.Ones(output_tensor);
+if (TF_PREDICT_FALSE(aid == nullptr)) {
+return tensorflow::errors::Internal(
+"Failed to create ones tensor for tensor ", output_tensor.GetID(),
+" with dtype ", output_tensor.GetDType());
+}
forwardprop_aids.push_back(aid);
int64 aid_id = vspace_.TensorId(aid);
sources.push_back(aid_id);
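
The tape.h hunk above adds a guard so that a failed vspace_.Ones(...) call is reported as an Internal error instead of being dereferenced later. The following self-contained C++ sketch illustrates the same guard-and-early-return pattern; Status, Gradient, MakeOnes, and CollectForwardpropAids are hypothetical stand-ins for illustration only, not TensorFlow's real API.

    // Illustrative only: a factory that can fail returns null, and the caller
    // converts that into an error before the null value is ever used.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct Status {    // Hypothetical, minimal error type.
      bool ok = true;
      std::string message;
    };

    struct Gradient {  // Hypothetical stand-in for an aid tensor.
      double value = 1.0;
    };

    // Returns null to simulate a dtype for which a "ones" aid cannot be built.
    std::unique_ptr<Gradient> MakeOnes(bool supported_dtype) {
      if (!supported_dtype) return nullptr;
      return std::unique_ptr<Gradient>(new Gradient());
    }

    Status CollectForwardpropAids(const std::vector<bool>& output_dtype_ok,
                                  std::vector<std::unique_ptr<Gradient>>* aids) {
      for (size_t i = 0; i < output_dtype_ok.size(); ++i) {
        std::unique_ptr<Gradient> aid = MakeOnes(output_dtype_ok[i]);
        if (aid == nullptr) {  // The guard: fail loudly, do not dereference null.
          return {false,
                  "Failed to create ones tensor for output " + std::to_string(i)};
        }
        aids->push_back(std::move(aid));
      }
      return {};
    }

    int main() {
      std::vector<std::unique_ptr<Gradient>> aids;
      Status s = CollectForwardpropAids({true, false, true}, &aids);
      if (!s.ok) std::cout << "Error: " << s.message << "\n";
      return 0;
    }
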
1 change: 1 addition & 0 deletions tensorflow/cc/saved_model/BUILD
@@ -123,6 +123,7 @@ cc_library(
"//tensorflow/core/util/tensor_bundle:naming",
# mobile not supported yet
]),
+alwayslink = 1,
)

tf_cc_test(
28 changes: 21 additions & 7 deletions tensorflow/compiler/jit/compilability_check_util.cc
@@ -130,17 +130,24 @@ RecursiveCompilabilityChecker::FindUncompilableNodes(
return uncompilable_nodes;
}

-bool RecursiveCompilabilityChecker::HasXLAKernel(const Node& node) const {
+bool RecursiveCompilabilityChecker::HasXLAKernel(
+const Node& node, string* uncompilable_reason) const {
// There is a SymbolicGradient kernel on the XLA_JIT device, but the gradient
// is really a kind of function call and will be handled by
// IsCompilableCall().
if (node.type_string() == "SymbolicGradient") return false;
if (node.type_string() == "SymbolicGradient") {
*uncompilable_reason =
"SymbolicGradient should be handled by IsCompilableCall().";
return false;
}
if (node.type_string() == "Const") {
// Skip Const op with type DT_STRING, since XLA doesn't support it, but the
// registered Const KernelDef says that it does, to support no-op Assert for
// tfcompile.
const AttrValue* attr = node.attrs().Find("dtype");
if (attr != nullptr && attr->type() == DT_STRING) {
+*uncompilable_reason =
+"Const op with type DT_STRING is not supported by XLA.";
return false;
}
}
@@ -150,10 +157,16 @@ bool RecursiveCompilabilityChecker::HasXLAKernel(const Node& node) const {
// such nodes out of XLA clusters.
if (HasForwardedRefInput(node)) {
VLOG(2) << "Rejecting " << node.name() << ": Identity with unsafe cast.";
*uncompilable_reason = "Identity with unsafe cast.";
return false;
}

-return FindKernelDef(jit_device_type_, node.def(), nullptr, nullptr).ok();
+Status s = FindKernelDef(jit_device_type_, node.def(), nullptr, nullptr);
+if (!s.ok()) {
+*uncompilable_reason = s.error_message();
+return false;
+}
+return true;
}

// Tests whether 'if_node' is compilable. Every operator in the then_branch and
@@ -336,16 +349,17 @@ bool RecursiveCompilabilityChecker::IsCompilableNode(
return false;
}

+string uncompilable_reason;
if (IsFunctionCall(*lib_runtime->GetFunctionLibraryDefinition(), node)) {
if (!IsCompilableCall(node.def(), lib_runtime, stack_trace,
encapsulating_function, uncompilable_nodes)) {
LogNotCompilable(node, "unsupported function");
return false;
}
-} else if (!HasXLAKernel(node)) {
-absl::string_view uncompilable_reason = "unsupported op";
-MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace,
-encapsulating_function, uncompilable_nodes);
+} else if (!HasXLAKernel(node, &uncompilable_reason)) {
+MaybeMarkUncompilableNode(
+absl::StrCat("unsupported op: ", uncompilable_reason), *stack_trace,
+encapsulating_function, uncompilable_nodes);
LogNotCompilable(node, uncompilable_reason);
return false;
}
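
For context on the compilability_check_util.cc change above: HasXLAKernel() now explains why a node was rejected through an out-parameter, and the caller folds that reason into the "unsupported op" message. The short, self-contained C++ sketch below illustrates the general predicate-with-reason pattern; IsSupportedOp and the op names are hypothetical and are not TensorFlow code.

    // Illustrative only: a predicate that optionally reports why it failed.
    #include <iostream>
    #include <string>

    bool IsSupportedOp(const std::string& op_type, std::string* reason) {
      if (op_type == "SymbolicGradient") {
        if (reason != nullptr) {
          *reason = "SymbolicGradient is handled by a separate code path.";
        }
        return false;
      }
      if (op_type == "StringConst") {
        if (reason != nullptr) {
          *reason = "string constants are not supported.";
        }
        return false;
      }
      return true;  // Everything else is accepted in this toy example.
    }

    int main() {
      std::string reason;
      for (const char* op : {"MatMul", "StringConst", "SymbolicGradient"}) {
        if (!IsSupportedOp(op, &reason)) {
          // Analogous to prefixing the reason with "unsupported op: " above.
          std::cout << op << ": unsupported op: " << reason << "\n";
        } else {
          std::cout << op << ": ok\n";
        }
      }
      return 0;
    }
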
3 changes: 2 additions & 1 deletion tensorflow/compiler/jit/compilability_check_util.h
@@ -247,7 +247,8 @@ class RecursiveCompilabilityChecker {
absl::c_any_of(node.output_types(), is_variant);
}

-bool HasXLAKernel(const Node& node) const;
+bool HasXLAKernel(const Node& node,
+string* uncompilable_reason = nullptr) const;

static void MaybeMarkUncompilableNode(
const absl::string_view reason,
15 changes: 10 additions & 5 deletions tensorflow/compiler/jit/compilability_check_util_test.cc
@@ -125,7 +125,8 @@ TEST_F(CompilabilityCheckUtilTest, CheckNonFunctionalNodes) {
const auto& uncompilable_nodes_inside_function = node_info_it->second.second;
ASSERT_EQ(1, uncompilable_nodes_inside_function.size());
const auto& uncompilable_node_info = uncompilable_nodes_inside_function.at(0);
EXPECT_EQ("unsupported op", uncompilable_node_info.uncompilable_reason);
EXPECT_TRUE(absl::StrContains(uncompilable_node_info.uncompilable_reason,
"unsupported op"));
ASSERT_EQ(1, uncompilable_node_info.stack_trace.size());
ASSERT_EQ("", uncompilable_node_info.stack_trace.at(0).function_name);
}
@@ -167,7 +168,8 @@ TEST_F(CompilabilityCheckUtilTest, CheckSimpleFunctionNode) {
EXPECT_EQ("D", node_stack.at(0).name);
EXPECT_EQ(kUncompilableFunctionNodeName, node_stack.at(1).name);
EXPECT_EQ(kUncompilableFunctionNodeName, node_info.name);
EXPECT_EQ("unsupported op", node_info.uncompilable_reason);
EXPECT_TRUE(
absl::StrContains(node_info.uncompilable_reason, "unsupported op"));
}

TEST_F(CompilabilityCheckUtilTest, CheckFunctionalWhileNode) {
@@ -246,7 +248,8 @@ TEST_F(CompilabilityCheckUtilTest, CheckFunctionalWhileNode) {
stacktrace_second_node_info.function_name);

EXPECT_EQ(kUncompilableFunctionNodeName, node_info.name);
EXPECT_EQ("unsupported op", node_info.uncompilable_reason);
EXPECT_TRUE(
absl::StrContains(node_info.uncompilable_reason, "unsupported op"));
}

TEST_F(CompilabilityCheckUtilTest, CheckFunctionalIfNode) {
@@ -322,7 +325,8 @@ TEST_F(CompilabilityCheckUtilTest, CheckFunctionalIfNode) {
stacktrace_second_node_info.function_name);

EXPECT_EQ(kUncompilableFunctionNodeName, uncompilable_node_one.name);
EXPECT_EQ("unsupported op", uncompilable_node_one.uncompilable_reason);
EXPECT_TRUE(absl::StrContains(uncompilable_node_one.uncompilable_reason,
"unsupported op"));

NameAttrList function_two;
function_two.set_name(kUncompilableFunctionTwoName);
@@ -345,7 +349,8 @@ TEST_F(CompilabilityCheckUtilTest, CheckFunctionalIfNode) {
node_two_stacktrace_second_node.function_name);

EXPECT_EQ(kUncompilableFunctionNodeTwoName, uncompilable_node_two.name);
EXPECT_EQ("unsupported op", uncompilable_node_two.uncompilable_reason);
EXPECT_TRUE(absl::StrContains(uncompilable_node_one.uncompilable_reason,
"unsupported op"));
}

} // namespace
3 changes: 3 additions & 0 deletions tensorflow/compiler/jit/device_util.h
@@ -29,6 +29,9 @@ limitations under the License.

namespace tensorflow {
namespace jit {
+class DeviceInfoCache;
+class DeviceSet;
+
// Instances of DeviceId represent TensorFlow devices as integers.
//
// This helps avoid having to manipulate device names as strings when
4 changes: 2 additions & 2 deletions tensorflow/compiler/mlir/BUILD
@@ -36,7 +36,6 @@ cc_library(
cc_library(
name = "tf_mlir_opt_main",
srcs = ["tf_mlir_opt_main.cc"],
copts = ["-std=c++14"],
deps = [
":init_mlir",
"//tensorflow/compiler/mlir/lite:tensorflow_lite",
@@ -46,14 +45,15 @@ cc_library(
"//tensorflow/compiler/mlir/lite:tensorflow_lite_quantize",
"//tensorflow/compiler/mlir/tensorflow",
"//tensorflow/compiler/mlir/tensorflow:tensorflow_dialect_registration",
"//tensorflow/compiler/mlir/tensorflow:tensorflow_fold_switch",
"//tensorflow/compiler/mlir/tensorflow:tf_dialect_passes",
"//tensorflow/compiler/mlir/xla:hlo",
"//tensorflow/compiler/mlir/xla:lhlo",
"//tensorflow/compiler/mlir/xla:lhlo_legalize_to_affine",
"//tensorflow/compiler/mlir/xla:xla_dialect_registration",
"//tensorflow/compiler/mlir/xla:xla_legalize_control_flow",
"//tensorflow/compiler/mlir/xla:xla_legalize_tf",
"//tensorflow/compiler/mlir/xla:xla_legalize_to_standard",
"//tensorflow/compiler/xla/service/mlir_gpu/transforms:legalize_to_affine",
"//tensorflow/core:lib",
"//tensorflow/core/platform:logging",
"@llvm//:support",
