Commit 20f510c

Merge branch 'master' into resolve-exhaustive-test-compile-issue

DavidNorman committed Jul 12, 2019
2 parents 1ba4fd1 + 43dcd3d
Showing 604 changed files with 18,713 additions and 8,026 deletions.
1 change: 1 addition & 0 deletions .bazelrc
@@ -40,6 +40,7 @@ build:mkl -c opt
# This config option is used to enable MKL-DNN open source library only,
# without depending on MKL binary version.
build:mkl_open_source_only --define=build_with_mkl_dnn_only=true
build:mkl_open_source_only --define=build_with_mkl_dnn_v1_only=true
build:mkl_open_source_only --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=0
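For context, these config groups take effect when opted into on the command line; with the new define, an invocation like the following (illustrative, not part of this commit) now also sets build_with_mkl_dnn_v1_only:

bazel build --config=mkl_open_source_only //tensorflow/tools/pip_package:build_pip_package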

271 changes: 271 additions & 0 deletions RELEASE.md

Large diffs are not rendered by default.

36 changes: 20 additions & 16 deletions tensorflow/api_template.__init__.py
@@ -41,22 +41,30 @@

# API IMPORTS PLACEHOLDER

# WRAPPER_PLACEHOLDER

# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_API_MODULE = _sys.modules[__name__].bitwise
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
_current_module = _sys.modules[__name__]

if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)

# Hook external TensorFlow modules.

# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules
_current_module.compat.v2.compat.v1 = _current_module.compat.v1
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.summary API due to missing TensorBoard installation.")
@@ -65,13 +73,15 @@
from tensorflow_estimator.python.estimator.api._v2 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass

try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
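
The summary, estimator and keras hooks above all follow one pattern; here is a minimal, self-contained sketch of it (hypothetical package names, not TensorFlow's actual code). The setattr call matters because this module object may later be replaced by a deprecation wrapper (substituted at WRAPPER_PLACEHOLDER), so a bare name binding would not be visible through the wrapper:

import os as _os
import sys as _sys

_current_module = _sys.modules[__name__]
try:
    # Hypothetical optional dependency, standing in for estimator/keras/summary.
    from some_optional_pkg.api import submodule
    # Splice the package's parent directory into __path__ so that
    # "from tensorflow.submodule import thing" resolves through this module.
    _parent = _os.path.dirname(_os.path.dirname(submodule.__file__))
    _current_module.__path__ = [_parent] + list(
        getattr(_current_module, '__path__', []))
    # Bind on the module object so the attribute survives module wrapping.
    setattr(_current_module, 'submodule', submodule)
except ImportError:
    pass  # Degrade gracefully when the optional dependency is absent.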

@@ -122,32 +132,26 @@ def _running_from_pip_package():
# pylint: disable=undefined-variable
try:
del python
if '__all__' in vars():
vars()['__all__'].remove('python')
except NameError:
pass
try:
del core
if '__all__' in vars():
vars()['__all__'].remove('core')
except NameError:
# Don't fail if these modules are not available.
# E.g., this file is originally placed under tensorflow/_api/v1, which
# does not have 'python' and 'core' directories; it is then copied
# to tensorflow/, which does have these two directories.
pass
# Similarly for compiler. Do it separately to make sure we do this even if the
# others don't exist.
try:
del compiler
if '__all__' in vars():
vars()['__all__'].remove('compiler')
except NameError:
pass
# pylint: enable=undefined-variable

# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers

compat.v2.compat.v1 = compat.v1
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
# pylint: enable=undefined-variable
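With these aliases in place (and assuming the keras import above succeeded), tf.losses, tf.metrics, tf.optimizers and tf.initializers all resolve to the corresponding tf.keras submodules, e.g. tf.losses is tf.keras.losses.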
28 changes: 11 additions & 17 deletions tensorflow/api_template_v1.__init__.py
@@ -30,10 +30,12 @@

# API IMPORTS PLACEHOLDER

# WRAPPER_PLACEHOLDER

# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = bitwise # pylint: disable=undefined-variable
_API_MODULE = _sys.modules[__name__].bitwise # pylint: disable=undefined-variable
_current_module = _sys.modules[__name__]
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
if not hasattr(_current_module, '__path__'):
@@ -46,13 +48,15 @@
from tensorflow_estimator.python.estimator.api._v1 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass

try:
from tensorflow.python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass

@@ -77,9 +81,8 @@

from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
# The 'app' module will be imported as part of the placeholder section above.
app.flags = flags # pylint: disable=undefined-variable
if '__all__' in vars():
vars()['__all__'].append('flags')
_current_module.app.flags = flags # pylint: disable=undefined-variable
setattr(_current_module, "flags", flags)

# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
@@ -122,25 +125,16 @@ def _running_from_pip_package():
# pylint: disable=undefined-variable
try:
del python
if '__all__' in vars():
vars()['__all__'].remove('python')
except NameError:
pass
try:
del core
if '__all__' in vars():
vars()['__all__'].remove('core')
except NameError:
# Don't fail if these modules are not available.
# E.g., this file is originally placed under tensorflow/_api/v1, which
# does not have 'python' and 'core' directories; it is then copied
# to tensorflow/, which does have these two directories.
pass
# Similarly for compiler. Do it separately to make sure we do this even if the
# others don't exist.
try:
del compiler
if '__all__' in vars():
vars()['__all__'].remove('compiler')
except NameError:
pass

compat.v2.compat.v1 = compat.v1
_current_module.compat.v2.compat.v1 = _current_module.compat.v1
# pylint: enable=undefined-variable
1 change: 1 addition & 0 deletions tensorflow/c/eager/BUILD
@@ -64,6 +64,7 @@ tf_cuda_library(
}) + [
"@com_google_absl//absl/memory",
"//tensorflow/core/common_runtime/eager:eager_operation",
"//tensorflow/core/distributed_runtime/eager:remote_mgr",
"//tensorflow/core/distributed_runtime/eager:eager_client",
"//tensorflow/core/distributed_runtime/rpc/eager:grpc_eager_client",
"//tensorflow/core/distributed_runtime/rpc:grpc_channel",
5 changes: 4 additions & 1 deletion tensorflow/c/eager/c_api.cc
@@ -54,6 +54,7 @@ limitations under the License.
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#endif // !IS_MOBILE_PLATFORM
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/rendezvous.h"
@@ -260,12 +261,14 @@ tensorflow::Status UpdateTFE_ContextWithServerDef(
TF_RETURN_IF_ERROR(r->Initialize(worker_session.get()));

auto* device_mgr = grpc_server->worker_env()->device_mgr;
auto remote_mgr =
absl::make_unique<tensorflow::eager::RemoteMgr>(/*is_master=*/true);

return ctx->context->InitializeRemoteMaster(
std::move(server), grpc_server->worker_env(), worker_session,
std::move(remote_eager_workers), std::move(remote_device_mgr),
remote_workers, context_id, r, device_mgr, keep_alive_secs,
worker_session->cluster_flr.get());
worker_session->cluster_flr.get(), std::move(remote_mgr));
#undef LOG_AND_RETURN_IF_ERROR
}
#endif // !IS_MOBILE_PLATFORM
33 changes: 19 additions & 14 deletions tensorflow/c/eager/tape.h
@@ -890,33 +890,38 @@ ForwardAccumulator<Gradient, BackwardFunction, TapeTensor>::ForwardpropFromTape(
// Stop the tape from recording
pop_backward_tape.release()();

if (grad.size() != in_grads.size()) {
return tensorflow::errors::Internal("Wrong number of gradients returned.");
}

std::vector<int64> targets;
std::vector<Gradient*> used_in_grads;
// We may end up with slightly fewer elements than we reserve, but grad.size()
// should be a reasonably tight upper bound.
targets.reserve(grad.size());
used_in_grads.reserve(grad.size());
std::unordered_map<int64, TapeTensor> sources_that_are_targets;
for (Gradient* grad_tensor : grad) {
for (int grad_index = 0; grad_index < grad.size(); ++grad_index) {
Gradient* grad_tensor = grad[grad_index];
if (grad_tensor != nullptr) {
int64 tensor_id = vspace_.TensorId(grad_tensor);
targets.push_back(tensor_id);
if (sources_set.find(tensor_id) != sources_set.end()) {
sources_that_are_targets.emplace(
tensor_id, vspace_.TapeTensorFromGradient(grad_tensor));
}
}
}
if (targets.size() > in_grads.size()) {
return tensorflow::errors::Internal("Too many gradients returned.");
}

for (int target_index = 0; target_index < targets.size(); ++target_index) {
Gradient* in_grad = in_grads[target_index];
Gradient* grad_tensor = grad[target_index];
if (grad_tensor != nullptr && in_grad != nullptr) {
// ComputeGradient steals a reference
vspace_.MarkAsResult(in_grad);
Gradient* in_grad = in_grads[grad_index];
if (in_grad != nullptr) {
// ComputeGradient steals a reference
vspace_.MarkAsResult(in_grad);
}
used_in_grads.push_back(in_grad);
}
}

return tape->ComputeGradient(vspace_, targets, sources,
sources_that_are_targets, in_grads, out_grads);
sources_that_are_targets, used_in_grads,
out_grads);
}

template <typename Gradient, typename BackwardFunction, typename TapeTensor>
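
The reworked loop above keeps targets and used_in_grads index-aligned by filtering both lists in the same pass over grad, instead of matching them up in a second loop afterwards. A rough Python rendering of that control flow (hypothetical names, not the real API):

def align_targets_and_grads(grads, in_grads, tensor_id):
    # Mirrors the new "Wrong number of gradients returned." precondition.
    assert len(grads) == len(in_grads)
    targets, used_in_grads = [], []
    for i, grad_tensor in enumerate(grads):
        if grad_tensor is not None:
            # Append to both lists in the same branch so that targets[j] and
            # used_in_grads[j] always refer to the same tensor.
            targets.append(tensor_id(grad_tensor))
            used_in_grads.append(in_grads[i])  # May itself be None.
    return targets, used_in_grads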
12 changes: 11 additions & 1 deletion tensorflow/compat_template.__init__.py
@@ -28,12 +28,16 @@

# API IMPORTS PLACEHOLDER

# WRAPPER_PLACEHOLDER

# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
# Make sure we get the correct summary module with lazy loading
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.compat.v2.summary API due to missing TensorBoard "
@@ -43,13 +47,15 @@
from tensorflow_estimator.python.estimator.api._v2 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass

try:
from tensorflow.python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass

@@ -61,11 +67,15 @@
#
# This makes this one symbol available directly.
from tensorflow.python.compat.v2_compat import enable_v2_behavior # pylint: disable=g-import-not-at-top
setattr(_current_module, "enable_v2_behavior", enable_v2_behavior)

# Add module aliases
_current_module = _sys.modules[__name__]
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
7 changes: 6 additions & 1 deletion tensorflow/compat_template_v1.__init__.py
@@ -27,22 +27,27 @@

# API IMPORTS PLACEHOLDER

# WRAPPER_PLACEHOLDER

# Hook external TensorFlow modules.
_current_module = _sys.modules[__name__]
try:
from tensorflow_estimator.python.estimator.api._v1 import estimator
_current_module.__path__ = (
[_module_util.get_parent_dir(estimator)] + _current_module.__path__)
setattr(_current_module, "estimator", estimator)
except ImportError:
pass

try:
from tensorflow.python.keras.api._v1 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass


from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
app.flags = flags # pylint: disable=undefined-variable
_current_module.app.flags = flags # pylint: disable=undefined-variable
setattr(_current_module, "flags", flags)
9 changes: 2 additions & 7 deletions tensorflow/compiler/aot/tfcompile.bzl
@@ -22,6 +22,7 @@ load(
"tf_cc_test",
"tf_copts",
)
load("//tensorflow:tensorflow.bzl", "tfcompile_extra_flags")

def tf_library(
name,
@@ -180,13 +181,7 @@ def tf_library(
# `find` on such an object.
need_xla_data_proto = flags and flags.find("--gen_program_shape") != -1

# Pass --target_cpu=haswell to tfcompile if compiling for Haswell (bazel
# build --cpu=haswell). We put it at the beginning of the flags list so
# that tfcompile_flags can override it if desired.
flags = select({
"//tools/target_cpu:haswell": "--target_cpu=haswell ",
"//conditions:default": "",
}) + flags
flags = tfcompile_extra_flags() + flags

if enable_xla_hlo_profiling:
profiling_flag = "--xla_hlo_profile"
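The removed select() is not lost: tfcompile_extra_flags(), loaded from tensorflow.bzl above, now supplies the same kind of prepended defaults (such as --target_cpu=haswell for --cpu=haswell builds), still ahead of the user-supplied flags so tfcompile_flags can override them; centralizing this in one macro presumably makes the defaults easier to customize in one place.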
2 changes: 0 additions & 2 deletions tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
@@ -1087,8 +1087,6 @@ Status Encapsulator::MakePrunedGraphCopyAndInline(
FunctionDefToBodyHelper(*fdef, node->attrs(), library, &fbody));

InlineFunctionBodyOptions inline_opts;
inline_opts.override_device = false;

TF_RETURN_IF_ERROR(InlineFunctionBody(*library, pruned_graph->get(), node,
fbody.get(), inline_opts));
}
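Dropping the explicit inline_opts.override_device = false presumably just removes a restatement of InlineFunctionBodyOptions' default, leaving inlining behavior unchanged.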
3 changes: 3 additions & 0 deletions tensorflow/compiler/jit/xla_device_ops.h
@@ -183,6 +183,9 @@ class XlaAssignVariableOp : public OpKernel {
REGISTER_KERNEL_BUILDER( \
Name("AnonymousIteratorV2").Device(DEVICE).HostMemory("deleter"), \
data::AnonymousIteratorHandleOp); \
REGISTER_KERNEL_BUILDER( \
Name("DeleteIterator").Device(DEVICE).HostMemory("deleter"), \
data::DeleteIteratorOp); \
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE), \
data::IteratorGetNextOp); \
REGISTER_KERNEL_BUILDER(Name("IteratorGetNextAsOptional").Device(DEVICE), \
9 changes: 8 additions & 1 deletion tensorflow/compiler/mlir/lite/BUILD
@@ -462,9 +462,16 @@ cc_library(
alwayslink = 1,
)

filegroup(
name = "tf_tfl_translate_main",
srcs = [
"tf_tfl_translate.cc",
],
)

tf_cc_binary(
name = "tf_tfl_translate",
srcs = ["tf_tfl_translate.cc"],
srcs = [":tf_tfl_translate_main"],
deps = [
":flatbuffer_translate_lib",
":tensorflow_lite",
