Skip to content

Commit

Permalink
weekly upstream merge for Dec 17, 2018
Browse files Browse the repository at this point in the history
  • Loading branch information
deven-amd committed Dec 17, 2018
2 parents 4d1b60f + 6decf08 commit 434c356
Show file tree
Hide file tree
Showing 1,199 changed files with 40,015 additions and 19,764 deletions.
9 changes: 8 additions & 1 deletion tools/bazel.rc → .bazelrc
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,6 @@ build:nonccl --define=no_nccl_support=true

build --define=use_fast_cpp_protos=true
build --define=allow_oversize_protos=true
build --define=grpc_no_ares=true

build --spawn_strategy=standalone
build --genrule_strategy=standalone
Expand All @@ -93,3 +92,11 @@ build:dynamic_kernels --copt=-DAUTOLOAD_DYNAMIC_KERNELS
build --define=PREFIX=/usr
build --define=LIBDIR=$(PREFIX)/lib
build --define=INCLUDEDIR=$(PREFIX)/include

# Default options should come above this line

# Options from ./configure
try-import %workspace%/.tf_configure.bazelrc

# Put user-specific options in .bazelrc.user
try-import %workspace%/.bazelrc.user
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
.DS_Store
.ipynb_checkpoints
node_modules
/.bazelrc
/.bazelrc.user
/.tf_configure.bazelrc
/bazel-*
/bazel_pip
Expand Down
9 changes: 5 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -131,11 +131,12 @@ The TensorFlow project strives to abide by generally accepted best practices in
Build Type | Status | Artifacts
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------
**IBM s390x** | [![Build Status](http://ibmz-ci.osuosl.org/job/TensorFlow_IBMZ_CI/badge/icon)](http://ibmz-ci.osuosl.org/job/TensorFlow_IBMZ_CI/) | TBA
**IBM ppc64le CPU** | [![Build Status](http://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Build/badge/icon)](http://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Build/) | TBA
**IBM ppc64le GPU** Nightly | [![Build Status](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Nightly_Artifact/badge/icon)](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Nightly_Artifact/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Nightly_Artifact/)
**IBM ppc64le GPU** Stable Release | [![Build Status](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/badge/icon)](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/)
**Linux ppc64le CPU** Nightly | [![Build Status](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Build/badge/icon)](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Build/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Nightly_Artifact/)
**Linux ppc64le CPU** Stable Release | [![Build Status](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/badge/icon)](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_CPU_Release_Build/)
**Linux ppc64le GPU** Nightly | [![Build Status](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Build/badge/icon)](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Build/) | [Nightly](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Nightly_Artifact/)
**Linux ppc64le GPU** Stable Release | [![Build Status](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/badge/icon)](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/) | [Release](https://powerci.osuosl.org/job/TensorFlow_PPC64LE_GPU_Release_Build/)
**Linux CPU with Intel® MKL-DNN** Nightly | [![Build Status](https://tensorflow-ci.intel.com/job/tensorflow-mkl-linux-cpu/badge/icon)](https://tensorflow-ci.intel.com/job/tensorflow-mkl-linux-cpu/) | [Nightly](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-whl-nightly/)
**Linux CPU with Intel® MKL-DNN** Python 2.7<br> **Linux CPU with Intel® MKL-DNN** Python 3.4<br> **Linux CPU with Intel® MKL-DNN** Python 3.5<br> **Linux CPU with Intel® MKL-DNN** Python 3.6 | [![Build Status](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-release-whl/badge/icon)](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-release-whl/lastStableBuild) | [1.11.0 py2.7](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.11.0-cp27-cp27mu-linux_x86_64.whl)<br>[1.11.0 py3.4](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.11.0-cp34-cp34m-linux_x86_64.whl)<br>[1.11.0 py3.5](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.11.0-cp35-cp35m-linux_x86_64.whl)<br>[1.11.0 py3.6](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.11.0-cp36-cp36m-linux_x86_64.whl)
**Linux CPU with Intel® MKL-DNN** Python 2.7<br> **Linux CPU with Intel® MKL-DNN** Python 3.4<br> **Linux CPU with Intel® MKL-DNN** Python 3.5<br> **Linux CPU with Intel® MKL-DNN** Python 3.6 | [![Build Status](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-release-whl/badge/icon)](https://tensorflow-ci.intel.com/job/tensorflow-mkl-build-release-whl/lastStableBuild) | [1.12.0 py2.7](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp27-cp27mu-linux_x86_64.whl)<br>[1.12.0 py3.4](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp34-cp34m-linux_x86_64.whl)<br>[1.12.0 py3.5](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp35-cp35m-linux_x86_64.whl)<br>[1.12.0 py3.6](https://storage.googleapis.com/intel-optimized-tensorflow/tensorflow-1.12.0-cp36-cp36m-linux_x86_64.whl)

## For more information

Expand Down
2 changes: 2 additions & 0 deletions RELEASE.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
Serving.
* Keras models now support evaluating with a `tf.data.Dataset`.
* TensorFlow binaries are built with XLA support linked in by default.
* Ignite Dataset added to contrib/ignite that allows working with Apache
Ignite.

## ROCm Features and Improvements
* MIOpenv1.6 integration
Expand Down
37 changes: 17 additions & 20 deletions WORKSPACE
Original file line number Diff line number Diff line change
Expand Up @@ -16,38 +16,35 @@ load("@io_bazel_rules_closure//closure:defs.bzl", "closure_repositories")

closure_repositories()

http_archive(
name = "base_images_docker",
sha256 = "e2b1b7254270bb7605e814a9dbf6d1e4ae04a11136ff1714fbfdabe3f87f7cf9",
strip_prefix = "base-images-docker-12801524f867e657fbb5d1a74f31618aff181ac6",
urls = ["https://github.com/GoogleCloudPlatform/base-images-docker/archive/12801524f867e657fbb5d1a74f31618aff181ac6.tar.gz"],
)
load("//third_party/toolchains/preconfig/generate:archives.bzl",
"bazel_toolchains_archive")

http_archive(
name = "bazel_toolchains",
sha256 = "15b5858b1b5541ec44df31b94c3b8672815b31d71215a98398761ea9f4c4eedb",
strip_prefix = "bazel-toolchains-6200b238c9c2d137c0d9a7262c80cc71d98e692b",
urls = [
"https://github.com/bazelbuild/bazel-toolchains/archive/6200b238c9c2d137c0d9a7262c80cc71d98e692b.tar.gz",
],
bazel_toolchains_archive()

load(
"@bazel_toolchains//repositories:repositories.bzl",
bazel_toolchains_repositories = "repositories",
)

http_archive(
name = "io_bazel_rules_docker",
sha256 = "29d109605e0d6f9c892584f07275b8c9260803bf0c6fcb7de2623b2bedc910bd",
strip_prefix = "rules_docker-0.5.1",
urls = ["https://github.com/bazelbuild/rules_docker/archive/v0.5.1.tar.gz"],
bazel_toolchains_repositories()

load(
"@io_bazel_rules_docker//container:container.bzl",
container_repositories = "repositories",
)

load("//third_party/toolchains/preconfig/generate:workspace.bzl", "remote_config_workspace")
container_repositories()

load("//third_party/toolchains/preconfig/generate:workspace.bzl",
"remote_config_workspace")

remote_config_workspace()

# We must check the bazel version before trying to parse any other BUILD
# files, in case the parsing of those build files depends on the bazel
# version we require here.
load("//tensorflow:version_check.bzl", "check_bazel_version_at_least")
check_bazel_version_at_least("0.15.0")
check_bazel_version_at_least("0.18.0")

load("//tensorflow:workspace.bzl", "tf_workspace")

Expand Down
4 changes: 2 additions & 2 deletions build_rocm_python3
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,6 @@ rm -f $TF_PKG_LOC/tensorflow*.whl

yes "" | TF_NEED_ROCM=1 PYTHON_BIN_PATH=/usr/bin/python3 ./configure
pip3 uninstall -y tensorflow || true
bazel build -s --config=opt --config=rocm //tensorflow/tools/pip_package:build_pip_package --verbose_failures &&
bazel build --config=opt --config=rocm //tensorflow/tools/pip_package:build_pip_package --verbose_failures &&
bazel-bin/tensorflow/tools/pip_package/build_pip_package $TF_PKG_LOC &&
pip3 install $TF_PKG_LOC/tensorflow-1.12.0rc0-cp35-cp35m-linux_x86_64.whl
pip3 install $TF_PKG_LOC/tensorflow*.whl
23 changes: 5 additions & 18 deletions configure.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,18 +255,6 @@ def setup_python(environ_cp):
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
bazelrc_path = os.path.join(_TF_WORKSPACE_ROOT, '.bazelrc')

data = []
if os.path.exists(bazelrc_path):
with open(bazelrc_path, 'r') as f:
data = f.read().splitlines()
with open(bazelrc_path, 'w') as f:
for l in data:
if _TF_BAZELRC_FILENAME in l:
continue
f.write('%s\n' % l)
f.write('import %%workspace%%/%s\n' % _TF_BAZELRC_FILENAME)

def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
Expand Down Expand Up @@ -488,11 +476,12 @@ def check_bazel_version(min_version, max_version):
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(0)
if curr_version_int > max_version_int:
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow!' % max_version)
sys.exit(0)
sys.exit(1)
return curr_version


Expand Down Expand Up @@ -1565,11 +1554,9 @@ def main():
# environment variables.
environ_cp = dict(os.environ)

check_bazel_version('0.15.0', '0.20.0')
check_bazel_version('0.19.0', '0.20.0')

reset_tf_configure_bazelrc()
# Explicitly import tools/bazel.rc, this is needed for Bazel 0.19.0 or later
write_to_bazelrc('import %workspace%/tools/bazel.rc')

cleanup_makefile()
setup_python(environ_cp)
Expand Down
15 changes: 13 additions & 2 deletions tensorflow/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,15 @@ config_setting(
visibility = ["//visibility:public"],
)

# By default, XLA GPU is compiled into tensorflow when building with
# --config=cuda even when `with_xla_support` is false. The config setting
# here allows us to override the behavior if needed.
config_setting(
name = "no_xla_deps_in_cuda",
define_values = {"no_xla_deps_in_cuda": "true"},
visibility = ["//visibility:public"],
)

config_setting(
name = "with_gdr_support",
define_values = {"with_gdr_support": "true"},
Expand Down Expand Up @@ -613,9 +622,11 @@ py_library(
name = "tensorflow_py",
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
deps = select({
"api_version_2": [],
"//conditions:default": ["//tensorflow/contrib:contrib_py"],
}) + [
":tensorflow_py_no_contrib",
"//tensorflow/contrib:contrib_py",
"//tensorflow/python/estimator:estimator_py",
],
)
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/api_template.__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,14 @@

import os as _os

# API IMPORTS PLACEHOLDER

# pylint: disable=g-bad-import-order
from tensorflow.python.tools import component_api_helper as _component_api_helper
_component_api_helper.package_hook(
parent_package_str=__name__,
child_package_str=('tensorflow_estimator.python.estimator.api.estimator'))

# API IMPORTS PLACEHOLDER

# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/api_template_v1.__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,13 @@
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import

# API IMPORTS PLACEHOLDER

from tensorflow.python.tools import component_api_helper as _component_api_helper
_component_api_helper.package_hook(
parent_package_str=__name__,
child_package_str=('tensorflow_estimator.python.estimator.api.estimator'))

# API IMPORTS PLACEHOLDER

from tensorflow.python.util.lazy_loader import LazyLoader # pylint: disable=g-import-not-at-top
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib')
del LazyLoader
Expand Down
1 change: 1 addition & 0 deletions tensorflow/c/c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -488,6 +488,7 @@ static TF_Tensor* EmptyTensor(TF_DataType dtype, const TensorShape& shape) {
// Non-static for testing.
TF_Tensor* TF_TensorFromTensor(const tensorflow::Tensor& src,
TF_Status* status) {
TF_SetStatus(status, TF_OK, "");
if (!src.IsInitialized()) {
status->status = FailedPrecondition(
"attempt to use a tensor with an uninitialized value");
Expand Down
10 changes: 7 additions & 3 deletions tensorflow/c/c_api_experimental.cc
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,8 @@ void TF_EnableXLACompilation(TF_SessionOptions* options, unsigned char enable) {
}

TF_Buffer* TF_CreateConfig(unsigned char enable_xla_compilation,
unsigned char gpu_memory_allow_growth) {
unsigned char gpu_memory_allow_growth,
unsigned int num_cpu_devices) {
tensorflow::ConfigProto config;
auto* optimizer_options =
config.mutable_graph_options()->mutable_optimizer_options();
Expand All @@ -87,6 +88,8 @@ TF_Buffer* TF_CreateConfig(unsigned char enable_xla_compilation,
auto* gpu_options = config.mutable_gpu_options();
gpu_options->set_allow_growth(gpu_memory_allow_growth);

(*config.mutable_device_count())["CPU"] = num_cpu_devices;

// TODO(b/113217601): This is needed for EagerContext::runner_ to use a
// threadpool, so that we avoid the possibility of running the runner_ in the
// threadpool of GPU event mgr, as that can trigger more callbacks to be
Expand Down Expand Up @@ -8535,8 +8538,9 @@ TFE_Context* TFE_CreateContextFromSession(TF_Session* session,

// Reduce GPU memory allocation, and set appropriate config options for TFE
// context.
auto* config =
TF_CreateConfig(/*xla*/ false, /* gpu_memory_allow_growth */ true);
auto* config = TF_CreateConfig(
/*xla*/ false, /* gpu_memory_allow_growth */ true, /* num_cpu_devices */
10);
TFE_ContextOptionsSetConfig(opts, config->data, config->length, status);
if (!status->status.ok()) {
CHECK(!config);
Expand Down
5 changes: 3 additions & 2 deletions tensorflow/c/c_api_experimental.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,10 @@ TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
// a) ConfigProto.optimizer_options.global_jit_level is set to to ON_1 if
// `enable_xla_compilation` is non-zero, and OFF otherwise.
// b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`.
// c) ConfigProto.device_count is set to `num_cpu_devices`.
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
unsigned char enable_xla_compilation,
unsigned char gpu_memory_allow_growth);
unsigned char enable_xla_compilation, unsigned char gpu_memory_allow_growth,
unsigned int num_cpu_devices);

// Create a serialized tensorflow.RunOptions proto, where RunOptions.trace_level
// is set to FULL_TRACE if `enable_full_trace` is non-zero, and NO_TRACE
Expand Down
22 changes: 5 additions & 17 deletions tensorflow/c/eager/c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -170,23 +170,11 @@ TF_CAPI_EXPORT extern int64_t TFE_TensorHandleDim(TFE_TensorHandle* h,
int dim_index,
TF_Status* status);

// Returns the device of the operation that produced `h`.
// If `h` was produced by a copy, returns the destination device of
// the copy. Note that returned device name is not always the device
// holding the tensor handle's memory. If you want the latter, use
// TFE_TensorHandleBackingDeviceName.
// This function will block till the operation that produces `h` has completed.
//
// Device on which the kernel of the operation that produced `h` ran.
//
// If `h` was produced by a copy, returns the destination device of
// the copy.
//
// Note that returned device name is not always the device that owns the memory
// that backs the tensor handle. For the latter see
// TFE_TensorHandleBackingDeviceName.
//
// This function will block till the operation that produces `h` has completed.
// Returns the device of the operation that produced `h`. If `h` was produced by
// a copy, returns the destination device of the copy. Note that the returned
// device name is not always the device holding the tensor handle's memory. If
// you want the latter, use TFE_TensorHandleBackingDeviceName. This function
// will block till the operation that produces `h` has completed.
TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceName(
TFE_TensorHandle* h, TF_Status* status);

Expand Down
22 changes: 22 additions & 0 deletions tensorflow/c/env.cc
Original file line number Diff line number Diff line change
Expand Up @@ -159,3 +159,25 @@ TF_CAPI_EXPORT extern uint64_t TF_NowMicros(void) {
TF_CAPI_EXPORT extern uint64_t TF_NowSeconds(void) {
return ::tensorflow::Env::Default()->NowSeconds();
}

// Fills `options` with default values. NOTE(review): 0 presumably means
// "use the system default" for the stack/guard sizes, and -1 means "no NUMA
// affinity" -- mirrors the defaults of ::tensorflow::ThreadOptions; confirm
// against tensorflow/core/platform/env.h.
void TF_DefaultThreadOptions(TF_ThreadOptions* options) {
  options->stack_size = 0;
  options->guard_size = 0;
  options->numa_node = -1;
}

// Starts a new thread named `thread_name` that runs `work_func(param)`.
// Copies the caller-supplied options field-by-field into the C++
// ThreadOptions struct and delegates to the default Env. The returned
// TF_Thread wraps the owning ::tensorflow::Thread*; the caller must release
// it with TF_JoinThread.
TF_Thread* TF_StartThread(const TF_ThreadOptions* options,
                          const char* thread_name, void (*work_func)(void*),
                          void* param) {
  ::tensorflow::ThreadOptions cc_options;
  cc_options.stack_size = options->stack_size;
  cc_options.guard_size = options->guard_size;
  cc_options.numa_node = options->numa_node;
  // Capture by value: the lambda outlives this stack frame, so capturing
  // `work_func`/`param` by reference would dangle.
  return reinterpret_cast<TF_Thread*>(::tensorflow::Env::Default()->StartThread(
      cc_options, thread_name, [=]() { (*work_func)(param); }));
}

// Blocks until `thread` finishes, then destroys it. `thread` must be a value
// returned by TF_StartThread and must not be used afterwards.
void TF_JoinThread(TF_Thread* thread) {
  // ::tensorflow::Thread joins on destruction
  delete reinterpret_cast<::tensorflow::Thread*>(thread);
}

0 comments on commit 434c356

Please sign in to comment.