Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Branch 151792071 #8864

Merged
merged 14 commits on Mar 31, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 0 additions & 2 deletions tensorflow/contrib/distributions/BUILD
Expand Up @@ -15,7 +15,6 @@ py_library(
srcs = glob(["python/ops/bijectors/*.py"]),
srcs_version = "PY2AND3",
deps = [
"//tensorflow/contrib/framework:framework_py",
"//tensorflow/contrib/linalg:linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:check_ops",
Expand All @@ -41,7 +40,6 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":bijectors_py",
"//tensorflow/contrib/framework:framework_py",
"//tensorflow/contrib/linalg:linalg_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:check_ops",
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/beta.py
Expand Up @@ -23,7 +23,6 @@
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -157,7 +156,7 @@ def __init__(self,
self._concentration0 = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration0, name="concentration0"),
validate_args)
contrib_tensor_util.assert_same_float_dtype([
check_ops.assert_same_float_dtype([
self._concentration1, self._concentration0])
self._total_concentration = self._concentration1 + self._concentration0
super(Beta, self).__init__(
Expand Down
Expand Up @@ -18,7 +18,6 @@
from __future__ import division
from __future__ import print_function

from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
Expand All @@ -27,6 +26,7 @@
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
Expand Down Expand Up @@ -359,7 +359,7 @@ def __init__(self,
event_ndims=event_ndims,
graph_parents=(
[event_ndims] +
[self._scale] if contrib_framework.is_tensor(self._scale)
[self._scale] if tensor_util.is_tensor(self._scale)
else self._scale.inputs +
[self._shift] if self._shift is not None else []),
is_constant_jacobian=True,
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/distribution.py
Expand Up @@ -26,7 +26,6 @@
import numpy as np
import six

from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -381,7 +380,7 @@ def __init__(self,
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
Expand Down
Expand Up @@ -23,7 +23,6 @@
import math
import numpy as np

from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import linalg
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
Expand Down Expand Up @@ -514,7 +513,7 @@ def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
def tril_ids(n):
"""Internal helper to create vector of linear indices into y."""
# Build the ids statically; chose 512 because it implies 1MiB.
if not contrib_framework.is_tensor(n) and n <= 512:
if not tensor_util.is_tensor(n) and n <= 512:
ids = np.arange(n**2, dtype=np.int32)
rows = (ids / n).astype(np.int32) # Implicit floor.
# We need to stop incrementing the index when we encounter
Expand Down
5 changes: 2 additions & 3 deletions tensorflow/contrib/distributions/python/ops/gamma.py
Expand Up @@ -23,7 +23,6 @@
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -134,7 +133,7 @@ def __init__(self,
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
contrib_tensor_util.assert_same_float_dtype(
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(Gamma, self).__init__(
dtype=self._concentration.dtype,
Expand Down Expand Up @@ -249,7 +248,7 @@ def _mode(self):
], mode)

def _maybe_assert_valid_sample(self, x):
contrib_tensor_util.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/gumbel.py
Expand Up @@ -21,7 +21,6 @@
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -129,7 +128,7 @@ def __init__(self,
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(_Gumbel, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
Expand Down
5 changes: 2 additions & 3 deletions tensorflow/contrib/distributions/python/ops/inverse_gamma.py
Expand Up @@ -22,7 +22,6 @@

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -134,7 +133,7 @@ def __init__(self,
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
contrib_tensor_util.assert_same_float_dtype(
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(InverseGamma, self).__init__(
dtype=self._concentration.dtype,
Expand Down Expand Up @@ -268,7 +267,7 @@ def _mode(self):
return self.rate / (1. + self.concentration)

def _maybe_assert_valid_sample(self, x):
contrib_tensor_util.assert_same_float_dtype(
check_ops.assert_same_float_dtype(
tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/laplace.py
Expand Up @@ -24,7 +24,6 @@

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import special_math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -105,7 +104,7 @@ def __init__(self,
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Laplace, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/logistic.py
Expand Up @@ -22,7 +22,6 @@
import numpy as np

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -127,7 +126,7 @@ def __init__(self,
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Logistic, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/normal.py
Expand Up @@ -23,7 +23,6 @@
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import special_math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -136,7 +135,7 @@ def __init__(self,
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
Expand Down
Expand Up @@ -20,7 +20,6 @@


from tensorflow.contrib.distributions.python.ops import operator_pd
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -108,7 +107,7 @@ def _check_scale(self, scale, dtype):
return scale

# Further check that this is a rank 0, positive tensor.
scale = contrib_tensor_util.assert_scalar(scale)
scale = check_ops.assert_scalar(scale)
return control_flow_ops.with_dependencies(
[check_ops.assert_positive(scale)], scale)

Expand Down
Expand Up @@ -22,7 +22,6 @@

from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
Expand Down Expand Up @@ -225,7 +224,7 @@ def __init__(self,
low = ops.convert_to_tensor(low, name="low")
if high is not None:
high = ops.convert_to_tensor(high, name="high")
contrib_tensor_util.assert_same_float_dtype(
check_ops.assert_same_float_dtype(
tensors=[self.distribution, low, high])

# We let QuantizedDistribution access _graph_parents since this class is
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/student_t.py
Expand Up @@ -22,7 +22,6 @@

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -161,7 +160,7 @@ def __init__(self,
self._df = array_ops.identity(df, name="df")
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype(
check_ops.assert_same_float_dtype(
(self._df, self._loc, self._scale))
super(StudentT, self).__init__(
dtype=self._scale.dtype,
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/uniform.py
Expand Up @@ -21,7 +21,6 @@
import math

from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -108,7 +107,7 @@ def __init__(self,
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
contrib_tensor_util.assert_same_float_dtype([self._low, self._high])
check_ops.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
Expand Down
3 changes: 1 addition & 2 deletions tensorflow/contrib/distributions/python/ops/wishart.py
Expand Up @@ -25,7 +25,6 @@
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -121,7 +120,7 @@ def __init__(self,
df,
dtype=scale_operator_pd.dtype,
name="df")
contrib_tensor_util.assert_same_float_dtype(
check_ops.assert_same_float_dtype(
(self._df, self._scale_operator_pd))
if (self._scale_operator_pd.get_shape().ndims is None or
self._scale_operator_pd.get_shape()[-1].value is None):
Expand Down
Expand Up @@ -417,7 +417,6 @@ def decode(self, serialized_example, items=None):
"""
example = parsing_ops.parse_single_example(serialized_example,
self._keys_to_features)
print(example.keys())

# Reshape non-sparse elements just once:
for k in self._keys_to_features:
Expand Down
20 changes: 20 additions & 0 deletions tensorflow/core/BUILD
Expand Up @@ -416,6 +416,14 @@ tf_cuda_library(
deps = [":framework_internal"],
)

cc_library(
name = "overflow",
hdrs = ["util/overflow.h"],
deps = [
":framework_lite",
],
)

cc_library(
name = "reader_base",
srcs = ["framework/reader_base.cc"],
Expand Down Expand Up @@ -1852,6 +1860,18 @@ tf_cc_test(
],
)

tf_cc_test(
name = "util_overflow_test",
size = "small",
srcs = ["util/overflow_test.cc"],
deps = [
":framework_lite",
":overflow",
":test",
":test_main",
],
)

cc_test(
name = "lib_jpeg_jpeg_mem_unittest",
srcs = ["lib/jpeg/jpeg_mem_unittest.cc"],
Expand Down
4 changes: 1 addition & 3 deletions tensorflow/core/framework/node_def.proto
Expand Up @@ -29,17 +29,15 @@ message NodeDef {
// node should be placed.
// The expected syntax for this string is as follows:
//
// DEVICE_SPEC ::= COLOCATED_NODE | PARTIAL_SPEC
// DEVICE_SPEC ::= PARTIAL_SPEC
//
// COLOCATED_NODE ::= "@" NODE_NAME // See NodeDef.name above.
// PARTIAL_SPEC ::= ("/" CONSTRAINT) *
// CONSTRAINT ::= ("job:" JOB_NAME)
// | ("replica:" [1-9][0-9]*)
// | ("task:" [1-9][0-9]*)
// | ( ("gpu" | "cpu") ":" ([1-9][0-9]* | "*") )
//
// Valid values for this string include:
// * "@other/node" (colocate with "other/node")
// * "/job:worker/replica:0/task:1/gpu:3" (full specification)
// * "/job:worker/gpu:3" (partial specification)
// * "" (no specification)
Expand Down
Expand Up @@ -6,7 +6,7 @@ option java_outer_classname = "RemoteFusedGraphExecuteInfoProto";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";

import "tensorflow/core/framework/node_def.proto";
import "tensorflow/core/framework/graph.proto";
import "tensorflow/core/framework/tensor_shape.proto";
import "tensorflow/core/framework/types.proto";

Expand All @@ -19,8 +19,8 @@ message RemoteFusedGraphExecuteInfo {
TensorShapeProto shape = 2;
}

// Nodes in remote fused graph
repeated NodeDef node = 1;
// Definition of remote graph
GraphDef remote_graph = 1;

// Remote fused graph input node name
repeated string graph_input_node_name = 2;
Expand Down