Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Branch 199194260 #19757

Merged
merged 33 commits into from
Jun 5, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
4519806
New NN API interface that uses the TensorFlow Lite delegate API.
aselle Jun 3, 2018
bab05a2
[tf.data] Input pipeline rewrites prototype.
jsimsa Jun 4, 2018
ee8b826
Update ops-related pbtxt files.
tensorflower-gardener Jun 4, 2018
327d4dc
Go: Update generated wrapper functions for TensorFlow ops.
tensorflower-gardener Jun 4, 2018
869dc91
Add debug output to CHECK for compatible shapes of multi-output fusions.
tensorflower-gardener Jun 4, 2018
5b498d5
[XLA] Remove unnecessary std::vector copies
d0k Jun 4, 2018
92415c0
Update README.md for tf.contrib.kfac and add deprecation warning.
Jun 4, 2018
256ef42
Add stored eager variables to graph collections.
tomhennigan Jun 4, 2018
edd936e
Temporary patch: properly handle expressions in subscripts. The long …
Jun 4, 2018
01c4773
[XLA:GPU] Add error message to CHECK for preconditions to lower fusio…
tensorflower-gardener Jun 4, 2018
1b4336c
Add LRN as unchanged rf layer operations for the receptive field calc…
tensorflower-gardener Jun 4, 2018
1a9f695
Disable flaky test tensorflow/contrib/distribute/python:minimize_loss…
hawkinsp Jun 4, 2018
a1e24eb
Internal change
tensorflower-gardener Jun 4, 2018
736e8fa
Enable cross-device dependency grouping optimization in non-AGGRESSIV…
hawkinsp Jun 4, 2018
0776129
Update the distributed SDCA test.
tensorflower-gardener Jun 4, 2018
52f3f70
Build TF on Windows with --config=opt
tensorflower-gardener Jun 4, 2018
b5f1ba2
Minor error message fix in TPUEstimator.
gmagogsfm Jun 4, 2018
f277fb6
[TF2XLA] Change to resize bilinear to between match a BackpropInput c…
blakehechtman Jun 4, 2018
f4048e5
Computing the volume of the set of correlation matrices with bounded …
tensorflower-gardener Jun 4, 2018
5f315a2
Fix visibility for tf.keras.__version__
MarkDaoust Jun 4, 2018
add0043
- Fix typo in evaluator
yunxing Jun 4, 2018
afb0950
Add a special functions module that contains non-Python abstractions,…
Jun 4, 2018
008fc03
[TF:XLA] Bump open source llvm revision to r333878
Jun 4, 2018
836fc09
Fix test user ops
Jun 4, 2018
d16877c
Fix Python API.
Jun 4, 2018
d88e871
added clearer description for invalid behavior when executing in eage…
tensorflower-gardener Jun 4, 2018
48acc50
Turns on optimization to convert division of sqrt to multiplication o…
tensorflower-gardener Jun 4, 2018
8c7a504
Fix a couple of doc typos.
tensorflower-gardener Jun 4, 2018
d1c2dbd
Fix broken distributed_runtime/remote_device_test by adding missing
tensorflower-gardener Jun 4, 2018
279b899
Improve TOCO error handling.
Jun 4, 2018
204fcd9
[XLA:GPU] Propagate layouts in a better order for performance and fus…
blakehechtman Jun 4, 2018
3c87b99
Remove --distinct_host_configuration=false from tools/bazel.rc
Jun 4, 2018
80e7bed
Fixing a conflict in building for Raspberry Pi.
Jun 4, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -197,8 +197,8 @@ xla::XlaOp ResizeUsingDilationAndConvolution(xla::XlaBuilder* builder,
dimension_numbers.add_output_spatial_dimensions(1 + i);
dimension_numbers.add_kernel_spatial_dimensions(i);
}
dimension_numbers.set_kernel_input_feature_dimension(num_spatial_dims);
dimension_numbers.set_kernel_output_feature_dimension(num_spatial_dims + 1);
dimension_numbers.set_kernel_input_feature_dimension(num_spatial_dims + 1);
dimension_numbers.set_kernel_output_feature_dimension(num_spatial_dims);

ResizeConvolutionDims dims =
ComputeResizeConvolutionParameters(in_size, out_size);
Expand Down
8 changes: 7 additions & 1 deletion tensorflow/compiler/xla/service/gpu/gpu_layout_assignment.cc
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,13 @@ Status GpuLayoutAssignment::AddBackendConstraintsToDnnConvCustomCall(

Status GpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
for (auto* instruction : constraints->computation()->instructions()) {
// Add convolution constraints in reverse post-order so that the earliest
// convolution's layout propagates first. This reduces the likelihood of
// fusion nodes with copies.
auto post_order = constraints->computation()->MakeInstructionPostOrder();
for (auto iterator = post_order.rbegin(); iterator != post_order.rend();
++iterator) {
HloInstruction* instruction = *iterator;
if (IsCustomCallToDnnConvolution(*instruction)) {
TF_RETURN_IF_ERROR(
AddBackendConstraintsToDnnConvCustomCall(instruction, constraints));
Expand Down
7 changes: 5 additions & 2 deletions tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2443,8 +2443,11 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
case HloOpcode::kReduce:
return inst->operand(1);
case HloOpcode::kTuple:
CHECK(hlo->IsMultiOutputFusion() &&
inst->operand(index.back())->opcode() == HloOpcode::kReduce);
CHECK(hlo->IsMultiOutputFusion())
<< ": " << hlo->ToString() << " is not a multi-output fusion.";
CHECK(inst->operand(index.back())->opcode() == HloOpcode::kReduce)
<< ": Found '" << inst->operand(index.back())->opcode() << "' in "
<< inst->ToString() << " but expected 'reduce'.";
// For multi-output fusion look through the tuple.
return inst->operand(index.back())->operand(1);
default:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1962,7 +1962,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {

// TODO(b/74360564): This is implementation defined behavior, but is
// currently respected by all implementations. Change this if we ever decide
// to oficially document different behavior.
// to officially document different behavior.
for (int64 i = 0; i < start.size(); ++i) {
start[i] = std::min<int64>(
std::max(int64{0}, start[i]),
Expand Down
14 changes: 3 additions & 11 deletions tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -87,18 +87,10 @@ llvm::Value* EmitCallToIntrinsic(
tensorflow::gtl::ArraySlice<llvm::Value*> operands,
tensorflow::gtl::ArraySlice<llvm::Type*> overloaded_types,
llvm::IRBuilder<>* ir_builder) {
std::vector<llvm::Type*> types;
for (auto type : overloaded_types) {
types.push_back(type);
}
llvm::Module* module = ModuleFromIRBuilder(ir_builder);
llvm::Function* intrinsic =
llvm::Intrinsic::getDeclaration(module, intrinsic_id, types);
std::vector<llvm::Value*> operands_vec;
for (auto operand : operands) {
operands_vec.push_back(operand);
}
return ir_builder->CreateCall(intrinsic, operands_vec);
llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(
module, intrinsic_id, AsArrayRef(overloaded_types));
return ir_builder->CreateCall(intrinsic, AsArrayRef(operands));
}

llvm::Value* EmitFloatMax(llvm::Value* lhs_value, llvm::Value* rhs_value,
Expand Down
4 changes: 3 additions & 1 deletion tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,9 @@ LoopEmitter::LoopEmitter(const ElementGenerator& target_element_generator,
// Sanity check: In multi-output fusion, all shapes produced must have the
// same dimensions.
for (const IrArray& array : target_arrays) {
CHECK(ShapeUtil::SameDimensions(shape_, array.GetShape()));
CHECK(ShapeUtil::SameDimensions(shape_, array.GetShape()))
<< ": '" << shape_.ShortDebugString() << "' does not match '"
<< array.GetShape().ShortDebugString() << "'";
}
}

Expand Down
16 changes: 14 additions & 2 deletions tensorflow/contrib/autograph/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,24 @@
from tensorflow.contrib.autograph.impl.api import RunMode
from tensorflow.contrib.autograph.impl.api import to_code
from tensorflow.contrib.autograph.impl.api import to_graph
from tensorflow.contrib.autograph.impl.special_functions import stack
from tensorflow.contrib.autograph.pyct.transformer import AutographParseError
from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = [
'utils', 'convert', 'converted_call', 'do_not_convert', 'RunMode',
'to_code', 'to_graph', 'AutographParseError'
# Main API
'RunMode',
'convert',
'converted_call',
'do_not_convert',
'to_code',
'to_graph',
# Special functions
'stack',
# Exceptions
'AutographParseError',
# Utilities: to be removed
'utils',
]

remove_undocumented(__name__, _allowed_symbols)
11 changes: 11 additions & 0 deletions tensorflow/contrib/autograph/impl/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ py_library(
"config.py",
"conversion.py",
"naming.py",
"special_functions.py",
],
srcs_version = "PY2AND3",
visibility = ["//tensorflow:__subpackages__"],
Expand Down Expand Up @@ -69,3 +70,13 @@ py_test(
"//tensorflow/python:client_testlib",
],
)

py_test(
name = "special_functions_test",
srcs = ["special_functions_test.py"],
srcs_version = "PY2AND3",
deps = [
":impl",
"//tensorflow/python:client_testlib",
],
)
48 changes: 48 additions & 0 deletions tensorflow/contrib/autograph/impl/special_functions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special functions that only make sense for AutoGraph.

These functions are meant to ensure feature parity between Python and AutoGraph,
so that the exact same code works in both modes. In general, AutoGraph will
replace these calls.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.autograph.operators import data_structures


def stack(list_or_tensor, element_dtype=None):
  """Stacks the input when stacking makes sense; otherwise leaves it alone.

  A list of tensors, for instance, can be stacked into one larger tensor.
  Unlike tf.stack, this also accepts single (non-list) values and lists of
  non-tensors, which are simply passed through untouched.

  Args:
    list_or_tensor: Any entity.
    element_dtype: Optional dtype for the list elements. Required when the
      input is stackable but the list carries no type information.

  Returns:
    A new object representing the stacked input when stacking applies;
    otherwise list_or_tensor, unchanged.
  """

  def _pass_through(entity):
    # Fallback used when the input does not admit stacking.
    return entity

  stack_opts = data_structures.ListStackOpts(
      element_dtype=element_dtype, original_call=_pass_through)
  return data_structures.list_stack(list_or_tensor, stack_opts)
50 changes: 50 additions & 0 deletions tensorflow/contrib/autograph/impl/special_functions_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special_functions module."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.autograph.impl import special_functions
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test


class SpecialFunctionsTest(test.TestCase):
  """Unit tests for the AutoGraph special_functions module."""

  def test_basic(self):
    # Non-stackable inputs pass through unchanged.
    self.assertEqual(special_functions.stack(1), 1)
    self.assertListEqual(special_functions.stack([1, 2, 3]), [1, 2, 3])
    # TODO(mdan): This should probably forward to tf.stack.
    # assertIsInstance reports the actual type on failure, unlike
    # assertTrue(isinstance(...)) which only prints "False is not true".
    self.assertIsInstance(
        special_functions.stack(
            [constant_op.constant(1),
             constant_op.constant(2)]), list)

    # A typed TensorList is stackable into a single tensor.
    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(
        t, element_shape=constant_op.constant([], dtype=dtypes.int32))
    self.assertTrue(
        tensor_util.is_tensor(
            special_functions.stack(l, element_dtype=dtypes.float32)))


if __name__ == '__main__':
test.main()
7 changes: 6 additions & 1 deletion tensorflow/contrib/autograph/pyct/qual_names.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,7 @@ def visit_Attribute(self, node):
return node

def visit_Subscript(self, node):
# TODO(mdan): This may no longer apply if we overload getitem.
node = self.generic_visit(node)
s = node.slice
if not isinstance(s, gast.Index):
Expand All @@ -216,7 +217,11 @@ def visit_Subscript(self, node):
elif isinstance(s.value, gast.Str):
subscript = QN(StringLiteral(s.value.s))
else:
subscript = anno.getanno(node.slice.value, anno.Basic.QN)
# The index may be an expression, case in which a name doesn't make sense.
if anno.hasanno(node.slice.value, anno.Basic.QN):
subscript = anno.getanno(node.slice.value, anno.Basic.QN)
else:
return node
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN),
Expand Down
13 changes: 13 additions & 0 deletions tensorflow/contrib/data/python/kernel_tests/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -280,6 +280,19 @@ py_test(
],
)

# Tests for the experimental tf.data optimize() transformation
# (graph-rewrite based input pipeline optimizations).
py_test(
    name = "optimize_dataset_op_test",
    size = "small",
    srcs = ["optimize_dataset_op_test.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":dataset_serialization_test",
        "//tensorflow/contrib/data/python/ops:optimization",
        "//tensorflow/python:platform",
        "//tensorflow/python/data/ops:dataset_ops",
    ],
)

py_test(
name = "prefetch_dataset_op_test",
size = "small",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import test


class OptimizeDatasetTest(test.TestCase):
  """Tests for the experimental `optimization.optimize()` transformation."""

  def _make_dataset(self, optimize_transformation):
    # One-line helper: the range->map->batch pipeline shared by every test,
    # with the optimize() variant under test applied last.
    return dataset_ops.Dataset.range(10).map(lambda x: x * x).batch(
        10).apply(optimize_transformation)

  def _check_optimization(self, dataset, expect_fused):
    """Runs `dataset`, checking fusion presence and the produced values.

    Args:
      dataset: The dataset to run.
      expect_fused: Whether the serialized graph should contain a fused
        "MapAndBatchDatasetV2" node after optimization.
    """
    iterator = dataset.make_one_shot_iterator()
    get_next = iterator.get_next()

    with self.test_session() as sess:
      graph = graph_pb2.GraphDef().FromString(
          sess.run(dataset._as_serialized_graph()))
      # Generator form avoids materializing an intermediate list.
      fused = any(node.op == "MapAndBatchDatasetV2" for node in graph.node)
      self.assertEqual(expect_fused, fused)
      # The optimized pipeline must still produce the original values.
      self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testDefaultOptimizations(self):
    # No optimizations requested: map and batch stay unfused.
    self._check_optimization(
        self._make_dataset(optimization.optimize()), expect_fused=False)

  def testEmptyOptimizations(self):
    # Explicitly empty optimization list behaves like the default.
    self._check_optimization(
        self._make_dataset(optimization.optimize([])), expect_fused=False)

  def testOptimization(self):
    # Requesting map_and_batch_fusion rewrites map+batch into the fused op.
    self._check_optimization(
        self._make_dataset(optimization.optimize(["map_and_batch_fusion"])),
        expect_fused=True)


class OptimizeDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checkpoint save/restore coverage for the optimize() transformation."""

  def testCore(self):

    def build_dataset(num_elements, batch_size):
      # Same pipeline as the functional tests, built step by step.
      ds = dataset_ops.Dataset.range(num_elements)
      ds = ds.map(lambda x: x * x)
      ds = ds.batch(batch_size)
      return ds.apply(optimization.optimize(["map_and_batch_fusion"]))

    # 200 elements in batches of 10 -> 20 output batches.
    self.run_core_tests(lambda: build_dataset(200, 10), None, 20)


if __name__ == "__main__":
test.main()
15 changes: 15 additions & 0 deletions tensorflow/contrib/data/python/ops/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,20 @@ py_library(
],
)

# Python wrappers for the tf.data graph-rewrite ("optimize") dataset ops.
py_library(
    name = "optimization",
    srcs = ["optimization.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":contrib_op_loader",
        ":gen_dataset_ops",
        "//tensorflow/python:dtypes",
        "//tensorflow/python:framework_ops",
        "//tensorflow/python/data/util:nest",
        "//tensorflow/python/data/util:sparse",
    ],
)

py_library(
name = "resampling",
srcs = ["resampling.py"],
Expand Down Expand Up @@ -368,6 +382,7 @@ py_library(
":get_single_element",
":grouping",
":interleave_ops",
":optimization",
":prefetching_ops",
":readers",
":resampling",
Expand Down