Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
import numpy as np
import tensorflow as tf

from tensorflow.python.framework import tensor_util


def make_categorical(batch_shape, num_classes, dtype=tf.int32):
logits = tf.random_uniform(
Expand All @@ -40,10 +42,22 @@ def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.get_batch_shape().as_list())
self.assertAllEqual(batch_shape, dist.get_batch_shape())
self.assertAllEqual(batch_shape, dist.batch_shape().eval())
self.assertAllEqual([], dist.get_event_shape().as_list())
self.assertAllEqual([], dist.get_event_shape())
self.assertAllEqual([], dist.event_shape().eval())
self.assertEqual(10, dist.num_classes.eval())
# num_classes is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10, tensor_util.constant_value(dist.num_classes))

for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(batch_shape, tf.constant(10, dtype=tf.int32))
self.assertAllEqual(len(batch_shape), dist.get_batch_shape().ndims)
self.assertAllEqual(batch_shape, dist.batch_shape().eval())
self.assertAllEqual([], dist.get_event_shape())
self.assertAllEqual([], dist.event_shape().eval())
self.assertEqual(10, dist.num_classes.eval())

def testDtype(self):
dist = make_categorical([], 5, dtype=tf.int32)
Expand All @@ -59,6 +73,20 @@ def testDtype(self):
self.assertEqual(dist.logits.dtype, dist.pmf(0).dtype)
self.assertEqual(dist.logits.dtype, dist.log_pmf(0).dtype)

def testUnknownShape(self):
  """Sampling works when the shape of `logits` is only known at run time."""
  with self.test_session():
    # Placeholder with no static shape: neither the batch rank nor the
    # number of classes is known at graph-construction time.
    logits = tf.placeholder(dtype=tf.float32)
    dist = tf.contrib.distributions.Categorical(logits)
    sample = dist.sample()
    # Will sample class 1: its logit overwhelmingly dominates class 0.
    sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
    self.assertEqual(1, sample_value)

    # The same sample op also accepts a batched feed:
    # batch entry 0 will sample class 1, batch entry 1 will sample class 0.
    sample_value_batch = sample.eval(
        feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
    self.assertAllEqual([1, 0], sample_value_batch)

def testPMFWithBatch(self):
histograms = [[0.2, 0.8], [0.6, 0.4]]
dist = tf.contrib.distributions.Categorical(tf.log(histograms) - 50.)
Expand Down
24 changes: 19 additions & 5 deletions tensorflow/contrib/distributions/python/ops/categorical.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,24 @@ def __init__(
self._validate_args = validate_args
with ops.name_scope(name, values=[logits]):
self._logits = ops.convert_to_tensor(logits, name="logits")
logits_shape = array_ops.shape(self._logits)
self._batch_rank = array_ops.size(logits_shape) - 1
self._batch_shape = array_ops.slice(
logits_shape, [0], array_ops.pack([self._batch_rank]))
self._num_classes = array_ops.gather(logits_shape, self._batch_rank)
logits_shape = array_ops.shape(self._logits, name="logits_shape")
static_logits_shape = self._logits.get_shape().with_rank_at_least(1)
static_logits_rank = static_logits_shape.ndims
if static_logits_rank is not None:
self._batch_rank = ops.convert_to_tensor(
static_logits_rank - 1, dtype=dtypes.int32,
name="batch_rank")
else:
self._batch_rank = array_ops.rank(self._logits) - 1

if static_logits_shape[-1].value is not None:
self._num_classes = ops.convert_to_tensor(
static_logits_shape[-1].value,
dtype=dtypes.int32, name="num_classes")
else:
self._num_classes = array_ops.gather(logits_shape, self._batch_rank)

self._batch_shape = logits_shape[:-1]

@property
def allow_nan_stats(self):
Expand Down Expand Up @@ -108,6 +121,7 @@ def get_event_shape(self):

@property
def num_classes(self):
  """Scalar `int32` tensor: the number of classes."""
  # Set in the constructor: a constant tensor when the last dimension of
  # `logits` has a static value, otherwise gathered from the runtime shape.
  return self._num_classes

@property
Expand Down
24 changes: 24 additions & 0 deletions tensorflow/contrib/learn/python/learn/estimators/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import checkpoints
from tensorflow.contrib.learn.python.learn.utils import export

from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -375,6 +376,29 @@ def get_variable_names(self):
def model_dir(self):
return self._model_dir

def export(self, export_dir, signature_fn=None,
           input_fn=export.default_input_fn, default_batch_size=1,
           exports_to_keep=None):
  """Exports inference graph into given dir.

  Args:
    export_dir: A string containing a directory to write the exported graph
      and checkpoints.
    signature_fn: Function that returns a default signature and a named
      signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
      for features and `Tensor` or `dict` of `Tensor`s for predictions.
    input_fn: Function that given `Tensor` of `Example` strings, parses it
      into features that are then passed to the model.
    default_batch_size: Default batch size of the `Example` placeholder.
    exports_to_keep: Number of exports to keep.
  """
  # Thin wrapper around the export utility; `self` supplies the model and
  # the checkpoint directory to export from.
  export.export_estimator(estimator=self,
                          export_dir=export_dir,
                          signature_fn=signature_fn,
                          input_fn=input_fn,
                          default_batch_size=default_batch_size,
                          exports_to_keep=exports_to_keep)

@abc.abstractproperty
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ def input_fn():
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
tf.contrib.learn.utils.export.export_estimator(classifier, export_dir)
classifier.export(export_dir)

def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
Expand Down
32 changes: 18 additions & 14 deletions tensorflow/contrib/learn/python/learn/evaluable.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,34 +33,38 @@ def evaluate(
steps=None, metrics=None, name=None):
"""Evaluates given model with provided evaluation data.

Evaluates on the given input data. If `input_fn` is provided, that
input function should raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`) after one epoch of the training data has been provided.

By default, the whole evaluation dataset is used. If `steps` is provided,
only `steps` batches of size `batch_size` are processed.
Stop conditions - we evaluate on the given input data until one of the
following:
- If `steps` is provided, and `steps` batches of size `batch_size` are
processed.
- If `input_fn` is provided, and it raises an end-of-input
exception (`OutOfRangeError` or `StopIteration`).
- If `x` is provided, and all items in `x` have been processed.

The return value is a dict containing the metrics specified in `metrics`, as
well as an entry `global_step` which contains the value of the global step
for which this evaluation was performed.

Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression). If set,
x: Matrix of shape [n_samples, n_features...] containing the input samples
for fitting the model. Can be iterator that returns arrays of features.
If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs] containing the
target values (class labels in classification, real numbers in
regression). Can be iterator that returns array of targets. If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
`None`. If `steps` is not provided, this should raise `OutOfRangeError`
or `StopIteration` after the desired amount of data (e.g., one epoch)
has been provided. See "Stop conditions" above for specifics.
feed_fn: Function creating a feed dict every time it is called. Called
once per iteration. Must be `None` if `input_fn` is provided.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`, if specified. Must be `None` if `input_fn` is
provided.
steps: Number of steps for which to evaluate model. If `None`, evaluate
until running tensors generated by `metrics` raises an exception.
until `x` is consumed or `input_fn` raises an end-of-input exception.
See "Stop conditions" above for specifics.
metrics: Dict of metric ops to run. If `None`, the default metric
functions are used; if `{}`, no metrics are used. If model has one
output (i.e., returning single prediction), keys are `str`, e.g.
Expand Down
19 changes: 8 additions & 11 deletions tensorflow/contrib/learn/python/learn/monitors.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,6 @@ def step_end(self, step, outputs):
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
Expand Down Expand Up @@ -922,11 +921,10 @@ def signature_fn(self):
def every_n_step_end(self, step, outputs):
super(ExportMonitor, self).every_n_step_end(step, outputs)
try:
export.export_estimator(self._estimator,
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
default_batch_size=self._default_batch_size)
self._estimator.export(self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
default_batch_size=self._default_batch_size)
except (RuntimeError, TypeError):
# Currently we are not synchronized with saving checkpoints, which leads to
# runtime errors when we are calling export on the same global step.
Expand All @@ -941,11 +939,10 @@ def end(self, session=None):
"yet.")
return
try:
export.export_estimator(self._estimator,
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
default_batch_size=self._default_batch_size)
self._estimator.export(self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
default_batch_size=self._default_batch_size)
except (RuntimeError, TypeError):
logging.info("Skipping exporting for the same step.")

Expand Down
19 changes: 3 additions & 16 deletions tensorflow/contrib/learn/python/learn/utils/export.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,31 +165,18 @@ def logistic_regression_signature_fn(examples, unused_features, predictions):


# pylint: disable=protected-access
def _default_input_fn(estimator, examples):
def default_input_fn(estimator, examples):
  """Creates default input parsing using Estimator's feature signatures."""
  # Relies on the Estimator's private feature-parsing hook (covered by the
  # protected-access pylint disable above this function).
  return estimator._get_feature_ops_from_example(examples)


def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
input_fn=default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Exports inference graph into given dir.

Args:
estimator: Estimator to export
export_dir: A string containing a directory to write the exported graph
and checkpoints.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
input_fn: Function that given `Tensor` of `Example` strings, parses it into
features that are then passed to the model.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
"""
"""Deprecated, please use BaseEstimator.export."""
checkpoint_path = tf_saver.latest_checkpoint(estimator._model_dir)
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
Expand Down
5 changes: 5 additions & 0 deletions tensorflow/contrib/makefile/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -553,3 +553,8 @@ $(DEPDIR)/%.d: ;
.PRECIOUS: $(DEPDIR)/%.d

-include $(patsubst %,$(DEPDIR)/%.d,$(basename $(TF_CC_SRCS)))

# Optionally pull in extra makefiles (e.g. per-target test makefiles).
# `make` splits SUB_MAKEFILES on whitespace, so the listed paths must not
# contain spaces.
ifdef SUB_MAKEFILES
$(warning "include sub makefiles, must not contain white spaces in the path:" $(SUB_MAKEFILES))
include $(SUB_MAKEFILES)
endif
58 changes: 38 additions & 20 deletions tensorflow/contrib/makefile/build_all_android.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,48 +13,66 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This is a composite scprit to build all for Android OS
# This is a composite script to build all for Android OS

set -e

# Print command-line help for this script and exit with a failure status.
usage() {
  echo "Usage: NDK_ROOT=<path to ndk root> $(basename "$0") [-t:]"
  echo "-s [sub_makefiles] sub makefiles separated by white space"
  echo "-t [build_target] build target for Android makefile [default=all]"
  echo "-T only build tensorflow"
  echo "-x use hexagon library located at ../hexagon/<libs and include>"
  exit 1
}

while getopts "x" opt_name; do
case "$opt_name" in
x) USE_HEXAGON=true;;
*) usage;;
esac
done

if [[ -z "${NDK_ROOT}" ]]; then
echo "NDK_ROOT should be set as an environment variable" 1>&2
exit 1
fi

while getopts "s:t:Tx" opt_name; do
case "$opt_name" in
s) SUB_MAKEFILES="${OPTARG}";;
t) BUILD_TARGET="${OPTARG}";;
T) ONLY_MAKE_TENSORFLOW="true";;
x) USE_HEXAGON="true";;
*) usage;;
esac
done
shift $((OPTIND - 1))

# Make sure we're in the correct directory, at the root of the source tree.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd ${SCRIPT_DIR}/../../../

# Remove any old files first.
make -f tensorflow/contrib/makefile/Makefile clean
rm -rf tensorflow/contrib/makefile/downloads
if [[ "${ONLY_MAKE_TENSORFLOW}" != "true" ]]; then
# Remove any old files first.
make -f tensorflow/contrib/makefile/Makefile clean
rm -rf tensorflow/contrib/makefile/downloads

# Pull down the required versions of the frameworks we need.
tensorflow/contrib/makefile/download_dependencies.sh
# Pull down the required versions of the frameworks we need.
tensorflow/contrib/makefile/download_dependencies.sh

# Compile protobuf for the target Android device architectures.
CC_PREFIX="${CC_PREFIX}" NDK_ROOT="${NDK_ROOT}" \
# Compile protobuf for the target Android device architectures.
CC_PREFIX="${CC_PREFIX}" NDK_ROOT="${NDK_ROOT}" \
tensorflow/contrib/makefile/compile_android_protobuf.sh -c
fi

if [[ "${USE_HEXAGON}" == "true" ]]; then
HEXAGON_PARENT_DIR=$(cd ../hexagon && pwd)
HEXAGON_LIBS="${HEXAGON_PARENT_DIR}/libs"
HEXAGON_INCLUDE="${HEXAGON_PARENT_DIR}/include"
fi

make -f tensorflow/contrib/makefile/Makefile \
TARGET=ANDROID NDK_ROOT="${NDK_ROOT}" CC_PREFIX="${CC_PREFIX}" \
HEXAGON_LIBS="${HEXAGON_LIBS}" HEXAGON_INCLUDE="${HEXAGON_INCLUDE}"
if [[ -z "${BUILD_TARGET}" ]]; then
make -f tensorflow/contrib/makefile/Makefile \
TARGET=ANDROID NDK_ROOT="${NDK_ROOT}" CC_PREFIX="${CC_PREFIX}" \
HEXAGON_LIBS="${HEXAGON_LIBS}" HEXAGON_INCLUDE="${HEXAGON_INCLUDE}" \
SUB_MAKEFILES="${SUB_MAKEFILES}"
else
make -f tensorflow/contrib/makefile/Makefile \
TARGET=ANDROID NDK_ROOT="${NDK_ROOT}" CC_PREFIX="${CC_PREFIX}" \
HEXAGON_LIBS="${HEXAGON_LIBS}" HEXAGON_INCLUDE="${HEXAGON_INCLUDE}" \
SUB_MAKEFILES="${SUB_MAKEFILES}" "${BUILD_TARGET}"
fi
1 change: 1 addition & 0 deletions tensorflow/contrib/makefile/download_dependencies.sh
Original file line number Diff line number Diff line change
Expand Up @@ -61,5 +61,6 @@ eigen-latest/Eigen/src/Core/arch/NEON/Complex.h
git clone https://github.com/google/re2.git re2
git clone https://github.com/google/gemmlowp.git gemmlowp
git clone https://github.com/google/protobuf.git protobuf
git clone https://github.com/google/googletest.git googletest

echo "download_dependencies.sh completed successfully."
27 changes: 27 additions & 0 deletions tensorflow/contrib/makefile/test/test_main.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// A program with a main() suitable for unit tests.
// It is designed to be linked into unit-test binaries built by the Makefile,
// which provide the tests but no entry point of their own.

#include <iostream>
#include "tensorflow/core/platform/test.h"

// Initializes googletest from the command-line flags, runs every registered
// test, and returns the aggregate result as the process exit code.
GTEST_API_ int main(int argc, char** argv) {
  std::cout << "Running main() from test_main.cc" << std::endl;
  testing::InitGoogleTest(&argc, argv);

  return RUN_ALL_TESTS();
}
Loading