Fix typos #21086

Merged
2 commits merged on Aug 9, 2018
2 changes: 1 addition & 1 deletion tensorflow/contrib/autograph/operators/control_flow.py
@@ -141,7 +141,7 @@ def while_cond(epoch_number, iterate, *state):
while_body,
init_state=(epoch_number, iterate) + init_state,
extra_deps=())
- # Dropping the epoch number and iterate because they are not not syntactically
+ # Dropping the epoch number and iterate because they are not syntactically
# visible.
results = results[2:]

@@ -39,7 +39,7 @@
class Definition(object):
"""Definition objects describe a unique definition of a variable.

- Subclasses of this may be used by passing an appropriate factory fuction to
+ Subclasses of this may be used by passing an appropriate factory function to
resolve.

Attributes:
4 changes: 2 additions & 2 deletions tensorflow/contrib/bigtable/python/ops/bigtable_api.py
@@ -331,7 +331,7 @@ def parallel_scan_prefix(self,
"""Retrieves row (including values) from the Bigtable service at high speed.

Rows with row-key prefixed by `prefix` will be retrieved. This method is
- similar to `scan_prefix`, but by constrast performs multiple sub-scans in
+ similar to `scan_prefix`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.

Note: The dataset produced by this method is not deterministic!
@@ -390,7 +390,7 @@ def parallel_scan_range(self,
"""Retrieves rows (including values) from the Bigtable service.

Rows with row-keys between `start` and `end` will be retrieved. This method
- is similar to `scan_range`, but by constrast performs multiple sub-scans in
+ is similar to `scan_range`, but by contrast performs multiple sub-scans in
parallel in order to achieve higher performance.

Note: The dataset produced by this method is not deterministic!
@@ -150,7 +150,7 @@ def _project_stochastic_matrix_wrt_euclidean_norm(matrix):
"matrix must be two dimensional (instead is %d-dimensional)" %
matrix_shape.ndims)
if matrix_shape[0] != matrix_shape[1]:
raise ValueError("matrix must be be square (instead has shape (%d,%d))" %
raise ValueError("matrix must be square (instead has shape (%d,%d))" %
(matrix_shape[0], matrix_shape[1]))
dimension = matrix_shape[0].value
if dimension is None:
4 changes: 2 additions & 2 deletions tensorflow/contrib/distributions/python/ops/sample_stats.py
@@ -134,7 +134,7 @@ def auto_correlation(
x_len = util.prefer_static_shape(x_rotated)[-1]

# TODO(langmore) Investigate whether this zero padding helps or hurts. At
- # the moment is is necessary so that all FFT implementations work.
+ # the moment is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = math_ops.cast(x_len, np.float64)
@@ -198,7 +198,7 @@ def auto_correlation(
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
- # divide by by to make this an unbiased estimate of the expectation
+ # divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = math_ops.cast(x_len, dtype.real_dtype)
max_lags = math_ops.cast(max_lags, dtype.real_dtype)
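
The two hunks above explain why `auto_correlation` zero pads to the next power of 2 (so that every FFT implementation accepts the length) and why lag m is divided by the number of nonzero terms. Below is a minimal NumPy sketch of the same idea, not the TensorFlow implementation; the function name and the use of NumPy are illustrative assumptions.

```python
# Illustrative sketch only: FFT-based autocorrelation with the padding and
# unbiased normalization described in the comments above.
import numpy as np

def auto_correlation_sketch(x, max_lags):
    x = np.asarray(x, dtype=np.float64)
    n = len(x)
    # Next power of 2 >= 2 * n, i.e. 2**ceil(log2(2 * n)); padding this far
    # avoids circular wrap-around and keeps every FFT backend happy.
    fft_len = int(2 ** np.ceil(np.log2(2.0 * n)))
    f = np.fft.rfft(x, fft_len)  # rfft zero pads x up to fft_len
    r = np.fft.irfft(f * np.conj(f), fft_len)[:max_lags + 1]
    # R[m] sums (n - m) nonzero products x[k] * x[k - m]; dividing by (n - m)
    # makes each lag an unbiased estimate of E[X[k] X[k - m]].
    return r / (n - np.arange(max_lags + 1))
```
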
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/toco/model.h
@@ -2009,7 +2009,7 @@ class Model {
std::size_t transient_data_size = 0;
// For code-generation only: required alignment of the transient_data buffer
std::size_t transient_data_alignment = 0;
- // Arithmatic operations performed in the model.
+ // Arithmetic operations performed in the model.
int64 ops_count = 0;

private:
2 changes: 1 addition & 1 deletion tensorflow/contrib/model_pruning/python/layers/layers.py
@@ -139,7 +139,7 @@ def masked_convolution(inputs,
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
- dimensions of of the filters. Can be a single integer to specify the same
+ dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
4 changes: 2 additions & 2 deletions tensorflow/contrib/quantize/python/quant_ops_test.py
@@ -75,7 +75,7 @@ def testMovingAvgQuantizeTrainingAssign(self):
self.assertGreater(max_value, 0.0)
self.assertLess(max_value, 1.0)

- def testVariablesNotParitioned_LastValue(self):
+ def testVariablesNotPartitioned_LastValue(self):
# Variables added should not use a default partiioner since they are
# scalar. There would be a tensorflow error thrown if the partitioner was
# respected by the rewrite.
@@ -90,7 +90,7 @@ def testVariablesNotParitioned_LastValue(self):
is_training=True,
vars_collection=_MIN_MAX_VARS)

- def testVariablesNotParitioned_MovingAvg(self):
+ def testVariablesNotPartitioned_MovingAvg(self):
# Variables added should not use a default partiioner since they are
# scalar. There would be a tensorflow error thrown if the partitioner was
# respected by the rewrite.
2 changes: 1 addition & 1 deletion tensorflow/core/framework/function.h
@@ -456,7 +456,7 @@ class FunctionLibraryRuntime {

// This interface is EXPERIMENTAL and subject to change.
//
- // Instatiates the function using an executor of the given type. If empty,
+ // Instantiates the function using an executor of the given type. If empty,
// the default TensorFlow executor will be used.
string executor_type;
};
4 changes: 2 additions & 2 deletions tensorflow/python/ops/image_ops_test.py
@@ -1956,7 +1956,7 @@ def testZeroLengthInput(self):
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])

- # The orignal error message does not contain back slashes. However, they
+ # The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also needs to be changed.
self._assertRaises(
@@ -2985,7 +2985,7 @@ def testZeroLengthInput(self):
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])

- # The orignal error message does not contain back slashes. However, they
+ # The original error message does not contain back slashes. However, they
# are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also needs to be changed.
self._assertRaises(
2 changes: 1 addition & 1 deletion tensorflow/python/ops/parallel_for/pfor.py
@@ -2117,7 +2117,7 @@ def _convert_print(pfor_input):
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
- # TensorArray corresponds to to the j_th entry of the TensorArray in the i_th
+ # TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
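
The "stacked" convention (case 2a above) can be pictured with a small, purely illustrative NumPy sketch; the array names and shapes here are assumptions, not pfor's actual data layout.

```python
# Toy illustration of the "stacked" convention: row i of the j_th entry of the
# converted array plays the role of the j_th entry of the TensorArray that
# iteration i of the pfor loop would have written.
import numpy as np

num_iters, num_entries, width = 3, 2, 4
converted = np.zeros((num_entries, num_iters, width))

for i in range(num_iters):        # simulated pfor iterations
    for j in range(num_entries):  # simulated TensorArray writes at index j
        converted[j, i] = 10 * i + j

# Entry j=1 as seen by iteration i=2 is exactly what that iteration wrote.
assert np.all(converted[1, 2] == 10 * 2 + 1)
```
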
2 changes: 1 addition & 1 deletion tensorflow/python/training/ftrl.py
@@ -86,7 +86,7 @@ def __init__(self,

if initial_accumulator_value < 0.0:
raise ValueError(
"initial_accumulator_value %f needs to be be positive or zero" %
"initial_accumulator_value %f needs to be positive or zero" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %