Fix typos #18475

Merged May 3, 2018 (1 commit)
2 changes: 1 addition & 1 deletion tensorflow/compiler/xla/service/copy_insertion.cc
@@ -65,7 +65,7 @@ struct SpecialCaseCopyPolicy {
 // output tuple.
 bool copy_root_replicated_buffers = false;
 // If true, insert a copy if a buffer coming from a constant or a parameter
-// is found wihtin the output tuple.
+// is found within the output tuple.
 bool copy_parameters_and_constants = false;
 };

2 changes: 1 addition & 1 deletion tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -1193,7 +1193,7 @@ class HloEvaluator::TypedVisitor : public DfsHloVisitorWithDefault {
 // specifically:
 // - For lhs, the non-contracting dimensions, including the batch
 // dimension have the same index as the `result_index`.
-// - For rhs, the batch dimension is set seperately from other
+// - For rhs, the batch dimension is set separately from other
 // non-contracting dimensions, since these other non-contracting
 // dimensions in rhs follow the non-contracting dimensions of lhs in
 // the resulting index.
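
The comment in this hunk is describing the index bookkeeping of a batched dot. As a minimal NumPy sketch of that mapping (illustrative only, not the HloEvaluator code): for a result index (b, m, n) of a [b, m, k] x [b, k, n] dot, lhs reuses (b, m) straight from the result index, while rhs takes n from the position after lhs's non-contracting dimensions and has its batch index b set separately.

import numpy as np

def batched_dot(lhs, rhs):
  b, m, k = lhs.shape  # lhs: [batch, non-contracting, contracting]
  _, _, n = rhs.shape  # rhs: [batch, contracting, non-contracting]
  out = np.zeros((b, m, n), dtype=lhs.dtype)
  for bi in range(b):
    for mi in range(m):
      for ni in range(n):  # result_index = (bi, mi, ni)
        for ki in range(k):
          # lhs index (bi, mi, ki): bi and mi read off result_index.
          # rhs index (bi, ki, ni): bi is set separately, and ni
          # follows lhs's non-contracting dims in result_index.
          out[bi, mi, ni] += lhs[bi, mi, ki] * rhs[bi, ki, ni]
  return out

lhs = np.arange(12.0).reshape(2, 2, 3)
rhs = np.arange(24.0).reshape(2, 3, 4)
assert np.allclose(batched_dot(lhs, rhs), np.matmul(lhs, rhs))
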
2 changes: 1 addition & 1 deletion tensorflow/contrib/kfac/python/ops/optimizer.py
@@ -115,7 +115,7 @@ def __init__(self,
 self._estimation_mode = estimation_mode
 self._colocate_gradients_with_ops = colocate_gradients_with_ops
 
-# The below paramaters are required only if damping needs to be adapated.
+# The below parameters are required only if damping needs to be adapated.
 # These parameters can be set by calling
 # set_damping_adaptation_params() explicitly.
 self._damping_adaptation_decay = 0.95
@@ -2918,7 +2918,7 @@ inline void Concatenation(int concat_dim, const uint8* const* input_data,
 const int32 output_zeropoint,
 const float output_scale) {
 // The arguments input_zeropoint and input_scale are expected to be an array
-// that have the quantization paramaters for all the inputs to the concat
+// that have the quantization parameters for all the inputs to the concat
 // operator.
 gemmlowp::ScopedProfilingLabel label("Concatenation");
 TFLITE_DCHECK_GT(inputs_count, 1);
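
For context, a minimal NumPy sketch of why this kernel needs per-input quantization parameters (it assumes plain uint8 affine quantization and is not the TFLite implementation): each input carries its own zero point and scale, so values must be mapped through real space into the output's quantization before being joined.

import numpy as np

def quantized_concat(inputs, input_zeropoints, input_scales,
                     output_zeropoint, output_scale, axis=0):
  rescaled = []
  for x, zp, scale in zip(inputs, input_zeropoints, input_scales):
    real = (x.astype(np.float32) - zp) * scale  # dequantize input i
    q = np.round(real / output_scale) + output_zeropoint  # requantize
    rescaled.append(np.clip(q, 0, 255).astype(np.uint8))
  return np.concatenate(rescaled, axis=axis)

a = np.array([10, 20, 30], dtype=np.uint8)
b = np.array([40, 50, 60], dtype=np.uint8)
print(quantized_concat([a, b], [0, 5], [0.5, 0.25], 0, 0.5))
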
@@ -1464,7 +1464,7 @@ inline void Concatenation(int concat_dim, const uint8* const* input_data,
 const int32 output_zeropoint,
 const float output_scale) {
 // The arguments input_zeropoint and input_scale are expected to be an array
-// that have the quantization paramaters for all the inputs to the concat
+// that have the quantization parameters for all the inputs to the concat
 // operator.
 TFLITE_DCHECK_GT(inputs_count, 1);
 int64_t concat_size = 0;
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/testing/generate_examples.py
@@ -112,7 +112,7 @@


 class ExtraTocoOptions(object):
-"""Additonal toco options besides input, output, shape."""
+"""Additional toco options besides input, output, shape."""
 
 def __init__(self):
 # Whether to ignore control dependency nodes.
2 changes: 1 addition & 1 deletion tensorflow/contrib/lite/toco/toco_flags.proto
@@ -121,7 +121,7 @@ message TocoFlags {
 // transformations that are necessary in order to generate inference
 // code for these graphs. Such graphs should be fixed, but as a
 // temporary work-around, setting this reorder_across_fake_quant flag
-// allows toco to perform necessary graph transformaitons on them,
+// allows toco to perform necessary graph transformations on them,
 // at the cost of no longer faithfully matching inference and training
 // arithmetic.
 optional bool reorder_across_fake_quant = 8;
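
As a hedged illustration of setting this flag from Python (the generated-module import path below is an assumption; only the field name comes from the message above):

# Assumed import path for the generated bindings of toco_flags.proto.
from tensorflow.contrib.lite.toco import toco_flags_pb2

flags = toco_flags_pb2.TocoFlags()
flags.reorder_across_fake_quant = True  # accept the arithmetic mismatch
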
@@ -146,7 +146,7 @@ def test1Workers2Period(self):
 self.assertAllEqual(1.0, sessions[0].run(global_var_1))
 self.assertAllEqual(0, sessions[0].run(global_step))
 
-# iteration 2, global varibale update
+# iteration 2, global variable update
 thread_0 = self.checkedThread(
 target=self._run, args=(train_ops[0], sessions[0]))
 thread_1 = self.checkedThread(
@@ -176,7 +176,7 @@ def slice_arrays(arrays, indices, contiguous=True):
"""Slices batches out of provided arrays (workaround for eager tensors).

Unfortunately eager tensors don't have the same slicing behavior as
Numpy arrays (they folow the same slicing behavior as symbolic TF tensors),
Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
hence we cannot use `generic_utils.slice_arrays` directly
and we have to implement this workaround based on `concat`. This has a
performance cost.
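
A minimal sketch of the concat-based workaround this docstring describes, assuming TensorFlow with eager execution enabled (illustrative, not the Keras helper itself):

import tensorflow as tf

def slice_batch(tensor, indices):
  # Eager tensors reject NumPy-style fancy indexing with an arbitrary
  # index list, so assemble the batch row by row and concatenate;
  # the extra copy is the performance cost the docstring mentions.
  return tf.concat([tensor[i:i + 1] for i in indices], axis=0)

x = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
print(slice_batch(x, [2, 0]))  # rows 2 and 0, in that order
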