NFC - minor spelling tweaks under lite/toco directory #37696

Merged
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/dump_graphviz.cc
@@ -647,7 +647,7 @@ void DumpNode(const Model& model, string* output_file, const string& node_name,

for (const auto& child : node.children) {
if (!child.second->array_id.empty()) {
-      // Dump array if this node posesses one.
+      // Dump array if this node possesses one.
DumpArray(model, output_file, child.second->array_id);
}
// Note that it is always possible to have children. Unlike a filesystem,
@@ -67,7 +67,7 @@ ::tensorflow::Status ConvertTrivialTransposeToReshape::Run(Model* model,
}
// Note: We can assume we have error checked inputs in PropagateFixedSizes.

-  // Check that the permutation has propogated.
+  // Check that the permutation has propagated.
std::vector<int> const& perm = transpose_op->perm;
if (perm.empty()) {
return ::tensorflow::Status::OK();
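For context: this transformation rewrites a transpose as a plain reshape when the permutation never reorders the data in memory. A minimal sketch of that test, with an illustrative helper name that is not TOCO's actual code:

```cpp
#include <vector>

// A transpose is equivalent to a reshape (for row-major layout) when the
// dimensions of size > 1 keep their relative order under the permutation;
// unit dimensions can move freely since they carry no data.
bool TransposeIsEffectivelyReshape(const std::vector<int>& shape,
                                   const std::vector<int>& perm) {
  int last_non_unit = -1;
  for (int src_dim : perm) {
    if (shape[src_dim] == 1) continue;  // unit dims never move data
    if (src_dim < last_non_unit) return false;
    last_non_unit = src_dim;
  }
  return true;
}
```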
@@ -159,7 +159,7 @@ DECLARE_GRAPH_TRANSFORMATION(PropagateFixedSizes)
DECLARE_GRAPH_TRANSFORMATION(HardcodeMinMax)
DECLARE_GRAPH_TRANSFORMATION(Quantize)
DECLARE_GRAPH_TRANSFORMATION(RemoveFinalDequantizeOp)
-DECLARE_GRAPH_TRANSFORMATION(RemoveSuccesiveTranspose)
+DECLARE_GRAPH_TRANSFORMATION(RemoveSuccessiveTranspose)
DECLARE_GRAPH_TRANSFORMATION(RemoveTensorFlowAssert)
DECLARE_GRAPH_TRANSFORMATION(RemoveTensorFlowIdentity)
DECLARE_GRAPH_TRANSFORMATION(RemoveTrivialBinaryOperator)
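Each of these declarations comes from a macro that stamps out one GraphTransformation subclass per pass, which is why the misspelled name had to change in the header, the implementation, the tests, and the registration site together. A simplified sketch of the pattern (the real macro lives earlier in this header and may differ in detail):

```cpp
// Simplified sketch only; not the exact macro from graph_transformations.h.
// The Run signature matches the one visible in this PR's diff.
#define DECLARE_GRAPH_TRANSFORMATION(GTName)                     \
  class GTName : public GraphTransformation {                    \
   public:                                                       \
    ::tensorflow::Status Run(Model* model, std::size_t op_index, \
                             bool* modified) override;           \
    const char* Name() const override { return #GTName; }        \
  };
```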
@@ -225,8 +225,8 @@ ::tensorflow::Status IdentifyDilatedConv::Run(Model* model,
dilation_factor);
if (changed) {
LOG(INFO)
<< "Replaced sub-netork with Dilated DepthwiseConv2D op outputting \""
<< conv_base_op->outputs[0] << "\".";
<< "Replaced sub-network with Dilated DepthwiseConv2D op outputting "
<< "\"" << conv_base_op->outputs[0] << "\".";
}
}

@@ -245,7 +245,7 @@ bool RecursivelyForwardPropagateDataType(GraphTransformation* transformation,
// This can be thought of as a bidirectional flood-fill of the num_bits implied
// final_data_type that terminates at other FakeQuant ops (and a few others as
// determined by DoesOpBlockBackwardPropagation/DoesOpBlockForwardPropagation).
-// Once all FakeQuant ops have been visted the arrays should all have
+// Once all FakeQuant ops have been visited the arrays should all have
// appropriate final_data_types if the source graph was annotated with the
// proper FakeQuant ops.
//
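The bidirectional flood-fill this comment describes can be pictured with a small, self-contained toy; the graph structures below are hypothetical stand-ins, not TOCO's Model:

```cpp
#include <deque>
#include <vector>

// Toy graph: nodes joined by bidirectional edges. "Seed" nodes play the
// role of FakeQuant ops; "blocking" nodes absorb the fill without passing
// it on, like the terminating ops named in the comment above.
struct ToyNode {
  bool seeds = false;          // analogous to a FakeQuant op
  bool blocks = false;         // analogous to DoesOpBlock*Propagation
  int data_type = -1;          // -1 == not yet annotated
  std::vector<int> neighbors;  // producers and consumers alike
};

void FloodFillDataTypes(std::vector<ToyNode>& graph, int seeded_type) {
  std::deque<int> worklist;
  for (int i = 0; i < static_cast<int>(graph.size()); ++i) {
    if (graph[i].seeds) {
      graph[i].data_type = seeded_type;
      worklist.push_back(i);
    }
  }
  while (!worklist.empty()) {
    int cur = worklist.front();
    worklist.pop_front();
    for (int nb : graph[cur].neighbors) {
      if (graph[nb].data_type != -1) continue;  // already visited
      graph[nb].data_type = seeded_type;
      if (!graph[nb].blocks) worklist.push_back(nb);  // keep spreading
    }
  }
}
```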
@@ -671,7 +671,7 @@ void ProcessConcatenationOperator(Model* model, ConcatenationOperator* op) {
break;
}
}
-  // Determine the concat size, and enfore that all inputs have
+  // Determine the concat size, and enforce that all inputs have
// the same dimensions count.
int concat_size = 0;
for (const auto& input_name : op->inputs) {
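The rule the surrounding loop enforces is the standard one for concatenation: every input has the same rank, non-axis dimensions agree, and sizes along the concat axis add up. A hedged sketch of that shape rule (not the TOCO routine itself):

```cpp
#include <cassert>
#include <vector>

// Illustrative only: output shape of concatenating `input_shapes` along
// dimension `axis`.
std::vector<int> ConcatOutputShape(
    const std::vector<std::vector<int>>& input_shapes, int axis) {
  std::vector<int> out = input_shapes[0];
  for (size_t i = 1; i < input_shapes.size(); ++i) {
    assert(input_shapes[i].size() == out.size());  // same dimensions count
    for (int d = 0; d < static_cast<int>(out.size()); ++d) {
      if (d == axis) {
        out[d] += input_shapes[i][d];  // the concat size accumulates
      } else {
        assert(input_shapes[i][d] == out[d]);  // other dims must agree
      }
    }
  }
  return out;
}
```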
@@ -1098,7 +1098,7 @@ void ProcessUnidirectionalSequenceLstmOperator(
constexpr int kInputActivationStateTensor = 18;
constexpr int kInputCellStateTensor = 19;

-  // TFlite intepreter does not support array which is variable and contains a
+  // TFlite interpreter does not support array which is variable and contains a
// buffer (see b/115961645 for more discussion).
// The follow block remove buffer from the array to work around the
// restriction, as a consequence, downstream applications should not
@@ -1142,7 +1142,7 @@ void ProcessUnidirectionalSequenceRnnOperator(
}

constexpr int kHiddenStateTensor = 4;
-  // TFlite intepreter does not support array which is variable and contains a
+  // TFlite interpreter does not support array which is variable and contains a
// buffer (see b/115961645 for more discussion).
// The follow block remove buffer from the array to work around the
// restriction, as a consequence, downstream applications should not
@@ -1658,7 +1658,7 @@ void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) {
}

if (op->ellipsis_mask != 0) {
-    // Something like LOG_FIRST_N(WARNING, 10) would be prefferable to reduce
+    // Something like LOG_FIRST_N(WARNING, 10) would be preferable to reduce
// log noise. However, the TensorFlow logging library does not appear to
// support this.
LOG(WARNING) << "Skipping StridedSlice op with output \"" << op->outputs[0]
@@ -2434,7 +2434,7 @@ ::tensorflow::Status PropagateFixedSizes::Run(Model* model,
break;
case OperatorType::kCTCBeamSearchDecoder:
// The sizes of the outputs are only known in runtime based on the input.
-      // Ignore shape progapation here and defer that to the interpreter.
+      // Ignore shape propagation here and defer that to the interpreter.
break;
case OperatorType::kMatrixSetDiagV2:
// MatrixSetDiagV2 operators are converted to MatrixSetDiag,
@@ -229,7 +229,7 @@ bool IsArrayQuantizedRangeSubset(GraphTransformation* transformation,
ChooseQuantizationParamsForArrayAndQuantizedDataType(
array, quantized_data_type, &quantization_params);
transformation->AddMessageF(
"No quantization params - infering from data type %s with minmax "
"No quantization params - inferring from data type %s with minmax "
"%g,%g as zero_point=%g, scale=%g",
ArrayDataTypeName(quantized_data_type), array.minmax->min,
array.minmax->max, quantization_params.zero_point,
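For reference, inferring zero_point and scale from a min/max range follows the standard affine quantization scheme; a minimal sketch under that assumption, not the exact logic of ChooseQuantizationParamsForArrayAndQuantizedDataType:

```cpp
#include <algorithm>
#include <cmath>

struct QuantParams { double scale; int zero_point; };

// Standard affine scheme: real = scale * (quantized - zero_point).
// The uint8 qmin/qmax defaults are purely for illustration.
QuantParams InferFromMinMax(double rmin, double rmax,
                            int qmin = 0, int qmax = 255) {
  rmin = std::min(rmin, 0.0);  // representable range must include 0
  rmax = std::max(rmax, 0.0);
  double scale = (rmax - rmin) / static_cast<double>(qmax - qmin);
  if (scale == 0.0) return {1.0, qmin};  // degenerate all-zero range
  // Pick the quantized value that maps exactly onto real 0, then clamp.
  int zp = static_cast<int>(std::lround(qmin - rmin / scale));
  zp = std::max(qmin, std::min(qmax, zp));
  return {scale, zp};
}
```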
@@ -56,9 +56,9 @@ void ReplaceOpInputsWith(Model* model, const string& lookfor,

} // namespace

-::tensorflow::Status RemoveSuccesiveTranspose::Run(Model* model,
-                                                   std::size_t op_index,
-                                                   bool* modified) {
+::tensorflow::Status RemoveSuccessiveTranspose::Run(Model* model,
+                                                    std::size_t op_index,
+                                                    bool* modified) {
*modified = false;
auto op = model->operators.begin() + op_index;
if (op->get()->type != OperatorType::kTranspose) {
@@ -94,7 +94,7 @@ TEST_F(RemoveSuccessiveTransposeTest, RemoveTranspose) {
// Creating a model.
CreateGraph({1, 0}, {1, 0});

-  toco::RemoveSuccesiveTranspose transformation;
+  toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), /*op_index=*/1, &modified).ok());
EXPECT_TRUE(modified);
@@ -109,7 +109,7 @@ TEST_F(RemoveSuccessiveTransposeTest, DontRemoveNotIdentityTranspose) {
// Creating a model.
CreateGraph({0, 2, 1}, {1, 0, 2});

-  toco::RemoveSuccesiveTranspose transformation;
+  toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), /*op_index=*/1, &modified).ok());
EXPECT_FALSE(modified);
@@ -139,7 +139,7 @@ TEST_F(RemoveSuccessiveTransposeTest, DontRemoveTransposeOutputUnused) {
transpose2_op->outputs = {"InputTransposeTranspose"};
model_->operators.push_back(std::unique_ptr<toco::Operator>(transpose2_op));

-  toco::RemoveSuccesiveTranspose transformation;
+  toco::RemoveSuccessiveTranspose transformation;
bool modified;
ASSERT_TRUE(transformation.Run(model_.get(), /*op_index=*/1, &modified).ok());
EXPECT_FALSE(modified);
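The invariant these tests exercise: two back-to-back transposes cancel exactly when composing their permutations yields the identity. A sketch of that check (the helper name is illustrative, not TOCO's):

```cpp
#include <vector>

// Applying perm1 then perm2 means output dim i reads input dim
// perm1[perm2[i]]; the pair cancels iff that equals i for every i.
bool ComposesToIdentity(const std::vector<int>& perm1,
                        const std::vector<int>& perm2) {
  if (perm1.size() != perm2.size()) return false;
  for (int i = 0; i < static_cast<int>(perm2.size()); ++i) {
    if (perm1[perm2[i]] != i) return false;
  }
  return true;
}
// {1, 0} then {1, 0} composes to the identity (the first test removes the
// pair); {0, 2, 1} then {1, 0, 2} does not (the second test keeps it).
```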
@@ -177,7 +177,7 @@ ::tensorflow::Status UnrollBatchMatMul::Run(Model* model, std::size_t op_index,

CHECK_EQ(input_array_a.shape().dims(dims_a - 1),
input_array_b.shape().dims(dims_b - 2))
<< "Input dimensions must be compatible for multipication. shape a = ["
<< "Input dimensions must be compatible for multiplication. shape a = ["
<< absl::StrJoin(input_array_a.shape().dims(), ", ") << "], shape b = ["
<< absl::StrJoin(input_array_b.shape().dims(), ", ") << "]";

2 changes: 1 addition & 1 deletion tensorflow/lite/toco/logging/gen_html.py
@@ -136,7 +136,7 @@ def generate(self,
dot_after: A string, the dot representation of the model after
the conversion.
toco_err_log: A string, the logs emitted by TOCO during conversion. Caller
-        need to ensure that this string is properly anoynimized (any kind of
+        need to ensure that this string is properly anonymized (any kind of
user data should be eliminated).
tflite_graph_path: A string, the filepath to the converted TFLite model.

4 changes: 2 additions & 2 deletions tensorflow/lite/toco/model.h
@@ -490,7 +490,7 @@ struct ConvOperator : Operator {
// inputs[4]: optional: merge repeated.
//
// Outputs:
-//   outputs[0]: deocoded.
+//   outputs[0]: decoded.
// outputs[1]: log probability.
//
// TensorFlow equivalent: CTCBeamSearchDecoder
@@ -1258,7 +1258,7 @@ struct ExpandDimsOperator : Operator {
ExpandDimsOperator() : Operator(OperatorType::kExpandDims) {}
};

-// Ceates a tensor of shape dims and fills it with the given scalar value.
+// Creates a tensor of shape dims and fills it with the given scalar value.
// Output type will be the same as the given scalar value.
//
// Inputs:
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/python/toco_from_protos.py
@@ -21,7 +21,7 @@
import sys

# We need to import pywrap_tensorflow prior to the toco wrapper.
-# pylint: disable=invalud-import-order,g-bad-import-order
+# pylint: disable=invalid-import-order,g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python import _pywrap_toco_api
from tensorflow.python.platform import app
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/python/toco_from_protos_test.py
@@ -85,7 +85,7 @@ def test_toco(self):
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")
out2 = tf.sin(val, name="out2")
-      # This is a valid mdoel
+      # This is a valid model
self._run(sess, img, out, True)
# This uses an invalid function.
# TODO(aselle): Check to make sure a warning is included.
5 changes: 3 additions & 2 deletions tensorflow/lite/toco/tflite/export.cc
@@ -53,7 +53,7 @@ namespace {

// Check if a TensorFlow Op is a control flow op by its name.
bool IsControlFlowOp(const string& tensorflow_op) {
-  // Technically this is equalivent to `::tensorflow::Node::IsControlFlow()`.
+  // Technically this is equivalent to `::tensorflow::Node::IsControlFlow()`.
// It requires to construct a `::tensorflow::Graph` to use that helper
// function, so we simply hardcode the list of control flow ops here.
if (tensorflow_op == "Switch" || tensorflow_op == "RefSwitch" ||
@@ -477,7 +477,8 @@ tensorflow::Status Export(
for (const string& input_array : model.GetInvalidInputArrays()) {
if (model.HasArray(input_array)) {
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Placeholder ", input_array, " should be specied by input_arrays."));
"Placeholder ", input_array, " should be specified by "
"input_arrays."));
}
}

2 changes: 1 addition & 1 deletion tensorflow/lite/toco/tflite/operator_test.cc
@@ -37,7 +37,7 @@ class OperatorTest : public ::testing::Test {
static auto* by_name = new OpsByName(BuildOperatorByNameMap());
static auto* by_type = new OpsByType(BuildOperatorByTypeMap());

-    // Make sure the two maps were consitently built.
+    // Make sure the two maps were consistently built.
CHECK(by_name->count(name)) << "No operator for '" << name << "'.";
BaseOperator* op1 = by_name->at(name).get();
CHECK(op1->type() == type) << "while verifying '" << name << "'.";
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/toco_cmdline_flags.cc
@@ -171,7 +171,7 @@ bool ParseTocoFlagsFromCommandLineFlags(
"Ignored if the output format is not TFLite."),
Flag("quantize_to_float16", parsed_flags.quantize_to_float16.bind(),
parsed_flags.quantize_to_float16.default_value(),
"Used in conjuction with post_training_quantize. Specifies that "
"Used in conjunction with post_training_quantize. Specifies that "
"the weights should be quantized to fp16 instead of the default "
"(int8)"),
Flag("quantize_weights", parsed_flags.quantize_weights.bind(),
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/toco_convert_test.cc
@@ -39,7 +39,7 @@ TEST(TocoTest, BadInputFormat) {
"Unhandled input_format='FILE_FORMAT_UNKNOWN'");
}

-TEST(TocoTest, MissingOuputArrays) {
+TEST(TocoTest, MissingOutputArrays) {
TocoFlags toco_flags;
ModelFlags model_flags;

6 changes: 3 additions & 3 deletions tensorflow/lite/toco/toco_tooling.cc
@@ -67,7 +67,7 @@ void MakeGeneralGraphTransformationsSet(
transformations->Add(new PropagateActivationFunctionIntoConstants);
transformations->Add(new PropagateArrayDataTypes);
transformations->Add(new PropagateFixedSizes);
-  transformations->Add(new RemoveSuccesiveTranspose);
+  transformations->Add(new RemoveSuccessiveTranspose);
transformations->Add(new RemoveTensorFlowAssert);
transformations->Add(new RemoveTensorFlowIdentity);
transformations->Add(new RemoveTrivialConcatenation);
@@ -415,10 +415,10 @@ tensorflow::Status TransformWithStatus(const TocoFlags& toco_flags,
// is:
// Input [1, 20, 1, 20, 1, 64] * ones [1, 3, 1, 3, 1, 1]
// The problem is if the input is quantized, then the quantization parameters
-  // will be slightly different for the input and the output. (althought the
+  // will be slightly different for the input and the output. (although the
// difference is really small).
// But, since we're changing this pattern to be pack-based which enforce
-  // the quantization paramters to be exactly the same.
+  // the quantization parameters to be exactly the same.
// So we have to wait for all quantization parameters being resolved and
// propagated and create our own.
// We may need to revisit this logic later.
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/tooling_util.cc
@@ -929,7 +929,7 @@ void CheckNonExistentIOArrays(const Model& model) {
}
static constexpr char general_comment[] =
"Is it a typo? This should not happen. If you trigger this error "
"please send a bug report (with code to reporduce this error), to the "
"please send a bug report (with code to reproduce this error), to the "
"TensorFlow Lite team.";
for (const string& output_array : model.flags.output_arrays()) {
if (IsConstantParameterArray(model, output_array)) {