typos in tensorflow/core fixed #26235

Closed
wants to merge 22 commits
2 changes: 1 addition & 1 deletion tensorflow/core/framework/node_def_builder_test.cc
@@ -235,7 +235,7 @@ TEST_F(NodeDefBuilderTest, Polymorphic) {
op: "Polymorphic" input: "a"
attr { key: "T" value { type: DT_BOOL } } )proto");

- // Conficting Attr()
+ // Conflicting Attr()
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_STRING),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");

2 changes: 1 addition & 1 deletion tensorflow/core/graph/collective_order.cc
@@ -143,7 +143,7 @@ Status CreateControlDependencies(

// Insert control dependencies defined by `dependency_edges` in `graph`. If
// `order_type` is `kEdges`, insert explicit control edges, else if `order_type`
- // is `kAttrs`, encode depdencies as an attribute on collective node.
+ // is `kAttrs`, encode dependencies as an attribute on collective node.
Status InsertControlDependencies(
Graph* graph, GraphCollectiveOrder order_type,
const absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>&
2 changes: 1 addition & 1 deletion tensorflow/core/graph/graph_constructor_test.cc
@@ -951,7 +951,7 @@ TEST_F(GraphConstructorTest, ImportGraphDef) {
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_EQ(9, graph_.num_edges());

- // Importing again should fail because of node name collissions.
+ // Importing again should fail because of node name collisions.
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;

4 changes: 2 additions & 2 deletions tensorflow/core/graph/mkl_layout_pass_test.cc
@@ -572,7 +572,7 @@ TEST_F(MklLayoutPassTest, Input_ControlEdge_PadWithConv2D_Positive) {
// Test if output control edges does not duplicate after merge.
// If both the merging ops have output control edge to a common op,
// then after merge, the merged op will have only one control edge
- // to that commom op.
+ // to that common op.
// padding is VALID type
// A = input(image), B = input(paddings), C= Pad = input of conv2D,
// D=input(filter), E = Conv2D, Z = Zeta
@@ -1501,7 +1501,7 @@ TEST_F(MklLayoutPassTest, Input_ControlEdge_PadWithFusedConv2D_Positive) {
// ts that there are no duplicate output control edges after merge.
// If both the merging ops have output control edge to a common op,
// then after merge, the merged op will have only one control edge
- // to that commom op. This test only add additional output control edge check
+ // to that common op. This test only add additional output control edge check
// based on the previous test NodeMerge_PadWithFusedConv2D_Positive1
// padding is VALID type
// A = input(image), B = input(paddings), C = Pad(A, B) = input of conv2D,
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
@@ -411,7 +411,7 @@ struct CropAndResizeBackpropImage<GPUDevice, T> {
d.stream(), config.virtual_thread_count, grads_image.data()));
}

- // Configurate interpolation method.
+ // Configure interpolation method.
InterpolationMethod method = BILINEAR;
if (method_name == "nearest") {
method = NEAREST;
@@ -149,7 +149,7 @@ class MaterializedDatasetResource : public ResourceBase {

// A wrapper class for storing an `IndexedDataset` instance in a DT_VARIANT
// tensor. Objects of the wrapper class own a reference on an instance of an
- // `IndexedTensor` and the wrapper's copy constructor and desctructor take care
+ // `IndexedTensor` and the wrapper's copy constructor and destructor take care
// of managing the reference count.
//
// NOTE: This is not a feature-complete implementation of the DT_VARIANT
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/debug_ops_test.cc
@@ -364,7 +364,7 @@ TEST_F(DebugNumericSummaryOpTest, Float_only_valid_values) {
7.33333333333, // variance of non-inf and non-nan elements.
static_cast<double>(DT_FLOAT), // dtype
2.0, // Number of dimensions.
- 2.0, 3.0}); // Dimensoin sizes.
+ 2.0, 3.0}); // Dimension sizes.

test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/dynamic_stitch_op.cc
@@ -167,7 +167,7 @@ class DynamicStitchOpGPU : public DynamicStitchOpImplBase<T> {
// merged that aren't covered by an index in indices. What should we do?
if (first_dim_size > 0) {
// because the collision requirements, we have to deal with
- // collion first before send data to gpu kernel.
+ // collision first before send data to gpu kernel.
// TODO(ekelsen): Instead of doing a serial scan on the CPU to pick the
// last of duplicated indices, it could instead be done of the GPU
// implicitly using atomics to make sure the last index is the final
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
@@ -8,7 +8,7 @@ You may obtain a copy of the License at

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONT OF ANY KIND, either express or implied.
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/fuzzing/string_split_fuzz.cc
@@ -34,7 +34,7 @@ class FuzzStringSplit : public FuzzSession {
Tensor delimiter_tensor(tensorflow::DT_STRING, TensorShape({}));

if (size > 0) {
- // The spec for split is that the delimeter should be 0 or 1 characters.
+ // The spec for split is that the delimiter should be 0 or 1 characters.
// Naturally, fuzz it with something larger. (This omits the possibility
// of handing it a > int32_max size string, which should be tested for in
// an explicit test).
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/hexagon/hexagon_control_wrapper.h
@@ -76,7 +76,7 @@ class HexagonControlWrapper final : public IRemoteFusedGraphExecutor {
// TODO(satok): Use actual data passed by FillInputNode and remove
// std::vector<float> dummy_input_float_{};
std::unordered_map<int, std::vector<uint8>> input_tensor_data_{};
- // Dummy byte array for cosnt node.
+ // Dummy byte array for const node.
// TODO(satok): Remove
std::unordered_map<int, std::vector<uint8>> dummy_const_data_{};

2 changes: 1 addition & 1 deletion tensorflow/core/kernels/mkl_concat_op.cc
@@ -484,7 +484,7 @@ class MklConcatOp : public OpKernel {
output_tensor->flat<uint8>().size() * sizeof(uint8));
}

- // This method finds the most commom format across all MKL inputs
+ // This method finds the most common format across all MKL inputs
// Inputs:
// 1. input_shapes: shapes of input (MKL) tensors.
// 2. concat_dim: concat dimension.
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/mkl_conv_ops.cc
@@ -96,7 +96,7 @@ struct MklConvFwdParams {
typedef mkldnn::convolution_forward::primitive_desc ConvFwdPd;

// With quantization, input, filter, and output can have different types
- // so we use differnt template parameter for each type
+ // so we use different template parameter for each type
template <typename T, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput>
class MklConvFwdPrimitive : public MklPrimitive {
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/tensor_flag_utils.h
@@ -36,7 +36,7 @@ std::vector<Tindices> ParseRowStartIndices(

// Returns Status::OK() if and only if config is a float scalar or a matrix with
// dimensions M x 3. If config is a scalar then config must be in the range
- // [0, 1.0). If confix is a matrix then config must have shape M x 3, all of
+ // [0, 1.0). If config is a matrix then config must have shape M x 3, all of
// its entries must be positive, and entries in the last column may not
// exceed 1.0. If config is a matrix then it may not be empty.
Status ValidateSparseMatrixShardingConfig(const Tensor& config);
2 changes: 1 addition & 1 deletion tensorflow/core/nccl/nccl_manager.h
@@ -189,7 +189,7 @@ class NcclManager {
// the corresponding NCCL/CUDA error string.
Status GetCommunicator(Collective* collective, Communicator** communicator);

- // Adds a participant device to the local `Collective` instance correponding
+ // Adds a participant device to the local `Collective` instance corresponding
// to `collective_key`. Launches the `Collective` if it is ready, which it
// checks by calling `CheckReady()`. Also performs consistency and sanity
// checks before launching.
2 changes: 1 addition & 1 deletion tensorflow/core/platform/default/device_tracer.cc
@@ -560,7 +560,7 @@ void DeviceTracerImpl::AddCorrelationId(uint32 correlation_id,
auto *params = reinterpret_cast<const cuLaunchKernel_params *>(
cbInfo->functionParams);
if (VLOG_IS_ON(2)) {
VLOG(2) << "LAUNCH stream " << params->hStream << " correllation "
VLOG(2) << "LAUNCH stream " << params->hStream << " correlation "
<< cbInfo->correlationId << " kernel " << cbInfo->symbolName;
}
const string annotation =
2 changes: 1 addition & 1 deletion tensorflow/core/platform/strong_hash.h
@@ -24,7 +24,7 @@ namespace tensorflow {
// This is a strong keyed hash function interface for strings.
// The hash function is deterministic on the content of the string within the
// process. The key of the hash is an array of 2 uint64 elements.
- // A strong hash make it dificult, if not infeasible, to compute inputs that
+ // A strong hash make it difficult, if not infeasible, to compute inputs that
// hash to the same bucket.
//
// Usage:
2 changes: 1 addition & 1 deletion tensorflow/core/profiler/internal/tfprof_op.cc
@@ -182,7 +182,7 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts,
// TODO(xpan): Is it the right choice?
root_->formatted_str = display_str;
}
- // Populate the chidren field.
+ // Populate the children field.
auto* pre_pb = root_->mutable_proto();
for (auto& show_node : show_nodes) {
pre_pb->clear_children();
2 changes: 1 addition & 1 deletion tensorflow/core/util/mkl_util.h
@@ -1581,7 +1581,7 @@ inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
- /// dimesion with size 1 is outermost dimension; while dimension with size 4 is
+ /// dimension with size 1 is outermost dimension; while dimension with size 4 is
/// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
2 changes: 1 addition & 1 deletion tensorflow/core/util/proto/descriptors.cc
@@ -25,7 +25,7 @@ namespace {
// Build a `DescriptorPool` from the named file or URI. The file or URI
// must be available to the current TensorFlow environment.
//
- // The file must contiain a serialized `FileDescriptorSet`. See
+ // The file must contain a serialized `FileDescriptorSet`. See
// `GetDescriptorPool()` for more information.
Status GetDescriptorPoolFromFile(
tensorflow::Env* env, const string& filename,