From b51db3c15c6c3388327ef45c1c9c49b9fd7d9098 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Thu, 22 Feb 2024 03:33:22 -0800
Subject: [PATCH] Automated Code Change

PiperOrigin-RevId: 609312750
---
 .../kernels/sparse_conditional_accumulator.h  |  2 +-
 .../sparse_conditional_accumulator_op.cc      |  4 ++--
 tensorflow/core/kernels/sparse_cross_op.cc    |  4 ++--
 tensorflow/core/kernels/sparse_reduce_op.cc   |  2 +-
 .../kernels/sparse_tensor_dense_add_op.cc     |  2 +-
 .../kernels/sparse_tensor_dense_matmul_op.cc  |  2 +-
 .../core/kernels/sparse_tensors_map_ops.cc    | 10 ++++-----
 tensorflow/core/kernels/sparse_to_dense_op.cc |  2 +-
 tensorflow/core/kernels/sparse_utils.cc       | 10 ++++-----
 tensorflow/core/kernels/sparse_xent_op.cc     |  4 ++--
 .../kernels/spectrogram_convert_test_data.cc  |  2 +-
 tensorflow/core/kernels/squared-loss.h        |  2 +-
 tensorflow/core/kernels/stack.cc              |  8 +++----
 tensorflow/core/kernels/stage_op.cc           |  8 +++----
 .../core/kernels/stateful_random_ops.cc       |  8 +++----
 .../core/kernels/stateless_random_ops.cc      |  2 +-
 .../core/kernels/stateless_random_ops_v2.h    |  2 +-
 .../kernels/stateless_random_ops_v2_util.h    |  2 +-
 tensorflow/core/kernels/string_util.cc        |  4 ++--
 tensorflow/core/kernels/summary_image_op.cc   |  2 +-
 tensorflow/core/kernels/summary_kernels.cc    |  2 +-
 tensorflow/core/kernels/tensor_array.cc       |  2 +-
 tensorflow/core/kernels/tensor_array.h        | 22 +++++++++----------
 tensorflow/core/kernels/tensor_array_ops.cc   |  8 +++----
 tensorflow/core/kernels/tensor_flag_utils.cc  |  8 +++----
 tensorflow/core/kernels/tensor_list_util.cc   |  4 ++--
 tensorflow/core/kernels/tensor_map.cc         |  2 +-
 .../core/kernels/text_line_reader_op.cc       |  8 +++----
 .../core/kernels/tf_record_reader_op.cc       |  8 +++----
 tensorflow/core/kernels/training_op_helpers.h | 14 ++++++------
 tensorflow/core/kernels/transpose_functor.h   |  4 ++--
 tensorflow/core/kernels/transpose_op.cc       |  2 +-
 tensorflow/core/kernels/typed_queue.h         |  2 +-
 .../core/kernels/unary_ops_composition.cc     |  2 +-
 tensorflow/core/kernels/unicode_ops.cc        |  2 +-
 35 files changed, 86 insertions(+), 86 deletions(-)

diff --git a/tensorflow/core/kernels/sparse_conditional_accumulator.h b/tensorflow/core/kernels/sparse_conditional_accumulator.h
index 3b809f03e95dc2..e41caf8e0e4a45 100644
--- a/tensorflow/core/kernels/sparse_conditional_accumulator.h
+++ b/tensorflow/core/kernels/sparse_conditional_accumulator.h
@@ -140,7 +140,7 @@ class SparseConditionalAccumulator
       }
     }
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   void AllocateAndAssignToAccumGradFunction(
diff --git a/tensorflow/core/kernels/sparse_conditional_accumulator_op.cc b/tensorflow/core/kernels/sparse_conditional_accumulator_op.cc
index 46137639b1d1d7..7a915cded37527 100644
--- a/tensorflow/core/kernels/sparse_conditional_accumulator_op.cc
+++ b/tensorflow/core/kernels/sparse_conditional_accumulator_op.cc
@@ -37,7 +37,7 @@ class SparseConditionalAccumulatorOp : public ConditionalAccumulatorBaseOp {
           new SparseConditionalAccumulator(
               dtype_, shape_, cinfo_.name(), reduction_type_);
       *ret = accumulator;
-      return OkStatus();
+      return absl::OkStatus();
     };
   }
 
@@ -45,7 +45,7 @@ class SparseConditionalAccumulatorOp : public ConditionalAccumulatorBaseOp {
   // it with cond2 otherwise.
   Status CheckSignature(OpKernelContext* ctx) override {
     TF_RETURN_IF_ERROR(ctx->MatchSignature({}, {DT_STRING_REF}));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   void SetHandleToOutput(OpKernelContext* ctx)
diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc
index 9235ebe9efa74b..bc8d3e3b329e9d 100644
--- a/tensorflow/core/kernels/sparse_cross_op.cc
+++ b/tensorflow/core/kernels/sparse_cross_op.cc
@@ -591,7 +591,7 @@ Status ValidateInput(const OpInputList& indices_list_in,
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Extracts data about the features and populates feature data.
@@ -733,7 +733,7 @@ Status CreateOutputTensors(
   shape_vec(0) = batch_size;
   shape_vec(1) = max_cross_count;
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc
index 9e040fe7224420..d64d7829a65fc1 100644
--- a/tensorflow/core/kernels/sparse_reduce_op.cc
+++ b/tensorflow/core/kernels/sparse_reduce_op.cc
@@ -129,7 +129,7 @@ Status ValidateInputs(const Tensor *shape_t, const Tensor *reduction_axes_t) {
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 struct SumOp {
diff --git a/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc b/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
index 80bb5a42e3b0aa..1d8b3b0156c756 100644
--- a/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
+++ b/tensorflow/core/kernels/sparse_tensor_dense_add_op.cc
@@ -91,7 +91,7 @@ Status ValidateInputs(const Tensor *a_indices, const Tensor *a_values,
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
diff --git a/tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc b/tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc
index 73870c5b37c2b1..04aff711362552 100644
--- a/tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc
+++ b/tensorflow/core/kernels/sparse_tensor_dense_matmul_op.cc
@@ -319,7 +319,7 @@ Status SparseTensorDenseMatMulImpl(
     }
 #undef LOOP_NNZ
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
index 841ed2cc36eaac..f14af265464f48 100644
--- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc
+++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
@@ -68,7 +68,7 @@ class SparseTensorsMap : public ResourceBase {
           gtl::InlinedVector(sp.shape().begin(), sp.shape().end())};
       *handle = unique_st_handle;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status RetrieveAndClearSparseTensors(
@@ -95,7 +95,7 @@ class SparseTensorsMap : public ResourceBase {
       }
     }
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  protected:
@@ -128,7 +128,7 @@ class SparseTensorAccessingOp : public OpKernel {
     if (sparse_tensors_map_) {
       *sparse_tensors_map = sparse_tensors_map_;
-      return OkStatus();
+      return absl::OkStatus();
     }
 
     TF_RETURN_IF_ERROR(cinfo_.Init(ctx->resource_manager(), def(),
@@ -137,7 +137,7 @@ class SparseTensorAccessingOp : public OpKernel {
     CreatorCallback sparse_tensors_map_creator = [this](SparseTensorsMap** c) {
       SparseTensorsMap* map = new SparseTensorsMap(cinfo_.name());
       *c = map;
-      return OkStatus();
+      return absl::OkStatus();
     };
 
     TF_RETURN_IF_ERROR(
@@ -146,7 +146,7 @@ class SparseTensorAccessingOp : public OpKernel {
                             sparse_tensors_map_creator));
 
     *sparse_tensors_map = sparse_tensors_map_;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
diff --git a/tensorflow/core/kernels/sparse_to_dense_op.cc b/tensorflow/core/kernels/sparse_to_dense_op.cc
index f6fe495b637078..048461daede0db 100644
--- a/tensorflow/core/kernels/sparse_to_dense_op.cc
+++ b/tensorflow/core/kernels/sparse_to_dense_op.cc
@@ -85,7 +85,7 @@ Status CheckSparseToDenseShapes(const Tensor& indices,
   if (!TensorShapeUtils::IsScalar(default_value.shape())) {
     return errors::InvalidArgument("default_value should be a scalar.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // end namespace
diff --git a/tensorflow/core/kernels/sparse_utils.cc b/tensorflow/core/kernels/sparse_utils.cc
index cf39f8102cb0dd..d9a2850e596519 100644
--- a/tensorflow/core/kernels/sparse_utils.cc
+++ b/tensorflow/core/kernels/sparse_utils.cc
@@ -176,7 +176,7 @@ Status ValidateSparseTensorShape(const Tensor& indices, const Tensor& values,
                                    shape.NumElements(), ") do not match");
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Creates a debug string for the index tuple in indices(row, :).
@@ -215,7 +215,7 @@ Status ValidateSparseTensorIndicesUnordered(const Tensor& indices,
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Ensures all sparse indices are within correct bounds and are
@@ -229,7 +229,7 @@ Status ValidateSparseTensorIndicesOrdered(const Tensor& indices,
   int64_t ndims = indices.dim_size(1);
 
   if (nnz == 0) {
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // First set of indices must be within range.
@@ -282,7 +282,7 @@ Status ValidateSparseTensorIndicesOrdered(const Tensor& indices,
     }
   }  // for i in [1, nnz)
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
@@ -300,7 +300,7 @@ Status ValidateSparseTensor(const Tensor& indices, const Tensor& values,
     case IndexValidation::kNone: {
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 #define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \
diff --git a/tensorflow/core/kernels/sparse_xent_op.cc b/tensorflow/core/kernels/sparse_xent_op.cc
index 01f8ab42a2c506..4ece900f6c5b95 100644
--- a/tensorflow/core/kernels/sparse_xent_op.cc
+++ b/tensorflow/core/kernels/sparse_xent_op.cc
@@ -34,7 +34,7 @@ typedef Eigen::GpuDevice GPUDevice;
 
 template
 Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) {
-  if (labels.NumElements() == 0) return OkStatus();
+  if (labels.NumElements() == 0) return absl::OkStatus();
   const auto label_values = labels.vec();
   int64_t bad_index;
   auto min_max_dim_value = std::minmax_element(
@@ -47,7 +47,7 @@ Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) {
         " which is outside the valid range of [0, ", max_index,
         "). Label values: ", labels.SummarizeValue(labels.NumElements()));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
diff --git a/tensorflow/core/kernels/spectrogram_convert_test_data.cc b/tensorflow/core/kernels/spectrogram_convert_test_data.cc
index 18ce56fb52bb39..1878eb5999b505 100644
--- a/tensorflow/core/kernels/spectrogram_convert_test_data.cc
+++ b/tensorflow/core/kernels/spectrogram_convert_test_data.cc
@@ -34,7 +34,7 @@ Status ConvertCsvToRaw(const string& input_filename) {
                             input_filename);
   }
   LOG(INFO) << "Wrote raw file to " << output_filename;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace wav
diff --git a/tensorflow/core/kernels/squared-loss.h b/tensorflow/core/kernels/squared-loss.h
index 7222813bbbf823..3a0f6d2abb2253 100644
--- a/tensorflow/core/kernels/squared-loss.h
+++ b/tensorflow/core/kernels/squared-loss.h
@@ -64,7 +64,7 @@ class SquaredLossUpdater : public DualLossUpdater {
 
   // Labels don't require conversion for linear regression.
   Status ConvertLabel(float* const example_label) const final {
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/core/kernels/stack.cc b/tensorflow/core/kernels/stack.cc
index 1ee4f4268d43ed..90eaf2efebe1bd 100644
--- a/tensorflow/core/kernels/stack.cc
+++ b/tensorflow/core/kernels/stack.cc
@@ -63,7 +63,7 @@ class Stack : public ResourceBase {
                                      "its max_size (", max_size_, ")");
     }
     stack_.push_back(value);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status Pop(TensorAndAllocation* value) {
@@ -75,7 +75,7 @@ class Stack : public ResourceBase {
     }
     *value = stack_.back();
     stack_.pop_back();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // We don't swap the first tensor on the stack and any subsequent tensors
@@ -121,7 +121,7 @@ class Stack : public ResourceBase {
       return errors::InvalidArgument("Stack[", stack_name_,
                                      "] has already been closed.");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
@@ -147,7 +147,7 @@ Status GetStack(OpKernelContext* ctx, Stack** stack) {
     return errors::Internal("No step container.");
   }
   TF_RETURN_IF_ERROR(step_container->Lookup(rm, key, stack));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }
diff --git a/tensorflow/core/kernels/stage_op.cc b/tensorflow/core/kernels/stage_op.cc
index 5d20ea3536004b..63d84513b3f5e1 100644
--- a/tensorflow/core/kernels/stage_op.cc
+++ b/tensorflow/core/kernels/stage_op.cc
@@ -82,7 +82,7 @@ class Buffer : public ResourceBase {
     // we should wake them all.
     non_empty_cond_var_.notify_all();
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Get tuple at front of the buffer
@@ -115,7 +115,7 @@ class Buffer : public ResourceBase {
       tuple->push_back(tensor);
     }
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Buffer size
@@ -187,13 +187,13 @@ Status GetBuffer(OpKernelContext* ctx, const NodeDef& ndef, Buffer** buf) {
     TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "capacity", &capacity));
    TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "memory_limit", &memory_limit));
     *ret = new Buffer(capacity, memory_limit);
-    return OkStatus();
+    return absl::OkStatus();
   };
 
   TF_RETURN_IF_ERROR(cinfo.Init(rm, ndef, true /* use name() */));
   TF_RETURN_IF_ERROR(rm->LookupOrCreate(cinfo.container(), cinfo.name(), buf,
                                         create_fn));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
diff --git a/tensorflow/core/kernels/stateful_random_ops.cc b/tensorflow/core/kernels/stateful_random_ops.cc
index 80f2f9ae0805ba..ef54ced28e7e6e 100644
--- a/tensorflow/core/kernels/stateful_random_ops.cc
+++ b/tensorflow/core/kernels/stateful_random_ops.cc
@@ -65,7 +65,7 @@ Status CheckState(const Tensor& state) {
     return errors::InvalidArgument(
         "RNG state must have one and only one dimension, not ", state.dims());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 Status CheckPhiloxState(const Tensor& state, int64_t alg_tag_skip = 0) {
@@ -80,7 +80,7 @@ Status CheckPhiloxState(const Tensor& state, int64_t alg_tag_skip = 0) {
         " must be at least ", min_size, "; got ", state.NumElements());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
@@ -149,7 +149,7 @@ Status UpdateVariableAndFill(
       arg.state_tensor = var_tensor;
       functor::UpdateVariableAndFill_Philox()(
           ctx, ctx->eigen_device(), dist, &arg, output_data);
-      return OkStatus();
+      return absl::OkStatus();
     case ConcreteRngAlgorithm::RNG_ALG_THREEFRY:
       return errors::Unimplemented(
           "Non-XLA devices don't support the ThreeFry algorithm.");
@@ -202,7 +202,7 @@ Status GetScalar(const Tensor& tensor, int input_idx, T* result) {
         ", not ", DataTypeString(tensor.dtype()));
   }
   *result = tensor.flat()(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
diff --git a/tensorflow/core/kernels/stateless_random_ops.cc b/tensorflow/core/kernels/stateless_random_ops.cc
index beb2391b03f7de..1ce076e4c1a0d1 100644
--- a/tensorflow/core/kernels/stateless_random_ops.cc
+++ b/tensorflow/core/kernels/stateless_random_ops.cc
@@ -62,7 +62,7 @@ Status GenerateKey(Tensor seed, random::PhiloxRandom::Key* out_key,
   (*out_counter)[0] = (*out_counter)[1] = 0;
   (*out_counter)[2] = mix[2];
   (*out_counter)[3] = mix[3];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 StatelessRandomOpBase::StatelessRandomOpBase(OpKernelConstruction* context)
diff --git a/tensorflow/core/kernels/stateless_random_ops_v2.h b/tensorflow/core/kernels/stateless_random_ops_v2.h
index f88e5330041f6b..b566f490fdd6fb 100644
--- a/tensorflow/core/kernels/stateless_random_ops_v2.h
+++ b/tensorflow/core/kernels/stateless_random_ops_v2.h
@@ -38,7 +38,7 @@ inline Status CheckKeyCounterShape(int minimum_counter_size,
         "; got shape: ", counter_shape.DebugString(),
         ". (Note that batched counters are not supported yet.)");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // A base class for kernels of stateless RNG ops that take shape, key, counter
diff --git a/tensorflow/core/kernels/stateless_random_ops_v2_util.h b/tensorflow/core/kernels/stateless_random_ops_v2_util.h
index 8744d848e869ba..c606a90fec23e2 100644
--- a/tensorflow/core/kernels/stateless_random_ops_v2_util.h
+++ b/tensorflow/core/kernels/stateless_random_ops_v2_util.h
@@ -41,7 +41,7 @@ Status GetScalar(const Tensor& tensor, int input_idx, T* result) {
         ", not ", DataTypeString(tensor.dtype()));
   }
   *result = tensor.flat()(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 inline StatusOr >
diff --git a/tensorflow/core/kernels/string_util.cc b/tensorflow/core/kernels/string_util.cc
index a0486aa6c925c5..105a89f589a0fe 100644
--- a/tensorflow/core/kernels/string_util.cc
+++ b/tensorflow/core/kernels/string_util.cc
@@ -31,7 +31,7 @@ Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding) {
         strings::StrCat("Invalid encoding \"", str,
                         "\": Should be one of: UTF-8, UTF-16-BE, UTF-32-BE"));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Sets unit value based on str.
@@ -44,7 +44,7 @@ Status ParseCharUnit(const string& str, CharUnit* unit) {
     return errors::InvalidArgument(strings::StrCat(
         "Invalid unit \"", str, "\": Should be one of: BYTE, UTF8_CHAR"));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Return the number of Unicode characters in a UTF-8 string.
diff --git a/tensorflow/core/kernels/summary_image_op.cc b/tensorflow/core/kernels/summary_image_op.cc
index c2ab2ab585b6fc..a68bf724cf9efc 100644
--- a/tensorflow/core/kernels/summary_image_op.cc
+++ b/tensorflow/core/kernels/summary_image_op.cc
@@ -173,7 +173,7 @@ class SummaryImageOp : public OpKernel {
         return errors::Internal("PNG encoding failed");
       }
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   template
diff --git a/tensorflow/core/kernels/summary_kernels.cc b/tensorflow/core/kernels/summary_kernels.cc
index e2348ca7a75953..81d30e8dbee42b 100644
--- a/tensorflow/core/kernels/summary_kernels.cc
+++ b/tensorflow/core/kernels/summary_kernels.cc
@@ -97,7 +97,7 @@ class CreateSummaryDbWriterOp : public OpKernel {
           TF_RETURN_IF_ERROR(SetupTensorboardSqliteDb(db));
           TF_RETURN_IF_ERROR(CreateSummaryDbWriter(
               db, experiment_name, run_name, user_name, ctx->env(), s));
-          return OkStatus();
+          return absl::OkStatus();
         }));
   }
 };
diff --git a/tensorflow/core/kernels/tensor_array.cc b/tensorflow/core/kernels/tensor_array.cc
index 644e6c373aaf05..fa24b716a9c822 100644
--- a/tensorflow/core/kernels/tensor_array.cc
+++ b/tensorflow/core/kernels/tensor_array.cc
@@ -111,7 +111,7 @@ Status TensorArray::CopyShapesFrom(TensorArray* rhs,
     tensors_[i].written = true;
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace tensorflow
diff --git a/tensorflow/core/kernels/tensor_array.h b/tensorflow/core/kernels/tensor_array.h
index 97e4cd45b085fe..1081c2be8a08a8 100644
--- a/tensorflow/core/kernels/tensor_array.h
+++ b/tensorflow/core/kernels/tensor_array.h
@@ -202,7 +202,7 @@ class TensorArray : public ResourceBase {
       ++i;
       TF_RETURN_IF_ERROR(s);
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Read from index 'index' into Tensor 'value'.
@@ -238,7 +238,7 @@ class TensorArray : public ResourceBase {
       ++i;
       if (!s.ok()) return s;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   DataType ElemType() const { return dtype_; }
@@ -256,7 +256,7 @@ class TensorArray : public ResourceBase {
       return s;
     }
     element_shape_ = new_element_shape_;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   string DebugString() const override {
@@ -275,7 +275,7 @@ class TensorArray : public ResourceBase {
     mutex_lock l(mu_);
     TF_RETURN_IF_ERROR(LockedReturnIfClosed());
     *size = tensors_.size();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Record the size of the TensorArray after an unpack or split.
@@ -285,7 +285,7 @@ class TensorArray : public ResourceBase {
     if (!is_grad_) {
      marked_size_ = size;
    }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Return the marked size of the TensorArray.
@@ -293,7 +293,7 @@ class TensorArray : public ResourceBase {
     mutex_lock l(mu_);
     TF_RETURN_IF_ERROR(LockedReturnIfClosed());
     *size = marked_size_;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Return the size that should be used by pack or concat op.
@@ -301,7 +301,7 @@ class TensorArray : public ResourceBase {
     mutex_lock l(mu_);
     TF_RETURN_IF_ERROR(LockedReturnIfClosed());
     *size = is_grad_ ? marked_size_ : tensors_.size();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Once a TensorArray is being used for gradient calculations, it
@@ -367,7 +367,7 @@ class TensorArray : public ResourceBase {
      return errors::InvalidArgument("TensorArray ", handle_.vec()(1),
                                     " has already been closed.");
    }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   const string key_;
@@ -508,7 +508,7 @@ Status TensorArray::LockedWriteOrAggregate(OpKernelContext* ctx,
     // was just a shape, which just means zeros. So all we must do in this
     // case is copy the reference over and return early.
       t.tensor = *value;
-      return OkStatus();
+      return absl::OkStatus();
     }
 
     Tensor* existing_t = &t.tensor;
@@ -536,7 +536,7 @@ Status TensorArray::LockedWriteOrAggregate(OpKernelContext* ctx,
     t.shape = value->shape();
     t.written = true;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
@@ -619,7 +619,7 @@ Status TensorArray::LockedRead(OpKernelContext* ctx, const int32_t index,
     t.cleared = true;
   }
   t.read = true;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace tensorflow
diff --git a/tensorflow/core/kernels/tensor_array_ops.cc b/tensorflow/core/kernels/tensor_array_ops.cc
index dc4f1ca9400bf2..5d1322e6f4b7d6 100644
--- a/tensorflow/core/kernels/tensor_array_ops.cc
+++ b/tensorflow/core/kernels/tensor_array_ops.cc
@@ -75,7 +75,7 @@ Status GetHandle(OpKernelContext* ctx, string* container, string* ta_handle) {
     *container = h(0);
     *ta_handle = h(1);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 Status GetTensorArray(OpKernelContext* ctx, TensorArray** tensor_array) {
@@ -88,7 +88,7 @@ Status GetTensorArray(OpKernelContext* ctx, TensorArray** tensor_array) {
     ScopedStepContainer* sc = ctx->step_container();
     if (sc == nullptr) return errors::Internal("No step container.");
     TF_RETURN_IF_ERROR(sc->Lookup(rm, container + ta_handle, tensor_array));
-    return OkStatus();
+    return absl::OkStatus();
   } else {
     return LookupResource(ctx, HandleFromInput(ctx, 0), tensor_array);
   }
@@ -100,7 +100,7 @@ Status SetupFlowControlInputs(OpKernelContext* ctx, bool set_output) {
   if (set_output) {
     TF_RETURN_IF_ERROR(ctx->set_output("flow_out", *flow_control));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // CREATION *******************************************************************
@@ -220,7 +220,7 @@ class TensorArrayOp : public TensorArrayCreationOp {
     *output_tensor_array = tensor_array;
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
diff --git a/tensorflow/core/kernels/tensor_flag_utils.cc b/tensorflow/core/kernels/tensor_flag_utils.cc
index 2f0165d08a3911..974c4622a69a89 100644
--- a/tensorflow/core/kernels/tensor_flag_utils.cc
+++ b/tensorflow/core/kernels/tensor_flag_utils.cc
@@ -25,7 +25,7 @@ Status ValidateSparseMatrixShardingConfig(const Tensor& config) {
   if (TensorShapeUtils::IsScalar(config.shape())) {
     const float scalar_config = config.template scalar()();
     if (0 < scalar_config && scalar_config <= 1.0) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     return Status(
         absl::StatusCode::kInvalidArgument,
@@ -69,7 +69,7 @@ Status ValidateSparseMatrixShardingConfig(const Tensor& config) {
           config_matrix(i, 2), " in row ", i);
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
@@ -89,7 +89,7 @@ Status ValidateScalarQuantityShardingConfig(const Tensor& config) {
   if (TensorShapeUtils::IsScalar(config.shape())) {
     const float scalar_config = config.template scalar()();
     if (0 < scalar_config && scalar_config <= 1.0) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     return Status(
         absl::StatusCode::kInvalidArgument,
@@ -126,7 +126,7 @@ Status ValidateScalarQuantityShardingConfig(const Tensor& config) {
           config_matrix(i, 1), " in row ", i);
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
diff --git a/tensorflow/core/kernels/tensor_list_util.cc b/tensorflow/core/kernels/tensor_list_util.cc
index 34aa1d35d9af6e..7dc0d01b56b61d 100644
--- a/tensorflow/core/kernels/tensor_list_util.cc
+++ b/tensorflow/core/kernels/tensor_list_util.cc
@@ -61,7 +61,7 @@ Status TensorListBinaryAdd(
     TF_RETURN_IF_ERROR(binary_add_func(c, a_tensor, b_tensor, &out_tensor));
     out->tensors().push_back(out_tensor);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 Status TensorListZerosLike(
@@ -77,7 +77,7 @@ Status TensorListZerosLike(
     TF_RETURN_IF_ERROR(zeros_like_func(c, t, &out_tensor));
     y->tensors().emplace_back(out_tensor);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace tensorflow
diff --git a/tensorflow/core/kernels/tensor_map.cc b/tensorflow/core/kernels/tensor_map.cc
index 94a22fbfcabe93..a95d256cff92f4 100644
--- a/tensorflow/core/kernels/tensor_map.cc
+++ b/tensorflow/core/kernels/tensor_map.cc
@@ -53,7 +53,7 @@ static Status TensorMapDeviceCopy(
     TF_RETURN_IF_ERROR(copy(p.second, &to_val));
     to->tensors().emplace(to_key, to_val);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 #define REGISTER_LIST_COPY(DIRECTION) \
diff --git a/tensorflow/core/kernels/text_line_reader_op.cc b/tensorflow/core/kernels/text_line_reader_op.cc
index ae05e581ed09b9..89b56cb1853bd7 100644
--- a/tensorflow/core/kernels/text_line_reader_op.cc
+++ b/tensorflow/core/kernels/text_line_reader_op.cc
@@ -46,16 +46,16 @@ class TextLineReader : public ReaderBase {
       if (absl::IsOutOfRange(status)) {
         // We ignore an end of file error when skipping header lines.
         // We will end up skipping this file.
-        return OkStatus();
+        return absl::OkStatus();
       }
       TF_RETURN_IF_ERROR(status);
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status OnWorkFinishedLocked() override {
     input_buffer_.reset(nullptr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status ReadLocked(tstring* key, tstring* value, bool* produced,
@@ -69,7 +69,7 @@ class TextLineReader : public ReaderBase {
     }
     if (absl::IsOutOfRange(status)) {  // End of file, advance to the next.
       *at_end = true;
-      return OkStatus();
+      return absl::OkStatus();
     } else {  // Some other reading error
       return status;
     }
diff --git a/tensorflow/core/kernels/tf_record_reader_op.cc b/tensorflow/core/kernels/tf_record_reader_op.cc
index 416bc22b9413d2..9126139afc6b65 100644
--- a/tensorflow/core/kernels/tf_record_reader_op.cc
+++ b/tensorflow/core/kernels/tf_record_reader_op.cc
@@ -43,13 +43,13 @@ class TFRecordReader : public ReaderBase {
     io::RecordReaderOptions options =
         io::RecordReaderOptions::CreateRecordReaderOptions(compression_type_);
     reader_.reset(new io::RecordReader(file_.get(), options));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status OnWorkFinishedLocked() override {
     reader_.reset(nullptr);
     file_.reset(nullptr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status ReadLocked(tstring* key, tstring* value, bool* produced,
@@ -58,11 +58,11 @@ class TFRecordReader : public ReaderBase {
     Status status = reader_->ReadRecord(&offset_, value);
     if (absl::IsOutOfRange(status)) {
       *at_end = true;
-      return OkStatus();
+      return absl::OkStatus();
     }
     if (!status.ok()) return status;
     *produced = true;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Status ResetLocked() override {
diff --git a/tensorflow/core/kernels/training_op_helpers.h b/tensorflow/core/kernels/training_op_helpers.h
index fa26c829566a49..69447348f067a5 100644
--- a/tensorflow/core/kernels/training_op_helpers.h
+++ b/tensorflow/core/kernels/training_op_helpers.h
@@ -45,7 +45,7 @@ template
 Status EnsureSparseVariableAccess(OpKernelContext* ctx, Var* var,
                                   bool lock_held = false) {
   if (var->copy_on_read_mode.load()) {
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   std::optional ml;
@@ -58,7 +58,7 @@ Status EnsureSparseVariableAccess(OpKernelContext* ctx, Var* var,
   // copy-on-read mode is false.
   if (var->tensor()->RefCountIsOne()) {
     var->copy_on_read_mode.store(true);
-    return OkStatus();
+    return absl::OkStatus();
   }
   Tensor tmp;
   if (std::is_same::value) {
@@ -84,7 +84,7 @@ Status EnsureSparseVariableAccess(OpKernelContext* ctx, Var* var,
   }
   *var->tensor() = tmp;
   var->copy_on_read_mode.store(true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Utility structure that releases a sequence of borrowed mutexes when it is
@@ -249,7 +249,7 @@ Status PrepareToUpdateVariable(OpKernelContext* ctx, Tensor* tensor,
     }
     *tensor = tmp;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // This gives you `*out`, a tensor you can update, corresponding to a variable
@@ -269,15 +269,15 @@ Status GetInputTensorFromVariable(OpKernelContext* ctx, int input,
     if (sparse) {
       TF_RETURN_IF_ERROR(EnsureSparseVariableAccess(ctx, var.get()));
       *out = *var->tensor();
-      return OkStatus();
+      return absl::OkStatus();
     }
     TF_RETURN_IF_ERROR(PrepareToUpdateVariable(
         ctx, var->tensor(), var->copy_on_read_mode.load()));
     *out = *var->tensor();
-    return OkStatus();
+    return absl::OkStatus();
   }
   *out = ctx->mutable_input(input, lock_held);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // end namespace tensorflow
diff --git a/tensorflow/core/kernels/transpose_functor.h b/tensorflow/core/kernels/transpose_functor.h
index 2969918c33df1f..d640d051a40f4d 100644
--- a/tensorflow/core/kernels/transpose_functor.h
+++ b/tensorflow/core/kernels/transpose_functor.h
@@ -235,14 +235,14 @@ Status DoTransposeImpl(const Device& d, const Tensor& in,
     default:
       return errors::Unimplemented("Unsupported dtype on CPU: ", in.dtype());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
 inline Status DoMatrixTransposeImpl(const Device& device, const Tensor& in,
                                     bool conjugate, Tensor* out) {
   const int ndims = in.dims();
-  if (ndims == 0) return OkStatus();
+  if (ndims == 0) return absl::OkStatus();
   TransposePermsVec perm(ndims);
   std::iota(perm.begin(), perm.end(), 0);
   std::swap(perm[ndims - 2], perm[ndims - 1]);
diff --git a/tensorflow/core/kernels/transpose_op.cc b/tensorflow/core/kernels/transpose_op.cc
index df129f78c889f3..e3719aab6c648e 100644
--- a/tensorflow/core/kernels/transpose_op.cc
+++ b/tensorflow/core/kernels/transpose_op.cc
@@ -107,7 +107,7 @@ Status PermutationHelper(const Tensor& perm, const int dims,
       reinterpret_cast(Vperm.data());
   *permutation = std::vector(perm_begin, perm_begin + dims);
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
diff --git a/tensorflow/core/kernels/typed_queue.h b/tensorflow/core/kernels/typed_queue.h
index f11029cddadba2..2e67261841859d 100644
--- a/tensorflow/core/kernels/typed_queue.h
+++ b/tensorflow/core/kernels/typed_queue.h
@@ -68,7 +68,7 @@ Status TypedQueue::Initialize() {
   for (int i = 0; i < num_components(); ++i) {
     queues_.push_back(SubQueue());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template
diff --git a/tensorflow/core/kernels/unary_ops_composition.cc b/tensorflow/core/kernels/unary_ops_composition.cc
index 4805e6c2aef9dc..98684f382ecd21 100644
--- a/tensorflow/core/kernels/unary_ops_composition.cc
+++ b/tensorflow/core/kernels/unary_ops_composition.cc
@@ -69,7 +69,7 @@ struct UnaryOpsCompositionBase {
       *cost += reg.cost;
     }
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   std::unordered_map compute_fns;
diff --git a/tensorflow/core/kernels/unicode_ops.cc b/tensorflow/core/kernels/unicode_ops.cc
index b884b5fd354b23..3d59cc034480b3 100644
--- a/tensorflow/core/kernels/unicode_ops.cc
+++ b/tensorflow/core/kernels/unicode_ops.cc
@@ -237,7 +237,7 @@ Status GetErrorOptions(OpKernelConstruction* ctx, ErrorOptions* out) {
                                     &(out->replace_control_chars)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 inline bool ShouldHandleFormatError(const ErrorOptions& error_options,
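
Note on the change (not part of the original patch): every hunk above performs the same mechanical substitution, replacing the unqualified OkStatus() helper with absl::OkStatus() from the Abseil status library. The sketch below is a minimal, hypothetical illustration of the before/after shape of a touched call site; it assumes only "absl/status/status.h", and the function name is invented for the example rather than taken from the patch.

#include "absl/status/status.h"

// Hypothetical validation helper (illustration only, not from the patch):
// on success it returns an OK status, written the way the patch rewrites
// each return site.
absl::Status ValidateSomething(bool ok) {
  if (!ok) {
    return absl::InvalidArgumentError("validation failed");
  }
  return absl::OkStatus();  // previously spelled as the unqualified OkStatus()
}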