Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Sign-compare warning fixes, batch 1 (fix 2) #40741

Merged
7 changes: 4 additions & 3 deletions tensorflow/core/kernels/batch_kernels.cc
Original file line number Diff line number Diff line change
Expand Up @@ -519,18 +519,19 @@ class BatchResource : public ResourceBase {
std::map<string, std::vector<Tensor>> split_tensors;

DCHECK_EQ(batch->task(0).context->num_outputs(), combined_outputs.size());
if (combined_outputs.size() != batch->task(0).context->num_outputs()) {
int combined_outputs_size = combined_outputs.size();
if (combined_outputs_size != batch->task(0).context->num_outputs()) {
return errors::Internal("Wrong number of batched output tensors");
}

// Generate 'split_tensors' and populate the context outputs.
for (int i = 0; i < combined_outputs.size(); ++i) {
for (int i = 0, iter_limit = combined_outputs.size(); i < iter_limit; ++i) {
const Tensor& output_tensor = combined_outputs[i];
if (output_tensor.shape().dims() == 0) {
return errors::FailedPrecondition(
"Batched output tensor has 0 dimensions");
}
if (output_tensor.shape().dim_size(0) != batch->size() + padding_size) {
if (output_tensor.shape().dim_size(0) != static_cast<long long int>(batch->size() + padding_size)) {
return errors::FailedPrecondition(
"Batched output tensor's 0th dimension does not equal the sum of "
"the 0th dimension sizes of the input tensors");
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/data/prefetch_autotuner.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,13 +40,13 @@ void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) {
case Mode::kDisabled:
return;
case Mode::kUpswing:
if (current_buffer_size == buffer_limit_) {
if (static_cast<tensorflow::int64>(current_buffer_size) == buffer_limit_) {
mode_ = Mode::kDownswing;
}
return;
case Mode::kDownswing:
if (current_buffer_size == 0) {
if (buffer_limit_ >= kBufferLimitThreshold) {
if (buffer_limit_ >= static_cast<tensorflow::int64>(kBufferLimitThreshold)) {
buffer_limit_ += kBufferLimitThreshold;
} else {
buffer_limit_ *= 2;
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/quantization_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ inline void RequantizeManyInNewRangeReference(const qint32* input, int64 count,
// that could be easily adapted for a SIMD implementation. It should also be
// possible to perform all the calculations in 32-bit rather than 64, but
// that's not been implemented yet.
for (size_t index = 0; index < count; ++index) {
for (tensorflow::int64 index = 0; index < count; ++index) {
const int64 input_value = static_cast<int64>(input[index]);
const int64 fp_value =
((input_value * range_scale_fp) >> 32) + input_offset_fp;
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/platform/s3/s3_file_system.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1050,7 +1050,7 @@ Status S3FileSystem::MultiPartCopy(const Aws::String& source,
// wait on the mutex until notify is called
// then check the finished parts as there could be false notifications
multi_part_copy_cv.wait(lock, [&finishedPartStates, num_parts] {
return finishedPartStates.size() == num_parts;
return static_cast<const int>(finishedPartStates.size()) == num_parts;
});
}
// check if there was any error for any part
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/profiler/utils/derived_timeline.cc
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ void DerivedXLineBuilder::ExpandOrAddLevelEvent(const XEvent& event,
}

void DerivedXLineBuilder::ResetLastEvents(int level) {
for (int i = level; i < last_event_by_level_.size(); ++i) {
for (int i = level, iter_limit = last_event_by_level_.size(); i < iter_limit; ++i) {
last_event_by_level_[i] = absl::nullopt;
}
if (level == 0) ResetDependentLines();
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/profiler/utils/derived_timeline.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class DerivedXLineBuilder {
std::vector<DerivedXLineBuilder*> dependent_lines);

void ExpandOrAddEvents(const std::vector<XEvent>& event_per_level) {
for (int level = 0; level < event_per_level.size(); ++level) {
for (size_t level = 0; level < event_per_level.size(); ++level) {
ExpandOrAddLevelEvent(event_per_level[level], level);
}
}
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/profiler/utils/xplane_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ void SortXSpace(XSpace* space) {
// smaller than these value.
void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) {
for (XLine& line : *plane->mutable_lines()) {
if (line.timestamp_ns() >= start_time_ns) {
if (line.timestamp_ns() >= static_cast<long int>(start_time_ns)) {
line.set_timestamp_ns(line.timestamp_ns() - start_time_ns);
}
}
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/core/util/bcast.h
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],
if (x[i] != x[0]) {
all_equal = false;
}
if (x[i].size() > largest_rank) {
if (static_cast<int>(x[i].size()) > largest_rank) {
largest_rank = x[i].size();
}
}
Expand Down Expand Up @@ -176,7 +176,7 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],

// 1-extend and align all vectors.
for (int i = 0; i < N; ++i) {
if (copy[i].size() < largest_rank) {
if (static_cast<int>(copy[i].size()) < largest_rank) {
copy[i].resize(largest_rank, 1);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ ::tensorflow::Status ConvertTrivialTileToConcat::Run(Model* model,
// It then just becomes a concat along that dimension.
int non_one_dims = 0;
int concat_axis = 0;
for (int i = 0; i < multiples.size(); ++i) {
for (size_t i = 0; i < multiples.size(); ++i) {
if (multiples[i] != 1) {
++non_one_dims;
concat_axis = i;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ bool TransposeAffectsMemoryOrder(std::vector<int> perm,
// just the shape) then the flat buffer representation shouldn't change.
std::vector<int> old_major_index_ordering;
std::vector<int> new_major_index_ordering;
for (int i = 0; i < in_shape.size(); i++) {
for (int i = 0, iter_limit = in_shape.size(); i < iter_limit; i++) {
if (in_shape[i] != 1) {
old_major_index_ordering.push_back(i);
}
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/lite/toco/graph_transformations/dequantize.cc
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ void DequantizeBuffer(Array* array) {
auto& new_data = array->GetMutableBuffer<ArrayDataType::kFloat>().data;
new_data.resize(old_data.size());
const auto& qparams = array->GetQuantizationParams();
for (int i = 0; i < old_data.size(); i++) {
for (int i = 0, iter_limit = old_data.size(); i < iter_limit; i++) {
new_data[i] = qparams.scale * (old_data[i] - qparams.zero_point);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ ::tensorflow::Status DropFakeQuant::Run(Model* model, std::size_t op_index,
}

// Drop min/max inputs
for (int i = 1; i < fakequant_op->inputs.size(); i++) {
for (int i = 1, iter_limit = fakequant_op->inputs.size(); i < iter_limit; i++) {
if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) {
model->EraseArray(fakequant_op->inputs[i]);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ ::tensorflow::Status EnsureUint8WeightsSafeForFastInt8Kernels::Run(
int index_of_previous_bad_value = 0;
bool changed = false;

for (int i = 0; i < buffer_data.size(); i++) {
for (int i = 0, iter_limit = buffer_data.size(); i < iter_limit; i++) {
if (buffer_data[i] == 0) {
count_bad++;
if (count_bad > 1) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ bool IsBroadcastingOp(const Model& model, Operator* op) {
// Concatenation of identical inputs is usually a broadcast.
if (op->type == OperatorType::kConcatenation) {
// Verify that all inputs are the same.
for (int i = 1; i < op->inputs.size(); ++i) {
for (size_t i = 1; i < op->inputs.size(); ++i) {
if (op->inputs[i] != op->inputs[0]) {
return false;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid(
return false;

// Make sure the inputs datatype matches.
for (int i = 0; i < fw_sequence_op->inputs.size(); ++i) {
for (size_t i = 0; i < fw_sequence_op->inputs.size(); ++i) {
const auto& fw_input_array_name = fw_sequence_op->inputs[i];
const auto& bw_input_array_name = bw_sequence_op->inputs[i];
if (model.HasArray(fw_input_array_name) &&
Expand All @@ -137,7 +137,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid(
}

// Make sure the outputs datatype matches.
for (int i = 0; i < fw_sequence_op->outputs.size(); ++i) {
for (size_t i = 0; i < fw_sequence_op->outputs.size(); ++i) {
const auto& fw_output_array_name = fw_sequence_op->outputs[i];
const auto& bw_output_array_name = bw_sequence_op->outputs[i];
if (model.HasArray(fw_output_array_name) &&
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ bool HardcodeMinMaxForPack(Model* model, Operator* op) {
}
const auto& first_input_minmax = first_input_array.GetMinMax();

for (int i = 1; i < op->inputs.size(); i++) {
for (size_t i = 1; i < op->inputs.size(); i++) {
const auto& input_array = model->GetArray(op->inputs[i]);
if (!input_array.minmax) {
return false;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ ::tensorflow::Status IdentifyNearestUpsample::Run(Model* model,
shape_array.data_type = ArrayDataType::kInt32;
auto& shape_buffer = shape_array.GetMutableBuffer<ArrayDataType::kInt32>();
// This is what imagined as the original shape.
for (int i = 0; i < imagined_original_shape.size(); ++i) {
for (size_t i = 0; i < imagined_original_shape.size(); ++i) {
shape_buffer.data.push_back(imagined_original_shape.at(i));
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ std::vector<int32> ReshapeToTranspose(const Model& model,
std::vector<int> not_one_indices;

// Separate into one indices and not one indices.
for (int i = 0; i < in_shape.size(); i++) {
for (size_t i = 0; i < in_shape.size(); i++) {
if (in_shape[i] == 1) {
one_indices.push_back(i);
} else {
Expand Down Expand Up @@ -167,7 +167,7 @@ ::tensorflow::Status MergeReshapeIntoPrecedingTranspose::Run(

// Combine the permutations.
const auto& transpose_perm = transpose_op->perm;
for (int i = 0; i < merged_perm.size(); i++) {
for (size_t i = 0; i < merged_perm.size(); i++) {
merged_perm[i] = transpose_perm[merged_perm[i]];
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -170,8 +170,8 @@ ::tensorflow::Status PropagateArrayDataTypes::Run(Model* model,
if (unsupported_op->output_data_types.size() < op->outputs.size()) {
return ::tensorflow::Status::OK();
}
for (int i = 0; i < op->outputs.size(); ++i) {
const std::string& output = op->outputs[i];
for (size_t i = 0; i < op->outputs.size(); ++i) {
const string& output = op->outputs[i];
const ArrayDataType data_type = unsupported_op->output_data_types[i];
model->GetArray(output).data_type = data_type;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ bool RecursivelyBackwardPropagateDataType(GraphTransformation* transformation,
ArrayDataType new_data_type,
const MinMax& new_minmax) {
bool did_change = false;
for (int input_index = 0; input_index < op->inputs.size(); ++input_index) {
for (size_t input_index = 0; input_index < op->inputs.size(); ++input_index) {
const auto& input = op->inputs[input_index];
auto& input_array = model->GetArray(input);

Expand Down
30 changes: 15 additions & 15 deletions tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc
Original file line number Diff line number Diff line change
Expand Up @@ -431,7 +431,7 @@ void ProcessTensorFlowReshapeOperator(Model* model,
bool has_wildcard = false;
int wildcard_index = 0;
int product_non_wildcard_dims = 1;
for (int i = 0; i < shape_data.size(); i++) {
for (size_t i = 0; i < shape_data.size(); i++) {
if (shape_data[i] == -1) {
CHECK(!has_wildcard);
has_wildcard = true;
Expand Down Expand Up @@ -574,7 +574,7 @@ void ProcessTensorFlowReductionOperator(Model* model, Operator* op) {
std::set<int32> true_indices;
const auto& reduction_indices =
reduction_indices_array.GetBuffer<ArrayDataType::kInt32>().data;
for (int i = 0; i < reduction_indices.size(); ++i) {
for (size_t i = 0; i < reduction_indices.size(); ++i) {
const int32 reduction_index = reduction_indices[i];
if (reduction_index < -input_rank || reduction_index >= input_rank) {
CHECK(false) << "Invalid reduction dimension " << reduction_index
Expand Down Expand Up @@ -627,7 +627,7 @@ void ProcessSliceOperator(Model* model, SliceOperator* op) {
CHECK_EQ(op->begin.size(), op->size.size());

std::vector<int> output_dims;
for (int i = 0; i < op->begin.size(); ++i) {
for (size_t i = 0; i < op->begin.size(); ++i) {
int size = op->size[i];
if (size == -1) {
size = input_array.shape().dims(i) - op->begin[i];
Expand Down Expand Up @@ -883,7 +883,7 @@ void ProcessTensorFlowSplitVOperator(Model* model,

CHECK_EQ(op->outputs.size(), op->num_split);

for (int i = 0; i < op->outputs.size(); ++i) {
for (size_t i = 0; i < op->outputs.size(); ++i) {
const auto& output = op->outputs[i];
Shape output_shape = input_shape;
(*output_shape.mutable_dims())[axis] = size_splits_vector.at(i);
Expand Down Expand Up @@ -1514,7 +1514,7 @@ void ProcessPadOperator(Model* model, PadOperator* op) {
std::vector<int>& dims = *output_shape.mutable_dims();
CHECK_EQ(op->left_padding.size(), dims.size());

for (int i = 0; i < op->left_padding.size(); ++i) {
for (size_t i = 0; i < op->left_padding.size(); ++i) {
dims[i] += op->left_padding[i] + op->right_padding[i];
}

Expand All @@ -1540,7 +1540,7 @@ void ProcessPadV2Operator(Model* model, PadV2Operator* op) {
std::vector<int>& dims = *output_shape.mutable_dims();
CHECK_EQ(op->left_padding.size(), dims.size());

for (int i = 0; i < op->left_padding.size(); ++i) {
for (size_t i = 0; i < op->left_padding.size(); ++i) {
dims[i] += op->left_padding[i] + op->right_padding[i];
}

Expand Down Expand Up @@ -1683,7 +1683,7 @@ void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) {
CHECK_LE(op->strides.size(), num_input_axes)
<< "StridedSlice op with output \"" << op->outputs[0]
<< "\", requires no more than " << num_input_axes << " strides";
for (int i = 0; i < op->strides.size(); i++) {
for (size_t i = 0; i < op->strides.size(); i++) {
CHECK_NE(op->strides[i], 0) << "Strides must be non-zero. Axis " << i
<< " has stride=" << op->strides[i] << ".";
}
Expand Down Expand Up @@ -1814,7 +1814,7 @@ void ProcessTransposeOperator(Model* model, TransposeOperator* op) {
<< "Transpose permutation input " << op->inputs[1]
<< " must be same length as input dimensions";
std::vector<int>* output_dims = output_array.mutable_shape()->mutable_dims();
for (int i = 0; i < perm.size(); i++) {
for (size_t i = 0; i < perm.size(); i++) {
int axis = perm[i];
CHECK_GE(axis, 0);
CHECK_LT(axis, input_shape.dimensions_count());
Expand Down Expand Up @@ -1856,8 +1856,8 @@ void ProcessArgMinMaxOperator(Model* model, Op* op) {
std::vector<int> output_dims;

output_dims.reserve(input_dims.size() - 1);
for (int i = 0; i < input_dims.size(); ++i) {
if (i != axis) {
for (size_t i = 0; i < input_dims.size(); ++i) {
if ( static_cast<int>(i) != axis) {
output_dims.push_back(input_dims[i]);
}
}
Expand Down Expand Up @@ -1938,7 +1938,7 @@ void ProcessTileOperator(Model* model, TensorFlowTileOperator* op) {

auto* mutable_dims = output_array.mutable_shape()->mutable_dims();
mutable_dims->resize(multiples.size());
for (int i = 0; i < mutable_dims->size(); ++i) {
for (size_t i = 0; i < mutable_dims->size(); ++i) {
(*mutable_dims)[i] = input_shape.dims(i) * multiples[i];
}
}
Expand Down Expand Up @@ -2010,8 +2010,8 @@ void ProcessUnpackOperator(Model* model, UnpackOperator* op) {
std::vector<int> output_dims;

output_dims.reserve(input_dims.size() - 1);
for (int i = 0; i < input_dims.size(); ++i) {
if (i != op->axis) {
for (size_t i = 0; i < input_dims.size(); ++i) {
if ( static_cast<int>(i) != op->axis) {
output_dims.push_back(input_dims[i]);
}
}
Expand Down Expand Up @@ -2399,8 +2399,8 @@ ::tensorflow::Status PropagateFixedSizes::Run(Model* model,
if (unsupported_op->output_shapes.size() < op->outputs.size()) {
return ::tensorflow::Status::OK();
}
for (int i = 0; i < op->outputs.size(); ++i) {
const std::string& output = op->outputs[i];
for (size_t i = 0; i < op->outputs.size(); ++i) {
const string& output = op->outputs[i];
model->GetArray(output).copy_shape(unsupported_op->output_shapes.at(i));
}
break;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,12 @@ bool TransformsToIdentity(std::vector<int> const& perm1,
// perm1 is the order of the indices after first transpose. When perm1 is
// reordered according to perm2, if the result is simple increasing sequence
// i.e., range(0, perm1.size()), then the two transposes cancel each other.
for (int i = 0; i < perm1.size(); ++i) {
if (perm1[i] < 0 || perm1[i] >= perm1.size() || perm2[i] < 0 ||
perm2[i] >= perm1.size()) {
for (size_t i = 0; i < perm1.size(); ++i) {
if (perm1[i] < 0 || perm1[i] >= static_cast<int>(perm1.size()) || perm2[i] < 0 ||
perm2[i] >= static_cast<int>(perm1.size())) {
return false;
}
if (perm1[perm2[i]] != i) {
if (perm1[perm2[i]] != static_cast<int>(i)) {
return false;
}
}
Expand All @@ -46,7 +46,7 @@ bool TransformsToIdentity(std::vector<int> const& perm1,
void ReplaceOpInputsWith(Model* model, const std::string& lookfor,
const std::string& replacewith) {
for (const auto& op : model->operators) {
for (int i = 0; i < op->inputs.size(); ++i) {
for (size_t i = 0; i < op->inputs.size(); ++i) {
if (op->inputs[i] == lookfor) {
op->inputs[i] = replacewith;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ bool RemoveTrivialPassthroughOp(GraphTransformation* transformation,
// We call 'main input' the unique nonconstant input array if there is one,
// or else the 0-th input.
int count_nonconstant_input_arrays = 0;
for (int i = 0; i < passthru_op->inputs.size(); i++) {
for (size_t i = 0; i < passthru_op->inputs.size(); i++) {
if (!model->GetArray(passthru_op->inputs[i]).buffer) {
count_nonconstant_input_arrays++;
if (count_nonconstant_input_arrays == 1) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,9 +127,9 @@ ::tensorflow::Status ReorderElementwiseUnary::Run(Model* model,
move_op->outputs[0] = output_name;
} else {
// The intermediate array is now the output array.
for (int i = 0; i < model->operators.size(); i++) {
for (size_t i = 0; i < model->operators.size(); i++) {
Operator* consumer = model->operators[i].get();
for (int j = 0; j < consumer->inputs.size(); j++) {
for (size_t j = 0; j < consumer->inputs.size(); j++) {
if (consumer->inputs[j] == output_name) {
consumer->inputs[j] = intermediate_name;
}
Expand Down