Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Avoid returning without freeing a pointer #31078

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 4 additions & 1 deletion tensorflow/c/c_api_experimental.cc
Expand Up @@ -598,7 +598,10 @@ struct TF_CheckpointReader : public tensorflow::checkpoint::CheckpointReader {
TF_CheckpointReader* TF_NewCheckpointReader(const char* filename,
TF_Status* status) {
TF_CheckpointReader* reader = new TF_CheckpointReader(filename, status);
if (!status->status.ok()) return nullptr;
if (!status->status.ok()) {
TF_DeleteCheckpointReader(reader);
return nullptr;
}
const auto& m = reader->GetVariableToDataTypeMap();
for (auto it = m.begin(); it != m.end(); ++it)
reader->variable_list.push_back(it->first);
Expand Down
7 changes: 4 additions & 3 deletions tensorflow/c/eager/c_api.cc
Expand Up @@ -683,10 +683,11 @@ TF_AttrType TFE_OpNameGetAttrType(TFE_Context* ctx,
TF_Status* status) {
TF_AttrType ret;
TFE_Op* op = TFE_NewOp(ctx, op_or_function_name, status);
if (!status->status.ok()) {
return TF_ATTR_INT; // Same dummy return as TFE_OpGetAttrType.
if (status->status.ok()) {
ret = TFE_OpGetAttrType(op, attr_name, is_list, status);
} else {
ret = TF_ATTR_INT; // Same dummy return as TFE_OpGetAttrType.
}
ret = TFE_OpGetAttrType(op, attr_name, is_list, status);
TFE_DeleteOp(op);
return ret;
}
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/stack.cc
Expand Up @@ -184,10 +184,10 @@ void StackOp::Compute(OpKernelContext* ctx) {
ResourceMgr* rm = ctx->resource_manager();
OP_REQUIRES(ctx, rm != nullptr, errors::Internal("No resource manager."));
string key = strings::StrCat(kContainer, stack_name);
Stack* stack = new Stack(elem_type_, stack_name, size);
auto* step_container = ctx->step_container();
OP_REQUIRES(ctx, step_container != nullptr,
errors::Internal("No step container."));
Stack* stack = new Stack(elem_type_, stack_name, size);
OP_REQUIRES_OK(ctx, rm->Create(step_container->name(), key, stack));
if (IsRefType(ctx->expected_output_dtype(0))) {
// Create the stack handle.
Expand Down
16 changes: 13 additions & 3 deletions tensorflow/core/profiler/internal/tfprof_code.cc
Expand Up @@ -295,13 +295,23 @@ class PprofProfileImpl : public PprofProfile {
io::ZlibOutputBuffer* zlib_output_buffer = new io::ZlibOutputBuffer(
file.get(), buf_size, buf_size, io::ZlibCompressionOptions::GZIP());
s = zlib_output_buffer->Init();
if (!s.ok()) return s;
if (!s.ok()) {
delete zlib_output_buffer;
return s;
}
s = zlib_output_buffer->Append(profile_pb.SerializeAsString());
if (!s.ok()) return s;
if (!s.ok()) {
delete zlib_output_buffer;
return s;
}
s = zlib_output_buffer->Close();
if (!s.ok()) return s;
if (!s.ok()) {
delete zlib_output_buffer;
return s;
}
fprintf(stdout, "\nRun pprof -png --nodecount=100 --sample_index=1 <%s>\n",
filename.c_str());
delete zlib_output_buffer;
return s;
}

Expand Down
70 changes: 42 additions & 28 deletions tensorflow/lite/toco/import_tensorflow.cc
Expand Up @@ -818,18 +818,15 @@ tensorflow::Status ConvertConvOperator(
reorder->output_axes_order = AxesOrder::kOHWI;
model->operators.emplace_back(reorder);
}
auto* conv = new ConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
if (!HasAttr(node, "strides")) {
return tensorflow::errors::InvalidArgument("Missing attribute 'strides'");
}
const auto& strides = GetListAttr(node, "strides");
TF_RETURN_IF_ERROR(ExpectValue(strides.i_size(), 4, "number of strides"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(0), 1, "strides(0)"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(3), 1, "strides(3)"));
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
int dilation_height_factor;
int dilation_width_factor;
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
TF_RETURN_IF_ERROR(
Expand All @@ -841,21 +838,30 @@ tensorflow::Status ConvertConvOperator(
node.name(), "\" had dilations:[ ", dilations.i(0), ", ",
dilations.i(1), ", ", dilations.i(2), ", ", dilations.i(3), "]."));
}
conv->dilation_height_factor = dilations.i(1);
conv->dilation_width_factor = dilations.i(2);
dilation_height_factor = dilations.i(1);
dilation_width_factor = dilations.i(2);
} else {
conv->dilation_height_factor = 1;
conv->dilation_width_factor = 1;
dilation_height_factor = 1;
dilation_width_factor = 1;
}
const auto& padding = GetStringAttr(node, "padding");
PaddingType padding_type;
if (padding == "SAME") {
conv->padding.type = PaddingType::kSame;
padding_type = PaddingType::kSame;
} else if (padding == "VALID") {
conv->padding.type = PaddingType::kValid;
padding_type = PaddingType::kValid;
} else {
return tensorflow::errors::InvalidArgument(
"Bad padding (only SAME and VALID are supported)");
}
auto* conv = new ConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
conv->dilation_height_factor = dilation_height_factor;
conv->dilation_width_factor = dilation_width_factor;
conv->padding.type = padding_type;
model->operators.emplace_back(conv);

return tensorflow::Status::OK();
Expand Down Expand Up @@ -894,15 +900,12 @@ tensorflow::Status ConvertDepthwiseConvOperator(
reorder->output_axes_order = AxesOrder::k1HWO;
model->operators.emplace_back(reorder);
}
auto* conv = new DepthwiseConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4);
CHECK_EQ(strides.i(0), 1);
CHECK_EQ(strides.i(3), 1);
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
TF_RETURN_IF_ERROR(ExpectValue(strides.i_size(), 4, "number of strides"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(0), 1, "strides(0)"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(3), 1, "strides(3)"));
int dilation_height_factor;
int dilation_width_factor;
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
TF_RETURN_IF_ERROR(
Expand All @@ -914,20 +917,30 @@ tensorflow::Status ConvertDepthwiseConvOperator(
node.name(), "\" had dilations:[ ", dilations.i(0), ", ",
dilations.i(1), ", ", dilations.i(2), ", ", dilations.i(3), "]."));
}
conv->dilation_height_factor = dilations.i(1);
conv->dilation_width_factor = dilations.i(2);
dilation_height_factor = dilations.i(1);
dilation_width_factor = dilations.i(2);
} else {
conv->dilation_height_factor = 1;
conv->dilation_width_factor = 1;
dilation_height_factor = 1;
dilation_width_factor = 1;
}
const auto& padding = GetStringAttr(node, "padding");
PaddingType padding_type;
if (padding == "SAME") {
conv->padding.type = PaddingType::kSame;
padding_type = PaddingType::kSame;
} else if (padding == "VALID") {
conv->padding.type = PaddingType::kValid;
padding_type = PaddingType::kValid;
} else {
LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
return tensorflow::errors::InvalidArgument(
"Bad padding (only SAME and VALID are supported)");
}
auto* conv = new DepthwiseConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
conv->dilation_height_factor = dilation_height_factor;
conv->dilation_width_factor = dilation_width_factor;
conv->padding.type = padding_type;
model->operators.emplace_back(conv);
return tensorflow::Status::OK();
}
Expand Down Expand Up @@ -2369,12 +2382,13 @@ tensorflow::Status ConvertUnidirectionalSequenceLstm(
const ModelFlags& model_flags, Model* model) {
DCHECK_EQ(node.op(), "UnidirectionalSequenceLstm");

auto* op = new UnidirectionalSequenceLstmOperator();
const auto& indices = GetListAttr(node, "_tflite_input_indices");
if (indices.i_size() != node.input().size()) {
return tensorflow::errors::InvalidArgument("Input size does not match.");
}

auto* op = new UnidirectionalSequenceLstmOperator();

// The input size needs to be the same as the TfLite UniDirectionalSequence
// Lstm implementation.
const int kInputsSize = 20;
Expand Down Expand Up @@ -2424,12 +2438,12 @@ tensorflow::Status ConvertUnidirectionalSequenceRnn(
const ModelFlags& model_flags, Model* model) {
DCHECK_EQ(node.op(), "UnidirectionalSequenceRnn");

auto* op = new UnidirectionalSequenceRnnOperator();
const auto& indices = GetListAttr(node, "_tflite_input_indices");
if (indices.i_size() != node.input().size()) {
return tensorflow::errors::InvalidArgument("Input size does not match.");
}

auto* op = new UnidirectionalSequenceRnnOperator();
for (const string& input : node.input()) {
op->inputs.push_back(input);
}
Expand Down