fix ncnn tools crash with compound layer creation, write gemm weight data (#5416)

* fix ncnn tools crash with compound layer creation

* write gemm weight data
nihui committed Apr 10, 2024
1 parent db035d6 commit 1e4daff
Showing 3 changed files with 21 additions and 8 deletions.
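The crash fix replaces ncnn::create_layer() with ncnn::create_layer_cpu() everywhere the offline tools instantiate a reference layer. A minimal sketch of the resulting pattern, assuming (as the diff suggests) that create_layer_cpu() returns the plain CPU implementation, which is safe to use outside a fully configured ncnn::Net, while create_layer() may hand back a compound/optimized layer that crashed in this context; make_reference_layer() is an illustrative helper, not a function in the repository:

// Sketch only: how the tools create a reference layer after this change.
// Assumes the ncnn headers from this source tree.
#include "layer.h"
#include "paramdict.h"

static ncnn::Layer* make_reference_layer(int typeindex)
{
    // create_layer_cpu() returns the plain CPU implementation;
    // create_layer() could return a compound layer that crashed here.
    ncnn::Layer* layer_default = ncnn::create_layer_cpu(typeindex);
    if (!layer_default)
        return 0;

    // Load an empty ParamDict so the layer's default parameters are set,
    // mirroring what ModelWriter::save() does in tools/modelwriter.h.
    ncnn::ParamDict pd;
    layer_default->load_param(pd);
    return layer_default;
}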
15 changes: 14 additions & 1 deletion tools/modelwriter.h
@@ -796,7 +796,7 @@ int ModelWriter::save(const char* parampath, const char* binpath)
continue;
}

- ncnn::Layer* layer_default = ncnn::create_layer(layer->typeindex);
+ ncnn::Layer* layer_default = ncnn::create_layer_cpu(layer->typeindex);

ncnn::ParamDict pd;
layer_default->load_param(pd);
@@ -1764,6 +1764,19 @@ int ModelWriter::save(const char* parampath, const char* binpath)
fprintf_param_value(" 20=%d", constant_TILE_M)
fprintf_param_value(" 21=%d", constant_TILE_N)
fprintf_param_value(" 22=%d", constant_TILE_K)

+ if (op->constantA == 1)
+ {
+     fwrite_weight_tag_data(op->A_data, bp);
+ }
+ if (op->constantB == 1)
+ {
+     fwrite_weight_tag_data(op->B_data, bp);
+ }
+ if (op->constantC == 1 && op->constant_broadcast_type_C != -1)
+ {
+     fwrite_weight_tag_data(op->C_data, bp);
+ }
}
else if (layer->type == "GLU")
{
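The modelwriter.h addition above writes the Gemm layer's constant blobs (A, B, and C when the corresponding constant flags are set, with C also requiring a valid broadcast type) into the .bin right after the Gemm parameters, so an optimized model with constant Gemm inputs can be loaded again. A minimal round-trip check sketch; the file names are placeholders for whatever the tools produced:

// Sketch only: verify that a model rewritten by the tools still loads.
#include "net.h"
#include <cstdio>

int main()
{
    ncnn::Net net;
    if (net.load_param("model-opt.param") != 0 || net.load_model("model-opt.bin") != 0)
    {
        // Before this commit, the tools did not write constant Gemm A/B/C
        // data to the .bin, so loading such a model could fail here.
        fprintf(stderr, "failed to load optimized model\n");
        return -1;
    }
    return 0;
}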
10 changes: 5 additions & 5 deletions tools/ncnnoptimize.cpp
@@ -1993,7 +1993,7 @@ int NetOptimize::fuse_binaryop_eltwise()

fprintf(stderr, "fuse_binaryop_eltwise %s %s %s\n", binaryop0->name.c_str(), binaryop1->name.c_str(), binaryop->name.c_str());

- ncnn::Eltwise* eltwise = (ncnn::Eltwise*)ncnn::create_layer("Eltwise");
+ ncnn::Eltwise* eltwise = (ncnn::Eltwise*)ncnn::create_layer_cpu("Eltwise");

eltwise->type = "Eltwise";
eltwise->name = binaryop->name;
@@ -2554,7 +2554,7 @@ int NetOptimize::replace_reduction_with_global_pooling()

fprintf(stderr, "replace_reduction_with_global_pooling %s %s\n", reduction1->name.c_str(), reduction2->name.c_str());

- ncnn::Pooling* pooling = (ncnn::Pooling*)ncnn::create_layer("Pooling");
+ ncnn::Pooling* pooling = (ncnn::Pooling*)ncnn::create_layer_cpu("Pooling");

pooling->type = "Pooling";
pooling->name = reduction2->name;
@@ -2593,7 +2593,7 @@ int NetOptimize::replace_prelu_with_leaky_relu()

fprintf(stderr, "replace_prelu_with_leaky_relu %s\n", prelu->name.c_str());

- ncnn::ReLU* relu = (ncnn::ReLU*)ncnn::create_layer("ReLU");
+ ncnn::ReLU* relu = (ncnn::ReLU*)ncnn::create_layer_cpu("ReLU");

relu->type = "ReLU";
relu->name = prelu->name;
@@ -2647,7 +2647,7 @@ int NetOptimize::replace_convolution_with_innerproduct_after_global_pooling()

fprintf(stderr, "replace_convolution_with_innerproduct_after_global_pooling %s %s\n", pooling->name.c_str(), convolution->name.c_str());

- ncnn::InnerProduct* innerproduct = (ncnn::InnerProduct*)ncnn::create_layer("InnerProduct");
+ ncnn::InnerProduct* innerproduct = (ncnn::InnerProduct*)ncnn::create_layer_cpu("InnerProduct");

innerproduct->type = "InnerProduct";
innerproduct->name = convolution->name;
@@ -2715,7 +2715,7 @@ int NetOptimize::replace_convolution_with_innerproduct_after_innerproduct()

fprintf(stderr, "replace_convolution_with_innerproduct_after_innerproduct %s %s\n", innerproduct->name.c_str(), convolution->name.c_str());

- ncnn::InnerProduct* innerproduct2 = (ncnn::InnerProduct*)ncnn::create_layer("InnerProduct");
+ ncnn::InnerProduct* innerproduct2 = (ncnn::InnerProduct*)ncnn::create_layer_cpu("InnerProduct");

innerproduct2->type = "InnerProduct";
innerproduct2->name = convolution->name;
4 changes: 2 additions & 2 deletions tools/quantize/ncnn2table.cpp
@@ -1112,7 +1112,7 @@ int QuantNet::quantize_EQ()
ncnn::Mat out;
ex.extract(conv_top_blobs[i], out);

- ncnn::Layer* layer_int8 = ncnn::create_layer(layer->typeindex);
+ ncnn::Layer* layer_int8 = ncnn::create_layer_cpu(layer->typeindex);

ncnn::ParamDict pd;
get_layer_param(layer, pd);
@@ -1222,7 +1222,7 @@ int QuantNet::quantize_EQ()
ncnn::Mat out;
ex.extract(conv_top_blobs[i], out);

- ncnn::Layer* layer_int8 = ncnn::create_layer(layer->typeindex);
+ ncnn::Layer* layer_int8 = ncnn::create_layer_cpu(layer->typeindex);

ncnn::ParamDict pd;
get_layer_param(layer, pd);
