-
Notifications
You must be signed in to change notification settings - Fork 19
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Input layer didn't get converted #18
Comments
Hi, the input layer should be converted. The converter should find the InputLayer and convert it to an ncnn Input layer. |
Hi, from the decoded model config string, there is no InputLayer inside your model. Without an input layer, there is no way to know your input shape settings. However, I am thinking maybe I should add a default input layer. I will PR that later. If you want to fix your model, you can edit your model config to:
Here is the decoded config string in your model {
"class_name": "Sequential",
"config": {
"layers": [
{
"class_name": "Conv2D",
"config": {
"activation": "relu",
"activity_regularizer": null,
"batch_input_shape": [
null,
64,
64,
3
],
"bias_constraint": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {
"dtype": "float32"
}
},
"bias_regularizer": null,
"data_format": "channels_last",
"dilation_rate": [
1,
1
],
"dtype": "float32",
"filters": 16,
"kernel_constraint": null,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {
"dtype": "float32",
"seed": null
}
},
"kernel_regularizer": null,
"kernel_size": [
3,
3
],
"name": "conv2d_3",
"padding": "valid",
"strides": [
1,
1
],
"trainable": true,
"use_bias": true
}
},
{
"class_name": "AveragePooling2D",
"config": {
"data_format": "channels_last",
"dtype": "float32",
"name": "average_pooling2d_3",
"padding": "valid",
"pool_size": [
2,
2
],
"strides": [
2,
2
],
"trainable": true
}
},
{
"class_name": "Dropout",
"config": {
"dtype": "float32",
"name": "dropout_5",
"noise_shape": null,
"rate": 0.5,
"seed": null,
"trainable": true
}
},
{
"class_name": "Conv2D",
"config": {
"activation": "relu",
"activity_regularizer": null,
"bias_constraint": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {
"dtype": "float32"
}
},
"bias_regularizer": null,
"data_format": "channels_last",
"dilation_rate": [
1,
1
],
"dtype": "float32",
"filters": 32,
"kernel_constraint": null,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {
"dtype": "float32",
"seed": null
}
},
"kernel_regularizer": null,
"kernel_size": [
3,
3
],
"name": "conv2d_4",
"padding": "valid",
"strides": [
1,
1
],
"trainable": true,
"use_bias": true
}
},
{
"class_name": "AveragePooling2D",
"config": {
"data_format": "channels_last",
"dtype": "float32",
"name": "average_pooling2d_4",
"padding": "valid",
"pool_size": [
2,
2
],
"strides": [
2,
2
],
"trainable": true
}
},
{
"class_name": "Dropout",
"config": {
"dtype": "float32",
"name": "dropout_6",
"noise_shape": null,
"rate": 0.5,
"seed": null,
"trainable": true
}
},
{
"class_name": "Conv2D",
"config": {
"activation": "relu",
"activity_regularizer": null,
"bias_constraint": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {
"dtype": "float32"
}
},
"bias_regularizer": null,
"data_format": "channels_last",
"dilation_rate": [
1,
1
],
"dtype": "float32",
"filters": 64,
"kernel_constraint": null,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {
"dtype": "float32",
"seed": null
}
},
"kernel_regularizer": null,
"kernel_size": [
3,
3
],
"name": "conv2d_5",
"padding": "valid",
"strides": [
1,
1
],
"trainable": true,
"use_bias": true
}
},
{
"class_name": "AveragePooling2D",
"config": {
"data_format": "channels_last",
"dtype": "float32",
"name": "average_pooling2d_5",
"padding": "valid",
"pool_size": [
2,
2
],
"strides": [
2,
2
],
"trainable": true
}
},
{
"class_name": "Dropout",
"config": {
"dtype": "float32",
"name": "dropout_7",
"noise_shape": null,
"rate": 0.5,
"seed": null,
"trainable": true
}
},
{
"class_name": "Flatten",
"config": {
"data_format": "channels_last",
"dtype": "float32",
"name": "flatten_1",
"trainable": true
}
},
{
"class_name": "Dense",
"config": {
"activation": "relu",
"activity_regularizer": null,
"bias_constraint": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {
"dtype": "float32"
}
},
"bias_regularizer": null,
"dtype": "float32",
"kernel_constraint": null,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {
"dtype": "float32",
"seed": null
}
},
"kernel_regularizer": null,
"name": "dense_3",
"trainable": true,
"units": 256,
"use_bias": true
}
},
{
"class_name": "Dropout",
"config": {
"dtype": "float32",
"name": "dropout_8",
"noise_shape": null,
"rate": 0.2,
"seed": null,
"trainable": true
}
},
{
"class_name": "Dense",
"config": {
"activation": "relu",
"activity_regularizer": null,
"bias_constraint": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {
"dtype": "float32"
}
},
"bias_regularizer": null,
"dtype": "float32",
"kernel_constraint": null,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {
"dtype": "float32",
"seed": null
}
},
"kernel_regularizer": null,
"name": "dense_4",
"trainable": true,
"units": 128,
"use_bias": true
}
},
{
"class_name": "Dense",
"config": {
"activation": "softmax",
"activity_regularizer": null,
"bias_constraint": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {
"dtype": "float32"
}
},
"bias_regularizer": null,
"dtype": "float32",
"kernel_constraint": null,
"kernel_initializer": {
"class_name": "GlorotUniform",
"config": {
"dtype": "float32",
"seed": null
}
},
"kernel_regularizer": null,
"name": "dense_5",
"trainable": true,
"units": 2,
"use_bias": true
}
}
],
"name": "sequential_1"
}
} |
Ohh okay, thanks. Since Netron showed an input layer, I thought it was there. About the input size: the model needs 64×64. Also, doesn't adding an input in the .param file affect the number of parameters (the first line in the .param file)? So maybe the correct .param can be something like this
Right? Again, thanks for your help. |
You can check the code, the first line is just a constant magic, but the second line is the layer count and blob count, which should be incremented. ncnn uses dynamic shapes alone for all executions, so you don't need to give a constant input shape here. keras2ncnn/keras2ncnn/ncnn_emitter.py Lines 35 to 84 in 39f2cb9
|
Fixed in 9841e26. I will close this issue since it has been resolved and no other issue has been raised.
Trying to convert keras model to NCNN.
When I tried to convert the model, it got converted successfully, but the convolution layer was the first layer.
Not able to understand why the input layer (data) didn't get converted. Any reasons?
Please help ! thank you.
@MarsTechHAN
The text was updated successfully, but these errors were encountered: