Skip to content

Commit d95d3b6

Browse files
committed
inference completed
1 parent 46dd5a7 commit d95d3b6

File tree

7 files changed

+163
-179
lines changed

7 files changed

+163
-179
lines changed

app/Graph/build.cpp

Lines changed: 130 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,132 @@
11
#include "build.hpp"
22

3-
#include <stdexcept>
3+
void build_graph(Tensor input, Tensor output) {
  // Builds an inference graph from the JSON model description at MODEL_PATH:
  // creates one layer object per JSON entry (Conv / Dense / Pool / Flatten /
  // Dropout), chains them behind an InputLayer, runs inference on `input`,
  // and prints the raw values written to `output`.
  //
  // Parameters:
  //   input  - tensor holding the preprocessed image data.
  //   output - tensor that receives the network's result.
  // Throws std::runtime_error if the JSON yields no recognized layers.
  std::vector<std::shared_ptr<Layer>> layers;

  std::string json_file = MODEL_PATH;
  json model_data = read_json(json_file);

  std::cout << "Loaded model data from JSON." << std::endl;

  for (const auto& layer_data : model_data) {
    std::string layer_type = layer_data["type"];
    std::cout << "Processing layer of type: " << layer_type << std::endl;

    Tensor tensor =
        create_tensor_from_json(layer_data["weights"], Type::kFloat);

    if (layer_type.find("Conv") != std::string::npos) {
      Shape shape = tensor.get_shape();
      // Fixed copy-paste bug: this log line previously said "PoolingLayer".
      std::cout << "ConvLayer shape: ";
      for (size_t i = 0; i < shape.dims(); ++i) {
        std::cout << shape[i] << " ";
      }
      std::cout << std::endl;

      Tensor tmp_values = tensor;
      Tensor tmp_bias = make_tensor(tensor.get_bias());

      // Constructor arguments appear to be stride = 1, pads = 0,
      // dilation = 1 — TODO confirm against ConvolutionalLayer's signature.
      auto conv_layer =
          std::make_shared<ConvolutionalLayer>(1, 0, 1, tmp_values, tmp_bias);
      conv_layer->setName(kConvolution);
      layers.push_back(conv_layer);
      std::cout << "ConvLayer added to layers." << std::endl;
    }

    if (layer_type.find("Dense") != std::string::npos) {
      // Dense weights arrive flattened row-major with shape [rows, cols];
      // the FC layer is fed the transposed [cols, rows] layout.  Transpose
      // in a single pass instead of the previous 1D -> 2D -> transpose -> 1D
      // round-trip (same element order, no intermediate matrices, and no
      // signed/unsigned loop-counter mismatch).
      const std::vector<float> values_vector = *tensor.as<float>();
      const size_t rows = tensor.get_shape()[0];
      const size_t cols = tensor.get_shape()[1];
      std::vector<float> transposed(rows * cols, 0.0f);
      for (size_t j = 0; j < cols; ++j) {
        for (size_t k = 0; k < rows; ++k) {
          transposed[j * rows + k] = values_vector[k * cols + j];
        }
      }

      Shape shape_fc({cols, rows});
      Tensor values = make_tensor<float>(transposed, shape_fc);
      Tensor tmp_bias = make_tensor(tensor.get_bias());

      auto fc_layer = std::make_shared<FCLayer>(values, tmp_bias);
      fc_layer->setName(kFullyConnected);
      layers.push_back(fc_layer);
      std::cout << "DenseLayer added to layers." << std::endl;
    }

    if (layer_type.find("Pool") != std::string::npos) {
      // Pooling window is hard-coded to 2x2; the kernel size is not read
      // from the JSON description.
      Shape shape = {2, 2};
      std::cout << "PoolingLayer shape: " << shape[0] << "x" << shape[1]
                << std::endl;
      auto pool_layer = std::make_shared<PoolingLayer>(shape);
      pool_layer->setName(kPooling);
      layers.push_back(pool_layer);
      std::cout << "PoolingLayer added to layers." << std::endl;
    }

    if (layer_type.find("Flatten") != std::string::npos) {
      auto flatten_layer = std::make_shared<FlattenLayer>();
      flatten_layer->setName(kFlatten);
      layers.push_back(flatten_layer);
      std::cout << "FlattenLayer added to layers." << std::endl;
    }

    if (layer_type.find("Dropout") != std::string::npos) {
      // Dropout probability is hard-coded to 0.5, not taken from the JSON.
      auto dropout_layer = std::make_shared<DropOutLayer>(0.5);
      dropout_layer->setName(kDropout);
      layers.push_back(dropout_layer);
      std::cout << "DropOutLayer added to layers with probability 0.5."
                << std::endl;
    }
  }

  // Guard added: *layers[0] and *layers.back() below are undefined behavior
  // on an empty vector (e.g. an empty or unrecognized model description).
  if (layers.empty()) {
    throw std::runtime_error("Model JSON contained no recognized layers");
  }

  // The +1 accounts for the InputLayer, which is not stored in `layers`.
  std::cout << "number of layers - " << layers.size() + 1 << std::endl;
  Graph graph(static_cast<int>(layers.size()));
  InputLayer a1(kNhwc, kNchw, 1, 2);

  std::cout << "InputLayer created." << std::endl;

  graph.setInput(a1, input);
  std::cout << "Input set in graph." << std::endl;

  graph.makeConnection(a1, *layers[0]);
  std::cout << "Connection made between InputLayer and first layer."
            << std::endl;

  // i + 1 < size() instead of i < size() - 1: immune to unsigned wrap and
  // clearer about connecting each layer to its successor.
  for (size_t i = 0; i + 1 < layers.size(); ++i) {
    graph.makeConnection(*layers[i], *layers[i + 1]);
  }

  graph.setOutput(*layers.back(), output);
  std::cout << "Output set in graph." << std::endl;

  std::cout << "Starting inference..." << std::endl;
  graph.inference();
  std::cout << "Inference completed." << std::endl;

  // Raw output scores are printed, matching prior behavior.  The previous
  // code also computed softmax(output) into an unused local and discarded
  // it; apply softmax<float>(raw) here if probabilities are wanted instead.
  std::vector<float> raw = *output.as<float>();
  for (float value : raw) {
    std::cout << value << " ";
  }
}

app/Graph/build.hpp

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,17 @@
1-
#pragma once
21
#include <iostream>
2+
#include <stdexcept>
3+
#include <variant>
4+
#include <vector>
35
#include <opencv2/opencv.hpp>
6+
#include "Weights_Reader/reader_weights.hpp"
7+
#include "graph/graph.hpp"
8+
#include "layers/ConvLayer.hpp"
9+
#include "layers/DropOutLayer.hpp"
10+
#include "layers/EWLayer.hpp"
11+
#include "layers/FCLayer.hpp"
12+
#include "layers/FlattenLayer.hpp"
13+
#include "layers/InputLayer.hpp"
14+
#include "layers/OutputLayer.hpp"
15+
#include "layers/PoolingLayer.hpp"
16+
17+
void build_graph(Tensor input, Tensor output);

app/Graph/graph_build.cpp

Lines changed: 11 additions & 133 deletions
Original file line numberDiff line numberDiff line change
@@ -1,152 +1,30 @@
1-
#include <iostream>
2-
#include <stdexcept>
3-
#include <variant>
4-
#include <vector>
5-
6-
#include "Weights_Reader/reader_weights.hpp"
71
#include "build.hpp"
8-
#include "graph/graph.hpp"
9-
#include "layers/ConvLayer.hpp"
10-
#include "layers/DropOutLayer.hpp"
11-
#include "layers/EWLayer.hpp"
12-
#include "layers/FCLayer.hpp"
13-
#include "layers/FlattenLayer.hpp"
14-
#include "layers/InputLayer.hpp"
15-
#include "layers/OutputLayer.hpp"
16-
#include "layers/PoolingLayer.hpp"
2+
#include "build.cpp"
173

184
using namespace itlab_2023;
195

20-
void build_graph(Tensor input, Tensor output) {
21-
std::vector<std::shared_ptr<Layer>> layers;
22-
23-
std::string json_file = MODEL_PATH;
24-
json model_data = read_json(json_file);
25-
26-
std::cout << "Loaded model data from JSON." << std::endl;
27-
28-
for (const auto& layer_data : model_data) {
29-
std::string layer_type = layer_data["type"];
30-
std::cout << "Processing layer of type: " << layer_type << std::endl;
31-
32-
Tensor tensor =
33-
create_tensor_from_json(layer_data["weights"], Type::kFloat);
34-
35-
if (layer_type.find("Conv") != std::string::npos) {
36-
Shape shape = tensor.get_shape();
37-
std::cout << "PoolingLayer shape: ";
38-
for (size_t i = 0; i < shape.dims(); ++i) {
39-
std::cout << shape[i] << " ";
40-
}
41-
std::cout << std::endl;
42-
43-
Tensor tmp_values = tensor;
44-
Tensor tmp_bias = make_tensor(tensor.get_bias());
45-
46-
auto conv_layer =
47-
std::make_shared<ConvolutionalLayer>(1, 0, 0, tmp_values, tmp_bias);
48-
conv_layer->setName(kConvolution);
49-
layers.push_back(conv_layer);
50-
std::cout << "ConvLayer added to layers." << std::endl;
51-
}
52-
53-
if (layer_type.find("Dense") != std::string::npos) {
54-
Tensor tmp_values = tensor;
55-
Tensor tmp_bias = make_tensor(tensor.get_bias());
56-
57-
auto fc_layer = std::make_shared<FCLayer>(tmp_values, tmp_bias);
58-
fc_layer->setName(kFullyConnected);
59-
layers.push_back(fc_layer);
60-
std::cout << "DenseLayer added to layers." << std::endl;
61-
}
62-
63-
if (layer_type.find("Pool") != std::string::npos) {
64-
Shape shape = {2, 2};
65-
std::cout << "PoolingLayer shape: " << shape[0] << "x" << shape[1]
66-
<< std::endl;
67-
auto pool_layer = std::make_shared<PoolingLayer>(shape);
68-
pool_layer->setName(kPooling);
69-
layers.push_back(pool_layer);
70-
std::cout << "PoolingLayer added to layers." << std::endl;
71-
}
72-
73-
if (layer_type.find("Flatten") != std::string::npos) {
74-
auto flatten_layer = std::make_shared<FlattenLayer>();
75-
flatten_layer->setName(kFlatten);
76-
layers.push_back(flatten_layer);
77-
std::cout << "FlattenLayer added to layers." << std::endl;
78-
}
79-
80-
if (layer_type.find("Dropout") != std::string::npos) {
81-
auto dropout_layer = std::make_shared<DropOutLayer>(0.5);
82-
dropout_layer->setName(kDropout);
83-
layers.push_back(dropout_layer);
84-
std::cout << "DropOutLayer added to layers with probability 0.5."
85-
<< std::endl;
86-
}
87-
}
88-
std::cout << "number of layers - " << layers.size() + 1<< std::endl;
89-
Graph graph(static_cast<int>(layers.size()));
90-
InputLayer a1(kNhwc, kNchw, 1, 2);
91-
92-
std::cout << "InputLayer created." << std::endl;
93-
94-
graph.setInput(a1, input);
95-
std::cout << "Input set in graph." << std::endl;
96-
97-
graph.makeConnection(a1, *layers[0]);
98-
std::cout << "Connection made between InputLayer and first layer."
99-
<< std::endl;
100-
101-
for (size_t i = 0; i < layers.size() - 1; ++i) {
102-
graph.makeConnection(*layers[i], *layers[i + 1]);
103-
std::cout << "Connection made between layer " << i << " ("
104-
<< layerTypeToString(layers[i]->getName()) << ")"
105-
<< " and layer " << i + 1 << " ("
106-
<< layerTypeToString(layers[i + 1]->getName()) << ")"
107-
<< std::endl;
108-
}
109-
110-
111-
112-
graph.setOutput(*layers.back(), output);
113-
std::cout << "Output set in graph." << std::endl;
114-
115-
std::cout << "Starting inference..." << std::endl;
116-
graph.inference();
117-
std::cout << "Inference completed." << std::endl;
118-
119-
std::vector<float> tmp = *output.as<float>();
120-
std::vector<float> tmp_output = softmax<float>(*output.as<float>());
121-
for (float i : tmp) {
122-
std::cout << i << " ";
123-
}
124-
}
125-
1266
int main() {
1277
std::string image_path = IMAGE1_PATH;
1288
cv::Mat image = cv::imread(image_path);
1299
if (image.empty()) {
13010
throw std::runtime_error("Failed to load image");
13111
}
13212
cv::Mat resized_image;
133-
cv::resize(image, resized_image, cv::Size(227, 227));
13+
cv::cvtColor(image, image, cv::COLOR_BGR2GRAY);
14+
cv::resize(image, resized_image, cv::Size(28, 28));
13415
std::vector<cv::Mat> channels;
16+
13517
cv::split(resized_image, channels);
18+
13619
int count_pic = 1;
137-
std::vector<float> res(count_pic * 227 * 227 * 3);
138-
int c = 0;
139-
for (int i = 0; i < 227; ++i) {
140-
for (int j = 0; j < 227; ++j) {
141-
res[c] = channels[2].at<uchar>(i, j);
142-
c++;
143-
res[c] = channels[1].at<uchar>(i, j);
144-
c++;
145-
res[c] = channels[0].at<uchar>(i, j);
146-
c++;
20+
std::vector<float> res(count_pic * 28 * 28);
21+
22+
for (int i = 0; i < 28; ++i) {
23+
for (int j = 0; j < 28; ++j) {
24+
res[i * 28 + j] = channels[0].at<uchar>(i,j);
14725
}
14826
}
149-
Shape sh({static_cast<size_t>(count_pic), 227, 227, 3});
27+
Shape sh({static_cast<size_t>(count_pic), 28, 28, 1});
15028
Tensor t = make_tensor<float>(res, sh);
15129
Tensor input = t;
15230

include/Weights_Reader/reader_weights.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,6 @@ json read_json(const std::string& filename);
1111
void extract_values_without_bias(const json& j, std::vector<float>& values);
1212
void extract_values_from_json(const json& j, std::vector<float>& values);
1313
void parse_json_shape(const json& j, std::vector<size_t>& shape,
14-
size_t dim = 0);
14+
size_t dim);
1515
Tensor create_tensor_from_json(const json& j, Type type);
1616
void extract_bias_from_json(const json& j, std::vector<float>& bias);

0 commit comments

Comments
 (0)