From 60fdfa49abf5476f7e8efebacee0dcebf69e8a60 Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Tue, 4 Nov 2025 19:10:33 +0300
Subject: [PATCH 01/13] del parameters

---
 app/Graph/build.cpp       | 92 ++++++++++++---------------------------
 app/Graph/build.hpp       | 14 +++---
 app/Graph/graph_build.cpp | 48 +++++++++++++++++++-
 3 files changed, 81 insertions(+), 73 deletions(-)

diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index addd71f2..17944582 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -4,8 +4,9 @@
 
 using namespace it_lab_ai;
 
-void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output,
-                        bool comments, bool parallel, bool onednn) {
+it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input,
+                                    it_lab_ai::Tensor& output,
+                                    bool comments) {
   if (comments) {
     for (size_t i = 0; i < input.get_shape().dims(); i++) {
       std::cout << input.get_shape()[i] << ' ';
     }
@@ -25,8 +26,7 @@ void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output,
       std::cout << std::endl << std::endl;
     }
   }
-  it_lab_ai::ImplType impl1 = parallel ? it_lab_ai::kTBB : it_lab_ai::kDefault;
-  it_lab_ai::ImplType impl2 = parallel ? it_lab_ai::kSTL : it_lab_ai::kDefault;
+
   std::vector<std::shared_ptr<it_lab_ai::Layer>> layers;
   std::vector<bool> layerpostop;
@@ -74,18 +74,14 @@ void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output,
       it_lab_ai::Tensor tmp_values = tensor;
       it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
       auto conv_layer = std::make_shared<it_lab_ai::ConvolutionalLayer>(
-          1, pads, 1, tmp_values, tmp_bias, impl2, 1, true);
+          1, pads, 1, tmp_values, tmp_bias, kDefault, 1, true);
       layers.push_back(conv_layer);
       layerpostop.push_back(false);
       if (comments) std::cout << "ConvLayer added to layers." << std::endl;
     }
     if (layer_type.find("relu") != std::string::npos) {
       std::shared_ptr<it_lab_ai::Layer> ew_layer;
-      if (onednn) {
-        ew_layer = std::make_shared<it_lab_ai::EwLayerOneDnn>("relu");
-      } else {
-        ew_layer = std::make_shared<it_lab_ai::EWLayer>("relu");
-      }
+      ew_layer = std::make_shared<it_lab_ai::EWLayer>("relu");
       layers.push_back(ew_layer);
       layerpostop.push_back(true);
       if (comments)
@@ -111,7 +107,7 @@ void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output,
         std::cout << "PoolingLayer shape: " << shape[0] << "x" << shape[1]
                   << std::endl;
       auto pool_layer =
-          std::make_shared<it_lab_ai::PoolingLayer>(shape, pooltype, impl1);
+          std::make_shared<it_lab_ai::PoolingLayer>(shape, pooltype, kDefault);
       layers.push_back(pool_layer);
       layerpostop.push_back(false);
       if (comments) std::cout << "PoolingLayer added to layers." << std::endl;
@@ -162,8 +158,8 @@ void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output,
   graph.setOutput(*layers.back(), output);
   if (comments) std::cout << "Output set in graph." << std::endl;
-
-  if (comments) std::cout << "Starting inference..." << std::endl;
+  return graph;
+  /*if (comments) std::cout << "Starting inference..."
<< std::endl; graph.inference(); #ifdef ENABLE_STATISTIC_TIME std::vector times = graph.getTimeInfo(); @@ -187,7 +183,7 @@ void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, std::cout << i << ": " << tmp_output[i] << std::endl; } } - } + }*/ } std::string get_base_layer_name(const std::string& tensor_name) { @@ -234,9 +230,8 @@ std::string layerTypeToString(it_lab_ai::LayerType type) { } } -void build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, - const std::string& json_path, bool comments, bool parallel, - bool onednn) { +it_lab_ai::Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, + const std::string& json_path, bool comments) { if (comments) { for (size_t i = 0; i < input.get_shape().dims(); i++) { std::cout << input.get_shape()[i] << ' '; @@ -257,7 +252,7 @@ void build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, } } - auto parse_result = parse_json_model(json_path, comments, parallel, onednn); + auto parse_result = parse_json_model(json_path, comments); auto& layers = parse_result.layers; auto& name_to_layer = parse_result.name_to_layer; @@ -354,8 +349,8 @@ void build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, graph.setSplitDistribution(split_distribution); auto output_layer = layers.back(); graph.setOutput(*output_layer, output); - - if (comments) std::cout << "Starting inference..." << std::endl; + return graph; + /*if (comments) std::cout << "Starting inference..." << std::endl; try { graph.inference(); if (comments) std::cout << "Inference completed successfully." << std::endl; @@ -373,11 +368,10 @@ void build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0); std::cout << "Elapsed inference time:" << sum << std::endl; std::cout << "!INFERENCE TIME INFO END!" << std::endl; -#endif +#endif*/ } -ParseResult parse_json_model(const std::string& json_path, bool comments, - bool parallel, bool onednn) { +ParseResult parse_json_model(const std::string& json_path, bool comments) { ParseResult result; auto& layers = result.layers; @@ -390,9 +384,6 @@ ParseResult parse_json_model(const std::string& json_path, bool comments, auto& split_distribution = result.split_distribution; auto& original_ids = result.original_ids; - it_lab_ai::ImplType impl1 = parallel ? it_lab_ai::kTBB : it_lab_ai::kDefault; - it_lab_ai::ImplType impl2 = parallel ? 
it_lab_ai::kSTL : it_lab_ai::kDefault; - std::unordered_map> layer_parameters; std::unordered_map float_parameters; std::string last_constant_name; @@ -490,24 +481,16 @@ ParseResult parse_json_model(const std::string& json_path, bool comments, it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias()); auto conv_layer = std::make_shared( - stride, pads, dilations, tmp_tensor, tmp_bias, impl2, group); + stride, pads, dilations, tmp_tensor, tmp_bias, kDefault, group); layer = conv_layer; } else if (layer_type.find("Relu") != std::string::npos || layer_type.find("relu") != std::string::npos) { std::shared_ptr ew_layer; - if (onednn) { - ew_layer = std::make_shared("relu"); - } else { - ew_layer = std::make_shared("relu"); - } + ew_layer = std::make_shared("relu"); layer = ew_layer; } else if (layer_type.find("Sigmoid") != std::string::npos) { std::shared_ptr ew_layer; - if (onednn) { - ew_layer = std::make_shared("sigmoid"); - } else { - ew_layer = std::make_shared("sigmoid"); - } + ew_layer = std::make_shared("sigmoid"); layer = ew_layer; } else if (layer_type.find("Dense") != std::string::npos || layer_type.find("FullyConnected") != std::string::npos) { @@ -538,7 +521,7 @@ ParseResult parse_json_model(const std::string& json_path, bool comments, << std::endl; } else if (layer_type == "GlobalAveragePool") { auto pool_layer = std::make_shared( - it_lab_ai::Shape({0, 0}), "average", impl1); + it_lab_ai::Shape({0, 0}), "average", kDefault); layer = pool_layer; if (comments) { std::cout << "GlobalAveragePool layer added (will use input spatial " @@ -599,8 +582,8 @@ ParseResult parse_json_model(const std::string& json_path, bool comments, } } - auto pool_layer = - std::make_shared(shape, pooltype, impl1); + auto pool_layer = std::make_shared( + shape, pooltype, kDefault); try { if (strides[0] != 2 || strides[1] != 2) { @@ -733,37 +716,20 @@ ParseResult parse_json_model(const std::string& json_path, bool comments, if (layer_type == "Mul") { ew_operation = "linear"; std::shared_ptr ew_layer; - if (onednn) { - ew_layer = std::make_shared( - ew_operation, value, 0.0F); - } else { - ew_layer = std::make_shared(ew_operation, - value, 0.0F); - } + ew_layer = + std::make_shared(ew_operation, value, 0.0F); layer = ew_layer; } else if (layer_type == "Add") { ew_operation = "linear"; std::shared_ptr ew_layer; - if (onednn && - it_lab_ai::EwLayerOneDnn::is_function_supported("linear")) { - ew_layer = std::make_shared( - ew_operation, 1.0F, value); - } else { - ew_layer = std::make_shared(ew_operation, - 1.0F, value); - } + ew_layer = + std::make_shared(ew_operation, 1.0F, value); layer = ew_layer; } else if (layer_type == "Sub") { ew_operation = "linear"; std::shared_ptr ew_layer; - if (onednn && - it_lab_ai::EwLayerOneDnn::is_function_supported("linear")) { - ew_layer = std::make_shared( - ew_operation, 1.0F, -value); - } else { - ew_layer = std::make_shared(ew_operation, - 1.0F, -value); - } + ew_layer = std::make_shared(ew_operation, 1.0F, + -value); layer = ew_layer; } else { continue; diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index 5d4d651e..76bed061 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -56,17 +56,15 @@ struct ParseResult { std::unordered_map original_ids; }; -void build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, - const std::string& json_path, bool comments, - bool parallel = false, bool onednn = false); -void build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, - bool comments, bool parallel = false, - bool onednn = 
false); +it_lab_ai::Graph build_graph(it_lab_ai::Tensor& input, + it_lab_ai::Tensor& output, + const std::string& json_path, bool comments); +it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, + bool comments); std::unordered_map load_class_names( const std::string& filename); -ParseResult parse_json_model(const std::string& json_path, bool comments, - bool parallel, bool onednn); +ParseResult parse_json_model(const std::string& json_path, bool comments); std::vector get_input_shape_from_json(const std::string& json_path); std::vector process_model_output(const std::vector& output, diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp index d2a50d3e..4d092f25 100644 --- a/app/Graph/graph_build.cpp +++ b/app/Graph/graph_build.cpp @@ -64,7 +64,31 @@ int main(int argc, char* argv[]) { std::vector vec(75, 3); it_lab_ai::Tensor output = it_lab_ai::make_tensor(vec, sh1); - build_graph_linear(input, output, true, parallel, onednn); + Graph graph = build_graph_linear(input, output, true); + + std::cout << "Starting inference..." << std::endl; + graph.inference(); +#ifdef ENABLE_STATISTIC_TIME + std::vector times = graph.getTimeInfo(); + std::cout << "!INFERENCE TIME INFO START!" << std::endl; + for (size_t i = 0; i < times.size(); i++) { + std::cout << times[i] << std::endl; + } + std::vector elps_time = graph.getTime(); + int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0); + std::cout << "Elapsed inference time:" << sum << std::endl; + std::cout << "!INFERENCE TIME INFO END!" << std::endl; +#endif + std::cout << "Inference completed." << std::endl; + std::vector tmp_output = + it_lab_ai::softmax(*output.as()); + for (size_t i = 0; i < tmp_output.size(); i++) { + if (tmp_output[i] < 1e-6) { + std::cout << i << ": 0" << std::endl; + } else { + std::cout << i << ": " << tmp_output[i] << std::endl; + } + } std::vector tmp_output = softmax(*output.as()); int top_n = std::min(3, static_cast(tmp_output.size())); std::vector indices(tmp_output.size()); @@ -94,7 +118,27 @@ int main(int argc, char* argv[]) { size_t output_classes = 1000; it_lab_ai::Tensor output({1, output_classes}, it_lab_ai::Type::kFloat); - build_graph(input, output, json_path, false, parallel, onednn); + Graph graph = build_graph(input, output, json_path, false); + + std::cout << "Starting inference..." << std::endl; + try { + graph.inference(); + std::cout << "Inference completed successfully." << std::endl; + } catch (const std::exception& e) { + std::cerr << "ERROR during inference: " << e.what() << std::endl; + } + +#ifdef ENABLE_STATISTIC_TIME + std::vector times = graph.getTimeInfo(); + std::cout << "!INFERENCE TIME INFO START!" << std::endl; + for (size_t i = 0; i < times.size(); i++) { + std::cout << times[i] << std::endl; + } + std::vector elps_time = graph.getTime(); + int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0); + std::cout << "Elapsed inference time:" << sum << std::endl; + std::cout << "!INFERENCE TIME INFO END!" 
<< std::endl;
+#endif
     std::vector<float> tmp_output =
         process_model_output(*output.as(), model_name);

From e969ee0e70df2c9d0de729a056ff139fd868dbf2 Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Wed, 5 Nov 2025 16:41:00 +0300
Subject: [PATCH 02/13] prototype

---
 app/Graph/acc_check.cpp   |  8 ++--
 app/Graph/build.cpp       | 89 ++++++++++++---------------------------
 app/Graph/build.hpp       | 76 +++++++++++++++++++++++++++++++--
 app/Graph/graph_build.cpp | 36 ++--------------
 4 files changed, 109 insertions(+), 100 deletions(-)

diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp
index dbd3d16b..44077fc1 100644
--- a/app/Graph/acc_check.cpp
+++ b/app/Graph/acc_check.cpp
@@ -24,7 +24,7 @@ int main(int argc, char* argv[]) {
       onednn = true;
     }
   }
-
+  it_lab_ai::LayerFactory::configure(parallel, onednn);
   std::string dataset_path;
   if (model_name == "alexnet_mnist") {
     dataset_path = MNIST_PATH;
@@ -80,7 +80,8 @@ int main(int argc, char* argv[]) {
     Shape sh({static_cast<size_t>(count_pic), 1, 28, 28});
     Tensor t = make_tensor(res, sh);
     input = t;
-    build_graph_linear(input, output, false, parallel, onednn);
+    Graph graph = build_graph_linear(input, output, false);
+    print_time_stats(graph);
     std::vector<std::vector<float>> tmp_output =
         softmax(*output.as(), 10);
     std::vector indices;
@@ -187,7 +188,8 @@ int main(int argc, char* argv[]) {
     it_lab_ai::Tensor output =
        it_lab_ai::Tensor(output_shape, it_lab_ai::Type::kFloat);
-    build_graph(input, output, json_path, false, parallel, onednn);
+    Graph graph = build_graph(input, output, json_path, false);
+    print_time_stats(graph);
     std::vector<std::vector<float>> processed_outputs;
     const std::vector<float>& raw_output = *output.as();
diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index 17944582..871478a8 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -73,15 +73,14 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input,
       it_lab_ai::Tensor tmp_values = tensor;
       it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
-      auto conv_layer = std::make_shared<it_lab_ai::ConvolutionalLayer>(
-          1, pads, 1, tmp_values, tmp_bias, kDefault, 1, true);
+      auto conv_layer =
+          LayerFactory::createConvLayer(1, pads, 1, tmp_values, tmp_bias, 1);
       layers.push_back(conv_layer);
       layerpostop.push_back(false);
       if (comments) std::cout << "ConvLayer added to layers." << std::endl;
     }
     if (layer_type.find("relu") != std::string::npos) {
-      std::shared_ptr<it_lab_ai::Layer> ew_layer;
-      ew_layer = std::make_shared<it_lab_ai::EWLayer>("relu");
+      auto ew_layer = LayerFactory::createEwLayer("relu");
       layers.push_back(ew_layer);
       layerpostop.push_back(true);
       if (comments)
@@ -106,8 +105,7 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input,
       if (comments)
         std::cout << "PoolingLayer shape: " << shape[0] << "x" << shape[1]
                   << std::endl;
-      auto pool_layer =
-          std::make_shared<it_lab_ai::PoolingLayer>(shape, pooltype, kDefault);
+      auto pool_layer = LayerFactory::createPoolingLayer(shape, pooltype);
       layers.push_back(pool_layer);
       layerpostop.push_back(false);
       if (comments) std::cout << "PoolingLayer added to layers." << std::endl;
@@ -350,25 +348,6 @@ it_lab_ai::Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output
   auto output_layer = layers.back();
   graph.setOutput(*output_layer, output);
   return graph;
-  /*if (comments) std::cout << "Starting inference..." << std::endl;
-  try {
-    graph.inference();
-    if (comments) std::cout << "Inference completed successfully."
<< std::endl; - } catch (const std::exception& e) { - std::cerr << "ERROR during inference: " << e.what() << std::endl; - } - -#ifdef ENABLE_STATISTIC_TIME - std::vector times = graph.getTimeInfo(); - std::cout << "!INFERENCE TIME INFO START!" << std::endl; - for (size_t i = 0; i < times.size(); i++) { - std::cout << times[i] << std::endl; - } - std::vector elps_time = graph.getTime(); - int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0); - std::cout << "Elapsed inference time:" << sum << std::endl; - std::cout << "!INFERENCE TIME INFO END!" << std::endl; -#endif*/ } ParseResult parse_json_model(const std::string& json_path, bool comments) { @@ -480,18 +459,13 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias()); - auto conv_layer = std::make_shared( - stride, pads, dilations, tmp_tensor, tmp_bias, kDefault, group); - layer = conv_layer; + layer = LayerFactory::createConvLayer(stride, pads, dilations, tensor, + tmp_bias, group); } else if (layer_type.find("Relu") != std::string::npos || layer_type.find("relu") != std::string::npos) { - std::shared_ptr ew_layer; - ew_layer = std::make_shared("relu"); - layer = ew_layer; + layer = LayerFactory::createEwLayer("relu"); } else if (layer_type.find("Sigmoid") != std::string::npos) { - std::shared_ptr ew_layer; - ew_layer = std::make_shared("sigmoid"); - layer = ew_layer; + layer = LayerFactory::createEwLayer("sigmoid"); } else if (layer_type.find("Dense") != std::string::npos || layer_type.find("FullyConnected") != std::string::npos) { it_lab_ai::Tensor tensor = it_lab_ai::create_tensor_from_json( @@ -520,9 +494,9 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { "off for inference)." << std::endl; } else if (layer_type == "GlobalAveragePool") { - auto pool_layer = std::make_shared( - it_lab_ai::Shape({0, 0}), "average", kDefault); - layer = pool_layer; + layer = LayerFactory::createPoolingLayer( + Shape({0, 0}), "average", Shape({1, 1}), Shape({0, 0, 0, 0}), + Shape({1, 1}), false); if (comments) { std::cout << "GlobalAveragePool layer added (will use input spatial " "dimensions as kernel)" @@ -582,31 +556,8 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { } } - auto pool_layer = std::make_shared( - shape, pooltype, kDefault); - - try { - if (strides[0] != 2 || strides[1] != 2) { - pool_layer->setStrides(strides[0], strides[1]); - } - - if (pads[0] != 0 || pads[1] != 0 || pads[2] != 0 || pads[3] != 0) { - pool_layer->setPads(pads[0], pads[1], pads[2], pads[3]); - } - - if (dilations[0] != 1 || dilations[1] != 1) { - pool_layer->setDilations(dilations[0], dilations[1]); - } - - pool_layer->setCeilMode(ceil_mode); - - } catch (const std::exception& e) { - if (comments) { - std::cout << "Warning: Some pooling parameters not supported: " - << e.what() << std::endl; - } - } - layer = pool_layer; + layer = LayerFactory::createPoolingLayer(shape, pooltype, strides, pads, + dilations, ceil_mode); } else if (layer_type.find("Flatten") != std::string::npos) { int axis = 1; @@ -1258,4 +1209,18 @@ it_lab_ai::Tensor prepare_mnist_image(const cv::Mat& image) { Shape sh({1, 1, 28, 28}); return it_lab_ai::make_tensor(res, sh); +} + +void print_time_stats(Graph& graph) { +#ifdef ENABLE_STATISTIC_TIME + std::vector times = graph.getTimeInfo(); + std::cout << "!INFERENCE TIME INFO START!" 
<< std::endl;
+  for (size_t i = 0; i < times.size(); i++) {
+    std::cout << times[i] << std::endl;
+  }
+  std::vector elps_time = graph.getTime();
+  int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0);
+  std::cout << "Elapsed inference time:" << sum << std::endl;
+  std::cout << "!INFERENCE TIME INFO END!" << std::endl;
+#endif
+}
\ No newline at end of file
diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp
index 76bed061..d48b8b7e 100644
--- a/app/Graph/build.hpp
+++ b/app/Graph/build.hpp
@@ -58,9 +58,9 @@ struct ParseResult {
 
 it_lab_ai::Graph build_graph(it_lab_ai::Tensor& input,
                              it_lab_ai::Tensor& output,
-                             const std::string& json_path, bool comments);
-it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output,
-                                    bool comments);
+                             const std::string& json_path, bool comments);
+it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input,
+                                    it_lab_ai::Tensor& output, bool comments);
 
 std::unordered_map load_class_names(
     const std::string& filename);
@@ -73,3 +73,73 @@ it_lab_ai::Tensor prepare_image(const cv::Mat& image,
                                 const std::vector& input_shape,
                                 const std::string& model_name = "");
 it_lab_ai::Tensor prepare_mnist_image(const cv::Mat& image);
+
+void print_time_stats(it_lab_ai::Graph& graph);
+
+namespace it_lab_ai {
+class LayerFactory {
+ private:
+  static bool parallel_;
+  static bool onednn_;
+
+ public:
+  static void configure(bool parallel, bool onednn) {
+    parallel_ = parallel;
+    onednn_ = onednn;
+  }
+
+  static std::shared_ptr<Layer> createConvLayer(size_t stride, size_t pads,
+                                                size_t dilations,
+                                                const Tensor& weights,
+                                                const Tensor& bias,
+                                                size_t group = 1) {
+    ImplType impl = parallel_ ? kTBB : kDefault;
+    return std::make_shared<ConvolutionalLayer>(stride, pads, dilations,
+                                                weights, bias, impl, group);
+  }
+
+  static std::shared_ptr<Layer> createPoolingLayer(
+      const Shape& shape, const std::string& pooltype,
+      const Shape& strides = {2, 2}, const Shape& pads = {0, 0, 0, 0},
+      const Shape& dilations = {1, 1}, bool ceil_mode = false) {
+    ImplType impl = parallel_ ? kTBB : kDefault;
+    auto pool_layer = std::make_shared<PoolingLayer>(shape, pooltype, impl);
+
+    try {
+      if (strides[0] != 2 || strides[1] != 2) {
+        pool_layer->setStrides(strides[0], strides[1]);
+      }
+
+      if (pads[0] != 0 || pads[1] != 0 || pads[2] != 0 || pads[3] != 0) {
+        pool_layer->setPads(pads[0], pads[1], pads[2], pads[3]);
+      }
+
+      if (dilations[0] != 1 || dilations[1] != 1) {
+        pool_layer->setDilations(dilations[0], dilations[1]);
+      }
+
+      pool_layer->setCeilMode(ceil_mode);
+
+    } catch (const std::exception& e) {
+      std::cout << "Warning: Some pooling parameters not supported: "
+                << e.what() << std::endl;
+    }
+
+    return pool_layer;
+  }
+
+  static std::shared_ptr<Layer> createEwLayer(const std::string& function,
+                                              float alpha = 1.0f,
+                                              float beta = 0.0f) {
+    if (onednn_ && EwLayerOneDnn::is_function_supported(function)) {
+      return std::make_shared<EwLayerOneDnn>(function, alpha, beta);
+    } else {
+      return std::make_shared<EWLayer>(function, alpha, beta);
+    }
+  }
+};
+
+bool LayerFactory::parallel_ = false;
+bool LayerFactory::onednn_ = false;
+
+} // namespace it_lab_ai
diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp
index 4d092f25..024d6fce 100644
--- a/app/Graph/graph_build.cpp
+++ b/app/Graph/graph_build.cpp
@@ -22,6 +22,8 @@ int main(int argc, char* argv[]) {
     }
   }
 
+  it_lab_ai::LayerFactory::configure(parallel, onednn);
+
   std::string json_path = model_paths[model_name];
 
   std::vector input_shape;
@@ -68,27 +70,8 @@ int main(int argc, char* argv[]) {
   std::cout << "Starting inference..."
<< std::endl;
   graph.inference();
-#ifdef ENABLE_STATISTIC_TIME
-  std::vector times = graph.getTimeInfo();
-  std::cout << "!INFERENCE TIME INFO START!" << std::endl;
-  for (size_t i = 0; i < times.size(); i++) {
-    std::cout << times[i] << std::endl;
-  }
-  std::vector elps_time = graph.getTime();
-  int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0);
-  std::cout << "Elapsed inference time:" << sum << std::endl;
-  std::cout << "!INFERENCE TIME INFO END!" << std::endl;
-#endif
   std::cout << "Inference completed." << std::endl;
-  std::vector<float> tmp_output =
-      it_lab_ai::softmax(*output.as());
-  for (size_t i = 0; i < tmp_output.size(); i++) {
-    if (tmp_output[i] < 1e-6) {
-      std::cout << i << ": 0" << std::endl;
-    } else {
-      std::cout << i << ": " << tmp_output[i] << std::endl;
-    }
-  }
+  print_time_stats(graph);
   std::vector<float> tmp_output = softmax(*output.as());
   int top_n = std::min(3, static_cast<int>(tmp_output.size()));
   std::vector indices(tmp_output.size());
@@ -127,18 +110,7 @@ int main(int argc, char* argv[]) {
     } catch (const std::exception& e) {
      std::cerr << "ERROR during inference: " << e.what() << std::endl;
     }
-
-#ifdef ENABLE_STATISTIC_TIME
-    std::vector times = graph.getTimeInfo();
-    std::cout << "!INFERENCE TIME INFO START!" << std::endl;
-    for (size_t i = 0; i < times.size(); i++) {
-      std::cout << times[i] << std::endl;
-    }
-    std::vector elps_time = graph.getTime();
-    int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0);
-    std::cout << "Elapsed inference time:" << sum << std::endl;
-    std::cout << "!INFERENCE TIME INFO END!" << std::endl;
-#endif
+    print_time_stats(graph);
     std::vector<float> tmp_output =
         process_model_output(*output.as(), model_name);

From a6f98ac62fe71d393a1d82a994c63a0f8e82b4cf Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Wed, 5 Nov 2025 19:59:31 +0300
Subject: [PATCH 03/13] split parallel

---
 app/Graph/acc_check.cpp   |  2 +-
 app/Graph/build.cpp       | 70 +++++++++++++++++++--------------------
 app/Graph/build.hpp       | 45 +------------------------
 app/Graph/graph_build.cpp |  4 +--
 4 files changed, 39 insertions(+), 82 deletions(-)

diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp
index 44077fc1..094de4b5 100644
--- a/app/Graph/acc_check.cpp
+++ b/app/Graph/acc_check.cpp
@@ -24,7 +24,7 @@ int main(int argc, char* argv[]) {
       onednn = true;
     }
   }
-  it_lab_ai::LayerFactory::configure(parallel, onednn);
+  it_lab_ai::LayerFactory::configure(onednn);
   std::string dataset_path;
   if (model_name == "alexnet_mnist") {
     dataset_path = MNIST_PATH;
diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index 871478a8..4df4d0fe 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -73,8 +73,8 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input,
       it_lab_ai::Tensor tmp_values = tensor;
       it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
-      auto conv_layer =
-          LayerFactory::createConvLayer(1, pads, 1, tmp_values, tmp_bias, 1);
+      auto conv_layer = std::make_shared<it_lab_ai::ConvolutionalLayer>(
+          1, pads, 1, tmp_values, tmp_bias, kDefault, 1, true);
       layers.push_back(conv_layer);
       layerpostop.push_back(false);
       if (comments) std::cout << "ConvLayer added to layers."
<< std::endl; @@ -105,7 +105,8 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, if (comments) std::cout << "PoolingLayer shape: " << shape[0] << "x" << shape[1] << std::endl; - auto pool_layer = LayerFactory::createPoolingLayer(shape, pooltype); + auto pool_layer = + std::make_shared(shape, pooltype, kDefault); layers.push_back(pool_layer); layerpostop.push_back(false); if (comments) std::cout << "PoolingLayer added to layers." << std::endl; @@ -157,31 +158,6 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, graph.setOutput(*layers.back(), output); if (comments) std::cout << "Output set in graph." << std::endl; return graph; - /*if (comments) std::cout << "Starting inference..." << std::endl; - graph.inference(); -#ifdef ENABLE_STATISTIC_TIME - std::vector times = graph.getTimeInfo(); - std::cout << "!INFERENCE TIME INFO START!" << std::endl; - for (size_t i = 0; i < times.size(); i++) { - std::cout << times[i] << std::endl; - } - std::vector elps_time = graph.getTime(); - int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0); - std::cout << "Elapsed inference time:" << sum << std::endl; - std::cout << "!INFERENCE TIME INFO END!" << std::endl; -#endif - if (comments) std::cout << "Inference completed." << std::endl; - if (comments) { - std::vector tmp_output = - it_lab_ai::softmax(*output.as()); - for (size_t i = 0; i < tmp_output.size(); i++) { - if (tmp_output[i] < 1e-6) { - std::cout << i << ": 0" << std::endl; - } else { - std::cout << i << ": " << tmp_output[i] << std::endl; - } - } - }*/ } std::string get_base_layer_name(const std::string& tensor_name) { @@ -459,8 +435,9 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias()); - layer = LayerFactory::createConvLayer(stride, pads, dilations, tensor, - tmp_bias, group); + auto conv_layer = std::make_shared( + stride, pads, dilations, tmp_tensor, tmp_bias, kDefault, group); + layer = conv_layer; } else if (layer_type.find("Relu") != std::string::npos || layer_type.find("relu") != std::string::npos) { layer = LayerFactory::createEwLayer("relu"); @@ -494,9 +471,9 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { "off for inference)." 
<< std::endl; } else if (layer_type == "GlobalAveragePool") { - layer = LayerFactory::createPoolingLayer( - Shape({0, 0}), "average", Shape({1, 1}), Shape({0, 0, 0, 0}), - Shape({1, 1}), false); + auto pool_layer = std::make_shared( + it_lab_ai::Shape({0, 0}), "average", kDefault); + layer = pool_layer; if (comments) { std::cout << "GlobalAveragePool layer added (will use input spatial " "dimensions as kernel)" @@ -556,8 +533,31 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { } } - layer = LayerFactory::createPoolingLayer(shape, pooltype, strides, pads, - dilations, ceil_mode); + auto pool_layer = + std::make_shared(shape, pooltype, kDefault); + + try { + if (strides[0] != 2 || strides[1] != 2) { + pool_layer->setStrides(strides[0], strides[1]); + } + + if (pads[0] != 0 || pads[1] != 0 || pads[2] != 0 || pads[3] != 0) { + pool_layer->setPads(pads[0], pads[1], pads[2], pads[3]); + } + + if (dilations[0] != 1 || dilations[1] != 1) { + pool_layer->setDilations(dilations[0], dilations[1]); + } + + pool_layer->setCeilMode(ceil_mode); + + } catch (const std::exception& e) { + if (comments) { + std::cout << "Warning: Some pooling parameters not supported: " + << e.what() << std::endl; + } + } + layer = pool_layer; } else if (layer_type.find("Flatten") != std::string::npos) { int axis = 1; diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index d48b8b7e..f7f5a6ef 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -79,55 +79,13 @@ void print_time_stats(it_lab_ai::Graph& graph); namespace it_lab_ai { class LayerFactory { private: - static bool parallel_; static bool onednn_; public: - static void configure(bool parallel, bool onednn) { - parallel_ = parallel; + static void configure(bool onednn) { onednn_ = onednn; } - static std::shared_ptr createConvLayer(size_t stride, size_t pads, - size_t dilations, - const Tensor& weights, - const Tensor& bias, - size_t group = 1) { - ImplType impl = parallel_ ? kTBB : kDefault; - return std::make_shared(stride, pads, dilations, - weights, bias, impl, group); - } - - static std::shared_ptr createPoolingLayer( - const Shape& shape, const std::string& pooltype, - const Shape& strides = {2, 2}, const Shape& pads = {0, 0, 0, 0}, - const Shape& dilations = {1, 1}, bool ceil_mode = false) { - ImplType impl = parallel_ ? 
kTBB : kDefault; - auto pool_layer = std::make_shared(shape, pooltype, impl); - - try { - if (strides[0] != 2 || strides[1] != 2) { - pool_layer->setStrides(strides[0], strides[1]); - } - - if (pads[0] != 0 || pads[1] != 0 || pads[2] != 0 || pads[3] != 0) { - pool_layer->setPads(pads[0], pads[1], pads[2], pads[3]); - } - - if (dilations[0] != 1 || dilations[1] != 1) { - pool_layer->setDilations(dilations[0], dilations[1]); - } - - pool_layer->setCeilMode(ceil_mode); - - } catch (const std::exception& e) { - std::cout << "Warning: Some pooling parameters not supported: " - << e.what() << std::endl; - } - - return pool_layer; - } - static std::shared_ptr createEwLayer(const std::string& function, float alpha = 1.0f, float beta = 0.0f) { @@ -139,7 +97,6 @@ class LayerFactory { } }; -bool LayerFactory::parallel_ = false; bool LayerFactory::onednn_ = false; } // namespace it_lab_ai diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp index 024d6fce..5a034b0c 100644 --- a/app/Graph/graph_build.cpp +++ b/app/Graph/graph_build.cpp @@ -13,7 +13,7 @@ int main(int argc, char* argv[]) { bool parallel = false; bool onednn = false; for (int i = 1; i < argc; ++i) { - if (std::string(argv[i]) == "--parallel") { + if (std::string(argv[i]) == "--parallel") { // change by Andrey parallel = true; } else if (std::string(argv[i]) == "--model" && i + 1 < argc) { model_name = argv[++i]; @@ -22,7 +22,7 @@ int main(int argc, char* argv[]) { } } - it_lab_ai::LayerFactory::configure(parallel, onednn); + it_lab_ai::LayerFactory::configure(onednn); std::string json_path = model_paths[model_name]; From df17e1c32523f7acc4dfb07da97ebd6af7e64919 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Thu, 6 Nov 2025 19:03:35 +0300 Subject: [PATCH 04/13] fix --- app/Graph/build.cpp | 27 ++++++++------------------- app/Graph/build.hpp | 1 + 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp index 4df4d0fe..29e11126 100644 --- a/app/Graph/build.cpp +++ b/app/Graph/build.cpp @@ -4,8 +4,7 @@ using namespace it_lab_ai; -it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, - it_lab_ai::Tensor& output, +Graph build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, bool comments) { if (comments) { for (size_t i = 0; i < input.get_shape().dims(); i++) { @@ -26,7 +25,6 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, std::cout << std::endl << std::endl; } } - std::vector> layers; std::vector layerpostop; @@ -156,7 +154,6 @@ it_lab_ai::Graph build_graph_linear(it_lab_ai::Tensor& input, } graph.setOutput(*layers.back(), output); - if (comments) std::cout << "Output set in graph." 
<< std::endl; return graph; } @@ -204,7 +201,7 @@ std::string layerTypeToString(it_lab_ai::LayerType type) { } } -it_lab_ai::Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, +Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, const std::string& json_path, bool comments) { if (comments) { for (size_t i = 0; i < input.get_shape().dims(); i++) { @@ -323,6 +320,7 @@ it_lab_ai::Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output graph.setSplitDistribution(split_distribution); auto output_layer = layers.back(); graph.setOutput(*output_layer, output); + return graph; } @@ -533,8 +531,8 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { } } - auto pool_layer = - std::make_shared(shape, pooltype, kDefault); + auto pool_layer = std::make_shared( + shape, pooltype, kDefault); try { if (strides[0] != 2 || strides[1] != 2) { @@ -666,22 +664,13 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) { if (layer_type == "Mul") { ew_operation = "linear"; - std::shared_ptr ew_layer; - ew_layer = - std::make_shared(ew_operation, value, 0.0F); - layer = ew_layer; + layer = LayerFactory::createEwLayer(ew_operation, value, 0.0F); } else if (layer_type == "Add") { ew_operation = "linear"; - std::shared_ptr ew_layer; - ew_layer = - std::make_shared(ew_operation, 1.0F, value); - layer = ew_layer; + layer = LayerFactory::createEwLayer(ew_operation, 1.0F, value); } else if (layer_type == "Sub") { ew_operation = "linear"; - std::shared_ptr ew_layer; - ew_layer = std::make_shared(ew_operation, 1.0F, - -value); - layer = ew_layer; + layer = LayerFactory::createEwLayer(ew_operation, 1.0F, -value); } else { continue; } diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index f7f5a6ef..3c3fdc65 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -89,6 +89,7 @@ class LayerFactory { static std::shared_ptr createEwLayer(const std::string& function, float alpha = 1.0f, float beta = 0.0f) { + if (onednn_ && EwLayerOneDnn::is_function_supported(function)) { return std::make_shared(function, alpha, beta); } else { From a04bcd7c712a480f84aaf9f0718c9175766c6483 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Fri, 7 Nov 2025 19:52:01 +0300 Subject: [PATCH 05/13] use shared_ptr --- app/Graph/acc_check.cpp | 6 ++- app/Graph/build.cpp | 27 ++++++++----- include/graph/graph.hpp | 88 +++++++++++++++++++++++++++++++++++++++-- 3 files changed, 107 insertions(+), 14 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index 094de4b5..415be5bc 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -80,7 +80,8 @@ int main(int argc, char* argv[]) { Shape sh({static_cast(count_pic), 1, 28, 28}); Tensor t = make_tensor(res, sh); input = t; - Graph graph = build_graph_linear(input, output, false); + auto graph = build_graph_linear(input, output, false); + graph.inference(); print_time_stats(graph); std::vector> tmp_output = softmax(*output.as(), 10); @@ -188,7 +189,8 @@ int main(int argc, char* argv[]) { it_lab_ai::Tensor output = it_lab_ai::Tensor(output_shape, it_lab_ai::Type::kFloat); - Graph graph = build_graph(input, output, json_path, false); + auto graph = build_graph(input, output, json_path, false); + graph.inference(); print_time_stats(graph); std::vector> processed_outputs; const std::vector& raw_output = *output.as(); diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp index 29e11126..b89fdd08 100644 --- a/app/Graph/build.cpp +++ b/app/Graph/build.cpp @@ -132,14 +132,15 
@@ Graph build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, if (comments) std::cout << "number of layers - " << layers.size() + 1 << std::endl; it_lab_ai::Graph graph(static_cast(layers.size())); - it_lab_ai::InputLayer a1(it_lab_ai::kNchw, it_lab_ai::kNchw); + auto a1 = std::make_shared(it_lab_ai::kNchw, + it_lab_ai::kNchw); if (comments) std::cout << "InputLayer created." << std::endl; graph.setInput(a1, input); if (comments) std::cout << "Input set in graph." << std::endl; - graph.makeConnection(a1, *layers[0]); + graph.makeConnection(a1, layers[0]); if (comments) std::cout << "Connection made between InputLayer and first layer." << std::endl; @@ -148,12 +149,16 @@ Graph build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, if (layerpostop[i]) { layers[i - 1]->postops.layers.push_back(layers[i].get()); layers[i - 1]->postops.count++; - graph.makeConnection(*layers[i - 1], *layers[i + 1]); + graph.makeConnection(layers[i - 1], layers[i + 1]); } else if (!layerpostop[i + 1]) - graph.makeConnection(*layers[i], *layers[i + 1]); + graph.makeConnection(layers[i], layers[i + 1]); } - graph.setOutput(*layers.back(), output); + graph.setOutput(layers.back(), output); + + for (auto& layer : layers) { + graph.addOwnedLayer(layer); + } return graph; } @@ -241,7 +246,7 @@ Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, [](const auto& layer) { return layer->getName() == it_lab_ai::kInput; }); if (input_layer_it != layers.end()) { - graph.setInput(**input_layer_it, input); + graph.setInput(*input_layer_it, input); } std::vector> connection_list; @@ -297,8 +302,8 @@ Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, } try { - graph.makeConnection(*name_to_layer[source_name], - *name_to_layer[target_name]); + graph.makeConnection(name_to_layer[source_name], + name_to_layer[target_name]); } catch (const std::exception& e) { std::cerr << "Failed: " << source_name << " -> " << target_name << " : " @@ -319,7 +324,11 @@ Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, } graph.setSplitDistribution(split_distribution); auto output_layer = layers.back(); - graph.setOutput(*output_layer, output); + graph.setOutput(output_layer, output); + + for (auto& layer : layers) { + graph.addOwnedLayer(layer); + } return graph; } diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp index b72e7bc2..75c9c0bd 100644 --- a/include/graph/graph.hpp +++ b/include/graph/graph.hpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -23,8 +24,9 @@ struct BranchState { class Graph { int BiggestSize_; - int V_; // amount of ids - std::vector layers_; // layers vector with some ids + int V_; // amount of ids + std::vector> owned_layers_; + std::vector layers_; std::vector arrayV_; // vertices (id -> vertex number) std::vector arrayE_; // edges (vertex number -> id) std::vector inten_; @@ -72,6 +74,18 @@ class Graph { split_distribution_ = split_dist; } + void addOwnedLayer(std::shared_ptr layer) { + if (!layer) return; + + for (const auto& existing_layer : owned_layers_) { + if (existing_layer.get() == layer.get()) { + return; + } + } + + owned_layers_.push_back(layer); + } + int getVertexValue(size_t layerID) const { if (layerID >= arrayV_.size()) { throw std::invalid_argument("ArrayV does not contain this ID."); @@ -94,6 +108,7 @@ class Graph { } int getLayersCount() const { return V_; } + const Layer& getLayerFromID(size_t layerID) const { if (layerID >= layers_.size()) { throw 
std::invalid_argument("Layers do not contain this ID."); @@ -111,6 +126,17 @@ class Graph { in_edges_.resize(1); } + void setInput(std::shared_ptr layer, Tensor& vec) { + addOwnedLayer(layer); + layer->setID(0); + layers_.push_back(layer.get()); + arrayV_.push_back(0); + inten_ = {vec}; + start_ = layer->getID(); + V_++; + in_edges_.resize(1); + } + void addSingleLayer(Layer& lay) { bool layer_exists = false; for (const auto* layer : layers_) { @@ -170,6 +196,49 @@ class Graph { in_edges_[layNext.getID()].push_back(layPrev.getID()); } + + void makeConnection(std::shared_ptr layPrev, + std::shared_ptr layNext) { + addOwnedLayer(layPrev); + addOwnedLayer(layNext); + bool layer_exists = false; + for (const auto* layer : layers_) { + if (layer == layNext.get()) { + layer_exists = true; + break; + } + } + + if (!layer_exists) { + layNext->setID(V_); + layers_.push_back(layNext.get()); + arrayV_.push_back(static_cast(arrayE_.size())); + + if (V_ >= static_cast(in_edges_.size())) { + in_edges_.resize(V_ + 1); + } + + V_++; + } + + if (layPrev->getID() == layNext->getID()) { + throw std::out_of_range("i=j cant add edge"); + } + + for (int i = layPrev->getID() + 1; i < V_; ++i) { + arrayV_[i]++; + } + arrayE_.insert(arrayE_.begin() + arrayV_[layPrev->getID()], + layNext->getID()); + arrayV_[V_] = static_cast(arrayE_.size()); + + if (layNext->getID() >= static_cast(in_edges_.size())) { + in_edges_.resize(layNext->getID() + 1); + } + + in_edges_[layNext->getID()].push_back(layPrev->getID()); + } + bool areLayerNext(const Layer& layPrev, const Layer& layNext) { for (int i = arrayV_[layPrev.getID()]; i < arrayV_[layPrev.getID() + 1]; i++) { @@ -179,6 +248,7 @@ class Graph { } return false; } + void inference() { std::vector> countinout = getInOutDegrees(); std::vector traversal = getTraversalOrder(); @@ -276,6 +346,7 @@ class Graph { *outtenres_ = outten_[0]; } + void setOutput(const Layer& lay, Tensor& vec) { end_ = lay.getID(); outtenres_ = &vec; @@ -283,6 +354,15 @@ class Graph { Tensor start = make_tensor(vec1); outten_.push_back(start); } + + void setOutput(std::shared_ptr layer, Tensor& vec) { + end_ = layer->getID(); + outtenres_ = &vec; + std::vector vec1 = {1, 7, 1, 0}; + Tensor start = make_tensor(vec1); + outten_.push_back(start); + } + #ifdef ENABLE_STATISTIC_TENSORS std::vector getTensors() { return tensors_; } #endif @@ -320,6 +400,7 @@ class Graph { #ifdef ENABLE_STATISTIC_WEIGHTS std::vector getWEIGHTS() { return weights_; } #endif + std::vector> getInOutDegrees() const { std::vector in_degree(V_, 0); @@ -340,6 +421,7 @@ class Graph { return result; } + std::vector getTraversalOrder() const { auto in_out_degrees = getInOutDegrees(); std::vector in_degree(V_); @@ -380,4 +462,4 @@ class Graph { return traversal; } }; -} // namespace it_lab_ai +} // namespace it_lab_ai \ No newline at end of file From 6a5a84c411bd377b8d299e8e9341fabb9c2ef3dc Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 8 Nov 2025 15:28:28 +0300 Subject: [PATCH 06/13] del old and fix tests --- app/Accuracy/accuracy_check.cpp | 12 +- app/AccuracyImgNet/accimgnet.cpp | 8 +- app/Graph/build.cpp | 5 +- app/Graph/build.hpp | 5 +- app/Graph/graph_build.cpp | 12 +- include/graph/graph.hpp | 81 ++-------- test/graph/test_graph.cpp | 156 +++++++++--------- test/inference/test_inference.cpp | 253 +++++++++++++----------------- 8 files changed, 224 insertions(+), 308 deletions(-) diff --git a/app/Accuracy/accuracy_check.cpp b/app/Accuracy/accuracy_check.cpp index d97648dd..542d5595 100644 --- a/app/Accuracy/accuracy_check.cpp 
+++ b/app/Accuracy/accuracy_check.cpp @@ -43,16 +43,16 @@ int main() { } Tensor input = t; Tensor output = make_tensor(vec, sh1); - InputLayer a1(kNhwc, kNchw, 1, 2); + auto a1 = std::make_shared(kNchw, kNchw, 1, 2); std::vector kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1}; Shape sh2({3, 3}); Tensor kernel = make_tensor(kernelvec, sh2); - ConvolutionalLayer a2(1, 0, 0, kernel); + auto a2 = std::make_shared(1, 0, 0, kernel); Shape poolshape = {2, 2}; - EWLayer a3("linear", 2.0F, 3.0F); - PoolingLayer a4(poolshape, "average"); - FCLayer a6; - OutputLayer a5; + auto a3 = std::make_shared("linear", 2.0F, 3.0F); + auto a4 = std::make_shared(poolshape, "average"); + auto a5 = std::make_shared(); + auto a6 = std::make_shared(); graph.setInput(a1, input); graph.makeConnection(a1, a2); graph.makeConnection(a2, a3); diff --git a/app/AccuracyImgNet/accimgnet.cpp b/app/AccuracyImgNet/accimgnet.cpp index 87008f2d..9f3fbe84 100644 --- a/app/AccuracyImgNet/accimgnet.cpp +++ b/app/AccuracyImgNet/accimgnet.cpp @@ -118,11 +118,11 @@ void check_accuracy(const std::string& neural_network_path, Graph a1 = open_network(neural_network_path); Tensor input; Tensor output; - InputLayer inlayer; - OutputLayer outlayer; + auto inlayer = std::make_shared(); + auto outlayer = std::make_shared(); // ?? warning from linux - outlayer.setID(1); - inlayer.setID(0); + outlayer->setID(1); + inlayer->setID(0); // size_t k = 5; for (size_t i = 0; i < imgs_size; i++) { diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp index b89fdd08..67f5a154 100644 --- a/app/Graph/build.cpp +++ b/app/Graph/build.cpp @@ -5,7 +5,7 @@ using namespace it_lab_ai; Graph build_graph_linear(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, - bool comments) { + bool comments) { if (comments) { for (size_t i = 0; i < input.get_shape().dims(); i++) { std::cout << input.get_shape()[i] << ' '; @@ -207,7 +207,7 @@ std::string layerTypeToString(it_lab_ai::LayerType type) { } Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, - const std::string& json_path, bool comments) { + const std::string& json_path, bool comments) { if (comments) { for (size_t i = 0; i < input.get_shape().dims(); i++) { std::cout << input.get_shape()[i] << ' '; @@ -325,7 +325,6 @@ Graph build_graph(it_lab_ai::Tensor& input, it_lab_ai::Tensor& output, graph.setSplitDistribution(split_distribution); auto output_layer = layers.back(); graph.setOutput(output_layer, output); - for (auto& layer : layers) { graph.addOwnedLayer(layer); } diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index 3c3fdc65..c34d9372 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -82,14 +82,11 @@ class LayerFactory { static bool onednn_; public: - static void configure(bool onednn) { - onednn_ = onednn; - } + static void configure(bool onednn) { onednn_ = onednn; } static std::shared_ptr createEwLayer(const std::string& function, float alpha = 1.0f, float beta = 0.0f) { - if (onednn_ && EwLayerOneDnn::is_function_supported(function)) { return std::make_shared(function, alpha, beta); } else { diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp index 5a034b0c..7b02fd22 100644 --- a/app/Graph/graph_build.cpp +++ b/app/Graph/graph_build.cpp @@ -13,7 +13,7 @@ int main(int argc, char* argv[]) { bool parallel = false; bool onednn = false; for (int i = 1; i < argc; ++i) { - if (std::string(argv[i]) == "--parallel") { // change by Andrey + if (std::string(argv[i]) == "--parallel") { // change by Andrey parallel = true; } else if (std::string(argv[i]) == "--model" && i + 
1 < argc) { model_name = argv[++i]; @@ -69,8 +69,12 @@ int main(int argc, char* argv[]) { Graph graph = build_graph_linear(input, output, true); std::cout << "Starting inference..." << std::endl; - graph.inference(); - std::cout << "Inference completed." << std::endl; + try { + graph.inference(); + std::cout << "Inference completed successfully." << std::endl; + } catch (const std::exception& e) { + std::cerr << "ERROR during inference: " << e.what() << std::endl; + } print_time_stats(graph); std::vector tmp_output = softmax(*output.as()); int top_n = std::min(3, static_cast(tmp_output.size())); @@ -106,7 +110,7 @@ int main(int argc, char* argv[]) { std::cout << "Starting inference..." << std::endl; try { graph.inference(); - std::cout << "Inference completed successfully." << std::endl; + std::cout << "Inference completed successfully." << std::endl; } catch (const std::exception& e) { std::cerr << "ERROR during inference: " << e.what() << std::endl; } diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp index 75c9c0bd..cdb790ad 100644 --- a/include/graph/graph.hpp +++ b/include/graph/graph.hpp @@ -24,11 +24,11 @@ struct BranchState { class Graph { int BiggestSize_; - int V_; // amount of ids + int V_; // amount of ids std::vector> owned_layers_; std::vector layers_; - std::vector arrayV_; // vertices (id -> vertex number) - std::vector arrayE_; // edges (vertex number -> id) + std::vector arrayV_; // vertices (id -> vertex number) + std::vector arrayE_; // edges (vertex number -> id) std::vector inten_; std::vector outten_; Tensor* outtenres_; @@ -76,7 +76,6 @@ class Graph { void addOwnedLayer(std::shared_ptr layer) { if (!layer) return; - for (const auto& existing_layer : owned_layers_) { if (existing_layer.get() == layer.get()) { return; @@ -116,16 +115,6 @@ class Graph { return *layers_[layerID]; } - void setInput(Layer& lay, Tensor& vec) { - lay.setID(0); - layers_.push_back(&lay); - arrayV_.push_back(0); - inten_ = {vec}; - start_ = lay.getID(); - V_++; - in_edges_.resize(1); - } - void setInput(std::shared_ptr layer, Tensor& vec) { addOwnedLayer(layer); layer->setID(0); @@ -137,40 +126,20 @@ class Graph { in_edges_.resize(1); } - void addSingleLayer(Layer& lay) { - bool layer_exists = false; - for (const auto* layer : layers_) { - if (layer == &lay) { - layer_exists = true; - break; - } - } - - if (!layer_exists) { - lay.setID(V_); - layers_.push_back(&lay); - arrayV_.push_back(static_cast(arrayE_.size())); - - if (V_ >= static_cast(in_edges_.size())) { - in_edges_.resize(V_ + 1); - } - - V_++; - } - } + void addSingleLayer(std::shared_ptr layer) { + addOwnedLayer(layer); - void makeConnection(const Layer& layPrev, Layer& layNext) { bool layer_exists = false; - for (const auto* layer : layers_) { - if (layer == &layNext) { + for (const auto* existing_layer : layers_) { + if (existing_layer == layer.get()) { layer_exists = true; break; } } if (!layer_exists) { - layNext.setID(V_); - layers_.push_back(&layNext); + layer->setID(V_); + layers_.push_back(layer.get()); arrayV_.push_back(static_cast(arrayE_.size())); if (V_ >= static_cast(in_edges_.size())) { @@ -179,28 +148,13 @@ class Graph { V_++; } - - if (layPrev.getID() == layNext.getID()) { - throw std::out_of_range("i=j cant add edge"); - } - - for (int i = layPrev.getID() + 1; i < V_; ++i) { - arrayV_[i]++; - } - arrayE_.insert(arrayE_.begin() + arrayV_[layPrev.getID()], layNext.getID()); - arrayV_[V_] = static_cast(arrayE_.size()); - - if (layNext.getID() >= static_cast(in_edges_.size())) { - 
in_edges_.resize(layNext.getID() + 1); - } - - in_edges_[layNext.getID()].push_back(layPrev.getID()); } void makeConnection(std::shared_ptr layPrev, std::shared_ptr layNext) { addOwnedLayer(layPrev); addOwnedLayer(layNext); + bool layer_exists = false; for (const auto* layer : layers_) { if (layer == layNext.get()) { @@ -239,10 +193,11 @@ class Graph { in_edges_[layNext->getID()].push_back(layPrev->getID()); } - bool areLayerNext(const Layer& layPrev, const Layer& layNext) { - for (int i = arrayV_[layPrev.getID()]; i < arrayV_[layPrev.getID() + 1]; + bool areLayerNext(std::shared_ptr layPrev, + std::shared_ptr layNext) { + for (int i = arrayV_[layPrev->getID()]; i < arrayV_[layPrev->getID() + 1]; i++) { - if (arrayE_[i] == layNext.getID()) { + if (arrayE_[i] == layNext->getID()) { return true; } } @@ -347,14 +302,6 @@ class Graph { *outtenres_ = outten_[0]; } - void setOutput(const Layer& lay, Tensor& vec) { - end_ = lay.getID(); - outtenres_ = &vec; - std::vector vec1 = {1, 7, 1, 0}; - Tensor start = make_tensor(vec1); - outten_.push_back(start); - } - void setOutput(std::shared_ptr layer, Tensor& vec) { end_ = layer->getID(); outtenres_ = &vec; diff --git a/test/graph/test_graph.cpp b/test/graph/test_graph.cpp index 5aa42890..ce84dcca 100644 --- a/test/graph/test_graph.cpp +++ b/test/graph/test_graph.cpp @@ -19,9 +19,10 @@ TEST(graph, check_connection) { Tensor input = make_tensor({1.0F, 2.0F}, {2}); Tensor output; Graph graph(5); - FCLayer fcLayer(weights, bias); - InputLayer inputLayer; - EWLayer ewLayer; + + auto fcLayer = std::make_shared(weights, bias); + auto inputLayer = std::make_shared(); + auto ewLayer = std::make_shared(); graph.setInput(inputLayer, input); graph.makeConnection(inputLayer, fcLayer); @@ -38,10 +39,11 @@ TEST(graph, check_connection1) { Tensor output; Graph graph(5); - FCLayer fcLayer(weights, bias); - InputLayer inputLayer; - EWLayer ewLayer; - FCLayer fcLayer2(weights, bias); + + auto fcLayer = std::make_shared(weights, bias); + auto inputLayer = std::make_shared(); + auto ewLayer = std::make_shared(); + auto fcLayer2 = std::make_shared(weights, bias); graph.setInput(inputLayer, input); graph.makeConnection(inputLayer, fcLayer); @@ -60,10 +62,11 @@ TEST(graph, check_connection_when_not_connection) { Tensor output; Graph graph(5); - FCLayer fcLayer(weights, bias); - InputLayer inputLayer; - EWLayer ewLayer; - FCLayer fcLayer2(weights, bias); + + auto fcLayer = std::make_shared(weights, bias); + auto inputLayer = std::make_shared(); + auto ewLayer = std::make_shared(); + auto fcLayer2 = std::make_shared(weights, bias); graph.setInput(inputLayer, input); graph.makeConnection(inputLayer, fcLayer); @@ -85,10 +88,11 @@ TEST(graph, check_connection_when_not_connection1) { Tensor output; Graph graph(5); - FCLayer fcLayer(weights, bias); - FCLayer fcLayer2(weights, bias); - FCLayer fcLayer3(weights, bias); - FCLayer fcLayer4(weights, bias); + + auto fcLayer = std::make_shared(weights, bias); + auto fcLayer2 = std::make_shared(weights, bias); + auto fcLayer3 = std::make_shared(weights, bias); + auto fcLayer4 = std::make_shared(weights, bias); graph.setInput(fcLayer, input); graph.makeConnection(fcLayer, fcLayer2); @@ -107,10 +111,11 @@ TEST(graph, check_connection_when_not_connection2) { Tensor output; Graph graph(5); - FCLayer fcLayer(weights, bias); - FCLayer fcLayer2(weights, bias); - FCLayer fcLayer3(weights, bias); - FCLayer fcLayer4(weights, bias); + + auto fcLayer = std::make_shared(weights, bias); + auto fcLayer2 = std::make_shared(weights, bias); + auto 
diff --git a/test/graph/test_graph.cpp b/test/graph/test_graph.cpp
index 5aa42890..ce84dcca 100644
--- a/test/graph/test_graph.cpp
+++ b/test/graph/test_graph.cpp
@@ -19,9 +19,10 @@ TEST(graph, check_connection) {
   Tensor input = make_tensor({1.0F, 2.0F}, {2});
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  InputLayer inputLayer;
-  EWLayer ewLayer;
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto inputLayer = std::make_shared<InputLayer>();
+  auto ewLayer = std::make_shared<EWLayer>();
 
   graph.setInput(inputLayer, input);
   graph.makeConnection(inputLayer, fcLayer);
@@ -38,10 +39,11 @@ TEST(graph, check_connection1) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  InputLayer inputLayer;
-  EWLayer ewLayer;
-  FCLayer fcLayer2(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto inputLayer = std::make_shared<InputLayer>();
+  auto ewLayer = std::make_shared<EWLayer>();
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(inputLayer, input);
   graph.makeConnection(inputLayer, fcLayer);
@@ -60,10 +62,11 @@ TEST(graph, check_connection_when_not_connection) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  InputLayer inputLayer;
-  EWLayer ewLayer;
-  FCLayer fcLayer2(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto inputLayer = std::make_shared<InputLayer>();
+  auto ewLayer = std::make_shared<EWLayer>();
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(inputLayer, input);
   graph.makeConnection(inputLayer, fcLayer);
@@ -85,10 +88,11 @@ TEST(graph, check_connection_when_not_connection1) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -107,10 +111,11 @@ TEST(graph, check_connection_when_not_connection2) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -129,10 +134,11 @@ TEST(graph, vertex_out_of_range) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -150,10 +156,11 @@ TEST(graph, edges_out_of_range) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -171,10 +178,11 @@ TEST(graph, inputs_out_of_range) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -192,10 +200,11 @@ TEST(graph, get_layer_out_of_range) {
   Tensor output;
   Graph graph(5);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -214,10 +223,11 @@ TEST(graph_transformations, check_subgraphs_search) {
   Graph graph(5);
   Graph subgraph(2);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -241,11 +251,11 @@ TEST(graph_transformations, check_subgraphs_search1) {
   Graph graph(5);
   Graph subgraph(2);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
-  EWLayer ewLayer5("relu");
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
+  auto ewLayer5 = std::make_shared<EWLayer>("relu");
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -270,10 +280,10 @@ TEST(graph_transformations, check_subgraphs_search2) {
   Graph graph(5);
   Graph subgraph(2);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -300,10 +310,10 @@ TEST(graph_transformations, check_subgraphs_search3) {
   Graph graph(5);
   Graph subgraph(2);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -330,10 +340,10 @@ TEST(graph_transformations, check_subgraphs_search4) {
   Graph graph(5);
   Graph subgraph(2);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -360,11 +370,11 @@ TEST(graph_transformations, check_subgraphs_search5) {
   Graph graph(5);
   Graph subgraph(2);
-  FCLayer fcLayer(weights, bias);
-  FCLayer fcLayer2(weights, bias);
-  FCLayer fcLayer3(weights, bias);
-  FCLayer fcLayer4(weights, bias);
-  EWLayer ewLayer5("relu");
+  auto fcLayer = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_shared<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_shared<FCLayer>(weights, bias);
+  auto ewLayer5 = std::make_shared<EWLayer>("relu");
 
   graph.setInput(fcLayer, input);
   graph.makeConnection(fcLayer, fcLayer2);
@@ -392,6 +402,7 @@ TEST(graph_transformations, check_subgraphs_big_random) {
   Tensor output;
   Graph graph(num_vertices);
   Graph subgraph(3);
+
   std::vector<std::shared_ptr<Layer>> layers;
   for (int i = 0; i < num_vertices / 2; i++) {
     layers.push_back(std::make_shared<FCLayer>(weights, bias));
@@ -399,26 +410,27 @@ TEST(graph_transformations, check_subgraphs_big_random) {
   for (int i = 0; i < num_vertices / 2; i++) {
     layers.push_back(std::make_shared<EWLayer>("relu"));
   }
-  graph.setInput(*layers[0], input);
+
+  graph.setInput(layers[0], input);
   for (int i = 0; i < num_vertices; i++) {
     int rFirst = rand() % (num_vertices - 1);
     int rSecond = 1 + rand() % (num_vertices - 1);
     if ((rFirst == rSecond) ||
-        (((*layers[rFirst]).getID() == (*layers[rSecond]).getID()) &&
-         ((*layers[rFirst]).getID() != 0))) {
+        ((layers[rFirst]->getID() == layers[rSecond]->getID()) &&
+         (layers[rFirst]->getID() != 0))) {
       continue;
    }
-    if (((*layers[rFirst]).getID() >= graph.getLayersCount()) ||
-        (rFirst != 0 && (*layers[rFirst]).getID() == 0)) {
-      graph.addSingleLayer(*layers[rFirst]);
+    if ((layers[rFirst]->getID() >= graph.getLayersCount()) ||
+        (rFirst != 0 && layers[rFirst]->getID() == 0)) {
+      graph.addSingleLayer(layers[rFirst]);
    }
-    graph.makeConnection(*layers[rFirst], *layers[rSecond]);
+    graph.makeConnection(layers[rFirst], layers[rSecond]);
   }
-  graph.setOutput(*layers[num_vertices - 1], output);
+  graph.setOutput(layers[num_vertices - 1], output);
 
-  subgraph.setInput(*layers[0], input);
-  subgraph.makeConnection(*layers[0], *layers[50]);
-  subgraph.makeConnection(*layers[50], *layers[1]);
+  subgraph.setInput(layers[0], input);
+  subgraph.makeConnection(layers[0], layers[50]);
+  subgraph.makeConnection(layers[50], layers[1]);
 
   std::vector<std::vector<int>> res1 = find_subgraphs(graph, subgraph);
   double res1_time =
diff --git a/test/inference/test_inference.cpp b/test/inference/test_inference.cpp
index a297b220..db2f3269 100644
--- a/test/inference/test_inference.cpp
+++ b/test/inference/test_inference.cpp
@@ -1,3 +1,4 @@
+#include <memory>
 #include
 #include
@@ -26,35 +27,36 @@ TEST(bfs, check_struct_graph) {
   }
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  InputLayer a1(kNhwc, kNchw, 1, 2);
+
+  // shared_ptr
+  auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  ConvolutionalLayer a2(1, 0, 1, kernel);
+  auto a2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
 
-  ConvolutionalLayer a3_1(1, 0, 1, kernel);
-  EWLayer a3_1_1("relu");
-  ConvolutionalLayer a3_2(1, 0, 1, kernel);
-  EWLayer a3_2_1("relu");
+  auto a3_1 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3_1_1 = std::make_shared<EWLayer>("relu");
+  auto a3_2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3_2_1 = std::make_shared<EWLayer>("relu");
 
-  ConcatLayer a4(0);
-  EWLayer a5("relu");
+  auto a4 = std::make_shared<ConcatLayer>(0);
+  auto a5 = std::make_shared<EWLayer>("relu");
 
-  EWLayer a6_1("relu");
-  EWLayer a6_2("relu");
+  auto a6_1 = std::make_shared<EWLayer>("relu");
+  auto a6_2 = std::make_shared<EWLayer>("relu");
 
-  ConcatLayer a7(0);
-  // EWLayer a8("relu");
-  SplitLayer a8(1, 3);
+  auto a7 = std::make_shared<ConcatLayer>(0);
+  auto a8 = std::make_shared<SplitLayer>(1, 3);
 
-  EWLayer a9_1("relu");
-  EWLayer a9_2("relu");
-  EWLayer a9_3("relu");
+  auto a9_1 = std::make_shared<EWLayer>("relu");
+  auto a9_2 = std::make_shared<EWLayer>("relu");
+  auto a9_3 = std::make_shared<EWLayer>("relu");
 
-  ConcatLayer a10(0);
-  EWLayer a11_1("relu");
+  auto a10 = std::make_shared<ConcatLayer>(0);
+  auto a11_1 = std::make_shared<EWLayer>("relu");
 
-  ConcatLayer a12(0);
+  auto a12 = std::make_shared<ConcatLayer>(0);
 
   graph.setInput(a1, input);
   graph.makeConnection(a1, a2);
@@ -101,22 +103,18 @@ TEST(bfs, check_struct_graph_not_used_yolo) {
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  // EWLayer a2("relu"); //split 1 , 4
-  SplitLayer a2(1, 4);
-
-  EWLayer a3_1("relu");
-  EWLayer a3_1_1("relu");
-
-  ConcatLayer a3_2(0);
-  EWLayer a3_2_1("relu");
-
-  EWLayer a3_3("relu");
-  ConcatLayer a3_3_1(0);
-  EWLayer a3_3_2("relu");
-  EWLayer a3_3_3("relu");
-  EWLayer a3_3_4("relu");
-  ConcatLayer a4(0);
+  auto a2 = std::make_shared<SplitLayer>(1, 4);
+  auto a3_1 = std::make_shared<EWLayer>("relu");
+  auto a3_1_1 = std::make_shared<EWLayer>("relu");
+  auto a3_2 = std::make_shared<ConcatLayer>(0);
+  auto a3_2_1 = std::make_shared<EWLayer>("relu");
+  auto a3_3 = std::make_shared<EWLayer>("relu");
+  auto a3_3_1 = std::make_shared<ConcatLayer>(0);
+  auto a3_3_2 = std::make_shared<EWLayer>("relu");
+  auto a3_3_3 = std::make_shared<EWLayer>("relu");
+  auto a3_3_4 = std::make_shared<EWLayer>("relu");
+  auto a4 = std::make_shared<ConcatLayer>(0);
 
   graph.setInput(a2, input);
   graph.makeConnection(a2, a3_1);
@@ -151,19 +149,16 @@ TEST(bfs, check_struct_graph_resnet1) {
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  SplitLayer a2(1, 2);
-
-  EWLayer a2_1("relu");
-  EWLayer a2_1_1("relu");
-
-  EWLayer a2_1_1_1("relu");
-  EWLayer a2_1_1_2("relu");
-
-  BinaryOpLayer a2_1_2(BinaryOpLayer::Operation::kMul);
-  EWLayer a2_1_3("relu");
-  EWLayer a2_2("relu");
-  BinaryOpLayer a3(BinaryOpLayer::Operation::kAdd);
-  EWLayer a4("relu");
+  auto a2 = std::make_shared<SplitLayer>(1, 2);
+  auto a2_1 = std::make_shared<EWLayer>("relu");
+  auto a2_1_1 = std::make_shared<EWLayer>("relu");
+  auto a2_1_1_1 = std::make_shared<EWLayer>("relu");
+  auto a2_1_1_2 = std::make_shared<EWLayer>("relu");
+  auto a2_1_2 = std::make_shared<BinaryOpLayer>(BinaryOpLayer::Operation::kMul);
+  auto a2_1_3 = std::make_shared<EWLayer>("relu");
+  auto a2_2 = std::make_shared<EWLayer>("relu");
+  auto a3 = std::make_shared<BinaryOpLayer>(BinaryOpLayer::Operation::kAdd);
+  auto a4 = std::make_shared<EWLayer>("relu");
 
   graph.setInput(a2, input);
   graph.makeConnection(a2, a2_1);
@@ -195,18 +190,15 @@ TEST(bfs, check_struct_graph_resnet2) {
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  SplitLayer a2(1, 2);
-
-  EWLayer a2_1("relu");
-  EWLayer a2_1_1("relu");
-
-  EWLayer a2_1_1_1("relu");
-  EWLayer a2_1_1_2("relu");
-
-  BinaryOpLayer a2_1_2(BinaryOpLayer::Operation::kMul);
-  EWLayer a2_1_3("relu");
-  BinaryOpLayer a3(BinaryOpLayer::Operation::kAdd);
-  EWLayer a4("relu");
+  auto a2 = std::make_shared<SplitLayer>(1, 2);
+  auto a2_1 = std::make_shared<EWLayer>("relu");
+  auto a2_1_1 = std::make_shared<EWLayer>("relu");
+  auto a2_1_1_1 = std::make_shared<EWLayer>("relu");
+  auto a2_1_1_2 = std::make_shared<EWLayer>("relu");
+  auto a2_1_2 = std::make_shared<BinaryOpLayer>(BinaryOpLayer::Operation::kMul);
+  auto a2_1_3 = std::make_shared<EWLayer>("relu");
+  auto a3 = std::make_shared<BinaryOpLayer>(BinaryOpLayer::Operation::kAdd);
+  auto a4 = std::make_shared<EWLayer>("relu");
 
   graph.setInput(a2, input);
   graph.makeConnection(a2, a2_1);
@@ -237,17 +229,14 @@ TEST(bfs, check_struct_graph_google1) {
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  EWLayer a2("relu");
-
-  EWLayer a2_1("relu");
-  EWLayer a2_2("relu");
-  EWLayer a2_3("relu");
-  EWLayer a2_4("relu");
-
-  EWLayer a2_2_1("linear", 2.0F, 3.0F);
-  EWLayer a2_3_1("linear", 2.0F, 3.0F);
-
-  ConcatLayer a3(0);
+  auto a2 = std::make_shared<EWLayer>("relu");
+  auto a2_1 = std::make_shared<EWLayer>("relu");
+  auto a2_2 = std::make_shared<EWLayer>("relu");
+  auto a2_3 = std::make_shared<EWLayer>("relu");
+  auto a2_4 = std::make_shared<EWLayer>("relu");
+  auto a2_2_1 = std::make_shared<EWLayer>("linear", 2.0F, 3.0F);
+  auto a2_3_1 = std::make_shared<EWLayer>("linear", 2.0F, 3.0F);
+  auto a3 = std::make_shared<ConcatLayer>(0);
 
   graph.setInput(a2, input);
   graph.makeConnection(a2, a2_1);
@@ -280,13 +269,15 @@ TEST(bfs, check_result_vec) {
   }
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  InputLayer a1(kNhwc, kNchw, 1, 2);
-  InputLayer a3(kNhwc, kNhwc, 1, 1);
+
+  auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
+  auto a3 = std::make_shared<InputLayer>(kNhwc, kNhwc, 1, 1);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  ConvolutionalLayer a2(1, 0, 1, kernel);
-  ConvolutionalLayer a4(1, 0, 1, kernel);
+  auto a2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a4 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+
   graph.setInput(a1, input);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a4);
@@ -351,7 +342,8 @@ TEST(bfs, check_end_to_end) {
   }
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  InputLayer a1(kNhwc, kNchw, 1, 2);
+
+  auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec;
   kernelvec.reserve(3 * 3 * 3 * 3);
   for (int i = 0; i < 81; ++i) {
   }
   Shape sh2({3, 3, 3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  ConvolutionalLayer a2(1, 0, 1, kernel);
+  auto a2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
   Shape poolshape = {2, 2};
-  EWLayer a3("linear", 2.0F, 3.0F);
-  PoolingLayer a4(poolshape, "average");
-  FCLayer a6;
-  OutputLayer a5;
+  auto a3 = std::make_shared<EWLayer>("linear", 2.0F, 3.0F);
+  auto a4 = std::make_shared<PoolingLayer>(poolshape, "average");
+  auto a6 = std::make_shared<FCLayer>();
+  auto a5 = std::make_shared<OutputLayer>();
+
   graph.setInput(a1, input);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
@@ -373,41 +366,13 @@ TEST(bfs, check_end_to_end) {
   graph.setOutput(a5, output);
 
   graph.inference();
-#ifdef ENABLE_STATISTIC_WEIGHTS
-  std::vector<Tensor> weights = graph.getWEIGHTS();
-  for (size_t i = 0; i < weights.size(); i++) {
-    switch (weights[i].get_type()) {
-      case Type::kInt: {
-        std::vector<int> ten = *weights[i].as<int>();
-        for (size_t j = 0; j < ten.size(); j++) {
-          std::cout << ten[j] << ' ';
-        }
-        std::cout << '\n';
-        break;
-      }
-      case Type::kFloat: {
-        std::vector<float> ten = *weights[i].as<float>();
-        for (size_t j = 0; j < ten.size(); j++) {
-          std::cout << ten[j] << ' ';
-        }
-        std::cout << '\n';
-        break;
-      }
-      case Type::kUnknown:
-      default: {
-        throw std::runtime_error("Unknown tensor type encountered");
-        break;
-      }
-    }
-  }
-#endif
-
   std::vector<float> tmp = *output.as<float>();
   ASSERT_GT(tmp.size(), 0);
   for (size_t i = 0; i < tmp.size(); ++i) {
     ASSERT_GE(tmp[i], 0);
   }
 }
+
 TEST(bfs, check_struct_layer) {
   Graph graph(5);
   Shape sh1({1, 5, 5, 3});
@@ -418,16 +383,14 @@ TEST(bfs, check_struct_layer) {
   }
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  InputLayer a1(kNhwc, kNchw, 1, 2);
+
+  auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  ConvolutionalLayer a2(1, 0, 1, kernel);
-  ConvolutionalLayer a3(1, 0, 1, kernel);
+  auto a2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
 
-  // EWLayer a4("linear", 2.0F, 3.0F);
-  // a2.ewops.layers.push_back(&a4);
-  // a2.ewops.countlayers++;
   graph.setInput(a1, input);
   graph.makeConnection(a1, a2);
   graph.makeConnection(a2, a3);
@@ -437,6 +400,7 @@ TEST(bfs, check_struct_layer) {
   std::vector<float> res = {81, 81, 81};
   ASSERT_EQ(tmp, res);
 }
+
 TEST(bfs, check_struct_layer_added) {
   Graph graph(5);
   Shape sh1({1, 5, 5, 3});
@@ -447,16 +411,16 @@ TEST(bfs, check_struct_layer_added) {
   }
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  InputLayer a1(kNhwc, kNchw, 1, 2);
+
+  auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  ConvolutionalLayer a2(1, 0, 1, kernel);
-  ConvolutionalLayer a3(1, 0, 1, kernel);
-
-  EWLayer a4("linear", 2.0F, 3.0F);
-  a2.postops.layers.push_back(&a4);
-  a2.postops.count++;
+  auto a2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a4 = std::make_shared<EWLayer>("linear", 2.0F, 3.0F);
+  a2->postops.layers.push_back(a4.get());
+  a2->postops.count++;
 
   graph.setInput(a1, input);
   graph.makeConnection(a1, a2);
@@ -480,35 +444,28 @@ FLAKY_TEST(bfs, check_struct_graph_split) {
   }
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
-  InputLayer a1(kNhwc, kNchw, 1, 2);
+
+  auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});
   Tensor kernel = make_tensor(kernelvec, sh2);
-  ConvolutionalLayer a2(1, 0, 1, kernel);
-
-  ConvolutionalLayer a3_1(1, 0, 1, kernel);
-  EWLayer a3_1_1("relu");
-  ConvolutionalLayer a3_2(1, 0, 1, kernel);
-  EWLayer a3_2_1("relu");
-
-  ConcatLayer a4(0);
-  EWLayer a5("relu");
-
-  EWLayer a6_1("relu");
-  EWLayer a6_2("relu");
-
-  ConcatLayer a7(0);
-  // EWLayer a8("relu");
-  SplitLayer a8(1, 3);
-
-  EWLayer a9_1("relu");
-  EWLayer a9_2("relu");
-  EWLayer a9_3("relu");
-
-  ConcatLayer a10(0);
-  EWLayer a11_1("relu");
-
-  ConcatLayer a12(0);
+  auto a2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3_1 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3_1_1 = std::make_shared<EWLayer>("relu");
+  auto a3_2 = std::make_shared<ConvolutionalLayer>(1, 0, 1, kernel);
+  auto a3_2_1 = std::make_shared<EWLayer>("relu");
+  auto a4 = std::make_shared<ConcatLayer>(0);
+  auto a5 = std::make_shared<EWLayer>("relu");
+  auto a6_1 = std::make_shared<EWLayer>("relu");
+  auto a6_2 = std::make_shared<EWLayer>("relu");
+  auto a7 = std::make_shared<ConcatLayer>(0);
+  auto a8 = std::make_shared<SplitLayer>(1, 3);
+  auto a9_1 = std::make_shared<EWLayer>("relu");
+  auto a9_2 = std::make_shared<EWLayer>("relu");
+  auto a9_3 = std::make_shared<EWLayer>("relu");
+  auto a10 = std::make_shared<ConcatLayer>(0);
+  auto a11_1 = std::make_shared<EWLayer>("relu");
+  auto a12 = std::make_shared<ConcatLayer>(0);
 
   graph.setInput(a1, input);
   graph.makeConnection(a1, a2);

From 6ca9d47ad9cf8f45ceec6059f2ac3c235d654102 Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Sat, 8 Nov 2025 15:31:36 +0300
Subject: [PATCH 07/13] utf

---
 test/inference/test_inference.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/inference/test_inference.cpp b/test/inference/test_inference.cpp
index db2f3269..2bbd1f44 100644
--- a/test/inference/test_inference.cpp
+++ b/test/inference/test_inference.cpp
@@ -28,7 +28,7 @@ TEST(bfs, check_struct_graph) {
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
 
-  // shared_ptr
+  // Use shared_ptr for all layers
   auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});

From fecdb930bfeb9e24a5d3a696e7f6adc1b8da2523 Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Sat, 8 Nov 2025 15:44:17 +0300
Subject: [PATCH 08/13] unused

---
 app/Graph/acc_check.cpp   | 6 ++----
 app/Graph/build.cpp       | 2 ++
 app/Graph/graph_build.cpp | 6 ++----
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp
index 415be5bc..3ee2d69b 100644
--- a/app/Graph/acc_check.cpp
+++ b/app/Graph/acc_check.cpp
@@ -13,12 +13,10 @@ using namespace it_lab_ai;
 
 int main(int argc, char* argv[]) {
   std::string model_name = "alexnet_mnist";
-  bool parallel = false;
+  //bool parallel = false;
   bool onednn = false;
   for (int i = 1; i < argc; ++i) {
-    if (std::string(argv[i]) == "--parallel") {
-      parallel = true;
-    } else if (std::string(argv[i]) == "--model" && i + 1 < argc) {
+    if (std::string(argv[i]) == "--model" && i + 1 < argc) {
       model_name = argv[++i];
     } else if (std::string(argv[i]) == "--onednn") {
       onednn = true;
diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index 67f5a154..25e92bb3 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -1219,5 +1219,7 @@ void print_time_stats(Graph& graph) {
   int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0);
   std::cout << "Elapsed inference time:" << sum << std::endl;
   std::cout << "!INFERENCE TIME INFO END!" << std::endl;
+#else
+  (void)graph;
 #endif
 }
\ No newline at end of file
diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp
index 7b02fd22..d4eb2602 100644
--- a/app/Graph/graph_build.cpp
+++ b/app/Graph/graph_build.cpp
@@ -10,12 +10,10 @@ using namespace it_lab_ai;
 
 int main(int argc, char* argv[]) {
   std::string model_name = "alexnet_mnist";
-  bool parallel = false;
+  //bool parallel = false;
   bool onednn = false;
   for (int i = 1; i < argc; ++i) {
-    if (std::string(argv[i]) == "--parallel") {  // change by Andrey
-      parallel = true;
-    } else if (std::string(argv[i]) == "--model" && i + 1 < argc) {
+    if (std::string(argv[i]) == "--model" && i + 1 < argc) {
      model_name = argv[++i];
     } else if (std::string(argv[i]) == "--onednn") {
       onednn = true;
<< std::endl; +#else + (void)graph; #endif } \ No newline at end of file diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp index 7b02fd22..d4eb2602 100644 --- a/app/Graph/graph_build.cpp +++ b/app/Graph/graph_build.cpp @@ -10,12 +10,10 @@ using namespace it_lab_ai; int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; - bool parallel = false; + //bool parallel = false; bool onednn = false; for (int i = 1; i < argc; ++i) { - if (std::string(argv[i]) == "--parallel") { // change by Andrey - parallel = true; - } else if (std::string(argv[i]) == "--model" && i + 1 < argc) { + if (std::string(argv[i]) == "--model" && i + 1 < argc) { model_name = argv[++i]; } else if (std::string(argv[i]) == "--onednn") { onednn = true; From 3dc2d325872937c4ff1176478aeef17d3d9c0376 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 8 Nov 2025 15:51:41 +0300 Subject: [PATCH 09/13] clang --- app/Graph/acc_check.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index 3ee2d69b..c3488e9f 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -13,7 +13,7 @@ using namespace it_lab_ai; int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; - //bool parallel = false; + // bool parallel = false; bool onednn = false; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { From 7acdc57428acaea43dc07d4eb476a5f7045f8557 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 8 Nov 2025 15:53:51 +0300 Subject: [PATCH 10/13] clang --- app/Graph/graph_build.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp index d4eb2602..6adda6cc 100644 --- a/app/Graph/graph_build.cpp +++ b/app/Graph/graph_build.cpp @@ -10,7 +10,7 @@ using namespace it_lab_ai; int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; - //bool parallel = false; + // bool parallel = false; bool onednn = false; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { From 41e6c50ee4cc696d88e7d530f5eae3059d94ae47 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 8 Nov 2025 16:21:49 +0300 Subject: [PATCH 11/13] F & --- app/Graph/build.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index c34d9372..72fdb9c8 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -85,8 +85,8 @@ class LayerFactory { static void configure(bool onednn) { onednn_ = onednn; } static std::shared_ptr createEwLayer(const std::string& function, - float alpha = 1.0f, - float beta = 0.0f) { + float alpha = 1.0F, + float beta = 0.0F) { if (onednn_ && EwLayerOneDnn::is_function_supported(function)) { return std::make_shared(function, alpha, beta); } else { From 4614041c22616a25e1e6d209cf66d13d71d73a23 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 8 Nov 2025 16:38:22 +0300 Subject: [PATCH 12/13] fix --- app/Graph/build.hpp | 3 +-- include/graph/graph.hpp | 16 ++++++++-------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index 72fdb9c8..7d557ac8 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -89,9 +89,8 @@ class LayerFactory { float beta = 0.0F) { if (onednn_ && EwLayerOneDnn::is_function_supported(function)) { return std::make_shared(function, alpha, beta); - } else { - return std::make_shared(function, alpha, beta); } + return 
From 4614041c22616a09ba94d7904770dcf7eb70ccd Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Sat, 8 Nov 2025 16:38:22 +0300
Subject: [PATCH 12/13] fix

---
 app/Graph/build.hpp     |  3 +--
 include/graph/graph.hpp | 16 ++++++++--------
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp
index 72fdb9c8..7d557ac8 100644
--- a/app/Graph/build.hpp
+++ b/app/Graph/build.hpp
@@ -89,9 +89,8 @@ class LayerFactory {
                                               float beta = 0.0F) {
     if (onednn_ && EwLayerOneDnn::is_function_supported(function)) {
       return std::make_shared<EwLayerOneDnn>(function, alpha, beta);
-    } else {
-      return std::make_shared<EWLayer>(function, alpha, beta);
     }
+    return std::make_shared<EWLayer>(function, alpha, beta);
   }
 };
diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp
index cdb790ad..01ad46d3 100644
--- a/include/graph/graph.hpp
+++ b/include/graph/graph.hpp
@@ -74,7 +74,7 @@ class Graph {
     split_distribution_ = split_dist;
   }
 
-  void addOwnedLayer(std::shared_ptr<Layer> layer) {
+  void addOwnedLayer(const std::shared_ptr<Layer>& layer) {
     if (!layer) return;
     for (const auto& existing_layer : owned_layers_) {
       if (existing_layer.get() == layer.get()) {
@@ -115,7 +115,7 @@ class Graph {
     return *layers_[layerID];
   }
 
-  void setInput(std::shared_ptr<Layer> layer, Tensor& vec) {
+  void setInput(const std::shared_ptr<Layer>& layer, Tensor& vec) {
     addOwnedLayer(layer);
     layer->setID(0);
     layers_.push_back(layer.get());
@@ -126,7 +126,7 @@ class Graph {
     in_edges_.resize(1);
   }
 
-  void addSingleLayer(std::shared_ptr<Layer> layer) {
+  void addSingleLayer(const std::shared_ptr<Layer>& layer) {
     addOwnedLayer(layer);
 
     bool layer_exists = false;
@@ -150,8 +150,8 @@ class Graph {
     }
   }
 
-  void makeConnection(std::shared_ptr<Layer> layPrev,
-                      std::shared_ptr<Layer> layNext) {
+  void makeConnection(const std::shared_ptr<Layer>& layPrev,
+                      const std::shared_ptr<Layer>& layNext) {
     addOwnedLayer(layPrev);
     addOwnedLayer(layNext);
 
@@ -193,8 +193,8 @@ class Graph {
     in_edges_[layNext->getID()].push_back(layPrev->getID());
   }
 
-  bool areLayerNext(std::shared_ptr<Layer> layPrev,
-                    std::shared_ptr<Layer> layNext) {
+  bool areLayerNext(const std::shared_ptr<Layer>& layPrev,
+                    const std::shared_ptr<Layer>& layNext) {
     for (int i = arrayV_[layPrev->getID()]; i < arrayV_[layPrev->getID() + 1];
          i++) {
       if (arrayE_[i] == layNext->getID()) {
@@ -302,7 +302,7 @@ class Graph {
     *outtenres_ = outten_[0];
   }
 
-  void setOutput(std::shared_ptr<Layer> layer, Tensor& vec) {
+  void setOutput(const std::shared_ptr<Layer>& layer, Tensor& vec) {
     end_ = layer->getID();
     outtenres_ = &vec;
     std::vector<int> vec1 = {1, 7, 1, 0};
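(Aside: taking `const std::shared_ptr<Layer>&` instead of a `std::shared_ptr<Layer>` by value, as PATCH 12 does throughout, avoids one atomic reference-count increment and decrement per argument per call. A standalone illustration, not repository code:)

    #include <memory>

    struct Widget {};

    // By value: the copy bumps the atomic refcount on entry, drops it on exit.
    void byValue(std::shared_ptr<Widget> w) { (void)w; }

    // By const reference: no copy, no refcount traffic. The callee can still
    // copy the pointer explicitly when it needs shared ownership, which is
    // exactly what addOwnedLayer does when it stores the layer.
    void byConstRef(const std::shared_ptr<Widget>& w) { (void)w; }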
From 51dc9479722346a04ba94d7904770dcf7eb70ccd Mon Sep 17 00:00:00 2001
From: Semyon1104
Date: Sun, 9 Nov 2025 16:53:52 +0300
Subject: [PATCH 13/13] //

---
 app/Graph/acc_check.cpp           | 1 -
 app/Graph/graph_build.cpp         | 1 -
 include/graph/graph.hpp           | 2 +-
 test/inference/test_inference.cpp | 1 -
 4 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp
index c3488e9f..e367d516 100644
--- a/app/Graph/acc_check.cpp
+++ b/app/Graph/acc_check.cpp
@@ -13,7 +13,6 @@ using namespace it_lab_ai;
 
 int main(int argc, char* argv[]) {
   std::string model_name = "alexnet_mnist";
-  // bool parallel = false;
   bool onednn = false;
   for (int i = 1; i < argc; ++i) {
     if (std::string(argv[i]) == "--model" && i + 1 < argc) {
diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp
index 6adda6cc..28fffa47 100644
--- a/app/Graph/graph_build.cpp
+++ b/app/Graph/graph_build.cpp
@@ -10,7 +10,6 @@ using namespace it_lab_ai;
 
 int main(int argc, char* argv[]) {
   std::string model_name = "alexnet_mnist";
-  // bool parallel = false;
   bool onednn = false;
   for (int i = 1; i < argc; ++i) {
     if (std::string(argv[i]) == "--model" && i + 1 < argc) {
diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp
index 01ad46d3..9dfb567e 100644
--- a/include/graph/graph.hpp
+++ b/include/graph/graph.hpp
@@ -409,4 +409,4 @@ class Graph {
     return traversal;
   }
 };
-}  // namespace it_lab_ai
\ No newline at end of file
+}  // namespace it_lab_ai
diff --git a/test/inference/test_inference.cpp b/test/inference/test_inference.cpp
index 2bbd1f44..4d6d6569 100644
--- a/test/inference/test_inference.cpp
+++ b/test/inference/test_inference.cpp
@@ -28,7 +28,6 @@ TEST(bfs, check_struct_graph) {
   Tensor input = make_tensor(vec, sh1);
   Tensor output = make_tensor(vec, sh1);
 
-  // Use shared_ptr for all layers
   auto a1 = std::make_shared<InputLayer>(kNhwc, kNchw, 1, 2);
   std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
   Shape sh2({3, 3});