diff --git a/app/Graph/CMakeLists.txt b/app/Graph/CMakeLists.txt
index 69e934d3..64fbdfaa 100644
--- a/app/Graph/CMakeLists.txt
+++ b/app/Graph/CMakeLists.txt
@@ -22,6 +22,10 @@ target_link_libraries(Graph_Build BuildGraph)
 add_executable(ACC acc_check.cpp)
 target_link_libraries(ACC BuildGraph)
 
+add_executable(onnx_subs onnx_subs.cpp)
+target_link_libraries(onnx_subs BuildGraph)
+target_link_libraries(onnx_subs graphT_lib)
+
 if (WIN32)
   add_custom_command(TARGET Graph_Build POST_BUILD
                      COMMAND ${CMAKE_COMMAND} -E copy_directory
diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index a1cd4771..8439134e 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -19,20 +19,6 @@ void build_graph_linear(it_lab_ai::Graph& graph, it_lab_ai::Tensor& input,
     for (size_t i = 0; i < input.get_shape().dims(); i++) {
       std::cout << input.get_shape()[i] << ' ';
     }
-    std::cout << std::endl;
-    if (input.get_shape().dims() == 4) {
-      for (size_t n = 0; n < input.get_shape()[0]; n++) {
-        for (size_t h = 0; h < input.get_shape()[2]; h++) {
-          for (size_t w = 0; w < input.get_shape()[3]; w++) {
-            for (size_t c = 0; c < input.get_shape()[1]; c++) {
-              std::cout << input.get({n, c, h, w}) << ' ';
-            }
-          }
-          std::cerr << std::endl;
-        }
-      }
-      std::cout << std::endl << std::endl;
-    }
   }
   std::vector<std::unique_ptr<it_lab_ai::Layer>> layers;
   std::vector<it_lab_ai::Layer*> layer_ptrs;
@@ -79,10 +65,13 @@ void build_graph_linear(it_lab_ai::Graph& graph, it_lab_ai::Tensor& input,
         std::cout << std::endl;
       }
 
-      it_lab_ai::Tensor tmp_values = tensor;
-      it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
+      std::shared_ptr<it_lab_ai::Tensor> tmp_values =
+          std::make_shared<it_lab_ai::Tensor>(tensor);
+      std::shared_ptr<it_lab_ai::Tensor> tmp_bias =
+          std::make_shared<it_lab_ai::Tensor>(
+              it_lab_ai::make_tensor(tensor.get_bias()));
       auto conv_layer = std::make_unique<it_lab_ai::ConvolutionalLayer>(
-          1, pads, 1, tmp_values, tmp_bias, kDefault, 1, true);
+          1, pads, 1, *tmp_values, *tmp_bias, kDefault, 1, true);
       layer_ptrs.push_back(conv_layer.get());
       layers.push_back(std::move(conv_layer));
       layerpostop.push_back(false);
@@ -97,8 +86,13 @@ void build_graph_linear(it_lab_ai::Graph& graph, it_lab_ai::Tensor& input,
       std::cout << "Element wise (relu) added to layers" << std::endl;
     }
     if (layer_type.find("Dense") != std::string::npos) {
-      it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
-      auto fc_layer = std::make_unique<it_lab_ai::FCLayer>(tensor, tmp_bias);
+      std::shared_ptr<it_lab_ai::Tensor> tmp_tensor =
+          std::make_shared<it_lab_ai::Tensor>(tensor);
+      std::shared_ptr<it_lab_ai::Tensor> tmp_bias =
+          std::make_shared<it_lab_ai::Tensor>(
+              it_lab_ai::make_tensor(tensor.get_bias()));
+      auto fc_layer =
+          std::make_unique<it_lab_ai::FCLayer>(*tmp_tensor, *tmp_bias);
       layer_ptrs.push_back(fc_layer.get());
       layers.push_back(std::move(fc_layer));
       layerpostop.push_back(false);
@@ -171,11 +165,6 @@ void build_graph_linear(it_lab_ai::Graph& graph, it_lab_ai::Tensor& input,
   }
 
   graph.setOutput(layer_ptrs.back(), output);
-
-  graph.addOwnedLayer(std::move(a1));
-  for (auto& layer : layers) {
-    graph.addOwnedLayer(std::move(layer));
-  }
 }
 
 std::string get_base_layer_name(const std::string& tensor_name) {
@@ -323,10 +312,6 @@ void build_graph(it_lab_ai::Graph& graph, it_lab_ai::Tensor& input,
     auto* output_layer = layers.back().get();
     graph.setOutput(output_layer, output);
   }
-
-  for (auto& layer : layers) {
-    graph.addOwnedLayer(std::move(layer));
-  }
 }
 
 ParseResult parse_json_model(const std::string& json_path, bool comments) {
@@ -436,11 +421,14 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) {
         }
       }
 
-      it_lab_ai::Tensor tmp_tensor = tensor;
-      it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
+      std::shared_ptr<it_lab_ai::Tensor> tmp_tensor =
+          std::make_shared<it_lab_ai::Tensor>(tensor);
+      std::shared_ptr<it_lab_ai::Tensor> tmp_bias =
+          std::make_shared<it_lab_ai::Tensor>(
+              it_lab_ai::make_tensor(tensor.get_bias()));
       auto conv_layer = std::make_unique<it_lab_ai::ConvolutionalLayer>(
-          stride, pads, dilations, tmp_tensor, tmp_bias, kDefault, group);
+          stride, pads, dilations, *tmp_tensor, *tmp_bias, kDefault, group);
       layer = std::move(conv_layer);
     } else if (layer_type.find("Relu") != std::string::npos ||
                layer_type.find("relu") != std::string::npos) {
@@ -452,19 +440,23 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) {
       it_lab_ai::Tensor tensor = it_lab_ai::create_tensor_from_json(
          layer_data, it_lab_ai::Type::kFloat);
 
-      it_lab_ai::Tensor tmp_tensor = it_lab_ai::Tensor(
-          it_lab_ai::Shape({tensor.get_shape()[1], tensor.get_shape()[0]}),
-          it_lab_ai::Type::kFloat);
+      std::shared_ptr<it_lab_ai::Tensor> tmp_tensor =
+          std::make_shared<it_lab_ai::Tensor>(
+              it_lab_ai::Shape(
+                  {tensor.get_shape()[1], tensor.get_shape()[0]}),
+              it_lab_ai::Type::kFloat);
       for (size_t h = 0; h < tensor.get_shape()[0]; h++) {
         for (size_t w = 0; w < tensor.get_shape()[1]; w++) {
-          tmp_tensor.set({w, h}, tensor.get({h, w}));
+          tmp_tensor->set({w, h}, tensor.get({h, w}));
         }
       }
-      it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
+      std::shared_ptr<it_lab_ai::Tensor> tmp_bias =
+          std::make_shared<it_lab_ai::Tensor>(
+              it_lab_ai::make_tensor(tensor.get_bias()));
       auto fc_layer =
-          std::make_unique<it_lab_ai::FCLayer>(tmp_tensor, tmp_bias);
+          std::make_unique<it_lab_ai::FCLayer>(*tmp_tensor, *tmp_bias);
       layer = std::move(fc_layer);
     } else if (layer_type.find("Dropout") != std::string::npos) {
       auto dropout_layer = std::make_unique<it_lab_ai::DropOutLayer>(0.0);
@@ -710,8 +702,11 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) {
         trans_b = layer_data["transB"].get<int>() != 0;
       }
 
-      it_lab_ai::Tensor tmp_tensor = tensor;
-      it_lab_ai::Tensor tmp_bias = it_lab_ai::make_tensor(tensor.get_bias());
+      std::shared_ptr<it_lab_ai::Tensor> tmp_tensor =
+          std::make_shared<it_lab_ai::Tensor>(tensor);
+      std::shared_ptr<it_lab_ai::Tensor> tmp_bias =
+          std::make_shared<it_lab_ai::Tensor>(
+              it_lab_ai::make_tensor(tensor.get_bias()));
       if (trans_b) {
         it_lab_ai::Shape transposed_shape(
             {tensor.get_shape()[1], tensor.get_shape()[0]});
@@ -725,7 +720,7 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) {
           }
         }
 
-        tmp_tensor = transposed_tensor;
+        *tmp_tensor = transposed_tensor;
 
         if (comments) {
           std::cout << "Weights transposed from [" << tensor.get_shape()[0]
@@ -736,23 +731,23 @@ ParseResult parse_json_model(const std::string& json_path, bool comments) {
       }
 
       if (alpha != 1.0F) {
-        auto weights_data = *tmp_tensor.as<float>();
+        auto weights_data = *tmp_tensor->as<float>();
         for (auto& val : weights_data) {
           val *= alpha;
         }
-        tmp_tensor = make_tensor(weights_data, tmp_tensor.get_shape());
+        *tmp_tensor = make_tensor(weights_data, tmp_tensor->get_shape());
       }
 
       if (beta != 1.0F) {
-        auto bias_data = *tmp_bias.as<float>();
+        auto bias_data = *tmp_bias->as<float>();
         for (auto& val : bias_data) {
           val *= beta;
         }
-        tmp_bias = make_tensor(bias_data, tmp_bias.get_shape());
+        *tmp_bias = make_tensor(bias_data, tmp_bias->get_shape());
       }
 
       auto fc_layer =
-          std::make_unique<it_lab_ai::FCLayer>(tmp_tensor, tmp_bias);
+          std::make_unique<it_lab_ai::FCLayer>(*tmp_tensor, *tmp_bias);
       layer = std::move(fc_layer);
     } else if (layer_type == "Transpose" ||
               layer_type.find("transpose") != std::string::npos) {
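A minimal sketch of the ownership contract the build.cpp changes above introduce (repo API names assumed from this diff; shapes and values are hypothetical): weights are now wrapped in `std::shared_ptr<it_lab_ai::Tensor>` and the layer constructors take references, so the wrapped tensors must outlive every layer built from them.

```cpp
// Hypothetical illustration; FCLayer stores pointers to these tensors.
auto weights = std::make_shared<it_lab_ai::Tensor>(it_lab_ai::make_tensor(
    std::vector<float>{2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F}, {3, 2}));
auto bias = std::make_shared<it_lab_ai::Tensor>(
    it_lab_ai::make_tensor(std::vector<float>{0.5F, 0.5F, 1.0F}));
auto fc = std::make_unique<it_lab_ai::FCLayer>(*weights, *bias);
// `weights` and `bias` must not be destroyed before `fc` stops running.
```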
diff --git a/app/Graph/onnx_subs.cpp b/app/Graph/onnx_subs.cpp
new file mode 100644
index 00000000..deae21c2
--- /dev/null
+++ b/app/Graph/onnx_subs.cpp
@@ -0,0 +1,78 @@
+#include <algorithm>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "graph_transformations/graph_transformations.hpp"
+#include "perf/benchmarking.hpp"
+#include "build.cpp"
+#include "build.hpp"
+
+using namespace it_lab_ai;
+
+int main() {
+  Tensor aaaa = make_tensor(std::vector<float>({0}));
+  Graph graph1;
+  build_graph(graph1, aaaa, aaaa, MODEL_PATH_DENSENET_ONNX, false);
+
+  Graph subgraph;
+  Tensor scale = make_tensor(std::vector<float>({1.0}));
+  std::unique_ptr<Layer> layer_0 =
+      std::make_unique<BatchNormalizationLayer>(scale, scale, scale, scale);
+  std::unique_ptr<Layer> layer_1 = std::make_unique<EWLayer>("relu");
+  std::unique_ptr<Layer> layer_2 = std::make_unique<ConvolutionalLayer>();
+  std::unique_ptr<Layer> layer_3 = std::make_unique<EWLayer>("relu");
+  std::unique_ptr<Layer> layer_4 = std::make_unique<ConvolutionalLayer>();
+
+  Layer* layer_0_ptr = layer_0.get();
+  Layer* layer_1_ptr = layer_1.get();
+  Layer* layer_2_ptr = layer_2.get();
+  Layer* layer_3_ptr = layer_3.get();
+  Layer* layer_4_ptr = layer_4.get();
+
+  subgraph.setInput(layer_0_ptr, aaaa);
+  subgraph.makeConnection(layer_0_ptr, layer_1_ptr);
+  subgraph.makeConnection(layer_1_ptr, layer_2_ptr);
+  subgraph.makeConnection(layer_2_ptr, layer_3_ptr);
+  subgraph.makeConnection(layer_3_ptr, layer_4_ptr);
+
+  Graph subgraph2;
+  std::unique_ptr<Layer> layer_5 = std::make_unique<ConcatLayer>();
+  std::unique_ptr<Layer> layer_6 =
+      std::make_unique<PoolingLayer>(Shape({1, 1, 1}), "max");
+  std::unique_ptr<Layer> layer_7 = std::make_unique<ConvolutionalLayer>();
+
+  Layer* layer_5_ptr = layer_5.get();
+  Layer* layer_6_ptr = layer_6.get();
+  Layer* layer_7_ptr = layer_7.get();
+
+  subgraph2.setInput(layer_6_ptr, aaaa);
+  subgraph2.makeConnection(layer_6_ptr, layer_5_ptr);
+  subgraph2.addSingleLayer(layer_7_ptr);
+  subgraph2.makeConnection(layer_7_ptr, layer_5_ptr);
+
+  std::vector<std::vector<int>> vec = find_subgraphs(graph1, subgraph);
+  std::vector<std::vector<int>> vec2 = find_subgraphs(graph1, subgraph2);
+  auto time = elapsed_time_avg(10, find_subgraphs, graph1, subgraph);
+  auto time2 = elapsed_time_avg(10, find_subgraphs, graph1, subgraph2);
+  for (int i = 0; i < vec.size(); i++) {
+    for (int j = 0; j < vec[i].size(); j++) {
+      std::cerr << vec[i][j] << ' ';
+    }
+    std::cerr << '\n';
+  }
+  std::cerr << "Time for path5:" << time << std::endl;
+
+  for (int i = 0; i < vec2.size(); i++) {
+    for (int j = 0; j < vec2[i].size(); j++) {
+      std::cerr << vec2[i][j] << ' ';
+    }
+    std::cerr << '\n';
+  }
+  std::cerr << "Time for concat:" << time2 << std::endl;
+  return 0;
+}
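How to read the vectors `find_subgraphs` returns (inferred from `run_search` further below: position `k` of a match holds the graph layer ID assigned to subgraph node `k`) — a small sketch:

```cpp
// Sketch: print each match as a pattern-node -> graph-layer mapping.
std::vector<std::vector<int>> matches = find_subgraphs(graph1, subgraph);
for (const std::vector<int>& match : matches) {
  for (size_t k = 0; k < match.size(); k++) {
    std::cout << "pattern node " << k << " -> layer " << match[k] << '\n';
  }
  std::cout << '\n';
}
```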
diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp
index ff27b040..1ca7f14c 100644
--- a/include/graph/graph.hpp
+++ b/include/graph/graph.hpp
@@ -22,11 +22,13 @@ struct BranchState {
   std::vector<std::vector<int>> distribution;
 };
 
+std::unique_ptr<Layer> layer_based_unique_copy(
+    const std::unique_ptr<Layer>& layer);
+
 class Graph {
   int BiggestSize_;
   int V_;  // amount of ids
-  std::vector<std::unique_ptr<Layer>> owned_layers_;
-  std::vector<Layer*> layers_;
+  std::vector<std::unique_ptr<Layer>> layers_;
   std::vector<int> arrayV_;  // vertices (id -> vertex number)
   std::vector<int> arrayE_;  // edges (vertex number -> id)
   std::vector<Tensor> inten_;
@@ -66,36 +68,19 @@ class Graph {
     in_edges_.clear();
   }
 
-  Graph(const Graph&) = delete;
+  Graph(const Graph&) = default;
   Graph& operator=(const Graph&) = delete;
   Graph(Graph&&) noexcept = default;
   Graph& operator=(Graph&&) noexcept = default;
   ~Graph() = default;
 
+  Graph clone(Tensor& out) const;
+
   void setSplitDistribution(
       std::vector<std::vector<std::vector<int>>> split_dist) {
     split_distribution_ = std::move(split_dist);
   }
 
-  void addOwnedLayer(std::unique_ptr<Layer> layer) {
-    if (!layer) return;
-
-    for (const auto& existing_layer : owned_layers_) {
-      if (existing_layer.get() == layer.get()) {
-        return;
-      }
-    }
-
-    owned_layers_.push_back(std::move(layer));
-  }
-
-  Layer* addLayer(std::unique_ptr<Layer> layer) {
-    if (!layer) return nullptr;
-    Layer* raw_ptr = layer.get();
-    addOwnedLayer(std::move(layer));
-    return raw_ptr;
-  }
-
   int getVertexValue(size_t layerID) const {
     if (layerID >= arrayV_.size()) {
       throw std::invalid_argument("ArrayV does not contain this ID.");
     }
@@ -117,13 +102,20 @@ class Graph {
     return in_edges_[layerID].size();
   }
 
+  std::vector<int> getInLayers(size_t layerID) const {
+    if (layerID >= in_edges_.size()) {
+      throw std::invalid_argument("Input edges array do not contain this ID.");
+    }
+    return in_edges_[layerID];
+  }
+
   int getLayersCount() const { return V_; }
 
-  const Layer& getLayerFromID(size_t layerID) const {
+  Layer* getLayerFromID(size_t layerID) const {
     if (layerID >= layers_.size()) {
       throw std::invalid_argument("Layers do not contain this ID.");
     }
-    return *layers_[layerID];
+    return layers_[layerID].get();
   }
 
   void setInput(Layer* layer, Tensor& vec) {
@@ -132,8 +124,8 @@ class Graph {
     }
 
     bool layer_exists = false;
-    for (const auto* existing_layer : layers_) {
-      if (existing_layer == layer) {
+    for (std::unique_ptr<Layer>& existing_layer : layers_) {
+      if (existing_layer.get() == layer) {
         layer_exists = true;
         break;
       }
@@ -141,7 +133,7 @@ class Graph {
 
     if (!layer_exists) {
       layer->setID(V_);
-      layers_.push_back(layer);
+      layers_.push_back(std::unique_ptr<Layer>(layer));
       arrayV_.push_back(static_cast<int>(arrayE_.size()));
 
       if (V_ >= static_cast<int>(in_edges_.size())) {
@@ -159,8 +151,8 @@ class Graph {
     if (!layer) return;
 
     bool layer_exists = false;
-    for (const auto* existing_layer : layers_) {
-      if (existing_layer == layer) {
+    for (std::unique_ptr<Layer>& existing_layer : layers_) {
+      if (existing_layer.get() == layer) {
        layer_exists = true;
        break;
      }
@@ -168,7 +160,7 @@ class Graph {
 
     if (!layer_exists) {
       layer->setID(V_);
-      layers_.push_back(layer);
+      layers_.push_back(std::unique_ptr<Layer>(layer));
       arrayV_.push_back(static_cast<int>(arrayE_.size()));
 
       if (V_ >= static_cast<int>(in_edges_.size())) {
@@ -212,9 +204,73 @@ class Graph {
     in_edges_[layNext->getID()].push_back(layPrev->getID());
   }
 
+  void removeConnection(int idPrev, int idNext) {
+    if (idPrev >= V_ || idNext >= V_ || idPrev < 0 || idNext < 0) {
+      throw std::out_of_range("Layer ID out of range");
+    }
+    auto it =
+        std::find(in_edges_[idNext].begin(), in_edges_[idNext].end(), idPrev);
+    if (it == in_edges_[idNext].end()) {
+      throw std::invalid_argument(
+          (std::string("No such edge ") + std::to_string(idPrev)) + " " +
+          std::to_string(idNext));
+    }
+    in_edges_[idNext].erase(it);
+    auto array_e_it = std::find(arrayE_.begin() + arrayV_[idPrev],
+                                arrayE_.begin() + arrayV_[idPrev + 1], idNext);
+    if (array_e_it == arrayE_.begin() + arrayV_[idPrev + 1]) {
+      throw std::invalid_argument(
+          (std::string("No such edge ") + std::to_string(idPrev)) + " " +
+          std::to_string(idNext));
+    }
+    arrayE_.erase(array_e_it);
+    for (size_t i = static_cast<size_t>(idPrev) + 1; i < arrayV_.size(); ++i) {
+      arrayV_[i]--;
+    }
+  }
+
+  void removeSingleLayer(int id) {
+    if (id >= V_ || id < 0) {
+      throw std::out_of_range("Layer ID out of range");
+    }
+    // remove inputs
+    for (int i = 0; i < V_; i++) {
+      if (arrayV_[i] != arrayV_[i + 1]) {
+        auto array_e_it = std::find(arrayE_.begin() + arrayV_[i],
+                                    arrayE_.begin() + arrayV_[i + 1], id);
+        if (array_e_it != arrayE_.begin() + arrayV_[i + 1]) {
+          removeConnection(i, id);
+        }
+      }
+    }
+    in_edges_.erase(in_edges_.begin() + id);
+    // remove outputs
+    arrayE_.erase(arrayE_.begin() + arrayV_[id],
+                  arrayE_.begin() + arrayV_[id + 1]);
+    int amount_connected = arrayV_[id + 1] - arrayV_[id];
+    // remove vertex
+    arrayV_.erase(arrayV_.begin() + id);
+    for (size_t i = id; i < arrayV_.size(); i++) {
+      arrayV_[i] -= amount_connected;
+    }
+    for (int& i : arrayE_) {
+      if (i > id) {
+        i -= 1;
+      }
+    }
+    for (size_t i = id + 1; i < layers_.size(); i++) {
+      layers_[i]->setID(layers_[i]->getID() - 1);
+    }
+    layers_[id]->setID(-1);
+    layers_.erase(layers_.begin() + id);
+    V_--;
+  }
+
   bool areLayerNext(Layer* layPrev, Layer* layNext) {
     if (!layPrev || !layNext) return false;
 
+    if (layPrev->getID() >= V_ || layPrev->getID() < 0) {
+      throw std::invalid_argument("No such layer in graph");
+    }
+
     for (int i = arrayV_[layPrev->getID()]; i < arrayV_[layPrev->getID() + 1];
          i++) {
       if (arrayE_[i] == layNext->getID()) {
diff --git a/include/graph_transformations/graph_transformations.hpp b/include/graph_transformations/graph_transformations.hpp
index 0ff658a3..5214f36b 100644
--- a/include/graph_transformations/graph_transformations.hpp
+++ b/include/graph_transformations/graph_transformations.hpp
@@ -2,9 +2,11 @@
 #include <vector>
 
 #include "graph/graph.hpp"
+#include "layers/EWLayer.hpp"
 #include "layers/Layer.hpp"
 
 namespace it_lab_ai {
+bool layer_conditions(Layer* layer, Layer* layer_sub);
 std::vector<std::vector<int>> find_subgraphs(const Graph& graph,
                                              const Graph& subgraph);
 bool has_edge(const Graph& graph, int id_from, int id_to);
@@ -13,4 +15,9 @@ bool is_leaf(const Graph& graph, int id);
 bool run_search(const Graph& graph, const Graph& subgraph,
                 std::vector<int>& assignments,
                 std::vector<std::vector<int>>& results);
+
+void change_ids(std::vector<std::vector<int>>& vec, int id);
+bool does_intersect(const std::vector<int>& vec1, const std::vector<int>& vec2);
+Graph changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
+                        Tensor& out);
 }  // namespace it_lab_ai
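The `removeConnection`/`removeSingleLayer` bookkeeping above edits a CSR-style adjacency: `arrayV_` holds per-vertex offsets into the flat edge list `arrayE_`. A self-contained worked example of the offset arithmetic (toy values, std types only):

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // Edges of vertex i live in arrayE[arrayV[i] .. arrayV[i + 1]).
  std::vector<int> arrayV = {0, 2, 3, 3};  // graph: 0->1, 0->2, 1->2
  std::vector<int> arrayE = {1, 2, 2};
  // Remove edge 0 -> 1, mirroring Graph::removeConnection:
  int idPrev = 0;
  int idNext = 1;
  auto it = std::find(arrayE.begin() + arrayV[idPrev],
                      arrayE.begin() + arrayV[idPrev + 1], idNext);
  arrayE.erase(it);                        // drop the edge entry
  for (std::size_t i = idPrev + 1; i < arrayV.size(); ++i) {
    arrayV[i]--;                           // shift later offsets down by one
  }
  assert((arrayV == std::vector<int>{0, 1, 2, 2}));
  assert((arrayE == std::vector<int>{2, 2}));
  return 0;
}
```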
diff --git a/include/layers/ConvLayer.hpp b/include/layers/ConvLayer.hpp
index 8ce58131..081d0097 100644
--- a/include/layers/ConvLayer.hpp
+++ b/include/layers/ConvLayer.hpp
@@ -23,8 +23,8 @@ class ConvolutionalLayer : public Layer {
   size_t stride_;
   size_t pads_;
   size_t dilations_;
-  Tensor kernel_;
-  Tensor bias_;
+  std::shared_ptr<Tensor> kernel_;
+  std::shared_ptr<Tensor> bias_;
   size_t group_;
   ImplType implType_;
   bool useLegacyImpl_;
@@ -36,17 +36,15 @@ class ConvolutionalLayer : public Layer {
     dilations_ = 0;
     implType_ = kDefault;
   }
-  ConvolutionalLayer(size_t step, size_t pads, size_t dilations,
-                     const Tensor& kernel, const Tensor& bias = Tensor(),
+  ConvolutionalLayer(size_t step, size_t pads, size_t dilations, Tensor& kernel,
+                     Tensor& bias = *std::make_shared<Tensor>(),
                      ImplType implType = kDefault, size_t group = 1,
                      bool useLegacyImpl = false)
-      : Layer(kConvolution) {
+      : Layer(kConvolution), kernel_(&kernel), bias_(&bias) {
     stride_ = step;
     pads_ = pads;
     group_ = group;
     dilations_ = dilations;
-    kernel_ = kernel;
-    bias_ = bias;
     implType_ = implType;
     useLegacyImpl_ = useLegacyImpl;
   }
@@ -127,7 +125,7 @@ class ConvImpl : public LayerImpl<ValueType> {
           }
           auto kercol = static_cast<size_t>(coloms / input_width_ + 1);
           color +=
-              matrix[(i + coloms + str) * input_flow_ + x] *
+              matrix.at((i + coloms + str) * input_flow_ + x) *
               kernel[kercol * kernel_size + static_cast<size_t>(str + 1)];
         }
       }
@@ -152,7 +150,7 @@ class ConvImpl : public LayerImpl<ValueType> {
 
 // NCHW -> NCHW only
 template <typename ValueType>
-void Conv4D(const Tensor& input, const Tensor& kernel_, const Tensor& bias_,
+void Conv4D(const Tensor& input, Tensor& kernel_, Tensor& bias_,
             Tensor& output, size_t stride_, size_t pads_, size_t group_,
             size_t dilations_) {
   size_t batch_size = input.get_shape()[0];
@@ -284,7 +282,7 @@ void Conv4D(const Tensor& input, Tensor& kernel_, Tensor& bias_,
 
 // NCHW -> NCHW only
 template <typename ValueType>
-void Conv4DSTL(const Tensor& input, const Tensor& kernel_, const Tensor& bias_,
+void Conv4DSTL(const Tensor& input, Tensor& kernel_, Tensor& bias_,
                Tensor& output, size_t stride_, size_t pads_, size_t group_,
                size_t dilations_) {
   size_t batch_size = input.get_shape()[0];
@@ -455,9 +453,9 @@ void Conv4DSTL(const Tensor& input, Tensor& kernel_, Tensor& bias_,
 }
 
 template <typename ValueType>
-void DepthwiseConv4D(const Tensor& input, const Tensor& kernel_,
-                     const Tensor& bias_, Tensor& output, size_t stride_,
-                     size_t pads_, size_t dilations_) {
+void DepthwiseConv4D(const Tensor& input, Tensor& kernel_, Tensor& bias_,
+                     Tensor& output, size_t stride_, size_t pads_,
+                     size_t dilations_) {
   size_t batch_size = input.get_shape()[0];
   size_t channels = input.get_shape()[1];
   size_t in_height = input.get_shape()[2];
@@ -515,9 +513,9 @@ void DepthwiseConv4D(const Tensor& input, Tensor& kernel_, Tensor& bias_,
 
 // NCHW -> NCHW only
 template <typename ValueType>
-void Conv4D_Legacy(const Tensor& input, const Tensor& kernel_,
-                   const Tensor& bias_, Tensor& output, size_t stride_,
-                   size_t pads_, size_t dilations_) {
+void Conv4D_Legacy(const Tensor& input, Tensor& kernel_, Tensor& bias_,
+                   Tensor& output, size_t stride_, size_t pads_,
+                   size_t dilations_) {
   size_t batch_size = input.get_shape()[0];
   size_t in_height = input.get_shape()[2];
   size_t in_width = input.get_shape()[3];
diff --git a/include/layers/FCLayer.hpp b/include/layers/FCLayer.hpp
index 0e7b21de..bca60f4d 100644
--- a/include/layers/FCLayer.hpp
+++ b/include/layers/FCLayer.hpp
@@ -11,13 +11,13 @@ namespace it_lab_ai {
 
 class FCLayer : public Layer {
  private:
-  Tensor weights_;
-  Tensor bias_;
+  std::shared_ptr<Tensor> weights_;
+  std::shared_ptr<Tensor> bias_;
 
  public:
-  FCLayer() : Layer(kFullyConnected) {}
-  FCLayer(Tensor weights, const Tensor& bias)
-      : Layer(kFullyConnected), weights_(std::move(weights)), bias_(bias) {}
+  FCLayer() : Layer(kFullyConnected), weights_(nullptr), bias_(nullptr) {}
+  FCLayer(Tensor& weights, Tensor& bias)
+      : Layer(kFullyConnected), weights_(&weights), bias_(&bias) {}
   void run(const std::vector<Tensor>& input,
            std::vector<Tensor>& output) override;
 #ifdef ENABLE_STATISTIC_WEIGHTS
@@ -73,50 +73,50 @@ template <typename ValueType>
 class FCLayerImpl : public LayerImpl<ValueType> {
  public:
   FCLayerImpl() = delete;
-  FCLayerImpl(const std::vector<ValueType>& input_weights,
+  FCLayerImpl(std::vector<ValueType>& input_weights,
               const Shape& input_weights_shape,
-              const std::vector<ValueType>& input_bias);
+              std::vector<ValueType>& input_bias);
   FCLayerImpl(const FCLayerImpl& c) = default;
   FCLayerImpl& operator=(const FCLayerImpl& sec) = default;
   void set_weight(size_t i, size_t j, const ValueType& value) {
     if (i >= this->outputShape_[0] || j >= this->inputShape_[0]) {
       throw std::out_of_range("Invalid weight index");
     }
-    weights_[i * this->inputShape_[0] + j] = value;
+    (*weights_)[i * this->inputShape_[0] + j] = value;
   }
   ValueType get_weight(size_t i, size_t j) const {
     if (i >= this->outputShape_[0] || j >= this->inputShape_[0]) {
       throw std::out_of_range("Invalid weight index");
     }
-    return weights_[i * this->inputShape_[0] + j];
+    return (*weights_)[i * this->inputShape_[0] + j];
   }
   void set_bias(size_t i, const ValueType& value) {
     if (i >= this->outputShape_[0]) {
       throw std::out_of_range("Invalid bias index");
     }
-    bias_[i] = value;
+    (*bias_)[i] = value;
   }
   ValueType get_bias(size_t i) const {
     if (i >= this->outputShape_[0]) {
       throw std::out_of_range("Invalid bias index");
     }
-    return bias_[i];
+    return (*bias_)[i];
   }
 
   std::vector<ValueType> run(
       const std::vector<ValueType>& input) const override;
 
 private:
-  std::vector<ValueType> weights_;
-  std::vector<ValueType> bias_;
+  std::vector<ValueType>* weights_;
+  std::vector<ValueType>* bias_;
 };
 
 // weights * inputValues + bias = outputValues
 template <typename ValueType>
-FCLayerImpl<ValueType>::FCLayerImpl(const std::vector<ValueType>& input_weights,
+FCLayerImpl<ValueType>::FCLayerImpl(std::vector<ValueType>& input_weights,
                                     const Shape& input_weights_shape,
-                                    const std::vector<ValueType>& input_bias)
-    : LayerImpl<ValueType>(1, 1), weights_(input_weights), bias_(input_bias) {
+                                    std::vector<ValueType>& input_bias)
+    : LayerImpl<ValueType>(1, 1), weights_(&input_weights), bias_(&input_bias) {
   if (input_weights.empty()) {
     throw std::invalid_argument("Empty weights for FCLayer");
   }
@@ -128,7 +128,7 @@ FCLayerImpl<ValueType>::FCLayerImpl(std::vector<ValueType>& input_weights,
     throw std::invalid_argument("Bias size doesn't match output size");
   }
 
-  weights_.resize(input_weights_shape.count(), ValueType(0));
+  (*weights_).resize(input_weights_shape.count(), ValueType(0));
 }
 
 template <typename ValueType>
@@ -137,12 +137,12 @@ std::vector<ValueType> FCLayerImpl<ValueType>::run(
   Shape cur_w_shape({this->inputShape_[0], this->outputShape_[0]});
 
   std::vector<ValueType> output_values =
-      mat_vec_mul(weights_, cur_w_shape, input);
+      mat_vec_mul(*weights_, cur_w_shape, input);
 
   size_t batch_size = output_values.size() / this->outputShape_[0];
 
   for (size_t batch = 0; batch < batch_size; ++batch) {
-    for (size_t i = 0; i < bias_.size(); ++i) {
-      output_values[batch * this->outputShape_[0] + i] += bias_[i];
+    for (size_t i = 0; i < bias_->size(); ++i) {
+      output_values[batch * this->outputShape_[0] + i] += (*bias_)[i];
     }
   }
 
diff --git a/src/graph/CMakeLists.txt b/src/graph/CMakeLists.txt
index 9054e8a2..f2ee1795 100644
--- a/src/graph/CMakeLists.txt
+++ b/src/graph/CMakeLists.txt
@@ -1,3 +1,6 @@
 file(GLOB_RECURSE graph_src *.cpp)
 add_library(graph_lib STATIC "${GRAPH_HEADERS}" "${graph_src}")
-target_link_libraries(graph_lib PUBLIC TBB_unified)
+target_link_libraries(graph_lib PUBLIC dnnl TBB_unified)
+target_compile_definitions(graph_lib PRIVATE
+  DNNL_ENABLE_CONCURRENT_EXEC=1
+)
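One behavioral consequence of the `FCLayerImpl` change in FCLayer.hpp above, shown as a hedged sketch (repo types assumed): the impl now aliases the caller's vectors, so `set_weight` writes through to the original storage rather than into a private copy.

```cpp
std::vector<float> weights = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
std::vector<float> bias = {0.5F, 0.5F, 1.0F};
it_lab_ai::FCLayerImpl<float> impl(weights, it_lab_ai::Shape({3, 2}), bias);
impl.set_weight(0, 0, 7.0F);  // writes through the stored pointer
// weights[0] == 7.0F now; the caller's buffer was modified in place.
```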
diff --git a/src/graph/graph.cpp b/src/graph/graph.cpp
index 0e30ab7e..a033a2ce 100644
--- a/src/graph/graph.cpp
+++ b/src/graph/graph.cpp
@@ -1 +1,133 @@
 #include "graph/graph.hpp"
+
+#include "layers/BatchNormalizationLayer.hpp"
+#include "layers/BinaryOpLayer.hpp"
+#include "layers/ConcatLayer.hpp"
+#include "layers/ConvLayer.hpp"
+#include "layers/DropOutLayer.hpp"
+#include "layers/EWLayer.hpp"
+#include "layers/FCLayer.hpp"
+#include "layers/FlattenLayer.hpp"
+#include "layers/InputLayer.hpp"
+#include "layers/MatmulLayer.hpp"
+#include "layers/OutputLayer.hpp"
+#include "layers/PoolingLayer.hpp"
+#include "layers/ReduceLayer.hpp"
+#include "layers/ReshapeLayer.hpp"
+#include "layers/SoftmaxLayer.hpp"
+#include "layers/SplitLayer.hpp"
+#include "layers/Tensor.hpp"
+#include "layers/TransposeLayer.hpp"
+#include "layers_oneDNN/EWLayer.hpp"
+
+namespace it_lab_ai {
+
+Graph Graph::clone(Tensor& out) const {
+  Graph result;
+  result.arrayE_ = this->arrayE_;
+  result.arrayV_ = this->arrayV_;
+  result.BiggestSize_ = this->BiggestSize_;
+  result.branch_list_ = this->branch_list_;
+  result.count_used_split_distribution_ = this->count_used_split_distribution_;
+  result.end_ = this->end_;
+  result.inten_ = this->inten_;
+  result.in_edges_ = this->in_edges_;
+  result.outtenres_ = &out;
+  result.outten_ = this->outten_;
+  result.split_distribution_ = this->split_distribution_;
+  result.start_ = this->start_;
+  result.V_ = this->V_;
+  result.layers_ = std::vector<std::unique_ptr<Layer>>();
+  for (int i = 0; i < this->layers_.size(); i++) {
+    result.layers_.push_back(layer_based_unique_copy(this->layers_[i]));
+  }
+  return result;
+}
+
+std::unique_ptr<Layer> layer_based_unique_copy(
+    const std::unique_ptr<Layer>& layer) {
+  switch (layer->getName()) {
+    case it_lab_ai::kInput: {
+      InputLayer* tmp_layer =
+          new InputLayer(*dynamic_cast<InputLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kPooling: {
+      PoolingLayer* tmp_layer =
+          new PoolingLayer(*dynamic_cast<PoolingLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kElementWise: {
+      EWLayer* tmp_layer = new EWLayer(*dynamic_cast<EWLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kConvolution: {
+      ConvolutionalLayer* tmp_layer = new ConvolutionalLayer(
+          *dynamic_cast<ConvolutionalLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kFullyConnected: {
+      FCLayer* tmp_layer = new FCLayer(*dynamic_cast<FCLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kFlatten: {
+      FlattenLayer* tmp_layer =
+          new FlattenLayer(*dynamic_cast<FlattenLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kConcat: {
+      ConcatLayer* tmp_layer =
+          new ConcatLayer(*dynamic_cast<ConcatLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kDropout: {
+      DropOutLayer* tmp_layer =
+          new DropOutLayer(*dynamic_cast<DropOutLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kSplit: {
+      SplitLayer* tmp_layer =
+          new SplitLayer(*dynamic_cast<SplitLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kBinaryOp: {
+      BinaryOpLayer* tmp_layer =
+          new BinaryOpLayer(*dynamic_cast<BinaryOpLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kTranspose: {
+      TransposeLayer* tmp_layer =
+          new TransposeLayer(*dynamic_cast<TransposeLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kMatmul: {
+      MatmulLayer* tmp_layer =
+          new MatmulLayer(*dynamic_cast<MatmulLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kReshape: {
+      ReshapeLayer* tmp_layer =
+          new ReshapeLayer(*dynamic_cast<ReshapeLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kSoftmax: {
+      SoftmaxLayer* tmp_layer =
+          new SoftmaxLayer(*dynamic_cast<SoftmaxLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kReduce: {
+      ReduceLayer* tmp_layer =
+          new ReduceLayer(*dynamic_cast<ReduceLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    case it_lab_ai::kBatchNormalization: {
+      BatchNormalizationLayer* tmp_layer = new BatchNormalizationLayer(
+          *dynamic_cast<BatchNormalizationLayer*>(layer.get()));
+      return std::unique_ptr<Layer>(tmp_layer);
+    }
+    default: {
+      throw std::invalid_argument("No such layer type");
+    }
+  }
+}
+}  // namespace it_lab_ai
diff --git a/src/graph_transformations/CMakeLists.txt b/src/graph_transformations/CMakeLists.txt
index 6942b48f..c894388f 100644
--- a/src/graph_transformations/CMakeLists.txt
+++ b/src/graph_transformations/CMakeLists.txt
@@ -1,3 +1,3 @@
 file(GLOB_RECURSE graphT_src *.cpp)
 add_library(graphT_lib STATIC "${GRAPHT_HEADERS}" "${graphT_src}")
-target_link_libraries(graphT_lib PUBLIC TBB_unified)
+target_link_libraries(graphT_lib PUBLIC graph_lib TBB_unified)
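The switch in `layer_based_unique_copy` above is the usual workaround for copying a polymorphic object without a virtual `clone()`: dispatch on the runtime type tag and invoke the derived copy constructor. A self-contained stand-in with plain std types (not the repo's classes) showing the same shape:

```cpp
#include <memory>
#include <stdexcept>

struct Base {
  virtual ~Base() = default;
  virtual int kind() const = 0;
};
struct A : Base { int kind() const override { return 0; } };
struct B : Base { int kind() const override { return 1; } };

// Copy through the base pointer by switching on the type tag.
std::unique_ptr<Base> copy_by_kind(const std::unique_ptr<Base>& p) {
  switch (p->kind()) {
    case 0: return std::make_unique<A>(*static_cast<A*>(p.get()));
    case 1: return std::make_unique<B>(*static_cast<B*>(p.get()));
    default: throw std::invalid_argument("unknown kind");
  }
}
```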
diff --git a/src/graph_transformations/graph_transformations.cpp b/src/graph_transformations/graph_transformations.cpp
index 3bb283b1..fac7e1da 100644
--- a/src/graph_transformations/graph_transformations.cpp
+++ b/src/graph_transformations/graph_transformations.cpp
@@ -2,8 +2,8 @@
 
 namespace it_lab_ai {
 
-bool layer_conditions(const Layer& layer, const Layer& layer_sub) {
-  return layer.getName() == layer_sub.getName();
+bool layer_conditions(Layer* layer, Layer* layer_sub) {
+  return layer->getName() == layer_sub->getName();
 }
 
 std::vector<std::vector<int>> find_subgraphs(const Graph& graph,
@@ -40,13 +40,12 @@ bool run_search(const Graph& graph, const Graph& subgraph,
                 std::vector<int>& assignments,
                 std::vector<std::vector<int>>& results) {
   size_t cur_size = assignments.size();
-  for (int prev_id = 0; prev_id < subgraph.getLayersCount(); prev_id++) {
+  for (int prev_id = 0; prev_id < static_cast<int>(cur_size); prev_id++) {
     int amount_connected_s = subgraph.getVertexValue(prev_id + 1) -
                              subgraph.getVertexValue(prev_id);
     for (int j = 0; j < amount_connected_s; j++) {
       int next_id = subgraph.getEdgeValue(subgraph.getVertexValue(prev_id) + j);
-      if (prev_id < static_cast<int>(cur_size) &&
-          next_id < static_cast<int>(cur_size)) {
+      if (next_id < static_cast<int>(cur_size)) {
         if (!has_edge(graph, assignments[prev_id], assignments[next_id])) {
           return false;
         }
@@ -97,4 +96,125 @@ bool run_search(const Graph& graph, const Graph& subgraph,
   return false;
 }
 
+void change_ids(std::vector<std::vector<int>>& vec, int id) {
+  for (auto& i : vec) {
+    std::transform(i.begin(), i.end(), i.begin(),
+                   [&](int elem) { return elem > id ? elem - 1 : elem; });
+  }
+}
+
+bool does_intersect(const std::vector<int>& vec1,
+                    const std::vector<int>& vec2) {
+  // exists elem in vec1 which is found in vec2
+  return std::any_of(vec1.begin(), vec1.end(), [&](int elem) {
+    return std::find(vec2.begin(), vec2.end(), elem) != vec2.end();
+  });
+}
+
+Graph changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
+                        Tensor& out) {
+  Graph new_graph = graph.clone(out);
+  std::vector<std::vector<int>> subs = find_subgraphs(graph, subgraph_from);
+  std::vector<std::vector<int>> subs_c = subs;
+  std::vector<int> roots;
+  std::vector<int> leafs;
+  std::vector<int> roots_inps_final;
+  std::vector<int> leafs_outs_final;
+  int amount_connected;
+  int amount_connected_s;
+  for (int v = 0; v < subgraph_from.getLayersCount(); v++) {
+    if (is_root(subgraph_from, v)) {
+      roots.push_back(v);
+    }
+    if (is_leaf(subgraph_from, v)) {
+      leafs.push_back(v);
+    }
+  }
+  for (size_t i = 0; i < subs.size(); i++) {
+    bool flag = false;
+    // don't change already changed subgraph
+    for (size_t j = 0; j < i; j++) {
+      if (does_intersect(subs_c[j], subs_c[i])) {
+        flag = true;
+        break;
+      }
+    }
+    if (flag) {
+      continue;
+    }
+    std::unique_ptr<Layer> layer = std::make_unique<EWLayer>("relu");
+    std::vector<bool> is_root_special(roots.size(), false);
+    roots_inps_final.clear();
+    leafs_outs_final.clear();
+    for (size_t j = 0; j < roots.size(); j++) {
+      std::vector<int> root_inps = new_graph.getInLayers(subs[i][roots[j]]);
+      // recognize transformations we can apply with roots
+      amount_connected = new_graph.getVertexValue(subs[i][roots[j]] + 1) -
+                         new_graph.getVertexValue(subs[i][roots[j]]);
+      amount_connected_s = subgraph_from.getVertexValue(roots[j] + 1) -
+                           subgraph_from.getVertexValue(roots[j]);
+      if (amount_connected == amount_connected_s) {
+        continue;
+      }
+      for (int k = 0; k < amount_connected; k++) {
+        int id = new_graph.getEdgeValue(
+            new_graph.getVertexValue(subs[i][roots[j]]) + k);
+        auto it = std::find(subs[i].begin(), subs[i].end(), id);
+        if (it == subs[i].end()) {
+          is_root_special[j] = true;
+        }
+      }
+
+      // want subgraph -> single node
+      for (int root_inp : root_inps) {
+        auto it = std::find(roots_inps_final.begin(), roots_inps_final.end(),
+                            root_inp);
+        if (it == roots_inps_final.end()) {
+          roots_inps_final.push_back(root_inp);
+        }
+      }
+    }
+    for (int leaf : leafs) {
+      amount_connected = new_graph.getVertexValue(subs[i][leaf] + 1) -
+                         new_graph.getVertexValue(subs[i][leaf]);
+      for (int k = 0; k < amount_connected; k++) {
+        int id =
+            new_graph.getEdgeValue(new_graph.getVertexValue(subs[i][leaf]) + k);
+        auto it =
+            std::find(leafs_outs_final.begin(), leafs_outs_final.end(), id);
+        if (it == leafs_outs_final.end()) {
+          leafs_outs_final.push_back(id);
+        }
+      }
+    }
+    for (size_t j = 0; j < subs[i].size(); j++) {
+      auto it = std::find(roots.begin(), roots.end(), j);
+      size_t index_for_root = std::distance(roots.begin(), it);
+      // remove all nodes that isn't special roots
+      if (it == roots.end() ||
+          (it != roots.end() && !is_root_special[index_for_root])) {
+        new_graph.removeSingleLayer(subs[i][j]);
+        change_ids(subs, subs[i][j]);
+        std::transform(roots_inps_final.begin(), roots_inps_final.end(),
+                       roots_inps_final.begin(), [&](int elem) {
+                         return elem > subs[i][j] ? elem - 1 : elem;
+                       });
+        std::transform(leafs_outs_final.begin(), leafs_outs_final.end(),
+                       leafs_outs_final.begin(), [&](int elem) {
+                         return elem > subs[i][j] ? elem - 1 : elem;
+                       });
+      }
+    }
+    for (int j : roots_inps_final) {
+      new_graph.makeConnection(new_graph.getLayerFromID(j), layer.get());
+    }
+    if (roots_inps_final.empty()) {
+      new_graph.addSingleLayer(layer.get());
+    }
+    for (int j : leafs_outs_final) {
+      new_graph.makeConnection(layer.get(), new_graph.getLayerFromID(j));
+    }
+  }
+  return new_graph;
+}
+
 }  // namespace it_lab_ai
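Usage sketch for the new pass (signature from the header above; behavior as exercised by `check_subgraphs_replace` in the tests below): each non-overlapping match of the pattern is removed and replaced by a single relu `EWLayer`, reconnected to the matched region's external inputs and outputs.

```cpp
// `graph` and `pattern` are assumed to be already-built it_lab_ai::Graph
// objects; `fused_out` receives the cloned graph's output binding.
it_lab_ai::Tensor fused_out;
it_lab_ai::Graph fused =
    it_lab_ai::changed_subgraphs(graph, pattern, fused_out);
```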
diff --git a/src/layers/ConvLayer.cpp b/src/layers/ConvLayer.cpp
index 28c45e55..ac5dfcef 100644
--- a/src/layers/ConvLayer.cpp
+++ b/src/layers/ConvLayer.cpp
@@ -11,14 +11,14 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
     throw std::out_of_range("input must be 4-dimensional");
   }
   if (group_ > 1) {
-    if (group_ == input[0].get_shape()[1] && group_ == kernel_.get_shape()[0]) {
+    if (group_ == input[0].get_shape()[1] && group_ == kernel_->get_shape()[0]) {
       switch (input[0].get_type()) {
         case Type::kFloat:
-          DepthwiseConv4D<float>(input[0], kernel_, bias_, output[0], stride_,
+          DepthwiseConv4D<float>(input[0], *kernel_, *bias_, output[0], stride_,
                                  pads_, dilations_);
           break;
         case Type::kInt:
-          DepthwiseConv4D<int>(input[0], kernel_, bias_, output[0], stride_,
+          DepthwiseConv4D<int>(input[0], *kernel_, *bias_, output[0], stride_,
                                pads_, dilations_);
           break;
         default:
@@ -30,7 +30,7 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
   }
   switch (input[0].get_type()) {
     case Type::kInt: {
-      if (kernel_.get_shape().dims() == 2) {
+      if (kernel_->get_shape().dims() == 2) {
         if (dilations_ > 0) {
           dilations_--;
         }
@@ -44,15 +44,15 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
               input[0].get_shape()[input[0].get_shape().dims() - 3]),
           input[0].get_shape()[input[0].get_shape().dims() - 1] *
               input[0].get_shape()[input[0].get_shape().dims() - 2],
-          bias_.empty() ? std::vector<int>() : *bias_.as<int>());
+          bias_->empty() ? std::vector<int>() : *bias_->as<int>());
 
       auto sizeforshape = static_cast<size_t>(
           ((static_cast<int>(
                 input[0].get_shape()[input[0].get_shape().dims() - 1]) -
             1 -
             static_cast<int>(
-                (1 + kernel_.get_shape()[kernel_.get_shape().dims() - 1]) *
+                (1 + kernel_->get_shape()[kernel_->get_shape().dims() - 1]) *
                     dilations_ +
-                kernel_.get_shape()[kernel_.get_shape().dims() - 1] - 1)) /
+                kernel_->get_shape()[kernel_->get_shape().dims() - 1] - 1)) /
            static_cast<int>(stride_)) +
           1);
@@ -66,26 +66,26 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
               static_cast<int>(
                   input[0].get_shape()[input[0].get_shape().dims() - 2]) +
                   2 * static_cast<int>(pads_),
-              *kernel_.as<int>(),
-              kernel_.get_shape()[kernel_.get_shape().dims() - 1],
-              (1 + kernel_.get_shape()[kernel_.get_shape().dims() - 1]) *
+              *kernel_->as<int>(),
+              kernel_->get_shape()[kernel_->get_shape().dims() - 1],
+              (1 + kernel_->get_shape()[kernel_->get_shape().dims() - 1]) *
                       dilations_ +
-                  kernel_.get_shape()[kernel_.get_shape().dims() - 1],
+                  kernel_->get_shape()[kernel_->get_shape().dims() - 1],
               static_cast<size_t>(
-                  ((1 + kernel_.get_shape()[kernel_.get_shape().dims() - 1]) *
+                  ((1 + kernel_->get_shape()[kernel_->get_shape().dims() - 1]) *
                        dilations_ +
-                   kernel_.get_shape()[kernel_.get_shape().dims() - 1] - 1) /
+                   kernel_->get_shape()[kernel_->get_shape().dims() - 1] - 1) /
                   2)),
           sh);
     } else {
       switch (implType_) {
         case kSTL: {
-          Conv4DSTL<int>(input[0], kernel_, bias_, output[0], stride_, pads_,
-                         group_, dilations_);
+          Conv4DSTL<int>(input[0], *kernel_, *bias_, output[0], stride_,
+                         pads_, group_, dilations_);
           break;
         }
         default: {
-          Conv4D<int>(input[0], kernel_, bias_, output[0], stride_, pads_,
+          Conv4D<int>(input[0], *kernel_, *bias_, output[0], stride_, pads_,
                       group_, dilations_);
           break;
         }
@@ -94,7 +94,7 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
       break;
     }
     case Type::kFloat: {
-      if (kernel_.get_shape().dims() == 2) {
+      if (kernel_->get_shape().dims() == 2) {
         if (dilations_ > 0) {
           dilations_--;
         }
@@ -108,15 +108,15 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
               input[0].get_shape()[input[0].get_shape().dims() - 3]),
           input[0].get_shape()[input[0].get_shape().dims() - 1] *
              input[0].get_shape()[input[0].get_shape().dims() - 2],
-          bias_.empty() ? std::vector<float>() : *bias_.as<float>());
+          bias_->empty() ? std::vector<float>() : *bias_->as<float>());
 
       auto sizeforshape = static_cast<size_t>(
           ((static_cast<int>(
                 input[0].get_shape()[input[0].get_shape().dims() - 1]) -
             1 -
             static_cast<int>(
-                (1 + kernel_.get_shape()[kernel_.get_shape().dims() - 1]) *
+                (1 + kernel_->get_shape()[kernel_->get_shape().dims() - 1]) *
                     dilations_ +
-                kernel_.get_shape()[kernel_.get_shape().dims() - 1] - 1)) /
+                kernel_->get_shape()[kernel_->get_shape().dims() - 1] - 1)) /
            static_cast<int>(stride_)) +
           1);
@@ -130,31 +130,31 @@ void ConvolutionalLayer::run(const std::vector<Tensor>& input,
               static_cast<int>(
                   input[0].get_shape()[input[0].get_shape().dims() - 2]) +
                   2 * static_cast<int>(pads_),
-              *kernel_.as<float>(),
-              kernel_.get_shape()[kernel_.get_shape().dims() - 1],
-              (1 + kernel_.get_shape()[kernel_.get_shape().dims() - 1]) *
+              *kernel_->as<float>(),
+              kernel_->get_shape()[kernel_->get_shape().dims() - 1],
+              (1 + kernel_->get_shape()[kernel_->get_shape().dims() - 1]) *
                       dilations_ +
-                  kernel_.get_shape()[kernel_.get_shape().dims() - 1],
+                  kernel_->get_shape()[kernel_->get_shape().dims() - 1],
               static_cast<size_t>(
-                  ((1 + kernel_.get_shape()[kernel_.get_shape().dims() - 1]) *
+                  ((1 + kernel_->get_shape()[kernel_->get_shape().dims() - 1]) *
                        dilations_ +
-                   kernel_.get_shape()[kernel_.get_shape().dims() - 1] - 1) /
+                   kernel_->get_shape()[kernel_->get_shape().dims() - 1] - 1) /
                   2)),
           sh);
     } else {
       if (useLegacyImpl_) {
-        Conv4D_Legacy<float>(input[0], kernel_, bias_, output[0], stride_,
+        Conv4D_Legacy<float>(input[0], *kernel_, *bias_, output[0], stride_,
                              pads_, dilations_);
       } else {
         switch (implType_) {
          case kSTL: {
-            Conv4DSTL<float>(input[0], kernel_, bias_, output[0], stride_,
+            Conv4DSTL<float>(input[0], *kernel_, *bias_, output[0], stride_,
                              pads_, group_, dilations_);
             break;
           }
           default: {
-            Conv4D<float>(input[0], kernel_, bias_, output[0], stride_, pads_,
-                          group_, dilations_);
+            Conv4D<float>(input[0], *kernel_, *bias_, output[0], stride_,
+                          pads_, group_, dilations_);
             break;
           }
         }
diff --git a/src/layers/FCLayer.cpp b/src/layers/FCLayer.cpp
index 29b9db76..4d414471 100644
--- a/src/layers/FCLayer.cpp
+++ b/src/layers/FCLayer.cpp
@@ -7,18 +7,18 @@ void FCLayer::run(const std::vector<Tensor>& input,
   if (input.size() != 1) {
     throw std::runtime_error("FCLayer: Input tensors not 1");
   }
-  if (input[0].get_type() != weights_.get_type()) {
+  if (input[0].get_type() != weights_->get_type()) {
     throw std::invalid_argument("input[0] and weights data type aren't same");
   }
-  if (bias_.get_type() != weights_.get_type()) {
+  if (bias_->get_type() != weights_->get_type()) {
     throw std::invalid_argument("Bias and weights data type aren't same");
   }
   size_t batch_size;
-  size_t output_size = bias_.get_shape()[0];
+  size_t output_size = bias_->get_shape()[0];
 
   if (input[0].get_shape().dims() == 1) {
     size_t total_elements = input[0].get_shape()[0];
-    size_t expected_input_size = weights_.get_shape()[0];
+    size_t expected_input_size = weights_->get_shape()[0];
 
     if (total_elements % expected_input_size == 0) {
       batch_size = total_elements / expected_input_size;
@@ -31,15 +31,15 @@ void FCLayer::run(const std::vector<Tensor>& input,
 
   switch (input[0].get_type()) {
     case Type::kInt: {
-      FCLayerImpl<int> used_impl(*weights_.as<int>(), weights_.get_shape(),
-                                 *bias_.as<int>());
+      FCLayerImpl<int> used_impl(*weights_->as<int>(), weights_->get_shape(),
+                                 *bias_->as<int>());
       auto result = used_impl.run(*input[0].as<int>());
       output[0] = make_tensor(result, {batch_size, output_size});
       break;
     }
     case Type::kFloat: {
-      FCLayerImpl<float> used_impl(*weights_.as<float>(), weights_.get_shape(),
-                                   *bias_.as<float>());
+      FCLayerImpl<float> used_impl(*weights_->as<float>(),
+                                   weights_->get_shape(), *bias_->as<float>());
       auto result = used_impl.run(*input[0].as<float>());
       output[0] = make_tensor(result, {batch_size, output_size});
       break;
diff --git a/src/layers/OutputLayer.cpp b/src/layers/OutputLayer.cpp
index 57d2621e..6c22f957 100644
--- a/src/layers/OutputLayer.cpp
+++ b/src/layers/OutputLayer.cpp
@@ -13,7 +13,7 @@ std::pair<std::vector<std::string>, Tensor> OutputLayer::top_k(
     case Type::kFloat: {
       auto toppair = top_k_vec(*input.as<float>(), labels_, k);
       reslabels = toppair.first;
-      resvector = resvector = make_tensor(toppair.second);
+      resvector = make_tensor(toppair.second);
       break;
     }
     case Type::kInt: {
diff --git a/test/benchmarking/test_layers_time.cpp b/test/benchmarking/test_layers_time.cpp
index ad2d8dc3..ea6b9bcb 100644
--- a/test/benchmarking/test_layers_time.cpp
+++ b/test/benchmarking/test_layers_time.cpp
@@ -58,8 +58,8 @@ TEST(conv_test, is_conv_stl_ok) {
   Tensor input = make_tensor(a1, test_shape);
   Tensor kernel = make_tensor(a2, Shape({5, 5, 3, 16}));
   Tensor output;
-  ConvolutionalLayer p1(1, 1, 2, kernel, Tensor(), kDefault);
-  ConvolutionalLayer p2(1, 1, 2, kernel, Tensor(), kSTL);
+  ConvolutionalLayer p1(1, 1, 2, kernel, *std::make_shared<Tensor>(), kDefault);
+  ConvolutionalLayer p2(1, 1, 2, kernel, *std::make_shared<Tensor>(), kSTL);
 
   double count1 = elapsed_time(test_func, p1, input, output);
   double count2 =
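The bias broadcast inside `FCLayerImpl::run` (see FCLayer.hpp above), restated as a self-contained sketch with std types only: the bias vector is added once per batch row of the matmul output.

```cpp
#include <cstddef>
#include <vector>

// out has batch * output_size elements, laid out row-major by batch.
std::vector<float> add_bias(std::vector<float> out,
                            const std::vector<float>& bias,
                            std::size_t output_size) {
  const std::size_t batch = out.size() / output_size;
  for (std::size_t b = 0; b < batch; ++b) {
    for (std::size_t i = 0; i < bias.size(); ++i) {
      out[b * output_size + i] += bias[i];  // same bias for every batch row
    }
  }
  return out;
}
```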
diff --git a/test/graph/test_graph.cpp b/test/graph/test_graph.cpp
index 33c99b28..f2132ff1 100644
--- a/test/graph/test_graph.cpp
+++ b/test/graph/test_graph.cpp
@@ -37,6 +37,120 @@ TEST(graph, check_connection) {
   ASSERT_EQ(graph.areLayerNext(inputLayer_ptr, fcLayer_ptr), 1);
 }
 
+TEST(graph, check_connection_remove) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Graph graph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto inputLayer = std::make_unique<InputLayer>();
+  auto ewLayer = std::make_unique<EWLayer>();
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* inputLayer_ptr = inputLayer.get();
+  Layer* ewLayer_ptr = ewLayer.get();
+
+  graph.setInput(inputLayer_ptr, input);
+  graph.makeConnection(inputLayer_ptr, fcLayer_ptr);
+  graph.makeConnection(fcLayer_ptr, ewLayer_ptr);
+  graph.removeConnection(fcLayer->getID(), ewLayer->getID());
+  graph.removeConnection(inputLayer->getID(), fcLayer->getID());
+
+  ASSERT_EQ(graph.areLayerNext(fcLayer_ptr, ewLayer_ptr), 0);
+  ASSERT_EQ(graph.areLayerNext(inputLayer_ptr, fcLayer_ptr), 0);
+}
+
+TEST(graph, check_connection_remove_out_of_range) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Graph graph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto inputLayer = std::make_unique<InputLayer>();
+  auto ewLayer = std::make_unique<EWLayer>();
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* inputLayer_ptr = inputLayer.get();
+  Layer* ewLayer_ptr = ewLayer.get();
+
+  graph.setInput(inputLayer_ptr, input);
+  graph.makeConnection(inputLayer_ptr, fcLayer_ptr);
+  graph.makeConnection(fcLayer_ptr, ewLayer_ptr);
+  ASSERT_ANY_THROW(graph.removeConnection(999, -1));
+}
+
+TEST(graph, check_connection_double_remove_throw) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Graph graph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto inputLayer = std::make_unique<InputLayer>();
+  auto ewLayer = std::make_unique<EWLayer>();
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* inputLayer_ptr = inputLayer.get();
+  Layer* ewLayer_ptr = ewLayer.get();
+
+  graph.setInput(inputLayer_ptr, input);
+  graph.makeConnection(inputLayer_ptr, fcLayer_ptr);
+  graph.makeConnection(fcLayer_ptr, ewLayer_ptr);
+  graph.removeConnection(fcLayer->getID(), ewLayer->getID());
+  ASSERT_ANY_THROW(graph.removeConnection(fcLayer->getID(), ewLayer->getID()));
+}
+
+TEST(graph, check_layer_remove) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Graph graph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto inputLayer = std::make_unique<InputLayer>();
+  auto ewLayer = std::make_unique<EWLayer>();
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* inputLayer_ptr = inputLayer.get();
+  Layer* ewLayer_ptr = ewLayer.get();
+
+  graph.setInput(inputLayer_ptr, input);
+  graph.makeConnection(inputLayer_ptr, fcLayer_ptr);
+  graph.makeConnection(fcLayer_ptr, ewLayer_ptr);
+  graph.removeSingleLayer(fcLayer->getID());
+
+  ASSERT_EQ(graph.areLayerNext(inputLayer_ptr, fcLayer_ptr), 0);
+  ASSERT_ANY_THROW(graph.areLayerNext(fcLayer_ptr, ewLayer_ptr));
+}
+
+TEST(graph, check_layer_remove_out_of_range) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Graph graph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto inputLayer = std::make_unique<InputLayer>();
+  auto ewLayer = std::make_unique<EWLayer>();
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* inputLayer_ptr = inputLayer.get();
+  Layer* ewLayer_ptr = ewLayer.get();
+
+  graph.setInput(inputLayer_ptr, input);
+  graph.makeConnection(inputLayer_ptr, fcLayer_ptr);
+  graph.makeConnection(fcLayer_ptr, ewLayer_ptr);
+  ASSERT_ANY_THROW(graph.removeSingleLayer(999));
+  ASSERT_ANY_THROW(graph.removeSingleLayer(-1));
+}
+
 TEST(graph, check_connection1) {
   const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
   Tensor weights = make_tensor(vec1, {3, 2});
@@ -162,31 +276,11 @@ TEST(graph, set_input_null_layer) {
   EXPECT_THROW(graph.setInput(nullptr, input), std::invalid_argument);
 }
 
-TEST(graph, add_owned_layer_null_check) {
-  Graph graph;
-  std::unique_ptr<Layer> null_layer = nullptr;
-  graph.addOwnedLayer(std::move(null_layer));
-  SUCCEED();
-}
-
-TEST(graph, make_connection_null_layers) {
-  Graph graph;
-  Tensor input = make_tensor({1.0F, 2.0F}, {2});
-  auto valid_layer = std::make_unique<EWLayer>();
-  Layer* valid_ptr = valid_layer.get();
-  graph.addOwnedLayer(std::move(valid_layer));
-
-  EXPECT_THROW(graph.makeConnection(nullptr, valid_ptr), std::invalid_argument);
-  EXPECT_THROW(graph.makeConnection(valid_ptr, nullptr), std::invalid_argument);
-  EXPECT_THROW(graph.makeConnection(nullptr, nullptr), std::invalid_argument);
-}
-
 TEST(graph, make_connection_same_layer) {
   Graph graph;
   Tensor input = make_tensor({1.0F, 2.0F}, {2});
   auto layer = std::make_unique<EWLayer>();
   Layer* layer_ptr = layer.get();
-  graph.addOwnedLayer(std::move(layer));
   graph.setInput(layer_ptr, input);
 
   EXPECT_THROW(graph.makeConnection(layer_ptr, layer_ptr), std::out_of_range);
@@ -238,12 +332,6 @@ TEST(graph, complex_graph_with_split_distribution) {
   Layer* concat_ptr = concat_layer.get();
 
   graph.setSplitDistribution(split_dist);
-  graph.addOwnedLayer(std::move(input_layer));
-  graph.addOwnedLayer(std::move(split_layer));
-  graph.addOwnedLayer(std::move(ew_layer1));
-  graph.addOwnedLayer(std::move(ew_layer2));
-  graph.addOwnedLayer(std::move(concat_layer));
-
   graph.setInput(input_ptr, input);
   graph.makeConnection(input_ptr, split_ptr);
   graph.makeConnection(split_ptr, ew1_ptr);
@@ -313,6 +401,35 @@ TEST(graph, edges_out_of_range) {
   ASSERT_ANY_THROW(graph.getEdgeValue(999));
 }
 
+TEST(graph, copy_works) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Tensor new_output;
+  Graph graph;
+
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_unique<FCLayer>(weights, bias);
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* fcLayer2_ptr = fcLayer2.get();
+  Layer* fcLayer3_ptr = fcLayer3.get();
+  Layer* fcLayer4_ptr = fcLayer4.get();
+
+  graph.setInput(fcLayer_ptr, input);
+  graph.makeConnection(fcLayer_ptr, fcLayer2_ptr);
+  graph.makeConnection(fcLayer2_ptr, fcLayer3_ptr);
+  graph.makeConnection(fcLayer_ptr, fcLayer4_ptr);
+  graph.setOutput(fcLayer4_ptr, output);
+  Graph new_graph = graph.clone(new_output);
+  ASSERT_EQ(graph.getInOutDegrees(), new_graph.getInOutDegrees());
+  ASSERT_EQ(graph.getLayersCount(), new_graph.getLayersCount());
+}
+
 TEST(graph, inputs_out_of_range) {
   const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
   Tensor weights = make_tensor(vec1, {3, 2});
@@ -367,6 +484,32 @@ TEST(graph, get_layer_out_of_range) {
   ASSERT_ANY_THROW(graph.getLayerFromID(999));
 }
 
+TEST(graph, get_in_layers_out_of_range) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+
+  Graph graph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_unique<FCLayer>(weights, bias);
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* fcLayer2_ptr = fcLayer2.get();
+  Layer* fcLayer3_ptr = fcLayer3.get();
+  Layer* fcLayer4_ptr = fcLayer4.get();
+
+  graph.setInput(fcLayer_ptr, input);
+  graph.makeConnection(fcLayer_ptr, fcLayer2_ptr);
+  graph.makeConnection(fcLayer2_ptr, fcLayer3_ptr);
+  graph.makeConnection(fcLayer_ptr, fcLayer4_ptr);
+  graph.setOutput(fcLayer4_ptr, output);
+  ASSERT_ANY_THROW(graph.getInLayers(999));
+}
+
 TEST(graph_transformations, check_subgraphs_search) {
   const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
   Tensor weights = make_tensor(vec1, {3, 2});
@@ -590,18 +733,15 @@ TEST(graph_transformations, check_subgraphs_big_random) {
   Graph subgraph;
 
   std::vector<std::unique_ptr<Layer>> layers;
-  std::vector<Layer*> layer_ptrs;
 
   for (int i = 0; i < num_vertices / 2; i++) {
     layers.push_back(std::make_unique<FCLayer>(weights, bias));
-    layer_ptrs.push_back(layers.back().get());
   }
   for (int i = 0; i < num_vertices / 2; i++) {
     layers.push_back(std::make_unique<EWLayer>("relu"));
-    layer_ptrs.push_back(layers.back().get());
   }
 
-  graph.setInput(layer_ptrs[0], input);
+  graph.setInput(layers[0].get(), input);
   std::mt19937 rng(42);
   std::uniform_int_distribution<int> first_dist(0, num_vertices - 2);
   std::uniform_int_distribution<int> second_dist(1, num_vertices - 1);
@@ -609,28 +749,160 @@ TEST(graph_transformations, check_subgraphs_big_random) {
     int rFirst = first_dist(rng);
     int rSecond = second_dist(rng);
     if ((rFirst == rSecond) ||
-        ((layer_ptrs[rFirst]->getID() == layer_ptrs[rSecond]->getID()) &&
-         (layer_ptrs[rFirst]->getID() != 0))) {
+        ((layers[rFirst]->getID() == layers[rSecond]->getID()) &&
+         (layers[rFirst]->getID() != 0))) {
       continue;
     }
-    if ((layer_ptrs[rFirst]->getID() >= graph.getLayersCount()) ||
-        (rFirst != 0 && layer_ptrs[rFirst]->getID() == 0)) {
-      graph.addSingleLayer(layer_ptrs[rFirst]);
+    if ((layers[rFirst]->getID() >= graph.getLayersCount()) ||
+        (rFirst != 0 && layers[rFirst]->getID() == 0)) {
+      graph.addSingleLayer(layers[rFirst].get());
     }
-    graph.makeConnection(layer_ptrs[rFirst], layer_ptrs[rSecond]);
-  }
-  graph.setOutput(layer_ptrs[num_vertices - 1], output);
-
-  for (auto& layer : layers) {
-    graph.addOwnedLayer(std::move(layer));
+    graph.makeConnection(layers[rFirst].get(), layers[rSecond].get());
   }
+  graph.setOutput(layers[num_vertices - 1].get(), output);
 
-  subgraph.setInput(layer_ptrs[0], input);
-  subgraph.makeConnection(layer_ptrs[0], layer_ptrs[50]);
-  subgraph.makeConnection(layer_ptrs[50], layer_ptrs[1]);
+  subgraph.setInput(layers[0].get(), input);
+  subgraph.makeConnection(layers[0].get(), layers[50].get());
+  subgraph.makeConnection(layers[50].get(), layers[1].get());
 
   std::vector<std::vector<int>> res1 = find_subgraphs(graph, subgraph);
   double res1_time = elapsed_time_avg(10, find_subgraphs, graph, subgraph);
   std::cerr << "Find subgraphs time in ms " << res1_time << std::endl;
-}
\ No newline at end of file
+}
+
+class SubgraphTestsParameterized
+    : public ::testing::TestWithParam<std::vector<std::tuple<int, int>>> {};
+
+TEST_P(SubgraphTestsParameterized, check_subgraphs_big_random_lines) {
+  auto data = GetParam();
+  for (size_t m = 0; m < data.size(); m++) {
+    std::cerr << "(" << std::get<1>(data[m]) << ") ";
+    int num_vertices = std::get<0>(data[m]);
+    int num_vertices_sub = std::get<1>(data[m]);
+    const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+    Tensor weights = make_tensor(vec1, {3, 2});
+    Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+    Tensor input = make_tensor({1.0F, 2.0F}, {2});
+    Tensor output;
+    Graph graph;
+    Graph subgraph;
+    std::vector<std::unique_ptr<FCLayer>> layers;
+    for (int i = 0; i < num_vertices; i++) {
+      layers.push_back(std::make_unique<FCLayer>(weights, bias));
+    }
+    graph.setInput(layers[0].get(), input);
+    for (int i = 0; i < num_vertices - 1; i++) {
+      graph.makeConnection(layers[i].get(), layers[i + 1].get());
+    }
+    graph.setOutput(layers[num_vertices - 1].get(), output);
+    std::vector<std::shared_ptr<FCLayer>> temp_layers(
+        num_vertices_sub + 2, std::make_unique<FCLayer>(weights, bias));
+    subgraph.setInput(temp_layers[0].get(), input);
+    for (int i = 0; i < num_vertices_sub; i++) {
+      subgraph.makeConnection(temp_layers[i].get(), temp_layers[i + 1].get());
+    }
+
+    double res1_time = elapsed_time_avg(1, find_subgraphs, graph, subgraph);
+    std::cerr << "Find subgraphs time in ms "
+              << res1_time / (100 * num_vertices_sub * num_vertices_sub)
+              << std::endl;
+  }
+}
+
+std::vector<std::tuple<int, int>> genVector() {
+  std::vector<std::tuple<int, int>> results(10);
+  for (size_t i = 0; i < results.size(); i++) {
+    results[i] = std::tuple<int, int>(105, 2 + 2 * static_cast<int>(i));
+  }
+  return results;
+}
+
+INSTANTIATE_TEST_SUITE_P(graph_transformations, SubgraphTestsParameterized,
+                         ::testing::Values(genVector()));
+
+TEST(graph_transformations, check_subgraphs_replace) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Tensor output_new;
+
+  Graph graph;
+  Graph res_graph;
+  Graph subgraph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_unique<FCLayer>(weights, bias);
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* fcLayer2_ptr = fcLayer2.get();
+  Layer* fcLayer3_ptr = fcLayer3.get();
+  Layer* fcLayer4_ptr = fcLayer4.get();
+
+  graph.setInput(fcLayer_ptr, input);
+  graph.makeConnection(fcLayer_ptr, fcLayer2_ptr);
+  graph.makeConnection(fcLayer2_ptr, fcLayer3_ptr);
+  graph.makeConnection(fcLayer_ptr, fcLayer4_ptr);
+  graph.setOutput(fcLayer4_ptr, output);
+
+  subgraph.setInput(fcLayer_ptr, input);
+  subgraph.makeConnection(fcLayer_ptr, fcLayer2_ptr);
+
+  res_graph.setInput(fcLayer_ptr, input);
+  res_graph.makeConnection(fcLayer_ptr, fcLayer4_ptr);
+  std::unique_ptr<Layer> lay = std::make_unique<EWLayer>("relu");
+  Layer* lay_ptr = lay.get();
+  res_graph.addSingleLayer(lay_ptr);
+  res_graph.makeConnection(lay_ptr, fcLayer3_ptr);
+
+  Graph res = changed_subgraphs(graph, subgraph, output_new);
+  ASSERT_FALSE(find_subgraphs(graph, res_graph).empty());
+}
+
+TEST(graph_transformations, check_subgraphs_replace2) {
+  const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
+  Tensor weights = make_tensor(vec1, {3, 2});
+  Tensor bias = make_tensor({0.5F, 0.5F, 1.0F});
+  Tensor input = make_tensor({1.0F, 2.0F}, {2});
+  Tensor output;
+  Tensor output_new;
+
+  Graph graph;
+  Graph res_graph;
+  Graph subgraph;
+  auto fcLayer = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer2 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer3 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer4 = std::make_unique<FCLayer>(weights, bias);
+  auto fcLayer5 = std::make_unique<FCLayer>(weights, bias);
+
+  Layer* fcLayer_ptr = fcLayer.get();
+  Layer* fcLayer2_ptr = fcLayer2.get();
+  Layer* fcLayer3_ptr = fcLayer3.get();
+  Layer* fcLayer4_ptr = fcLayer4.get();
+  Layer* fcLayer5_ptr = fcLayer5.get();
+
+  graph.setInput(fcLayer_ptr, input);
+  graph.addSingleLayer(fcLayer2_ptr);
+  graph.makeConnection(fcLayer2_ptr, fcLayer3_ptr);
+  graph.makeConnection(fcLayer_ptr, fcLayer4_ptr);
+  graph.makeConnection(fcLayer4_ptr, fcLayer5_ptr);
+  graph.setOutput(fcLayer5_ptr, output);
+
+  subgraph.setInput(fcLayer_ptr, input);
+  subgraph.makeConnection(fcLayer_ptr, fcLayer2_ptr);
+
+  std::unique_ptr<Layer> lay = std::make_unique<EWLayer>("relu");
+  std::unique_ptr<Layer> lay2 = std::make_unique<EWLayer>("relu");
+  Layer* lay_ptr = lay.get();
+  Layer* lay2_ptr = lay2.get();
+  res_graph.setInput(lay2_ptr, input);
+  res_graph.addSingleLayer(lay_ptr);
+
+  Graph res = changed_subgraphs(graph, subgraph, output_new);
+  ASSERT_FALSE(find_subgraphs(graph, res_graph).empty());
+}
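A quick self-contained check of the two helpers these transformation tests lean on, with toy values (declarations assumed from graph_transformations.hpp above):

```cpp
#include <cassert>
#include <vector>

#include "graph_transformations/graph_transformations.hpp"

int main() {
  std::vector<std::vector<int>> subs = {{0, 2, 4}, {1, 3, 5}};
  // Removing layer ID 2 from the graph: every stored ID above 2 shifts
  // down by one; 2 itself is left untouched by change_ids.
  it_lab_ai::change_ids(subs, 2);
  assert((subs[0] == std::vector<int>{0, 2, 3}));
  assert((subs[1] == std::vector<int>{1, 2, 4}));
  // The shifted matches now share ID 2, so they count as overlapping.
  assert(it_lab_ai::does_intersect(subs[0], subs[1]));
  return 0;
}
```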
diff --git a/test/single_layer/test_convlayer.cpp b/test/single_layer/test_convlayer.cpp
index 41b4400f..be537bb0 100644
--- a/test/single_layer/test_convlayer.cpp
+++ b/test/single_layer/test_convlayer.cpp
@@ -363,7 +363,8 @@ TEST(ConvolutionalLayerTest, DepthwiseConv4DNoBias) {
   std::vector<int> output_vec(12, 0);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  DepthwiseConv4D<int>(input, kernel, Tensor(), output, 2, 0, 1);
+  DepthwiseConv4D<int>(input, kernel, *std::make_shared<Tensor>(), output, 2, 0,
+                       1);
 
   std::vector<int> result = *output.as<int>();
 
@@ -388,7 +389,8 @@ TEST(ConvolutionalLayerTest, Conv4DSTLFloatWithGroups) {
   std::vector<float> output_vec(16, 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  Conv4DSTL<float>(input, kernel, Tensor(), output, 1, 0, 2, 1);
+  Conv4DSTL<float>(input, kernel, *std::make_shared<Tensor>(), output, 1, 0, 2,
+                   1);
 
   std::vector<float> result = *output.as<float>();
 
@@ -445,7 +447,8 @@ TEST(ConvolutionalLayerTest, DepthwiseIntegration) {
   std::vector<float> output_vec(32, 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  ConvolutionalLayer layer(1, 1, 1, kernel, Tensor(), kDefault, 2);
+  ConvolutionalLayer layer(1, 1, 1, kernel, *std::make_shared<Tensor>(),
+                           kDefault, 2);
 
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
@@ -472,7 +475,8 @@ TEST(ConvolutionalLayerTest, DepthwiseConv4DWithPadding) {
                                 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  DepthwiseConv4D<float>(input, kernel, Tensor(), output, 1, 1, 1);
+  DepthwiseConv4D<float>(input, kernel, *std::make_shared<Tensor>(), output, 1,
+                         1, 1);
 
   std::vector<float> result = *output.as<float>();
 
@@ -523,7 +527,8 @@ TEST(ConvolutionalLayerTest, Conv4DSTLFloatWithPaddingAndStride) {
                                 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  Conv4DSTL<float>(input, kernel, Tensor(), output, 2, 1, 1, 1);
+  Conv4DSTL<float>(input, kernel, *std::make_shared<Tensor>(), output, 2, 1, 1,
+                   1);
 
   std::vector<float> result = *output.as<float>();
 
@@ -542,12 +547,14 @@ TEST(ConvolutionalLayerTest, Conv4DSTLFloatCompareWithConv4D) {
   Shape output_shape1({1, 1, 1, 1});
   std::vector<float> output_vec1(1, 0.0f);
   Tensor output1 = make_tensor(output_vec1, output_shape1);
-  Conv4D<float>(input, kernel, Tensor(), output1, 1, 0, 1, 1);
+  Conv4D<float>(input, kernel, *std::make_shared<Tensor>(), output1, 1, 0, 1,
+                1);
 
   Shape output_shape2({1, 1, 1, 1});
   std::vector<float> output_vec2(1, 0.0f);
   Tensor output2 = make_tensor(output_vec2, output_shape2);
-  Conv4DSTL<float>(input, kernel, Tensor(), output2, 1, 0, 1, 1);
+  Conv4DSTL<float>(input, kernel, *std::make_shared<Tensor>(), output2, 1, 0, 1,
+                   1);
 
   float result1 = (*output1.as<float>())[0];
   float result2 = (*output2.as<float>())[0];
@@ -569,7 +576,8 @@ TEST(ConvolutionalLayerTest, DepthwiseViaConvolutionalLayer) {
   std::vector<float> output_vec(8, 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  ConvolutionalLayer layer(1, 0, 1, kernel, Tensor(), kDefault, 2);
+  ConvolutionalLayer layer(1, 0, 1, kernel, *std::make_shared<Tensor>(),
+                           kDefault, 2);
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
   layer.run(in, out);
@@ -595,7 +603,7 @@ TEST(ConvolutionalLayerTest, Conv4DSTLViaConvolutionalLayer) {
   std::vector<float> output_vec(8, 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  ConvolutionalLayer layer(1, 0, 1, kernel, Tensor(), kSTL);
+  ConvolutionalLayer layer(1, 0, 1, kernel, *std::make_shared<Tensor>(), kSTL);
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
   layer.run(in, out);
@@ -684,7 +692,8 @@ TEST(ConvolutionalLayerTest, Conv4DLegacyViaConvolutionalLayer) {
   std::vector<float> output_vec(8, 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  ConvolutionalLayer layer(1, 0, 1, kernel, Tensor(), kDefault, 1, true);
+  ConvolutionalLayer layer(1, 0, 1, kernel, *std::make_shared<Tensor>(),
+                           kDefault, 1, true);
 
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
@@ -801,7 +810,8 @@ TEST(ConvolutionalLayerTest, DepthwiseConv4DNoBiasIntPathCoverage) {
   std::vector<int> output_vec(2, 0);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  ConvolutionalLayer layer(1, 0, 1, kernel, Tensor(), kDefault, 2);
+  ConvolutionalLayer layer(1, 0, 1, kernel, *std::make_shared<Tensor>(),
+                           kDefault, 2);
 
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
@@ -827,7 +837,8 @@ TEST(ConvolutionalLayerTest, DepthwiseConv4DNoBiasFloatPathCoverage) {
   std::vector<float> output_vec(2, 0.0f);
   Tensor output = make_tensor(output_vec, output_shape);
 
-  ConvolutionalLayer layer(1, 0, 1, kernel, Tensor(), kDefault, 2);
+  ConvolutionalLayer layer(1, 0, 1, kernel, *std::make_shared<Tensor>(),
+                           kDefault, 2);
 
   std::vector<Tensor> in{input};
   std::vector<Tensor> out{output};
diff --git a/test/single_layer/test_fclayer.cpp b/test/single_layer/test_fclayer.cpp
index f0b5e93f..9d1ee986 100644
--- a/test/single_layer/test_fclayer.cpp
+++ b/test/single_layer/test_fclayer.cpp
@@ -53,13 +53,13 @@ INSTANTIATE_TEST_SUITE_P(
         std::vector<double>({13.1, -0.3, 9.9}))));
 
 TEST(fclayer, throws_when_empty_weights) {
-  const std::vector<double> a1;
+  std::vector<double> a1;
   Shape wshape({3, 2});
   std::vector<double> bias = {0.5, 0.5, 1.0};
   ASSERT_ANY_THROW(FCLayerImpl<double> layer(a1, wshape, bias));
 }
 TEST(fclayer, throws_when_empty_bias) {
-  const std::vector<double> a1 = {2.0, 1.5, 0.1, 1.9, 0.0, 5.5};
+  std::vector<double> a1 = {2.0, 1.5, 0.1, 1.9, 0.0, 5.5};
   Shape wshape({3, 2});
   std::vector<double> bias;
   ASSERT_ANY_THROW(FCLayerImpl<double> layer(a1, wshape, bias));
}
@@ -74,7 +74,7 @@ TEST(fclayer, matvecmul_works) {
   EXPECT_EQ(res, true_res);
 }
 TEST(fclayer, set_get_bias_is_correct) {
-  const std::vector<double> a1 = {2.0, 1.5, 0.1, 1.9, 0.0, 5.5};
+  std::vector<double> a1 = {2.0, 1.5, 0.1, 1.9, 0.0, 5.5};
   Shape wshape({3, 2});
   std::vector<double> bias = {0.5, 0.5};
   FCLayerImpl<double> layer(a1, wshape, bias);
@@ -90,7 +90,7 @@ TEST(fclayer, set_get_bias_is_correct) {
 }
 
 TEST(fclayer, set_get_weight_throws_when_out_of_range) {
-  const std::vector<double> a1 = {2.0, 1.5, 3.5, 0.1, 1.9, 2.6, 0.0, 5.5, 1.7};
+  std::vector<double> a1 = {2.0, 1.5, 3.5, 0.1, 1.9, 2.6, 0.0, 5.5, 1.7};
   Shape wshape({3, 3});
   std::vector<double> bias = {0.5, 0.5, 1.0};
   FCLayerImpl<double> layer(a1, wshape, bias);
@@ -100,7 +100,7 @@ TEST(fclayer, set_get_weight_throws_when_out_of_range) {
   ASSERT_ANY_THROW(layer.set_weight(0, 4, 1.3));
 }
 TEST(fclayer, set_get_bias_throws_when_out_of_range) {
-  const std::vector<double> a1 = {2.0, 1.5, 3.5, 0.1, 1.9, 2.6, 0.0, 5.5, 1.7};
+  std::vector<double> a1 = {2.0, 1.5, 3.5, 0.1, 1.9, 2.6, 0.0, 5.5, 1.7};
   Shape wshape({3, 3});
   std::vector<double> bias = {0.5, 0.5, 1.0};
   FCLayerImpl<double> layer(a1, wshape, bias);
@@ -109,7 +109,7 @@ TEST(fclayer, set_get_bias_throws_when_out_of_range) {
 }
 
 TEST(fclayer, get_dims_returns_correctly) {
-  const std::vector<double> a1 = {2.0, 1.5, 0.1, 1.9, 0.0, 5.5};
+  std::vector<double> a1 = {2.0, 1.5, 0.1, 1.9, 0.0, 5.5};
   Shape wshape({3, 2});
   std::vector<double> bias = {0.5, 0.5};
   FCLayerImpl<double> layer(a1, wshape, bias);
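An equivalent way to pass the empty bias in the updated test calls, as a sketch (assuming `kernel` built as in the tests above): bind it to a named tensor so the object the layer points at has an explicit, visible lifetime.

```cpp
// Named empty bias instead of the inline *std::make_shared<Tensor>() form.
it_lab_ai::Tensor empty_bias;
it_lab_ai::ConvolutionalLayer layer(1, 0, 1, kernel, empty_bias, kDefault, 2);
// `empty_bias` stays in scope for as long as `layer` is used.
```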