diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index fb514735..bdba38a0 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -24,6 +24,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
   ImplType impl1 = parallel ? kTBB : kDefault;
   ImplType impl2 = parallel ? kSTL : kDefault;
   std::vector<std::shared_ptr<Layer>> layers;
+  std::vector<bool> layerpostop;
   std::string json_file = MODEL_PATH_H5;
   json model_data = read_json(json_file);
 
@@ -73,12 +74,14 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
           1, pads, 1, tmp_values, tmp_bias, impl2);
       conv_layer->setName(kConvolution);
       layers.push_back(conv_layer);
+      layerpostop.push_back(false);
       if (comments) std::cout << "ConvLayer added to layers." << std::endl;
     }
     if (layer_type.find("relu") != std::string::npos) {
      auto ew_layer = std::make_shared<EWLayer>("relu");
      ew_layer->setName(kElementWise);
      layers.push_back(ew_layer);
+      layerpostop.push_back(true);
      if (comments)
        std::cout << "Element wise (relu) added to layers" << std::endl;
     }
@@ -99,6 +102,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
       auto fc_layer = std::make_shared<FCLayer>(tensor, tmp_bias);
       fc_layer->setName(kFullyConnected);
       layers.push_back(fc_layer);
+      layerpostop.push_back(false);
       if (comments) std::cout << "DenseLayer added to layers." << std::endl;
     }
 
@@ -116,6 +120,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
       auto pool_layer = std::make_shared<PoolingLayer>(shape, pooltype, impl1);
       pool_layer->setName(kPooling);
       layers.push_back(pool_layer);
+      layerpostop.push_back(false);
       if (comments) std::cout << "PoolingLayer added to layers." << std::endl;
     }
 
@@ -124,6 +129,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
         std::make_shared<FlattenLayer>(std::vector<int>({0, 3, 2, 1}));
     flatten_layer->setName(kFlatten);
     layers.push_back(flatten_layer);
+    layerpostop.push_back(false);
     if (comments) std::cout << "FlattenLayer added to layers." << std::endl;
   }
 
@@ -131,6 +137,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
     auto dropout_layer = std::make_shared<DropOutLayer>(0.0);
     dropout_layer->setName(kDropout);
     layers.push_back(dropout_layer);
+    layerpostop.push_back(false);
     if (comments)
       std::cout
          << "DropOutLayer added to layers with probability 0.4 (turned "
@@ -155,7 +162,12 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
                 << std::endl;
 
   for (size_t i = 0; i < layers.size() - 1; ++i) {
-    graph.makeConnection(*layers[i], *layers[i + 1]);
+    if (layerpostop[i]) {
+      layers[i - 1]->postops.layers.push_back(layers[i].get());
+      layers[i - 1]->postops.count++;
+      graph.makeConnection(*layers[i - 1], *layers[i + 1]);
+    } else if (!layerpostop[i + 1])
+      graph.makeConnection(*layers[i], *layers[i + 1]);
   }
 
   graph.setOutput(*layers.back(), output);
diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp
index 25713611..1afbb7e2 100644
--- a/include/graph/graph.hpp
+++ b/include/graph/graph.hpp
@@ -114,6 +114,12 @@ class Graph {
         weights_.push_back(layers_[i]->get_weights());
 #endif
       inten_ = *outten_;
+      if (layers_[i]->postops.count > 0) {
+        for (unsigned int j = 0; j < layers_[i]->postops.count; j++) {
+          layers_[i]->postops.layers[j]->run(inten_, *outten_);
+        }
+        inten_ = *outten_;
+      }
 #ifdef ENABLE_STATISTIC_TIME
       auto end = std::chrono::high_resolution_clock::now();
       auto elapsed =
diff --git a/include/layers/Layer.hpp b/include/layers/Layer.hpp
index 9f641a94..10af9d90 100644
--- a/include/layers/Layer.hpp
+++ b/include/layers/Layer.hpp
@@ -25,10 +25,18 @@ enum LayerType : uint8_t {
 
 enum ImplType : uint8_t { kDefault, kTBB, kSTL };
 
+class Layer;
+
+struct PostOperations {
+  std::vector<Layer*> layers;
+  unsigned int count = 0;
+};
+
 class Layer {
  public:
   Layer() = default;
   virtual ~Layer() = default;
+  PostOperations postops;
   int getID() const { return id_; }
   void setID(int id) { id_ = id; }
   LayerType getName() const { return type_; }
diff --git a/test/inference/test_inference.cpp b/test/inference/test_inference.cpp
index 7bfc6ab0..0134abe6 100644
--- a/test/inference/test_inference.cpp
+++ b/test/inference/test_inference.cpp
@@ -138,3 +138,69 @@ TEST(bfs, check_end_to_end) {
   std::vector<float> res(3, 21);
   ASSERT_EQ(tmp, res);
 }
+TEST(bfs, check_struct_layer) {
+  Graph graph(5);
+  Shape sh1({1, 5, 5, 3});
+  std::vector<float> vec;
+  vec.reserve(75);
+  for (int i = 0; i < 75; ++i) {
+    vec.push_back(3);
+  }
+  Tensor input = make_tensor(vec, sh1);
+  Tensor output = make_tensor(vec, sh1);
+  InputLayer a1(kNhwc, kNchw, 1, 2);
+  a1.setName(kInput);
+  std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
+  Shape sh2({3, 3});
+  Tensor kernel = make_tensor(kernelvec, sh2);
+  ConvolutionalLayer a2(1, 0, 1, kernel);
+  ConvolutionalLayer a3(1, 0, 1, kernel);
+
+  // EWLayer a4("linear", 2.0F, 3.0F);
+  // a2.postops.layers.push_back(&a4);
+  // a2.postops.count++;
+
+  a2.setName(kConvolution);
+  a3.setName(kConvolution);
+  graph.setInput(a1, input);
+  graph.makeConnection(a1, a2);
+  graph.makeConnection(a2, a3);
+  graph.setOutput(a3, output);
+  graph.inference();
+  std::vector<float> tmp = *output.as<float>();
+  std::vector<float> res = {81, 81, 81};
+  ASSERT_EQ(tmp, res);
+}
+TEST(bfs, check_struct_layer_added) {
+  Graph graph(5);
+  Shape sh1({1, 5, 5, 3});
+  std::vector<float> vec;
+  vec.reserve(75);
+  for (int i = 0; i < 75; ++i) {
+    vec.push_back(3);
+  }
+  Tensor input = make_tensor(vec, sh1);
+  Tensor output = make_tensor(vec, sh1);
+  InputLayer a1(kNhwc, kNchw, 1, 2);
+  a1.setName(kInput);
+  std::vector<float> kernelvec = {1, 1, 1, 1, 1, 1, 1, 1, 1};
+  Shape sh2({3, 3});
+  Tensor kernel = make_tensor(kernelvec, sh2);
+  ConvolutionalLayer a2(1, 0, 1, kernel);
+  ConvolutionalLayer a3(1, 0, 1, kernel);
+
+  EWLayer a4("linear", 2.0F, 3.0F);
+  a2.postops.layers.push_back(&a4);
+  a2.postops.count++;
+
+  a2.setName(kConvolution);
+  a3.setName(kConvolution);
+  graph.setInput(a1, input);
+  graph.makeConnection(a1, a2);
+  graph.makeConnection(a2, a3);
+  graph.setOutput(a3, output);
+  graph.inference();
+  std::vector<float> tmp = *output.as<float>();
+  std::vector<float> res = {189, 189, 189};
+  ASSERT_EQ(tmp, res);
+}
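
Usage of the new mechanism, for review context: a layer now owns a PostOperations list that Graph::inference() runs on the layer's output before control passes to the next graph node, and build_graph folds a relu EWLayer that follows a convolution into that convolution's postops instead of making it a separate node. A minimal sketch, mirroring the check_struct_layer_added test above; the kernel values are illustrative and the ConvolutionalLayer argument meanings are assumed from the tests:

    // Attach an element-wise post-operation to a convolution layer.
    Tensor kernel = make_tensor(std::vector<float>(9, 1), Shape({3, 3}));
    ConvolutionalLayer conv(1, 0, 1, kernel);  // same ctor arguments as the tests
    EWLayer ew("linear", 2.0F, 3.0F);          // applied to conv's output
    conv.postops.layers.push_back(&ew);        // raw Layer*: ew must outlive inference()
    conv.postops.count++;                      // inference() iterates j in [0, count)
    // Graph::inference() runs conv, pipes its output through each attached
    // post-op in order, and only then feeds the result to the next node.

Note that PostOperations::count duplicates layers.size(), so every push_back must be paired with a count increment by hand, as in build.cpp and the test above.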