From e916531b0ecb8568168a2c319e188f0e196f5916 Mon Sep 17 00:00:00 2001
From: MikeMuradov
Date: Tue, 5 Aug 2025 20:03:57 +0300
Subject: [PATCH 1/5] first try

---
 include/layers/ConCatLayer.hpp         |  32 +++++
 src/layers/ConCatLayer.cpp             | 142 ++++++++++++++++++++++
 test/single_layer/test_concatlayer.cpp | 159 +++++++++++++++++++++++++
 3 files changed, 333 insertions(+)
 create mode 100644 include/layers/ConCatLayer.hpp
 create mode 100644 src/layers/ConCatLayer.cpp
 create mode 100644 test/single_layer/test_concatlayer.cpp

diff --git a/include/layers/ConCatLayer.hpp b/include/layers/ConCatLayer.hpp
new file mode 100644
index 00000000..d76f0ee2
--- /dev/null
+++ b/include/layers/ConCatLayer.hpp
@@ -0,0 +1,32 @@
+#pragma once
+#include <cstdint>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "layers/Layer.hpp"
+#include "layers/Tensor.hpp"
+
+namespace it_lab_ai {
+
+class ConcatLayer : public Layer {
+ public:
+  explicit ConcatLayer(int64_t axis = 0) : axis_(axis) {}
+
+  void run(const Tensor& input, Tensor& output) override;
+  void run(const std::vector<Tensor>& inputs, Tensor& output);
+
+  static std::string get_name() { return "ConcatLayer"; }
+
+ private:
+  int64_t axis_;
+
+  void validate_inputs(const std::vector<Tensor>& inputs) const;
+  int64_t normalize_axis(size_t rank) const;
+  Shape calculate_output_shape(const std::vector<Tensor>& inputs) const;
+
+  template <typename T>
+  void concatenate(const std::vector<Tensor>& inputs, Tensor& output) const;
+};
+
+}  // namespace it_lab_ai
\ No newline at end of file
diff --git a/src/layers/ConCatLayer.cpp b/src/layers/ConCatLayer.cpp
new file mode 100644
index 00000000..add05d09
--- /dev/null
+++ b/src/layers/ConCatLayer.cpp
@@ -0,0 +1,142 @@
+#include "layers/ConCatLayer.hpp"
+
+namespace it_lab_ai {
+
+void ConcatLayer::run(const Tensor& input, Tensor& output) { output = input; }
+
+void ConcatLayer::run(const std::vector<Tensor>& inputs, Tensor& output) {
+  if (inputs.empty()) {
+    throw std::runtime_error("ConcatLayer: No input tensors provided");
+  }
+
+  validate_inputs(inputs);
+  int64_t normalized_axis = normalize_axis(inputs[0].get_shape().dims());
+
+  switch (inputs[0].get_type()) {
+    case Type::kFloat:
+      concatenate<float>(inputs, output);
+      break;
+    case Type::kInt:
+      concatenate<int>(inputs, output);
+      break;
+    default:
+      throw std::runtime_error("ConcatLayer: Unsupported input tensor type");
+  }
+}
+
+void ConcatLayer::validate_inputs(const std::vector<Tensor>& inputs) const {
+  if (inputs.empty()) return;
+
+  const Shape& first_shape = inputs[0].get_shape();
+  Type first_type = inputs[0].get_type();
+
+  for (size_t i = 1; i < inputs.size(); ++i) {
+    const Shape& shape = inputs[i].get_shape();
+    if (shape.dims() != first_shape.dims()) {
+      throw std::runtime_error(
+          "ConcatLayer: All input tensors must have the same rank");
+    }
+
+    if (inputs[i].get_type() != first_type) {
+      throw std::runtime_error(
+          "ConcatLayer: All input tensors must have the same type");
+    }
+
+    for (size_t dim = 0; dim < shape.dims(); ++dim) {
+      if (dim != static_cast<size_t>(axis_) && shape[dim] != first_shape[dim]) {
+        throw std::runtime_error(
+            "ConcatLayer: All input tensors must have the same shape except "
+            "for the concatenation axis_");
+      }
+    }
+  }
+}
+
+int64_t ConcatLayer::normalize_axis(size_t rank) const {
+  if (rank == 0) {
+    throw std::runtime_error("ConcatLayer: Cannot concatenate scalar tensors");
+  }
+
+  if (axis_ < -static_cast<int64_t>(rank) ||
+      axis_ >= static_cast<int64_t>(rank)) {
+    throw std::runtime_error(
+        "ConcatLayer: Axis out of range. Valid range is [-" +
+        std::to_string(rank) + ", " + std::to_string(rank - 1) + "]");
+  }
+
+  return axis_ < 0 ? axis_ + rank : axis_;
+}
+
+Shape ConcatLayer::calculate_output_shape(const std::vector<Tensor>& inputs) const {
+  if (inputs.empty()) return Shape({});
+
+  const Shape& first_shape = inputs[0].get_shape();
+  std::vector<size_t> output_dims(first_shape.dims());
+  for (size_t i = 0; i < first_shape.dims(); ++i) {
+    output_dims[i] = first_shape[i];
+  }
+
+  output_dims[axis_] = 0;
+  for (const auto& input : inputs) {
+    output_dims[axis_] += input.get_shape()[axis_];
+  }
+
+  return Shape(output_dims);
+}
+
+template <typename T>
+void ConcatLayer::concatenate(const std::vector<Tensor>& inputs,
+                              Tensor& output) const {
+  Shape output_shape = calculate_output_shape(inputs);
+  std::vector<T> output_data(output_shape.count(), 0);
+
+  const int64_t axis = axis_;
+  const size_t outer_size = [&]() {
+    size_t size = 1;
+    for (int64_t i = 0; i < axis; ++i) {
+      size *= output_shape[i];
+    }
+    return size;
+  }();
+
+  const size_t inner_size = [&]() {
+    size_t size = 1;
+    for (size_t i = axis + 1; i < output_shape.dims(); ++i) {
+      size *= output_shape[i];
+    }
+    return size;
+  }();
+
+  size_t output_offset = 0;
+
+  for (const auto& input : inputs) {
+    const auto& input_data = *input.as<T>();
+    const Shape& input_shape = input.get_shape();
+    const size_t input_axis_size = input_shape[axis];
+
+    for (size_t outer = 0; outer < outer_size; ++outer) {
+      for (size_t inner = 0; inner < inner_size; ++inner) {
+        for (size_t a = 0; a < input_axis_size; ++a) {
+          size_t input_pos =
+              outer * input_axis_size * inner_size + a * inner_size + inner;
+
+          size_t output_pos = outer * (output_shape[axis] * inner_size) +
+                              (output_offset + a) * inner_size + inner;
+
+          output_data[output_pos] = input_data[input_pos];
+        }
+      }
+    }
+
+    output_offset += input_axis_size;
+  }
+
+  output = make_tensor(output_data, output_shape);
+}
+
+template void ConcatLayer::concatenate<float>(const std::vector<Tensor>&,
+                                              Tensor&) const;
+template void ConcatLayer::concatenate<int>(const std::vector<Tensor>&,
+                                            Tensor&) const;
+
+}  // namespace it_lab_ai
\ No newline at end of file
diff --git a/test/single_layer/test_concatlayer.cpp b/test/single_layer/test_concatlayer.cpp
new file mode 100644
index 00000000..be15e640
--- /dev/null
+++ b/test/single_layer/test_concatlayer.cpp
@@ -0,0 +1,159 @@
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "layers/ConCatLayer.hpp"
+#include "layers/Tensor.hpp"
+
+using namespace it_lab_ai;
+
+class ConcatLayerTests : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    data1 = {1.0f, 2.0f, 3.0f, 4.0f};
+    data2 = {5.0f, 6.0f, 7.0f, 8.0f};
+    data_int = {1, 2, 3, 4};
+  }
+
+  std::vector<float> data1;
+  std::vector<float> data2;
+  std::vector<int> data_int;
+};
+
+TEST_F(ConcatLayerTests, ConcatSameShapeFloatAxis0) {
+  ConcatLayer layer;
+  Tensor input1 = make_tensor(data1, {2, 2});
+  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({4, 2}));
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0}), 1.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1}), 2.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0}), 3.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1}), 4.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({2, 0}), 5.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({2, 1}), 6.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({3, 0}), 7.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({3, 1}), 8.0f);
+}
+
+TEST_F(ConcatLayerTests, ConcatSameShapeIntAxis1) {
+  ConcatLayer layer(1);
+  Tensor input1 = make_tensor(data_int, {2, 2});
+  Tensor input2 = make_tensor(data_int, {2, 2});
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 4}));
+  auto* result = output.as<int>();
+  EXPECT_EQ((*result)[0], 1);
+  EXPECT_EQ((*result)[3], 2);
+  EXPECT_EQ((*result)[4], 3);
+  EXPECT_EQ((*result)[7], 4);
+}
+
+TEST_F(ConcatLayerTests, Concat3DTensorsAxis2) {
+  ConcatLayer layer(2);
+  Tensor input1 = make_tensor<float>({1, 2, 3, 4, 5, 6, 7, 8}, {2, 2, 2});
+  Tensor input2 =
+      make_tensor<float>({9, 10, 11, 12, 13, 14, 15, 16}, {2, 2, 2});
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 2, 4}));
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[4], 5.0f);
+  EXPECT_FLOAT_EQ((*result)[11], 16.0f);
+}
+
+TEST_F(ConcatLayerTests, NegativeAxis) {
+  ConcatLayer layer(-1);
+  Tensor input1 = make_tensor(data1, {2, 2});
+  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 4}));
+}
+
+TEST_F(ConcatLayerTests, DynamicAxis) {
+  ConcatLayer layer(1);
+  Tensor input1 = make_tensor(data1, {2, 2});
+  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 4}));
+}
+
+TEST_F(ConcatLayerTests, IncompatibleShapes) {
+  ConcatLayer layer(0);
+  Tensor input1 = make_tensor(data1, {4});
+  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor output;
+
+  EXPECT_THROW(layer.run({input1, input2}, output), std::runtime_error);
+}
+
+TEST_F(ConcatLayerTests, LayerName) {
+  EXPECT_EQ(ConcatLayer::get_name(), "ConcatLayer");
+}
+
+TEST_F(ConcatLayerTests, EmptyTensors) {
+  ConcatLayer layer(0);
+  Tensor empty1({}, Type::kFloat);
+  Tensor empty2({}, Type::kFloat);
+  Tensor output;
+
+  EXPECT_NO_THROW(layer.run({empty1, empty2}, output));
+}
+
+TEST_F(ConcatLayerTests, ConcatMultipleTensors) {
+  ConcatLayer layer(0);
+  Tensor input1 = make_tensor<float>({1, 2}, {2});
+  Tensor input2 = make_tensor<float>({3, 4}, {2});
+  Tensor input3 = make_tensor<float>({5, 6}, {2});
+  Tensor output;
+
+  layer.run({input1, input2, input3}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({6}));
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[5], 6.0f);
+}
+
+TEST_F(ConcatLayerTests, ConcatDifferentTypes) {
+  ConcatLayer layer(0);
+  Tensor input1 = make_tensor(data1, {4});
+  Tensor input2 = make_tensor(data_int, {4});
+  Tensor output;
+
+  EXPECT_THROW(layer.run({input1, input2}, output), std::runtime_error);
+}
+
+TEST_F(ConcatLayerTests, ConcatResNetStyle) {
+  ConcatLayer layer(1);
+  Tensor input1 = make_tensor<float>({1, 2, 3, 4, 5, 6, 7, 8}, {1, 2, 2, 2});
+  Tensor input2 =
+      make_tensor<float>({9, 10, 11, 12, 13, 14, 15, 16}, {1, 2, 2, 2});
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({1, 4, 2, 2}));
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
+  EXPECT_FLOAT_EQ((*result)[8], 9.0f);
+  EXPECT_FLOAT_EQ((*result)[15], 16.0f);
+}
\ No newline at end of file

From 9db32aa427996a1f651a66d3dbb4fb9f4b2bad8d Mon Sep 17 00:00:00 2001
From: MikeMuradov
Date: Fri, 8 Aug 2025 17:05:17 +0300
Subject: [PATCH 2/5] fix realization

---
 src/layers/ConCatLayer.cpp             |  39 ++++---
 test/single_layer/test_concatlayer.cpp | 136 +++++++++++--------------
 2 files changed, 86 insertions(+), 89 deletions(-)

diff --git a/src/layers/ConCatLayer.cpp b/src/layers/ConCatLayer.cpp
index add05d09..ca98c23c 100644
--- a/src/layers/ConCatLayer.cpp
+++ b/src/layers/ConCatLayer.cpp
@@ -29,6 +29,7 @@ void ConcatLayer::validate_inputs(const std::vector<Tensor>& inputs) const {
 
   const Shape& first_shape = inputs[0].get_shape();
   Type first_type = inputs[0].get_type();
+  const int64_t normalized_axis = normalize_axis(first_shape.dims());
 
   for (size_t i = 1; i < inputs.size(); ++i) {
     const Shape& shape = inputs[i].get_shape();
@@ -43,10 +44,11 @@ void ConcatLayer::validate_inputs(const std::vector<Tensor>& inputs) const {
     }
 
     for (size_t dim = 0; dim < shape.dims(); ++dim) {
-      if (dim != static_cast<size_t>(axis_) && shape[dim] != first_shape[dim]) {
+      if (dim != static_cast<size_t>(normalized_axis) &&
+          shape[dim] != first_shape[dim]) {
         throw std::runtime_error(
             "ConcatLayer: All input tensors must have the same shape except "
-            "for the concatenation axis_");
+            "for the concatenation axis");
       }
     }
   }
@@ -57,17 +59,23 @@ int64_t ConcatLayer::normalize_axis(size_t rank) const {
     throw std::runtime_error("ConcatLayer: Cannot concatenate scalar tensors");
   }
 
-  if (axis_ < -static_cast<int64_t>(rank) ||
-      axis_ >= static_cast<int64_t>(rank)) {
-    throw std::runtime_error(
-        "ConcatLayer: Axis out of range. Valid range is [-" +
-        std::to_string(rank) + ", " + std::to_string(rank - 1) + "]");
+  int64_t axis = axis_;
+
+  if (axis < 0) {
+    axis += static_cast<int64_t>(rank);
+  }
+
+  if (axis < 0 || axis >= static_cast<int64_t>(rank)) {
+    throw std::runtime_error("ConcatLayer: Axis " + std::to_string(axis_) +
+                             " out of range for tensor rank " +
+                             std::to_string(rank));
   }
 
-  return axis_ < 0 ? axis_ + rank : axis_;
+  return axis;
 }
 
-Shape ConcatLayer::calculate_output_shape(const std::vector<Tensor>& inputs) const {
+Shape ConcatLayer::calculate_output_shape(
+    const std::vector<Tensor>& inputs) const {
   if (inputs.empty()) return Shape({});
 
   const Shape& first_shape = inputs[0].get_shape();
@@ -76,9 +84,10 @@ Shape ConcatLayer::calculate_output_shape(const std::vector<Tensor>& inputs) con
     output_dims[i] = first_shape[i];
   }
 
-  output_dims[axis_] = 0;
+  const int64_t normalized_axis = normalize_axis(first_shape.dims());
+  output_dims[normalized_axis] = 0;
   for (const auto& input : inputs) {
-    output_dims[axis_] += input.get_shape()[axis_];
+    output_dims[normalized_axis] += input.get_shape()[normalized_axis];
   }
 
   return Shape(output_dims);
@@ -90,7 +99,7 @@ void ConcatLayer::concatenate(const std::vector<Tensor>& inputs,
   Shape output_shape = calculate_output_shape(inputs);
   std::vector<T> output_data(output_shape.count(), 0);
 
-  const int64_t axis = axis_;
+  const int64_t axis = normalize_axis(inputs[0].get_shape().dims());
   const size_t outer_size = [&]() {
     size_t size = 1;
     for (int64_t i = 0; i < axis; ++i) {
@@ -115,12 +124,12 @@ void ConcatLayer::concatenate(const std::vector<Tensor>& inputs,
     const size_t input_axis_size = input_shape[axis];
 
     for (size_t outer = 0; outer < outer_size; ++outer) {
-      for (size_t inner = 0; inner < inner_size; ++inner) {
-        for (size_t a = 0; a < input_axis_size; ++a) {
+      for (size_t a = 0; a < input_axis_size; ++a) {
+        for (size_t inner = 0; inner < inner_size; ++inner) {
           size_t input_pos =
               outer * input_axis_size * inner_size + a * inner_size + inner;
 
-          size_t output_pos = outer * (output_shape[axis] * inner_size) +
+          size_t output_pos = outer * output_shape[axis] * inner_size +
                               (output_offset + a) * inner_size + inner;
 
           output_data[output_pos] = input_data[input_pos];
diff --git a/test/single_layer/test_concatlayer.cpp b/test/single_layer/test_concatlayer.cpp
index be15e640..36fb3b62 100644
--- a/test/single_layer/test_concatlayer.cpp
+++ b/test/single_layer/test_concatlayer.cpp
@@ -42,18 +42,23 @@ TEST_F(ConcatLayerTests, ConcatSameShapeFloatAxis0) {
 
 TEST_F(ConcatLayerTests, ConcatSameShapeIntAxis1) {
   ConcatLayer layer(1);
-  Tensor input1 = make_tensor(data_int, {2, 2});
-  Tensor input2 = make_tensor(data_int, {2, 2});
+  Tensor input1 = make_tensor<int>({1, 2, 3, 4}, {2, 2});
+  Tensor input2 = make_tensor<int>({1, 2, 3, 4}, {2, 2});
   Tensor output;
 
   layer.run({input1, input2}, output);
 
   ASSERT_EQ(output.get_shape(), Shape({2, 4}));
-  auto* result = output.as<int>();
-  EXPECT_EQ((*result)[0], 1);
-  EXPECT_EQ((*result)[3], 2);
-  EXPECT_EQ((*result)[4], 3);
-  EXPECT_EQ((*result)[7], 4);
+
+  EXPECT_EQ(output.get<int>({0, 0}), 1);
+  EXPECT_EQ(output.get<int>({0, 1}), 2);
+  EXPECT_EQ(output.get<int>({0, 2}), 1);
+  EXPECT_EQ(output.get<int>({0, 3}), 2);
+
+  EXPECT_EQ(output.get<int>({1, 0}), 3);
+  EXPECT_EQ(output.get<int>({1, 1}), 4);
+  EXPECT_EQ(output.get<int>({1, 2}), 3);
+  EXPECT_EQ(output.get<int>({1, 3}), 4);
 }
 
 TEST_F(ConcatLayerTests, Concat3DTensorsAxis2) {
@@ -66,80 +71,47 @@ TEST_F(ConcatLayerTests, Concat3DTensorsAxis2) {
   layer.run({input1, input2}, output);
 
   ASSERT_EQ(output.get_shape(), Shape({2, 2, 4}));
-  auto* result = output.as<float>();
-  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
-  EXPECT_FLOAT_EQ((*result)[3], 4.0f);
-  EXPECT_FLOAT_EQ((*result)[4], 5.0f);
-  EXPECT_FLOAT_EQ((*result)[11], 16.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 0}), 1.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 1}), 2.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 0}), 3.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 1}), 4.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 2}), 9.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 3}), 10.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 2}), 11.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 3}), 12.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0, 0}), 5.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0, 1}), 6.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1, 0}), 7.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1, 1}), 8.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0, 2}), 13.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0, 3}), 14.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1, 2}), 15.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1, 3}), 16.0f);
 }
 
 TEST_F(ConcatLayerTests, NegativeAxis) {
   ConcatLayer layer(-1);
-  Tensor input1 = make_tensor(data1, {2, 2});
-  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor input1 = make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
+  Tensor input2 = make_tensor<float>({5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
   Tensor output;
 
   layer.run({input1, input2}, output);
 
   ASSERT_EQ(output.get_shape(), Shape({2, 4}));
-}
-
-TEST_F(ConcatLayerTests, DynamicAxis) {
-  ConcatLayer layer(1);
-  Tensor input1 = make_tensor(data1, {2, 2});
-  Tensor input2 = make_tensor(data2, {2, 2});
-  Tensor output;
-
-  layer.run({input1, input2}, output);
-
-  ASSERT_EQ(output.get_shape(), Shape({2, 4}));
-}
-
-TEST_F(ConcatLayerTests, IncompatibleShapes) {
-  ConcatLayer layer(0);
-  Tensor input1 = make_tensor(data1, {4});
-  Tensor input2 = make_tensor(data2, {2, 2});
-  Tensor output;
-
-  EXPECT_THROW(layer.run({input1, input2}, output), std::runtime_error);
-}
-
-TEST_F(ConcatLayerTests, LayerName) {
-  EXPECT_EQ(ConcatLayer::get_name(), "ConcatLayer");
-}
-
-TEST_F(ConcatLayerTests, EmptyTensors) {
-  ConcatLayer layer(0);
-  Tensor empty1({}, Type::kFloat);
-  Tensor empty2({}, Type::kFloat);
-  Tensor output;
-
-  EXPECT_NO_THROW(layer.run({empty1, empty2}, output));
-}
-
-TEST_F(ConcatLayerTests, ConcatMultipleTensors) {
-  ConcatLayer layer(0);
-  Tensor input1 = make_tensor<float>({1, 2}, {2});
-  Tensor input2 = make_tensor<float>({3, 4}, {2});
-  Tensor input3 = make_tensor<float>({5, 6}, {2});
-  Tensor output;
-
-  layer.run({input1, input2, input3}, output);
-
-  ASSERT_EQ(output.get_shape(), Shape({6}));
-  auto* result = output.as<float>();
-  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
-  EXPECT_FLOAT_EQ((*result)[3], 4.0f);
-  EXPECT_FLOAT_EQ((*result)[5], 6.0f);
-}
-
-TEST_F(ConcatLayerTests, ConcatDifferentTypes) {
-  ConcatLayer layer(0);
-  Tensor input1 = make_tensor(data1, {4});
-  Tensor input2 = make_tensor(data_int, {4});
-  Tensor output;
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0}), 1.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1}), 2.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2}), 5.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 3}), 6.0f);
 
-  EXPECT_THROW(layer.run({input1, input2}, output), std::runtime_error);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0}), 3.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1}), 4.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 2}), 7.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 3}), 8.0f);
 }
 
 TEST_F(ConcatLayerTests, ConcatResNetStyle) {
@@ -152,8 +124,24 @@ TEST_F(ConcatLayerTests, ConcatResNetStyle) {
   layer.run({input1, input2}, output);
 
   ASSERT_EQ(output.get_shape(), Shape({1, 4, 2, 2}));
-  auto* result = output.as<float>();
-  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
-  EXPECT_FLOAT_EQ((*result)[8], 9.0f);
-  EXPECT_FLOAT_EQ((*result)[15], 16.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 0, 0}), 1.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 0, 1}), 2.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 1, 0}), 3.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 1, 1}), 4.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 0, 0}), 5.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 0, 1}), 6.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 1, 0}), 7.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 1, 1}), 8.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2, 0, 0}), 9.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2, 0, 1}), 10.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2, 1, 0}), 11.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2, 1, 1}), 12.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 3, 0, 0}), 13.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 3, 0, 1}), 14.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 3, 1, 0}), 15.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 3, 1, 1}), 16.0f);
 }
\ No newline at end of file

From 5f8287ca44c1719c1b9910b549a91430ae03c925 Mon Sep 17 00:00:00 2001
From: MikeMuradov
Date: Fri, 8 Aug 2025 17:29:24 +0300
Subject: [PATCH 3/5] fix tests

---
 src/layers/ConCatLayer.cpp             |  1 -
 test/single_layer/test_concatlayer.cpp | 27 +++++++------------------
 2 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/src/layers/ConCatLayer.cpp b/src/layers/ConCatLayer.cpp
index ca98c23c..a744caa6 100644
--- a/src/layers/ConCatLayer.cpp
+++ b/src/layers/ConCatLayer.cpp
@@ -10,7 +10,6 @@ void ConcatLayer::run(const std::vector<Tensor>& inputs, Tensor& output) {
   }
 
   validate_inputs(inputs);
-  int64_t normalized_axis = normalize_axis(inputs[0].get_shape().dims());
 
   switch (inputs[0].get_type()) {
     case Type::kFloat:
diff --git a/test/single_layer/test_concatlayer.cpp b/test/single_layer/test_concatlayer.cpp
index 36fb3b62..42d8e151 100644
--- a/test/single_layer/test_concatlayer.cpp
+++ b/test/single_layer/test_concatlayer.cpp
@@ -6,23 +6,10 @@
 
 using namespace it_lab_ai;
 
-class ConcatLayerTests : public ::testing::Test {
- protected:
-  void SetUp() override {
-    data1 = {1.0f, 2.0f, 3.0f, 4.0f};
-    data2 = {5.0f, 6.0f, 7.0f, 8.0f};
-    data_int = {1, 2, 3, 4};
-  }
-
-  std::vector<float> data1;
-  std::vector<float> data2;
-  std::vector<int> data_int;
-};
-
-TEST_F(ConcatLayerTests, ConcatSameShapeFloatAxis0) {
+TEST(ConcatLayerTests, ConcatSameShapeFloatAxis0) {
   ConcatLayer layer;
-  Tensor input1 = make_tensor(data1, {2, 2});
-  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor input1 = make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
+  Tensor input2 = make_tensor<float>({5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
   Tensor output;
 
   layer.run({input1, input2}, output);
@@ -40,7 +27,7 @@ TEST_F(ConcatLayerTests, ConcatSameShapeFloatAxis0) {
   EXPECT_FLOAT_EQ(output.get<float>({3, 1}), 8.0f);
 }
 
-TEST_F(ConcatLayerTests, ConcatSameShapeIntAxis1) {
+TEST(ConcatLayerTests, ConcatSameShapeIntAxis1) {
   ConcatLayer layer(1);
   Tensor input1 = make_tensor<int>({1, 2, 3, 4}, {2, 2});
   Tensor input2 = make_tensor<int>({1, 2, 3, 4}, {2, 2});
@@ -61,7 +48,7 @@ TEST_F(ConcatLayerTests, ConcatSameShapeIntAxis1) {
   EXPECT_EQ(output.get<int>({1, 3}), 4);
 }
 
-TEST_F(ConcatLayerTests, Concat3DTensorsAxis2) {
+TEST(ConcatLayerTests, Concat3DTensorsAxis2) {
   ConcatLayer layer(2);
   Tensor input1 = make_tensor<float>({1, 2, 3, 4, 5, 6, 7, 8}, {2, 2, 2});
   Tensor input2 =
@@ -93,7 +80,7 @@ TEST_F(ConcatLayerTests, Concat3DTensorsAxis2) {
   EXPECT_FLOAT_EQ(output.get<float>({1, 1, 3}), 16.0f);
 }
 
-TEST_F(ConcatLayerTests, NegativeAxis) {
+TEST(ConcatLayerTests, NegativeAxis) {
   ConcatLayer layer(-1);
   Tensor input1 = make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
   Tensor input2 = make_tensor<float>({5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
@@ -114,7 +101,7 @@ TEST_F(ConcatLayerTests, NegativeAxis) {
   EXPECT_FLOAT_EQ(output.get<float>({1, 3}), 8.0f);
 }
 
-TEST_F(ConcatLayerTests, ConcatResNetStyle) {
+TEST(ConcatLayerTests, ConcatResNetStyle) {
   ConcatLayer layer(1);
   Tensor input1 = make_tensor<float>({1, 2, 3, 4, 5, 6, 7, 8}, {1, 2, 2, 2});
   Tensor input2 =

From 4cde14abb8f09afa7cf88cde1e10b85d3435d81b Mon Sep 17 00:00:00 2001
From: MikeMuradov
Date: Fri, 8 Aug 2025 18:50:22 +0300
Subject: [PATCH 4/5] fix name and add tests

---
 .../{ConCatLayer.hpp => ConcatLayer.hpp}   |  0
 .../{ConCatLayer.cpp => ConcatLayer.cpp}   |  2 +-
 test/single_layer/test_concatlayer.cpp     | 66 ++++++++++++++++++-
 3 files changed, 66 insertions(+), 2 deletions(-)
 rename include/layers/{ConCatLayer.hpp => ConcatLayer.hpp} (100%)
 rename src/layers/{ConCatLayer.cpp => ConcatLayer.cpp} (99%)

diff --git a/include/layers/ConCatLayer.hpp b/include/layers/ConcatLayer.hpp
similarity index 100%
rename from include/layers/ConCatLayer.hpp
rename to include/layers/ConcatLayer.hpp
diff --git a/src/layers/ConCatLayer.cpp b/src/layers/ConcatLayer.cpp
similarity index 99%
rename from src/layers/ConCatLayer.cpp
rename to src/layers/ConcatLayer.cpp
index a744caa6..e7008c1c 100644
--- a/src/layers/ConCatLayer.cpp
+++ b/src/layers/ConcatLayer.cpp
@@ -1,4 +1,4 @@
-#include "layers/ConCatLayer.hpp"
+#include "layers/ConatLayer.hpp"
 
 namespace it_lab_ai {
 
diff --git a/test/single_layer/test_concatlayer.cpp b/test/single_layer/test_concatlayer.cpp
index 42d8e151..65a1ab46 100644
--- a/test/single_layer/test_concatlayer.cpp
+++ b/test/single_layer/test_concatlayer.cpp
@@ -1,11 +1,75 @@
 #include <vector>
 
 #include "gtest/gtest.h"
-#include "layers/ConCatLayer.hpp"
+#include "layers/ConсatLayer.hpp"
 #include "layers/Tensor.hpp"
 
 using namespace it_lab_ai;
 
+TEST(ConcatLayerTests, ConcatEmptyTensors) {
+  ConcatLayer layer(0);
+
+  Tensor empty1 = make_tensor<float>({}, {0});
+  Tensor empty2 = make_tensor<float>({}, {2, 0, 3});
+
+  Tensor output;
+
+  EXPECT_THROW(layer.run({empty1, empty2}, output), std::runtime_error);
+}
+
+TEST(ConcatLayerTests, ConcatSingleElementTensors) {
+  ConcatLayer layer(0);
+
+  Tensor single1 = make_tensor<float>({42.0f}, {1});
+  Tensor single2 = make_tensor<float>({99.0f}, {1});
+
+  Tensor output;
+
+  layer.run({single1, single2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2}));
+  EXPECT_FLOAT_EQ(output.get<float>({0}), 42.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1}), 99.0f);
+}
+
+TEST(ConcatLayerTests, ConcatAlongAxisWithSize1) {
+  ConcatLayer layer(0);
+
+  Tensor input1 = make_tensor<float>({1, 2, 3, 4, 5, 6}, {1, 3, 2});
+  Tensor input2 = make_tensor<float>({7, 8, 9, 10, 11, 12}, {1, 3, 2});
+
+  Tensor output;
+
+  layer.run({input1, input2}, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 3, 2}));
+
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 0}), 1.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 0, 1}), 2.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 0}), 3.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 1, 1}), 4.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2, 0}), 5.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({0, 2, 1}), 6.0f);
+
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0, 0}), 7.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 0, 1}), 8.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1, 0}), 9.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 1, 1}), 10.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 2, 0}), 11.0f);
+  EXPECT_FLOAT_EQ(output.get<float>({1, 2, 1}), 12.0f);
+}
+
+TEST(ConcatLayerTests, ConcatScalars) {
+  ConcatLayer layer(0);
+
+  Tensor scalar1 = make_tensor<float>({42.0f}, {});
+  Tensor scalar2 = make_tensor<float>({99.0f}, {});
+
+  Tensor output;
+
+  EXPECT_THROW(layer.run({scalar1, scalar2}, output), std::runtime_error);
+}
+
 TEST(ConcatLayerTests, ConcatSameShapeFloatAxis0) {
   ConcatLayer layer;
   Tensor input1 = make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});

From 02816ffa12cc15e8ef382a9c1b034893e41da276 Mon Sep 17 00:00:00 2001
From: MikeMuradov
Date: Fri, 8 Aug 2025 18:57:44 +0300
Subject: [PATCH 5/5] fix c

---
 src/layers/ConcatLayer.cpp             | 2 +-
 test/single_layer/test_concatlayer.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/layers/ConcatLayer.cpp b/src/layers/ConcatLayer.cpp
index e7008c1c..fc20269e 100644
--- a/src/layers/ConcatLayer.cpp
+++ b/src/layers/ConcatLayer.cpp
@@ -1,4 +1,4 @@
-#include "layers/ConatLayer.hpp"
+#include "layers/ConcatLayer.hpp"
 
 namespace it_lab_ai {
 
diff --git a/test/single_layer/test_concatlayer.cpp b/test/single_layer/test_concatlayer.cpp
index 65a1ab46..4c144cc9 100644
--- a/test/single_layer/test_concatlayer.cpp
+++ b/test/single_layer/test_concatlayer.cpp
@@ -1,7 +1,7 @@
 #include <vector>
 
 #include "gtest/gtest.h"
-#include "layers/ConсatLayer.hpp"
+#include "layers/ConcatLayer.hpp"
 #include "layers/Tensor.hpp"
 
 using namespace it_lab_ai;