diff --git a/app/Accuracy/accuracy_check.cpp b/app/Accuracy/accuracy_check.cpp
index dbf1bd73..d97648dd 100644
--- a/app/Accuracy/accuracy_check.cpp
+++ b/app/Accuracy/accuracy_check.cpp
@@ -7,7 +7,7 @@
 #include "layers/OutputLayer.hpp"
 #include "layers/PoolingLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 int main() {
   std::string image_path = IMAGE1_PATH;
diff --git a/app/AccuracyImgNet/accimgnet.cpp b/app/AccuracyImgNet/accimgnet.cpp
index c889db41..87008f2d 100644
--- a/app/AccuracyImgNet/accimgnet.cpp
+++ b/app/AccuracyImgNet/accimgnet.cpp
@@ -10,7 +10,7 @@
 #include "layers/OutputLayer.hpp"
 #include "layers/PoolingLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 bool cmp_by_first(const std::pair& a,
                   const std::pair& b) {
diff --git a/app/Graph/acc_check_mnist.cpp b/app/Graph/acc_check_mnist.cpp
index 4c1307e3..f2cf5ef4 100644
--- a/app/Graph/acc_check_mnist.cpp
+++ b/app/Graph/acc_check_mnist.cpp
@@ -5,7 +5,7 @@
 #include "build.cpp"
 #include "build.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 int main(int argc, char* argv[]) {
   bool parallel = false;
diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp
index da2a1bfd..fb514735 100644
--- a/app/Graph/build.cpp
+++ b/app/Graph/build.cpp
@@ -86,7 +86,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
       Tensor tmp_bias = make_tensor(tensor.get_bias());
       Tensor tmp_tensor =
           Tensor(Shape({tensor.get_shape()[1], tensor.get_shape()[0]}),
-                 itlab_2023::Type::kFloat);
+                 it_lab_ai::Type::kFloat);
       // kernel is always transposed ?
       for (size_t h = 0; h < tensor.get_shape()[0]; h++) {
         for (size_t w = 0; w < tensor.get_shape()[1]; w++) {
diff --git a/app/Graph/graph_build.cpp b/app/Graph/graph_build.cpp
index aa847b63..309e944c 100644
--- a/app/Graph/graph_build.cpp
+++ b/app/Graph/graph_build.cpp
@@ -2,7 +2,7 @@
 #include "build.hpp"
 
 namespace fs = std::filesystem;
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 int main(int argc, char* argv[]) {
   std::string image_folder = IMAGE1_PATH;
diff --git a/include/Weights_Reader/reader_weights.hpp b/include/Weights_Reader/reader_weights.hpp
index 867ee1c1..de91ecf1 100644
--- a/include/Weights_Reader/reader_weights.hpp
+++ b/include/Weights_Reader/reader_weights.hpp
@@ -5,7 +5,7 @@
 #include "layers/Tensor.hpp"
 
 using json = nlohmann::json;
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 json read_json(const std::string& filename);
 void extract_values_from_json(const json& j, std::vector& values);
diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp
index 96d46d27..25713611 100644
--- a/include/graph/graph.hpp
+++ b/include/graph/graph.hpp
@@ -10,7 +10,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 class Graph {
   int BiggestSize_;
@@ -148,4 +148,4 @@ class Graph {
   std::vector<Tensor> getWEIGHTS() { return weights_; }
 #endif
 };
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/BinaryOpLayer.hpp b/include/layers/BinaryOpLayer.hpp
new file mode 100644
index 00000000..05926aad
--- /dev/null
+++ b/include/layers/BinaryOpLayer.hpp
@@ -0,0 +1,56 @@
+#pragma once
+#include <algorithm>
+#include <cstdint>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "Tensor.hpp"
+#include "layers/Layer.hpp"
+
+namespace it_lab_ai {
+
+class BinaryOpLayer : public Layer {
+ public:
+  enum class Operation : uint8_t { kMul, kAdd, kSub, kDiv };
+
+  BinaryOpLayer() = default;
+  explicit BinaryOpLayer(Operation op) : op_(op) {}
+
+  static std::string get_name() { return "Binary Operation Layer"; }
+  void run(const Tensor& input, Tensor& output) override;
+  void run(const Tensor& A, const Tensor& B, Tensor& output);
+  static bool is_scalar_tensor(const Tensor& t);
+
+#ifdef ENABLE_STATISTIC_WEIGHTS
+  Tensor get_weights() override {
+    std::vector<float> v = {0};
+    return make_tensor(v);
+  }
+#endif
+
+ private:
+  Operation op_ = Operation::kMul;
+
+  template <typename ValueType>
+  void run_with_scalar_impl(const Tensor& input, ValueType scalar,
+                            Tensor& output) const;
+  template <typename ValueType>
+  void run_broadcast_impl(const Tensor& A, const Tensor& B, Tensor& output,
+                          const Shape& output_shape) const;
+  void run_with_scalar(const Tensor& input, float scalar, Tensor& output) const;
+
+  static bool can_broadcast(const Shape& shape_A, const Shape& shape_B);
+  static Shape calculate_broadcasted_shape(const Shape& shape_A,
+                                           const Shape& shape_B);
+  static std::vector<size_t> get_strides(const Shape& shape);
+  static size_t get_broadcasted_index(
+      size_t flat_index, const Shape& input_shape, const Shape& output_shape,
+      const std::vector<size_t>& input_strides,
+      const std::vector<size_t>& output_strides);
+
+  template <typename ValueType>
+  class BinaryOpLayerImpl;
+};
+
+}  // namespace it_lab_ai
\ No newline at end of file
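A minimal usage sketch of the new layer, assuming the make_tensor<T> helper from layers/Tensor.hpp as used by the unit tests further below. The two-tensor run() overload is the intended entry point; the inherited single-tensor run() throws, as the implementation in src/layers/BinaryOpLayer.cpp shows.

#include "layers/BinaryOpLayer.hpp"

using namespace it_lab_ai;

void binary_op_usage_sketch() {
  BinaryOpLayer mul(BinaryOpLayer::Operation::kMul);
  // A 2x2 operand and a scalar tensor (every dimension == 1), which
  // is_scalar_tensor() detects, so the scalar fast path runs.
  Tensor a = make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
  Tensor two = make_tensor<float>({2.0f});
  Tensor out;
  mul.run(a, two, out);  // out holds {2, 4, 6, 8} with shape {2, 2}
}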
diff --git a/include/layers/ConvLayer.hpp b/include/layers/ConvLayer.hpp
index 219d35a9..4a0de298 100644
--- a/include/layers/ConvLayer.hpp
+++ b/include/layers/ConvLayer.hpp
@@ -6,7 +6,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 class ConvolutionalLayer : public Layer {
  private:
@@ -402,4 +402,4 @@ void Conv4DSTL(const Tensor& input, const Tensor& kernel_, const Tensor& bias_,
   output = make_tensor(one_d_vector, sh);
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/DropOutLayer.hpp b/include/layers/DropOutLayer.hpp
index ffdac5c4..23e5436b 100644
--- a/include/layers/DropOutLayer.hpp
+++ b/include/layers/DropOutLayer.hpp
@@ -3,7 +3,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 class DropOutLayer : public Layer {
  private:
@@ -16,4 +16,4 @@ class DropOutLayer : public Layer {
   void run(const Tensor& input, Tensor& output) override;
 };
-}  // namespace itlab_2023
\ No newline at end of file
+}  // namespace it_lab_ai
\ No newline at end of file
diff --git a/include/layers/EWLayer.hpp b/include/layers/EWLayer.hpp
index b75d2a60..6dd2a0f3 100644
--- a/include/layers/EWLayer.hpp
+++ b/include/layers/EWLayer.hpp
@@ -6,7 +6,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 template <typename T>
 T relu(const T& value) {
@@ -93,4 +93,4 @@ std::vector<ValueType> EWLayerImpl<ValueType>::run(
   return res;
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/FCLayer.hpp b/include/layers/FCLayer.hpp
index 667c6a39..7ddbe470 100644
--- a/include/layers/FCLayer.hpp
+++ b/include/layers/FCLayer.hpp
@@ -7,7 +7,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 class FCLayer : public Layer {
  private:
@@ -128,4 +128,4 @@ std::vector<ValueType> FCLayerImpl<ValueType>::run(
   return output_values;
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/FlattenLayer.hpp b/include/layers/FlattenLayer.hpp
index 462d7f5a..5636b21b 100644
--- a/include/layers/FlattenLayer.hpp
+++ b/include/layers/FlattenLayer.hpp
@@ -3,7 +3,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 std::vector reorder(std::vector order_vec,
                     std::vector order);
@@ -51,4 +51,4 @@ void Flatten4D(const Tensor& input, Tensor& output,
                     Shape({input.get_shape().count()}));
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/InputLayer.hpp b/include/layers/InputLayer.hpp
index 49ea4b24..1acf1cda 100644
--- a/include/layers/InputLayer.hpp
+++ b/include/layers/InputLayer.hpp
@@ -4,7 +4,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 enum LayInOut : uint8_t {
   kNchw,  // 0
@@ -179,4 +179,4 @@ class InputLayer : public Layer {
   }
 };
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/Layer.hpp b/include/layers/Layer.hpp
index 3a607e9b..9f641a94 100644
--- a/include/layers/Layer.hpp
+++ b/include/layers/Layer.hpp
@@ -9,7 +9,7 @@
 #include "layers/Tensor.hpp"
 #include "oneapi/tbb.h"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 enum LayerType : uint8_t {
   kInput,
@@ -65,4 +65,4 @@ class LayerImpl {
   Shape outputShape_;
 };
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/OutputLayer.hpp b/include/layers/OutputLayer.hpp
index 8a63283a..32b5efdf 100644
--- a/include/layers/OutputLayer.hpp
+++ b/include/layers/OutputLayer.hpp
@@ -7,7 +7,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 class OutputLayer : public Layer {
  public:
@@ -111,4 +111,4 @@ std::pair<std::vector<std::string>, std::vector<ValueType>> top_k_vec(
   return std::make_pair(res_labels, res_input);
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/PoolingLayer.hpp b/include/layers/PoolingLayer.hpp
index 3a9d8011..1dd19915 100644
--- a/include/layers/PoolingLayer.hpp
+++ b/include/layers/PoolingLayer.hpp
@@ -6,7 +6,7 @@
 
 #include "layers/Layer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 enum PoolingType : uint8_t { kAverage, kMax };
 
@@ -266,4 +266,4 @@ std::vector<ValueType> PoolingLayerImplTBB<ValueType>::run(
   return res;
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/layers/Shape.hpp b/include/layers/Shape.hpp
index 693a1512..6200a70a 100644
--- a/include/layers/Shape.hpp
+++ b/include/layers/Shape.hpp
@@ -8,7 +8,7 @@
 #include
 #include
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 class Shape {
  public:
@@ -40,9 +40,19 @@ class Shape {
   size_t dims() const noexcept { return dims_.size(); }
   size_t get_index(const std::vector<size_t>& coords) const;
   friend std::ostream& operator<<(std::ostream& os, const Shape& shape);
+  bool operator==(const Shape& other) const noexcept {
+    if (dims_.size() != other.dims_.size()) {
+      return false;
+    }
+    return std::equal(dims_.begin(), dims_.end(), other.dims_.begin());
+  }
+
+  bool operator!=(const Shape& other) const noexcept {
+    return !(*this == other);
+  }
 
  private:
   std::vector<size_t> dims_;
 };
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
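The Shape change above adds value equality: rank is compared first, then each dimension via std::equal, and operator!= simply negates the result. The new BinaryOpLayer tests below rely on this through ASSERT_EQ on shapes. A short sketch of the semantics (values are illustrative):

#include "layers/Shape.hpp"

using it_lab_ai::Shape;

void shape_equality_sketch() {
  Shape a({2, 3, 3});
  Shape b({2, 3, 3});
  Shape c({2, 3});
  bool same = (a == b);    // true: same rank, same dimensions
  bool differ = (a != c);  // true: the rank check fails before std::equal runs
  (void)same;
  (void)differ;
}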
diff --git a/include/layers/Tensor.hpp b/include/layers/Tensor.hpp
index e602e8c7..b1691f66 100644
--- a/include/layers/Tensor.hpp
+++ b/include/layers/Tensor.hpp
@@ -8,7 +8,7 @@
 
 #include "layers/Shape.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 enum class Type : uint8_t { kUnknown, kInt, kFloat };
 
@@ -207,4 +207,4 @@ Tensor make_tensor(const std::vector<T>& values, const Shape& shape,
 }
 
 std::ostream& operator<<(std::ostream& out, const Tensor& t);
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/include/perf/benchmarking.hpp b/include/perf/benchmarking.hpp
index ad985d7a..c0ee374d 100644
--- a/include/perf/benchmarking.hpp
+++ b/include/perf/benchmarking.hpp
@@ -9,7 +9,7 @@
 #include
 #include
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 template
@@ -114,4 +114,4 @@ T accuracy_norm(T* test, T* ref, const size_t size) {
   // typename T should have friend sqrt() function
   return std::sqrt(res);
 }
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/BinaryOpLayer.cpp b/src/layers/BinaryOpLayer.cpp
new file mode 100644
index 00000000..56ae2de7
--- /dev/null
+++ b/src/layers/BinaryOpLayer.cpp
@@ -0,0 +1,218 @@
+#include "layers/BinaryOpLayer.hpp"
+
+namespace it_lab_ai {
+
+namespace {
+template <typename T>
+T apply_binary_op(T a, T b, BinaryOpLayer::Operation op) {
+  switch (op) {
+    case BinaryOpLayer::Operation::kMul:
+      return a * b;
+    case BinaryOpLayer::Operation::kAdd:
+      return a + b;
+    case BinaryOpLayer::Operation::kSub:
+      return a - b;
+    case BinaryOpLayer::Operation::kDiv:
+      if (b == 0) throw std::runtime_error("Division by zero");
+      return a / b;
+    default:
+      throw std::runtime_error("Unsupported binary operation");
+  }
+}
+}  // namespace
+
+void BinaryOpLayer::run(const Tensor& input, Tensor& output) {
+  (void)input;
+  (void)output;
+  throw std::runtime_error(
+      "Use run(const Tensor& A, const Tensor& B, Tensor& output) for binary "
+      "operations");
+}
+
+void BinaryOpLayer::run(const Tensor& A, const Tensor& B, Tensor& output) {
+  if (A.get_type() != B.get_type()) {
+    throw std::runtime_error(
+        "BinaryOpLayer: Input tensors must have the same type");
+  }
+
+  if (is_scalar_tensor(B)) {
+    switch (B.get_type()) {
+      case Type::kFloat:
+        run_with_scalar(A, B.as<float>()->at(0), output);
+        return;
+      case Type::kInt:
+        run_with_scalar(A, static_cast<float>(B.as<int>()->at(0)), output);
+        return;
+      default:
+        throw std::runtime_error("Unsupported scalar type");
+    }
+  }
+
+  if (is_scalar_tensor(A)) {
+    switch (A.get_type()) {
+      case Type::kFloat:
+        run_with_scalar(B, A.as<float>()->at(0), output);
+        return;
+      case Type::kInt:
+        run_with_scalar(B, static_cast<float>(A.as<int>()->at(0)), output);
+        return;
+      default:
+        throw std::runtime_error("BinaryOpLayer: Unsupported scalar type");
+    }
+  }
+
+  if (!can_broadcast(A.get_shape(), B.get_shape())) {
+    throw std::runtime_error(
+        "BinaryOpLayer: Incompatible shapes for broadcasting");
+  }
+
+  Shape output_shape =
+      calculate_broadcasted_shape(A.get_shape(), B.get_shape());
+
+  switch (A.get_type()) {
+    case Type::kFloat:
+      run_broadcast_impl<float>(A, B, output, output_shape);
+      break;
+    case Type::kInt:
+      run_broadcast_impl<int>(A, B, output, output_shape);
+      break;
+    default:
+      throw std::runtime_error("Unsupported tensor type");
+  }
+}
+
+void BinaryOpLayer::run_with_scalar(const Tensor& input, float scalar,
+                                    Tensor& output) const {
+  switch (input.get_type()) {
+    case Type::kFloat: {
+      run_with_scalar_impl<float>(input, scalar, output);
+      break;
+    }
+    case Type::kInt: {
+      run_with_scalar_impl<int>(input, static_cast<int>(scalar), output);
+      break;
+    }
+    default:
+      throw std::runtime_error(
+          "BinaryOpLayer: Unsupported tensor type for scalar operation");
+  }
+}
+
+template <typename ValueType>
+void BinaryOpLayer::run_with_scalar_impl(const Tensor& input, ValueType scalar,
+                                         Tensor& output) const {
+  const auto& input_data = *input.as<ValueType>();
+  std::vector<ValueType> result;
+  result.reserve(input_data.size());
+
+  for (const auto& val : input_data) {
+    result.push_back(apply_binary_op(val, scalar, op_));
+  }
+
+  output = make_tensor(result, input.get_shape());
+}
+
+template <typename ValueType>
+void BinaryOpLayer::run_broadcast_impl(const Tensor& A, const Tensor& B,
+                                       Tensor& output,
+                                       const Shape& output_shape) const {
+  const auto& a_data = *A.as<ValueType>();
+  const auto& b_data = *B.as<ValueType>();
+  std::vector<ValueType> result(output_shape.count());
+  const auto strides_a = get_strides(A.get_shape());
+  const auto strides_b = get_strides(B.get_shape());
+  const auto strides_output = get_strides(output_shape);
+
+  for (size_t i = 0; i < result.size(); ++i) {
+    size_t a_idx = get_broadcasted_index(i, A.get_shape(), output_shape,
+                                         strides_a, strides_output);
+    size_t b_idx = get_broadcasted_index(i, B.get_shape(), output_shape,
+                                         strides_b, strides_output);
+    result[i] = apply_binary_op(a_data[a_idx], b_data[b_idx], op_);
+  }
+
+  output = make_tensor(result, output_shape);
+}
+
+bool BinaryOpLayer::can_broadcast(const Shape& shape_A, const Shape& shape_B) {
+  size_t a_dims = shape_A.dims();
+  size_t b_dims = shape_B.dims();
+  size_t max_dims = std::max(a_dims, b_dims);
+
+  for (size_t i = 0; i < max_dims; ++i) {
+    size_t a_dim = (i < a_dims) ? shape_A[a_dims - 1 - i] : 1;
+    size_t b_dim = (i < b_dims) ? shape_B[b_dims - 1 - i] : 1;
+
+    if (a_dim != b_dim && a_dim != 1 && b_dim != 1) {
+      return false;
+    }
+  }
+  return true;
+}
+
+Shape BinaryOpLayer::calculate_broadcasted_shape(const Shape& shape_A,
+                                                 const Shape& shape_B) {
+  size_t a_dims = shape_A.dims();
+  size_t b_dims = shape_B.dims();
+  size_t max_dims = std::max(a_dims, b_dims);
+  Shape result(max_dims);
+
+  for (size_t i = 0; i < max_dims; ++i) {
+    size_t a_dim = (i < a_dims) ? shape_A[a_dims - 1 - i] : 1;
+    size_t b_dim = (i < b_dims) ? shape_B[b_dims - 1 - i] : 1;
+    result[max_dims - 1 - i] = std::max(a_dim, b_dim);
+  }
+  return result;
+}
+
+std::vector<size_t> BinaryOpLayer::get_strides(const Shape& shape) {
+  std::vector<size_t> strides(shape.dims());
+  if (strides.empty()) return strides;
+
+  strides.back() = 1;
+  for (int i = (int)shape.dims() - 2; i >= 0; --i) {
+    strides[i] = strides[i + 1] * shape[i + 1];
+  }
+  return strides;
+}
+
+size_t BinaryOpLayer::get_broadcasted_index(
+    size_t flat_index, const Shape& input_shape, const Shape& output_shape,
+    const std::vector<size_t>& input_strides,
+    const std::vector<size_t>& output_strides) {
+  size_t input_dims = input_shape.dims();
+  size_t output_dims = output_shape.dims();
+  size_t index = 0;
+
+  for (size_t i = 0; i < output_dims; ++i) {
+    size_t output_dim = output_shape[i];
+    size_t input_dim = (i >= output_dims - input_dims)
+                           ? input_shape[i - (output_dims - input_dims)]
+                           : 1;
+
+    if (input_dim == 1) continue;
+
+    size_t pos_in_dim = (flat_index / output_strides[i]) % output_dim;
+    if (i >= output_dims - input_dims) {
+      size_t input_pos = i - (output_dims - input_dims);
+      index += pos_in_dim * input_strides[input_pos];
+    }
+  }
+  return index;
+}
+
+bool BinaryOpLayer::is_scalar_tensor(const Tensor& t) {
+  const auto& shape = t.get_shape();
+  const size_t dims = shape.dims();
+
+  if (dims == 0) return true;
+
+  for (size_t i = 0; i < dims; ++i) {
+    if (shape[i] != 1) {
+      return false;
+    }
+  }
+  return true;
+}
+
+}  // namespace it_lab_ai
\ No newline at end of file
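Broadcasting here follows the right-aligned, NumPy-style rule encoded in can_broadcast: trailing dimensions must match or be 1, and a size-1 axis always contributes coordinate 0. Below is a standalone sketch of what get_broadcasted_index computes for equal-rank shapes (the real function additionally handles the rank offset when the input has fewer dimensions); the shapes come from the Broadcasting3D test further down:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Map flat output index 7 of a {2, 3, 3} output (coordinates (0, 2, 1))
  // back into a {2, 1, 3} input. Row-major strides: output {9, 3, 1},
  // input {3, 3, 1}.
  std::vector<size_t> out_shape = {2, 3, 3}, out_strides = {9, 3, 1};
  std::vector<size_t> in_shape = {2, 1, 3}, in_strides = {3, 3, 1};
  size_t flat = 7;
  size_t in_index = 0;
  for (size_t i = 0; i < out_shape.size(); ++i) {
    if (in_shape[i] == 1) continue;  // broadcast axis: coordinate pinned to 0
    size_t pos = (flat / out_strides[i]) % out_shape[i];
    in_index += pos * in_strides[i];
  }
  std::cout << in_index << "\n";  // prints 1, i.e. input element (0, 0, 1)
  return 0;
}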
diff --git a/src/layers/ConvLayer.cpp b/src/layers/ConvLayer.cpp
index 19a149bf..dddec796 100644
--- a/src/layers/ConvLayer.cpp
+++ b/src/layers/ConvLayer.cpp
@@ -1,6 +1,6 @@
 #include "layers/ConvLayer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 void ConvolutionalLayer::run(const Tensor& input, Tensor& output) {
   if (input.get_shape().dims() != 4) {
@@ -135,4 +135,4 @@ void ConvolutionalLayer::run(const Tensor& input, Tensor& output) {
   }
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/DropOutLayer.cpp b/src/layers/DropOutLayer.cpp
index 7b188af3..f61ed554 100644
--- a/src/layers/DropOutLayer.cpp
+++ b/src/layers/DropOutLayer.cpp
@@ -4,7 +4,7 @@
 #include
 #include
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 void DropOutLayer::run(const Tensor &input, Tensor &output) {
   const double lower_bound = 0;
@@ -35,4 +35,4 @@ void DropOutLayer::run(const Tensor &input, Tensor &output) {
   }
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/EWLayer.cpp b/src/layers/EWLayer.cpp
index 70905ca8..4ed6a961 100644
--- a/src/layers/EWLayer.cpp
+++ b/src/layers/EWLayer.cpp
@@ -1,6 +1,6 @@
 #include "layers/EWLayer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 void EWLayer::run(const Tensor &input, Tensor &output) {
   switch (input.get_type()) {
@@ -21,4 +21,4 @@ void EWLayer::run(const Tensor &input, Tensor &output) {
   }
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/FCLayer.cpp b/src/layers/FCLayer.cpp
index e55224e1..6c5da2d9 100644
--- a/src/layers/FCLayer.cpp
+++ b/src/layers/FCLayer.cpp
@@ -1,6 +1,6 @@
 #include "layers/FCLayer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 void FCLayer::run(const Tensor& input, Tensor& output) {
   if (input.get_type() != weights_.get_type()) {
@@ -32,4 +32,4 @@ void FCLayer::run(const Tensor& input, Tensor& output) {
   }
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/FlattenLayer.cpp b/src/layers/FlattenLayer.cpp
index c229e5fc..141753ad 100644
--- a/src/layers/FlattenLayer.cpp
+++ b/src/layers/FlattenLayer.cpp
@@ -1,6 +1,6 @@
 #include "layers/FlattenLayer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 // reorder coords
 std::vector reorder(std::vector order_vec,
                     std::vector order) {
@@ -45,4 +45,4 @@ void FlattenLayer::run(const Tensor &input, Tensor &output) {
   }
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/OutputLayer.cpp b/src/layers/OutputLayer.cpp
index 5caebca3..57d2621e 100644
--- a/src/layers/OutputLayer.cpp
+++ b/src/layers/OutputLayer.cpp
@@ -1,6 +1,6 @@
 #include "layers/OutputLayer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 std::pair<std::vector<std::string>, Tensor> OutputLayer::top_k(
     const Tensor &input, size_t k) const {
@@ -29,4 +29,4 @@ std::pair<std::vector<std::string>, Tensor> OutputLayer::top_k(
   return make_pair(reslabels, resvector);
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/PoolingLayer.cpp b/src/layers/PoolingLayer.cpp
index 99651cd4..4b476aa4 100644
--- a/src/layers/PoolingLayer.cpp
+++ b/src/layers/PoolingLayer.cpp
@@ -1,6 +1,6 @@
 #include "layers/PoolingLayer.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 void PoolingLayer::run(const Tensor& input, Tensor& output) {
   switch (input.get_type()) {
@@ -48,4 +48,4 @@ void PoolingLayer::run(const Tensor& input, Tensor& output) {
   }
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/Shape.cpp b/src/layers/Shape.cpp
index b226b5a6..11a19394 100644
--- a/src/layers/Shape.cpp
+++ b/src/layers/Shape.cpp
@@ -1,6 +1,6 @@
 #include "layers/Shape.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 size_t Shape::get_index(const std::vector<size_t>& coords) const {
   if (coords.size() != dims_.size()) {
@@ -29,4 +29,4 @@ std::ostream& operator<<(std::ostream& os, const Shape& shape) {
   return os;
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/src/layers/Tensor.cpp b/src/layers/Tensor.cpp
index d60f417a..06fceaa8 100644
--- a/src/layers/Tensor.cpp
+++ b/src/layers/Tensor.cpp
@@ -1,6 +1,6 @@
 #include "layers/Tensor.hpp"
 
-namespace itlab_2023 {
+namespace it_lab_ai {
 
 std::ostream& operator<<(std::ostream& out, const Tensor& t) {
   for (size_t i = 0; i < t.get_shape().count(); i++) {
@@ -17,4 +17,4 @@ std::ostream& operator<<(std::ostream& out, const Tensor& t) {
   return out;
 }
 
-}  // namespace itlab_2023
+}  // namespace it_lab_ai
diff --git a/test/benchmarking/test_accuracy.cpp b/test/benchmarking/test_accuracy.cpp
index 7b4c8f8c..affb6258 100644
--- a/test/benchmarking/test_accuracy.cpp
+++ b/test/benchmarking/test_accuracy.cpp
@@ -4,7 +4,7 @@
 #include "gtest/gtest.h"
 #include "perf/benchmarking.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(accuracy, max_accuracy_test) {
   double a[10] = {9.0, 2.0, 1.0, 4.0, 7.0, 10.5, -12.0, 11.0, 0.0, -2.5};
diff --git a/test/benchmarking/test_layers_time.cpp b/test/benchmarking/test_layers_time.cpp
index d03414cf..add2b65b 100644
--- a/test/benchmarking/test_layers_time.cpp
+++ b/test/benchmarking/test_layers_time.cpp
@@ -8,7 +8,7 @@
 #include "layers/PoolingLayer.hpp"
 #include "perf/benchmarking.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 void test_func(Layer& p, const Tensor& input, Tensor& output) {
   p.run(input, output);
diff --git a/test/benchmarking/test_throughput.cpp b/test/benchmarking/test_throughput.cpp
index 345e1876..3943a2d2 100644
--- a/test/benchmarking/test_throughput.cpp
+++ b/test/benchmarking/test_throughput.cpp
@@ -4,7 +4,7 @@
 #include "gtest/gtest.h"
 #include "perf/benchmarking.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 template <typename T>
 std::vector<T> matrix_sum(const std::vector<T> &first,
diff --git a/test/benchmarking/test_timer.cpp b/test/benchmarking/test_timer.cpp
index 87a57596..99cc487e 100644
--- a/test/benchmarking/test_timer.cpp
+++ b/test/benchmarking/test_timer.cpp
@@ -3,7 +3,7 @@
 #include "gtest/gtest.h"
 #include "perf/benchmarking.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 void waitfor_function(const size_t ms) {
   std::this_thread::sleep_for(std::chrono::milliseconds(ms));
diff --git a/test/graph/test_graph.cpp b/test/graph/test_graph.cpp
index 9cbd84de..932db254 100644
--- a/test/graph/test_graph.cpp
+++ b/test/graph/test_graph.cpp
@@ -6,7 +6,7 @@
 #include "layers/FCLayer.hpp"
 #include "layers/InputLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(graph, check_connection) {
   const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
diff --git a/test/inference/test_inference.cpp b/test/inference/test_inference.cpp
index 5c65a60e..7bfc6ab0 100644
--- a/test/inference/test_inference.cpp
+++ b/test/inference/test_inference.cpp
@@ -9,7 +9,7 @@
 #include "layers/OutputLayer.hpp"
 #include "layers/PoolingLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(bfs, check_result_vec) {
   Graph graph(5);
diff --git a/test/single_layer/test_binaryoplayer.cpp b/test/single_layer/test_binaryoplayer.cpp
new file mode 100644
index 00000000..93f6fc52
--- /dev/null
+++ b/test/single_layer/test_binaryoplayer.cpp
@@ -0,0 +1,219 @@
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "layers/BinaryOpLayer.hpp"
+#include "layers/Tensor.hpp"
+
+using namespace it_lab_ai;
+
+class BinaryOpLayerTests : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    data1 = {1.0f, 2.0f, 3.0f, 4.0f};
+    data2 = {2.0f, 3.0f, 4.0f, 5.0f};
+    data_int = {1, 2, 3, 4};
+    scalar = make_tensor<float>({2.0f});
+    scalar_int = make_tensor<int>({2});
+  }
+
+  std::vector<float> data1;
+  std::vector<float> data2;
+  std::vector<int> data_int;
+  Tensor scalar;
+  Tensor scalar_int;
+};
+
+TEST_F(BinaryOpLayerTests, MulSameShapeFloat) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 = make_tensor(data1, {2, 2});
+  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor output;
+
+  layer.run(input1, input2, output);
+
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 2.0f);
+  EXPECT_FLOAT_EQ((*result)[1], 6.0f);
+  EXPECT_FLOAT_EQ((*result)[2], 12.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 20.0f);
+}
+
+TEST_F(BinaryOpLayerTests, MulSameShapeInt) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 = make_tensor(data_int, {2, 2});
+  Tensor input2 = make_tensor(data_int, {2, 2});
+  Tensor output;
+
+  layer.run(input1, input2, output);
+
+  auto* result = output.as<int>();
+  EXPECT_EQ((*result)[0], 1);
+  EXPECT_EQ((*result)[1], 4);
+  EXPECT_EQ((*result)[2], 9);
+  EXPECT_EQ((*result)[3], 16);
+}
+
+TEST_F(BinaryOpLayerTests, MulSameShapeIntResNet1) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 = make_tensor<int>({1, 2, 64, 64, 64}, {5});
+  Tensor input2 = make_tensor<int>({1, 2, 64, 1, 1}, {5});
+  Tensor output;
+
+  layer.run(input1, input2, output);
+
+  auto* result = output.as<int>();
+  EXPECT_EQ((*result)[0], 1);
+  EXPECT_EQ((*result)[1], 4);
+  EXPECT_EQ((*result)[2], 4096);
+  EXPECT_EQ((*result)[3], 64);
+  EXPECT_EQ((*result)[4], 64);
+}
+
+TEST_F(BinaryOpLayerTests, MulWithScalarFloat) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input = make_tensor(data1, {2, 2});
+  Tensor output;
+
+  layer.run(input, scalar, output);
+
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 2.0f);
+  EXPECT_FLOAT_EQ((*result)[1], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[2], 6.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 8.0f);
+}
+
+TEST_F(BinaryOpLayerTests, MulWithScalarInt) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input = make_tensor(data_int, {2, 2});
+  Tensor output;
+
+  layer.run(input, scalar_int, output);
+
+  auto* result = output.as<int>();
+  EXPECT_EQ((*result)[0], 2);
+  EXPECT_EQ((*result)[1], 4);
+  EXPECT_EQ((*result)[2], 6);
+  EXPECT_EQ((*result)[3], 8);
+}
+
+TEST_F(BinaryOpLayerTests, BroadcastingTest1) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 = make_tensor<float>({1.0f, 2.0f}, {2, 1});
+  Tensor input2 = make_tensor<float>({3.0f, 4.0f}, {1, 2});
+  Tensor output;
+
+  layer.run(input1, input2, output);
+
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 3.0f);
+  EXPECT_FLOAT_EQ((*result)[1], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[2], 6.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 8.0f);
+}
+
+TEST_F(BinaryOpLayerTests, Broadcasting3D) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 =
+      make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}, {2, 1, 3});
+  Tensor input2 =
+      make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}, {2, 3, 1});
+  Tensor output;
+
+  layer.run(input1, input2, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 3, 3}));
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
+  EXPECT_FLOAT_EQ((*result)[1], 2.0f);
+  EXPECT_FLOAT_EQ((*result)[2], 3.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 2.0f);
+  EXPECT_FLOAT_EQ((*result)[4], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[5], 6.0f);
+  EXPECT_FLOAT_EQ((*result)[12], 20.0f);
+  EXPECT_FLOAT_EQ((*result)[13], 25.0f);
+  EXPECT_FLOAT_EQ((*result)[14], 30.0f);
+  EXPECT_FLOAT_EQ((*result)[15], 24.0f);
+  EXPECT_FLOAT_EQ((*result)[16], 30.0f);
+  EXPECT_FLOAT_EQ((*result)[17], 36.0f);
+}
+
+TEST_F(BinaryOpLayerTests, BroadcastingDifferentRanks) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 = make_tensor<float>({1.0f, 2.0f, 3.0f}, {3});
+  Tensor input2 =
+      make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}, {2, 1, 3});
+  Tensor output;
+
+  layer.run(input1, input2, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({2, 1, 3}));
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[0], 1.0f);
+  EXPECT_FLOAT_EQ((*result)[1], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[2], 9.0f);
+  EXPECT_FLOAT_EQ((*result)[3], 4.0f);
+}
+
+TEST_F(BinaryOpLayerTests, IncompatibleShapes) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor input1 = make_tensor(data1, {4});
+  Tensor input2 = make_tensor(data2, {2, 2});
+  Tensor output;
+
+  EXPECT_THROW(layer.run(input1, input2, output), std::runtime_error);
+}
+
+TEST_F(BinaryOpLayerTests, LayerName) {
+  EXPECT_EQ(BinaryOpLayer::get_name(), "Binary Operation Layer");
+}
+
+TEST_F(BinaryOpLayerTests, EmptyTensors) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
+  Tensor empty1({}, Type::kFloat);
+  Tensor empty2({}, Type::kFloat);
+  Tensor output;
+
+  EXPECT_NO_THROW(layer.run(empty1, empty2, output));
+}
+
+TEST_F(BinaryOpLayerTests, BroadcastingTestAdd) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kAdd);
+
+  Tensor input1 =
+      make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f}, {5, 1, 1, 1});
+
+  Tensor input2 = make_tensor<float>(
+      {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+       11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
+      {5, 4, 1, 1});
+
+  Tensor output;
+  layer.run(input1, input2, output);
+
+  ASSERT_EQ(output.get_shape(), Shape({5, 4, 1, 1}));
+
+  auto* result = output.as<float>();
+
+  EXPECT_FLOAT_EQ((*result)[0], 2.0f);
+  EXPECT_FLOAT_EQ((*result)[1], 3.0f);
+  EXPECT_FLOAT_EQ((*result)[4], 7.0f);
+  EXPECT_FLOAT_EQ((*result)[5], 8.0f);
+}
+
+TEST_F(BinaryOpLayerTests, BroadcastingTestSubGooglNet) {
+  BinaryOpLayer layer(BinaryOpLayer::Operation::kSub);
+  Tensor input1 = make_tensor<float>(
+      {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f,
+       12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f},
+      {1, 2, 3, 3});
+  Tensor output;
+
+  layer.run(input1, scalar, output);
+
+  auto* result = output.as<float>();
+  EXPECT_FLOAT_EQ((*result)[2], 1.0f);
+  EXPECT_FLOAT_EQ((*result)[5], 4.0f);
+  EXPECT_FLOAT_EQ((*result)[12], 11.0f);
+  EXPECT_FLOAT_EQ((*result)[17], 16.0f);
+}
\ No newline at end of file
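The suite above covers kMul, kAdd, and kSub but leaves Operation::kDiv unexercised; apply_binary_op in src/layers/BinaryOpLayer.cpp throws std::runtime_error on a zero divisor. A hypothetical follow-up test in the same fixture (name and values are illustrative, not part of the diff):

TEST_F(BinaryOpLayerTests, DivByZeroScalarThrows) {
  BinaryOpLayer layer(BinaryOpLayer::Operation::kDiv);
  Tensor input = make_tensor(data1, {2, 2});
  Tensor zero = make_tensor<float>({0.0f});  // scalar tensor divisor
  Tensor output;

  EXPECT_THROW(layer.run(input, zero, output), std::runtime_error);
}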
diff --git a/test/single_layer/test_convlayer.cpp b/test/single_layer/test_convlayer.cpp
index 78083849..90b4a702 100644
--- a/test/single_layer/test_convlayer.cpp
+++ b/test/single_layer/test_convlayer.cpp
@@ -2,7 +2,7 @@
 
 #include "layers/ConvLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(ConvolutionalLayerTest, FStep2) {
   std::vector<float> image;
diff --git a/test/single_layer/test_dropoutlayer.cpp b/test/single_layer/test_dropoutlayer.cpp
index 3d23c18a..8babb19c 100644
--- a/test/single_layer/test_dropoutlayer.cpp
+++ b/test/single_layer/test_dropoutlayer.cpp
@@ -4,7 +4,7 @@
 #include "gtest/gtest.h"
 #include "layers/DropOutLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(DropOutLayer, dropoutlayer_int) {
   DropOutLayer layer(1);
diff --git a/test/single_layer/test_ewlayer.cpp b/test/single_layer/test_ewlayer.cpp
index 7d3defc7..a6a273f7 100644
--- a/test/single_layer/test_ewlayer.cpp
+++ b/test/single_layer/test_ewlayer.cpp
@@ -5,7 +5,7 @@
 #include "gtest/gtest.h"
 #include "layers/EWLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 class EWTestsParameterized
     : public ::testing::TestWithParam<
diff --git a/test/single_layer/test_fclayer.cpp b/test/single_layer/test_fclayer.cpp
index 40d0ed8b..7fa46dc7 100644
--- a/test/single_layer/test_fclayer.cpp
+++ b/test/single_layer/test_fclayer.cpp
@@ -3,7 +3,7 @@
 #include "gtest/gtest.h"
 #include "layers/FCLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 class FCTestsParameterized
     : public ::testing::TestWithParam<
diff --git a/test/single_layer/test_flattenlayer.cpp b/test/single_layer/test_flattenlayer.cpp
index b7a4ddff..a2eed716 100644
--- a/test/single_layer/test_flattenlayer.cpp
+++ b/test/single_layer/test_flattenlayer.cpp
@@ -3,7 +3,7 @@
 #include "gtest/gtest.h"
 #include "layers/FlattenLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(flattenlayer, new_flattenlayer_can_flatten_int) {
   FlattenLayer layer;
diff --git a/test/single_layer/test_inlayer.cpp b/test/single_layer/test_inlayer.cpp
index 57b416f7..3656bc47 100644
--- a/test/single_layer/test_inlayer.cpp
+++ b/test/single_layer/test_inlayer.cpp
@@ -4,7 +4,7 @@
 #include "gtest/gtest.h"
 #include "layers/InputLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(input, check_basic) {
   Shape sh1({1, 2, 2, 1});
diff --git a/test/single_layer/test_outputlayer.cpp b/test/single_layer/test_outputlayer.cpp
index 007d6dd1..2be69a29 100644
--- a/test/single_layer/test_outputlayer.cpp
+++ b/test/single_layer/test_outputlayer.cpp
@@ -6,7 +6,7 @@
 #include "gtest/gtest.h"
 #include "layers/OutputLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 void fill_from_file(const std::string& path_from, std::vector& to,
                     size_t limit = 0) {
diff --git a/test/single_layer/test_poolinglayer.cpp b/test/single_layer/test_poolinglayer.cpp
index 184a9617..083d36e2 100644
--- a/test/single_layer/test_poolinglayer.cpp
+++ b/test/single_layer/test_poolinglayer.cpp
@@ -3,7 +3,7 @@
 #include "gtest/gtest.h"
 #include "layers/PoolingLayer.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 TEST(poolinglayer, empty_inputs1) {
   Shape inpshape = 0;
@@ -178,7 +178,7 @@ TEST(poolinglayer, new_pooling_layer_can_run_int_avg) {
 TEST(poolinglayer, new_pooling_layer_can_run_int_avg_tbb) {
   Shape inpshape = {4, 4};
   Shape poolshape = {2, 2};
-  PoolingLayer a(poolshape, "average", itlab_2023::kTBB);
+  PoolingLayer a(poolshape, "average", it_lab_ai::kTBB);
   std::vector<int> input({9, 8, 7, 6, 5, 4, 3, 2, 2, 3, 4, 5, 6, 7, 8, 9});
   Tensor output = make_tensor<int>({0});
   a.run(make_tensor(input, inpshape), output);
@@ -204,7 +204,7 @@ TEST(poolinglayer, new_pooling_layer_can_run_1d_pooling_float) {
 TEST(poolinglayer, new_pooling_layer_tbb_can_run_1d_pooling_float) {
   Shape inpshape = {8};
   Shape poolshape = {3};
-  PoolingLayer a(poolshape, "average", itlab_2023::kTBB);
+  PoolingLayer a(poolshape, "average", it_lab_ai::kTBB);
   std::vector<float> input({9.0F, 8.0F, 7.0F, 6.0F, 5.0F, 4.0F, 3.0F, 2.0F});
   Tensor output = make_tensor<float>({0});
   a.run(make_tensor(input, inpshape), output);
diff --git a/test/single_layer/test_tensor.cpp b/test/single_layer/test_tensor.cpp
index 2a5b78b4..5de58324 100644
--- a/test/single_layer/test_tensor.cpp
+++ b/test/single_layer/test_tensor.cpp
@@ -5,7 +5,7 @@
 #include "gtest/gtest.h"
 #include "layers/Tensor.hpp"
 
-using namespace itlab_2023;
+using namespace it_lab_ai;
 
 struct TestClass {
  public: