Skip to content
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion app/Accuracy/accuracy_check.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#include "layers/OutputLayer.hpp"
#include "layers/PoolingLayer.hpp"

using namespace itlab_2023;
using namespace it_lab_ai;

int main() {
std::string image_path = IMAGE1_PATH;
Expand Down
2 changes: 1 addition & 1 deletion app/AccuracyImgNet/accimgnet.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
#include "layers/OutputLayer.hpp"
#include "layers/PoolingLayer.hpp"

using namespace itlab_2023;
using namespace it_lab_ai;

bool cmp_by_first(const std::pair<size_t, std::string>& a,
const std::pair<size_t, std::string>& b) {
Expand Down
2 changes: 1 addition & 1 deletion app/Graph/acc_check_mnist.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#include "build.cpp"
#include "build.hpp"

using namespace itlab_2023;
using namespace it_lab_ai;

int main(int argc, char* argv[]) {
bool parallel = false;
Expand Down
2 changes: 1 addition & 1 deletion app/Graph/build.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ void build_graph(Tensor& input, Tensor& output, bool comments,
Tensor tmp_bias = make_tensor(tensor.get_bias());
Tensor tmp_tensor =
Tensor(Shape({tensor.get_shape()[1], tensor.get_shape()[0]}),
itlab_2023::Type::kFloat);
it_lab_ai::Type::kFloat);
// kernel is always transposed ?
for (size_t h = 0; h < tensor.get_shape()[0]; h++) {
for (size_t w = 0; w < tensor.get_shape()[1]; w++) {
Expand Down
2 changes: 1 addition & 1 deletion app/Graph/graph_build.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#include "build.hpp"

namespace fs = std::filesystem;
using namespace itlab_2023;
using namespace it_lab_ai;

int main(int argc, char* argv[]) {
std::string image_folder = IMAGE1_PATH;
Expand Down
2 changes: 1 addition & 1 deletion include/Weights_Reader/reader_weights.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#include "layers/Tensor.hpp"

using json = nlohmann::json;
using namespace itlab_2023;
using namespace it_lab_ai;

json read_json(const std::string& filename);
void extract_values_from_json(const json& j, std::vector<float>& values);
Expand Down
4 changes: 2 additions & 2 deletions include/graph/graph.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

class Graph {
int BiggestSize_;
Expand Down Expand Up @@ -148,4 +148,4 @@ class Graph {
std::vector<Tensor> getWEIGHTS() { return weights_; }
#endif
};
} // namespace itlab_2023
} // namespace it_lab_ai
72 changes: 72 additions & 0 deletions include/layers/BinaryOpLayer.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
#pragma once
#include <algorithm>
#include <memory>
#include <stdexcept>
#include <utility>
#include <vector>

#include "Tensor.hpp"
#include "layers/Layer.hpp"

namespace it_lab_ai {

// Elementwise binary operation layer: applies kMul/kAdd/kSub to two input
// tensors, with numpy-style broadcasting of mismatched shapes, producing a
// single output tensor. (Separate from EWLayer, which handles the
// one-input/one-output case — see PR discussion.)
class BinaryOpLayer : public Layer {
 public:
  // Supported elementwise operations between the two inputs.
  enum class Operation : uint8_t { kMul, kAdd, kSub };

  BinaryOpLayer() = default;
  explicit BinaryOpLayer(Operation op) : op_(op) {}

  static std::string get_name() { return "Binary Operation Layer"; }

  // Single-input entry point required by the Layer interface.
  // NOTE(review): a binary op needs two inputs; presumably this overload
  // handles the degenerate/scalar case or throws — confirm in the .cpp.
  void run(const Tensor& input, Tensor& output) override;
  // Two-input entry point: output = A <op_> B, broadcasting shapes as needed.
  void run(const Tensor& A, const Tensor& B, Tensor& output);
  // True when t can be treated as a single scalar value.
  static bool is_scalar_tensor(const Tensor& t);

#ifdef ENABLE_STATISTIC_WEIGHTS
  // This layer has no trainable weights; return a one-element dummy tensor
  // so the statistics machinery has something to record.
  Tensor get_weights() override {
    std::vector<int> v = {0};
    return make_tensor(v);
  }
#endif

 private:
  Operation op_ = Operation::kMul;
  // Type-erased per-ValueType implementation (see BinaryOpLayerImpl below).
  std::shared_ptr<void> impl_;

  // Fast path for tensor-with-scalar operations (avoids full broadcasting).
  template <typename ValueType>
  void run_with_scalar_impl(const Tensor& input, ValueType scalar,
                            Tensor& output) const;
  void run_with_scalar(const Tensor& input, float scalar, Tensor& output);

  // Broadcasting helpers (numpy-style: dimensions are compatible when equal
  // or one of them is 1, aligned from the trailing axis).
  static bool can_broadcast(const Shape& shape_A, const Shape& shape_B);
  static Shape calculate_broadcasted_shape(const Shape& shape_A,
                                           const Shape& shape_B);
  static std::vector<size_t> get_strides(const Shape& shape);
  // Maps a flat index in output_shape back to the corresponding flat index
  // in input_shape (broadcast dimensions contribute stride 0).
  static size_t get_broadcasted_index(size_t flat_index,
                                      const Shape& input_shape,
                                      const Shape& output_shape);

  template <typename ValueType>
  class BinaryOpLayerImpl;
};

// Typed implementation of BinaryOpLayer for a concrete element type.
// Stored behind the type-erased impl_ pointer in BinaryOpLayer.
template <typename ValueType>
class BinaryOpLayer::BinaryOpLayerImpl : public LayerImpl<ValueType> {
 public:
  BinaryOpLayerImpl() = delete;
  explicit BinaryOpLayerImpl(BinaryOpLayer::Operation op);

  // Single-input overload inherited from LayerImpl: a binary operation
  // cannot be computed from one input, so this is a hard error.
  std::vector<ValueType> run(
      const std::vector<ValueType>& input) const override {
    (void)input;
    throw std::runtime_error("BinaryOpLayer requires two inputs");
  }

  // Elementwise op_ over two equal-length flat buffers; broadcasting is
  // resolved by the enclosing BinaryOpLayer before this is called —
  // TODO(review): confirm against the .cpp definition.
  std::vector<ValueType> run(const std::vector<ValueType>& inputA,
                             const std::vector<ValueType>& inputB) const;

 private:
  BinaryOpLayer::Operation op_;
};

} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/ConvLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

class ConvolutionalLayer : public Layer {
private:
Expand Down Expand Up @@ -402,4 +402,4 @@ void Conv4DSTL(const Tensor& input, const Tensor& kernel_, const Tensor& bias_,
output = make_tensor<ValueType>(one_d_vector, sh);
}

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/DropOutLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

class DropOutLayer : public Layer {
private:
Expand All @@ -16,4 +16,4 @@ class DropOutLayer : public Layer {
void run(const Tensor& input, Tensor& output) override;
};

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/EWLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

template <typename T>
T relu(const T& value) {
Expand Down Expand Up @@ -93,4 +93,4 @@ std::vector<ValueType> EWLayerImpl<ValueType>::run(
return res;
}

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/FCLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

class FCLayer : public Layer {
private:
Expand Down Expand Up @@ -128,4 +128,4 @@ std::vector<ValueType> FCLayerImpl<ValueType>::run(
return output_values;
}

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/FlattenLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

std::vector<size_t> reorder(std::vector<size_t> order_vec,
std::vector<size_t> order);
Expand Down Expand Up @@ -51,4 +51,4 @@ void Flatten4D(const Tensor& input, Tensor& output,
Shape({input.get_shape().count()}));
}

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/InputLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

enum LayInOut : uint8_t {
kNchw, // 0
Expand Down Expand Up @@ -179,4 +179,4 @@ class InputLayer : public Layer {
}
};

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/Layer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
#include "layers/Tensor.hpp"
#include "oneapi/tbb.h"

namespace itlab_2023 {
namespace it_lab_ai {

enum LayerType : uint8_t {
kInput,
Expand Down Expand Up @@ -65,4 +65,4 @@ class LayerImpl {
Shape outputShape_;
};

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/OutputLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

class OutputLayer : public Layer {
public:
Expand Down Expand Up @@ -111,4 +111,4 @@ std::pair<std::vector<std::string>, std::vector<ValueType>> top_k_vec(
return std::make_pair(res_labels, res_input);
}

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/PoolingLayer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

#include "layers/Layer.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

enum PoolingType : uint8_t { kAverage, kMax };

Expand Down Expand Up @@ -266,4 +266,4 @@ std::vector<ValueType> PoolingLayerImplTBB<ValueType>::run(
return res;
}

} // namespace itlab_2023
} // namespace it_lab_ai
14 changes: 12 additions & 2 deletions include/layers/Shape.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
#include <stdexcept>
#include <vector>

namespace itlab_2023 {
namespace it_lab_ai {

class Shape {
public:
Expand Down Expand Up @@ -40,9 +40,19 @@ class Shape {
size_t dims() const noexcept { return dims_.size(); }
size_t get_index(const std::vector<size_t>& coords) const;
friend std::ostream& operator<<(std::ostream& os, const Shape& shape);
bool operator==(const Shape& other) const noexcept {
if (dims_.size() != other.dims_.size()) {
return false;
}
return std::equal(dims_.begin(), dims_.end(), other.dims_.begin());
}

bool operator!=(const Shape& other) const noexcept {
return !(*this == other);
}

private:
std::vector<size_t> dims_;
};

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/layers/Tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

#include "layers/Shape.hpp"

namespace itlab_2023 {
namespace it_lab_ai {

enum class Type : uint8_t { kUnknown, kInt, kFloat };

Expand Down Expand Up @@ -207,4 +207,4 @@ Tensor make_tensor(const std::vector<T>& values, const Shape& shape,
}
std::ostream& operator<<(std::ostream& out, const Tensor& t);

} // namespace itlab_2023
} // namespace it_lab_ai
4 changes: 2 additions & 2 deletions include/perf/benchmarking.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
#include <stdexcept>
#include <vector>

namespace itlab_2023 {
namespace it_lab_ai {

template <typename DurationContainerType, typename DurationType, class Function,
typename... Args>
Expand Down Expand Up @@ -114,4 +114,4 @@ T accuracy_norm(T* test, T* ref, const size_t size) {
// typename T should have friend sqrt() function
return std::sqrt(res);
}
} // namespace itlab_2023
} // namespace it_lab_ai
Loading
Loading