
Commit 0f04838

del impl_class, fix calculate strides, +div, add run_broadcast_impl, fix tests
1 parent 7dcc170 commit 0f04838

Showing 3 changed files with 54 additions and 89 deletions.

include/layers/BinaryOpLayer.hpp

Lines changed: 9 additions & 25 deletions
@@ -12,7 +12,7 @@ namespace it_lab_ai {
 
 class BinaryOpLayer : public Layer {
  public:
-  enum class Operation : uint8_t { kMul, kAdd, kSub };
+  enum class Operation : uint8_t { kMul, kAdd, kSub, kDiv };
 
   BinaryOpLayer() = default;
   explicit BinaryOpLayer(Operation op) : op_(op) {}
@@ -31,42 +31,26 @@ class BinaryOpLayer : public Layer {
 
  private:
   Operation op_ = Operation::kMul;
-  std::shared_ptr<void> impl_;
 
   template <typename ValueType>
   void run_with_scalar_impl(const Tensor& input, ValueType scalar,
                             Tensor& output) const;
-  void run_with_scalar(const Tensor& input, float scalar, Tensor& output);
+  template <typename ValueType>
+  void run_broadcast_impl(const Tensor& A, const Tensor& B, Tensor& output,
+                          const Shape& output_shape) const;
+  void run_with_scalar(const Tensor& input, float scalar, Tensor& output) const;
 
   static bool can_broadcast(const Shape& shape_A, const Shape& shape_B);
   static Shape calculate_broadcasted_shape(const Shape& shape_A,
                                            const Shape& shape_B);
   static std::vector<size_t> get_strides(const Shape& shape);
-  static size_t get_broadcasted_index(size_t flat_index,
-                                      const Shape& input_shape,
-                                      const Shape& output_shape);
+  static size_t get_broadcasted_index(
+      size_t flat_index, const Shape& input_shape, const Shape& output_shape,
+      const std::vector<size_t>& input_strides,
+      const std::vector<size_t>& output_strides);
 
   template <typename ValueType>
   class BinaryOpLayerImpl;
 };
 
-template <typename ValueType>
-class BinaryOpLayer::BinaryOpLayerImpl : public LayerImpl<ValueType> {
- public:
-  BinaryOpLayerImpl() = delete;
-  explicit BinaryOpLayerImpl(BinaryOpLayer::Operation op);
-
-  std::vector<ValueType> run(
-      const std::vector<ValueType>& input) const override {
-    (void)input;
-    throw std::runtime_error("BinaryOpLayer requires two inputs");
-  }
-
-  std::vector<ValueType> run(const std::vector<ValueType>& inputA,
-                             const std::vector<ValueType>& inputB) const;
-
- private:
-  BinaryOpLayer::Operation op_;
-};
-
 }  // namespace it_lab_ai
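
Note: with kDiv added to Operation, a caller drives the layer the same way the tests below do. A minimal sketch, not part of this commit; the include path and the behavior of make_tensor and Tensor are assumed from their usage elsewhere in the repository:

#include "layers/BinaryOpLayer.hpp"  // assumed include path

using namespace it_lab_ai;

// Element-wise division with the new kDiv operation.
void divide_example() {
  BinaryOpLayer layer(BinaryOpLayer::Operation::kDiv);
  Tensor a = make_tensor<float>({10.0f, 20.0f, 30.0f, 40.0f}, {2, 2});
  Tensor b = make_tensor<float>({2.0f, 4.0f, 5.0f, 8.0f}, {2, 2});
  Tensor out;
  layer.run(a, b, out);  // out holds {5, 5, 6, 5} with shape {2, 2}
}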

src/layers/BinaryOpLayer.cpp

Lines changed: 31 additions & 50 deletions
@@ -12,6 +12,9 @@ T apply_binary_op(T a, T b, BinaryOpLayer::Operation op) {
       return a + b;
     case BinaryOpLayer::Operation::kSub:
       return a - b;
+    case BinaryOpLayer::Operation::kDiv:
+      if (b == 0) throw std::runtime_error("Division by zero");
+      return a / b;
     default:
       throw std::runtime_error("Unsupported binary operation");
   }
@@ -67,39 +70,19 @@ void BinaryOpLayer::run(const Tensor& A, const Tensor& B, Tensor& output) {
       calculate_broadcasted_shape(A.get_shape(), B.get_shape());
 
   switch (A.get_type()) {
-    case Type::kFloat: {
-      const auto& a_data = *A.as<float>();
-      const auto& b_data = *B.as<float>();
-      std::vector<float> result(output_shape.count());
-
-      for (size_t i = 0; i < result.size(); ++i) {
-        size_t a_idx = get_broadcasted_index(i, A.get_shape(), output_shape);
-        size_t b_idx = get_broadcasted_index(i, B.get_shape(), output_shape);
-        result[i] = apply_binary_op(a_data[a_idx], b_data[b_idx], op_);
-      }
-      output = make_tensor(result, output_shape);
+    case Type::kFloat:
+      run_broadcast_impl<float>(A, B, output, output_shape);
       break;
-    }
-    case Type::kInt: {
-      const auto& a_data = *A.as<int>();
-      const auto& b_data = *B.as<int>();
-      std::vector<int> result(output_shape.count());
-
-      for (size_t i = 0; i < result.size(); ++i) {
-        size_t a_idx = get_broadcasted_index(i, A.get_shape(), output_shape);
-        size_t b_idx = get_broadcasted_index(i, B.get_shape(), output_shape);
-        result[i] = apply_binary_op(a_data[a_idx], b_data[b_idx], op_);
-      }
-      output = make_tensor(result, output_shape);
+    case Type::kInt:
+      run_broadcast_impl<int>(A, B, output, output_shape);
       break;
-    }
     default:
-      throw std::runtime_error("BinaryOpLayer: Unsupported tensor type");
+      throw std::runtime_error("Unsupported tensor type");
   }
 }
 
 void BinaryOpLayer::run_with_scalar(const Tensor& input, float scalar,
-                                    Tensor& output) {
+                                    Tensor& output) const {
   switch (input.get_type()) {
     case Type::kFloat: {
       run_with_scalar_impl<float>(input, scalar, output);
@@ -130,23 +113,25 @@ void BinaryOpLayer::run_with_scalar_impl(const Tensor& input, ValueType scalar,
 }
 
 template <typename ValueType>
-BinaryOpLayer::BinaryOpLayerImpl<ValueType>::BinaryOpLayerImpl(
-    BinaryOpLayer::Operation op)
-    : LayerImpl<ValueType>(Shape({1}), Shape({1})), op_(op) {}
-
-template <typename ValueType>
-std::vector<ValueType> BinaryOpLayer::BinaryOpLayerImpl<ValueType>::run(
-    const std::vector<ValueType>& inputA,
-    const std::vector<ValueType>& inputB) const {
-  if (inputA.size() != inputB.size()) {
-    throw std::runtime_error("BinaryOpLayer: Input sizes must match");
-  }
+void BinaryOpLayer::run_broadcast_impl(const Tensor& A, const Tensor& B,
+                                       Tensor& output,
+                                       const Shape& output_shape) const {
+  const auto& a_data = *A.as<ValueType>();
+  const auto& b_data = *B.as<ValueType>();
+  std::vector<ValueType> result(output_shape.count());
+  const auto strides_A = get_strides(A.get_shape());
+  const auto strides_B = get_strides(B.get_shape());
+  const auto strides_output = get_strides(output_shape);
 
-  std::vector<ValueType> result(inputA.size());
   for (size_t i = 0; i < result.size(); ++i) {
-    result[i] = apply_binary_op(inputA[i], inputB[i], op_);
+    size_t a_idx = get_broadcasted_index(i, A.get_shape(), output_shape,
+                                         strides_A, strides_output);
+    size_t b_idx = get_broadcasted_index(i, B.get_shape(), output_shape,
+                                         strides_B, strides_output);
+    result[i] = apply_binary_op(a_data[a_idx], b_data[b_idx], op_);
   }
-  return result;
+
+  output = make_tensor(result, output_shape);
 }
 
 bool BinaryOpLayer::can_broadcast(const Shape& shape_A, const Shape& shape_B) {
@@ -191,13 +176,13 @@ std::vector<size_t> BinaryOpLayer::get_strides(const Shape& shape) {
   return strides;
 }
 
-size_t BinaryOpLayer::get_broadcasted_index(size_t flat_index,
-                                            const Shape& input_shape,
-                                            const Shape& output_shape) {
+size_t BinaryOpLayer::get_broadcasted_index(
+    size_t flat_index, const Shape& input_shape, const Shape& output_shape,
+    const std::vector<size_t>& input_strides,
+    const std::vector<size_t>& output_strides) {
   size_t input_dims = input_shape.dims();
   size_t output_dims = output_shape.dims();
   size_t index = 0;
-  auto strides = get_strides(input_shape);
 
   for (size_t i = 0; i < output_dims; ++i) {
     size_t output_dim = output_shape[i];
@@ -207,11 +192,10 @@ size_t BinaryOpLayer::get_broadcasted_index(size_t flat_index,
 
     if (input_dim == 1) continue;
 
-    size_t pos_in_dim =
-        (flat_index / get_strides(output_shape)[i]) % output_dim;
+    size_t pos_in_dim = (flat_index / output_strides[i]) % output_dim;
     if (i >= output_dims - input_dims) {
       size_t input_pos = i - (output_dims - input_dims);
-      index += pos_in_dim * strides[input_pos];
+      index += pos_in_dim * input_strides[input_pos];
     }
   }
   return index;
@@ -231,7 +215,4 @@ bool BinaryOpLayer::is_scalar_tensor(const Tensor& t) {
   return true;
 }
 
-template class BinaryOpLayer::BinaryOpLayerImpl<int>;
-template class BinaryOpLayer::BinaryOpLayerImpl<float>;
-
 }  // namespace it_lab_ai
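
Note: the refactor passes precomputed strides into get_broadcasted_index instead of calling get_strides for every element. Below is a self-contained sketch of the same index math, using plain vectors in place of the repository's Shape class; the names are illustrative only, not the library's API:

#include <cstddef>
#include <vector>

// Row-major strides, computed once per tensor (the role of get_strides).
std::vector<size_t> row_major_strides(const std::vector<size_t>& shape) {
  std::vector<size_t> strides(shape.size(), 1);
  for (size_t i = shape.size(); i > 1; --i) {
    strides[i - 2] = strides[i - 1] * shape[i - 1];
  }
  return strides;
}

// Map a flat output index back into a (possibly broadcast) input tensor.
// Missing leading dimensions and dimensions of size 1 contribute nothing.
size_t broadcasted_index(size_t flat_index,
                         const std::vector<size_t>& in_shape,
                         const std::vector<size_t>& out_shape,
                         const std::vector<size_t>& in_strides,
                         const std::vector<size_t>& out_strides) {
  size_t offset = out_shape.size() - in_shape.size();  // rank difference
  size_t index = 0;
  for (size_t i = 0; i < out_shape.size(); ++i) {
    if (i < offset) continue;                   // dimension absent from input
    if (in_shape[i - offset] == 1) continue;    // broadcast dimension
    size_t pos = (flat_index / out_strides[i]) % out_shape[i];
    index += pos * in_strides[i - offset];
  }
  return index;
}

// Example: broadcasting {2, 1} with {1, 2} into {2, 2}, output element 3
// (row 1, col 1) maps to element 1 of both inputs.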

test/single_layer/test_binaryoplayer.cpp

Lines changed: 14 additions & 14 deletions
@@ -6,7 +6,7 @@
 
 using namespace it_lab_ai;
 
-class BinaryOpLayerMulTests : public ::testing::Test {
+class BinaryOpLayerTests : public ::testing::Test {
  protected:
   void SetUp() override {
     data1 = {1.0f, 2.0f, 3.0f, 4.0f};
@@ -23,7 +23,7 @@ class BinaryOpLayerMulTests : public ::testing::Test {
   Tensor scalar_int;
 };
 
-TEST_F(BinaryOpLayerMulTests, MulSameShapeFloat) {
+TEST_F(BinaryOpLayerTests, MulSameShapeFloat) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 = make_tensor<float>(data1, {2, 2});
   Tensor input2 = make_tensor<float>(data2, {2, 2});
@@ -38,7 +38,7 @@ TEST_F(BinaryOpLayerMulTests, MulSameShapeFloat) {
   EXPECT_FLOAT_EQ((*result)[3], 20.0f);
 }
 
-TEST_F(BinaryOpLayerMulTests, MulSameShapeInt) {
+TEST_F(BinaryOpLayerTests, MulSameShapeInt) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 = make_tensor<int>(data_int, {2, 2});
   Tensor input2 = make_tensor<int>(data_int, {2, 2});
@@ -53,7 +53,7 @@ TEST_F(BinaryOpLayerMulTests, MulSameShapeInt) {
   EXPECT_EQ((*result)[3], 16);
 }
 
-TEST_F(BinaryOpLayerMulTests, MulSameShapeIntResNet1) {
+TEST_F(BinaryOpLayerTests, MulSameShapeIntResNet1) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 = make_tensor<int>({1, 2, 64, 64, 64}, {5});
   Tensor input2 = make_tensor<int>({1, 2, 64, 1, 1}, {5});
@@ -69,7 +69,7 @@ TEST_F(BinaryOpLayerMulTests, MulSameShapeIntResNet1) {
   EXPECT_EQ((*result)[4], 64);
 }
 
-TEST_F(BinaryOpLayerMulTests, MulWithScalarFloat) {
+TEST_F(BinaryOpLayerTests, MulWithScalarFloat) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input = make_tensor<float>(data1, {2, 2});
   Tensor output;
@@ -83,7 +83,7 @@ TEST_F(BinaryOpLayerMulTests, MulWithScalarFloat) {
   EXPECT_FLOAT_EQ((*result)[3], 8.0f);
 }
 
-TEST_F(BinaryOpLayerMulTests, MulWithScalarInt) {
+TEST_F(BinaryOpLayerTests, MulWithScalarInt) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input = make_tensor<int>(data_int, {2, 2});
   Tensor output;
@@ -97,7 +97,7 @@ TEST_F(BinaryOpLayerMulTests, MulWithScalarInt) {
   EXPECT_EQ((*result)[3], 8);
 }
 
-TEST_F(BinaryOpLayerMulTests, BroadcastingTest1) {
+TEST_F(BinaryOpLayerTests, BroadcastingTest1) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 = make_tensor<float>({1.0f, 2.0f}, {2, 1});
   Tensor input2 = make_tensor<float>({3.0f, 4.0f}, {1, 2});
@@ -112,7 +112,7 @@ TEST_F(BinaryOpLayerMulTests, BroadcastingTest1) {
   EXPECT_FLOAT_EQ((*result)[3], 8.0f);
 }
 
-TEST_F(BinaryOpLayerMulTests, Broadcasting3D) {
+TEST_F(BinaryOpLayerTests, Broadcasting3D) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 =
       make_tensor<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}, {2, 1, 3});
@@ -138,7 +138,7 @@ TEST_F(BinaryOpLayerMulTests, Broadcasting3D) {
   EXPECT_FLOAT_EQ((*result)[17], 36.0f);
 }
 
-TEST_F(BinaryOpLayerMulTests, BroadcastingDifferentRanks) {
+TEST_F(BinaryOpLayerTests, BroadcastingDifferentRanks) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 = make_tensor<float>({1.0f, 2.0f, 3.0f}, {3});
   Tensor input2 =
@@ -155,7 +155,7 @@ TEST_F(BinaryOpLayerMulTests, BroadcastingDifferentRanks) {
   EXPECT_FLOAT_EQ((*result)[3], 4.0f);
 }
 
-TEST_F(BinaryOpLayerMulTests, IncompatibleShapes) {
+TEST_F(BinaryOpLayerTests, IncompatibleShapes) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor input1 = make_tensor<float>(data1, {4});
   Tensor input2 = make_tensor<float>(data2, {2, 2});
@@ -164,11 +164,11 @@ TEST_F(BinaryOpLayerMulTests, IncompatibleShapes) {
   EXPECT_THROW(layer.run(input1, input2, output), std::runtime_error);
 }
 
-TEST_F(BinaryOpLayerMulTests, LayerName) {
+TEST_F(BinaryOpLayerTests, LayerName) {
   EXPECT_EQ(BinaryOpLayer::get_name(), "Binary Operation Layer");
 }
 
-TEST_F(BinaryOpLayerMulTests, EmptyTensors) {
+TEST_F(BinaryOpLayerTests, EmptyTensors) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kMul);
   Tensor empty1({}, Type::kFloat);
   Tensor empty2({}, Type::kFloat);
@@ -177,7 +177,7 @@ TEST_F(BinaryOpLayerMulTests, EmptyTensors) {
   EXPECT_NO_THROW(layer.run(empty1, empty2, output));
 }
 
-TEST_F(BinaryOpLayerMulTests, BroadcastingTestAdd) {
+TEST_F(BinaryOpLayerTests, BroadcastingTestAdd) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kAdd);
 
   Tensor input1 =
@@ -201,7 +201,7 @@ TEST_F(BinaryOpLayerMulTests, BroadcastingTestAdd) {
   EXPECT_FLOAT_EQ((*result)[5], 8.0f);
 }
 
-TEST_F(BinaryOpLayerMulTests, BroadcastingTestSubGooglNet) {
+TEST_F(BinaryOpLayerTests, BroadcastingTestSubGooglNet) {
   BinaryOpLayer layer(BinaryOpLayer::Operation::kSub);
   Tensor input1 = make_tensor<float>(
       {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f,
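
Note: this commit only renames the existing fixture; a test exercising the new kDiv path could follow the same style. The sketch below is hypothetical and not part of the commit; it assumes output.as<float>() yields a pointer to the result vector, as in the implementation above:

TEST_F(BinaryOpLayerTests, DivSameShapeFloat) {
  BinaryOpLayer layer(BinaryOpLayer::Operation::kDiv);
  Tensor input1 = make_tensor<float>({8.0f, 6.0f, 9.0f, 4.0f}, {2, 2});
  Tensor input2 = make_tensor<float>({2.0f, 3.0f, 3.0f, 4.0f}, {2, 2});
  Tensor output;
  layer.run(input1, input2, output);

  auto result = output.as<float>();
  EXPECT_FLOAT_EQ((*result)[0], 4.0f);
  EXPECT_FLOAT_EQ((*result)[1], 2.0f);
  EXPECT_FLOAT_EQ((*result)[2], 3.0f);
  EXPECT_FLOAT_EQ((*result)[3], 1.0f);
}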
