Skip to content

Commit 9f7a1fb

Browse files
committed
fix: Minor fix to recently added code: perform a null check before accessing the object.
Added test that would have caught this issue. Updated existing tests to only use supported data inputs. Resolves: MLINFSW-1791 Signed-off-by: Anna Mayne <[email protected]> Change-Id: Id7fd7bf9df7bd8018691a2afe23201174a77f2ea Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/14861 Benchmark: Arm Jenkins <[email protected]> Comments-Addressed: Arm Jenkins <[email protected]> Reviewed-by: Dongsung Kim <[email protected]> Tested-by: Arm Jenkins <[email protected]>
1 parent f69b48a commit 9f7a1fb

File tree

6 files changed

+147
-47
lines changed

6 files changed

+147
-47
lines changed

src/runtime/experimental/operators/CpuFullyConnected.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,9 +62,9 @@ Status CpuFullyConnected::validate(const ITensorInfo *src,
6262
const WeightsInfo &weights_info)
6363
{
6464
bool fp32_ok = src->data_type() == DataType::F32 && weights->data_type() == DataType::F32 &&
65-
(biases->data_type() == DataType::F32 || biases == nullptr) && dst->data_type() == DataType::F32;
65+
(biases == nullptr || biases->data_type() == DataType::F32) && dst->data_type() == DataType::F32;
6666
bool fp16_ok = src->data_type() == DataType::F16 && weights->data_type() == DataType::F16 &&
67-
(biases->data_type() == DataType::F16 || biases == nullptr) && dst->data_type() == DataType::F16;
67+
(biases == nullptr || biases->data_type() == DataType::F16) && dst->data_type() == DataType::F16;
6868
if (!(fp32_ok || fp16_ok))
6969
{
7070
return Status(ErrorCode::RUNTIME_ERROR, "datatype is not supported");

tests/datasets/FullyConnectedLayerDataset.h

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2017-2020 Arm Limited.
2+
* Copyright (c) 2017-2020, 2025 Arm Limited.
33
*
44
* SPDX-License-Identifier: MIT
55
*
@@ -21,8 +21,8 @@
2121
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2222
* SOFTWARE.
2323
*/
24-
#ifndef ARM_COMPUTE_TEST_FULLYCONNECTED_LAYER_DATASET
25-
#define ARM_COMPUTE_TEST_FULLYCONNECTED_LAYER_DATASET
24+
#ifndef ACL_TESTS_DATASETS_FULLYCONNECTEDLAYERDATASET_H
25+
#define ACL_TESTS_DATASETS_FULLYCONNECTEDLAYERDATASET_H
2626

2727
#include "utils/TypePrinter.h"
2828

@@ -140,6 +140,7 @@ class TinyFullyConnectedLayerDataset final : public FullyConnectedLayerDataset
140140
add_config(TensorShape(1U, 3U), TensorShape(1U, 10U), TensorShape(10U), TensorShape(10U, 3U));
141141
}
142142
};
143+
143144
class SmallFullyConnectedLayerDataset final : public FullyConnectedLayerDataset
144145
{
145146
public:
@@ -166,6 +167,22 @@ class SmallFullyConnectedLayerDataset final : public FullyConnectedLayerDataset
166167
}
167168
};
168169

170+
class SmallFCFCFullyConnectedLayerDataset final : public FullyConnectedLayerDataset
171+
{
172+
public:
173+
SmallFCFCFullyConnectedLayerDataset()
174+
{
175+
// FC -> FC
176+
add_config(TensorShape(1U), TensorShape(1U, 10U), TensorShape(10U), TensorShape(10U));
177+
// FC -> FC (batched)
178+
add_config(TensorShape(1U, 3U), TensorShape(1U, 10U), TensorShape(10U), TensorShape(10U, 3U));
179+
// FC -> FC
180+
add_config(TensorShape(201U), TensorShape(201U, 529U), TensorShape(529U), TensorShape(529U));
181+
// FC -> FC (batched)
182+
add_config(TensorShape(201U, 3U), TensorShape(201U, 529U), TensorShape(529U), TensorShape(529U, 3U));
183+
}
184+
};
185+
169186
class LargeFullyConnectedLayerDataset final : public FullyConnectedLayerDataset
170187
{
171188
public:
@@ -183,4 +200,4 @@ class LargeFullyConnectedLayerDataset final : public FullyConnectedLayerDataset
183200
} // namespace datasets
184201
} // namespace test
185202
} // namespace arm_compute
186-
#endif /* ARM_COMPUTE_TEST_FULLYCONNECTED_LAYER_DATASET */
203+
#endif // ACL_TESTS_DATASETS_FULLYCONNECTEDLAYERDATASET_H

tests/validation/fixtures/CpuFullyConnectedFixture.h

Lines changed: 57 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,8 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
7070
DataType data_type,
7171
QuantizationInfo quantization_info,
7272
ActivationLayerInfo activation_info,
73-
TestType test_type)
73+
TestType test_type,
74+
bool with_bias = true)
7475
{
7576
if (std::is_same<TensorType, Tensor>::value && // Cpu
7677
data_type == DataType::F16 && !CPUInfo::get().has_fp16())
@@ -92,8 +93,8 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
9293

9394
_activation_info = activation_info;
9495

95-
compute_target(input_shape, weights_shape, bias_shape, output_shape);
96-
compute_reference(input_shape, weights_shape, bias_shape, output_shape);
96+
compute_target(input_shape, weights_shape, bias_shape, output_shape, with_bias);
97+
compute_reference(input_shape, weights_shape, bias_shape, output_shape, with_bias);
9798
}
9899

99100
protected:
@@ -147,7 +148,8 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
147148
void compute_target(const TensorShape &input_shape,
148149
const TensorShape &weights_shape,
149150
const TensorShape &bias_shape,
150-
const TensorShape &output_shape)
151+
const TensorShape &output_shape,
152+
bool with_bias)
151153
{
152154
TensorShape reshaped_weights_shape(weights_shape);
153155

@@ -181,15 +183,16 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
181183
{
182184
src[i] = create_tensor<TensorType>(input_shape, _data_type, 1, _input_q_info);
183185
weights[i] = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _weight_q_info);
184-
bias[i] = create_tensor<TensorType>(bias_shape, _bias_data_type, 1);
186+
bias[i] = with_bias ? create_tensor<TensorType>(bias_shape, _bias_data_type, 1) : nullptr;
185187
dst[i] = create_tensor<TensorType>(output_shape, _data_type, 1, _dst_q_info);
186-
weights[i].info()->set_are_values_constant(false);
188+
weights[i].info()->set_are_values_constant(false);
187189
}
188190
tmp_weights = create_tensor<TensorType>(weights_shape, _data_type, 1, _weight_q_info);
189191
tmp_weights.allocator()->allocate();
190192

191193
const bool kernel_found =
192-
bool(FunctionType::has_opt_impl(computed_weight_format, src[0].info(), weights[0].info(), bias[0].info(),
194+
bool(FunctionType::has_opt_impl(computed_weight_format, src[0].info(), weights[0].info(),
195+
with_bias? bias[0].info() : nullptr,
193196
dst[0].info(), fc_info, wei_info));
194197
ARM_COMPUTE_ASSERT(kernel_found);
195198
wei_info.set_weight_format(computed_weight_format);
@@ -201,36 +204,47 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
201204
reordered_weights[i].info()->set_is_resizable(true);
202205
}
203206

204-
// Create and configure function.
207+
// Create, configure and validate function.
205208
FunctionType fc;
206-
fc.configure(src[0].info(), weights[0].info(), bias[0].info(), dst[0].info(), fc_info, wei_info);
209+
fc.configure(src[0].info(), weights[0].info(),
210+
with_bias? bias[0].info() : nullptr,
211+
dst[0].info(), fc_info, wei_info);
207212
auto const aux_mem_req = fc.workspace();
208213

214+
ARM_COMPUTE_ASSERT(fc.validate(src[0].info(), weights[0].info(),
215+
with_bias? bias[0].info() : nullptr,
216+
dst[0].info(), fc_info, wei_info));
217+
209218
for (int i = 0; i < _num_parallel_runs; ++i)
210219
{
211220
ARM_COMPUTE_ASSERT(src[i].info()->is_resizable());
212221
ARM_COMPUTE_ASSERT(weights[i].info()->is_resizable());
213222
ARM_COMPUTE_ASSERT(reordered_weights[i].info()->is_resizable());
214-
ARM_COMPUTE_ASSERT(bias[i].info()->is_resizable());
215223
ARM_COMPUTE_ASSERT(dst[i].info()->is_resizable());
216224

217225
// Allocate tensors
218226
src[i].allocator()->allocate();
219227
weights[i].allocator()->allocate();
220228
reordered_weights[i].allocator()->allocate();
221-
bias[i].allocator()->allocate();
222229
dst[i].allocator()->allocate();
223230

224231
ARM_COMPUTE_ASSERT(!src[i].info()->is_resizable());
225232
ARM_COMPUTE_ASSERT(!weights[i].info()->is_resizable());
226233
ARM_COMPUTE_ASSERT(!reordered_weights[i].info()->is_resizable());
227-
ARM_COMPUTE_ASSERT(!bias[i].info()->is_resizable());
228234
ARM_COMPUTE_ASSERT(!dst[i].info()->is_resizable());
229235

230236
// Fill tensors
231237
fill(AccessorType(src[i]), 0 + i * 3);
232238
fill(AccessorType(tmp_weights), 1 + i * 3);
233-
fill(AccessorType(bias[i]), 2 + i * 3);
239+
240+
// Handle optional bias
241+
if(with_bias)
242+
{
243+
ARM_COMPUTE_ASSERT(bias[i].info()->is_resizable());
244+
bias[i].allocator()->allocate();
245+
ARM_COMPUTE_ASSERT(!bias[i].info()->is_resizable());
246+
fill(AccessorType(bias[i]), 2 + i * 3);
247+
}
234248

235249
// Reorder weight to the expected format
236250
ARM_COMPUTE_ASSERT(reorder.validate(tmp_weights.info(), reordered_weights[i].info(), WeightFormat::OHWI,
@@ -240,9 +254,9 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
240254
}
241255

242256
// Prepare function.
243-
prep_pack[0].add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &reordered_weights[0]);
244-
prep_pack[0].add_const_tensor(arm_compute::TensorType::ACL_SRC_2, &bias[0]);
245-
fc.prepare(prep_pack[0]);
257+
prep_pack[0].add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &reordered_weights[0]);
258+
prep_pack[0].add_const_tensor(arm_compute::TensorType::ACL_SRC_2, &bias[0]);
259+
fc.prepare(prep_pack[0]);
246260

247261
if (_test_type == TestType::ConfigureOnceRunMultiThreaded)
248262
{
@@ -295,20 +309,26 @@ class CpuFullyConnectedValidationGenericFixture : public framework::Fixture
295309
void compute_reference(const TensorShape &input_shape,
296310
const TensorShape &weights_shape,
297311
const TensorShape &bias_shape,
298-
const TensorShape &output_shape)
312+
const TensorShape &output_shape,
313+
bool with_bias)
299314
{
300315
// Create reference
301316
SimpleTensor<T> ref_src{input_shape, _data_type, 1, _input_q_info};
302317
SimpleTensor<T> ref_weights{weights_shape, _data_type, 1, _weight_q_info};
303318
SimpleTensor<TBias> ref_bias{bias_shape, _bias_data_type, 1, QuantizationInfo()};
319+
304320
for (int i = 0; i < _num_parallel_runs; ++i)
305321
{
306322
// Fill reference
307323
fill(ref_src, 0 + i * 3);
308324
fill(ref_weights, 1 + i * 3);
309-
fill(ref_bias, 2 + i * 3);
310325

311-
_reference[i] = reference::activation_layer(reference::fully_connected_layer<T>(ref_src, ref_weights, ref_bias, output_shape, _dst_q_info), _activation_info, _dst_q_info);
326+
if(with_bias)
327+
{
328+
fill(ref_bias, 2 + i * 3);
329+
}
330+
331+
_reference[i] = reference::activation_layer(reference::fully_connected_layer<T>(ref_src, ref_weights, ref_bias, output_shape, _dst_q_info, with_bias), _activation_info, _dst_q_info);
312332
}
313333
}
314334

@@ -344,6 +364,24 @@ class CpuFullyConnectedValidationFixture
344364
}
345365
};
346366

367+
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
368+
class CpuFullyConnectedValidationFixtureNoBias
369+
: public CpuFullyConnectedValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
370+
{
371+
public:
372+
void setup(TensorShape input_shape,
373+
TensorShape weights_shape,
374+
TensorShape bias_shape,
375+
TensorShape output_shape,
376+
DataType data_type,
377+
ActivationLayerInfo activation_info)
378+
{
379+
CpuFullyConnectedValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(
380+
input_shape, weights_shape, bias_shape, output_shape, data_type,
381+
QuantizationInfo(), activation_info, TestType::ConfigureOnceRunOnce, false);
382+
}
383+
};
384+
347385
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
348386
class CpuFullyConnectedThreadSafeValidationFixture
349387
: public CpuFullyConnectedValidationGenericFixture<TensorType, AccessorType, FunctionType, T>

tests/validation/reference/FullyConnectedLayer.cpp

Lines changed: 43 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2017-2021 Arm Limited.
2+
* Copyright (c) 2017-2021, 2025 Arm Limited.
33
*
44
* SPDX-License-Identifier: MIT
55
*
@@ -42,30 +42,49 @@ namespace
4242
{
4343
// Vector matrix multiply for floating point
4444
template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
45-
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
46-
int rows_weights)
45+
void vector_matrix_multiply(const SimpleTensor<T> &src,
46+
const SimpleTensor<T> &weights,
47+
const SimpleTensor<TB> &bias,
48+
SimpleTensor<T> &dst,
49+
int offset_src,
50+
int offset_dst,
51+
int cols_weights,
52+
int rows_weights,
53+
bool with_bias = true)
4754
{
4855
const T *src_ptr = src.data() + offset_src;
4956
const T *weights_ptr = weights.data();
50-
const TB *bias_ptr = bias.data();
57+
const TB *bias_ptr = with_bias? bias.data() : nullptr;
5158
T *dst_ptr = dst.data() + offset_dst;
5259
#if defined(_OPENMP)
5360
#pragma omp parallel for
5461
#endif /* _OPENMP */
5562
for(int y = 0; y < rows_weights; ++y)
5663
{
57-
dst_ptr[y] = std::inner_product(src_ptr, src_ptr + cols_weights, &weights_ptr[cols_weights * y], static_cast<T>(0)) + bias_ptr[y];
64+
if(with_bias)
65+
{
66+
dst_ptr[y] = std::inner_product(src_ptr, src_ptr + cols_weights, &weights_ptr[cols_weights * y], static_cast<T>(0)) + bias_ptr[y];
67+
}
68+
else
69+
{
70+
dst_ptr[y] = std::inner_product(src_ptr, src_ptr + cols_weights, &weights_ptr[cols_weights * y], static_cast<T>(0));
71+
}
5872
}
5973
}
6074

6175
// Vector matrix multiply for quantized type
6276
template < typename T, typename TB, typename std::enable_if < (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value) &&std::is_same<TB, int32_t>::value, int >::type = 0 >
63-
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
64-
int cols_weights, int rows_weights)
77+
void vector_matrix_multiply(const SimpleTensor<T> &src,
78+
const SimpleTensor<T> &weights,
79+
const SimpleTensor<TB> &bias,
80+
SimpleTensor<T> &dst,
81+
int offset_src, int offset_dst,
82+
int cols_weights, int rows_weights,
83+
bool with_bias = true)
6584
{
6685
const T *src_ptr = src.data() + offset_src;
6786
const T *weights_ptr = weights.data();
68-
const TB *bias_ptr = bias.data();
87+
const TB *bias_ptr = with_bias? bias.data() : nullptr;
6988
T *dst_ptr = dst.data() + offset_dst;
7089

7190
const UniformQuantizationInfo iq_info = src.quantization_info().uniform();
@@ -100,7 +119,10 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
100119
}
101120

102121
// Accumulate the bias
103-
acc += bias_ptr[y];
122+
if(with_bias)
123+
{
124+
acc += bias_ptr[y];
125+
}
104126

105127
// Quantize down
106128
acc = quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset, min, max);
@@ -112,7 +134,12 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
112134
} // namespace
113135

114136
template <typename T, typename TB>
115-
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape, QuantizationInfo out_quant_info)
137+
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src,
138+
const SimpleTensor<T> &weights,
139+
const SimpleTensor<TB> &bias,
140+
const TensorShape &dst_shape,
141+
QuantizationInfo out_quant_info,
142+
bool with_bias)
116143
{
117144
// if no explicit quantization has been set you the same as src
118145
if(out_quant_info == QuantizationInfo())
@@ -152,20 +179,21 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
152179
offset_in,
153180
offset_out,
154181
cols_weights,
155-
rows_weights);
182+
rows_weights,
183+
with_bias);
156184
}
157185

158186
return dst;
159187
}
160188

161189
template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape,
162-
QuantizationInfo out_quant_info);
190+
QuantizationInfo out_quant_info, bool with_bias);
163191
template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape,
164-
QuantizationInfo out_quant_info);
192+
QuantizationInfo out_quant_info, bool with_bias);
165193
template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape,
166-
QuantizationInfo out_quant_info);
194+
QuantizationInfo out_quant_info, bool with_bias);
167195
template SimpleTensor<int8_t> fully_connected_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape,
168-
QuantizationInfo out_quant_info);
196+
QuantizationInfo out_quant_info, bool with_bias);
169197
} // namespace reference
170198
} // namespace validation
171199
} // namespace test

tests/validation/reference/FullyConnectedLayer.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2017-2019 Arm Limited.
2+
* Copyright (c) 2017-2019, 2025 Arm Limited.
33
*
44
* SPDX-License-Identifier: MIT
55
*
@@ -21,8 +21,8 @@
2121
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2222
* SOFTWARE.
2323
*/
24-
#ifndef ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_H
25-
#define ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_H
24+
#ifndef ACL_TESTS_VALIDATION_REFERENCE_FULLYCONNECTEDLAYER_H
25+
#define ACL_TESTS_VALIDATION_REFERENCE_FULLYCONNECTEDLAYER_H
2626

2727
#include "tests/SimpleTensor.h"
2828
#include "tests/validation/Helpers.h"
@@ -37,9 +37,9 @@ namespace reference
3737
{
3838
template <typename T, typename TB>
3939
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape,
40-
QuantizationInfo out_quant_info = QuantizationInfo());
40+
QuantizationInfo out_quant_info = QuantizationInfo(), bool with_bias = true);
4141
} // namespace reference
4242
} // namespace validation
4343
} // namespace test
4444
} // namespace arm_compute
45-
#endif /* ARM_COMPUTE_TEST_FULLY_CONNECTED_LAYER_H */
45+
#endif // ACL_TESTS_VALIDATION_REFERENCE_FULLYCONNECTEDLAYER_H

0 commit comments

Comments
 (0)