Skip to content

Commit 1412558

Browse files
authored
[CPU][ARM] Fix RandomUniform TF node precision issue (#29474)
### Details:
- Add flag that disables FP16 compression for `RandomUniform` node
- Remove precision loss due to FP32->FP16->FP32 conversions

### Tickets:
- 156055
1 parent f7ee437 commit 1412558

File tree

3 files changed

+31
-4
lines changed

3 files changed

+31
-4
lines changed

src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include "openvino/op/mvn.hpp"
2525
#include "openvino/op/normalize_l2.hpp"
2626
#include "openvino/op/power.hpp"
27+
#include "openvino/op/random_uniform.hpp"
2728
#include "openvino/op/range.hpp"
2829
#include "openvino/op/reduce_max.hpp"
2930
#include "openvino/op/reduce_mean.hpp"
@@ -42,6 +43,7 @@
4243
#include "openvino/op/unsqueeze.hpp"
4344
#include "openvino/op/util/broadcast_base.hpp"
4445
#include "openvino/op/util/pad_base.hpp"
46+
#include "openvino/op/util/precision_sensitive_attribute.hpp"
4547
#include "openvino/op/variadic_split.hpp"
4648
#include "openvino/pass/manager.hpp"
4749
#include "openvino/pass/pattern/op/optional.hpp"
@@ -265,6 +267,33 @@ class MarkExp : public pass::MatcherPass {
265267
register_matcher(m, callback);
266268
}
267269
};
270+
271+
class MarkRandomUniform : public pass::MatcherPass {
272+
public:
273+
OPENVINO_MATCHER_PASS_RTTI("MarkRandomUniform");
274+
275+
MarkRandomUniform() {
276+
MATCHER_SCOPE(MarkRandomUniform);
277+
auto random_uniform_pattern = pattern::wrap_type<ov::op::v8::RandomUniform>();
278+
279+
matcher_pass_callback callback = [=](pattern::Matcher& m) {
280+
const auto& node = m.get_match_root();
281+
if (!node)
282+
return false;
283+
284+
disable_fp16_compression(node);
285+
for (const auto& output : node->outputs()) {
286+
for (const auto& out_inputs : output.get_target_inputs()) {
287+
mark_as_precision_sensitive(out_inputs);
288+
}
289+
}
290+
return false;
291+
};
292+
auto m = make_shared<pattern::Matcher>(random_uniform_pattern, matcher_name);
293+
register_matcher(m, callback);
294+
}
295+
};
296+
268297
/* MarkExpInReduceOpPath marks path that goes into ReduceSum and ReduceMean.
269298
* Values that go from Exp to ReduceSum/ReduceMean are precision
270299
* sensitive and should be kept in f32 precision for mixed inference.
@@ -435,6 +464,7 @@ bool MarkSugraphsToKeepInMixedPrecision::run_on_model(const shared_ptr<ov::Model
435464
REGISTER_PASS(manager, ov::pass::MarkFloatingPointRange)
436465
REGISTER_PASS(manager, MarkDivWithEps)
437466
REGISTER_PASS(manager, MarkExpInReduceOpPath)
467+
REGISTER_PASS(manager, MarkRandomUniform)
438468
REGISTER_PASS(manager, PropagateDownDisableSensitivityForQuantized)
439469

440470
// both Up and Down propagations are needed.

src/common/transformations/tests/common_optimizations/align_mixed_fp32_fp16_types_test.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,8 +198,7 @@ TEST_F(TransformationTestsF, align_mixed_fp16_fp32_with_rand_uniform) {
198198
auto minval = Constant::create(element::f32, Shape{}, {1});
199199
auto maxval = Constant::create(element::f32, Shape{}, {10});
200200
auto rand_uniform = make_shared<RandomUniform>(out_shape, minval, maxval, element::f32);
201-
auto rand_uniform_decompressed = make_shared<Convert>(rand_uniform, element::f32);
202-
auto rand_uniform_add_factor = make_shared<Add>(rand_uniform_decompressed, factor_const_decompressed);
201+
auto rand_uniform_add_factor = make_shared<Add>(rand_uniform, factor_const_decompressed);
203202

204203
auto mul_1 = make_shared<Multiply>(reduce_sum_1, rand_uniform_add_factor);
205204
auto convert_to_f16_1 = make_shared<Convert>(mul_1, element::f32);

tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,6 @@ def test_random_uniform(self, shape_value, shape_type, dtype, seed, seed2,
4242
ie_device, precision, ir_version, temp_dir):
4343
if dtype == np.float16 or dtype == np.float64:
4444
pytest.skip('156027: Incorrect specification of RandomUniform for float16 and float64 output type')
45-
if platform.machine() in ["aarch64", "arm64", "ARM64"]:
46-
pytest.skip("156055: accuracy error on ARM")
4745
if ie_device == 'GPU':
4846
pytest.skip('156056: Accuracy error on GPU')
4947
self._test(*self.create_tf_random_uniform_net(shape_value, shape_type, dtype, seed, seed2),

0 commit comments

Comments (0)