diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/compiler.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/compiler.cpp index 66240e8736cf4f..347706aee94618 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/compiler.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/compiler.cpp @@ -29,6 +29,7 @@ void dump_partitioning(const ov::npuw::Ensemble& ens, const std::string& to) { pugi::xml_node node = doc.append_child("ensemble"); node.append_attribute("gflops") = std::to_string(ens.gflops).data(); + node.append_attribute("irregular_results") = std::to_string(ens.irregular_results).data(); pugi::xml_node part = node.append_child("partitioning"); pugi::xml_node rep; @@ -83,6 +84,7 @@ void dump_partitioning(const ov::npuw::Ensemble& ens, const std::string& to) { doc.save_file(to.data()); } + } // namespace detail // Interface to get online partitioning from the model @@ -308,6 +310,7 @@ class Compiler { ov::npuw::Ensemble ens; ens.gflops = 1.; // FIXME: calculate proper flops + ens.irregular_results = !m_snapshot->isRegularResultCase(); auto graph = m_snapshot->getGraph(); // Iterate in topological order diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.cpp index 30fb8635783766..57e8297016f61a 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.cpp @@ -135,6 +135,10 @@ std::shared_ptr Group::getInitialNode() const { return *(m_content.begin()); } +const std::unordered_set>& Group::getOutputs() const { + return m_output_layers; +} + void Group::addInput(const std::shared_ptr& node) { m_input_layers.insert(node); } diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.hpp index 5ea48108d9ad57..d8b958162368dd 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/group.hpp @@ -49,6 +49,7 @@ class Group : public std::enable_shared_from_this { own::ade::NodeHandle getHandle() const; // Note: can only be used during initial group initialization std::shared_ptr getInitialNode() const; + const std::unordered_set>& getOutputs() const; void addInput(const std::shared_ptr& node); void addOutput(const std::shared_ptr& node); void addContent(const std::shared_ptr& node); diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.cpp index ff21829ff55eba..1c8a9129bc77c0 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.cpp @@ -1270,6 +1270,101 @@ void Snapshot::stripTag(const std::string& tag) { } } +bool Snapshot::isRegularResultCase() const { + LOG_INFO("Online partitioning: executing isRegularResultCase pass..."); + LOG_BLOCK(); + + // This method works around an issue where the final partitioning fails the sanity check + // because of a different number of output Convert across repeated block groups. + // The issue was initially observed in a model where only the final block has an additional ov::Result consumer. + // For example, Group[0..30] has only external consumers (i.e. 
consumers that belong to other groups): + // OpA -> OpB(external group) + // -> OpC(external group) + // but very last Group[31] has an additional ov::Result consumer: + // OpA -> ov::Result + // -> OpB(external group) + // -> OpC(external group) + // Later, if NPUW_F16IC is set, "Partitioner::identifySubgraphs" method adds output Converts to each Group[0..30], + // but skips Group[31] due to internal implementation details. + // "Partitioner::identifySubgraphs" can't: + // - add Convert to the Group[31] because it would require adding opposite Convert for the ov::Result + // - skip adding Converts to Group[0..30] because it would break symmetry of the repeated blocks, i.e. + // in the given graph `Convert(group0) -> output -> input -> Convert(group1)` input `Convert(group1)` should + // be also eliminated + // Therefore, we disable F16IC early in such cases. + + using NodeSPtr = std::shared_ptr; + std::unordered_map node_id_cache; + for (auto&& node_ptr : m_model->get_ordered_ops()) { + node_id_cache[node_ptr->get_friendly_name()] = node_ptr; + } + + auto getReadersMask = [](const NodeSPtr& node_ptr) { + // each element of the vector is + // the number of ov::Result readers for the corresponding output + std::vector mask; + for (auto&& output_desc : node_ptr->outputs()) { + auto readers = output_desc.get_target_inputs(); + int result_count = 0; + for (auto&& r : readers) { + auto reader_node_ptr = r.get_node()->shared_from_this(); + if (ov::op::util::is_output(reader_node_ptr)) { + result_count++; + } + } + mask.push_back(result_count); + } + return mask; + }; + + auto reptag_to_gset = repeating(); + if (!reptag_to_gset.empty()) { + NPUW_ASSERT(!m_layer_matches.empty()); + } + + for (const auto& reptag_and_gset : reptag_to_gset) { + auto reptag = reptag_and_gset.first; + auto gset = reptag_and_gset.second; + + auto matches = m_layer_matches.at(reptag->id()); + + if (gset.size() <= 1) { + continue; + } + + auto firstGroup = *(gset.begin()); + for (auto output_layer : firstGroup->getOutputs()) { + // this is the reference mask expected from all other matched layers + // in the remaining groups of the repeated block + auto expected_readers_mask = getReadersMask(output_layer); + + auto this_layer_name = output_layer->get_friendly_name(); + auto layer_bank_iter = std::find_if(matches.begin(), matches.end(), [&](const std::set& lrs) { + return lrs.count(this_layer_name) > 0; + }); + + NPUW_ASSERT(layer_bank_iter != matches.end()); + + // match output layers across all groups in the repeated block + // and compare their readers mask + for (const auto& layer_name : *layer_bank_iter) { + auto layer_ptr = node_id_cache.at(layer_name); + auto actual_readers_mask = getReadersMask(layer_ptr); + + if (actual_readers_mask != expected_readers_mask) { + LOG_INFO("This is NOT a regular result case. 
Readers mask mismatch found for " + << layer_name << " and " << this_layer_name << " output layers."); + return false; + } + } + } + } + + LOG_INFO("This is a regular result case"); + LOG_INFO("DONE"); + return true; +} + size_t Snapshot::getNextRepId() { return m_current_rep_count++; } diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.hpp index 1cc79dd02fd4de..d57748da4df464 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/online/snapshot.hpp @@ -56,6 +56,9 @@ class Snapshot : public std::enable_shared_from_this { void stripTag(const std::string& tag); + // Passes to detect corner cases + bool isRegularResultCase() const; + // Utility std::shared_ptr getGraph() const; const detail::OVPortsMap& getPortsMap() const; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp index a3c2f894fe52cb..23a43914255643 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp @@ -182,7 +182,10 @@ ov::npuw::Ensemble load_groups(const std::shared_ptr& model, const st LOG_INFO("Found " << repeated.size() << " different repeated block(s)"); - return ov::npuw::Ensemble{get_float_attr(root, "gflops"), std::move(partitions), std::move(repeated)}; + return ov::npuw::Ensemble{get_float_attr(root, "gflops"), + get_bool_attr(root, "irregular_results", false), + std::move(partitions), + std::move(repeated)}; } class Partitioner { @@ -376,7 +379,7 @@ void Partitioner::identifySubgraphs() { LOG_INFO("Identifying subgraphs for model " << model->get_friendly_name() << "..."); LOG_BLOCK(); - const bool connect_in_f16 = cfg.get<::intel_npu::NPUW_F16IC>(); + const bool connect_in_f16 = cfg.get<::intel_npu::NPUW_F16IC>() && !ens.irregular_results; using namespace ov::npuw; std::vector& partitions = ens.groups; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.hpp index d57d98f8cece88..ea24ace33d3378 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.hpp @@ -158,6 +158,7 @@ struct RepeatedBlock { struct Ensemble { float gflops; + bool irregular_results; std::vector groups; // Just a map as I don't expect 100s of _different_ diff --git a/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.cpp b/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.cpp index 8d8861c64aa00e..2da154ede57c96 100644 --- a/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.cpp +++ b/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.cpp @@ -4,10 +4,9 @@ #include "model_generator.hpp" +#include "openvino/op/ops.hpp" #include "openvino/openvino.hpp" #include "openvino/opsets/opset11.hpp" -#include "openvino/op/ops.hpp" - std::shared_ptr ModelGenerator::get_model_with_one_op() { auto param = std::make_shared(ov::element::i64, ov::PartialShape{1, 3, 2, 2}); @@ -22,7 +21,8 @@ std::shared_ptr ModelGenerator::get_model_with_one_op() { } std::shared_ptr ModelGenerator::get_model_without_repeated_blocks() { - std::shared_ptr input = std::make_shared(ov::element::i32, ov::Shape{1, 1, 40}); + std::shared_ptr input 
= + std::make_shared(ov::element::i32, ov::Shape{1, 1, 40}); m_nodes.push_back(input); set_name(input); @@ -39,8 +39,19 @@ std::shared_ptr ModelGenerator::get_model_without_repeated_blocks() { } std::shared_ptr ModelGenerator::get_model_with_repeated_blocks(std::size_t repetitions) { + return get_model_with_repeated_blocks_and_results(repetitions, {}); +} + +std::shared_ptr ModelGenerator::get_model_with_repeated_blocks() { + return get_model_with_repeated_blocks(10); +} + +std::shared_ptr ModelGenerator::get_model_with_repeated_blocks_and_results( + std::size_t repetitions, + const std::vector& block_indices) { // Generate head - std::shared_ptr input = std::make_shared(ov::element::i32, ov::Shape{1, 1, 40}); + std::shared_ptr input = + std::make_shared(ov::element::i32, ov::Shape{1, 1, 40}); m_nodes.push_back(input); set_name(input); @@ -60,18 +71,20 @@ std::shared_ptr ModelGenerator::get_model_with_repeated_blocks(std::s // Generate repeated blocks std::shared_ptr output = get_block(head[6]); - std::vector> outputs; - outputs.push_back(output); + std::vector> block_outputs; + block_outputs.push_back(output); for (size_t i = 0; i < repetitions - 1; ++i) { output = get_block(output); - outputs.push_back(output); + block_outputs.push_back(output); } // Generate tail std::vector> tail(6, nullptr); - tail[0] = std::make_shared(outputs, -1); - tail[1] = std::make_shared(ov::element::i32, ov::Shape{3}, std::vector{1, 40, int(repetitions)}); + tail[0] = std::make_shared(block_outputs, -1); + tail[1] = std::make_shared(ov::element::i32, + ov::Shape{3}, + std::vector{1, 40, int(repetitions)}); tail[2] = std::make_shared(tail[0], tail[1], false); tail[3] = std::make_shared(ov::element::i32, ov::Shape{1, 1, 1}); tail[4] = std::make_shared(tail[2], tail[3]); @@ -82,19 +95,117 @@ std::shared_ptr ModelGenerator::get_model_with_repeated_blocks(std::s set_name(t); } + // Create Results + ov::ResultVector results; + + // Add Results for specified blocks + for (size_t idx : block_indices) { + if (idx < block_outputs.size()) { + auto result = std::make_shared(block_outputs[idx]); + m_nodes.push_back(result); + set_name(result); + results.push_back(result); + } + } + // Create model - auto result = std::make_shared(tail[5]); - m_nodes.push_back(result); - set_name(result); + // Always add final tail Result + auto final_result = std::make_shared(tail[5]); + m_nodes.push_back(final_result); + set_name(final_result); + results.push_back(final_result); ov::ParameterVector params = {input}; - ov::ResultVector results = {result}; return std::make_shared(results, params); } -std::shared_ptr ModelGenerator::get_model_with_repeated_blocks() { - return get_model_with_repeated_blocks(10); +std::shared_ptr ModelGenerator::get_model_with_multi_output_repeating_blocks( + std::size_t repetitions, + bool last_block_has_direct_result) { + if (repetitions == 0) { + repetitions = 1; // keep the model non-empty + } + + auto input = std::make_shared(ov::element::f32, ov::Shape{1, 1, 8}); + m_nodes.push_back(input); + set_name(input); + + // Shared constants + auto add_const = ov::opset11::Constant::create(ov::element::f32, ov::Shape{1}, {1.f}); + auto k_const = ov::opset11::Constant::create(ov::element::i64, ov::Shape{}, {8}); + auto seed_indices = ov::opset11::Constant::create(ov::element::i32, ov::Shape{1, 1, 8}, {0, 1, 2, 3, 4, 5, 6, 7}); + auto tail_scale = ov::opset11::Constant::create(ov::element::f32, ov::Shape{1}, {0.5f}); + auto tail_bias = ov::opset11::Constant::create(ov::element::f32, ov::Shape{1}, {2.f}); + + for 
(const auto& c : {add_const, k_const, seed_indices, tail_scale, tail_bias}) { + m_nodes.push_back(c); + set_name(c); + } + + ov::Output current_values = input; + ov::Output current_indices = seed_indices; + + for (std::size_t i = 0; i < repetitions; ++i) { + // Build block body; TopK remains the final op of each block to expose multiple outputs + auto indices_as_float = std::make_shared(current_indices, ov::element::f32); + m_nodes.push_back(indices_as_float); + set_name(indices_as_float); + + auto mixed = std::make_shared(current_values, indices_as_float); + m_nodes.push_back(mixed); + set_name(mixed); + + auto shifted = std::make_shared(mixed, add_const); + m_nodes.push_back(shifted); + set_name(shifted); + + auto topk = std::make_shared(shifted, + k_const, + -1, + ov::op::TopKMode::MAX, + ov::op::TopKSortType::SORT_VALUES, + ov::element::i32); + m_nodes.push_back(topk); + set_name(topk); + + current_values = topk->output(0); + current_indices = topk->output(1); + } + + // Tail consumes the final block outputs + auto tail_indices_as_float = std::make_shared(current_indices, ov::element::f32); + m_nodes.push_back(tail_indices_as_float); + set_name(tail_indices_as_float); + + auto tail_mixed = std::make_shared(current_values, tail_indices_as_float); + m_nodes.push_back(tail_mixed); + set_name(tail_mixed); + + auto tail_mul = std::make_shared(tail_mixed, tail_scale); + m_nodes.push_back(tail_mul); + set_name(tail_mul); + + auto tail_add = std::make_shared(tail_mul, tail_bias); + m_nodes.push_back(tail_add); + set_name(tail_add); + + ov::ResultVector results; + auto tail_result = std::make_shared(tail_add); + m_nodes.push_back(tail_result); + set_name(tail_result); + results.push_back(tail_result); + + if (last_block_has_direct_result) { + auto direct_result = std::make_shared(current_values); + m_nodes.push_back(direct_result); + set_name(direct_result); + results.push_back(direct_result); + } + + ov::ParameterVector params = {input}; + + return std::make_shared(results, params); } std::shared_ptr ModelGenerator::get_block(const std::shared_ptr& input) { @@ -149,17 +260,17 @@ std::shared_ptr ModelGenerator::get_block(const std::shared_ptr(op[5]); op[7] = std::make_shared(model_c[5], model_c[6], op[6], model_c[7]); op[8] = std::make_shared(op[2], - model_c[8], - op[7], - model_c[9], - std::vector{1, 1, 1, 1}, - std::vector{1, 1, 1, 1}); + model_c[8], + op[7], + model_c[9], + std::vector{1, 1, 1, 1}, + std::vector{1, 1, 1, 1}); op[9] = std::make_shared(op[2], - op[7], - model_c[10], - model_c[11], - std::vector{1, 1, 1, 1}, - std::vector{1, 1, 1, 1}); + op[7], + model_c[10], + model_c[11], + std::vector{1, 1, 1, 1}, + std::vector{1, 1, 1, 1}); op[10] = std::make_shared(op[9], convert[2]); op[11] = std::make_shared(std::vector>{op[10], op[8]}, -1); op[12] = std::make_shared(model_c[13], op[11]); diff --git a/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.hpp b/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.hpp index ebc7869d20c45a..d41c9e2dc8e14e 100644 --- a/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.hpp +++ b/src/plugins/intel_npu/tests/unit/npuw/model_generator/model_generator.hpp @@ -15,6 +15,23 @@ class ModelGenerator { std::shared_ptr get_model_with_repeated_blocks(std::size_t repetitions); std::shared_ptr get_model_with_repeated_blocks(); + // Build model with repeating blocks and configurable ov::Result consumers: + // - repetitions: number of repeating blocks + // - block_indices: vector of block indices 
(0-based) that should have ov::Result consumers + // empty vector means no additional Results, only the final tail Result + std::shared_ptr get_model_with_repeated_blocks_and_results( + std::size_t repetitions, + const std::vector& block_indices); + + // Build model with repeating blocks where the final op in each block has multiple outputs (TopK values + indices). + // - repetitions: number of repeating blocks + // - last_block_has_direct_result: + // Option1 (false): for all blocks, multi-output node feeds only the next block; last block feeds only the + // tail Option2 (true): same as above, plus the last block also feeds a direct ov::Result from one of its + // outputs + std::shared_ptr get_model_with_multi_output_repeating_blocks(std::size_t repetitions, + bool last_block_has_direct_result); + private: std::shared_ptr get_block(const std::shared_ptr& input); void set_name(const std::shared_ptr& node); diff --git a/src/plugins/intel_npu/tests/unit/npuw/online_partitioning.cpp b/src/plugins/intel_npu/tests/unit/npuw/online_partitioning.cpp index e86487a1999e71..87308e7984762a 100644 --- a/src/plugins/intel_npu/tests/unit/npuw/online_partitioning.cpp +++ b/src/plugins/intel_npu/tests/unit/npuw/online_partitioning.cpp @@ -8,13 +8,24 @@ #include "intel_npu/config/config.hpp" #include "intel_npu/config/npuw.hpp" +#include "model_generator/model_generator.hpp" #include "openvino/op/ops.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/openvino.hpp" #include "partitioning/online/compiler.hpp" #include "partitioning/online/group.hpp" #include "partitioning/online/snapshot.hpp" -#include "model_generator/model_generator.hpp" + +namespace { + +::intel_npu::Config createConfigWithKeepBlockSize(std::size_t size) { + auto opt_desc = std::make_shared<::intel_npu::OptionsDesc>(); + auto cfg = ::intel_npu::Config(opt_desc); + ::intel_npu::registerNPUWOptions(*opt_desc); + std::map cfg_map = {{"NPUW_ONLINE_KEEP_BLOCK_SIZE", std::to_string(size)}}; + cfg.update(cfg_map); + return cfg; +} bool isEqualEns(ov::npuw::Ensemble& ens1, ov::npuw::Ensemble& ens2); bool isEqualEns(ov::npuw::Ensemble& ens1, ov::npuw::Ensemble& ens2) { @@ -89,16 +100,15 @@ bool isEqualEns(ov::npuw::Ensemble& ens1, ov::npuw::Ensemble& ens2) { return true; } +class IsRegularResultCaseParametrized : public ::testing::TestWithParam, bool>> {}; + +}; // namespace + TEST(OnlinePartitioningTest, Partitioning_IsTheSame_SmallModel) { ModelGenerator mg; auto model = mg.get_model_without_repeated_blocks(); - auto opt_desc = std::make_shared<::intel_npu::OptionsDesc>(); - auto cfg = ::intel_npu::Config(opt_desc); - ::intel_npu::registerNPUWOptions(*opt_desc); - std::map cfg_map = {{"NPUW_ONLINE_KEEP_BLOCK_SIZE", "9"}}; - cfg.update(cfg_map); - + auto cfg = createConfigWithKeepBlockSize(9); auto ens = ov::npuw::online::buildPartitioning(model, cfg); for (size_t i = 0; i < 100; ++i) { @@ -111,12 +121,7 @@ TEST(OnlinePartitioningTest, Partitioning_IsTheSame_RepeatedModel) { ModelGenerator mg; auto model = mg.get_model_with_repeated_blocks(); - auto opt_desc = std::make_shared<::intel_npu::OptionsDesc>(); - auto cfg = ::intel_npu::Config(opt_desc); - ::intel_npu::registerNPUWOptions(*opt_desc); - std::map cfg_map = {{"NPUW_ONLINE_KEEP_BLOCK_SIZE", "9"}}; - cfg.update(cfg_map); - + auto cfg = createConfigWithKeepBlockSize(9); auto ens = ov::npuw::online::buildPartitioning(model, cfg); for (size_t i = 0; i < 100; ++i) { @@ -527,7 +532,8 @@ TEST(OnlinePartitioningTest, Partitioning_Compiler_Compute_RepeatedModel) { } 
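    // Condensed usage sketch (illustrative only; names follow this patch, and the exact
    // expectations mirror the parametrized cases added further below): the config helper
    // above and the new ModelGenerator entry points combine like this to probe the flag:
    //
    //   ModelGenerator mg;
    //   // An extra ov::Result consumer only on the last of 10 repeated blocks makes the
    //   // readers of the matched output layers asymmetric across the block's groups.
    //   auto model = mg.get_model_with_repeated_blocks_and_results(10, {9});
    //   auto cfg = createConfigWithKeepBlockSize(9);
    //   auto ens = ov::npuw::online::buildPartitioning(model, cfg);
    //   EXPECT_TRUE(ens.irregular_results);  // identifySubgraphs() will then skip the F16IC Converts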
TEST(OnlinePartitioningTest, Partitioning_Avoids_Pipeline_None) { - std::shared_ptr input = std::make_shared(ov::element::i32, ov::Shape{1}); + std::shared_ptr input = + std::make_shared(ov::element::i32, ov::Shape{1}); input->set_friendly_name("input"); auto n1 = std::make_shared(input, input); @@ -563,3 +569,58 @@ TEST(OnlinePartitioningTest, Partitioning_Avoids_Pipeline_None) { EXPECT_EQ(ens.groups.size(), 3); } + +TEST(OnlinePartitioningTest, IsRegularResultCaseMultipleOutputs) { + ModelGenerator mg; + std::vector, bool>> model_expected = { + {mg.get_model_with_multi_output_repeating_blocks(10, /*irregular_results=*/true), /*irregular_results=*/true}, + {mg.get_model_with_multi_output_repeating_blocks(10, /*irregular_results=*/false), + /*irregular_results=*/false}}; + + for (auto [model, expected_result] : model_expected) { + auto cfg = createConfigWithKeepBlockSize(3); + auto ens = ov::npuw::online::buildPartitioning(model, cfg); + + EXPECT_EQ(ens.irregular_results, expected_result); + } +} + +TEST(OnlinePartitioningTest, IsRegularResultCaseWhenNoRB) { + bool expected_result = false; + + ModelGenerator mg; + std::vector> models = {mg.get_model_with_one_op(), + mg.get_model_without_repeated_blocks()}; + + for (auto model : models) { + auto cfg = createConfigWithKeepBlockSize(9); + auto ens = ov::npuw::online::buildPartitioning(model, cfg); + + EXPECT_EQ(ens.irregular_results, expected_result); + } +} + +TEST_P(IsRegularResultCaseParametrized, CheckForDifferentResultConfigs) { + auto [block_indices, expected_result] = GetParam(); + + ModelGenerator mg; + auto model = mg.get_model_with_repeated_blocks_and_results(10, block_indices); + + auto cfg = createConfigWithKeepBlockSize(9); + auto ens = ov::npuw::online::buildPartitioning(model, cfg); + + EXPECT_EQ(ens.irregular_results, expected_result); +} + +INSTANTIATE_TEST_SUITE_P(OnlinePartitioningTest, + IsRegularResultCaseParametrized, + ::testing::Values( + // All blocks have an ov::Result consumer + std::make_tuple(std::vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + /*irregular_results=*/false), + // Some blocks have an ov::Result consumer + std::make_tuple(std::vector{2, 5, 8}, /*irregular_results=*/true), + // Only last block has an additional ov::Result consumer + std::make_tuple(std::vector{9}, /*irregular_results=*/true), + // No blocks have additional ov::Result consumers + std::make_tuple(std::vector{}, /*irregular_results=*/false)));
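For reference, the core of the new Snapshot::isRegularResultCase() pass is the per-output "readers mask". The snippet below is a minimal, self-contained sketch of that helper with the template types spelled out; it is not part of the patch, and the pass itself additionally walks m_layer_matches to compare masks across all groups of a repeated block. Any mismatch sets Ensemble::irregular_results, which in turn disables NPUW_F16IC in Partitioner::identifySubgraphs().

#include <memory>
#include <vector>

#include "openvino/core/node.hpp"
#include "openvino/op/util/op_types.hpp"

// For each output of `node`, count how many of its readers are ov::Result ops.
// Output layers that the repeated-block matcher pairs up must yield identical masks;
// a mismatch means only some instances of the block feed an ov::Result directly,
// i.e. the irregular case that forces F16IC off for the whole partitioning.
std::vector<int> readers_mask(const std::shared_ptr<ov::Node>& node) {
    std::vector<int> mask;
    for (auto&& output : node->outputs()) {
        int result_readers = 0;
        for (auto&& reader : output.get_target_inputs()) {
            if (ov::op::util::is_output(reader.get_node()->shared_from_this())) {
                ++result_readers;
            }
        }
        mask.push_back(result_readers);
    }
    return mask;
}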