diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt
index 05aac548..97935a92 100644
--- a/apps/CMakeLists.txt
+++ b/apps/CMakeLists.txt
@@ -50,6 +50,7 @@ endif()
 if (COPT_FOUND)
 _add_executable( ilp_bsp_scheduler )
+_add_executable( ilp_hypergraph_partitioner )
 endif()
 endif()
diff --git a/apps/ilp_hypergraph_partitioner.cpp b/apps/ilp_hypergraph_partitioner.cpp
new file mode 100644
index 00000000..fd184358
--- /dev/null
+++ b/apps/ilp_hypergraph_partitioner.cpp
@@ -0,0 +1,165 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#include <filesystem>
#include <fstream>
#include <iostream>
#include <limits>
#include <string>
#include <vector>

#include "osp/auxiliary/misc.hpp"
#include "osp/graph_algorithms/directed_graph_path_util.hpp"
#include "osp/auxiliary/io/general_file_reader.hpp"
#include "osp/partitioning/model/hypergraph_utility.hpp"
#include "osp/partitioning/partitioners/generic_FM.hpp"
#include "osp/partitioning/partitioners/partitioning_ILP.hpp"
#include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp"
#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp"
#include "osp/auxiliary/io/hdag_graph_file_reader.hpp"
#include "osp/auxiliary/io/mtx_hypergraph_file_reader.hpp"
#include "osp/auxiliary/io/partitioning_file_writer.hpp"


using namespace osp;

using graph = computational_dag_vector_impl_def_int_t;
using hypergraph = Hypergraph_def_t;

int main(int argc, char *argv[]) {
    if (argc < 4) {
        std::cerr << "Usage: " << argv[0] << " <hypergraph file (.hdag or .mtx)> <nr of parts> <imbalance> [part_repl|full_repl]"
                  << std::endl;
        return 1;
    }

    std::string filename_hgraph = argv[1];
    std::string name_hgraph = filename_hgraph.substr(0, filename_hgraph.rfind("."));
    std::string file_ending = filename_hgraph.substr(filename_hgraph.rfind(".") + 1);
    if (!file_reader::isPathSafe(filename_hgraph)) {
        std::cerr << "Error: Unsafe file path (possible traversal or invalid type).\n";
        return 1;
    }

    std::cout << name_hgraph << std::endl;

    int nr_parts = std::stoi(argv[2]);
    if (nr_parts < 2 || nr_parts > 32) {
        std::cerr << "Argument nr_parts must be an integer between 2 and 32: " << nr_parts << std::endl;
        return 1;
    }

    float imbalance = std::stof(argv[3]);
    if (imbalance < 0.01 || imbalance > 0.99) {
        std::cerr << "Argument imbalance must be a float between 0.01 and 0.99: " << imbalance << std::endl;
        return 1;
    }

    unsigned replicate = 0;

    if (argc > 4 && std::string(argv[4]) == "part_repl") {
        replicate = 1;
    } else if (argc > 4 && std::string(argv[4]) == "full_repl") {
        replicate = 2;
    } else if (argc > 4) {
        std::cerr << "Unknown argument: " << argv[4] << ". Expected 'part_repl' or 'full_repl' for replication."
                  << std::endl;
        return 1;
    }

    PartitioningProblem<hypergraph> instance;

    bool file_status = true;
    if (file_ending == "hdag") {
        graph dag;
        file_status = file_reader::readComputationalDagHyperdagFormatDB(filename_hgraph, dag);
        if (file_status)
            instance.getHypergraph() = convert_from_cdag_as_hyperdag<hypergraph>(dag);
    } else if (file_ending == "mtx") {
        file_status = file_reader::readHypergraphMatrixMarketFormat(filename_hgraph, instance.getHypergraph());
    } else {
        std::cout << "Unknown file extension." << std::endl;
        return 1;
    }
    if (!file_status) {
        std::cout << "Reading input file failed." << std::endl;
        return 1;
    }

    instance.setNumberOfPartitions(static_cast<unsigned>(nr_parts));
    instance.setMaxWorkWeightViaImbalanceFactor(imbalance);

    Partitioning<hypergraph> initial_partition(instance);
    GenericFM<hypergraph> fm;
    for (size_t node = 0; node < instance.getHypergraph().num_vertices(); ++node)
        initial_partition.setAssignedPartition(node, static_cast<unsigned>(node % static_cast<size_t>(nr_parts)));
    if (nr_parts == 2)
        fm.ImprovePartitioning(initial_partition);
    if (nr_parts == 4 || nr_parts == 8 || nr_parts == 16 || nr_parts == 32)
        fm.RecursiveFM(initial_partition);

    if (replicate > 0) {

        PartitioningWithReplication<hypergraph> partition(instance);
        HypergraphPartitioningILPWithReplication<hypergraph> partitioner;

        for (size_t node = 0; node < instance.getHypergraph().num_vertices(); ++node)
            partition.setAssignedPartitions(node, {initial_partition.assignedPartition(node)});
        if (partition.satisfiesBalanceConstraint())
            partitioner.setUseInitialSolution(true);

        partitioner.setTimeLimitSeconds(600);
        if (replicate == 2)
            partitioner.setReplicationModel(HypergraphPartitioningILPWithReplication<hypergraph>::REPLICATION_MODEL_IN_ILP::GENERAL);

        auto solve_status = partitioner.computePartitioning(partition);

        if (solve_status == RETURN_STATUS::OSP_SUCCESS || solve_status == RETURN_STATUS::BEST_FOUND) {
            file_writer::write_txt(name_hgraph + "_" + std::to_string(nr_parts) + "_" + std::to_string(imbalance) +
                                   "_ILP_rep" + std::to_string(replicate) + ".txt", partition);
            std::cout << "Partitioning (with replication) computed with costs: " << partition.computeConnectivityCost() << std::endl;
        } else {
            std::cout << "Computing partition failed." << std::endl;
            return 1;
        }

    } else {

        Partitioning<hypergraph> partition(instance);
        HypergraphPartitioningILP<hypergraph> partitioner;

        for (size_t node = 0; node < instance.getHypergraph().num_vertices(); ++node)
            partition.setAssignedPartition(node, initial_partition.assignedPartition(node));
        if (partition.satisfiesBalanceConstraint())
            partitioner.setUseInitialSolution(true);

        partitioner.setTimeLimitSeconds(600);

        auto solve_status = partitioner.computePartitioning(partition);

        if (solve_status == RETURN_STATUS::OSP_SUCCESS || solve_status == RETURN_STATUS::BEST_FOUND) {
            file_writer::write_txt(name_hgraph + "_" + std::to_string(nr_parts) + "_" + std::to_string(imbalance) +
                                   "_ILP_rep" + std::to_string(replicate) + ".txt", partition);
            std::cout << "Partitioning computed with costs: " << partition.computeConnectivityCost() << std::endl;
        } else {
            std::cout << "Computing partition failed." << std::endl;
            return 1;
        }
    }
    return 0;
}
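For reference, a typical invocation of the new executable would look as follows (the input file name is hypothetical): `./ilp_hypergraph_partitioner matrix.mtx 4 0.05 part_repl` reads the hypergraph of `matrix.mtx`, builds an FM-based initial solution, and solves the partitioning ILP for 4 parts with 5% work imbalance and partial replication; on success the assignment lands in `matrix_4_0.050000_ILP_rep1.txt` (the imbalance factor is formatted by `std::to_string`).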
diff --git a/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp
new file mode 100644
index 00000000..7d0bbe8f
--- /dev/null
+++ b/include/osp/auxiliary/io/mtx_hypergraph_file_reader.hpp
@@ -0,0 +1,175 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Christos Matzoros, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include <filesystem>
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <vector>

#include "osp/partitioning/model/hypergraph.hpp"
#include "osp/auxiliary/io/filepath_checker.hpp"

namespace osp {
namespace file_reader {

// reads a matrix into Hypergraph format, where nonzeros are vertices, and rows/columns are hyperedges
template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
bool readHypergraphMatrixMarketFormat(std::ifstream& infile, Hypergraph<index_type, workw_type, memw_type, commw_type>& hgraph) {

    std::string line;

    // Skip comments or empty lines (robustly)
    while (std::getline(infile, line)) {
        if (line.empty() || line[0] == '%') continue;

        // Null byte check
        if (line.find('\0') != std::string::npos) {
            std::cerr << "Error: Null byte detected in header line.\n";
            return false;
        }

        if (line.size() > MAX_LINE_LENGTH) {
            std::cerr << "Error: Line too long, possible malformed or malicious file.\n";
            return false;
        }
        break; // We found the actual header line
    }

    if (infile.eof()) {
        std::cerr << "Error: Unexpected end of file while reading header.\n";
        return false;
    }

    int M_row = 0, M_col = 0, nEntries = 0;

    std::istringstream header_stream(line);
    if (!(header_stream >> M_row >> M_col >> nEntries) ||
        M_row <= 0 || M_col <= 0 || nEntries < 0) {
        std::cerr << "Error: Invalid header.\n";
        return false;
    }

    const index_type num_nodes = static_cast<index_type>(nEntries);

    hgraph.reset(num_nodes, 0);
    for (index_type node = 0; node < num_nodes; ++node) {
        hgraph.set_vertex_work_weight(node, static_cast<workw_type>(1));
        hgraph.set_vertex_memory_weight(node, static_cast<memw_type>(1));
    }

    std::vector<std::vector<index_type>> row_hyperedges(static_cast<size_t>(M_row));
    std::vector<std::vector<index_type>> column_hyperedges(static_cast<size_t>(M_col));

    int entries_read = 0;
    while (entries_read < nEntries && std::getline(infile, line)) {
        if (line.empty() || line[0] == '%') continue;
        if (line.size() > MAX_LINE_LENGTH) {
            std::cerr << "Error: Line too long.\n";
            return false;
        }

        std::istringstream entry_stream(line);
        int row = -1, col = -1;
        double val = 0.0;

        if (!(entry_stream >> row >> col >> val)) {
            std::cerr << "Error: Malformed matrix entry.\n";
            return false;
        }

        row -= 1; col -= 1; // Convert to 0-based

        if (row < 0 || col < 0 || row >= M_row || col >= M_col) {
            std::cerr << "Error: Matrix entry out of bounds.\n";
            return false;
        }

        if (static_cast<index_type>(row) >= num_nodes || static_cast<index_type>(col) >= num_nodes) {
            std::cerr << "Error: Index exceeds vertex type limit.\n";
            return false;
        }

        row_hyperedges[static_cast<size_t>(row)].push_back(static_cast<index_type>(entries_read));
        column_hyperedges[static_cast<size_t>(col)].push_back(static_cast<index_type>(entries_read));

        ++entries_read;
    }

    if (entries_read != nEntries) {
        std::cerr << "Error: Incomplete matrix entries.\n";
        return false;
    }

    while (std::getline(infile, line)) {
        if (!line.empty() && line[0] != '%') {
            std::cerr << "Error: Extra data after matrix content.\n";
            return false;
        }
    }
    for (index_type row = 0; row < static_cast<index_type>(M_row); ++row)
        if (!row_hyperedges[row].empty())
            hgraph.add_hyperedge(row_hyperedges[row]);

    for (index_type col = 0; col < static_cast<index_type>(M_col); ++col)
        if (!column_hyperedges[col].empty())
            hgraph.add_hyperedge(column_hyperedges[col]);

    return true;
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
bool readHypergraphMatrixMarketFormat(const std::string& filename, Hypergraph<index_type, workw_type, memw_type, commw_type>& hgraph) {
    // Ensure the file is .mtx format
    if (std::filesystem::path(filename).extension() != ".mtx") {
        std::cerr << "Error: Only .mtx files are accepted.\n";
        return false;
    }

    if (!isPathSafe(filename)) {
        std::cerr << "Error: Unsafe file path (potential traversal attack).\n";
        return false;
    }

    if (std::filesystem::is_symlink(filename)) {
        std::cerr << "Error: Symbolic links are not allowed.\n";
        return false;
    }

    if (!std::filesystem::is_regular_file(filename)) {
        std::cerr << "Error: Input is not a regular file.\n";
        return false;
    }

    std::ifstream infile(filename);
    if (!infile.is_open()) {
        std::cerr << "Error: Failed to open file.\n";
        return false;
    }

    return readHypergraphMatrixMarketFormat(infile, hgraph);
}

} // namespace file_reader

} // namespace osp
\ No newline at end of file
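To make the fine-grain model above concrete, consider this 2x2 matrix with three nonzeros (Matrix Market entries are 1-based; a `%%MatrixMarket` banner line would be skipped as a comment):

    2 2 3
    1 1 1.0
    1 2 1.0
    2 2 1.0

A hand-built equivalent of the hypergraph the reader produces for it, using the Hypergraph API from this patch (a sketch, assuming the default `Hypergraph_def_t` instantiation; vertex k is the k-th nonzero in file order):

    #include "osp/partitioning/model/hypergraph.hpp"

    using namespace osp;

    Hypergraph_def_t example_hypergraph() {
        Hypergraph_def_t hg(3, 0);     // one unit-weight vertex per nonzero
        hg.add_hyperedge({0, 1});      // row 1: nonzeros (1,1), (1,2)
        hg.add_hyperedge({2});         // row 2: nonzero (2,2)
        hg.add_hyperedge({0});         // column 1: nonzero (1,1)
        hg.add_hyperedge({1, 2});      // column 2: nonzeros (1,2), (2,2)
        return hg;                     // 3 vertices, 4 hyperedges, 6 pins
    }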
diff --git a/include/osp/auxiliary/io/partitioning_file_writer.hpp b/include/osp/auxiliary/io/partitioning_file_writer.hpp
new file mode 100644
index 00000000..782eac31
--- /dev/null
+++ b/include/osp/auxiliary/io/partitioning_file_writer.hpp
@@ -0,0 +1,67 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include "osp/partitioning/model/partitioning.hpp"
#include "osp/partitioning/model/partitioning_replication.hpp"
#include <fstream>
#include <ostream>

namespace osp { namespace file_writer {

template<typename hypergraph_t>
void write_txt(std::ostream &os, const Partitioning<hypergraph_t> &partition) {

    using index_type = typename hypergraph_t::vertex_idx;

    os << "%% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts." << std::endl;

    for (index_type node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node)
        os << node << " " << partition.assignedPartition(node) << std::endl;
}

template<typename hypergraph_t>
void write_txt(const std::string &filename, const Partitioning<hypergraph_t> &partition) {
    std::ofstream os(filename);
    write_txt(os, partition);
}

template<typename hypergraph_t>
void write_txt(std::ostream &os, const PartitioningWithReplication<hypergraph_t> &partition) {

    using index_type = typename hypergraph_t::vertex_idx;

    os << "%% Partitioning for " << partition.getInstance().getNumberOfPartitions() << " parts with replication."
       << std::endl;

    for (index_type node = 0; node < partition.getInstance().getHypergraph().num_vertices(); ++node)
    {
        os << node;
        for (unsigned part : partition.assignedPartitions(node))
            os << " " << part;
        os << std::endl;
    }
}

template<typename hypergraph_t>
void write_txt(const std::string &filename, const PartitioningWithReplication<hypergraph_t> &partition) {
    std::ofstream os(filename);
    write_txt(os, partition);
}

}} // namespace osp::file_writer
\ No newline at end of file
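With these definitions, a 4-vertex partitioning into two parts is written as a `%`-comment header followed by one `node part` pair per line, e.g. `%% Partitioning for 2 parts.` then `0 0`, `1 0`, `2 1`, `3 1`; the replication variant instead lists every partition a vertex is assigned to on that vertex's line. The `%` comment convention matches the MTX reader above.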
diff --git a/include/osp/partitioning/model/hypergraph.hpp b/include/osp/partitioning/model/hypergraph.hpp
new file mode 100644
index 00000000..715f6ac8
--- /dev/null
+++ b/include/osp/partitioning/model/hypergraph.hpp
@@ -0,0 +1,193 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/
#pragma once

#include <stdexcept>
#include <vector>
#include "osp/concepts/computational_dag_concept.hpp"
#include "osp/graph_algorithms/directed_graph_edge_desc_util.hpp"

namespace osp {

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
class Hypergraph {

    using this_t = Hypergraph<index_type, workw_type, memw_type, commw_type>;

  public:

    using vertex_idx = index_type;
    using vertex_work_weight_type = workw_type;
    using vertex_mem_weight_type = memw_type;
    using vertex_comm_weight_type = commw_type;

    Hypergraph() = default;

    Hypergraph(index_type num_vertices_, index_type num_hyperedges_)
        : Num_vertices(num_vertices_), Num_hyperedges(num_hyperedges_), vertex_work_weights(num_vertices_, 1),
          vertex_memory_weights(num_vertices_, 1), hyperedge_weights(num_hyperedges_, 1),
          incident_hyperedges_to_vertex(num_vertices_), vertices_in_hyperedge(num_hyperedges_) {}

    Hypergraph(const this_t &other) = default;
    Hypergraph &operator=(const this_t &other) = default;

    virtual ~Hypergraph() = default;

    inline index_type num_vertices() const { return Num_vertices; }
    inline index_type num_hyperedges() const { return Num_hyperedges; }
    inline index_type num_pins() const { return Num_pins; }
    inline workw_type get_vertex_work_weight(index_type node) const { return vertex_work_weights[node]; }
    inline memw_type get_vertex_memory_weight(index_type node) const { return vertex_memory_weights[node]; }
    inline commw_type get_hyperedge_weight(index_type hyperedge) const { return hyperedge_weights[hyperedge]; }

    void add_pin(index_type vertex_idx, index_type hyperedge_idx);
    void add_vertex(workw_type work_weight = 1, memw_type memory_weight = 1);
    void add_empty_hyperedge(commw_type weight = 1);
    void add_hyperedge(const std::vector<index_type>& pins, commw_type weight = 1);
    void set_vertex_work_weight(index_type vertex_idx, workw_type weight);
    void set_vertex_memory_weight(index_type vertex_idx, memw_type weight);
    void set_hyperedge_weight(index_type hyperedge_idx, commw_type weight);


    void clear();
    void reset(index_type num_vertices_, index_type num_hyperedges_);

    inline const std::vector<index_type> &get_incident_hyperedges(index_type vertex) const { return incident_hyperedges_to_vertex[vertex]; }
    inline const std::vector<index_type> &get_vertices_in_hyperedge(index_type hyperedge) const { return vertices_in_hyperedge[hyperedge]; }


  private:
    index_type Num_vertices = 0, Num_hyperedges = 0, Num_pins = 0;

    std::vector<workw_type> vertex_work_weights;
    std::vector<memw_type> vertex_memory_weights;
    std::vector<commw_type> hyperedge_weights;

    std::vector<std::vector<index_type>> incident_hyperedges_to_vertex;
    std::vector<std::vector<index_type>> vertices_in_hyperedge;
};

using Hypergraph_def_t = Hypergraph<unsigned, int, int, int>;

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::add_pin(index_type vertex_idx, index_type hyperedge_idx)
{
    if (vertex_idx >= Num_vertices)
    {
        throw std::invalid_argument("Invalid Argument while adding pin: vertex index out of range.");
    }
    else if (hyperedge_idx >= Num_hyperedges)
    {
        throw std::invalid_argument("Invalid Argument while adding pin: hyperedge index out of range.");
    }
    else
    {
        incident_hyperedges_to_vertex[vertex_idx].push_back(hyperedge_idx);
        vertices_in_hyperedge[hyperedge_idx].push_back(vertex_idx);
        ++Num_pins;
    }
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::add_vertex(workw_type work_weight, memw_type memory_weight)
{
    vertex_work_weights.push_back(work_weight);
    vertex_memory_weights.push_back(memory_weight);
    incident_hyperedges_to_vertex.emplace_back();
    ++Num_vertices;
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::add_empty_hyperedge(commw_type weight)
{
    vertices_in_hyperedge.emplace_back();
    hyperedge_weights.push_back(weight);
    ++Num_hyperedges;
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::add_hyperedge(const std::vector<index_type>& pins, commw_type weight)
{
    vertices_in_hyperedge.emplace_back(pins);
    hyperedge_weights.push_back(weight);
    for (index_type vertex : pins)
        incident_hyperedges_to_vertex[vertex].push_back(Num_hyperedges);
    ++Num_hyperedges;
    Num_pins += static_cast<index_type>(pins.size());
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::set_vertex_work_weight(index_type vertex_idx, workw_type weight)
{
    if (vertex_idx >= Num_vertices)
        throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range.");
    else
        vertex_work_weights[vertex_idx] = weight;
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::set_vertex_memory_weight(index_type vertex_idx, memw_type weight)
{
    if (vertex_idx >= Num_vertices)
        throw std::invalid_argument("Invalid Argument while setting vertex weight: vertex index out of range.");
    else
        vertex_memory_weights[vertex_idx] = weight;
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::set_hyperedge_weight(index_type hyperedge_idx, commw_type weight)
{
    if (hyperedge_idx >= Num_hyperedges)
        throw std::invalid_argument("Invalid Argument while setting hyperedge weight: hyperedge index out of range.");
    else
        hyperedge_weights[hyperedge_idx] = weight;
}


template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::clear()
{
    Num_vertices = 0;
    Num_hyperedges = 0;
    Num_pins = 0;

    vertex_work_weights.clear();
    vertex_memory_weights.clear();
    hyperedge_weights.clear();
    incident_hyperedges_to_vertex.clear();
    vertices_in_hyperedge.clear();
}

template<typename index_type, typename workw_type, typename memw_type, typename commw_type>
void Hypergraph<index_type, workw_type, memw_type, commw_type>::reset(index_type num_vertices_, index_type num_hyperedges_)
{
    clear();

    Num_vertices = num_vertices_;
    Num_hyperedges = num_hyperedges_;

    vertex_work_weights.resize(num_vertices_, 1);
    vertex_memory_weights.resize(num_vertices_, 1);
    hyperedge_weights.resize(num_hyperedges_, 1);
    incident_hyperedges_to_vertex.resize(num_vertices_);
    vertices_in_hyperedge.resize(num_hyperedges_);
}




} // namespace osp
\ No newline at end of file
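A minimal usage sketch of the two construction paths (incremental via `add_pin`, and whole-hyperedge via `add_hyperedge`), assuming the default `Hypergraph_def_t` instantiation above:

    #include "osp/partitioning/model/hypergraph.hpp"

    using namespace osp;

    int main() {
        Hypergraph_def_t hg;          // empty hypergraph
        hg.add_vertex(2, 1);          // v0: work weight 2, memory weight 1
        hg.add_vertex();              // v1: default weights (1, 1)
        hg.add_vertex();              // v2
        hg.add_hyperedge({0, 1, 2});  // hyperedge 0 with pins v0, v1, v2
        hg.add_empty_hyperedge(3);    // hyperedge 1 with weight 3, pins added separately
        hg.add_pin(2, 1);             // v2 becomes a pin of hyperedge 1
        // hg.num_vertices() == 3, hg.num_hyperedges() == 2, hg.num_pins() == 4
        return 0;
    }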
diff --git a/include/osp/partitioning/model/hypergraph_utility.hpp b/include/osp/partitioning/model/hypergraph_utility.hpp
new file mode 100644
index 00000000..46e698b4
--- /dev/null
+++ b/include/osp/partitioning/model/hypergraph_utility.hpp
@@ -0,0 +1,164 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include <stdexcept>
#include <type_traits>
#include <vector>

#include "osp/partitioning/model/hypergraph.hpp"

/**
 * @file hypergraph_utility.hpp
 * @brief Utility functions and classes for working with hypergraphs.
 *
 * This file provides a collection of simple utility functions for the hypergraph class.
 */

namespace osp {


// summing up weights

template<typename hypergraph_t>
typename hypergraph_t::vertex_work_weight_type compute_total_vertex_work_weight(const hypergraph_t& hgraph)
{
    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;

    workw_type total = 0;
    for (index_type node = 0; node < hgraph.num_vertices(); ++node)
        total += hgraph.get_vertex_work_weight(node);
    return total;
}

template<typename hypergraph_t>
typename hypergraph_t::vertex_mem_weight_type compute_total_vertex_memory_weight(const hypergraph_t& hgraph)
{
    using index_type = typename hypergraph_t::vertex_idx;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;

    memw_type total = 0;
    for (index_type node = 0; node < hgraph.num_vertices(); ++node)
        total += hgraph.get_vertex_memory_weight(node);
    return total;
}


// get induced subhypergraph

template<typename hypergraph_t>
hypergraph_t create_induced_hypergraph(const hypergraph_t& hgraph, const std::vector<bool>& include)
{
    if (include.size() != hgraph.num_vertices())
        throw std::invalid_argument("Invalid Argument while extracting induced hypergraph: input bool array has incorrect size.");

    using index_type = typename hypergraph_t::vertex_idx;


    std::vector<index_type> new_index(hgraph.num_vertices());
    unsigned current_index = 0;
    for (index_type node = 0; node < hgraph.num_vertices(); ++node)
        if (include[node])
            new_index[node] = current_index++;

    hypergraph_t new_hgraph(current_index, 0);
    for (index_type node = 0; node < hgraph.num_vertices(); ++node)
        if (include[node])
        {
            new_hgraph.set_vertex_work_weight(new_index[node], hgraph.get_vertex_work_weight(node));
            new_hgraph.set_vertex_memory_weight(new_index[node], hgraph.get_vertex_memory_weight(node));
        }

    for (index_type hyperedge = 0; hyperedge < hgraph.num_hyperedges(); ++hyperedge)
    {
        unsigned nr_induced_pins = 0;
        std::vector<index_type> induced_hyperedge;
        for (index_type node : hgraph.get_vertices_in_hyperedge(hyperedge))
            if (include[node])
            {
                induced_hyperedge.push_back(new_index[node]);
                ++nr_induced_pins;
            }

        if (nr_induced_pins >= 2)
            new_hgraph.add_hyperedge(induced_hyperedge, hgraph.get_hyperedge_weight(hyperedge));
    }
    return new_hgraph;
}

// conversion

template<typename hypergraph_t, typename Graph_t>
hypergraph_t convert_from_cdag_as_dag(const Graph_t& dag)
{
    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
    using commw_type = typename hypergraph_t::vertex_comm_weight_type;

    static_assert(std::is_same_v<vertex_idx_t<Graph_t>, index_type>, "Index type mismatch, cannot convert DAG to hypergraph.");
    static_assert(std::is_same_v<v_workw_t<Graph_t>, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph.");
    static_assert(std::is_same_v<v_memw_t<Graph_t>, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph.");
    static_assert(!has_edge_weights_v<Graph_t> || std::is_same_v<e_commw_t<Graph_t>, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph.");

    hypergraph_t hgraph(dag.num_vertices(), 0);
    for (const auto &node : dag.vertices())
    {
        hgraph.set_vertex_work_weight(node, dag.vertex_work_weight(node));
        hgraph.set_vertex_memory_weight(node, dag.vertex_mem_weight(node));
        for (const auto &child : dag.children(node))
            if constexpr (has_edge_weights_v<Graph_t>)
                hgraph.add_hyperedge({node, child}, dag.edge_comm_weight(edge_desc(node, child, dag).first));
            else
                hgraph.add_hyperedge({node, child});
    }
    return hgraph;
}

template<typename hypergraph_t, typename Graph_t>
hypergraph_t convert_from_cdag_as_hyperdag(const Graph_t& dag)
{
    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
    using commw_type = typename hypergraph_t::vertex_comm_weight_type;

    static_assert(std::is_same_v<vertex_idx_t<Graph_t>, index_type>, "Index type mismatch, cannot convert DAG to hypergraph.");
    static_assert(std::is_same_v<v_workw_t<Graph_t>, workw_type>, "Work weight type mismatch, cannot convert DAG to hypergraph.");
    static_assert(std::is_same_v<v_memw_t<Graph_t>, memw_type>, "Memory weight type mismatch, cannot convert DAG to hypergraph.");
    static_assert(std::is_same_v<v_commw_t<Graph_t>, commw_type>, "Communication weight type mismatch, cannot convert DAG to hypergraph.");

    hypergraph_t hgraph(dag.num_vertices(), 0);
    for (const auto &node : dag.vertices())
    {
        hgraph.set_vertex_work_weight(node, dag.vertex_work_weight(node));
        hgraph.set_vertex_memory_weight(node, dag.vertex_mem_weight(node));
        if (dag.out_degree(node) == 0)
            continue;
        std::vector<index_type> new_hyperedge({node});
        for (const auto &child : dag.children(node))
            new_hyperedge.push_back(child);
        hgraph.add_hyperedge(new_hyperedge, dag.vertex_comm_weight(node));
    }
    return hgraph;
}

} // namespace osp
\ No newline at end of file
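As a concrete illustration of the two converters: for a diamond DAG with edges 0->1, 0->2, 1->3, 2->3, `convert_from_cdag_as_hyperdag` gives each non-sink vertex one hyperedge containing itself and all its children, weighted by that vertex's communication weight:

    e0 = {0, 1, 2},  e1 = {1, 3},  e2 = {2, 3}

The sink 3 contributes no hyperedge, so the result has 4 vertices, 3 hyperedges, and 7 pins. `convert_from_cdag_as_dag`, by contrast, produces one 2-pin hyperedge per DAG edge (here 4 hyperedges and 8 pins).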
diff --git a/include/osp/partitioning/model/partitioning.hpp b/include/osp/partitioning/model/partitioning.hpp
new file mode 100644
index 00000000..d25a28b9
--- /dev/null
+++ b/include/osp/partitioning/model/partitioning.hpp
@@ -0,0 +1,186 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include <stdexcept>
#include <vector>

#include "osp/partitioning/model/partitioning_problem.hpp"

namespace osp {

// Represents a partitioning where each vertex of a hypergraph is assigned to a specific partition

template<typename hypergraph_t>
class Partitioning {

  private:

    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
    using commw_type = typename hypergraph_t::vertex_comm_weight_type;

    const PartitioningProblem<hypergraph_t> *instance;

    std::vector<unsigned> node_to_partition_assignment;

  public:

    Partitioning() = delete;

    Partitioning(const PartitioningProblem<hypergraph_t> &inst)
        : instance(&inst), node_to_partition_assignment(std::vector<unsigned>(inst.getHypergraph().num_vertices(), 0)) {}

    Partitioning(const PartitioningProblem<hypergraph_t> &inst, const std::vector<unsigned> &partition_assignment_)
        : instance(&inst), node_to_partition_assignment(partition_assignment_) {}

    Partitioning(const Partitioning &partitioning_) = default;
    Partitioning(Partitioning &&partitioning_) = default;

    Partitioning &operator=(const Partitioning &partitioning_) = default;

    virtual ~Partitioning() = default;


    // getters and setters

    inline const PartitioningProblem<hypergraph_t> &getInstance() const { return *instance; }

    inline unsigned assignedPartition(index_type node) const { return node_to_partition_assignment[node]; }
    inline const std::vector<unsigned> &assignedPartitions() const { return node_to_partition_assignment; }
    inline std::vector<unsigned> &assignedPartitions() { return node_to_partition_assignment; }

    inline void setAssignedPartition(index_type node, unsigned part) { node_to_partition_assignment.at(node) = part; }
    void setAssignedPartitions(const std::vector<unsigned> &vec) {

        if (vec.size() == static_cast<size_t>(instance->getHypergraph().num_vertices())) {
            node_to_partition_assignment = vec;
        } else {
            throw std::invalid_argument(
                "Invalid Argument while assigning processors: size does not match number of nodes.");
        }
    }
    void setAssignedPartitions(std::vector<unsigned> &&vec) {

        if (vec.size() == static_cast<size_t>(instance->getHypergraph().num_vertices())) {
            node_to_partition_assignment = std::move(vec);
        } else {
            throw std::invalid_argument(
                "Invalid Argument while assigning processors: size does not match number of nodes.");
        }
    }

    std::vector<index_type> getPartitionContent(unsigned part) const {

        std::vector<index_type> content;
        for (index_type node = 0; node < node_to_partition_assignment.size(); ++node) {

            if (node_to_partition_assignment[node] == part) {
                content.push_back(node);
            }
        }

        return content;
    }

    void resetPartition() {
        node_to_partition_assignment.clear();
        node_to_partition_assignment.resize(instance->getHypergraph().num_vertices(), 0);
    }

    // costs and validity

    std::vector<unsigned> computeLambdaForHyperedges() const;
    commw_type computeConnectivityCost() const;
    commw_type computeCutNetCost() const;

    bool satisfiesBalanceConstraint() const;

};

template<typename hypergraph_t>
std::vector<unsigned> Partitioning<hypergraph_t>::computeLambdaForHyperedges() const
{
    std::vector<unsigned> lambda(instance->getHypergraph().num_hyperedges(), 0);
    for (index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
    {
        const std::vector<index_type> &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx);
        if (hyperedge.empty())
            continue;
        std::vector<bool> intersects_part(instance->getNumberOfPartitions(), false);
        for (const index_type& node : hyperedge)
            intersects_part[node_to_partition_assignment[node]] = true;
        for (unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
            if (intersects_part[part])
                ++lambda[edge_idx];
    }
    return lambda;
}

template<typename hypergraph_t>
typename hypergraph_t::vertex_comm_weight_type Partitioning<hypergraph_t>::computeConnectivityCost() const {

    commw_type total = 0;
    std::vector<unsigned> lambda = computeLambdaForHyperedges();

    for (index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
        if (lambda[edge_idx] >= 1)
            total += (static_cast<commw_type>(lambda[edge_idx]) - 1) * instance->getHypergraph().get_hyperedge_weight(edge_idx);

    return total;
}

template<typename hypergraph_t>
typename hypergraph_t::vertex_comm_weight_type Partitioning<hypergraph_t>::computeCutNetCost() const {

    commw_type total = 0;
    std::vector<unsigned> lambda = computeLambdaForHyperedges();
    for (index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
        if (lambda[edge_idx] > 1)
            total += instance->getHypergraph().get_hyperedge_weight(edge_idx);

    return total;
}

template<typename hypergraph_t>
bool Partitioning<hypergraph_t>::satisfiesBalanceConstraint() const {
    std::vector<workw_type> work_weight(instance->getNumberOfPartitions(), 0);
    std::vector<memw_type> memory_weight(instance->getNumberOfPartitions(), 0);
    for (index_type node = 0; node < node_to_partition_assignment.size(); ++node) {
        if (node_to_partition_assignment[node] >= instance->getNumberOfPartitions())
            throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range.");
        else
        {
            work_weight[node_to_partition_assignment[node]] += instance->getHypergraph().get_vertex_work_weight(node);
            memory_weight[node_to_partition_assignment[node]] += instance->getHypergraph().get_vertex_memory_weight(node);
        }
    }

    for (unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
    {
        if (work_weight[part] > instance->getMaxWorkWeightPerPartition())
            return false;
        if (memory_weight[part] > instance->getMaxMemoryWeightPerPartition())
            return false;
    }

    return true;
}

} // namespace osp
\ No newline at end of file
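A small worked example of the two cost functions defined above (a compilable sketch, assuming the default `Hypergraph_def_t` instantiation):

    #include "osp/partitioning/model/partitioning.hpp"

    using namespace osp;

    int main() {
        Hypergraph_def_t hg(4, 0);
        hg.add_hyperedge({0, 1, 2, 3});                    // e0
        hg.add_hyperedge({0, 1});                          // e1
        PartitioningProblem<Hypergraph_def_t> prob(hg, 3);
        Partitioning<Hypergraph_def_t> part(prob, {0, 1, 2, 2});
        // e0 spans parts {0,1,2} -> lambda = 3; e1 spans {0,1} -> lambda = 2
        // connectivity cost = (3-1)*1 + (2-1)*1 = 3; cut-net cost = 1 + 1 = 2
        return part.computeConnectivityCost() == 3 && part.computeCutNetCost() == 2 ? 0 : 1;
    }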
diff --git a/include/osp/partitioning/model/partitioning_problem.hpp b/include/osp/partitioning/model/partitioning_problem.hpp
new file mode 100644
index 00000000..b121ddd9
--- /dev/null
+++ b/include/osp/partitioning/model/partitioning_problem.hpp
@@ -0,0 +1,103 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include <cmath>
#include <limits>
#include <stdexcept>
#include <utility>

#include "osp/partitioning/model/hypergraph_utility.hpp"

namespace osp {

// represents a hypergraph partitioning problem into a fixed number of parts with a balance constraint
template<typename hypergraph_t>
class PartitioningProblem {

  private:

    using this_t = PartitioningProblem<hypergraph_t>;

    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
    using commw_type = typename hypergraph_t::vertex_comm_weight_type;

    hypergraph_t hgraph;

    unsigned nr_of_partitions = 2;
    workw_type max_work_weight_per_partition = std::numeric_limits<workw_type>::max();
    memw_type max_memory_weight_per_partition = std::numeric_limits<memw_type>::max();

    bool allows_replication = false;

  public:

    PartitioningProblem() = default;

    PartitioningProblem(const hypergraph_t &hgraph_, unsigned nr_parts_ = 2,
                        workw_type max_work_weight_ = std::numeric_limits<workw_type>::max(),
                        memw_type max_memory_weight_ = std::numeric_limits<memw_type>::max()) :
        hgraph(hgraph_), nr_of_partitions(nr_parts_),
        max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {}

    PartitioningProblem(hypergraph_t &&hgraph_, unsigned nr_parts_ = 2,
                        workw_type max_work_weight_ = std::numeric_limits<workw_type>::max(),
                        memw_type max_memory_weight_ = std::numeric_limits<memw_type>::max()) :
        hgraph(std::move(hgraph_)), nr_of_partitions(nr_parts_),
        max_work_weight_per_partition(max_work_weight_), max_memory_weight_per_partition(max_memory_weight_) {}

    PartitioningProblem(const this_t &other) = default;
    PartitioningProblem(this_t &&other) = default;

    PartitioningProblem &operator=(const this_t &other) = default;
    PartitioningProblem &operator=(this_t &&other) = default;

    // getters
    inline const hypergraph_t &getHypergraph() const { return hgraph; }
    inline hypergraph_t &getHypergraph() { return hgraph; }

    inline unsigned getNumberOfPartitions() const { return nr_of_partitions; }
    inline workw_type getMaxWorkWeightPerPartition() const { return max_work_weight_per_partition; }
    inline memw_type getMaxMemoryWeightPerPartition() const { return max_memory_weight_per_partition; }
    inline bool getAllowsReplication() const { return allows_replication; }

    // setters
    inline void setHypergraph(const hypergraph_t &hgraph_) { hgraph = hgraph_; }

    inline void setNumberOfPartitions(unsigned nr_parts_) { nr_of_partitions = nr_parts_; }
    inline void setAllowsReplication(bool allowed_) { allows_replication = allowed_; }

    inline void setMaxWorkWeightExplicitly(workw_type max_weight_) { max_work_weight_per_partition = max_weight_; }
    void setMaxWorkWeightViaImbalanceFactor(double imbalance) {
        if (imbalance < 0)
            throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative.");
        else
            max_work_weight_per_partition = static_cast<workw_type>(std::ceil(compute_total_vertex_work_weight(hgraph) / static_cast<double>(nr_of_partitions) * (1.0 + imbalance)));
    }
    inline void setMaxMemoryWeightExplicitly(memw_type max_weight_) { max_memory_weight_per_partition = max_weight_; }
    void setMaxMemoryWeightViaImbalanceFactor(double imbalance) {
        if (imbalance < 0)
            throw std::invalid_argument("Invalid Argument while setting imbalance parameter: parameter is negative.");
        else
            max_memory_weight_per_partition = static_cast<memw_type>(std::ceil(compute_total_vertex_memory_weight(hgraph) / static_cast<double>(nr_of_partitions) * (1.0 + imbalance)));
    }
};


} // namespace osp
\ No newline at end of file
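A quick arithmetic check of `setMaxWorkWeightViaImbalanceFactor` (a sketch with unit weights, assuming the default `Hypergraph_def_t`):

    #include "osp/partitioning/model/partitioning_problem.hpp"

    using namespace osp;

    int main() {
        Hypergraph_def_t hg(100, 0);                       // 100 unit-weight vertices
        PartitioningProblem<Hypergraph_def_t> prob(hg, 4);
        prob.setMaxWorkWeightViaImbalanceFactor(0.05);     // ceil(100 / 4 * 1.05) = ceil(26.25) = 27
        return prob.getMaxWorkWeightPerPartition() == 27 ? 0 : 1;
    }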
diff --git a/include/osp/partitioning/model/partitioning_replication.hpp b/include/osp/partitioning/model/partitioning_replication.hpp
new file mode 100644
index 00000000..0efe577c
--- /dev/null
+++ b/include/osp/partitioning/model/partitioning_replication.hpp
@@ -0,0 +1,228 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include <algorithm>
#include <stdexcept>
#include <vector>

#include "osp/partitioning/model/partitioning_problem.hpp"

namespace osp {

// Represents a partitioning where each vertex of a hypergraph can be assigned to one or more partitions

template<typename hypergraph_t>
class PartitioningWithReplication {

  private:

    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
    using commw_type = typename hypergraph_t::vertex_comm_weight_type;


    const PartitioningProblem<hypergraph_t> *instance;

    std::vector<std::vector<unsigned>> node_to_partitions_assignment;

  public:

    PartitioningWithReplication() = delete;

    PartitioningWithReplication(const PartitioningProblem<hypergraph_t> &inst)
        : instance(&inst), node_to_partitions_assignment(std::vector<std::vector<unsigned>>(inst.getHypergraph().num_vertices(), {0})) {}

    PartitioningWithReplication(const PartitioningProblem<hypergraph_t> &inst, const std::vector<std::vector<unsigned>> &partition_assignment_)
        : instance(&inst), node_to_partitions_assignment(partition_assignment_) {}

    PartitioningWithReplication(const PartitioningWithReplication &partitioning_) = default;
    PartitioningWithReplication(PartitioningWithReplication &&partitioning_) = default;

    PartitioningWithReplication &operator=(const PartitioningWithReplication &partitioning_) = default;

    virtual ~PartitioningWithReplication() = default;


    // getters and setters

    inline const PartitioningProblem<hypergraph_t> &getInstance() const { return *instance; }

    inline std::vector<unsigned> assignedPartitions(index_type node) const { return node_to_partitions_assignment[node]; }
    inline const std::vector<std::vector<unsigned>> &assignedPartitions() const { return node_to_partitions_assignment; }
    inline std::vector<std::vector<unsigned>> &assignedPartitions() { return node_to_partitions_assignment; }

    inline void setAssignedPartitions(index_type node, const std::vector<unsigned>& parts) { node_to_partitions_assignment.at(node) = parts; }
    void setAssignedPartitionVectors(const std::vector<std::vector<unsigned>> &vec) {

        if (vec.size() == static_cast<size_t>(instance->getHypergraph().num_vertices())) {
            node_to_partitions_assignment = vec;
        } else {
            throw std::invalid_argument(
                "Invalid Argument while assigning processors: size does not match number of nodes.");
        }
    }
    void setAssignedPartitionVectors(std::vector<std::vector<unsigned>> &&vec) {

        if (vec.size() == static_cast<size_t>(instance->getHypergraph().num_vertices())) {
            node_to_partitions_assignment = std::move(vec);
        } else {
            throw std::invalid_argument(
                "Invalid Argument while assigning processors: size does not match number of nodes.");
        }
    }

    std::vector<std::vector<index_type>> getPartitionContents() const {
        std::vector<std::vector<index_type>> content(instance->getNumberOfPartitions());
        for (index_type node = 0; node < node_to_partitions_assignment.size(); ++node)
            for (unsigned part : node_to_partitions_assignment[node])
                content[part].push_back(node);

        return content;
    }

    void resetPartition() {
        node_to_partitions_assignment.clear();
        node_to_partitions_assignment.resize(instance->getHypergraph().num_vertices(), {0});
    }

    // costs and validity

    commw_type computeConnectivityCost() const;
    commw_type computeCutNetCost() const;

    bool satisfiesBalanceConstraint() const;

};

template<typename hypergraph_t>
typename hypergraph_t::vertex_comm_weight_type PartitioningWithReplication<hypergraph_t>::computeConnectivityCost() const {

    // naive implementation. in the worst-case this is exponential in the number of parts
    if (instance->getNumberOfPartitions() > 16)
        throw std::invalid_argument("Computing connectivity cost is not supported for more than 16 partitions.");

    commw_type total = 0;
    std::vector<bool> part_used(instance->getNumberOfPartitions(), false);
    for (index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
    {
        const std::vector<index_type> &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx);
        if (hyperedge.empty())
            continue;

        unsigned long mask = 0UL;

        for (const index_type& node : hyperedge)
            if (node_to_partitions_assignment[node].size() == 1)
                mask = mask | (1UL << node_to_partitions_assignment[node].front());

        unsigned min_parts_to_cover = instance->getNumberOfPartitions();
        unsigned long mask_limit = 1UL << instance->getNumberOfPartitions();
        for (unsigned long subset_mask = 1UL; subset_mask < mask_limit; ++subset_mask)
        {
            if ((subset_mask & mask) != mask)
                continue;

            unsigned nr_parts_used = 0;
            for (unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
            {
                part_used[part] = (((1UL << part) & subset_mask) > 0);
                nr_parts_used += static_cast<unsigned>(part_used[part]);
            }

            bool all_nodes_covered = true;
            for (const index_type& node : hyperedge)
            {
                bool node_covered = false;
                for (unsigned part : node_to_partitions_assignment[node])
                    if (part_used[part])
                    {
                        node_covered = true;
                        break;
                    }
                if (!node_covered)
                {
                    all_nodes_covered = false;
                    break;
                }
            }
            if (all_nodes_covered)
                min_parts_to_cover = std::min(min_parts_to_cover, nr_parts_used);
        }

        total += static_cast<commw_type>(min_parts_to_cover - 1) * instance->getHypergraph().get_hyperedge_weight(edge_idx);
    }

    return total;
}

template<typename hypergraph_t>
typename hypergraph_t::vertex_comm_weight_type PartitioningWithReplication<hypergraph_t>::computeCutNetCost() const {

    commw_type total = 0;
    for (index_type edge_idx = 0; edge_idx < instance->getHypergraph().num_hyperedges(); ++edge_idx)
    {
        const std::vector<index_type> &hyperedge = instance->getHypergraph().get_vertices_in_hyperedge(edge_idx);
        if (hyperedge.empty())
            continue;
        std::vector<size_t> nr_nodes_covered_by_part(instance->getNumberOfPartitions(), 0);
        for (const index_type& node : hyperedge)
            for (unsigned part : node_to_partitions_assignment[node])
                ++nr_nodes_covered_by_part[part];

        bool covers_all = false;
        for (unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
            if (nr_nodes_covered_by_part[part] == hyperedge.size())
                covers_all = true;

        if (!covers_all)
            total += instance->getHypergraph().get_hyperedge_weight(edge_idx);
    }

    return total;
}
template<typename hypergraph_t>
bool PartitioningWithReplication<hypergraph_t>::satisfiesBalanceConstraint() const {
    std::vector<workw_type> work_weight(instance->getNumberOfPartitions(), 0);
    std::vector<memw_type> memory_weight(instance->getNumberOfPartitions(), 0);
    for (index_type node = 0; node < node_to_partitions_assignment.size(); ++node)
        for (unsigned part : node_to_partitions_assignment[node]) {
            if (part >= instance->getNumberOfPartitions())
                throw std::invalid_argument("Invalid Argument while checking balance constraint: partition ID out of range.");
            else
            {
                work_weight[part] += instance->getHypergraph().get_vertex_work_weight(node);
                memory_weight[part] += instance->getHypergraph().get_vertex_memory_weight(node);
            }
        }

    for (unsigned part = 0; part < instance->getNumberOfPartitions(); ++part)
    {
        if (work_weight[part] > instance->getMaxWorkWeightPerPartition())
            return false;
        if (memory_weight[part] > instance->getMaxMemoryWeightPerPartition())
            return false;
    }

    return true;
}

} // namespace osp
\ No newline at end of file
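A minimal sketch of the replication cost model above (default `Hypergraph_def_t` assumed): replicating a single pin can remove a hyperedge's connectivity penalty entirely, since the minimum set cover can then be a single part.

    #include "osp/partitioning/model/partitioning_replication.hpp"

    using namespace osp;

    int main() {
        Hypergraph_def_t hg(2, 0);
        hg.add_hyperedge({0, 1});
        PartitioningProblem<Hypergraph_def_t> prob(hg, 2);
        PartitioningWithReplication<Hypergraph_def_t> part(prob, {{0}, {0, 1}});
        // vertex 0 lives only on part 0; vertex 1 is replicated on parts 0 and 1,
        // so part 0 alone covers the hyperedge: min cover size 1, cost (1-1)*w = 0
        return part.computeConnectivityCost() == 0 ? 0 : 1;
    }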
diff --git a/include/osp/partitioning/partitioners/generic_FM.hpp b/include/osp/partitioning/partitioners/generic_FM.hpp
new file mode 100644
index 00000000..a7df5bec
--- /dev/null
+++ b/include/osp/partitioning/partitioners/generic_FM.hpp
@@ -0,0 +1,375 @@
/*
Copyright 2024 Huawei Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
*/

#pragma once

#include "osp/partitioning/model/partitioning.hpp"
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <map>

namespace osp {

template<typename hypergraph_t>
class GenericFM {

    using index_type = typename hypergraph_t::vertex_idx;
    using workw_type = typename hypergraph_t::vertex_work_weight_type;
    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
    using commw_type = typename hypergraph_t::vertex_comm_weight_type;



  protected:
    unsigned max_number_of_passes = 10;
    index_type max_nodes_in_part = 0;

    // auxiliary for RecursiveFM
    std::vector<index_type> getMaxNodesOnLevel(index_type nr_nodes, unsigned nr_parts) const;

  public:

    void ImprovePartitioning(Partitioning<hypergraph_t>& partition);

    void RecursiveFM(Partitioning<hypergraph_t>& partition);

    inline unsigned getMaxNumberOfPasses() const { return max_number_of_passes; }
    inline void setMaxNumberOfPasses(unsigned passes_) { max_number_of_passes = passes_; }
    inline index_type getMaxNodesInPart() const { return max_nodes_in_part; }
    inline void setMaxNodesInPart(index_type max_nodes_) { max_nodes_in_part = max_nodes_; }
};

template<typename hypergraph_t>
void GenericFM<hypergraph_t>::ImprovePartitioning(Partitioning<hypergraph_t>& partition)
{
    // Note: this algorithm disregards hyperedge weights, in order to keep the size of the gain bucket array bounded!

    if (partition.getInstance().getNumberOfPartitions() != 2)
    {
        std::cout << "Error: FM can only be used for 2 partitions." << std::endl;
        return;
    }

    if (!partition.satisfiesBalanceConstraint())
    {
        std::cout << "Error: initial partition to FM does not satisfy balance constraint." << std::endl;
        return;
    }

    const Hypergraph<index_type, workw_type, memw_type, commw_type>& Hgraph = partition.getInstance().getHypergraph();

    index_type max_degree = 0;
    for (index_type node = 0; node < Hgraph.num_vertices(); ++node)
        max_degree = std::max(max_degree, static_cast<index_type>(Hgraph.get_incident_hyperedges(node).size()));

    if (max_nodes_in_part == 0) // if not initialized
        max_nodes_in_part = static_cast<index_type>(std::ceil(static_cast<double>(Hgraph.num_vertices()) * static_cast<double>(partition.getInstance().getMaxWorkWeightPerPartition())
                                                    / static_cast<double>(compute_total_vertex_work_weight(Hgraph))));

    for (unsigned pass_idx = 0; pass_idx < max_number_of_passes; ++pass_idx)
    {
        std::vector<unsigned> node_to_new_part = partition.assignedPartitions();
        std::vector<bool> locked(Hgraph.num_vertices(), false);
        std::vector<int> gain(Hgraph.num_vertices(), 0);
        std::vector<std::vector<index_type>> nr_nodes_in_hyperedge_on_side(Hgraph.num_hyperedges(), std::vector<index_type>(2, 0));
        int cost = 0;

        index_type left_side = 0;
        for (index_type node = 0; node < Hgraph.num_vertices(); ++node)
            if (partition.assignedPartition(node) == 0)
                ++left_side;

        if (left_side > max_nodes_in_part || Hgraph.num_vertices() - left_side > max_nodes_in_part)
        {
            if (pass_idx == 0)
            {
                std::cout << "Error: initial partitioning of FM is not balanced." << std::endl;
                return;
            }
            else
                break;
        }

        // initialize per-hyperedge side counts, the current cut cost, and the node gains
        for (index_type hyperedge = 0; hyperedge < Hgraph.num_hyperedges(); ++hyperedge)
        {
            for (index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge))
                ++nr_nodes_in_hyperedge_on_side[hyperedge][partition.assignedPartition(node)];

            if (nr_nodes_in_hyperedge_on_side[hyperedge][0] > 0 && nr_nodes_in_hyperedge_on_side[hyperedge][1] > 0)
                ++cost; // hyperedge is currently cut
        }

        for (index_type node = 0; node < Hgraph.num_vertices(); ++node)
        {
            const unsigned part = partition.assignedPartition(node);
            for (index_type hyperedge : Hgraph.get_incident_hyperedges(node))
            {
                if (nr_nodes_in_hyperedge_on_side[hyperedge][part] == 1 && nr_nodes_in_hyperedge_on_side[hyperedge][1 - part] > 0)
                    ++gain[node]; // moving node would uncut this hyperedge
                else if (nr_nodes_in_hyperedge_on_side[hyperedge][1 - part] == 0 && nr_nodes_in_hyperedge_on_side[hyperedge][part] > 1)
                    --gain[node]; // moving node would cut this hyperedge
            }
        }

        std::vector<int> max_gain(2, -static_cast<int>(max_degree) - 1);
        std::vector<std::vector<std::vector<index_type>>> gain_bucket_array(2, std::vector<std::vector<index_type>>(2 * max_degree + 1));
        for (index_type node = 0; node < Hgraph.num_vertices(); ++node)
        {
            const unsigned& part = partition.assignedPartition(node);
            gain_bucket_array[part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))].push_back(node);
            max_gain[part] = std::max(max_gain[part], gain[node]);
        }

        index_type best_index = 0;
        int best_cost = 0;
        std::vector<index_type> moved_nodes;

        // the pass itself: make moves
        while (moved_nodes.size() < Hgraph.num_vertices())
        {
            // select move
            index_type to_move = std::numeric_limits<index_type>::max();
            unsigned chosen_part = std::numeric_limits<unsigned>::max();

            unsigned gain_index = static_cast<unsigned>(std::max(max_gain[0], max_gain[1]) + static_cast<int>(max_degree));
            while (gain_index < std::numeric_limits<unsigned>::max())
            {
                bool can_choose_left = (Hgraph.num_vertices() - left_side < max_nodes_in_part) && !gain_bucket_array[0][gain_index].empty();
                bool can_choose_right = (left_side < max_nodes_in_part) && !gain_bucket_array[1][gain_index].empty();

                if (can_choose_left && can_choose_right)
                    chosen_part = (left_side >= Hgraph.num_vertices() / 2) ? 1 : 0;
                else if (can_choose_left)
                    chosen_part = 0;
                else if (can_choose_right)
                    chosen_part = 1;

                if (chosen_part < 2)
                {
                    to_move = gain_bucket_array[chosen_part][gain_index].back();
                    gain_bucket_array[chosen_part][gain_index].pop_back();
                    break;
                }
                --gain_index;
            }

            if (to_move == std::numeric_limits<index_type>::max())
                break;

            // make move

            moved_nodes.push_back(to_move);
            cost -= gain[to_move];
            if (cost < best_cost)
            {
                best_cost = cost;
                best_index = static_cast<index_type>(moved_nodes.size());
            }
            locked[to_move] = true;
            node_to_new_part[to_move] = 1 - node_to_new_part[to_move];

            if (chosen_part == 0)
                --left_side;
            else
                ++left_side;

            unsigned other_part = 1 - chosen_part;

            // update gain values
            for (index_type hyperedge : Hgraph.get_incident_hyperedges(to_move))
            {
                if (nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part] == 1)
                {
                    for (index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge))
                    {
                        if (locked[node])
                            continue;

                        std::vector<index_type>& vec = gain_bucket_array[other_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))];
                        vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end());
                        --gain[node];
                        gain_bucket_array[other_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))].push_back(node);
                    }
                }
                else if (nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part] == 2)
                {
                    for (index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge))
                    {
                        if (node_to_new_part[node] == chosen_part && !locked[node])
                        {
                            std::vector<index_type>& vec = gain_bucket_array[chosen_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))];
                            vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end());
                            ++gain[node];
                            gain_bucket_array[chosen_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))].push_back(node);
                            max_gain[chosen_part] = std::max(max_gain[chosen_part], gain[node]);
                            break;
                        }
                    }
                }
                if (nr_nodes_in_hyperedge_on_side[hyperedge][other_part] == 1)
                {
                    for (index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge))
                    {
                        if (node_to_new_part[node] == other_part && !locked[node])
                        {
                            std::vector<index_type>& vec = gain_bucket_array[other_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))];
                            vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end());
                            --gain[node];
                            gain_bucket_array[other_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))].push_back(node);
                            break;
                        }
                    }
                }
                else if (nr_nodes_in_hyperedge_on_side[hyperedge][other_part] == 0)
                {
                    for (index_type node : Hgraph.get_vertices_in_hyperedge(hyperedge))
                    {
                        if (locked[node])
                            continue;

                        std::vector<index_type>& vec = gain_bucket_array[chosen_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))];
                        vec.erase(std::remove(vec.begin(), vec.end(), node), vec.end());
                        ++gain[node];
                        gain_bucket_array[chosen_part][static_cast<size_t>(gain[node] + static_cast<int>(max_degree))].push_back(node);
                        max_gain[chosen_part] = std::max(max_gain[chosen_part], gain[node]);
                    }
                }
                --nr_nodes_in_hyperedge_on_side[hyperedge][chosen_part];
                ++nr_nodes_in_hyperedge_on_side[hyperedge][other_part];
            }
        }

        // apply best configuration seen
        if (best_index == 0)
            break;

        for (index_type node_idx = 0; node_idx < best_index && node_idx < static_cast<index_type>(moved_nodes.size()); ++node_idx)
            partition.setAssignedPartition(moved_nodes[node_idx], 1U - partition.assignedPartition(moved_nodes[node_idx]));

    }
}

template<typename hypergraph_t>
void GenericFM<hypergraph_t>::RecursiveFM(Partitioning<hypergraph_t>& partition)
{
    const unsigned& nr_parts = partition.getInstance().getNumberOfPartitions();
    const index_type& nr_nodes = partition.getInstance().getHypergraph().num_vertices();

    using Hgraph = Hypergraph<index_type, workw_type, memw_type, commw_type>;

    // Note: this is just a simple recursive heuristic for the case when the partitions are a small power of 2
    if (nr_parts != 4 && nr_parts != 8 && nr_parts != 16 && nr_parts != 32)
    {
        std::cout << "Error: Recursive FM can only be used for 4, 8, 16 or 32 partitions currently." << std::endl;
        return;
    }

    for (index_type node = 0; node < nr_nodes; ++node)
        partition.setAssignedPartition(node, static_cast<unsigned>(node % 2));

    if (max_nodes_in_part == 0) // if not initialized
        max_nodes_in_part = static_cast<index_type>(std::ceil(static_cast<double>(nr_nodes) * static_cast<double>(partition.getInstance().getMaxWorkWeightPerPartition())
                                                    / static_cast<double>(compute_total_vertex_work_weight(partition.getInstance().getHypergraph()))));

    const std::vector<index_type> max_nodes_on_level = getMaxNodesOnLevel(nr_nodes, nr_parts);

    unsigned parts = 1;
    unsigned level = 0;
    std::vector<Hgraph> sub_hgraphs({partition.getInstance().getHypergraph()});
    unsigned start_index = 0;

    std::map<index_type, std::pair<unsigned, index_type>> node_to_new_hgraph_and_id;
    std::map<std::pair<unsigned, index_type>, index_type> hgraph_and_id_to_old_idx;
    for (index_type node = 0; node < nr_nodes; ++node)
    {
        node_to_new_hgraph_and_id[node] = std::make_pair(0, node);
        hgraph_and_id_to_old_idx[std::make_pair(0, node)] = node;
    }

    while (parts < nr_parts)
    {
        unsigned end_idx = static_cast<unsigned>(sub_hgraphs.size());
        for (unsigned sub_hgraph_index = start_index; sub_hgraph_index < end_idx; ++sub_hgraph_index)
        {
            const Hgraph& hgraph = sub_hgraphs[sub_hgraph_index];
            PartitioningProblem<Hgraph> instance(hgraph, 2);
            Partitioning<Hgraph> sub_partition(instance);
            for (index_type node = 0; node < hgraph.num_vertices(); ++node)
                sub_partition.setAssignedPartition(node, node % 2);

            GenericFM<Hgraph> sub_fm;
            sub_fm.setMaxNodesInPart(max_nodes_on_level[level]);
            sub_fm.ImprovePartitioning(sub_partition);

            std::vector<index_type> current_idx(2, 0);
            std::vector<std::vector<bool>> part_indicator(2, std::vector<bool>(hgraph.num_vertices(), false));
            for (index_type node = 0; node < hgraph.num_vertices(); ++node)
            {
                const unsigned part_id = sub_partition.assignedPartition(node);
                const index_type original_id = hgraph_and_id_to_old_idx[std::make_pair(sub_hgraph_index, node)];
                node_to_new_hgraph_and_id[original_id] = std::make_pair(sub_hgraphs.size() + part_id, current_idx[part_id]);
                hgraph_and_id_to_old_idx[std::make_pair(sub_hgraphs.size() + part_id, current_idx[part_id])] = original_id;
                ++current_idx[part_id];
                part_indicator[part_id][node] = true;
            }

            for (unsigned part = 0; part < 2; ++part)
                sub_hgraphs.push_back(create_induced_hypergraph(sub_hgraphs[sub_hgraph_index], part_indicator[part]));

            ++start_index;
        }

        parts *= 2;
        ++level;
    }

    for (index_type node = 0; node < nr_nodes; ++node)
        partition.setAssignedPartition(node, node_to_new_hgraph_and_id[node].first - (static_cast<unsigned>(sub_hgraphs.size()) - nr_parts));

}

template<typename hypergraph_t>
std::vector<typename hypergraph_t::vertex_idx> GenericFM<hypergraph_t>::getMaxNodesOnLevel(typename hypergraph_t::vertex_idx nr_nodes, unsigned nr_parts) const
{
    std::vector<index_type> max_nodes_on_level;
    std::vector<index_type> limit_per_level({static_cast<index_type>(std::ceil(static_cast<double>(nr_nodes) / 2.0))});
    for (unsigned parts = nr_parts / 4; parts > 0; parts /= 2)
        limit_per_level.push_back(static_cast<index_type>(std::ceil(static_cast<double>(limit_per_level.back()) / 2.0)));

    max_nodes_on_level.push_back(max_nodes_in_part);
    for (unsigned parts = 2; parts < nr_parts; parts *= 2)
    {
        index_type next_limit = max_nodes_on_level.back() * 2;
        if (next_limit > limit_per_level.back())
            --next_limit;

        limit_per_level.pop_back();
        max_nodes_on_level.push_back(next_limit);
    }
+
+template<typename hypergraph_t>
+std::vector<typename hypergraph_t::vertex_idx> GenericFM<hypergraph_t>::getMaxNodesOnLevel(typename hypergraph_t::vertex_idx nr_nodes, unsigned nr_parts) const
+{
+    std::vector<index_type> max_nodes_on_level;
+    std::vector<index_type> limit_per_level({static_cast<index_type>(ceil(static_cast<double>(nr_nodes) / 2.0))});
+    for(unsigned parts = nr_parts / 4; parts > 0; parts /= 2)
+        limit_per_level.push_back(static_cast<index_type>(ceil(static_cast<double>(limit_per_level.back()) / 2.0)));
+
+    max_nodes_on_level.push_back(max_nodes_in_part);
+    for(unsigned parts = 2; parts < nr_parts; parts *= 2)
+    {
+        index_type next_limit = max_nodes_on_level.back()*2;
+        if(next_limit > limit_per_level.back())
+            --next_limit;
+
+        limit_per_level.pop_back();
+        max_nodes_on_level.push_back(next_limit);
+    }
+
+    std::reverse(max_nodes_on_level.begin(), max_nodes_on_level.end());
+    return max_nodes_on_level;
+}
+
+} // namespace osp
\ No newline at end of file
diff --git a/include/osp/partitioning/partitioners/partitioning_ILP.hpp b/include/osp/partitioning/partitioners/partitioning_ILP.hpp
new file mode 100644
index 00000000..6b799c7f
--- /dev/null
+++ b/include/osp/partitioning/partitioners/partitioning_ILP.hpp
@@ -0,0 +1,148 @@
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include <limits>
+#include <vector>
+
+#include "osp/partitioning/partitioners/partitioning_ILP_base.hpp"
+#include "osp/partitioning/model/partitioning.hpp"
+
+namespace osp{
+
+template<typename hypergraph_t>
+class HypergraphPartitioningILP : public HypergraphPartitioningILPBase<hypergraph_t> {
+
+  protected:
+    std::vector<unsigned> readCoptAssignment(const PartitioningProblem<hypergraph_t> &instance, Model& model);
+
+    void setupExtraVariablesConstraints(const PartitioningProblem<hypergraph_t> &instance, Model& model);
+
+    void setInitialSolution(const Partitioning<hypergraph_t> &partition, Model& model);
+
+  public:
+
+    virtual ~HypergraphPartitioningILP() = default;
+
+    RETURN_STATUS computePartitioning(Partitioning<hypergraph_t>& result);
+
+    virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILP"; }
+};
+
+template<typename hypergraph_t>
+RETURN_STATUS HypergraphPartitioningILP<hypergraph_t>::computePartitioning(Partitioning<hypergraph_t>& result)
+{
+    Envr env;
+    Model model = env.CreateModel("HypergraphPart");
+
+    this->setupFundamentalVariablesConstraintsObjective(result.getInstance(), model);
+    setupExtraVariablesConstraints(result.getInstance(), model);
+
+    if(this->use_initial_solution)
+        setInitialSolution(result, model);
+
+    this->solveILP(model);
+
+    if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_OPTIMAL) {
+
+        result.setAssignedPartitions(readCoptAssignment(result.getInstance(), model));
+        return RETURN_STATUS::OSP_SUCCESS;
+
+    } else if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_INF_OR_UNB) {
+
+        return RETURN_STATUS::ERROR;
+
+    } else {
+
+        if (model.GetIntAttr(COPT_INTATTR_HASMIPSOL)) {
+
+            result.setAssignedPartitions(readCoptAssignment(result.getInstance(), model));
+            return RETURN_STATUS::BEST_FOUND;
+
+        } else {
+            return RETURN_STATUS::ERROR;
+        }
+    }
+}
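The three-way status mapping above is what callers are expected to branch on. A hypothetical call-pattern sketch (it compiles only against this patch plus a COPT installation; `hypergraph` and `problem` are placeholders for whatever instance the caller has built):

    // Sketch, not part of the patch: `hypergraph` and `problem` are assumed.
    Partitioning<hypergraph> result(problem);
    HypergraphPartitioningILP<hypergraph> ilp;
    ilp.setTimeLimitSeconds(60);   // setter inherited from the base class
    const RETURN_STATUS status = ilp.computePartitioning(result);
    // OSP_SUCCESS -> COPT proved optimality,
    // BEST_FOUND  -> the time limit was hit but an incumbent exists,
    // ERROR       -> infeasible/unbounded, or no incumbent at all.
    if (status == RETURN_STATUS::OSP_SUCCESS || status == RETURN_STATUS::BEST_FOUND)
        std::cout << result.computeConnectivityCost() << std::endl;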
+
+template<typename hypergraph_t>
+void HypergraphPartitioningILP<hypergraph_t>::setupExtraVariablesConstraints(const PartitioningProblem<hypergraph_t> &instance, Model& model) {
+
+    using index_type = typename hypergraph_t::vertex_idx;
+
+    const index_type numberOfParts = instance.getNumberOfPartitions();
+    const index_type numberOfVertices = instance.getHypergraph().num_vertices();
+
+    // Constraints
+
+    // each node assigned to exactly one partition
+    for (index_type node = 0; node < numberOfVertices; node++) {
+
+        Expr expr;
+        for (unsigned part = 0; part < numberOfParts; part++)
+            expr += this->node_in_partition[node][static_cast<int>(part)];
+
+        model.AddConstr(expr == 1);
+    }
+
+    // hyperedge indicators match node variables
+    for (unsigned part = 0; part < numberOfParts; part++)
+        for (index_type node = 0; node < numberOfVertices; node++)
+            for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast<int>(part)] >= this->node_in_partition[node][static_cast<int>(part)]);
+
+};
+
+// convert generic one-to-many assignment (of base class function) to one-to-one
+template<typename hypergraph_t>
+std::vector<unsigned> HypergraphPartitioningILP<hypergraph_t>::readCoptAssignment(const PartitioningProblem<hypergraph_t> &instance, Model& model)
+{
+    using index_type = typename hypergraph_t::vertex_idx;
+
+    std::vector<unsigned> node_to_partition(instance.getHypergraph().num_vertices(), std::numeric_limits<unsigned>::max());
+    std::vector<std::vector<unsigned> > assignmentsGenericForm = this->readAllCoptAssignments(instance, model);
+
+    for (index_type node = 0; node < instance.getHypergraph().num_vertices(); node++)
+        node_to_partition[node] = assignmentsGenericForm[node].front();
+
+    return node_to_partition;
+}
+
+template<typename hypergraph_t>
+void HypergraphPartitioningILP<hypergraph_t>::setInitialSolution(const Partitioning<hypergraph_t> &partition, Model& model)
+{
+    using index_type = typename hypergraph_t::vertex_idx;
+
+    const std::vector<unsigned>& assignment = partition.assignedPartitions();
+    const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions();
+    if(assignment.size() != partition.getInstance().getHypergraph().num_vertices())
+        return;
+
+    for(index_type node = 0; node < assignment.size(); ++node)
+    {
+        if(assignment[node] >= numPartitions)
+            continue;
+
+        for(unsigned part = 0; part < numPartitions; ++part)
+            model.SetMipStart(this->node_in_partition[node][static_cast<int>(part)], static_cast<double>(assignment[node] == part));
+    }
+    model.LoadMipStart();
+}
+
+} // namespace osp
\ No newline at end of file
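setInitialSolution feeds COPT a warm start by expanding the one-to-one assignment into the same one-hot encoding the model's binaries use. A runnable pure-std sketch of exactly that expansion (the assignment values are made up for illustration):

    // One-hot expansion behind the warm start: each node's partition id
    // becomes a 0/1 row over the k partition binaries, which is what
    // SetMipStart receives variable by variable.
    #include <iostream>
    #include <vector>

    int main() {
        const unsigned k = 3;
        const std::vector<unsigned> assignment{0, 2, 1, 2};  // node -> partition
        for (unsigned node = 0; node < assignment.size(); ++node) {
            for (unsigned part = 0; part < k; ++part)
                std::cout << (assignment[node] == part ? 1 : 0) << ' ';
            std::cout << '\n';  // e.g. node 0 -> "1 0 0"
        }
    }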
diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp
new file mode 100644
index 00000000..60e7378c
--- /dev/null
+++ b/include/osp/partitioning/partitioners/partitioning_ILP_base.hpp
@@ -0,0 +1,176 @@
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include <map>
+#include <set>
+
+#include "osp/partitioning/model/partitioning_problem.hpp"
+#include "osp/bsp/model/BspInstance.hpp" // for return statuses
+
+namespace osp{
+
+template<typename hypergraph_t>
+class HypergraphPartitioningILPBase {
+
+  protected:
+    std::vector<VarArray> node_in_partition;
+    std::vector<VarArray> hyperedge_uses_partition;
+
+    unsigned time_limit_seconds = 3600;
+    bool use_initial_solution = false;
+
+    std::vector<std::vector<unsigned> > readAllCoptAssignments(const PartitioningProblem<hypergraph_t> &instance, Model& model);
+
+    void setupFundamentalVariablesConstraintsObjective(const PartitioningProblem<hypergraph_t> &instance, Model& model);
+
+    void solveILP(Model& model);
+
+  public:
+
+    virtual std::string getAlgorithmName() const = 0;
+
+    inline unsigned getTimeLimitSeconds() const { return time_limit_seconds; }
+    inline void setTimeLimitSeconds(unsigned limit_) { time_limit_seconds = limit_; }
+    inline void setUseInitialSolution(bool use_) { use_initial_solution = use_; }
+};
+
+template<typename hypergraph_t>
+void HypergraphPartitioningILPBase<hypergraph_t>::solveILP(Model& model) {
+
+    model.SetIntParam(COPT_INTPARAM_LOGTOCONSOLE, 0);
+
+    model.SetDblParam(COPT_DBLPARAM_TIMELIMIT, time_limit_seconds);
+    model.SetIntParam(COPT_INTPARAM_THREADS, 128);
+
+    model.SetIntParam(COPT_INTPARAM_STRONGBRANCHING, 1);
+    model.SetIntParam(COPT_INTPARAM_LPMETHOD, 1);
+    model.SetIntParam(COPT_INTPARAM_ROUNDINGHEURLEVEL, 1);
+
+    model.SetIntParam(COPT_INTPARAM_SUBMIPHEURLEVEL, 1);
+    // model.SetIntParam(COPT_INTPARAM_PRESOLVE, 1);
+    // model.SetIntParam(COPT_INTPARAM_CUTLEVEL, 0);
+    model.SetIntParam(COPT_INTPARAM_TREECUTLEVEL, 2);
+    // model.SetIntParam(COPT_INTPARAM_DIVINGHEURLEVEL, 2);
+
+    model.Solve();
+}
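The parameter block above is a fixed heuristic tuning: logging off, a hard cap of 128 threads, and a mix of branching and heuristic levels. Only the time limit is exposed through the public interface; if the thread cap ever needed the same treatment, it would presumably follow the existing setter pattern. A hypothetical fragment, not part of the patch:

    // Hypothetical extension mirroring the time-limit setter above:
    // unsigned max_threads = 128;   // protected member, like time_limit_seconds
    // inline void setMaxThreads(unsigned threads_) { max_threads = threads_; }
    // ...and inside solveILP():
    // model.SetIntParam(COPT_INTPARAM_THREADS, static_cast<int>(max_threads));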
+
+template<typename hypergraph_t>
+void HypergraphPartitioningILPBase<hypergraph_t>::setupFundamentalVariablesConstraintsObjective(const PartitioningProblem<hypergraph_t> &instance, Model& model) {
+
+    using index_type = typename hypergraph_t::vertex_idx;
+    using workw_type = typename hypergraph_t::vertex_work_weight_type;
+    using memw_type = typename hypergraph_t::vertex_mem_weight_type;
+
+    const index_type numberOfParts = instance.getNumberOfPartitions();
+    const index_type numberOfVertices = instance.getHypergraph().num_vertices();
+    const index_type numberOfHyperedges = instance.getHypergraph().num_hyperedges();
+
+    // Variables
+
+    node_in_partition = std::vector<VarArray>(numberOfVertices);
+
+    for (index_type node = 0; node < numberOfVertices; node++)
+        node_in_partition[node] = model.AddVars(static_cast<int>(numberOfParts), COPT_BINARY, "node_in_partition");
+
+    hyperedge_uses_partition = std::vector<VarArray>(numberOfHyperedges);
+
+    for (index_type hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++)
+        hyperedge_uses_partition[hyperedge] = model.AddVars(static_cast<int>(numberOfParts), COPT_BINARY, "hyperedge_uses_partition");
+
+    // partition size constraints
+    if(instance.getMaxWorkWeightPerPartition() < std::numeric_limits<workw_type>::max())
+    {
+        for (unsigned part = 0; part < numberOfParts; part++)
+        {
+            Expr expr;
+            for (index_type node = 0; node < numberOfVertices; node++)
+                expr += instance.getHypergraph().get_vertex_work_weight(node) * node_in_partition[node][static_cast<int>(part)];
+
+            model.AddConstr(expr <= instance.getMaxWorkWeightPerPartition());
+        }
+    }
+    if(instance.getMaxMemoryWeightPerPartition() < std::numeric_limits<memw_type>::max())
+    {
+        for (unsigned part = 0; part < numberOfParts; part++)
+        {
+            Expr expr;
+            for (index_type node = 0; node < numberOfVertices; node++)
+                expr += instance.getHypergraph().get_vertex_memory_weight(node) * node_in_partition[node][static_cast<int>(part)];
+
+            model.AddConstr(expr <= instance.getMaxMemoryWeightPerPartition());
+        }
+    }
+
+    // set objective
+    Expr expr;
+    for (index_type hyperedge = 0; hyperedge < numberOfHyperedges; hyperedge++)
+    {
+        expr -= instance.getHypergraph().get_hyperedge_weight(hyperedge);
+        for (unsigned part = 0; part < numberOfParts; part++)
+            expr += instance.getHypergraph().get_hyperedge_weight(hyperedge) * hyperedge_uses_partition[hyperedge][static_cast<int>(part)];
+    }
+
+    model.SetObjective(expr, COPT_MINIMIZE);
+
+};
+
+template<typename hypergraph_t>
+std::vector<std::vector<unsigned> > HypergraphPartitioningILPBase<hypergraph_t>::readAllCoptAssignments(const PartitioningProblem<hypergraph_t> &instance, Model& model)
+{
+    using index_type = typename hypergraph_t::vertex_idx;
+
+    std::vector<std::vector<unsigned> > node_to_partitions(instance.getHypergraph().num_vertices());
+
+    std::set<unsigned> nonempty_partition_ids;
+    for (index_type node = 0; node < instance.getHypergraph().num_vertices(); node++)
+        for(unsigned part = 0; part < instance.getNumberOfPartitions(); part++)
+            if(node_in_partition[node][static_cast<int>(part)].Get(COPT_DBLINFO_VALUE) >= .99)
+            {
+                node_to_partitions[node].push_back(part);
+                nonempty_partition_ids.insert(part);
+            }
+
+    for(std::vector<unsigned>& chosen_partitions : node_to_partitions)
+        if(chosen_partitions.empty())
+        {
+            std::cout << "Error: partitioning returned by ILP seems incomplete!" << std::endl;
+            chosen_partitions.push_back(std::numeric_limits<unsigned>::max());
+        }
+
+    unsigned current_index = 0;
+    std::map<unsigned, unsigned> new_part_index;
+    for(unsigned part_index : nonempty_partition_ids)
+    {
+        new_part_index[part_index] = current_index;
+        ++current_index;
+    }
+
+    for(index_type node = 0; node < instance.getHypergraph().num_vertices(); node++)
+        for(unsigned entry_idx = 0; entry_idx < node_to_partitions[node].size(); ++entry_idx)
+            node_to_partitions[node][entry_idx] = new_part_index[node_to_partitions[node][entry_idx]];
+
+    std::cout << "Hypergraph partitioning ILP best solution value: " << model.GetDblAttr(COPT_DBLATTR_BESTOBJ) << std::endl;
+
+    return node_to_partitions;
+}
+
+} // namespace osp
\ No newline at end of file
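In math form, the base class builds exactly this model: binaries $x_{v,p}$ (node $v$ placed on partition $p$) and $y_{e,p}$ (hyperedge $e$ touches partition $p$), optional per-partition work and memory budgets, and the connectivity-minus-one objective:

\[
\min \sum_{e \in E} w(e)\Bigl(\sum_{p=1}^{k} y_{e,p} - 1\Bigr)
\quad \text{s.t.} \quad
\sum_{v \in V} w_{\mathrm{work}}(v)\, x_{v,p} \le W_{\max} \;\; \forall p,
\qquad
x_{v,p},\, y_{e,p} \in \{0,1\}.
\]

The coupling constraints that force $y_{e,p} \ge x_{v,p}$ for pins $v \in e$ are deliberately left to the derived classes, because replication changes their shape.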
diff --git a/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
new file mode 100644
--- /dev/null
+++ b/include/osp/partitioning/partitioners/partitioning_ILP_replication.hpp
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#pragma once
+
+#include <map>
+#include <vector>
+
+#include "osp/partitioning/partitioners/partitioning_ILP_base.hpp"
+#include "osp/partitioning/model/partitioning_replication.hpp"
+
+namespace osp{
+
+template<typename hypergraph_t>
+class HypergraphPartitioningILPWithReplication : public HypergraphPartitioningILPBase<hypergraph_t> {
+
+  public:
+    enum class REPLICATION_MODEL_IN_ILP { ONLY_TWICE, GENERAL };
+
+  protected:
+    void setupExtraVariablesConstraints(const PartitioningProblem<hypergraph_t> &instance, Model& model);
+
+    void setInitialSolution(const PartitioningWithReplication<hypergraph_t> &partition, Model& model);
+
+    REPLICATION_MODEL_IN_ILP replication_model = REPLICATION_MODEL_IN_ILP::ONLY_TWICE;
+
+  public:
+
+    virtual ~HypergraphPartitioningILPWithReplication() = default;
+
+    RETURN_STATUS computePartitioning(PartitioningWithReplication<hypergraph_t>& result);
+
+    virtual std::string getAlgorithmName() const override { return "HypergraphPartitioningILPWithReplication"; }
+
+    void setReplicationModel(REPLICATION_MODEL_IN_ILP replication_model_) { replication_model = replication_model_; }
+};
+
+template<typename hypergraph_t>
+RETURN_STATUS HypergraphPartitioningILPWithReplication<hypergraph_t>::computePartitioning(PartitioningWithReplication<hypergraph_t>& result)
+{
+    Envr env;
+    Model model = env.CreateModel("HypergraphPartRepl");
+
+    this->setupFundamentalVariablesConstraintsObjective(result.getInstance(), model);
+    setupExtraVariablesConstraints(result.getInstance(), model);
+
+    if(this->use_initial_solution)
+        setInitialSolution(result, model);
+
+    this->solveILP(model);
+
+    if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_OPTIMAL) {
+
+        result.setAssignedPartitionVectors(this->readAllCoptAssignments(result.getInstance(), model));
+        return RETURN_STATUS::OSP_SUCCESS;
+
+    } else if (model.GetIntAttr(COPT_INTATTR_MIPSTATUS) == COPT_MIPSTATUS_INF_OR_UNB) {
+
+        return RETURN_STATUS::ERROR;
+
+    } else {
+
+        if (model.GetIntAttr(COPT_INTATTR_HASMIPSOL)) {
+
+            result.setAssignedPartitionVectors(this->readAllCoptAssignments(result.getInstance(), model));
+            return RETURN_STATUS::BEST_FOUND;
+
+        } else {
+            return RETURN_STATUS::ERROR;
+        }
+    }
+}
+
+template<typename hypergraph_t>
+void HypergraphPartitioningILPWithReplication<hypergraph_t>::setupExtraVariablesConstraints(const PartitioningProblem<hypergraph_t> &instance, Model& model) {
+
+    using index_type = typename hypergraph_t::vertex_idx;
+
+    const index_type numberOfParts = instance.getNumberOfPartitions();
+    const index_type numberOfVertices = instance.getHypergraph().num_vertices();
+
+    if(replication_model == REPLICATION_MODEL_IN_ILP::GENERAL)
+    {
+        // create variables for each pin+partition combination
+        std::map<std::pair<index_type, index_type>, index_type> pin_ID_map;
+        index_type nr_of_pins = 0;
+        for (index_type node = 0; node < numberOfVertices; node++)
+            for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                pin_ID_map[std::make_pair(node, hyperedge)] = nr_of_pins++;
+
+        std::vector<VarArray> pin_covered_by_partition = std::vector<VarArray>(nr_of_pins);
+
+        for (index_type pin = 0; pin < nr_of_pins; pin++)
+            pin_covered_by_partition[pin] = model.AddVars(static_cast<int>(numberOfParts), COPT_BINARY, "pin_covered_by_partition");
+
+        // each pin covered exactly once
+        for (index_type pin = 0; pin < nr_of_pins; pin++) {
+
+            Expr expr;
+            for (unsigned part = 0; part < numberOfParts; part++)
+                expr += pin_covered_by_partition[pin][static_cast<int>(part)];
+
+            model.AddConstr(expr == 1);
+        }
+
+        // pin covering requires node assignment
+        for (unsigned part = 0; part < numberOfParts; part++)
+            for (index_type node = 0; node < numberOfVertices; node++)
+                for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                    model.AddConstr(this->node_in_partition[node][static_cast<int>(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast<int>(part)]);
+
+        // pin covering requires hyperedge use
+        for (unsigned part = 0; part < numberOfParts; part++)
+            for (index_type node = 0; node < numberOfVertices; node++)
+                for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                    model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast<int>(part)] >= pin_covered_by_partition[pin_ID_map[std::make_pair(node, hyperedge)]][static_cast<int>(part)]);
+
+    }
+    else if(replication_model == REPLICATION_MODEL_IN_ILP::ONLY_TWICE)
+    {
+        // each node has one or two copies
+        VarArray node_replicated = model.AddVars(static_cast<int>(numberOfVertices), COPT_BINARY, "node_replicated");
+
+        for (index_type node = 0; node < numberOfVertices; node++) {
+
+            Expr expr = -1;
+            for (unsigned part = 0; part < numberOfParts; part++)
+                expr += this->node_in_partition[node][static_cast<int>(part)];
+
+            model.AddConstr(expr == node_replicated[static_cast<int>(node)]);
+        }
+
+        // hyperedge indicators if node is not replicated
+        for (unsigned part = 0; part < numberOfParts; part++)
+            for (index_type node = 0; node < numberOfVertices; node++)
+                for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                    model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast<int>(part)] >= this->node_in_partition[node][static_cast<int>(part)] - node_replicated[static_cast<int>(node)]);
+
+        // hyperedge indicators if node is replicated
+        for (index_type node = 0; node < numberOfVertices; node++)
+            for (const index_type& hyperedge : instance.getHypergraph().get_incident_hyperedges(node))
+                for (unsigned part1 = 0; part1 < numberOfParts; part1++)
+                    for (unsigned part2 = part1+1; part2 < numberOfParts; part2++)
+                        model.AddConstr(this->hyperedge_uses_partition[hyperedge][static_cast<int>(part1)] + this->hyperedge_uses_partition[hyperedge][static_cast<int>(part2)] >=
+                                        this->node_in_partition[node][static_cast<int>(part1)] + this->node_in_partition[node][static_cast<int>(part2)] - 1);
+    }
+
+};
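The ONLY_TWICE branch linearizes "at most two copies per node" with one extra binary per node, $r_v = \sum_p x_{v,p} - 1$. A hyperedge must then reach a partition only when a pin sits there unduplicated, or when the two copies of a pin straddle a pair of partitions:

\[
y_{e,p} \ge x_{v,p} - r_v \quad (v \in e),
\qquad
y_{e,p_1} + y_{e,p_2} \ge x_{v,p_1} + x_{v,p_2} - 1 \quad (v \in e,\; p_1 < p_2).
\]

The GENERAL branch instead introduces a cover binary $z_{(v,e),p}$ per pin and partition with $\sum_p z_{(v,e),p} = 1$ and $z_{(v,e),p} \le x_{v,p}$, $z_{(v,e),p} \le y_{e,p}$, which permits any number of copies at the price of many more variables.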
+
+template<typename hypergraph_t>
+void HypergraphPartitioningILPWithReplication<hypergraph_t>::setInitialSolution(const PartitioningWithReplication<hypergraph_t> &partition, Model& model)
+{
+    using index_type = typename hypergraph_t::vertex_idx;
+
+    const std::vector<std::vector<unsigned> >& assignments = partition.assignedPartitions();
+    const unsigned& numPartitions = partition.getInstance().getNumberOfPartitions();
+    if(assignments.size() != partition.getInstance().getHypergraph().num_vertices())
+        return;
+
+    for(index_type node = 0; node < assignments.size(); ++node)
+    {
+        std::vector<bool> assignedToPart(numPartitions, false);
+        for(unsigned part : assignments[node])
+            if(part < numPartitions)
+                assignedToPart[part] = true;
+
+        for(unsigned part = 0; part < numPartitions; ++part)
+            model.SetMipStart(this->node_in_partition[node][static_cast<int>(part)], static_cast<double>(assignedToPart[part]));
+    }
+    model.LoadMipStart();
+}
+
+} // namespace osp
\ No newline at end of file
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index d817b7bf..359feed3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -90,6 +90,9 @@ _add_test( bsp_greedy_recomputer )
 ## pebbling model
 _add_test( pebbling_schedule_class )
 
+## partitioning model
+_add_test( hypergraph_and_partition )
+
 ## auxiliary
 _add_test( intpower )
 
@@ -118,6 +121,8 @@ if (COPT_FOUND)
 
 #_add_test( ilp_bsp_scheduler )
 
+#_add_test( ilp_hypergraph_partitioning )
+
 endif()
 
 _add_test( bsp_schedulers )
Steiner +*/ + +#define BOOST_TEST_MODULE HYPERGRAPH_AND_PARTITION +#include + +#include +#include +#include + +#include "osp/partitioning/model/partitioning.hpp" +#include "osp/partitioning/model/partitioning_replication.hpp" +#include "osp/partitioning/model/hypergraph_utility.hpp" +#include "osp/partitioning/partitioners/generic_FM.hpp" +#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp" +#include "osp/auxiliary/io/hdag_graph_file_reader.hpp" +#include "osp/auxiliary/io/mtx_hypergraph_file_reader.hpp" +#include "osp/auxiliary/io/partitioning_file_writer.hpp" + +using namespace osp; + +BOOST_AUTO_TEST_CASE(Hypergraph_and_Partition_test) { + + using graph = computational_dag_vector_impl_def_int_t; + using hypergraph = Hypergraph_def_t; + + // Getting root git directory + std::filesystem::path cwd = std::filesystem::current_path(); + std::cout << cwd << std::endl; + while ((!cwd.empty()) && (cwd.filename() != "OneStopParallel")) { + cwd = cwd.parent_path(); + std::cout << cwd << std::endl; + } + + graph DAG; + + bool status = file_reader::readComputationalDagHyperdagFormatDB( + (cwd / "data/spaa/tiny/instance_bicgstab.hdag").string(), DAG); + + BOOST_CHECK(status); + + hypergraph Hgraph; + + // Matrix format, one hyperedge for each row/column + status = file_reader::readHypergraphMartixMarketFormat((cwd / "data/mtx_tests/ErdosRenyi_8_19_A.mtx").string(), Hgraph); + BOOST_CHECK(status); + BOOST_CHECK_EQUAL(Hgraph.num_vertices(), 27); + BOOST_CHECK_EQUAL(Hgraph.num_hyperedges(), 16); + + // DAG format, all hyperedges have size 2 + Hgraph = convert_from_cdag_as_dag(DAG); + BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); + BOOST_CHECK_EQUAL(DAG.num_edges(), Hgraph.num_hyperedges()); + BOOST_CHECK_EQUAL(DAG.num_edges()*2, Hgraph.num_pins()); + + // HyperDAG format, one hypredge for each non-sink node + unsigned nr_of_non_sinks = 0; + for(const auto &node : DAG.vertices()) + if(DAG.out_degree(node) > 0) + ++ nr_of_non_sinks; + + Hgraph = convert_from_cdag_as_hyperdag(DAG); + BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices()); + BOOST_CHECK_EQUAL(nr_of_non_sinks, Hgraph.num_hyperedges()); + BOOST_CHECK_EQUAL(DAG.num_edges() + nr_of_non_sinks, Hgraph.num_pins()); + + + // Dummy partitioning + + PartitioningProblem instance(Hgraph, 3, 30); + + Partitioning partition(instance); + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition.setAssignedPartition(node, node % 3); + + BOOST_CHECK(partition.satisfiesBalanceConstraint()); + int cutNetCost = partition.computeCutNetCost(); + int connectivityCost = partition.computeConnectivityCost(); + BOOST_CHECK(connectivityCost >= cutNetCost); + + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + instance.getHypergraph().set_vertex_work_weight(node, 1); + + instance.setMaxWorkWeightViaImbalanceFactor(0); + BOOST_CHECK(partition.satisfiesBalanceConstraint()); + + instance.setNumberOfPartitions(5); + instance.setMaxWorkWeightViaImbalanceFactor(0); + BOOST_CHECK(!partition.satisfiesBalanceConstraint()); + + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + partition.setAssignedPartition(node, node % 5); + + BOOST_CHECK(partition.satisfiesBalanceConstraint()); + BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost()); + + for(unsigned node = 0; node < Hgraph.num_vertices(); ++node) + instance.getHypergraph().set_vertex_memory_weight(node, 1); + instance.setMaxMemoryWeightExplicitly(10); + BOOST_CHECK(partition.satisfiesBalanceConstraint() 
+
+
+    // Dummy partitioning with replication
+
+    instance.setHypergraph(convert_from_cdag_as_hyperdag(DAG));
+    instance.setNumberOfPartitions(3);
+    instance.setMaxWorkWeightExplicitly(30);
+    PartitioningWithReplication<hypergraph> partition_with_rep(instance);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_with_rep.setAssignedPartitions(node, {node % 3});
+
+    BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_with_rep.computeCutNetCost() == cutNetCost);
+    BOOST_CHECK(partition_with_rep.computeConnectivityCost() == connectivityCost);
+
+    instance.setMaxWorkWeightExplicitly(60);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_with_rep.setAssignedPartitions(node, {node % 3, (node+1)%3});
+
+    BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_with_rep.computeConnectivityCost() >= partition_with_rep.computeCutNetCost());
+
+    instance.setMaxWorkWeightExplicitly(compute_total_vertex_work_weight(Hgraph));
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_with_rep.setAssignedPartitions(node, {0, 1, 2});
+
+    BOOST_CHECK(partition_with_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_with_rep.computeConnectivityCost() == 0);
+    BOOST_CHECK(partition_with_rep.computeCutNetCost() == 0);
+
+    file_writer::write_txt(std::cout, partition_with_rep);
+
+
+    // Generic FM
+
+    instance.setNumberOfPartitions(2);
+    instance.setMaxWorkWeightExplicitly(35);
+    for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node)
+        instance.getHypergraph().set_vertex_work_weight(node, 1);
+
+    Partitioning<hypergraph> partition_to_improve(instance);
+    for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node)
+        partition_to_improve.setAssignedPartition(node, node % 2);
+
+    int original_cost = partition_to_improve.computeConnectivityCost();
+
+    GenericFM<hypergraph> fm;
+    fm.ImprovePartitioning(partition_to_improve);
+    int new_cost = partition_to_improve.computeConnectivityCost();
+
+    BOOST_CHECK(partition_to_improve.satisfiesBalanceConstraint());
+    BOOST_CHECK(new_cost <= original_cost);
+    std::cout << original_cost << " -> " << new_cost << std::endl;
+
+    // [the statements declaring and reading larger_DAG are missing from this excerpt]
+    instance.setHypergraph(convert_from_cdag_as_hyperdag(larger_DAG));
+
+    instance.setMaxWorkWeightExplicitly(4000);
+    for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node)
+        instance.getHypergraph().set_vertex_work_weight(node, 1);
+
+    partition_to_improve.resetPartition();
+    for(unsigned node = 0; node < instance.getHypergraph().num_vertices(); ++node)
+        partition_to_improve.setAssignedPartition(node, node % 2);
+
+    original_cost = partition_to_improve.computeConnectivityCost();
+
+    fm.setMaxNodesInPart(0);
+    fm.ImprovePartitioning(partition_to_improve);
+    new_cost = partition_to_improve.computeConnectivityCost();
+
+    BOOST_CHECK(partition_to_improve.satisfiesBalanceConstraint());
+    BOOST_CHECK(new_cost <= original_cost);
+    std::cout << original_cost << " -> " << new_cost << std::endl;
+}
\ No newline at end of file
diff --git a/tests/ilp_hypergraph_partitioning.cpp b/tests/ilp_hypergraph_partitioning.cpp
new file mode 100644
--- /dev/null
+++ b/tests/ilp_hypergraph_partitioning.cpp
+/*
+Copyright 2024 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+@author Toni Boehnlein, Benjamin Lozes, Pal Andras Papp, Raphael S. Steiner
+*/
+
+#define BOOST_TEST_MODULE ILP_HYPERGRAPH_PARTITIONING
+#include <boost/test/unit_test.hpp>
+
+#include <filesystem>
+#include "osp/partitioning/model/hypergraph_utility.hpp"
+#include "osp/partitioning/partitioners/partitioning_ILP.hpp"
+#include "osp/partitioning/partitioners/partitioning_ILP_replication.hpp"
+#include "osp/graph_implementations/adj_list_impl/computational_dag_vector_impl.hpp"
+#include "osp/auxiliary/io/hdag_graph_file_reader.hpp"
+
+
+using namespace osp;
+
+BOOST_AUTO_TEST_CASE(test_full) {
+
+    using graph = computational_dag_vector_impl_def_int_t;
+
+    // Getting root git directory
+    std::filesystem::path cwd = std::filesystem::current_path();
+    std::cout << cwd << std::endl;
+    while ((!cwd.empty()) && (cwd.filename() != "OneStopParallel")) {
+        cwd = cwd.parent_path();
+        std::cout << cwd << std::endl;
+    }
+
+    graph DAG;
+
+    bool status = file_reader::readComputationalDagHyperdagFormatDB(
+        (cwd / "data/spaa/tiny/instance_bicgstab.hdag").string(), DAG);
+
+    BOOST_CHECK(status);
+
+    Hypergraph Hgraph = convert_from_cdag_as_hyperdag(DAG);
+    BOOST_CHECK_EQUAL(DAG.num_vertices(), Hgraph.num_vertices());
+
+    PartitioningProblem instance(Hgraph, 3, 35);
+    Partitioning partition(instance);
+
+
+    // ILP without replication
+
+    HypergraphPartitioningILP partitioner;
+    partitioner.setTimeLimitSeconds(60);
+    partitioner.computePartitioning(partition);
+
+    BOOST_CHECK(partition.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition.computeConnectivityCost() >= partition.computeCutNetCost());
+
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition.setAssignedPartition(node, node % 3);
+
+    partitioner.setUseInitialSolution(true);
+    partitioner.computePartitioning(partition);
+
+    BOOST_CHECK(partition.satisfiesBalanceConstraint());
+    int cutNetCost = partition.computeCutNetCost(), connectivityCost = partition.computeConnectivityCost();
+    BOOST_CHECK(connectivityCost >= cutNetCost);
+
+    instance.setMaxMemoryWeightExplicitly(37);
+    partitioner.computePartitioning(partition);
+    BOOST_CHECK(partition.satisfiesBalanceConstraint());
+    BOOST_CHECK(cutNetCost == partition.computeCutNetCost());
+    BOOST_CHECK(connectivityCost == partition.computeConnectivityCost());
+    instance.setMaxMemoryWeightExplicitly(std::numeric_limits<int>::max());
+
+    // ILP with replication
+
+    HypergraphPartitioningILPWithReplication partitioner_rep;
+    PartitioningWithReplication partition_rep(instance);
+
+    partitioner_rep.setTimeLimitSeconds(60);
+    partitioner_rep.computePartitioning(partition_rep);
+
+    BOOST_CHECK(partition_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_rep.computeConnectivityCost() == 0);
+
+    partitioner_rep.setUseInitialSolution(true);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_rep.setAssignedPartitions(node, {node % 3});
+
+    partitioner_rep.computePartitioning(partition_rep);
+    BOOST_CHECK(partition_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_rep.computeConnectivityCost() == 0);
+
+    instance.setMaxWorkWeightExplicitly(60);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_rep.setAssignedPartitions(node, {node % 3, (node+1)%3});
+
+    partitioner_rep.computePartitioning(partition_rep);
+    BOOST_CHECK(partition_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_rep.computeConnectivityCost() == 0);
+
+    // same tests with other replication formulation
+    instance.setMaxWorkWeightExplicitly(35);
+    partitioner_rep.setReplicationModel(HypergraphPartitioningILPWithReplication<>::REPLICATION_MODEL_IN_ILP::GENERAL);
+    partitioner_rep.setUseInitialSolution(false);
+    partitioner_rep.computePartitioning(partition_rep);
+
+    BOOST_CHECK(partition_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_rep.computeConnectivityCost() == 0);
+
+    partitioner_rep.setUseInitialSolution(true);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_rep.setAssignedPartitions(node, {node % 3});
+
+    partitioner_rep.computePartitioning(partition_rep);
+    BOOST_CHECK(partition_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_rep.computeConnectivityCost() == 0);
+
+    instance.setMaxWorkWeightExplicitly(60);
+    for(unsigned node = 0; node < Hgraph.num_vertices(); ++node)
+        partition_rep.setAssignedPartitions(node, {node % 3, (node+1)%3});
+
+    partitioner_rep.computePartitioning(partition_rep);
+    BOOST_CHECK(partition_rep.satisfiesBalanceConstraint());
+    BOOST_CHECK(partition_rep.computeConnectivityCost() == 0);
+
+}
\ No newline at end of file
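The repeated `computeConnectivityCost() == 0` expectations in these replication tests are not accidental: once copies are allowed, a hyperedge contributes nothing as soon as one partition holds a copy of every pin. A self-contained sketch of that coverability check (a simplification for illustration, not the library's actual connectivity metric):

    // Zero-cost coverability under replication: an edge is free when some
    // single partition contains copies of all its pins.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main() {
        const unsigned k = 3;
        const std::vector<std::vector<int>> edges{{0, 1}, {1, 2}};
        // node -> partitions holding a copy, mirroring {node % 3, (node + 1) % 3}
        const std::vector<std::vector<unsigned>> assigned{{0, 1}, {1, 2}, {2, 0}};

        int cost = 0;
        for (const auto& e : edges) {
            bool covered = false;
            for (unsigned p = 0; p < k && !covered; ++p)
                covered = std::all_of(e.begin(), e.end(), [&](int v) {
                    const auto& a = assigned[v];
                    return std::find(a.begin(), a.end(), p) != a.end();
                });
            cost += covered ? 0 : 1;  // unit edge weights
        }
        std::cout << cost << std::endl;  // prints 0: both edges are covered
    }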