diff --git a/inputFiles/compositionalMultiphaseFlow/benchmarks/SPE11/b/spe11b_vti_source_00840x00120.xml b/inputFiles/compositionalMultiphaseFlow/benchmarks/SPE11/b/spe11b_vti_source_00840x00120.xml
index bf2ed23326a..6650647bef0 100644
--- a/inputFiles/compositionalMultiphaseFlow/benchmarks/SPE11/b/spe11b_vti_source_00840x00120.xml
+++ b/inputFiles/compositionalMultiphaseFlow/benchmarks/SPE11/b/spe11b_vti_source_00840x00120.xml
@@ -1,22 +1,27 @@
-
+
+
+
+
-
+
-
-
+
+
@@ -49,7 +54,7 @@
-
+
@@ -71,21 +76,21 @@
beginTime="7.884e5"
maxEventDt="1.3e6"
target="/Solvers/compositionalMultiphaseFlow" />
-
+
+ name="outputs1"
+ beginTime="0"
+ endTime="3153600000"
+ timeFrequency="315360000"
+ targetExactTimestep="1"
+ target="/Outputs/vtkOutput" />
+ name="outputs2"
+ beginTime="3153600000"
+ timeFrequency="3153600000"
+ targetExactTimestep="1"
+ target="/Outputs/vtkOutput" />
-
+
diff --git a/inputFiles/compositionalMultiphaseWell/staircase_co2_wells_hybrid_3d.xml b/inputFiles/compositionalMultiphaseWell/staircase_co2_wells_hybrid_3d.xml
index b7e30e91dcb..f6285205a5d 100644
--- a/inputFiles/compositionalMultiphaseWell/staircase_co2_wells_hybrid_3d.xml
+++ b/inputFiles/compositionalMultiphaseWell/staircase_co2_wells_hybrid_3d.xml
@@ -62,7 +62,6 @@
file="staircase3d_tet_with_properties.vtu"
fieldsToImport="{ FluidComposition, PERM, PORO }"
fieldNamesInGEOS="{ globalCompFraction, rockPerm_permeability, rockPorosity_referencePorosity }"
- partitionRefinement="1"
logLevel="2">
+
+
+
+
+
+ newtonMaxIter="8" />
+ directParallel="0" />
+
+
+
+
+
+ file="pebi3d_with_properties.vtu" />
@@ -44,24 +50,24 @@
+ target="/Outputs/vtkOutput" />
+ target="/Solvers/SinglePhaseFlow" />
+ target="/Outputs/restartOutput" />
+ name="singlePhaseTPFA" />
@@ -69,7 +75,7 @@
+ materialList="{ water, rock }" />
@@ -79,26 +85,26 @@
defaultViscosity="0.001"
referencePressure="0.0"
compressibility="0.0"
- viscosibility="0.0"/>
+ viscosibility="0.0" />
+ permeabilityModelName="rockPerm" />
+ name="nullSolid" />
+ compressibility="0.0" />
+ permeabilityComponents="{ 2.0e-16, 2.0e-16, 2.0e-16 }" />
@@ -109,28 +115,28 @@
setNames="{ all }"
objectPath="ElementRegions/Domain"
fieldName="pressure"
- scale="0.0"/>
+ scale="0.0" />
-
+ setNames="{ source }" />
-
+ setNames="{ sink }" />
+ name="vtkOutput" />
+ name="restartOutput" />
-
+
\ No newline at end of file
diff --git a/inputFiles/singlePhaseFlow/polyhedralDiscretizations/incompressible_consistency_base.xml b/inputFiles/singlePhaseFlow/polyhedralDiscretizations/incompressible_consistency_base.xml
index a53894b8129..b91508e6bde 100644
--- a/inputFiles/singlePhaseFlow/polyhedralDiscretizations/incompressible_consistency_base.xml
+++ b/inputFiles/singlePhaseFlow/polyhedralDiscretizations/incompressible_consistency_base.xml
@@ -1,32 +1,35 @@
-
+
+
+
+
+
+ file="polyhedral_voronoi_lattice.vtu" />
-
-
+
+ xMax="{ +1.001, 1.0, 1.0}" />
+ materialList="{rock, fluid }" />
@@ -42,26 +45,26 @@
referencePressure="0.0"
compressibility="0.0"
viscosibility="0.0"
- densityModelType="linear"/>
+ densityModelType="linear" />
+ permeabilityModelName="rockPerm" />
+ name="nullSolid" />
+ compressibility="0.0" />
+ permeabilityComponents="{ 1.0e-13, 1.0e-13, 1.0e-13 }" />
@@ -72,7 +75,7 @@
setNames="{ all }"
objectPath="ElementRegions/Domain"
fieldName="pressure"
- scale="1.0e7"/>
+ scale="1.0e7" />
-
-
+
+
\ No newline at end of file
diff --git a/inputFiles/surfaceGeneration/cube_8.xml b/inputFiles/surfaceGeneration/cube_8.xml
index 5a31f239e37..97ff296bb81 100644
--- a/inputFiles/surfaceGeneration/cube_8.xml
+++ b/inputFiles/surfaceGeneration/cube_8.xml
@@ -1,4 +1,4 @@
-
+
+ >
+
+
+
+
+ file="cube_8.vtu" />
+
diff --git a/src/coreComponents/integrationTests/fluidFlowTests/testSinglePhaseMFDPolyhedral.cpp b/src/coreComponents/integrationTests/fluidFlowTests/testSinglePhaseMFDPolyhedral.cpp
index 58c8c167f66..180c6a2a46c 100644
--- a/src/coreComponents/integrationTests/fluidFlowTests/testSinglePhaseMFDPolyhedral.cpp
+++ b/src/coreComponents/integrationTests/fluidFlowTests/testSinglePhaseMFDPolyhedral.cpp
@@ -102,12 +102,16 @@ static constexpr auto BdVLM = "beiraoDaVeigaLipnikovManzini";
std::string generateXmlInputTPFA( std::string const & meshFile )
{
std::ostringstream oss;
- oss << R"xml(
+ oss <<
+ R"xml(
+
+
+
+
@@ -256,14 +260,17 @@ std::string generateXmlInputMFD( std::string const & innerProductType,
std::string const & meshFile )
{
std::ostringstream oss;
- oss << R"xml(
+ oss <<
+ R"xml(
+
+
+
diff --git a/src/coreComponents/integrationTests/meshTests/testVTKImport.cpp b/src/coreComponents/integrationTests/meshTests/testVTKImport.cpp
index 3eab728a833..10883e9218d 100644
--- a/src/coreComponents/integrationTests/meshTests/testVTKImport.cpp
+++ b/src/coreComponents/integrationTests/meshTests/testVTKImport.cpp
@@ -20,10 +20,13 @@
#include "LvArray/src/system.hpp"
#include "mainInterface/GeosxState.hpp"
#include "mainInterface/initialization.hpp"
+#include "mainInterface/ProblemManager.hpp"
+
#include "mesh/MeshManager.hpp"
#include "mesh/generators/CellBlockManagerABC.hpp"
#include "mesh/generators/CellBlockABC.hpp"
#include "mesh/generators/VTKUtilities.hpp"
+#include "mesh/mpiCommunications/PartitionerManager.hpp"
// special CMake-generated include
#include "tests/meshDirName.hpp"
@@ -53,12 +56,15 @@ using namespace geos::dataRepository;
template< class V >
void TestMeshImport( string const & meshFilePath, V const & validate, string const fractureName="" )
{
- string const pattern = R"xml(
-
+ string const pattern =
+ R"xml(
+
+
+
+
@@ -66,15 +72,25 @@ void TestMeshImport( string const & meshFilePath, V const & validate, string con
string const meshNode = GEOS_FMT( pattern, meshFilePath, fractureName.empty() ? "" : "faceBlocks=\"{" + fractureName + "}\"" );
xmlWrapper::xmlDocument xmlDocument;
xmlDocument.loadString( meshNode );
+
xmlWrapper::xmlNode xmlMeshNode = xmlDocument.getChild( "Mesh" );
+ xmlWrapper::xmlNode xmlPartitionerNode = xmlDocument.getChild( "Partitioner" );
conduit::Node node;
Group root( "root", node );
+ // Create and initialize PartitionerManager
+ PartitionerManager & partitionerManager = root.registerGroup< PartitionerManager >( ProblemManager::groupKeysStruct().partitionerManager.key() );
+ partitionerManager.processInputFileRecursive( xmlDocument, xmlPartitionerNode );
+ partitionerManager.postInputInitializationRecursive();
+
+
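+  // Create the domain before mesh generation; DomainPartition::hasPartitioner() locates the
+  // PartitionerManager registered on the same root group.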
+ DomainPartition domain( "domain", &root );
+
MeshManager meshManager( "mesh", &root );
meshManager.processInputFileRecursive( xmlDocument, xmlMeshNode );
meshManager.postInputInitializationRecursive();
- DomainPartition domain( "domain", &root );
+
meshManager.generateMeshes( domain );
// TODO Field import is not tested yet. Proper refactoring needs to be done first.
diff --git a/src/coreComponents/mainInterface/ProblemManager.cpp b/src/coreComponents/mainInterface/ProblemManager.cpp
index decbae981ef..a969850d52b 100644
--- a/src/coreComponents/mainInterface/ProblemManager.cpp
+++ b/src/coreComponents/mainInterface/ProblemManager.cpp
@@ -46,7 +46,11 @@
#include "mesh/MeshManager.hpp"
#include "mesh/simpleGeometricObjects/GeometricObjectManager.hpp"
#include "mesh/mpiCommunications/CommunicationTools.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
+#include "mesh/mpiCommunications/PartitionerManager.hpp"
+#include "mesh/mpiCommunications/DomainPartitioner.hpp"
+#include "mesh/generators/InternalMeshGenerator.hpp"
+#include "mesh/generators/ParticleMeshGenerator.hpp"
+#include "mesh/generators/ExternalMeshGeneratorBase.hpp"
#include "physicsSolvers/PhysicsSolverManager.hpp"
#include "physicsSolvers/PhysicsSolverBase.hpp"
#include "schema/schemaUtilities.hpp"
@@ -82,6 +86,7 @@ ProblemManager::ProblemManager( conduit::Node & root ):
m_eventManager = &registerGroup< EventManager >( groupKeys.eventManager );
registerGroup< NumericalMethodsManager >( groupKeys.numericalMethodsManager );
registerGroup< GeometricObjectManager >( groupKeys.geometricObjectManager );
+ registerGroup< PartitionerManager >( groupKeys.partitionerManager );
registerGroup< MeshManager >( groupKeys.meshManager );
registerGroup< OutputManager >( groupKeys.outputManager );
m_physicsSolverManager = &registerGroup< PhysicsSolverManager >( groupKeys.physicsSolverManager );
@@ -512,49 +517,29 @@ void ProblemManager::parseXMLDocument( xmlWrapper::xmlDocument & xmlDocument )
void ProblemManager::postInputInitialization()
{
- DomainPartition & domain = getDomainPartition();
-
Group const & commandLine = getGroup< Group >( groupKeys.commandLine );
- integer const & xparCL = commandLine.getReference< integer >( viewKeys.xPartitionsOverride );
- integer const & yparCL = commandLine.getReference< integer >( viewKeys.yPartitionsOverride );
- integer const & zparCL = commandLine.getReference< integer >( viewKeys.zPartitionsOverride );
-
integer const & suppressPinned = commandLine.getReference< integer >( viewKeys.suppressPinned );
setPreferPinned((suppressPinned == 0));
- PartitionBase & partition = domain.getReference< PartitionBase >( keys::partitionManager );
- bool repartition = false;
- integer xpar = 1;
- integer ypar = 1;
- integer zpar = 1;
- if( xparCL != 0 )
- {
- repartition = true;
- xpar = xparCL;
- }
- if( yparCL != 0 )
- {
- repartition = true;
- ypar = yparCL;
- }
- if( zparCL != 0 )
+ // Auto-create default partitioner if none specified in XML
+ PartitionerManager & partitionerManager = getGroup< PartitionerManager >( groupKeys.partitionerManager );
+ if( partitionerManager.numSubGroups() == 0 )
{
- repartition = true;
- zpar = zparCL;
+ MeshManager const & meshManager = getGroup< MeshManager >( groupKeys.meshManager );
+ MeshManager::createDefaultPartitioner( partitionerManager, meshManager );
}
- if( repartition )
+
+ if( getDomainPartition().hasPartitioner() )
{
- partition.setPartitions( xpar, ypar, zpar );
- int const mpiSize = MpiWrapper::commSize( MPI_COMM_GEOS );
- // Case : Using MPI domain decomposition and partition are not defined (mainly for external mesh readers)
- if( mpiSize > 1 && xpar == 1 && ypar == 1 && zpar == 1 )
- {
- //TODO confirm creates no issues with MPI_Cart_Create
- partition.setPartitions( 1, 1, mpiSize );
- }
+ // Handle command-line partition overrides
+ integer const & xparCL = commandLine.getReference< integer >( viewKeys.xPartitionsOverride );
+ integer const & yparCL = commandLine.getReference< integer >( viewKeys.yPartitionsOverride );
+ integer const & zparCL = commandLine.getReference< integer >( viewKeys.zPartitionsOverride );
+
+ DomainPartitioner & partitioner = getDomainPartition().getPartitioner();
+ partitioner.processCommandLineOverrides( xparCL, yparCL, zparCL );
}
}
-
void ProblemManager::initializationOrder( string_array & order )
{
set< string > usedNames;
diff --git a/src/coreComponents/mainInterface/ProblemManager.hpp b/src/coreComponents/mainInterface/ProblemManager.hpp
index 447ac419885..fe34e9346ac 100644
--- a/src/coreComponents/mainInterface/ProblemManager.hpp
+++ b/src/coreComponents/mainInterface/ProblemManager.hpp
@@ -248,6 +248,7 @@ class ProblemManager : public dataRepository::Group
dataRepository::GroupKey functionManager = { "Functions" }; ///< Functions key
dataRepository::GroupKey geometricObjectManager = { "Geometry" }; ///< Geometry key
dataRepository::GroupKey meshManager = { "Mesh" }; ///< Mesh key
+ dataRepository::GroupKey partitionerManager = { "Partitioner" }; ///< Partitioner key
dataRepository::GroupKey numericalMethodsManager = { numericalMethodsManagerString() }; ///< Numerical methods key
dataRepository::GroupKey outputManager = { "Outputs" }; ///< Outputs key
dataRepository::GroupKey physicsSolverManager = { "Solvers" }; ///< Solvers key
diff --git a/src/coreComponents/mesh/CMakeLists.txt b/src/coreComponents/mesh/CMakeLists.txt
index 3b7e7a825cb..2dfe29a9974 100644
--- a/src/coreComponents/mesh/CMakeLists.txt
+++ b/src/coreComponents/mesh/CMakeLists.txt
@@ -92,7 +92,6 @@ set( mesh_headers
generators/MeshComponentBase.hpp
generators/MeshGeneratorBase.hpp
generators/ParticleMeshGenerator.hpp
- generators/PartitionDescriptor.hpp
generators/PrismUtilities.hpp
generators/Region.hpp
generators/WellGeneratorBase.hpp
@@ -101,8 +100,13 @@ set( mesh_headers
mpiCommunications/MPI_iCommData.hpp
mpiCommunications/NeighborCommunicator.hpp
mpiCommunications/NeighborData.hpp
- mpiCommunications/PartitionBase.hpp
- mpiCommunications/SpatialPartition.hpp
+ mpiCommunications/GraphPartitionEngine.hpp
+ mpiCommunications/NoOpEngine.hpp
+ mpiCommunications/PartitionerManager.hpp
+ mpiCommunications/DomainPartitioner.hpp
+ mpiCommunications/GeometricPartitioner.hpp
+ mpiCommunications/CartesianPartitioner.hpp
+ mpiCommunications/ParticleCartesianPartitioner.hpp
simpleGeometricObjects/Rectangle.hpp
simpleGeometricObjects/Disc.hpp
simpleGeometricObjects/CustomPolarObject.hpp
@@ -181,8 +185,14 @@ set( mesh_sources
mpiCommunications/CommunicationTools.cpp
mpiCommunications/MPI_iCommData.cpp
mpiCommunications/NeighborCommunicator.cpp
- mpiCommunications/PartitionBase.cpp
- mpiCommunications/SpatialPartition.cpp
+ mpiCommunications/GraphPartitionEngine.cpp
+ mpiCommunications/NoOpEngine.cpp
+ mpiCommunications/PartitionerManager.cpp
+ mpiCommunications/DomainPartitioner.cpp
+ mpiCommunications/GeometricPartitioner.cpp
+ mpiCommunications/CartesianPartitioner.cpp
+ mpiCommunications/ParticleCartesianPartitioner.cpp
simpleGeometricObjects/Rectangle.cpp
simpleGeometricObjects/Disc.cpp
simpleGeometricObjects/CustomPolarObject.cpp
@@ -216,6 +226,9 @@ if( ENABLE_VTK )
generators/VTKMeshGeneratorTools.hpp
generators/VTKWellGenerator.hpp
generators/VTKUtilities.hpp
+ mpiCommunications/MeshPartitioner.hpp
+ mpiCommunications/CellGraphPartitioner.hpp
+ mpiCommunications/LayeredMeshPartitioner.hpp
)
set( mesh_sources ${mesh_sources}
generators/CollocatedNodes.cpp
@@ -225,6 +238,9 @@ if( ENABLE_VTK )
generators/VTKMeshGeneratorTools.cpp
generators/VTKWellGenerator.cpp
generators/VTKUtilities.cpp
+ mpiCommunications/MeshPartitioner.cpp
+ mpiCommunications/CellGraphPartitioner.cpp
+ mpiCommunications/LayeredMeshPartitioner.cpp
)
list( APPEND tplDependencyList VTK::IOLegacy VTK::FiltersParallelDIY2 )
if( ENABLE_MPI )
@@ -233,14 +249,14 @@ if( ENABLE_VTK )
endif()
if( ENABLE_PARMETIS )
- set( mesh_headers ${mesh_headers} generators/ParMETISInterface.hpp )
- set( mesh_sources ${mesh_sources} generators/ParMETISInterface.cpp )
+ set( mesh_headers ${mesh_headers} mpiCommunications/ParMetisEngine.hpp )
+ set( mesh_sources ${mesh_sources} mpiCommunications/ParMetisEngine.cpp )
list( APPEND tplDependencyList parmetis metis )
endif()
if( ENABLE_SCOTCH )
- set( mesh_headers ${mesh_headers} generators/PTScotchInterface.hpp )
- set( mesh_sources ${mesh_sources} generators/PTScotchInterface.cpp )
+ set( mesh_headers ${mesh_headers} mpiCommunications/PTScotchEngine.hpp )
+ set( mesh_sources ${mesh_sources} mpiCommunications/PTScotchEngine.cpp )
list( APPEND tplDependencyList SCOTCH::ptscotch SCOTCH::scotcherr )
endif()
diff --git a/src/coreComponents/mesh/DomainPartition.cpp b/src/coreComponents/mesh/DomainPartition.cpp
index 943c6366974..3411179958b 100644
--- a/src/coreComponents/mesh/DomainPartition.cpp
+++ b/src/coreComponents/mesh/DomainPartition.cpp
@@ -27,9 +27,9 @@
#include "constitutive/ConstitutiveManager.hpp"
#include "mesh/ObjectManagerBase.hpp"
#include "mesh/mpiCommunications/CommunicationTools.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
+#include "mesh/mpiCommunications/PartitionerManager.hpp"
#include "mesh/LogLevelsInfo.hpp"
-
+#include "mainInterface/ProblemManager.hpp"
namespace geos
{
@@ -43,10 +43,6 @@ DomainPartition::DomainPartition( string const & name,
setRestartFlags( RestartFlags::NO_WRITE ).
setSizedFromParent( false );
- this->registerWrapper< SpatialPartition, PartitionBase >( keys::partitionManager ).
- setRestartFlags( RestartFlags::NO_WRITE ).
- setSizedFromParent( false );
-
registerGroup( groupKeys.meshBodies );
registerGroup< constitutive::ConstitutiveManager >( groupKeys.constitutiveManager );
@@ -57,66 +53,23 @@ DomainPartition::DomainPartition( string const & name,
DomainPartition::~DomainPartition()
{}
-void DomainPartition::initializationOrder( string_array & order )
-{
- set< string > usedNames;
- {
- order.emplace_back( string( groupKeysStruct::constitutiveManagerString() ) );
- usedNames.insert( groupKeysStruct::constitutiveManagerString() );
- }
-
- {
- order.emplace_back( string( groupKeysStruct::meshBodiesString() ) );
- usedNames.insert( groupKeysStruct::meshBodiesString() );
- }
-
-
- for( auto const & subGroup : this->getSubGroups() )
- {
- if( usedNames.count( subGroup.first ) == 0 )
- {
- order.emplace_back( subGroup.first );
- }
- }
-}
-
-void DomainPartition::setupBaseLevelMeshGlobalInfo()
+void DomainPartition::buildNeighborsFromPartitioner( std::vector< int > const & neighborsRank )
{
GEOS_MARK_FUNCTION;
#if defined(GEOS_USE_MPI)
- PartitionBase & partition1 = getReference< PartitionBase >( keys::partitionManager );
- SpatialPartition & partition = dynamic_cast< SpatialPartition & >(partition1);
+ // Clear existing neighbors
+ m_neighbors.clear();
+ m_numFirstOrderNeighbors = 0;
- const std::set< int > metisNeighborList = partition.getMetisNeighborList();
- if( metisNeighborList.empty() )
+ // Build NeighborCommunicator objects from the rank list
+ for( int const neighborRank : neighborsRank )
{
-
- //get communicator, rank, and coordinates
- MPI_Comm cartcomm;
- {
- int reorder = 0;
- MpiWrapper::cartCreate( MPI_COMM_GEOS, 3, partition.getPartitions().data(), partition.m_Periodic.data(), reorder, &cartcomm );
- GEOS_ERROR_IF( cartcomm == MPI_COMM_NULL, "Fail to run MPI_Cart_create and establish communications" );
- }
- int const rank = MpiWrapper::commRank( MPI_COMM_GEOS );
-
- MpiWrapper::cartCoords( cartcomm, rank, partition.m_nsdof, partition.m_coords.data() );
-
- int ncoords[3];
- addNeighbors( 0, cartcomm, ncoords );
-
- MpiWrapper::commFree( cartcomm );
- }
- else
- {
- for( integer const neighborRank : metisNeighborList )
- {
- m_neighbors.emplace_back( neighborRank );
- }
+ m_neighbors.emplace_back( neighborRank );
}
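+ // Record the number of first-order neighbors before second-order (neighbor-of-neighbor) ranks are appended below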
+ m_numFirstOrderNeighbors = static_cast< int >( m_neighbors.size() );
- // Create an array of the first neighbors.
+ // Create an array of the first-order neighbor ranks
array1d< int > firstNeighborRanks;
for( NeighborCommunicator const & neighbor : m_neighbors )
{
@@ -125,15 +78,14 @@ void DomainPartition::setupBaseLevelMeshGlobalInfo()
int neighborsTag = 54;
- // Send this list of neighbors to all neighbors.
+ // Send this list of neighbors to all neighbors
stdVector< MPI_Request > requests( m_neighbors.size(), MPI_REQUEST_NULL );
-
for( std::size_t i = 0; i < m_neighbors.size(); ++i )
{
MpiWrapper::iSend( firstNeighborRanks.toView(), m_neighbors[ i ].neighborRank(), neighborsTag, MPI_COMM_GEOS, &requests[ i ] );
}
- // This set will contain the second (neighbor of) neighbors ranks.
+ // This set will contain the second-order (neighbor-of-neighbor) ranks
std::set< int > secondNeighborRanks;
array1d< int > neighborOfNeighborRanks;
@@ -141,17 +93,18 @@ void DomainPartition::setupBaseLevelMeshGlobalInfo()
{
MpiWrapper::recv( neighborOfNeighborRanks, m_neighbors[ i ].neighborRank(), neighborsTag, MPI_COMM_GEOS, MPI_STATUS_IGNORE );
- // Insert the neighbors of the current neighbor into the set of second neighbors.
+ // Insert the neighbors of the current neighbor into the set of second neighbors
secondNeighborRanks.insert( neighborOfNeighborRanks.begin(), neighborOfNeighborRanks.end() );
}
- // Remove yourself and all the first neighbors from the second neighbors.
+ // Remove yourself and all the first neighbors from the second neighbors
secondNeighborRanks.erase( MpiWrapper::commRank() );
for( NeighborCommunicator const & neighbor : m_neighbors )
{
secondNeighborRanks.erase( neighbor.neighborRank() );
}
+ // Append second-order neighbors to the neighbor list
for( integer const neighborRank : secondNeighborRanks )
{
m_neighbors.emplace_back( neighborRank );
@@ -159,6 +112,100 @@ void DomainPartition::setupBaseLevelMeshGlobalInfo()
MpiWrapper::waitAll( requests.size(), requests.data(), MPI_STATUSES_IGNORE );
+#endif
+}
+
+void DomainPartition::initializationOrder( string_array & order )
+{
+ set< string > usedNames;
+ {
+ order.emplace_back( string( groupKeysStruct::constitutiveManagerString() ) );
+ usedNames.insert( groupKeysStruct::constitutiveManagerString() );
+ }
+
+ {
+ order.emplace_back( string( groupKeysStruct::meshBodiesString() ) );
+ usedNames.insert( groupKeysStruct::meshBodiesString() );
+ }
+
+
+ for( auto const & subGroup : this->getSubGroups() )
+ {
+ if( usedNames.count( subGroup.first ) == 0 )
+ {
+ order.emplace_back( subGroup.first );
+ }
+ }
+}
+
+
+PartitionerManager & DomainPartition::getPartitionerManager()
+{
+ GEOS_ERROR_IF( !this->hasParent(), "DomainPartition has no parent to get PartitionerManager from" );
+
+ Group & parent = this->getParent();
+
+ GEOS_ERROR_IF( !parent.hasGroup( ProblemManager::groupKeysStruct().partitionerManager.key() ),
+ "Parent does not contain a PartitionerManager" );
+
+ return parent.getGroup< PartitionerManager >( ProblemManager::groupKeysStruct().partitionerManager.key() );
+}
+
+PartitionerManager const & DomainPartition::getPartitionerManager() const
+{
+ GEOS_ERROR_IF( !this->hasParent(), "DomainPartition has no parent to get PartitionerManager from" );
+
+ Group const & parent = this->getParent();
+
+ GEOS_ERROR_IF( !parent.hasGroup( ProblemManager::groupKeysStruct().partitionerManager.key() ),
+ "Parent does not contain a PartitionerManager" );
+
+ return parent.getGroup< PartitionerManager >( ProblemManager::groupKeysStruct().partitionerManager.key() );
+}
+
+bool DomainPartition::hasPartitioner() const
+{
+ // Check if we have a parent
+ Group const * parent = this->hasParent() ? &this->getParent() : nullptr;
+ if( parent == nullptr )
+ {
+ return false;
+ }
+
+ // Check if parent (regardless of type: could be root, ProblemManager, etc.) has a partitionerManager
+ if( parent->hasGroup( ProblemManager::groupKeysStruct().partitionerManager.key() ) )
+ {
+ return getPartitionerManager().hasPartitioner();
+ }
+
+ // No partitioner found
+ return false;
+}
+
+DomainPartitioner & DomainPartition::getPartitioner()
+{
+ return getPartitionerManager().getPartitioner();
+}
+
+DomainPartitioner const & DomainPartition::getPartitioner() const
+{
+ return getPartitionerManager().getPartitioner();
+}
+
+void DomainPartition::setupBaseLevelMeshGlobalInfo()
+{
+ GEOS_MARK_FUNCTION;
+
+#if defined(GEOS_USE_MPI)
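+ // If no partitioner is registered (e.g. in some unit tests), neighbor construction is skipped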
+ if( hasPartitioner())
+ {
+ DomainPartitioner const & partitioner = getPartitioner();
+ buildNeighborsFromPartitioner( partitioner.getNeighborsRank() );
+ }
#endif
forMeshBodies( [&]( MeshBody & meshBody )
@@ -275,56 +322,6 @@ void DomainPartition::setupCommunications( bool use_nonblocking )
} );
}
-void DomainPartition::addNeighbors( const unsigned int idim,
- MPI_Comm & cartcomm,
- int * ncoords )
-{
- PartitionBase & partition1 = getReference< PartitionBase >( keys::partitionManager );
- SpatialPartition & partition = dynamic_cast< SpatialPartition & >(partition1);
-
- if( idim == partition.m_nsdof )
- {
- bool me = true;
- for( int i = 0; i < partition.m_nsdof; i++ )
- {
- if( ncoords[i] != partition.m_coords( i ))
- {
- me = false;
- break;
- }
- }
- int const neighborRank = MpiWrapper::cartRank( cartcomm, ncoords );
- if( !me && !std::any_of( m_neighbors.begin(), m_neighbors.end(), [=]( NeighborCommunicator const & nn ) { return nn.neighborRank( ) == neighborRank; } ) )
- {
- m_neighbors.emplace_back( NeighborCommunicator( neighborRank ) );
- }
- }
- else
- {
- const int dim = partition.getPartitions()( LvArray::integerConversion< localIndex >( idim ));
- const bool periodic = partition.m_Periodic( LvArray::integerConversion< localIndex >( idim ));
- for( int i = -1; i < 2; i++ )
- {
- ncoords[idim] = partition.m_coords( LvArray::integerConversion< localIndex >( idim )) + i;
- bool ok = true;
- if( periodic )
- {
- if( ncoords[idim] < 0 )
- ncoords[idim] = dim - 1;
- else if( ncoords[idim] >= dim )
- ncoords[idim] = 0;
- }
- else
- {
- ok = ncoords[idim] >= 0 && ncoords[idim] < dim;
- }
- if( ok )
- {
- addNeighbors( idim + 1, cartcomm, ncoords );
- }
- }
- }
-}
void DomainPartition::outputPartitionInformation() const
{
diff --git a/src/coreComponents/mesh/DomainPartition.hpp b/src/coreComponents/mesh/DomainPartition.hpp
index 6644915738c..b8f31322362 100644
--- a/src/coreComponents/mesh/DomainPartition.hpp
+++ b/src/coreComponents/mesh/DomainPartition.hpp
@@ -41,7 +41,8 @@ string const partitionManager( "partitionManager" );
}
class ObjectManagerBase;
-class PartitionBase;
+class DomainPartitioner;
+class PartitionerManager;
/**
* @brief Partition of the decomposed physical domain. It also manages the connexion information to its neighbors.
@@ -99,23 +100,6 @@ class DomainPartition : public dataRepository::Group
*/
void setupBaseLevelMeshGlobalInfo();
- /**
- * @brief Recursively builds neighbors if an MPI cartesian topology is used (i.e. not metis).
- * @param idim Dimension index in the cartesian.
- * @param cartcomm Communicator with cartesian structure.
- * @param ncoords Cartesian coordinates of a process (assumed to be of length 3).
- *
- * This recursive function builds the neighbors recursively by increasing
- * the dimension index of the current DomainPartition until all the dimensions (3) are done.
- * The relevant values for initiating this functions are therefore @p ibim = 0
- * and a non-initialized vector @p ncoords of length 3.
- *
- * This functions should have been implemented `private`
- * and an additional functions to initiate the recursion could have been implemented.
- */
- void addNeighbors( const unsigned int idim,
- MPI_Comm & cartcomm,
- int * ncoords );
/**
* @brief Outputs information about the partitioning of the domain.
@@ -172,6 +156,18 @@ class DomainPartition : public dataRepository::Group
NumericalMethodsManager & getNumericalMethodManager()
{ return this->getParent().getGroup< NumericalMethodsManager >( "NumericalMethods" ); }
+ /**
+ * @brief Get the active partitioner from the PartitionerManager, const version.
+ * @return Reference to const DomainPartitioner instance.
+ */
+ DomainPartitioner const & getPartitioner() const;
+
+ /**
+ * @brief Get the active partitioner from the PartitionerManager.
+ * @return Reference to DomainPartitioner instance.
+ */
+ DomainPartitioner & getPartitioner();
+
/**
* @brief Get the mesh bodies, const version.
* @return Reference to a const instance of a Group that contains MeshBody instances.
@@ -274,6 +270,15 @@ class DomainPartition : public dataRepository::Group
stdVector< NeighborCommunicator > const & getNeighbors() const
{ return m_neighbors; };
+ /**
+ * @brief Get the number of first-order neighbors (excluding neighbors-of-neighbors).
+ * First-order neighbors are stored at indices [0, getNumFirstOrderNeighbors()).
+ * Second-order neighbors are stored at indices [getNumFirstOrderNeighbors(), getNeighbors().size()).
+ * @return Number of first-order neighbors.
+ */
+ int getNumFirstOrderNeighbors() const
+ { return m_numFirstOrderNeighbors; }
+
/**
* @brief Get a list of neighbor ranks.
* @return Container of neighbor ranks.
@@ -289,12 +294,46 @@ class DomainPartition : public dataRepository::Group
return ranks;
}
+ /**
+ * @brief Build neighbor communicators from a partitioner's neighbor rank list.
+ * @param neighborsRank Vector of first-order neighbor rank IDs from the partitioner.
+ *
+ * This method builds NeighborCommunicator objects from the lightweight neighbor rank list
+ * provided by the partitioner, then computes second-order neighbors (neighbors-of-neighbors)
+ * and appends them to the neighbor list.
+ */
+ void buildNeighborsFromPartitioner( std::vector< int > const & neighborsRank );
+
+ /**
+ * @brief Get the PartitionerManager.
+ * @return Reference to the PartitionerManager.
+ */
+ PartitionerManager & getPartitionerManager();
+
+ /**
+ * @brief Get the PartitionerManager (const version).
+ * @return Const reference to the PartitionerManager.
+ */
+ PartitionerManager const & getPartitionerManager() const;
+
+ /**
+ * @brief Checks if a partitioner is defined and available for this domain.
+ *
+ * @return True if a partitioner exists, false otherwise.
+ */
+ bool hasPartitioner() const;
+
private:
/**
* @brief Contains all the communicators from this DomainPartition to its neighbors.
*/
stdVector< NeighborCommunicator > m_neighbors;
+
+ /**
+ * @brief Number of first-order neighbors (before second-order neighbors are appended).
+ */
+ int m_numFirstOrderNeighbors = 0;
};
} /* namespace geos */
diff --git a/src/coreComponents/mesh/MeshManager.cpp b/src/coreComponents/mesh/MeshManager.cpp
index 2a67aa8db22..0e3a7167a6c 100644
--- a/src/coreComponents/mesh/MeshManager.cpp
+++ b/src/coreComponents/mesh/MeshManager.cpp
@@ -19,9 +19,13 @@
#include "MeshLevel.hpp"
#include "mesh/LogLevelsInfo.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
#include "generators/CellBlockManagerABC.hpp"
+#include "generators/InternalMeshGenerator.hpp"
+#include "generators/ExternalMeshGeneratorBase.hpp"
+#include "generators/ParticleMeshGenerator.hpp"
#include "mesh/mpiCommunications/CommunicationTools.hpp"
+#include "mesh/mpiCommunications/PartitionerManager.hpp"
+#include "mainInterface/ProblemManager.hpp"
#include "common/TimingMacros.hpp"
#include
@@ -64,19 +68,45 @@ void MeshManager::expandObjectCatalogs()
void MeshManager::generateMeshes( DomainPartition & domain )
{
+ // Early return if no work to do
+ int numMeshGenerators = 0;
+ forSubGroups< MeshGeneratorBase >( [&]( MeshGeneratorBase const & ) { ++numMeshGenerators; } );
+
+ if( numMeshGenerators == 0 )
+ {
+ GEOS_LOG_RANK_0( "No mesh generators found in MeshManager. Assuming meshless simulation." );
+ return;
+ }
+
+ // Get partitioner (create temporary if needed for unit tests)
+ std::unique_ptr< PartitionerManager > tempPartitionerManager;
+ DomainPartitioner * partitioner = nullptr;
+
+ if( domain.hasPartitioner() )
+ {
+ partitioner = &domain.getPartitioner();
+ }
+ else
+ {
+ GEOS_LOG_RANK_0( "No PartitionerManager available (likely a unit test). "
+ "Creating temporary PartitionerManager." );
+ tempPartitionerManager = std::make_unique< PartitionerManager >( "tempPartitionerManager", &domain );
+ partitioner = &tempPartitionerManager->getPartitioner();
+ }
+
+ // Generate all meshes
forSubGroups< MeshGeneratorBase >( [&]( MeshGeneratorBase & meshGen )
{
MeshBody & meshBody = domain.getMeshBodies().registerGroup< MeshBody >( meshGen.getName() );
meshBody.createMeshLevel( 0 );
- SpatialPartition & partition = dynamic_cast< SpatialPartition & >(domain.getReference< PartitionBase >( keys::partitionManager ) );
- meshGen.generateMesh( meshBody, partition );
+ meshGen.generateMesh( meshBody, *partitioner );
if( !meshBody.hasParticles() )
{
CellBlockManagerABC const & cellBlockManager = meshBody.getCellBlockManager();
-
- meshBody.setGlobalLengthScale( std::max( cellBlockManager.getGlobalLength(), cellBlockManager.getGlobalOffset() ) );
+ meshBody.setGlobalLengthScale( std::max( cellBlockManager.getGlobalLength(),
+ cellBlockManager.getGlobalOffset() ) );
}
} );
}
@@ -199,4 +229,86 @@ void MeshManager::importFields( MeshGeneratorBase const & generator,
}
}
+
+void MeshManager::createDefaultPartitioner( PartitionerManager & partitionerManager,
+ MeshManager const & meshManager )
+{
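+ // If a partitioner was already declared (e.g. explicitly in the XML), there is nothing to create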
+ if( partitionerManager.hasPartitioner() )
+ {
+ return;
+ }
+
+ bool hasParticleMesh = false;
+ bool hasInternalMesh = false;
+ bool hasExternalMesh = false;
+
+ meshManager.forSubGroups< MeshGeneratorBase >( [&]( MeshGeneratorBase const & meshGen )
+ {
+ if( dynamic_cast< ParticleMeshGenerator const * >( &meshGen ) != nullptr )
+ {
+ hasParticleMesh = true;
+ }
+ else if( dynamic_cast< InternalMeshGenerator const * >( &meshGen ) != nullptr )
+ {
+ hasInternalMesh = true;
+ }
+ else if( dynamic_cast< ExternalMeshGeneratorBase const * >( &meshGen ) != nullptr )
+ {
+ hasExternalMesh = true;
+ }
+ } );
+
+ // Check for incompatible combination
+ if( hasInternalMesh && hasExternalMesh )
+ {
+ GEOS_ERROR( "Both internal and external meshes detected, but no partitioner specified." );
+ }
+
+ // Create default partitioner based on mesh type
+ if( hasParticleMesh )
+ {
+ GEOS_LOG_RANK_0( "No partitioner specified. "
+ "Creating default ParticleCartesianPartitioner for particle mesh." );
+ partitionerManager.createChild( "ParticleCartesianPartitioner", "defaultPartitioner" );
+ }
+ else if( hasInternalMesh )
+ {
+ GEOS_LOG_RANK_0( "No partitioner specified. "
+ "Creating default CartesianPartitioner for internal mesh." );
+ partitionerManager.createChild( "CartesianPartitioner", "defaultPartitioner" );
+ }
+ else if( hasExternalMesh )
+ {
+ // Check if CellGraphPartitioner is available in the DomainPartitioner catalog (used as a proxy for VTK availability)
+ auto const & catalog = DomainPartitioner::getCatalog();
+ bool const hasCellGraphPartitioner = catalog.count( "CellGraphPartitioner" ) > 0;
+
+ if( hasCellGraphPartitioner )
+ {
+ GEOS_LOG_RANK_0( "No partitioner specified. "
+ "Creating default CellGraphPartitioner for external mesh." );
+ partitionerManager.createChild( "CellGraphPartitioner", "defaultPartitioner" );
+ }
+ else
+ {
+ GEOS_ERROR( "External mesh detected but VTK is not available. " );
+ }
+ }
+ else
+ {
+ // No mesh generators found - this is OK for some unit tests
+ GEOS_LOG_RANK_0( "No partitioner defined and no mesh generators found (likely a unit test)." );
+ return;
+ }
+
+ // Initialize the newly created default partitioner
+ if( partitionerManager.hasPartitioner() )
+ {
+ DomainPartitioner & defaultPartitioner = partitionerManager.getPartitioner();
+ GEOS_LOG_RANK_0( "Initializing default partitioner: " << defaultPartitioner.getCatalogName() );
+ defaultPartitioner.postInputInitialization();
+ GEOS_LOG_RANK_0( "Finished initializing default partitioner" );
+ }
+}
+
} /* namespace geos */
diff --git a/src/coreComponents/mesh/MeshManager.hpp b/src/coreComponents/mesh/MeshManager.hpp
index 886d85692d8..c02c5052129 100644
--- a/src/coreComponents/mesh/MeshManager.hpp
+++ b/src/coreComponents/mesh/MeshManager.hpp
@@ -91,6 +91,14 @@ class MeshManager : public dataRepository::Group
stdMap< string, string > const & fieldsMapping,
FieldIdentifiers & fieldsToBeSync );
+/**
+ * @brief Create a default partitioner based on the mesh types present
+ * @param partitionerManager The partitioner manager to create the partitioner in
+ * @param meshManager The mesh manager to inspect for mesh types
+ */
+ static void createDefaultPartitioner( PartitionerManager & partitionerManager,
+ MeshManager const & meshManager );
+
private:
/**
diff --git a/src/coreComponents/mesh/ParticleManager.cpp b/src/coreComponents/mesh/ParticleManager.cpp
index 57bd1687935..83e9703e47e 100644
--- a/src/coreComponents/mesh/ParticleManager.cpp
+++ b/src/coreComponents/mesh/ParticleManager.cpp
@@ -30,7 +30,7 @@ namespace geos
using namespace dataRepository;
-class SpatialPartition;
// *********************************************************************************************************************
/**
diff --git a/src/coreComponents/mesh/generators/CellBlockManager.hpp b/src/coreComponents/mesh/generators/CellBlockManager.hpp
index 251142d960a..061c5a74fe9 100644
--- a/src/coreComponents/mesh/generators/CellBlockManager.hpp
+++ b/src/coreComponents/mesh/generators/CellBlockManager.hpp
@@ -26,7 +26,6 @@
#include "mesh/generators/LineBlock.hpp"
#include "mesh/generators/LineBlockABC.hpp"
#include "mesh/generators/CellBlockManagerABC.hpp"
-#include "mesh/generators/PartitionDescriptor.hpp"
namespace geos
{
diff --git a/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp b/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp
index 5070d7d33c5..cfed20b0e79 100644
--- a/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp
+++ b/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp
@@ -19,7 +19,7 @@
#include "InternalMeshGenerator.hpp"
#include "CellBlockManager.hpp"
-
+#include "mesh/mpiCommunications/CartesianPartitioner.hpp"
#include "common/DataTypes.hpp"
#include "mesh/MeshFields.hpp"
@@ -541,10 +541,25 @@ static void getElemToNodesRelationInBox( ElementType const elementType,
}
}
-void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockManager, SpatialPartition & partition )
+
+void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockManager, DomainPartitioner & domainPartitioner )
{
GEOS_MARK_FUNCTION;
+ // InternalMeshGenerator requires CartesianPartitioner
+ CartesianPartitioner * cartesianPartitioner = dynamic_cast< CartesianPartitioner * >( &domainPartitioner );
+
+ GEOS_ERROR_IF( cartesianPartitioner == nullptr,
+ GEOS_FMT( "InternalMeshGenerator '{}' requires CartesianPartitioner. "
+ "Current partitioner type is not compatible. ",
+ getName() ) );
+
+ CartesianPartitioner & partitioner = *cartesianPartitioner;
+
+ // Declare periodic boundaries BEFORE initializing domain
+ // This ensures neighbor computation and ghost identification are correct
+ declarePeriodicBoundaries( partitioner );
+
// Partition based on even spacing to get load balance
// Partition geometrical boundaries will be corrected in the end.
{
@@ -556,7 +571,12 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
m_max[1] = m_vertices[1].back();
m_max[2] = m_vertices[2].back();
- partition.setSizes( m_min, m_max );
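+ // Pack the computed domain bounds into R1Tensor objects for the partitioner's initializeDomain()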
+ R1Tensor minTensor;
+ R1Tensor maxTensor;
+ LvArray::tensorOps::copy< 3 >( minTensor.data, m_min );
+ LvArray::tensorOps::copy< 3 >( maxTensor.data, m_max );
+
+ partitioner.initializeDomain( minTensor, maxTensor );
}
// Make sure that the node manager fields are initialized
@@ -603,7 +623,7 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
{
m_numElemsTotal[dim] += m_nElems[dim][block];
}
- array1d< int > const & parts = partition.getPartitions();
+ array1d< int > const & parts = partitioner.getPartitionCounts();
GEOS_ERROR_IF( parts[dim] > m_numElemsTotal[dim], "Number of partitions in a direction should not exceed the number of elements in that direction" );
elemCenterCoords[dim].resize( m_numElemsTotal[dim] );
@@ -628,7 +648,7 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
// lastElemIndexInPartition[i] = -2;
for( int k = 0; k < m_numElemsTotal[dim]; ++k )
{
- if( partition.isCoordInPartition( elemCenterCoords[dim][k], dim ) )
+ if( partitioner.isCoordInPartition( elemCenterCoords[dim][k], dim ) )
{
firstElemIndexInPartition[dim] = k;
break;
@@ -639,7 +659,7 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
{
for( int k = firstElemIndexInPartition[dim]; k < m_numElemsTotal[dim]; ++k )
{
- if( partition.isCoordInPartition( elemCenterCoords[dim][k], dim ) )
+ if( partitioner.isCoordInPartition( elemCenterCoords[dim][k], dim ) )
{
lastElemIndexInPartition[dim] = k;
}
@@ -732,7 +752,8 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
{
numNodesInDir[dim] = lastElemIndexInPartition[dim] - firstElemIndexInPartition[dim] + 2;
}
- reduceNumNodesForPeriodicBoundary( partition, numNodesInDir );
+
+ reduceNumNodesForPeriodicBoundary( partitioner, numNodesInDir );
numNodes = numNodesInDir[0] * numNodesInDir[1] * numNodesInDir[2];
cellBlockManager.setNumNodes( numNodes );
@@ -759,7 +780,7 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
getNodePosition( globalIJK, m_trianglePattern, X[localNodeIndex] );
// Alter global node map for radial mesh
- setNodeGlobalIndicesOnPeriodicBoundary( partition, globalIJK );
+ setNodeGlobalIndicesOnPeriodicBoundary( partitioner, globalIJK );
nodeLocalToGlobal[localNodeIndex] = nodeGlobalIndex( globalIJK );
@@ -822,7 +843,8 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa
// Reset the number of nodes in each dimension in case of periodic BCs so the element firstNodeIndex
// calculation is correct? Not actually needed in parallel since we still have ghost nodes in that case and
// the count has not been altered due to periodicity.
- if( std::any_of( partition.m_Periodic.begin(), partition.m_Periodic.end(), []( int & dimPeriodic ) { return dimPeriodic == 1; } ) )
+ array1d< int > const periodic = partitioner.getPeriodicity();
+ if( std::any_of( periodic.begin(), periodic.end(), []( int const & dimPeriodic ) { return dimPeriodic == 1; } ) )
{
for( int i = 0; i < m_dim; ++i )
{
diff --git a/src/coreComponents/mesh/generators/InternalMeshGenerator.hpp b/src/coreComponents/mesh/generators/InternalMeshGenerator.hpp
index bf7d893160f..c4eca2f6247 100644
--- a/src/coreComponents/mesh/generators/InternalMeshGenerator.hpp
+++ b/src/coreComponents/mesh/generators/InternalMeshGenerator.hpp
@@ -23,7 +23,7 @@
#include "common/format/EnumStrings.hpp"
#include "mesh/generators/MeshGeneratorBase.hpp"
#include "mesh/generators/CellBlockManager.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
+#include "mesh/mpiCommunications/CartesianPartitioner.hpp"
namespace geos
{
@@ -76,28 +76,28 @@ class InternalMeshGenerator : public MeshGeneratorBase
/**
* @brief Reduce the number of nodes in a block coordinate direction for
- * @param partition The partitioning object
+ * @param partitioner The partitioning object
* @param numNodes The number of nodes in each coordinate direction.
*/
- virtual void reduceNumNodesForPeriodicBoundary( SpatialPartition & partition,
+ virtual void reduceNumNodesForPeriodicBoundary( CartesianPartitioner & partitioner,
integer (& numNodes) [3] )
{
- GEOS_UNUSED_VAR( partition, numNodes );
+ GEOS_UNUSED_VAR( partitioner, numNodes );
};
/**
* @brief Alter the directional indices for when the ending index should be
* set to the beginning of the index as is the case with periodic
* boundaries.
- * @param partition The partitioning object
+ * @param partitioner The partitioning object
* @param index The indices to be evaluated for periodic indexing.
* merging.
*/
virtual void
- setNodeGlobalIndicesOnPeriodicBoundary( SpatialPartition & partition,
+ setNodeGlobalIndicesOnPeriodicBoundary( CartesianPartitioner & partitioner,
int (& index)[3] )
{
- GEOS_UNUSED_VAR( partition, index );
+ GEOS_UNUSED_VAR( partitioner, index );
}
/**
@@ -165,6 +165,19 @@ class InternalMeshGenerator : public MeshGeneratorBase
void postInputInitialization() override;
+ /**
+ * @brief Allow derived classes to declare periodic boundaries before partition initialization.
+ * @param partitioner The Cartesian partitioner to configure
+ * @note This is called BEFORE initializeDomain() to ensure periodicity is set correctly
+ * for neighbor computation and ghost node identification.
+ */
+ virtual void declarePeriodicBoundaries( CartesianPartitioner & partitioner )
+ {
+ GEOS_UNUSED_VAR( partitioner );
+ // Default implementation: no periodic boundaries
+ // Derived classes (e.g., InternalWellboreGenerator) override this as needed
+ }
+
/// Mesh number of dimension
int m_dim;
@@ -264,7 +277,9 @@ class InternalMeshGenerator : public MeshGeneratorBase
- virtual void fillCellBlockManager( CellBlockManager & cellBlockManager, SpatialPartition & partition ) override;
+ virtual void fillCellBlockManager( CellBlockManager & cellBlockManager, DomainPartitioner & partitioner ) override;
+
+private:
/**
* @brief Convert ndim node spatialized index to node global index.
diff --git a/src/coreComponents/mesh/generators/InternalWellboreGenerator.cpp b/src/coreComponents/mesh/generators/InternalWellboreGenerator.cpp
index c48385b9de7..f5cbd21e469 100644
--- a/src/coreComponents/mesh/generators/InternalWellboreGenerator.cpp
+++ b/src/coreComponents/mesh/generators/InternalWellboreGenerator.cpp
@@ -19,7 +19,6 @@
#include "InternalWellboreGenerator.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
namespace geos
{
@@ -112,6 +111,22 @@ InternalWellboreGenerator::InternalWellboreGenerator( string const & name,
}
+void InternalWellboreGenerator::declarePeriodicBoundaries( CartesianPartitioner & partitioner )
+{
+ if( m_isFullAnnulus )
+ {
+ // For full 360-degree wellbores, theta direction is periodic
+ // Only set periodicity for multi-partition case
+ // Single partition case is handled by reducing node count in reduceNumNodesForPeriodicBoundary
+ if( partitioner.getPartitionCounts()[1] > 1 )
+ {
+ partitioner.setPeriodicity( 1, 1 );
+
+ GEOS_LOG_RANK_0( "InternalWellboreGenerator: Theta direction set to periodic (multi-partition)" );
+ }
+ }
+}
+
void InternalWellboreGenerator::postInputInitialization()
{
@@ -302,29 +317,24 @@ void InternalWellboreGenerator::postInputInitialization()
InternalMeshGenerator::postInputInitialization();
}
-void InternalWellboreGenerator::reduceNumNodesForPeriodicBoundary( SpatialPartition & partition,
+void InternalWellboreGenerator::reduceNumNodesForPeriodicBoundary( CartesianPartitioner & partitioner,
integer ( & numNodesInDir )[3] )
{
if( m_isFullAnnulus )
{
- if( partition.getPartitions()[1] == 1 )
+ if( partitioner.getPartitionCounts()[1] == 1 )
{
numNodesInDir[1] -= 1;
}
- else if( partition.getPartitions()[1] > 1 )
- {
- partition.m_Periodic[1] = 1;
- }
}
-
}
void InternalWellboreGenerator::
- setNodeGlobalIndicesOnPeriodicBoundary( SpatialPartition & partition,
+ setNodeGlobalIndicesOnPeriodicBoundary( CartesianPartitioner & partitioner,
int ( & globalIJK )[3] )
{
- GEOS_UNUSED_VAR( partition );
+ GEOS_UNUSED_VAR( partitioner );
if( m_isFullAnnulus )
{
if( globalIJK[1] == m_nElems[1].back() + 1 )
diff --git a/src/coreComponents/mesh/generators/InternalWellboreGenerator.hpp b/src/coreComponents/mesh/generators/InternalWellboreGenerator.hpp
index af745d02f7d..4239ff8476e 100644
--- a/src/coreComponents/mesh/generators/InternalWellboreGenerator.hpp
+++ b/src/coreComponents/mesh/generators/InternalWellboreGenerator.hpp
@@ -52,10 +52,10 @@ class InternalWellboreGenerator : public InternalMeshGenerator
protected:
- void reduceNumNodesForPeriodicBoundary( SpatialPartition & partition,
+ void reduceNumNodesForPeriodicBoundary( CartesianPartitioner & partitioner,
integer ( &numNodes )[3] ) override final;
- void setNodeGlobalIndicesOnPeriodicBoundary( SpatialPartition & partition,
+ void setNodeGlobalIndicesOnPeriodicBoundary( CartesianPartitioner & partitioner,
int ( & index )[3] ) override final;
void setConnectivityForPeriodicBoundaries( int ( & globalIJK )[3],
@@ -90,6 +90,7 @@ class InternalWellboreGenerator : public InternalMeshGenerator
/// @endcond
void postInputInitialization() override final;
+ void declarePeriodicBoundaries( CartesianPartitioner & partitioner ) override;
private:
diff --git a/src/coreComponents/mesh/generators/MeshGeneratorBase.cpp b/src/coreComponents/mesh/generators/MeshGeneratorBase.cpp
index 97dcfad0e97..3ce38c35ba4 100644
--- a/src/coreComponents/mesh/generators/MeshGeneratorBase.cpp
+++ b/src/coreComponents/mesh/generators/MeshGeneratorBase.cpp
@@ -51,7 +51,7 @@ MeshGeneratorBase::CatalogInterface::CatalogType & MeshGeneratorBase::getCatalog
return catalog;
}
-void MeshGeneratorBase::generateMesh( Group & parent, SpatialPartition & partition )
+void MeshGeneratorBase::generateMesh( Group & parent, DomainPartitioner & partitioner )
{
MeshBody & meshBody = dynamic_cast< MeshBody & >( parent );
if( meshBody.hasParticles() )
@@ -61,13 +61,13 @@ void MeshGeneratorBase::generateMesh( Group & parent, SpatialPartition & partiti
MeshLevel & meshLevel0 = meshBody.getBaseDiscretization();
ParticleManager & particleManager = meshLevel0.getParticleManager();
- fillParticleBlockManager( particleBlockManager, particleManager, partition );
+ fillParticleBlockManager( particleBlockManager, particleManager, partitioner );
}
else
{
CellBlockManager & cellBlockManager = parent.registerGroup< CellBlockManager >( keys::cellManager );
- fillCellBlockManager( cellBlockManager, partition );
+ fillCellBlockManager( cellBlockManager, partitioner );
this->attachWellInfo( cellBlockManager );
}
diff --git a/src/coreComponents/mesh/generators/MeshGeneratorBase.hpp b/src/coreComponents/mesh/generators/MeshGeneratorBase.hpp
index 03ae359a7ac..c379e9cfb1f 100644
--- a/src/coreComponents/mesh/generators/MeshGeneratorBase.hpp
+++ b/src/coreComponents/mesh/generators/MeshGeneratorBase.hpp
@@ -20,8 +20,10 @@
#ifndef GEOS_MESH_GENERATORS_MESHGENERATORBASE_HPP
#define GEOS_MESH_GENERATORS_MESHGENERATORBASE_HPP
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
-
+#include "mesh/mpiCommunications/DomainPartitioner.hpp"
+#include "mesh/generators/ParticleBlockManager.hpp"
+#include "mesh/ParticleManager.hpp"
+#include "mesh/MeshBody.hpp"
#include "dataRepository/Group.hpp"
#include "dataRepository/WrapperBase.hpp"
#include "codingUtilities/Utilities.hpp"
@@ -77,9 +79,9 @@ class MeshGeneratorBase : public dataRepository::Group
/**
* @brief Generate the mesh object the input mesh object.
* @param parent The parent group of the CellBlockManager.
- * @param[in] partition The reference to spatial partition
+ * @param[in] partitioner The reference to the domain partitioner
*/
- void generateMesh( Group & parent, SpatialPartition & partition );
+ void generateMesh( Group & parent, DomainPartitioner & partitioner );
/**
* @brief Describe which kind of block must be considered.
@@ -136,12 +138,13 @@ class MeshGeneratorBase : public dataRepository::Group
/**
* @brief Fill the cellBlockManager object .
* @param[inout] cellBlockManager the CellBlockManager that will receive the meshing information
- * @param[in] partition The reference to spatial partition
+ * @param[in] partitioner The reference to the domain partitioner
*/
- virtual void fillCellBlockManager( CellBlockManager & cellBlockManager, SpatialPartition & partition )
+ virtual void fillCellBlockManager( CellBlockManager & cellBlockManager, DomainPartitioner & partitioner )
{
GEOS_UNUSED_VAR( cellBlockManager );
- GEOS_UNUSED_VAR( partition );
+ GEOS_UNUSED_VAR( partitioner );
GEOS_ERROR( "Cell mesh generation not implemented for generator of this type" );
}
@@ -151,13 +154,14 @@ class MeshGeneratorBase : public dataRepository::Group
* @brief Fill the particleBlockManager object .
* @param[inout] particleBlockManager the particleBlockManager that will receive the meshing information
* @param[in] particleManager The reference to the particle manager
- * @param[in] partition The reference to spatial partition
+ * @param[in] partitioner The reference to the domain partitioner
*/
- virtual void fillParticleBlockManager( ParticleBlockManager & particleBlockManager, ParticleManager & particleManager, SpatialPartition const & partition )
+ virtual void fillParticleBlockManager( ParticleBlockManager & particleBlockManager,
+ ParticleManager & particleManager, DomainPartitioner & partitioner )
{
GEOS_UNUSED_VAR( particleBlockManager );
GEOS_UNUSED_VAR( particleManager );
- GEOS_UNUSED_VAR( partition );
+ GEOS_UNUSED_VAR( partitioner );
GEOS_ERROR( "Particle mesh generation not implemented for generator of this type" );
}
diff --git a/src/coreComponents/mesh/generators/PTScotchInterface.cpp b/src/coreComponents/mesh/generators/PTScotchInterface.cpp
deleted file mode 100644
index 2737c42db70..00000000000
--- a/src/coreComponents/mesh/generators/PTScotchInterface.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-/**
- * @file PTScotchInterface.cpp
- */
-
-#include "PTScotchInterface.hpp"
-
-#include <ptscotch.h>
-
-#include
-
-static_assert( std::is_same< int64_t, SCOTCH_Num >::value,
- "Non-matching index types. Scotch must be configured with 64-bit indices." );
-
-#define GEOS_SCOTCH_CHECK( call ) \
- do { \
- auto const ierr = call; \
- GEOS_ERROR_IF_NE_MSG( ierr, 0, "Error in call to:\n" << #call ); \
- } while( false )
-
-namespace geos
-{
-namespace ptscotch
-{
-
-array1d< int64_t >
-partition( ArrayOfArraysView< int64_t const, int64_t > const & graph,
- int64_t const numParts,
- MPI_Comm comm )
-{
- SCOTCH_Num const numVerts = graph.size();
-
- array1d< int64_t > part( numVerts ); // all 0 by default
- if( numParts == 1 )
- {
- return part;
- }
-
- SCOTCH_Dgraph * const gr = SCOTCH_dgraphAlloc();
- GEOS_SCOTCH_CHECK( SCOTCH_dgraphInit( gr, comm ) );
-
- SCOTCH_Num const numEdges = graph.getOffsets()[numVerts];
-
- // Technical UB if Scotch writes into these arrays; in practice we discard them right after
- SCOTCH_Num * const offsets = const_cast< SCOTCH_Num * >( graph.getOffsets() );
- SCOTCH_Num * const edges = const_cast< SCOTCH_Num * >( graph.getValues() );
-
- GEOS_SCOTCH_CHECK( SCOTCH_dgraphBuild( gr, // graphptr
- 0, // baseval
- numVerts, // vertlocnbr
- numVerts, // vertlocmax
- offsets, // vertloctab
- offsets + 1, // vendloctab
- nullptr, // veloloctab
- nullptr, // vlblloctab
- numEdges, // edgelocnbr
- numEdges, // edgelocsiz
- edges, // edgeloctab
- nullptr, // edgegsttab
- nullptr // edloloctab,
- ) );
-
- // TODO: maybe remove?
- GEOS_SCOTCH_CHECK( SCOTCH_dgraphCheck( gr ) );
-
- SCOTCH_Strat * const strategy = SCOTCH_stratAlloc();
- GEOS_SCOTCH_CHECK( SCOTCH_stratInit( strategy ) );
-
- GEOS_SCOTCH_CHECK( SCOTCH_dgraphPart( gr, numParts, strategy, part.data() ) );
-
- SCOTCH_stratExit( strategy );
- SCOTCH_dgraphExit( gr );
-
- return part;
-}
-
-} // namespace ptscotch
-} // namespace geos
diff --git a/src/coreComponents/mesh/generators/PTScotchInterface.hpp b/src/coreComponents/mesh/generators/PTScotchInterface.hpp
deleted file mode 100644
index e188aadd55b..00000000000
--- a/src/coreComponents/mesh/generators/PTScotchInterface.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-/**
- * @file PTScotchInterface.hpp
- */
-
-#ifndef GEOS_MESH_GENERATORS_PTSCOTCHINTERFACE_HPP_
-#define GEOS_MESH_GENERATORS_PTSCOTCHINTERFACE_HPP_
-
-#include "common/DataTypes.hpp"
-
-#include "common/MpiWrapper.hpp"
-
-namespace geos
-{
-namespace ptscotch
-{
-
-/**
- * @brief Partition a mesh according to its dual graph.
- * @param graph the input graph (edges of locally owned nodes)
- * @param numParts target number of partitions
- * @param comm the MPI communicator of processes to partition over
- * @return an array of target partitions for each element in local mesh
- */
-array1d< int64_t >
-partition( ArrayOfArraysView< int64_t const, int64_t > const & graph,
- int64_t const numParts,
- MPI_Comm comm );
-
-} // namespace ptscotch
-} // namespace geos
-
-#endif //GEOS_MESH_GENERATORS_PTSCOTCHINTERFACE_HPP_
diff --git a/src/coreComponents/mesh/generators/ParMETISInterface.cpp b/src/coreComponents/mesh/generators/ParMETISInterface.cpp
deleted file mode 100644
index 4567826b341..00000000000
--- a/src/coreComponents/mesh/generators/ParMETISInterface.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-/**
- * @file ParMETISInterface.cpp
- */
-
-#include "ParMETISInterface.hpp"
-
-#include "common/GEOS_RAJA_Interface.hpp"
-
-#include
-
-#include
-
-#define GEOS_PARMETIS_CHECK( call ) \
- do { \
- auto const ierr = call; \
- GEOS_ERROR_IF_NE_MSG( ierr, METIS_OK, "Error in call to:\n" << #call ); \
- } while( false )
-
-namespace geos
-{
-namespace parmetis
-{
-
-static_assert( std::is_same< idx_t, pmet_idx_t >::value, "Non-matching index types. ParMETIS must be built with 64-bit indices." );
-
-ArrayOfArrays< idx_t, idx_t >
-meshToDual( ArrayOfArraysView< idx_t const, idx_t > const & elemToNodes,
- arrayView1d< idx_t const > const & elemDist,
- MPI_Comm comm,
- int const minCommonNodes )
-{
- idx_t const numElems = elemToNodes.size();
-
- // `parmetis` awaits the arrays to be allocated as two continuous arrays: one for values, the other for offsets.
- // Our `ArrayOfArrays` allows to reserve some extra space for further element insertion,
- // but this is not compatible with what `parmetis` requires.
- GEOS_ASSERT_EQ_MSG( std::accumulate( elemToNodes.getSizes(), elemToNodes.getSizes() + numElems, 0 ),
- elemToNodes.valueCapacity(),
- "Internal error. The element to nodes mapping must be strictly allocated for compatibility with a third party library." );
-
- idx_t numflag = 0;
- idx_t ncommonnodes = minCommonNodes;
- idx_t * xadj;
- idx_t * adjncy;
-
- // Technical UB if ParMETIS writes into these arrays; in practice we discard them right after
- GEOS_PARMETIS_CHECK( ParMETIS_V3_Mesh2Dual( const_cast< idx_t * >( elemDist.data() ),
- const_cast< idx_t * >( elemToNodes.getOffsets() ),
- const_cast< idx_t * >( elemToNodes.getValues() ),
- &numflag, &ncommonnodes, &xadj, &adjncy, &comm ) );
-
- ArrayOfArrays< idx_t, idx_t > graph;
- graph.resizeFromOffsets( numElems, xadj );
-
- // There is no way to direct-copy values into ArrayOfArrays without UB (casting away const)
- forAll< parallelHostPolicy >( numElems, [xadj, adjncy, graph = graph.toView()]( localIndex const k )
- {
- graph.appendToArray( k, adjncy + xadj[k], adjncy + xadj[k+1] );
- } );
-
- METIS_Free( xadj );
- METIS_Free( adjncy );
-
- return graph;
-}
-
-array1d< idx_t >
-partition( ArrayOfArraysView< idx_t const, idx_t > const & graph,
- arrayView1d< idx_t const > const & vertDist,
- idx_t const numParts,
- MPI_Comm comm,
- int const numRefinements )
-{
- array1d< idx_t > part( graph.size() ); // all 0 by default
- if( numParts == 1 )
- {
- return part;
- }
-
- // Compute tpwgts parameters (target partition weights)
- array1d< real_t > tpwgts( numParts );
- tpwgts.setValues< serialPolicy >( 1.0f / static_cast< real_t >( numParts ) );
-
- // Set other ParMETIS parameters
- idx_t wgtflag = 0;
- idx_t numflag = 0;
- idx_t ncon = 1;
- idx_t npart = numParts;
- idx_t options[4] = { 1, 0, 2022, PARMETIS_PSR_UNCOUPLED };
- idx_t edgecut = 0;
- real_t ubvec = 1.05;
-
- // Technical UB if ParMETIS writes into these arrays; in practice we discard them right after
- GEOS_PARMETIS_CHECK( ParMETIS_V3_PartKway( const_cast< idx_t * >( vertDist.data() ),
- const_cast< idx_t * >( graph.getOffsets() ),
- const_cast< idx_t * >( graph.getValues() ),
- nullptr, nullptr, &wgtflag,
- &numflag, &ncon, &npart, tpwgts.data(),
- &ubvec, options, &edgecut, part.data(), &comm ) );
-
- for( int iter = 0; iter < numRefinements; ++iter )
- {
- GEOS_PARMETIS_CHECK( ParMETIS_V3_RefineKway( const_cast< idx_t * >( vertDist.data() ),
- const_cast< idx_t * >( graph.getOffsets() ),
- const_cast< idx_t * >( graph.getValues() ),
- nullptr, nullptr, &wgtflag,
- &numflag, &ncon, &npart, tpwgts.data(),
- &ubvec, options, &edgecut, part.data(), &comm ) );
- }
-
- return part;
-}
-
-} // namespace parmetis
-} // namespace geos
diff --git a/src/coreComponents/mesh/generators/ParMETISInterface.hpp b/src/coreComponents/mesh/generators/ParMETISInterface.hpp
deleted file mode 100644
index fa1bb2c8545..00000000000
--- a/src/coreComponents/mesh/generators/ParMETISInterface.hpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-/**
- * @file ParMETISInterface.hpp
- */
-
-#ifndef GEOS_MESH_GENERATORS_PARMETISINTERFACE_HPP_
-#define GEOS_MESH_GENERATORS_PARMETISINTERFACE_HPP_
-
-#include "common/DataTypes.hpp"
-#include "common/MpiWrapper.hpp"
-
-namespace geos
-{
-
-#if defined(GEOS_USE_HIP) // still need int32 hypre for the current hip-capable build
-/// Typedef to allow us to specify required parmetis integer type.
-using pmet_idx_t = int32_t;
-#else
-/// Typedef to allow us to specify required parmetis integer type.
-using pmet_idx_t = int64_t;
-#endif
-
-namespace parmetis
-{
-
-/**
- * @brief Convert a element-node mesh map into a dual (element-element) graph
- * @param elemToNodes the input mesh represented by its elem-node map
- * @param elemDist the parallel distribution of elements: element index offset on each rank
- * @param comm the MPI communicator of processes to partition over
- * @param minCommonNodes minimum number of shared nodes to create an graph edge
- * @return a graph with an edge for every pair of elements that share at least @p minCommonNodes nodes;
- * target element indices are global with respect to offsets in @p elemDist.
- * @note elemDist must be a comm-wide exclusive scan of elemToNodes.size();
- * the user may compute it once and reuse in a subsequent call to partition().
- */
-ArrayOfArrays< pmet_idx_t, pmet_idx_t >
-meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
- arrayView1d< pmet_idx_t const > const & elemDist,
- MPI_Comm comm,
- int const minCommonNodes );
-
-/**
- * @brief Partition a mesh according to its dual graph.
- * @param graph the input graph (edges of locally owned nodes)
- * @param vertDist the parallel distribution of vertices: vertex index offset on each rank
- * @param numParts target number of partitions
- * @param comm the MPI communicator of processes to partition over
- * @param numRefinements number of partition refinement iterations
- * @return an array of target partitions for each element in local mesh
- */
-array1d< pmet_idx_t >
-partition( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
- arrayView1d< pmet_idx_t const > const & vertDist,
- pmet_idx_t const numParts,
- MPI_Comm comm,
- int const numRefinements );
-
-} // namespace parmetis
-} // namespace geos
-
-#endif //GEOS_MESH_GENERATORS_PARMETISINTERFACE_HPP_
diff --git a/src/coreComponents/mesh/generators/ParticleMeshGenerator.cpp b/src/coreComponents/mesh/generators/ParticleMeshGenerator.cpp
index 334287fca69..3e5299b6230 100644
--- a/src/coreComponents/mesh/generators/ParticleMeshGenerator.cpp
+++ b/src/coreComponents/mesh/generators/ParticleMeshGenerator.cpp
@@ -20,7 +20,7 @@
#include "ParticleMeshGenerator.hpp"
#include "ParticleBlockManager.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
+#include "mesh/mpiCommunications/ParticleCartesianPartitioner.hpp"
#include "common/DataTypes.hpp"
#include "common/TimingMacros.hpp"
@@ -65,10 +65,13 @@ Group * ParticleMeshGenerator::createChild( string const & GEOS_UNUSED_PARAM( ch
}
-void ParticleMeshGenerator::fillParticleBlockManager( ParticleBlockManager & particleBlockManager, ParticleManager & particleManager, SpatialPartition const & partition )
+void ParticleMeshGenerator::fillParticleBlockManager( ParticleBlockManager & particleBlockManager,
+ ParticleManager & particleManager, DomainPartitioner & domainPartitioner )
{
GEOS_MARK_FUNCTION;
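+  // Descriptive note: dynamic_cast to a reference throws std::bad_cast if the domain was
+  // configured with an incompatible partitioner, so particle meshes implicitly require a
+  // Cartesian particle partitioner here.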
+  ParticleCartesianPartitioner & partitioner = dynamic_cast< ParticleCartesianPartitioner & >( domainPartitioner );
+
// This should probably handled elsewhere:
int aa = 0;
for( auto & particleBlockName : m_blockNames )
@@ -146,7 +149,7 @@ void ParticleMeshGenerator::fillParticleBlockManager( ParticleBlockManager & par
if( 1<=column && column<4 ) // 0th column is global ID. Columns 1, 2 and 3 are the particle position components - check for
// partition membership
{ // TODO: This is super obfuscated and hard to read, make it better
- inPartition = inPartition && partition.isCoordInPartition( value, column-1 );
+ inPartition = inPartition && partitioner.isCoordInPartition( value, column-1 );
if( !inPartition ) // if the current particle is outside this partition, we can ignore the rest of its data and go to the next
// line
{
diff --git a/src/coreComponents/mesh/generators/ParticleMeshGenerator.hpp b/src/coreComponents/mesh/generators/ParticleMeshGenerator.hpp
index 91bb1e2208e..f043681cd5f 100644
--- a/src/coreComponents/mesh/generators/ParticleMeshGenerator.hpp
+++ b/src/coreComponents/mesh/generators/ParticleMeshGenerator.hpp
@@ -28,7 +28,7 @@ namespace geos
{
class ParticleManager;
-class SpatialPartition;
+class PartitionerBase;
/**
* @class ParticleMeshGenerator
@@ -61,7 +61,8 @@ class ParticleMeshGenerator : public MeshGeneratorBase
*/
virtual Group * createChild( string const & childKey, string const & childName ) override;
- virtual void fillParticleBlockManager( ParticleBlockManager & particleBlockManager, ParticleManager & particleManager, SpatialPartition const & partition ) override;
+ virtual void fillParticleBlockManager( ParticleBlockManager & particleBlockManager,
+ ParticleManager & particleManager, DomainPartitioner & partitioner ) override;
void importFieldOnArray( Block block,
string const & blockName,
diff --git a/src/coreComponents/mesh/generators/PartitionDescriptor.hpp b/src/coreComponents/mesh/generators/PartitionDescriptor.hpp
deleted file mode 100644
index 7b8b17267d4..00000000000
--- a/src/coreComponents/mesh/generators/PartitionDescriptor.hpp
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-/**
- * @file PartitionDescriptor.hpp
- */
-
-#ifndef GEOS_MESH_PARTITIONDESCRIPTOR_H_
-#define GEOS_MESH_PARTITIONDESCRIPTOR_H_
-
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
-
-#include
-
-
-namespace geos
-{
-
-/**
- * @class PartitionDescriptor
- * @brief Simple utility to retrieve partition information in case of Metis or Spatial partition.
- */
-class PartitionDescriptor
-{
-public:
- /**
- * @brief indicate if the partition is described using a Metis Neighbor list.
- * @return A boolean indicating if the partition is described usins a Metis neighbor list.
- */
- bool hasMetisNeighborList() const { return m_hasMetisNeighborList; }
-
- /**
- * @brief Sets the boolean that indicates if the partition is described using a Metis Neighbor list.
- * @param hasMetisNeighborList A boolean indicating if the partition is described usins a Metis neighbor list.
- */
- void setHasMetisNeighborList( bool hasMetisNeighborList ) { m_hasMetisNeighborList = hasMetisNeighborList; }
-
- /**
- * @brief Gets a reference to the list of metis neighbor list.
- * @return A reference to the Metis neighbor list.
- */
- std::set< int > const & getMetisNeighborList() const { return m_metisNeighborList; }
-
- /**
- * @brief Sets the list of metis neighbor list.
- * @param metisNeighborList A reference to the Metis neighbor list.
- */
- void setMetisNeighborList( stdVector< int > const & metisNeighborList )
- {
- m_metisNeighborList.clear();
- m_metisNeighborList.insert( metisNeighborList.cbegin(), metisNeighborList.cend() );
- }
-
- /**
- * @brief indicate if the partition is described using a spatial partition.
- * @return A boolean indicating if the parition is described using a spatial partition.
- */
- bool hasSpatialPartition() const { return !m_hasMetisNeighborList; }
-
- /**
- * @brief Sets the boolean that indicates if the partition is described using a Metis Neighbor list.
- * @param hasSpatialPartition a boolean indicating if the parition is described using a spatial partition.
- */
- void setHasSpatialPartition( bool hasSpatialPartition ) { m_hasMetisNeighborList = !hasSpatialPartition; }
-
- /**
- * @brief Returns a reference to the spatialPartition
- * @return The spatial partiton.
- */
- SpatialPartition const & getSpatialPartition() const { return m_spatialPartition; }
-
- /**
- * @brief Sets the spatialPartition
- * @param spatialPartition The spatial partiton.
- */
- void setSpatialPartition( SpatialPartition const & spatialPartition ) { m_spatialPartition = spatialPartition; }
-
-private:
-
- bool m_hasMetisNeighborList; //< Indicate if we use metis neighbor list or spatial partition to describe the partition
- std::set< int > m_metisNeighborList; //< The list of neighbors computed wwith metis
- SpatialPartition m_spatialPartition; //< The spatial partition
-};
-
-}
-#endif /* GEOS_MESH_PARTITIONDESCRIPTOR_H_ */
diff --git a/src/coreComponents/mesh/generators/VTKMeshGenerator.cpp b/src/coreComponents/mesh/generators/VTKMeshGenerator.cpp
index e6023216c29..2a60876e743 100644
--- a/src/coreComponents/mesh/generators/VTKMeshGenerator.cpp
+++ b/src/coreComponents/mesh/generators/VTKMeshGenerator.cpp
@@ -25,7 +25,7 @@
#include "mesh/generators/VTKFaceBlockUtilities.hpp"
#include "mesh/generators/VTKMeshGeneratorTools.hpp"
#include "mesh/generators/CellBlockManager.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
+#include "mesh/mpiCommunications/MeshPartitioner.hpp"
#include "mesh/generators/Region.hpp"
#include
@@ -71,13 +71,6 @@ VTKMeshGenerator::VTKMeshGenerator( string const & name,
setInputFlag( InputFlags::OPTIONAL ).
setDescription( "For multi-block files, names of the face mesh block." );
- registerWrapper( viewKeyStruct::partitionRefinementString(), &m_partitionRefinement ).
- setInputFlag( InputFlags::OPTIONAL ).
- setApplyDefaultValue( 1 ).
- setDescription( "Number of partitioning refinement iterations (defaults to 1, recommended value)."
- "A value of 0 disables graph partitioning and keeps simple kd-tree partitions (not recommended). "
- "Values higher than 1 may lead to slightly improved partitioning, but yield diminishing returns." );
-
registerWrapper( viewKeyStruct::partitionMethodString(), &m_partitionMethod ).
setInputFlag( InputFlags::OPTIONAL ).
setDescription( "Method (library) used to partition the mesh" );
@@ -121,7 +114,8 @@ void VTKMeshGenerator::postInputInitialization()
}
-void VTKMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockManager, SpatialPartition & partition )
+void VTKMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockManager,
+ DomainPartitioner & partitioner )
{
// TODO refactor void MeshGeneratorBase::generateMesh( DomainPartition & domain )
GEOS_MARK_FUNCTION;
@@ -130,91 +124,39 @@ void VTKMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockManager
vtkSmartPointer< vtkMultiProcessController > controller = vtk::getController();
vtkMultiProcessController::SetGlobalController( controller );
- int const numPartZ = m_structuredIndexAttributeName.empty() ? 1 : partition.getPartitions()[2];
-
GEOS_LOG_LEVEL_RANK_0( logInfo::VTKSteps, " redistributing mesh..." );
{
vtk::AllMeshes allMeshes;
GEOS_LOG_LEVEL_RANK_0( logInfo::VTKSteps, GEOS_FMT( "{} '{}': reading the dataset...", catalogName(), getName() ) );
- if( !m_filePath.empty())
+ if( !m_filePath.empty() )
{
GEOS_LOG_RANK_0( GEOS_FMT( "{} '{}': reading mesh from {}", catalogName(), getName(), m_filePath ) );
allMeshes = vtk::loadAllMeshes( m_filePath, m_mainBlockName, m_faceBlockNames );
}
- else if( !m_dataSourceName.empty())
+ else if( !m_dataSourceName.empty() )
{
- if( MpiWrapper::commRank() == 0 )
- {
- stdVector< vtkSmartPointer< vtkPartitionedDataSet > > partitions;
- vtkNew< vtkAppendFilter > appender;
- appender->MergePointsOn();
- for( auto & [key, value] : this->getSubGroups())
- {
- Region const & region = this->getGroup< Region >( key );
-
- string path = region.getWrapper< string >( Region::viewKeyStruct::pathInRepositoryString()).reference();
- integer region_id = region.getWrapper< integer >( Region::viewKeyStruct::idString()).reference();
-
- GEOS_LOG_RANK_0( GEOS_FMT( "{} '{}': reading partition from {}", catalogName(), getName(), path ) );
- vtkPartitionedDataSet * p = m_dataSource->search( path );
-
- //load the grid
- vtkDataObject * block = p->GetPartition( 0 );
- if( block->IsA( "vtkDataSet" ) )
- {
- vtkSmartPointer< vtkDataSet > dataset = vtkDataSet::SafeDownCast( block );
-
- vtkIntArray * arr = vtkIntArray::New();
- arr->SetName( m_regionAttributeName.c_str());
- arr->SetNumberOfComponents( 1 );
- arr->SetNumberOfTuples( dataset->GetNumberOfCells());
-
- arr->FillValue( region_id );
-
- dataset->GetCellData()->AddArray( arr );
- appender->AddInputDataObject( dataset );
- }
- }
- appender->Update();
- vtkUnstructuredGrid * result = vtkUnstructuredGrid::SafeDownCast( appender->GetOutputDataObject( 0 ) );
- allMeshes.setMainMesh( result );
-
- //DEBUG code
- vtkNew< vtkXMLUnstructuredGridWriter > writer;
- writer->SetFileName( "tmp_output.vtu" );
- writer->SetInputData( result );
- writer->Write();
- }
- else
- {
- vtkUnstructuredGrid * result = vtkUnstructuredGrid::New();
- allMeshes.setMainMesh( result );
- }
+ loadFromDataSource( allMeshes );
}
GEOS_LOG_LEVEL_RANK_0( logInfo::VTKSteps,
GEOS_FMT( "{} '{}': redistributing mesh...", catalogName(), getName() ) );
- vtk::AllMeshes redistributedMeshes = vtk::redistributeMeshes( getLogLevel(),
- allMeshes.getMainMesh(),
- allMeshes.getFaceBlocks(),
- comm,
- m_partitionMethod,
- m_partitionRefinement,
- m_useGlobalIds,
- m_structuredIndexAttributeName,
- numPartZ );
+
+ vtk::AllMeshes redistributedMeshes = redistributeMeshes(
+ getLogLevel(),
+ allMeshes.getMainMesh(),
+ allMeshes.getFaceBlocks(),
+ comm,
+ partitioner,
+ m_useGlobalIds
+ );
+
m_vtkMesh = redistributedMeshes.getMainMesh();
m_faceBlockMeshes = redistributedMeshes.getFaceBlocks();
- GEOS_LOG_LEVEL_RANK_0( logInfo::VTKSteps, GEOS_FMT( "{} '{}': finding neighbor ranks...", catalogName(), getName() ) );
- stdVector< vtkBoundingBox > boxes = vtk::exchangeBoundingBoxes( *m_vtkMesh, MPI_COMM_GEOS );
- stdVector< int > const neighbors = vtk::findNeighborRanks( std::move( boxes ) );
- partition.setMetisNeighborList( std::move( neighbors ) );
- GEOS_LOG_LEVEL_RANK_0( logInfo::VTKSteps, GEOS_FMT( "{} '{}': done!", catalogName(), getName() ) );
}
- GEOS_LOG_RANK_0( GEOS_FMT( "{} '{}': generating GEOS mesh data structure", catalogName(), getName() ) );
+ GEOS_LOG_RANK_0( GEOS_FMT( "{} '{}': generating GEOS mesh data structure", catalogName(), getName() ) );
GEOS_LOG_LEVEL_RANK_0( logInfo::VTKSteps, GEOS_FMT( "{} '{}': preprocessing...", catalogName(), getName() ) );
m_cellMap = vtk::buildCellMap( *m_vtkMesh, m_regionAttributeName );
@@ -244,6 +186,58 @@ void VTKMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockManager
vtk::printMeshStatistics( *m_vtkMesh, m_cellMap, MPI_COMM_GEOS );
}
+void VTKMeshGenerator::loadFromDataSource( vtk::AllMeshes & allMeshes )
+{
+ if( MpiWrapper::commRank() == 0 )
+ {
+ vtkNew< vtkAppendFilter > appender;
+ appender->MergePointsOn();
+
+ for( auto & [key, value] : this->getSubGroups() )
+ {
+ Region const & region = this->getGroup< Region >( key );
+
+ string path = region.getWrapper< string >( Region::viewKeyStruct::pathInRepositoryString() ).reference();
+ integer region_id = region.getWrapper< integer >( Region::viewKeyStruct::idString() ).reference();
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "{} '{}': reading partition from {}", catalogName(), getName(), path ) );
+ vtkPartitionedDataSet * p = m_dataSource->search( path );
+
+ // Load the grid
+ vtkDataObject * block = p->GetPartition( 0 );
+ if( block->IsA( "vtkDataSet" ) )
+ {
+ vtkSmartPointer< vtkDataSet > dataset = vtkDataSet::SafeDownCast( block );
+
+ vtkIntArray * arr = vtkIntArray::New();
+ arr->SetName( m_regionAttributeName.c_str() );
+ arr->SetNumberOfComponents( 1 );
+ arr->SetNumberOfTuples( dataset->GetNumberOfCells() );
+ arr->FillValue( region_id );
+
+ dataset->GetCellData()->AddArray( arr );
+ appender->AddInputDataObject( dataset );
+ }
+ }
+
+ appender->Update();
+ vtkUnstructuredGrid * result = vtkUnstructuredGrid::SafeDownCast( appender->GetOutputDataObject( 0 ) );
+ allMeshes.setMainMesh( result );
+
+ // DEBUG code
+ vtkNew< vtkXMLUnstructuredGridWriter > writer;
+ writer->SetFileName( "tmp_output.vtu" );
+ writer->SetInputData( result );
+ writer->Write();
+ }
+ else
+ {
+ vtkUnstructuredGrid * result = vtkUnstructuredGrid::New();
+ allMeshes.setMainMesh( result );
+ }
+}
+
+
void VTKMeshGenerator::importVolumicFieldOnArray( string const & cellBlockName,
string const & meshFieldName,
bool isMaterialField,
@@ -332,6 +326,100 @@ void VTKMeshGenerator::freeResources()
}
+vtk::AllMeshes redistributeMeshes( integer const logLevel,
+ vtkSmartPointer< vtkDataSet > loadedMesh,
+ stdMap< string, vtkSmartPointer< vtkDataSet > > & namesToFractures,
+ MPI_Comm const comm,
+ DomainPartitioner & partitioner,
+ int const useGlobalIds )
+{
+ GEOS_MARK_FUNCTION;
+
+ // ====================================================================
+ // STEP 1: Validate that we have a MeshPartitioner
+ // ====================================================================
+ MeshPartitioner * meshPartitioner = dynamic_cast< MeshPartitioner * >( &partitioner );
+
+ GEOS_ERROR_IF( meshPartitioner == nullptr,
+ "VTK mesh import requires a mesh-based partitioner (CellGraphPartitioner or LayeredMeshPartitioner). "
+ "Partitioner type '" << partitioner.getCatalogName()
+ << "' cannot partition imported VTK meshes." );
+
+ // ====================================================================
+ // STEP 2: Prepare fracture list for global ID generation
+ // ====================================================================
+ stdVector< vtkSmartPointer< vtkDataSet > > fractures;
+ for( auto & nameToFracture: namesToFractures )
+ {
+ fractures.push_back( nameToFracture.second );
+ }
+
+ // ====================================================================
+ // STEP 3: Generate global IDs for vertices and cells, if needed
+ // ====================================================================
+ vtkSmartPointer< vtkDataSet > mesh =
+ vtk::manageGlobalIds( loadedMesh, useGlobalIds, !std::empty( fractures ) );
+
+ // ====================================================================
+ // STEP 4: Verify fractures are on the last rank only
+ // ====================================================================
+ if( MpiWrapper::commRank( comm ) != ( MpiWrapper::commSize( comm ) - 1 ) )
+ {
+ for( auto const & nameToFracture: namesToFractures )
+ {
+ GEOS_UNUSED_VAR( nameToFracture );
+ GEOS_ASSERT_EQ( nameToFracture.second->GetNumberOfCells(), 0 );
+ }
+ }
+
+ // ====================================================================
+ // STEP 5: Initial KdTree redistribution if some ranks are empty
+ // ====================================================================
+ vtkIdType const minCellsOnAnyRank = MpiWrapper::min( mesh->GetNumberOfCells(), comm );
+
+ if( minCellsOnAnyRank == 0 )
+ {
+ mesh = vtk::redistributeByKdTree( *mesh );
+ }
+
+ // ====================================================================
+ // STEP 6: Ensure no rank is empty after initial redistribution
+ // ====================================================================
+ if( MpiWrapper::min( mesh->GetNumberOfCells(), comm ) == 0 )
+ {
+ GEOS_WARNING( "Empty ranks detected after kdtree - redistributing to ensure all ranks have cells" );
+ mesh = vtk::ensureNoEmptyRank( mesh, comm );
+ }
+
+ // ====================================================================
+ // STEP 7: Final partitioning using the configured partitioner
+ // ====================================================================
+ vtk::AllMeshes input( mesh, namesToFractures );
+ vtk::AllMeshes result = meshPartitioner->partitionMeshes( input, comm );
+
+ // ====================================================================
+ // STEP 8: Log redistribution statistics
+ // ====================================================================
+ if( logLevel >= 5 )
+ {
+ string const pattern = "{}: {}";
+ stdVector< string > messages;
+ messages.push_back( GEOS_FMT( pattern, "Local mesh size",
+ result.getMainMesh()->GetNumberOfCells() ) );
+
+ for( auto const & [faceName, faceMesh]: result.getFaceBlocks() )
+ {
+ messages.push_back( GEOS_FMT( pattern, faceName, faceMesh->GetNumberOfCells() ) );
+ }
+
+ GEOS_LOG_RANK( stringutilities::join( messages, ", " ) );
+ }
+
+ return result;
+}
+
REGISTER_CATALOG_ENTRY( MeshGeneratorBase, VTKMeshGenerator, string const &, Group * const )
} // namespace geos
diff --git a/src/coreComponents/mesh/generators/VTKMeshGenerator.hpp b/src/coreComponents/mesh/generators/VTKMeshGenerator.hpp
index b99145b85a0..08001e7cec2 100644
--- a/src/coreComponents/mesh/generators/VTKMeshGenerator.hpp
+++ b/src/coreComponents/mesh/generators/VTKMeshGenerator.hpp
@@ -23,7 +23,6 @@
#include "mesh/generators/ExternalMeshGeneratorBase.hpp"
#include "mesh/generators/VTKUtilities.hpp"
#include "mesh/generators/VTKHierarchicalDataSource.hpp"
-#include "mesh/mpiCommunications/SpatialPartition.hpp"
class vtkDataSet;
@@ -55,7 +54,7 @@ class VTKMeshGenerator : public ExternalMeshGeneratorBase
/**
* @brief Generate the mesh using the VTK library.
* @param[inout] cellBlockManager the CellBlockManager that will receive the meshing information
- * @param[in] partition the number of domain in each direction (x,y,z) for InternalMesh only, not used here
+   * @param[in] partitioner the domain partitioner used to redistribute the imported mesh across MPI ranks
* @details This method leverages the VTK library to load the meshes.
* The supported formats are the official VTK ones dedicated to
* unstructured grids (.vtu, .pvtu and .vtk) and structured grids (.vts, .vti and .pvts).
@@ -89,7 +88,7 @@ class VTKMeshGenerator : public ExternalMeshGeneratorBase
* surfaces of interest, with triangles and/or quads holding an attribute value
* of 1, 2 or 3, three node sets named "1", "2" and "3" will be instantiated by this method
*/
- void fillCellBlockManager( CellBlockManager & cellBlockManager, SpatialPartition & partition ) override;
+ void fillCellBlockManager( CellBlockManager & cellBlockManager, DomainPartitioner & partitioner ) override;
void importFieldOnArray( Block block,
string const & blockName,
@@ -102,8 +101,20 @@ class VTKMeshGenerator : public ExternalMeshGeneratorBase
protected:
void postInputInitialization() override;
+
private:
+ /**
+   * @brief Load the mesh from the external data source, merging all regions into a single grid
+   *
+   * Rank 0 loads every region from the data source and merges them into one mesh;
+   * all other ranks start with an empty mesh.
+ *
+ * @param allMeshes Output: loaded and merged meshes
+ */
+ void loadFromDataSource( vtk::AllMeshes & allMeshes );
+
///@cond DO_NOT_DOCUMENT
struct viewKeyStruct
{
@@ -112,7 +123,6 @@ class VTKMeshGenerator : public ExternalMeshGeneratorBase
constexpr static char const * mainBlockNameString() { return "mainBlockName"; }
constexpr static char const * faceBlockNamesString() { return "faceBlocks"; }
constexpr static char const * nodesetNamesString() { return "nodesetNames"; }
- constexpr static char const * partitionRefinementString() { return "partitionRefinement"; }
constexpr static char const * partitionMethodString() { return "partitionMethod"; }
constexpr static char const * useGlobalIdsString() { return "useGlobalIds"; }
constexpr static char const * dataSourceString() { return "dataSourceName"; }
@@ -159,9 +169,6 @@ class VTKMeshGenerator : public ExternalMeshGeneratorBase
/// Names of VTK nodesets to import
string_array m_nodesetNames;
- /// Number of graph partitioning refinement iterations
- integer m_partitionRefinement = 0;
-
/// Whether global id arrays should be used, if available
integer m_useGlobalIds = 0;
@@ -182,6 +189,33 @@ class VTKMeshGenerator : public ExternalMeshGeneratorBase
};
+/**
+ * @brief Redistribute a VTK mesh using the configured partitioner
+ *
+ * This is the main entry point for VTK mesh redistribution. It:
+ * 1. Validates that a mesh-based partitioner is provided
+ * 2. Manages global IDs
+ * 3. Performs initial redistribution if needed (KdTree, empty rank handling)
+ * 4. Delegates to the partitioner for final partitioning
+ *
+ * @param logLevel Logging verbosity level
+ * @param loadedMesh Main volumetric mesh loaded from file
+ * @param namesToFractures Map of fracture name to fracture mesh
+ * @param comm MPI communicator
+ * @param partitioner Domain partitioner (must be MeshPartitioner subclass)
+ * @param useGlobalIds How to handle global IDs (> 0: require existing IDs, 0: use existing IDs or generate them if missing, < 0: always generate)
+ * @return Redistributed meshes
+ *
+ * @throws If partitioner is not a MeshPartitioner (e.g., CartesianPartitioner)
+ */
+vtk::AllMeshes redistributeMeshes( integer logLevel,
+ vtkSmartPointer< vtkDataSet > loadedMesh,
+ stdMap< string, vtkSmartPointer< vtkDataSet > > & namesToFractures,
+ MPI_Comm comm,
+ DomainPartitioner & partitioner,
+ int useGlobalIds );
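+// Illustrative call site (a sketch mirroring VTKMeshGenerator::fillCellBlockManager; the
+// partitioner instance must derive from MeshPartitioner, e.g. a graph-based partitioner):
+//
+//   vtk::AllMeshes redistributed =
+//     redistributeMeshes( getLogLevel(), allMeshes.getMainMesh(), allMeshes.getFaceBlocks(),
+//                         MPI_COMM_GEOS, partitioner, m_useGlobalIds );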
+
} // namespace geos
#endif /* GEOS_MESH_GENERATORS_VTKMESHGENERATOR_HPP */
diff --git a/src/coreComponents/mesh/generators/VTKUtilities.cpp b/src/coreComponents/mesh/generators/VTKUtilities.cpp
index b49e30e857a..d74f57cf2d1 100644
--- a/src/coreComponents/mesh/generators/VTKUtilities.cpp
+++ b/src/coreComponents/mesh/generators/VTKUtilities.cpp
@@ -18,18 +18,13 @@
#include "common/format/table/TableLayout.hpp"
#include "common/TypeDispatch.hpp"
+#include "LvArray/src/genericTensorOps.hpp"
+#include "mesh/utilities/ComputationalGeometry.hpp"
#include "mesh/generators/CollocatedNodes.hpp"
#include "mesh/generators/VTKMeshGeneratorTools.hpp"
#include "mesh/generators/VTKUtilities.hpp"
#include "mesh/MeshFields.hpp"
-#ifdef GEOS_USE_PARMETIS
-#include "mesh/generators/ParMETISInterface.hpp"
-#endif
-#ifdef GEOS_USE_SCOTCH
-#include "mesh/generators/PTScotchInterface.hpp"
-#endif
-
#include
#include
#include
@@ -570,151 +565,6 @@ AllMeshes loadAllMeshes( Path const & filePath,
return AllMeshes( main, faces );
}
-
-/**
- * @brief Partition the mesh using cell graph methods (ParMETIS or PTScotch)
- *
- * @param[in] input a collection of vtk main (3D) and fracture mesh
- * @param[in] method the partitioning method
- * @param[in] comm the MPI communicator
- * @param[in] numParts the number of partitions
- * @param[in] minCommonNodes the minimum number of shared nodes for adding a graph edge
- * @param[in] numRefinements the number of refinements for PTScotch
- * @return the cell partitioning array
- */
-array1d< int64_t >
-partitionByCellGraph( AllMeshes & input,
- PartitionMethod const method,
- MPI_Comm const comm,
- int const numParts,
- int const minCommonNodes,
- int const numRefinements )
-{
- GEOS_MARK_FUNCTION;
-
- pmet_idx_t const numElems = input.getMainMesh()->GetNumberOfCells();
- pmet_idx_t const numRanks = MpiWrapper::commSize( comm );
- int const rank = MpiWrapper::commRank( comm );
- int const lastRank = numRanks - 1;
-
- // Value at each index (i.e. MPI rank) of `elemDist` gives the first element index of the MPI rank.
- // It's assumed that MPI ranks spans continuous numbers of elements.
- // Thus, the number of elements of each rank can be deduced by subtracting
- // the values between two consecutive ranks. To be able to do this even for the last rank,
- // a last additional value is appended, and the size of the array is then the comm size plus 1.
- array1d< pmet_idx_t > const elemDist( numRanks + 1 );
- {
- array1d< pmet_idx_t > elemCounts;
- MpiWrapper::allGather( numElems, elemCounts, comm );
- std::partial_sum( elemCounts.begin(), elemCounts.end(), elemDist.begin() + 1 );
- }
-
- vtkIdType localNumFracCells = 0;
- if( rank == lastRank ) // Let's add artificially the fracture to the last rank (for numbering reasons).
- {
- // Adding one fracture element
- for( auto const & [fractureName, fracture]: input.getFaceBlocks() )
- {
- localNumFracCells += fracture->GetNumberOfCells();
- }
- }
- vtkIdType globalNumFracCells = localNumFracCells;
- MpiWrapper::broadcast( globalNumFracCells, lastRank, comm );
- elemDist[lastRank + 1] += globalNumFracCells;
-
- // The `elemToNodes` mapping binds element indices (local to the rank) to the global indices of their support nodes.
- ArrayOfArrays< pmet_idx_t, pmet_idx_t > const elemToNodes = buildElemToNodes< pmet_idx_t >( input );
- ArrayOfArrays< pmet_idx_t, pmet_idx_t > graph;
-#ifdef GEOS_USE_PARMETIS
- graph = parmetis::meshToDual( elemToNodes.toViewConst(), elemDist, comm, minCommonNodes );
-#else
- GEOS_THROW( "GEOS must be built with ParMETIS support (ENABLE_PARMETIS=ON)"
- "to use any graph partitioning method for parallel mesh distribution", InputError );
-#endif
-
- switch( method )
- {
- case PartitionMethod::parmetis:
- {
-#ifdef GEOS_USE_PARMETIS
- return parmetis::partition( graph.toViewConst(), elemDist, numParts, comm, numRefinements );
-#else
- GEOS_THROW( "GEOS must be built with ParMETIS support (ENABLE_PARMETIS=ON) to use 'parmetis' partitioning method", InputError );
-#endif
- }
- case PartitionMethod::ptscotch:
- {
-#ifdef GEOS_USE_SCOTCH
- GEOS_WARNING_IF( numRefinements > 0, "Partition refinement is not supported by 'ptscotch' partitioning method" );
- return ptscotch::partition( graph.toViewConst(), numParts, comm );
-#else
- GEOS_THROW( "GEOS must be built with Scotch support (ENABLE_SCOTCH=ON) to use 'ptscotch' partitioning method", InputError );
-#endif
- }
- default:
- {
- GEOS_THROW( "Unknown partition method", InputError );
- return{};
- }
- }
- return {};
-}
-
-/**
- * @brief Redistribute the mesh using cell graph methods (ParMETIS or PTScotch)
- * @param[in] mesh a vtk grid
- * @param[in] method the partitioning method
- * @param[in] comm the MPI communicator
- * @param[in] numRefinements the number of refinements for PTScotch
- * @return
- */
-AllMeshes
-redistributeByCellGraph( AllMeshes & input,
- PartitionMethod const method,
- MPI_Comm const comm,
- int const numRefinements )
-{
- GEOS_MARK_FUNCTION;
-
- int const rank = MpiWrapper::commRank( comm );
- int const numRanks = MpiWrapper::commSize( comm );
- array1d< int64_t > newPartitions = partitionByCellGraph( input, method, comm, numRanks, 3, numRefinements );
-
- // Extract the partition information related to the fracture mesh.
- stdMap< string, array1d< pmet_idx_t > > newFracturePartitions;
- vtkIdType fracOffset = input.getMainMesh()->GetNumberOfCells();
- vtkIdType localNumFracCells = 0;
- for( auto const & [fractureName, fracture]: input.getFaceBlocks() )
- {
- localIndex const numFracCells = fracture->GetNumberOfCells();
- localNumFracCells += (rank == numRanks - 1) ? fracture->GetNumberOfCells() : 0;
- array1d< pmet_idx_t > tmp( numFracCells );
- std::copy( newPartitions.begin() + fracOffset, newPartitions.begin() + fracOffset + numFracCells, tmp.begin() );
- newFracturePartitions.insert( { fractureName, tmp } );
- fracOffset += numFracCells;
- }
- // Now do the same for the 3d mesh, simply by trimming the fracture information.
- newPartitions.resize( newPartitions.size() - localNumFracCells );
-
- // Now, perform the final steps: first, a new split following the new partitions.
- // Then those newly split meshes will be redistributed across the ranks.
-
- // First for the main 3d mesh...
- vtkSmartPointer< vtkPartitionedDataSet > const splitMesh = splitMeshByPartition( input.getMainMesh(), numRanks, newPartitions.toViewConst() );
- vtkSmartPointer< vtkUnstructuredGrid > finalMesh = vtk::redistribute( *splitMesh, MPI_COMM_GEOS );
- // ... and then for the fractures.
- stdMap< string, vtkSmartPointer< vtkDataSet > > finalFractures;
- for( auto const & [fractureName, fracture]: input.getFaceBlocks() )
- {
- vtkSmartPointer< vtkPartitionedDataSet > const splitFracMesh = splitMeshByPartition( fracture, numRanks, newFracturePartitions[fractureName].toViewConst() );
- vtkSmartPointer< vtkUnstructuredGrid > const finalFracMesh = vtk::redistribute( *splitFracMesh, MPI_COMM_GEOS );
- finalFractures.insert( {fractureName, finalFracMesh} );
- }
-
- return AllMeshes( finalMesh, finalFractures );
-}
-
-
/**
* @brief Scatter the mesh by blocks (no geometric information involved, assumes rank 0 has the full mesh)
*
@@ -883,44 +733,77 @@ findGlobalIndexBounds( vtkDataSet & mesh,
return { std::make_pair( minIdxGlobal[0], maxIdxGlobal[0] ), std::make_pair( minIdxGlobal[1], maxIdxGlobal[1] ) };
}
-AllMeshes
-redistributeByAreaGraphAndLayer( AllMeshes & input,
- PartitionMethod const method,
- string const & indexArrayName,
- MPI_Comm const comm,
- int const numPartZ,
- int const numRefinements )
+array1d< int64_t > partitionByAreaAndLayer( AllMeshes & input,
+ string const & indexArrayName,
+ GraphPartitionEngine & engine,
+ int numPartZ,
+ MPI_Comm comm )
{
GEOS_MARK_FUNCTION;
- localIndex const numCells = LvArray::integerConversion< localIndex >( input.getMainMesh()->GetNumberOfCells() );
+ localIndex const numCells =
+ LvArray::integerConversion< localIndex >( input.getMainMesh()->GetNumberOfCells() );
int const numProcs = MpiWrapper::commSize( comm );
int const numPartA = numProcs / numPartZ;
- GEOS_ERROR_IF_NE_MSG( numProcs % numPartZ, 0, "Number of ranks must evenly divide the number of z-partitions" );
- // Compute conversion from cell z-index to partition z-index
- std::array< std::pair< int, int >, 2 > const idxLimits = findGlobalIndexBounds( *input.getMainMesh(), comm, indexArrayName );
- double const cellPerPartZInv = static_cast< double >( numPartZ ) / ( idxLimits[1].second - idxLimits[1].first + 1 );
+  GEOS_ERROR_IF_NE_MSG( numProcs % numPartZ, 0,
+                        "The number of z-partitions (" << numPartZ << ") must evenly divide "
+                        "the number of ranks (" << numProcs << ")" );
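+  // For example, with numProcs = 8 ranks and numPartZ = 2, each of the two z-slabs is
+  // split into numPartA = 8 / 2 = 4 area partitions, for 8 target ranks in total.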
+
+ // ====================================================================
+ // STEP 1: Find global bounds of structured indices
+ // ====================================================================
+ std::array< std::pair< int, int >, 2 > const idxLimits =
+ findGlobalIndexBounds( *input.getMainMesh(), comm, indexArrayName );
+
+ // idxLimits[0] = (minArea, maxArea)
+ // idxLimits[1] = (minLayer, maxLayer)
+
+ // ====================================================================
+ // STEP 2: Compute conversion from cell layer index to partition layer index
+ // ====================================================================
+ double const cellPerPartZInv =
+ static_cast< double >( numPartZ ) / ( idxLimits[1].second - idxLimits[1].first + 1 );
+
auto const computePartIndexZ = [minZ = idxLimits[1].first, cellPerPartZInv]( auto const zidx )
{
return static_cast< int >( ( zidx - minZ + 0.5 ) * cellPerPartZInv );
};
- // Extract cells in z-layer "0"
- vtkSmartPointer< vtkUnstructuredGrid > layer0 = threshold( *input.getMainMesh(), indexArrayName, 1, idxLimits[1].first, idxLimits[1].first );
-
- // Ranks that have the layer form a subcomm and compute the area partitioning
+ // ====================================================================
+ // STEP 3: Extract cells in z-layer 0
+ // ====================================================================
+ vtkSmartPointer< vtkUnstructuredGrid > layer0 =
+ threshold( *input.getMainMesh(),
+ indexArrayName,
+ 1,
+ idxLimits[1].first,
+ idxLimits[1].first );
+
+ // ====================================================================
+ // STEP 4: Partition layer 0 by area using cell graph
+ // ====================================================================
+ // Only ranks that have layer 0 cells participate in partitioning
bool const haveLayer0 = layer0->GetNumberOfCells() > 0;
- MPI_Comm subComm = MpiWrapper::commSplit( comm, haveLayer0 ? 0 : MPI_UNDEFINED, MpiWrapper::commRank( comm ) );
+
+ MPI_Comm subComm = MpiWrapper::commSplit( comm,
+ haveLayer0 ? 0 : MPI_UNDEFINED,
+ MpiWrapper::commRank( comm ) );
+
array1d< int64_t > layer0Parts;
+
if( haveLayer0 )
{
- AllMeshes layer0input( layer0, {} ); // fracture mesh not supported yet
- layer0Parts = partitionByCellGraph( layer0input, method, subComm, numPartA, 3, numRefinements );
+ AllMeshes layer0input( layer0, {} ); // fracture mesh not supported yet
+ layer0Parts = partitionByCellGraph( layer0input, engine, subComm, numPartA );
+
MpiWrapper::commFree( subComm );
}
- // pack the area index into unused high 32 bits of the 64-bit partition index
+ // ====================================================================
+ // STEP 5: Pack area index into unused high 32 bits of partition index
+ // ====================================================================
+  // This lets us later sort the packed values and look up a partition by its area index
dispatchArray< vtkArrayDispatch::Integrals >( *layer0->GetCellData(), indexArrayName,
[&]( auto const index )
{
@@ -930,41 +813,184 @@ redistributeByAreaGraphAndLayer( AllMeshes & input,
auto const aidx = index.Get( i, 0 );
GEOS_ASSERT_GE( aidx, 0 );
GEOS_ASSERT_GT( std::numeric_limits< int32_t >::max(), dst[i] );
+ // Pack: high 32 bits = area index, low 32 bits = partition index
dst[i] |= static_cast< int64_t >( aidx - minA ) << 32;
} );
} );
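+  // For example, with minA = 2, a layer-0 cell in area index 7 whose graph partition is 3
+  // packs to ( (7 - 2) << 32 ) | 3; the partition is recovered below via `& 0xFFFFFFFF`,
+  // while the high bits make the subsequent sort effectively a sort by area index.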
- // Distribute the partitioning of layer 0 (or columns) to everyone
+ // ====================================================================
+ // STEP 6: Distribute layer 0 partitioning to all ranks
+ // ====================================================================
array1d< int64_t > columnParts;
MpiWrapper::allGatherv( layer0Parts.toViewConst(), columnParts, comm );
- // Sort w.r.t. area index. If this becomes too expensive, may need a diff approach.
- // The goal is to enable fast lookup of area partition index by area index.
+ // Sort by area index (high 32 bits) for fast lookup
std::sort( columnParts.begin(), columnParts.end() );
- auto const computePartIndexA = [minA = idxLimits[0].first, partsA = columnParts.toViewConst()]( auto const aidx )
+ // ====================================================================
+ // STEP 7: Create lookup function for area partition
+ // ====================================================================
+ auto const computePartIndexA =
+ [minA = idxLimits[0].first, partsA = columnParts.toViewConst()]( auto const aidx )
{
- return partsA[aidx - minA] & 0xFFFFFFFF;
+ // Extract partition index from low 32 bits
+ return static_cast< int >( partsA[aidx - minA] & 0xFFFFFFFF );
};
- // Finally, we can compute the target partitioning by combining area and z-partition data
- array1d< int > const newParts( numCells );
- dispatchArray< vtkArrayDispatch::Integrals >( *input.getMainMesh()->GetCellData(), indexArrayName,
- [&]( auto const index )
+ // ====================================================================
+ // STEP 8: Compute final partitioning for all cells
+ // ====================================================================
+  // Combine the area and layer partitions: partIdx = partIdxA * numPartZ + partIdxZ
+ array1d< int64_t > newParts( numCells );
+
+ dispatchArray< vtkArrayDispatch::Integrals >(
+ *input.getMainMesh()->GetCellData(),
+ indexArrayName,
+ [&]( auto const index )
{
- forAll< parallelHostPolicy >( numCells, [computePartIndexA, computePartIndexZ,
- newParts = newParts.toView(),
- index, numPartZ]( localIndex const i )
+ forAll< parallelHostPolicy >( numCells,
+ [computePartIndexA, computePartIndexZ, newParts = newParts.toView(), index, numPartZ]
+ ( localIndex const i )
{
- int const partIdxA = computePartIndexA( index.Get( i, 0 ) );
- int const partIdxZ = computePartIndexZ( index.Get( i, 1 ) );
+ int const areaIdx = index.Get( i, 0 ); // Component 0 = area index
+ int const layerIdx = index.Get( i, 1 ); // Component 1 = layer index
+
+ int const partIdxA = computePartIndexA( areaIdx );
+ int const partIdxZ = computePartIndexZ( layerIdx );
+
+ // Combined partition: area partition controls X-Y, layer partition controls Z
newParts[i] = partIdxA * numPartZ + partIdxZ;
} );
} );
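+  // For example, with numPartZ = 4, a cell in area partition 3 and z-partition 2 is
+  // assigned to target rank 3 * 4 + 2 = 14.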
- vtkSmartPointer< vtkPartitionedDataSet > const splitMesh = splitMeshByPartition( input.getMainMesh(), numProcs, newParts.toViewConst() );
- return AllMeshes( vtk::redistribute( *splitMesh, MPI_COMM_GEOS ), {} );
+
+ return newParts;
}
+array1d< int64_t > partitionByCellGraph( AllMeshes & input,
+ GraphPartitionEngine & engine,
+ MPI_Comm comm,
+ pmet_idx_t numParts )
+{
+ GEOS_MARK_FUNCTION;
+
+ pmet_idx_t const numRanks = MpiWrapper::commSize( comm );
+
+ // ====================================================================
+ // STEP 1: Count all local elements (3D + 2D fractures)
+ // ====================================================================
+ pmet_idx_t numLocalElements = input.getMainMesh()->GetNumberOfCells();
+
+ for( auto const & [fractureName, fracture] : input.getFaceBlocks() )
+ {
+ numLocalElements += fracture->GetNumberOfCells();
+ }
+
+ // ====================================================================
+ // STEP 2: Create element distribution across ranks
+ // ====================================================================
+ array1d< pmet_idx_t > elemDist( numRanks + 1 );
+ {
+ array1d< pmet_idx_t > elemCounts( numRanks );
+ MpiWrapper::allGather( numLocalElements, elemCounts, comm );
+ elemDist[0] = 0;
+ std::partial_sum( elemCounts.begin(), elemCounts.end(), elemDist.begin() + 1 );
+ }
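+  // For example, if three ranks own { 100, 80, 120 } local elements (3D cells plus any
+  // local fracture cells), elemDist becomes { 0, 100, 180, 300 }, so rank r owns the
+  // contiguous global element range [ elemDist[r], elemDist[r+1] ).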
+
+ // ====================================================================
+ // STEP 3: Build element-to-node connectivity from VTK mesh
+ // ====================================================================
+ ArrayOfArrays< pmet_idx_t, pmet_idx_t > const elemToNodes =
+ buildElemToNodes< pmet_idx_t >( input );
+
+ // ====================================================================
+ // STEP 4: Build dual graph (elements connected if they share nodes)
+ // ====================================================================
+ ArrayOfArrays< pmet_idx_t, pmet_idx_t > graph =
+ engine.meshToDual( elemToNodes.toViewConst(),
+ elemDist.toViewConst(),
+ comm,
+ 3 ); // minCommonNodes = 3 (face connectivity)
+
+ // ====================================================================
+ // STEP 5: Partition the dual graph using the engine
+ // ====================================================================
+ return engine.partition( graph.toViewConst(),
+ elemDist.toViewConst(),
+ numParts,
+ comm );
+}
+
+
+AllMeshes applyPartitioning( AllMeshes & input,
+ arrayView1d< int64_t const > partitioning,
+ MPI_Comm comm )
+{
+ GEOS_MARK_FUNCTION;
+
+ int const numRanks = MpiWrapper::commSize( comm );
+ vtkIdType const num3DCells = input.getMainMesh()->GetNumberOfCells();
+
+ // Always separate 3D from 2D partitioning
+ array1d< int64_t > newPartitions3D( num3DCells );
+ if( num3DCells > 0 )
+ {
+ std::copy( partitioning.begin(),
+ partitioning.begin() + num3DCells,
+ newPartitions3D.begin() );
+ }
+
+ // Redistribute main mesh
+ vtkSmartPointer< vtkPartitionedDataSet > const splitMesh =
+ splitMeshByPartition( input.getMainMesh(), numRanks, newPartitions3D.toViewConst() );
+ vtkSmartPointer< vtkUnstructuredGrid > finalMesh = redistribute( *splitMesh, comm );
+
+ // Handle fractures if present
+ stdMap< string, vtkSmartPointer< vtkDataSet > > finalFractures;
+
+ if( !input.getFaceBlocks().empty() )
+ {
+ // Check if we have enough partitioning data for fractures
+ localIndex totalElements = num3DCells;
+ for( auto const & [fractureName, fracture]: input.getFaceBlocks() )
+ {
+ totalElements += fracture->GetNumberOfCells();
+ }
+
+ if( partitioning.size() >= totalElements )
+ {
+ // We have fracture partitioning data
+ vtkIdType offset = num3DCells;
+ for( auto const & [fractureName, fracture]: input.getFaceBlocks() )
+ {
+ localIndex const numFracCells = fracture->GetNumberOfCells();
+ array1d< int64_t > fracPartitioning( numFracCells );
+ if( numFracCells > 0 )
+ {
+ std::copy( partitioning.begin() + offset,
+ partitioning.begin() + offset + numFracCells,
+ fracPartitioning.begin() );
+ }
+
+ vtkSmartPointer< vtkPartitionedDataSet > const splitFracMesh =
+ splitMeshByPartition( fracture, numRanks, fracPartitioning.toViewConst() );
+ vtkSmartPointer< vtkUnstructuredGrid > const finalFracMesh =
+ redistribute( *splitFracMesh, comm );
+ finalFractures.try_emplace( fractureName, finalFracMesh );
+
+ offset += numFracCells;
+ }
+ }
+ else
+ {
+ // No fracture partitioning data - fractures are empty
+ GEOS_LOG_RANK_0( "Partitioning data only covers main mesh; fractures will be empty" );
+ }
+ }
+
+ return AllMeshes( finalMesh, finalFractures );
+}
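+// Minimal usage sketch (assumes an already-configured GraphPartitionEngine `engine` and an
+// AllMeshes collection `meshes`; engine construction is outside the scope of this file):
+//
+//   array1d< int64_t > const parts =
+//     partitionByCellGraph( meshes, engine, MPI_COMM_GEOS, MpiWrapper::commSize( MPI_COMM_GEOS ) );
+//   AllMeshes const redistributed = applyPartitioning( meshes, parts.toViewConst(), MPI_COMM_GEOS );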
+
/**
* @brief Redistributes the mesh using a Kd-Tree
*
@@ -1028,56 +1054,6 @@ findNeighborRanks( stdVector< vtkBoundingBox > boundingBoxes )
}
-vtkSmartPointer< vtkDataSet > manageGlobalIds( vtkSmartPointer< vtkDataSet > mesh, int useGlobalIds, bool isFractured )
-{
- auto hasGlobalIds = []( vtkSmartPointer< vtkDataSet > m ) -> bool
- {
- return m->GetPointData()->GetGlobalIds() != nullptr && m->GetCellData()->GetGlobalIds() != nullptr;
- };
-
- {
- // Add global ids on the fly if needed
- int const me = hasGlobalIds( mesh );
- int const everyone = MpiWrapper::allReduce( me, MpiWrapper::Reduction::Max, MPI_COMM_GEOS );
-
- if( everyone and not me )
- {
- mesh->GetPointData()->SetGlobalIds( vtkIdTypeArray::New() );
- mesh->GetCellData()->SetGlobalIds( vtkIdTypeArray::New() );
- }
- }
-
- vtkSmartPointer< vtkDataSet > output;
- bool const globalIdsAvailable = hasGlobalIds( mesh );
- if( useGlobalIds > 0 && !globalIdsAvailable )
- {
- GEOS_ERROR( "Global IDs strictly required (useGlobalId > 0) but unavailable. Set useGlobalIds to 0 to build them automatically." );
- }
- else if( useGlobalIds >= 0 && globalIdsAvailable )
- {
- output = mesh;
- vtkIdTypeArray const * const globalCellId = vtkIdTypeArray::FastDownCast( output->GetCellData()->GetGlobalIds() );
- vtkIdTypeArray const * const globalPointId = vtkIdTypeArray::FastDownCast( output->GetPointData()->GetGlobalIds() );
- GEOS_ERROR_IF( globalCellId->GetNumberOfComponents() != 1 && globalCellId->GetNumberOfTuples() != output->GetNumberOfCells(),
- "Global cell IDs are invalid. Check the array or enable automatic generation (useGlobalId < 0).\n" <<
- generalMeshErrorAdvice );
- GEOS_ERROR_IF( globalPointId->GetNumberOfComponents() != 1 && globalPointId->GetNumberOfTuples() != output->GetNumberOfPoints(),
- "Global cell IDs are invalid. Check the array or enable automatic generation (useGlobalId < 0).\n" <<
- generalMeshErrorAdvice );
-
- GEOS_LOG_RANK_0( "Using global Ids defined in VTK mesh" );
- }
- else
- {
- GEOS_ERROR_IF( isFractured, "Automatic generation of global IDs for fractured meshes is disabled. Please split with mesh_doctor. \n" << generalMeshErrorAdvice );
-
- GEOS_LOG_RANK_0( "Generating global Ids from VTK mesh" );
- output = generateGlobalIDs( mesh );
- }
-
- return output;
-}
-
/**
* @brief This function tries to make sure that no MPI rank is empty
*
@@ -1180,99 +1156,13 @@ ensureNoEmptyRank( vtkSmartPointer< vtkDataSet > mesh,
}
GEOS_WARNING_IF( donorRanks.size() < recipientRanks.size(),
- "We strongly encourage the use of partitionRefinement > 5 for this number of MPI ranks" );
+                   "We strongly encourage setting the partitioner's numRefinements parameter above 4 for this number of MPI ranks" );
vtkSmartPointer< vtkPartitionedDataSet > const splitMesh = splitMeshByPartition( mesh, numProcs, newParts.toViewConst() );
return vtk::redistribute( *splitMesh, MPI_COMM_GEOS );
}
-AllMeshes
-redistributeMeshes( integer const logLevel,
- vtkSmartPointer< vtkDataSet > loadedMesh,
- stdMap< string, vtkSmartPointer< vtkDataSet > > & namesToFractures,
- MPI_Comm const comm,
- PartitionMethod const method,
- int const partitionRefinement,
- int const useGlobalIds,
- string const & structuredIndexAttributeName,
- int const numPartZ )
-{
- GEOS_MARK_FUNCTION;
-
- stdVector< vtkSmartPointer< vtkDataSet > > fractures;
- for( auto & nameToFracture: namesToFractures )
- {
- fractures.push_back( nameToFracture.second );
- }
-
- // Generate global IDs for vertices and cells, if needed
- vtkSmartPointer< vtkDataSet > mesh = manageGlobalIds( loadedMesh, useGlobalIds, !std::empty( fractures ) );
-
- if( MpiWrapper::commRank( comm ) != ( MpiWrapper::commSize( comm ) - 1 ) )
- {
- for( auto nameToFracture: namesToFractures )
- {
- GEOS_ASSERT_EQ( nameToFracture.second->GetNumberOfCells(), 0 );
- }
- }
-
- // Determine if redistribution is required
- vtkIdType const minCellsOnAnyRank = MpiWrapper::min( mesh->GetNumberOfCells(), comm );
- if( minCellsOnAnyRank == 0 )
- {
- // Redistribute the mesh over all ranks using simple octree partitions
- mesh = redistributeByKdTree( *mesh );
- }
-
- // Check if a rank does not have a cell after the redistribution
- // If this is the case, we need a fix otherwise the next redistribution will fail
- // We expect this function to only be called in some pathological cases
- if( MpiWrapper::min( mesh->GetNumberOfCells(), comm ) == 0 )
- {
- mesh = ensureNoEmptyRank( mesh, comm );
- }
-
- AllMeshes result;
- // Redistribute the mesh again using higher-quality graph partitioner
- if( !structuredIndexAttributeName.empty() )
- {
- AllMeshes input( mesh, namesToFractures );
- result = redistributeByAreaGraphAndLayer( input,
- method,
- structuredIndexAttributeName,
- comm,
- numPartZ,
- partitionRefinement - 1 );
- }
- else if( partitionRefinement > 0 )
- {
- AllMeshes input( mesh, namesToFractures );
- result = redistributeByCellGraph( input, method, comm, partitionRefinement - 1 );
- }
- else
- {
- result.setMainMesh( mesh );
- result.setFaceBlocks( namesToFractures );
- }
-
- // Logging some information about the redistribution.
- {
- string const pattern = "{}: {}";
- stdVector< string > messages;
- messages.push_back( GEOS_FMT( pattern, "Local mesh size", result.getMainMesh()->GetNumberOfCells() ) );
- for( auto const & [faceName, faceMesh]: result.getFaceBlocks() )
- {
- messages.push_back( GEOS_FMT( pattern, faceName, faceMesh->GetNumberOfCells() ) );
- }
- if( logLevel >= 5 )
- {
- GEOS_LOG_RANK( stringutilities::join( messages, ", " ) );
- }
- }
-
- return result;
-}
/**
* @brief Identify the GEOSX type of the polyhedron
@@ -2300,6 +2190,57 @@ string buildCellBlockName( ElementType const type, int const regionId )
return regionId != -1 ? std::to_string( regionId ) + "_" + cellTypeName : cellTypeName;
}
+
+vtkSmartPointer< vtkDataSet > manageGlobalIds( vtkSmartPointer< vtkDataSet > mesh, int useGlobalIds, bool isFractured )
+{
+ auto hasGlobalIds = []( vtkSmartPointer< vtkDataSet > m ) -> bool
+ {
+ return m->GetPointData()->GetGlobalIds() != nullptr && m->GetCellData()->GetGlobalIds() != nullptr;
+ };
+
+ {
+ // Add global ids on the fly if needed
+ int const me = hasGlobalIds( mesh );
+ int const everyone = MpiWrapper::allReduce( me, MpiWrapper::Reduction::Max, MPI_COMM_GEOS );
+
+ if( everyone and not me )
+ {
+ mesh->GetPointData()->SetGlobalIds( vtkIdTypeArray::New() );
+ mesh->GetCellData()->SetGlobalIds( vtkIdTypeArray::New() );
+ }
+ }
+
+ vtkSmartPointer< vtkDataSet > output;
+ bool const globalIdsAvailable = hasGlobalIds( mesh );
+ if( useGlobalIds > 0 && !globalIdsAvailable )
+ {
+ GEOS_ERROR( "Global IDs strictly required (useGlobalId > 0) but unavailable. Set useGlobalIds to 0 to build them automatically." );
+ }
+ else if( useGlobalIds >= 0 && globalIdsAvailable )
+ {
+ output = mesh;
+ vtkIdTypeArray const * const globalCellId = vtkIdTypeArray::FastDownCast( output->GetCellData()->GetGlobalIds() );
+ vtkIdTypeArray const * const globalPointId = vtkIdTypeArray::FastDownCast( output->GetPointData()->GetGlobalIds() );
+ GEOS_ERROR_IF( globalCellId->GetNumberOfComponents() != 1 && globalCellId->GetNumberOfTuples() != output->GetNumberOfCells(),
+ "Global cell IDs are invalid. Check the array or enable automatic generation (useGlobalId < 0).\n" <<
+ generalMeshErrorAdvice );
+ GEOS_ERROR_IF( globalPointId->GetNumberOfComponents() != 1 && globalPointId->GetNumberOfTuples() != output->GetNumberOfPoints(),
+ "Global cell IDs are invalid. Check the array or enable automatic generation (useGlobalId < 0).\n" <<
+ generalMeshErrorAdvice );
+
+ GEOS_LOG_RANK_0( "Using global Ids defined in VTK mesh" );
+ }
+ else
+ {
+ GEOS_ERROR_IF( isFractured, "Automatic generation of global IDs for fractured meshes is disabled. Please split with mesh_doctor. \n" << generalMeshErrorAdvice );
+
+ GEOS_LOG_RANK_0( "Generating global Ids from VTK mesh" );
+ output = generateGlobalIDs( mesh );
+ }
+
+ return output;
+}
+
} // namespace vtk
@@ -2371,7 +2312,7 @@ void writeNodes( integer const logLevel,
// and make launch policy parallel again
GEOS_ERROR_IF( nodeGlobalIds.count( pointGlobalID ) > 0,
GEOS_FMT( "At least one duplicate point detected (globalID = {}).\n"
- "Potential fixes :\n- Make sure partitionRefinement is set to 1 or higher.\n"
+ "Potential fixes :\n- Make sure a partitioner such as ParMetis is used.\n"
"- {}",
pointGlobalID, generalMeshErrorAdvice ) );
nodeGlobalIds.insert( pointGlobalID );
diff --git a/src/coreComponents/mesh/generators/VTKUtilities.hpp b/src/coreComponents/mesh/generators/VTKUtilities.hpp
index ef660d7641c..ba47d706d91 100644
--- a/src/coreComponents/mesh/generators/VTKUtilities.hpp
+++ b/src/coreComponents/mesh/generators/VTKUtilities.hpp
@@ -23,10 +23,13 @@
#include "common/DataTypes.hpp"
#include "common/MpiWrapper.hpp"
#include "mesh/generators/CellBlockManager.hpp"
+#include "mesh/mpiCommunications/DomainPartitioner.hpp"
+#include "mesh/mpiCommunications/GraphPartitionEngine.hpp"
#include
#include
#include
+#include
#include
#include
@@ -90,6 +93,15 @@ class AllMeshes
{
return m_main;
}
+ /**
+ * @brief Get the main 3D mesh (const version)
+ * @return Pointer to main mesh
+ */
+ vtkSmartPointer< vtkDataSet > getMainMesh() const
+ {
+ return m_main;
+ }
+
/**
* @return a mapping linking the name of each face block to its mesh.
@@ -117,6 +129,8 @@ class AllMeshes
m_faceBlocks = faceBlocks;
}
+
+
private:
/// The main 3d mesh (namely the matrix).
vtkSmartPointer< vtkDataSet > m_main;
@@ -136,6 +150,88 @@ AllMeshes loadAllMeshes( Path const & filePath,
string const & mainBlockName,
string_array const & faceBlockNames );
+/**
+ * @brief Manage global IDs for points and cells in a VTK mesh
+ * @param[in] mesh a vtk grid
+ * @param[in] useGlobalIds flag to control global ID handling:
+ * > 0: strictly require existing global IDs (error if missing)
+ * = 0: use existing global IDs if available, generate if missing
+ * < 0: always generate new global IDs
+ * @param[in] isFractured whether the mesh contains fractures
+ * @return the vtk grid with properly configured global IDs
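+ *
+ * @note Illustrative usage sketch; @p loadedMesh is assumed to come from
+ *       loadAllMeshes() and is not defined here:
+ * @code{.cpp}
+ *   // Reuse file-provided global IDs when present, otherwise generate them
+ *   vtkSmartPointer< vtkDataSet > withIds = manageGlobalIds( loadedMesh, 0, false );
+ * @endcode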
+ */
+vtkSmartPointer< vtkDataSet >
+manageGlobalIds( vtkSmartPointer< vtkDataSet > mesh,
+ int useGlobalIds,
+ bool isFractured );
+
+/**
+ * @brief Redistributes the mesh using a Kd-Tree
+ * @param[in] mesh a vtk grid
+ * @return the vtk grid redistributed across MPI ranks
+ */
+vtkSmartPointer< vtkDataSet >
+redistributeByKdTree( vtkDataSet & mesh );
+
+/**
+ * @brief Ensure that no MPI rank is empty by redistributing elements
+ * @param[in] mesh a vtk grid
+ * @param[in] comm the MPI communicator
+ * @return the vtk grid redistributed to guarantee all ranks have at least one element
+ */
+vtkSmartPointer< vtkDataSet >
+ensureNoEmptyRank( vtkSmartPointer< vtkDataSet > mesh,
+ MPI_Comm const comm );
+
+
+
+/**
+ * @brief Partition meshes by cell graph
+ *
+ * @param input Input meshes
+ * @param engine Graph partitioning engine
+ * @param comm MPI communicator
+ * @param numParts Number of partitions
+ * @return Partition array
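+ * @note The returned array is typically consumed by applyPartitioning(); a short
+ *       usage sketch is given with that function's documentation below.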
+ */
+array1d< int64_t > partitionByCellGraph( AllMeshes & input,
+ GraphPartitionEngine & engine,
+ MPI_Comm comm,
+ pmet_idx_t numParts );
+
+/**
+ * @brief Partition meshes by area and layer
+ *
+ * @param input Input meshes
+ * @param indexArrayName Name of the structured index array
+ * @param engine Graph partitioning engine
+ * @param numPartZ Number of partitions in Z direction
+ * @param comm MPI communicator
+ * @return Partition array
+ */
+array1d< int64_t > partitionByAreaAndLayer( AllMeshes & input,
+ string const & indexArrayName,
+ GraphPartitionEngine & engine,
+ int numPartZ,
+ MPI_Comm comm );
+
+/**
+ * @brief Apply partitioning to meshes and redistribute across MPI ranks
+ *
+ * Takes a partition assignment for each element and redistributes the meshes
+ * accordingly using VTK's redistribution utilities.
+ *
+ * @param input Input meshes (main mesh + fractures)
+ * @param partitioning Partition assignment for each element.
+ * Size must match total number of elements (3D + fractures).
+ * Format: [3D cells][fracture1 cells][fracture2 cells]...
+ * @param comm MPI communicator
+ * @return Redistributed meshes after applying partitioning
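+ *
+ * @note Minimal usage sketch, assuming @p meshes comes from loadAllMeshes() and
+ *       @p engine is an already-configured GraphPartitionEngine:
+ * @code{.cpp}
+ *   pmet_idx_t const numParts = MpiWrapper::commSize( MPI_COMM_GEOS );
+ *   array1d< int64_t > const parts = partitionByCellGraph( meshes, engine, MPI_COMM_GEOS, numParts );
+ *   AllMeshes redistributed = applyPartitioning( meshes, parts.toViewConst(), MPI_COMM_GEOS );
+ * @endcode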
+ */
+AllMeshes applyPartitioning( AllMeshes & input,
+ arrayView1d< int64_t const > partitioning,
+ MPI_Comm comm );
+
/**
* @brief Compute the rank neighbor candidate list.
* @param[in] boundingBoxes the bounding boxes used by the VTK partitioner for all ranks
@@ -144,30 +240,6 @@ AllMeshes loadAllMeshes( Path const & filePath,
stdVector< int >
findNeighborRanks( stdVector< vtkBoundingBox > boundingBoxes );
-/**
- * @brief Generate global point/cell IDs and redistribute the mesh among MPI ranks.
- * @param[in] logLevel the log level
- * @param[in] loadedMesh the mesh that was loaded on one or several MPI ranks
- * @param[in] namesToFractures the fracture meshes
- * @param[in] comm the MPI communicator
- * @param[in] method the partitioning method
- * @param[in] partitionRefinement number of graph partitioning refinement cycles
- * @param[in] useGlobalIds controls whether global id arrays from the vtk input should be used
- * @param[in] structuredIndexAttributeName VTK array name for structured index attribute, if present
- * @param[in] numPartZ number of MPI partitions in Z direction (only if @p structuredIndexAttributeName is used)
- * @return the vtk grid redistributed
- */
-AllMeshes
-redistributeMeshes( integer const logLevel,
- vtkSmartPointer< vtkDataSet > loadedMesh,
- stdMap< string, vtkSmartPointer< vtkDataSet > > & namesToFractures,
- MPI_Comm const comm,
- PartitionMethod const method,
- int const partitionRefinement,
- int const useGlobalIds,
- string const & structuredIndexAttributeName,
- int const numPartZ );
-
/**
* @brief Collect lists of VTK cell indices organized by type and attribute value.
* @param[in] mesh the vtkUnstructuredGrid or vtkStructuredGrid that is loaded
diff --git a/src/coreComponents/mesh/generators/VTKWellGenerator.cpp b/src/coreComponents/mesh/generators/VTKWellGenerator.cpp
index eba79d7076e..51ceb12a025 100644
--- a/src/coreComponents/mesh/generators/VTKWellGenerator.cpp
+++ b/src/coreComponents/mesh/generators/VTKWellGenerator.cpp
@@ -22,6 +22,7 @@
#include "mesh/LogLevelsInfo.hpp"
#include "mesh/generators/VTKUtilities.hpp"
+#include "LvArray/src/genericTensorOps.hpp"
#include
#include
#include
diff --git a/src/coreComponents/mesh/mpiCommunications/CartesianPartitioner.cpp b/src/coreComponents/mesh/mpiCommunications/CartesianPartitioner.cpp
new file mode 100644
index 00000000000..bd481712de6
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/CartesianPartitioner.cpp
@@ -0,0 +1,321 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file CartesianPartitioner.cpp
+ */
+
+#include "CartesianPartitioner.hpp"
+#include "common/MpiWrapper.hpp"
+#include "codingUtilities/Utilities.hpp" // for isOdd
+#include "LvArray/src/genericTensorOps.hpp"
+#include
+
+namespace geos
+{
+using namespace dataRepository;
+
+// Modulo
+// returns a positive value regardless of the sign of numerator
+real64 Mod( real64 const num, real64 const denom )
+{
+ if( fabs( denom ) < fabs( num ) * 1.0e-14 )
+ {
+ return num;
+ }
+
+ return num - denom * std::floor( num / denom );
+}
+
+// MapValueToRange
+// returns a periodic value in the range [min, max)
+real64 MapValueToRange( real64 const value, real64 const min, real64 const max )
+{
+ return Mod( value - min, max - min ) + min;
+}
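+
+// Worked example (illustrative): MapValueToRange( 370.0, 0.0, 360.0 ) returns 10.0
+// and MapValueToRange( -30.0, 0.0, 360.0 ) returns 330.0, i.e. values are wrapped
+// back into [min, max) regardless of their sign.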
+
+// Helper class to manage the MPI_Comm lifetime
+class ScopedMpiComm
+{
+public:
+ // Constructor is implicit
+
+ // Destructor
+ ~ScopedMpiComm()
+ {
+ if( m_comm != MPI_COMM_NULL )
+ {
+ MPI_Comm_free( &m_comm );
+ }
+ }
+
+ // Allow the class to be used as an MPI_Comm handle
+ operator MPI_Comm &() { return m_comm; }
+
+private:
+ MPI_Comm m_comm = MPI_COMM_NULL;
+};
+
+CartesianPartitioner::CartesianPartitioner( string const & name,
+ Group * const parent )
+ : GeometricPartitioner( name, parent ),
+ m_periodic( m_ndim ),
+ m_coords( m_ndim ),
+ m_partitionCounts(),
+ m_localMin{ 0.0, 0.0, 0.0 },
+ m_localMax{ 0.0, 0.0, 0.0 },
+ m_localSize{ 0.0, 0.0, 0.0 },
+ m_globalGridMin{ 0.0, 0.0, 0.0 },
+ m_globalGridMax{ 0.0, 0.0, 0.0 },
+ m_globalGridSize{ 0.0, 0.0, 0.0 },
+ m_cartRank( 0 )
+{
+ registerWrapper( viewKeyStruct::partitionCountsString(), &m_partitionCounts )
+ .setInputFlag( InputFlags::OPTIONAL )
+ .setDescription( "Number of partitions in each direction [nx, ny, nz]" );
+}
+
+void CartesianPartitioner::initializeDomain( R1Tensor const & globalGridMin,
+ R1Tensor const & globalGridMax )
+{
+ ScopedMpiComm cartComm;
+ initializeCartesianCommunicator( cartComm );
+
+ // Compute neighbors from Cartesian topology
+ computeNeighborsFromTopology( cartComm );
+
+ // Compute checkerboard coloring
+ color();
+
+ // Set global and local domain values
+ setGlobalGridValues( globalGridMin, globalGridMax );
+ setLocalPartitionValues( globalGridMin );
+}
+
+void CartesianPartitioner::initializeCartesianCommunicator( MPI_Comm & cartComm )
+{
+ GEOS_ERROR_IF( m_partitionCounts.size() != 3,
+ "Partition counts were not set before initializeCartesianCommunicator." );
+
+ int const size = MpiWrapper::commSize( MPI_COMM_GEOS );
+ validatePartitionSize( size );
+
+ int const reorder = 0;
+ MpiWrapper::cartCreate( MPI_COMM_GEOS, m_ndim, m_partitionCounts.data(),
+ m_periodic.data(), reorder, &cartComm );
+
+ m_cartRank = MpiWrapper::commRank( cartComm );
+ MpiWrapper::cartCoords( cartComm, m_cartRank, m_ndim, m_coords.data() );
+}
+
+void CartesianPartitioner::validatePartitionSize( int const size ) const
+{
+ GEOS_ERROR_IF( m_numPartitions != size,
+ GEOS_FMT( "The total number of processes = {} does not correspond to the total number of partitions = {}.\n"
+ "The number of cells in an axis cannot be lower than the partition count of this axis",
+ size, m_numPartitions ) );
+}
+
+void CartesianPartitioner::color()
+{
+ // Specialized checkerboard coloring for Cartesian grid
+ // Much more efficient than generic graph coloring!
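+  // Example: a rank at Cartesian coordinates (i, j, k) = (1, 0, 1) gets color
+  // 1 + 4 = 5. Non-periodic face neighbors differ in the parity of exactly one
+  // coordinate, so they never share a color.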
+
+ int color = 0;
+ if( isOdd( m_coords[0] ) )
+ color += 1;
+ if( isOdd( m_coords[1] ) )
+ color += 2;
+ if( isOdd( m_coords[2] ) )
+ color += 4;
+
+ // This color numbering may have gaps, so number of colors is just an upper bound
+ m_numColors = MpiWrapper::max( color, MPI_COMM_GEOS ) + 1;
+ m_color = color;
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "CartesianPartitioner: Checkerboard coloring complete. "
+ "Using {} colors (upper bound)", m_numColors ) );
+}
+
+void CartesianPartitioner::processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL )
+{
+ int const mpiSize = MpiWrapper::commSize( MPI_COMM_GEOS );
+
+ // Case 1: User provided command-line overrides
+ if( xparCL != 0 || yparCL != 0 || zparCL != 0 )
+ {
+ integer const xpar = (xparCL == 0) ? 1 : xparCL;
+ integer const ypar = (yparCL == 0) ? 1 : yparCL;
+ integer const zpar = (zparCL == 0) ? 1 : zparCL;
+
+ integer const totalPartitions = xpar * ypar * zpar;
+
+ // Validate that partition counts match MPI size
+ GEOS_THROW_IF( totalPartitions != mpiSize,
+ GEOS_FMT( "Partition count mismatch: -x {} -y {} -z {} = {} total partitions, "
+ "but running with {} MPI ranks. Product must equal MPI ranks.",
+ xpar, ypar, zpar, totalPartitions, mpiSize ), InputError );
+
+ setPartitionCounts( xpar, ypar, zpar );
+ }
+ // Case 2: No command-line overrides - check if XML values are set
+ else if( m_partitionCounts.empty() )
+ {
+ GEOS_LOG_RANK_0( "CartesianPartitioner: No partition counts specified. "
+ "Using default 1D decomposition along z-axis." );
+ setPartitionCounts( 1, 1, mpiSize );
+ }
+ // Case 3: XML values already set, validate them
+ else
+ {
+ integer const totalPartitions = m_partitionCounts[0] * m_partitionCounts[1] * m_partitionCounts[2];
+ GEOS_THROW_IF( totalPartitions != mpiSize,
+ GEOS_FMT( "Partition count mismatch: XML specifies {} x {} x {} = {} total partitions, "
+ "but running with {} MPI ranks. Product must equal MPI ranks.",
+ m_partitionCounts[0], m_partitionCounts[1], m_partitionCounts[2],
+ totalPartitions, mpiSize ), InputError );
+ }
+}
+
+void CartesianPartitioner::setPartitionCounts( unsigned int const xPartitions,
+ unsigned int const yPartitions,
+ unsigned int const zPartitions )
+{
+ m_partitionCounts.resize( 3 );
+ m_partitionCounts[0] = xPartitions;
+ m_partitionCounts[1] = yPartitions;
+ m_partitionCounts[2] = zPartitions;
+
+ m_numPartitions = xPartitions * yPartitions * zPartitions;
+}
+
+void CartesianPartitioner::addNeighbors( unsigned int const idim,
+ int * ncoords,
+ MPI_Comm const cartComm )
+{
+ if( idim == m_ndim )
+ {
+ bool me = true;
+ for( int i = 0; i < m_ndim; i++ )
+ {
+ if( ncoords[i] != this->m_coords( i ) )
+ {
+ me = false;
+ break;
+ }
+ }
+ if( !me )
+ {
+ int const rank = MpiWrapper::cartRank( cartComm, ncoords );
+
+ // Prevent duplication
+ if( std::find( m_neighborsRank.begin(), m_neighborsRank.end(), rank )
+ == m_neighborsRank.end() )
+ {
+ m_neighborsRank.push_back( rank );
+ }
+ }
+ }
+ else
+ {
+ int const dim = this->m_partitionCounts( LvArray::integerConversion< localIndex >( idim ) );
+ bool const periodic = this->m_periodic( LvArray::integerConversion< localIndex >( idim ) );
+ for( int i = -1; i < 2; i++ )
+ {
+ ncoords[idim] = this->m_coords( LvArray::integerConversion< localIndex >( idim ) ) + i;
+ bool ok = true;
+ if( periodic )
+ {
+ if( ncoords[idim] < 0 )
+ ncoords[idim] = dim - 1;
+ else if( ncoords[idim] >= dim )
+ ncoords[idim] = 0;
+ }
+ else
+ {
+ ok = ncoords[idim] >= 0 && ncoords[idim] < dim;
+ }
+ if( ok )
+ {
+ addNeighbors( idim + 1, ncoords, cartComm );
+ }
+ }
+ }
+}
+
+void CartesianPartitioner::computeNeighborsFromTopology( MPI_Comm & cartComm )
+{
+ m_neighborsRank.clear();
+
+ int ncoords[3];
+
+ // Recursively find all neighbors in 3×3×3 stencil
+ addNeighbors( 0, ncoords, cartComm );
+}
+
+void CartesianPartitioner::setGlobalGridValues( R1Tensor const & gridMin,
+ R1Tensor const & gridMax )
+{
+ LvArray::tensorOps::copy< 3 >( m_globalGridMin, gridMin );
+ LvArray::tensorOps::copy< 3 >( m_globalGridMax, gridMax );
+ LvArray::tensorOps::copy< 3 >( m_globalGridSize, gridMax );
+ LvArray::tensorOps::subtract< 3 >( m_globalGridSize, gridMin );
+}
+
+void CartesianPartitioner::setLocalPartitionValues( R1Tensor const & gridMin )
+{
+ LvArray::tensorOps::copy< 3 >( m_localSize, m_globalGridSize );
+ LvArray::tensorOps::copy< 3 >( m_localMin, gridMin );
+ for( int i = 0; i < m_ndim; ++i )
+ {
+ m_localSize[i] /= m_partitionCounts( i );
+ m_localMin[i] += m_coords( i ) * m_localSize[i];
+ m_localMax[i] = gridMin[i] + (m_coords( i ) + 1) * m_localSize[i];
+ }
+}
+
+bool CartesianPartitioner::isCoordInPartition( R1Tensor const & coords ) const
+{
+ return isCoordInPartition( coords[0], 0 ) &&
+ isCoordInPartition( coords[1], 1 ) &&
+ isCoordInPartition( coords[2], 2 );
+}
+
+bool CartesianPartitioner::isCoordInPartition( real64 const & coord, int const dir ) const
+{
+ bool rval = true;
+ if( m_periodic( dir ) )
+ {
+ if( m_partitionCounts( dir ) != 1 )
+ {
+ real64 const localCenter = MapValueToRange( coord, m_globalGridMin[dir], m_globalGridMax[dir] );
+ rval = rval && localCenter >= m_localMin[dir] && localCenter < m_localMax[dir];
+ }
+ }
+ else
+ {
+ rval = rval && (m_partitionCounts[dir] == 1 || (coord >= m_localMin[dir] && coord < m_localMax[dir]));
+ }
+
+ return rval;
+}
+
+// Register in DomainPartitioner catalog
+REGISTER_CATALOG_ENTRY( DomainPartitioner, CartesianPartitioner, string const &, Group * const )
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/CartesianPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/CartesianPartitioner.hpp
new file mode 100644
index 00000000000..0f03045b914
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/CartesianPartitioner.hpp
@@ -0,0 +1,330 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file CartesianPartitioner.hpp
+ */
+
+#ifndef GEOS_MESH_MPICOMMUNICATIONS_CARTESIANPARTITIONER_HPP_
+#define GEOS_MESH_MPICOMMUNICATIONS_CARTESIANPARTITIONER_HPP_
+
+#include "GeometricPartitioner.hpp"
+
+namespace geos
+{
+
+/**
+ * @class CartesianPartitioner
+ * @brief Cartesian (regular grid) domain decomposition
+ *
+ * CartesianPartitioner divides a rectangular domain into a regular X × Y × Z
+ * grid of subdomains. Each MPI rank is assigned one subdomain.
+ *
+ * Features:
+ * - Checkerboard coloring (8 colors maximum)
+ * - Support for periodic boundaries in any direction
+ * - 26-connected neighbor topology (includes faces, edges, and corners)
+ *
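+ * Illustrative usage sketch (in practice the instance is created from the input
+ * deck through the DomainPartitioner catalog; names below are placeholders):
+ * @code{.cpp}
+ *   R1Tensor const globalMin{ 0.0, 0.0, 0.0 };
+ *   R1Tensor const globalMax{ 10.0, 10.0, 10.0 };
+ *   partitioner.setPartitionCounts( 2, 2, 4 );             // product must equal the MPI size
+ *   partitioner.initializeDomain( globalMin, globalMax );  // builds the Cartesian communicator
+ *   bool const owned = partitioner.isCoordInPartition( { 1.0, 2.0, 3.0 } );
+ * @endcode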
+ */
+class CartesianPartitioner : public GeometricPartitioner
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this partitioner instance
+ * @param parent The parent group
+ */
+ explicit CartesianPartitioner( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~CartesianPartitioner() override = default;
+
+ /**
+ * @brief Catalog name for factory registration
+ * @return The catalog key
+ */
+ static string catalogName() { return "CartesianPartitioner"; }
+
+ /**
+ * @brief Initialize the Cartesian grid partitioner
+ *
+ * Implementation of GeometricPartitioner::initializeDomain()
+ *
+ * Steps:
+ * 1. Create MPI Cartesian communicator
+ * 2. Compute local subdomain bounds
+ * 3. Determine neighbor ranks from grid topology
+ * 4. Compute checkerboard coloring
+ *
+ * @param globalMin Global domain minimum (x, y, z)
+ * @param globalMax Global domain maximum (x, y, z)
+ *
+ * @post m_neighborsRank is populated
+ * @post m_color and m_numColors are set
+ * @post Local bounds (m_localMin, m_localMax) are set
+ */
+ void initializeDomain( R1Tensor const & globalMin,
+ R1Tensor const & globalMax ) override;
+
+ /**
+ * @brief Check if coordinate is in local Cartesian subdomain
+ *
+ * Handles periodic boundaries by mapping coordinates into
+ * the periodic range before checking bounds.
+ *
+ * @param coords Spatial coordinates (x, y, z) to test
+ * @return true if coordinate is in local partition
+ *
+ * @pre initializeDomain() must have been called
+ */
+ bool isCoordInPartition( R1Tensor const & coords ) const override;
+
+ /**
+ * @brief Check if coordinate is in partition along one axis
+ *
+   * Helper for isCoordInPartition( R1Tensor const & ).
+ * Handles periodic boundaries.
+ *
+ * @param coord Coordinate value to test
+ * @param dir Direction (0=x, 1=y, 2=z)
+ * @return true if coordinate is in range for this direction
+ */
+ bool isCoordInPartition( real64 const & coord, int const dir ) const;
+
+
+ /**
+ * @brief Process command-line partition overrides (-x, -y, -z flags)
+ *
+ * Priority:
+ * 1. Command-line flags (if provided)
+ * 2. XML partitionCounts (if specified)
+ * 3. Default: 1 × 1 × N (1D decomposition along z-axis)
+ *
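+   * For example, "-x 2 -y 2 -z 4" on 16 MPI ranks yields a 2 x 2 x 4
+   * decomposition; any other rank count triggers an InputError.
+   *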
+ * @param xparCL X-direction partition count (0 = not specified)
+ * @param yparCL Y-direction partition count (0 = not specified)
+ * @param zparCL Z-direction partition count (0 = not specified)
+ *
+ * @post m_partitionCounts is set
+ * @post m_numPartitions is set
+ */
+ void processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL ) override;
+
+ /**
+ * @brief Set partition counts in each direction
+ *
+ * @param xPartitions Number of partitions in X direction
+ * @param yPartitions Number of partitions in Y direction
+ * @param zPartitions Number of partitions in Z direction
+ *
+ * @post m_partitionCounts = {xPartitions, yPartitions, zPartitions}
+ * @post m_numPartitions = xPartitions * yPartitions * zPartitions
+ */
+ void setPartitionCounts( unsigned int const xPartitions,
+ unsigned int const yPartitions,
+ unsigned int const zPartitions ) override;
+
+ /**
+ * @brief Get partition counts in all directions
+ * @return Array of partition counts {nx, ny, nz}
+ */
+ array1d< int > const & getPartitionCounts() const { return m_partitionCounts; }
+
+ /**
+ * @brief Set periodicity for specific directions
+ *
+ * Used by mesh generators to enable
+ * periodic boundaries in specific directions.
+ *
+ * @param dim Direction (0=x, 1=y, 2=z)
+ * @param periodic Periodicity flag (1=periodic, 0=not periodic)
+ */
+ void setPeriodicity( int const dim, int const periodic )
+ {
+ GEOS_ERROR_IF( dim < 0 || dim >= m_ndim,
+ "Invalid dimension " << dim << ". Must be 0, 1, or 2." );
+ m_periodic[dim] = periodic;
+ }
+
+ /**
+ * @brief Get periodicity flags
+ * @return Array of periodicity flags {px, py, pz} where 1 = periodic, 0 = not periodic
+ */
+ array1d< int > const & getPeriodicity() const { return m_periodic; }
+
+ /**
+ * @brief Compute checkerboard coloring
+ *
+ * @post m_color is set (0-7)
+ * @post m_numColors is set (≤ 8)
+ */
+ void color() override;
+
+ /**
+ * @brief Get X-direction partition count
+ * @return Number of partitions in X direction
+ */
+ unsigned int getXPartitions() const { return m_partitionCounts[0]; }
+
+ /**
+ * @brief Get Y-direction partition count
+ * @return Number of partitions in Y direction
+ */
+ unsigned int getYPartitions() const { return m_partitionCounts[1]; }
+
+ /**
+ * @brief Get Z-direction partition count
+ * @return Number of partitions in Z direction
+ */
+ unsigned int getZPartitions() const { return m_partitionCounts[2]; }
+
+  /// @return Pointer to the local subdomain minimum corner (x, y, z)
+  real64 const * getLocalMin() const { return m_localMin; }
+
+  /// @return Pointer to the local subdomain maximum corner (x, y, z)
+  real64 const * getLocalMax() const { return m_localMax; }
+
+  /// @return Pointer to the global domain minimum corner (x, y, z)
+  real64 const * getGlobalMin() const { return m_globalGridMin; }
+
+  /// @return Pointer to the global domain maximum corner (x, y, z)
+  real64 const * getGlobalMax() const { return m_globalGridMax; }
+
+
+ /**
+ * @brief Struct to serve as a container for variable strings and keys.
+ * @struct viewKeyStruct
+ */
+ struct viewKeyStruct : GeometricPartitioner::viewKeyStruct
+ {
+ /// String key for partition counts
+ static constexpr char const * partitionCountsString() { return "partitionCounts"; }
+ };
+
+protected:
+
+ /**
+ * @brief Compute neighbors from Cartesian grid topology
+ *
+ * Finds all neighbors in a 3×3×3 stencil (26-connected):
+ * - 6 face neighbors
+ * - 12 edge neighbors
+ * - 8 corner neighbors
+ *
+ * Handles periodic boundaries.
+ *
+ * @param[in] comm Cartesian communicator
+ *
+ * @post m_neighborsRank is populated
+ */
+ void computeNeighborsFromTopology( MPI_Comm & comm ) override;
+
+
+private:
+
+ /**
+ * @brief Initialize MPI Cartesian communicator
+ *
+ * Creates an MPI_Cart communicator with the specified topology.
+ * Sets m_cartRank and m_coords based on rank position in grid.
+ *
+ * @param[out] cartComm The created Cartesian communicator
+ *
+ * @pre m_partitionCounts must be set
+ * @post m_cartRank is set
+ * @post m_coords is set
+ */
+ void initializeCartesianCommunicator( MPI_Comm & cartComm );
+
+ /**
+ * @brief Recursively add all neighbors in 3×3×3 stencil
+ *
+ * @param idim Current dimension being explored (0, 1, or 2)
+ * @param ncoords Neighbor coordinates being constructed
+ * @param cartComm Cartesian communicator
+ */
+ void addNeighbors( unsigned int const idim, int * ncoords, MPI_Comm const cartComm );
+
+ /**
+ * @brief Set global domain values
+ *
+ * Stores global bounding box and computes global domain size.
+ *
+ * @param gridMin Global domain minimum
+ * @param gridMax Global domain maximum
+ *
+ * @post m_globalGridMin, m_globalGridMax, m_globalGridSize are set
+ */
+ void setGlobalGridValues( R1Tensor const & gridMin, R1Tensor const & gridMax );
+
+ /**
+ * @brief Compute local subdomain bounds
+ *
+ * Divides global domain equally among partitions and computes
+ * this rank's portion.
+ *
+ * @param gridMin Global domain minimum
+ *
+ * @post m_localMin, m_localMax, m_localSize are set
+ */
+ void setLocalPartitionValues( R1Tensor const & gridMin );
+
+ /**
+ * @brief Validate that partition count matches MPI size
+ *
+ * @param size MPI communicator size
+ * @throw std::runtime_error if size != m_numPartitions
+ */
+ void validatePartitionSize( int const size ) const;
+
+protected:
+ /// Number of spatial dimensions (always 3)
+ static constexpr int m_ndim = 3;
+
+ /// Periodic boundary flags for each direction {x, y, z}
+ array1d< int > m_periodic;
+
+ /// Cartesian grid coordinates of this rank {i, j, k}
+ array1d< int > m_coords;
+
+ /// Number of partitions in each direction {nx, ny, nz}
+ array1d< int > m_partitionCounts;
+
+ /// Local subdomain minimum (x, y, z)
+ real64 m_localMin[3];
+
+ /// Local subdomain maximum (x, y, z)
+ real64 m_localMax[3];
+
+ /// Local subdomain size (x, y, z)
+ real64 m_localSize[3];
+
+ /// Global domain minimum (x, y, z)
+ real64 m_globalGridMin[3];
+
+ /// Global domain maximum (x, y, z)
+ real64 m_globalGridMax[3];
+
+ /// Global domain size (x, y, z)
+ real64 m_globalGridSize[3];
+
+ /// Rank in Cartesian communicator
+ int m_cartRank;
+};
+
+} // namespace geos
+
+#endif // GEOS_MESH_MPICOMMUNICATIONS_CARTESIANPARTITIONER_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/CellGraphPartitioner.cpp b/src/coreComponents/mesh/mpiCommunications/CellGraphPartitioner.cpp
new file mode 100644
index 00000000000..1486beab99a
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/CellGraphPartitioner.cpp
@@ -0,0 +1,42 @@
+#include "CellGraphPartitioner.hpp"
+#include "mesh/generators/VTKUtilities.hpp"
+#include "GraphPartitionEngine.hpp"
+
+namespace geos
+{
+using namespace dataRepository;
+
+CellGraphPartitioner::CellGraphPartitioner( string const & name, Group * const parent )
+ : MeshPartitioner( name, parent )
+{}
+
+CellGraphPartitioner::~CellGraphPartitioner() = default;
+
+array1d< int64_t > CellGraphPartitioner::computePartitioning( vtk::AllMeshes & mesh,
+ MPI_Comm const comm )
+{
+ GEOS_MARK_FUNCTION;
+
+ pmet_idx_t const numParts = MpiWrapper::commSize( comm );
+ return vtk::partitionByCellGraph( mesh, getEngine(), comm, numParts );
+}
+
+string CellGraphPartitioner::getInfoString() const
+{
+ string const engineName = getEngine().getCatalogName();
+
+ return GEOS_FMT( "{} '{}': engine={}, refinements={}, numPartitions={}, numNeighbors={}, numColors={}, color={}",
+ catalogName(),
+ getName(),
+ engineName,
+ m_numRefinements,
+ m_numPartitions,
+ m_neighborsRank.size(),
+ m_numColors,
+ m_color );
+}
+
+// Register in DomainPartitioner catalog
+REGISTER_CATALOG_ENTRY( DomainPartitioner, CellGraphPartitioner, string const &, Group * const )
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/CellGraphPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/CellGraphPartitioner.hpp
new file mode 100644
index 00000000000..ca7b0c4f326
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/CellGraphPartitioner.hpp
@@ -0,0 +1,63 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file CellGraphPartitioner.hpp
+ */
+
+#ifndef GEOS_MESH_MPICOMMUNICATIONS_CELLGRAPHPARTITIONER_HPP
+#define GEOS_MESH_MPICOMMUNICATIONS_CELLGRAPHPARTITIONER_HPP
+
+#include "MeshPartitioner.hpp"
+
+namespace geos
+{
+
+/**
+ * @class CellGraphPartitioner
+ * @brief Partition mesh using cell dual graph
+ *
+ * Uses pure graph-based partitioning:
+ * 1. Build dual graph (cells connected if they share face)
+ * 2. Partition graph using ParMETIS/PTScotch
+ */
+class CellGraphPartitioner : public MeshPartitioner
+{
+public:
+
+ CellGraphPartitioner( string const & name, Group * const parent );
+
+ virtual ~CellGraphPartitioner() override;
+
+ static string catalogName() { return "CellGraphPartitioner"; }
+
+ virtual string getInfoString() const override;
+
+protected:
+
+ /**
+ * @brief Compute partition assignment using cell graph
+ *
+ * @param mesh Input mesh
+ * @param comm MPI communicator
+ * @return Partition assignment for each element (3D + 2D)
+ */
+ virtual array1d< int64_t > computePartitioning( vtk::AllMeshes & mesh,
+ MPI_Comm const comm ) override;
+};
+
+} // namespace geos
+
+#endif
diff --git a/src/coreComponents/mesh/mpiCommunications/DomainPartitioner.cpp b/src/coreComponents/mesh/mpiCommunications/DomainPartitioner.cpp
new file mode 100644
index 00000000000..06ebf640e80
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/DomainPartitioner.cpp
@@ -0,0 +1,86 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file DomainPartitioner.cpp
+ */
+
+#include "DomainPartitioner.hpp"
+
+namespace geos
+{
+using namespace dataRepository;
+
+DomainPartitioner::DomainPartitioner( string const & name,
+ Group * const parent )
+ : Group( name, parent ),
+ m_neighborsRank(),
+ m_numPartitions( 1 ),
+ m_numColors( 1 ),
+ m_color( 0 )
+{
+  setInputFlags( InputFlags::OPTIONAL_NONUNIQUE );  // needed for schema generation
+  setRestartFlags( RestartFlags::NO_WRITE );        // partitioners are configuration-only, not simulation state
+}
+
+DomainPartitioner::~DomainPartitioner()
+{}
+
+void DomainPartitioner::postInputInitialization()
+{
+ Group::postInputInitialization();
+}
+
+void DomainPartitioner::setPartitionCounts( unsigned int const xPartitions,
+ unsigned int const yPartitions,
+ unsigned int const zPartitions )
+{
+ m_numPartitions = xPartitions * yPartitions * zPartitions;
+
+ GEOS_ERROR_IF( m_numPartitions < 1,
+ "Total partition count must be at least 1" );
+}
+
+void DomainPartitioner::setNeighborsRank( std::vector< int > const & neighborsRank )
+{
+ m_neighborsRank = neighborsRank;
+}
+
+DomainPartitioner::CatalogInterface::CatalogType &
+DomainPartitioner::getCatalog()
+{
+ static DomainPartitioner::CatalogInterface::CatalogType catalog;
+ return catalog;
+}
+
+void DomainPartitioner::printInfo() const
+{
+ GEOS_LOG_RANK_0( getInfoString() );
+}
+
+string DomainPartitioner::getInfoString() const
+{
+ return GEOS_FMT( "{} '{}': partitions={}, neighbors={}, colors={}",
+ getCatalogName(),
+ getName(),
+ m_numPartitions,
+ m_neighborsRank.size(),
+ m_numColors );
+}
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/DomainPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/DomainPartitioner.hpp
new file mode 100644
index 00000000000..ef1d9fbf469
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/DomainPartitioner.hpp
@@ -0,0 +1,202 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file DomainPartitioner.hpp
+ */
+
+#ifndef GEOS_MESH_MPICOMMUNICATIONS_DOMAINPARTITIONER_HPP_
+#define GEOS_MESH_MPICOMMUNICATIONS_DOMAINPARTITIONER_HPP_
+
+#include "dataRepository/Group.hpp"
+#include "common/DataTypes.hpp"
+
+namespace geos
+{
+
+/**
+ * @class DomainPartitioner
+ * @brief Abstract base class for all domain decomposition strategies
+ *
+ * ALL domain partitioners must:
+ * - Compute MPI neighbor ranks
+ * - Assign communication colors
+ * - Support partition count configuration
+ *
+ * Concrete implementations:
+ * - MeshPartitioner: Partitioners that operate on loaded meshes (uses GraphPartitionEngine)
+ * - CellGraphPartitioner: Standard cell-based dual graph partitioning
+ * - LayeredGraphPartitioner: Structured/layered mesh partitioning
+ * - GeometricPartitioner: Partitioners based on spatial coordinates
+ * - CartesianPartitioner: Cartesian domain decomposition
+ * - ParticleCartesianPartitioner: Cartesian for particle meshes
+ */
+class DomainPartitioner : public dataRepository::Group
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this partitioner instance
+ * @param parent The parent group
+ */
+ explicit DomainPartitioner( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~DomainPartitioner() override;
+
+
+ /**
+ * @brief Get the catalog name for this partitioner type
+   * @return The catalog name string
+ */
+ string getCatalogName() const
+ {
+ // Extract type name from the data context
+ return getDataContext().toString();
+ }
+
+ /**
+ * @brief Post-input initialization
+ * @note Made public to allow initialization of dynamically created default partitioners
+ */
+ virtual void postInputInitialization() override;
+
+ /**
+ * @brief Process command-line partition count overrides (-x, -y, -z flags)
+ *
+ * This is called after XML parsing to allow command-line overrides.
+ *
+ * @param xparCL X-direction partition count from command line (0 = no override)
+ * @param yparCL Y-direction partition count from command line (0 = no override)
+ * @param zparCL Z-direction partition count from command line (0 = no override)
+ */
+ virtual void processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL ) = 0;
+
+ /**
+ * @brief Set target partition counts in each logical dimension
+ *
+ * @param xPartitions Number of partitions in X direction
+ * @param yPartitions Number of partitions in Y direction
+ * @param zPartitions Number of partitions in Z direction
+ *
+ * @post m_numPartitions = xPartitions * yPartitions * zPartitions
+ */
+ virtual void setPartitionCounts( unsigned int const xPartitions,
+ unsigned int const yPartitions,
+ unsigned int const zPartitions );
+
+ /**
+ * @brief Set the list of neighboring MPI ranks
+ *
+ * This is typically called internally by the partitioner after domain decomposition.
+ *
+ * @param neighborsRank Vector of neighbor rank IDs
+ */
+ void setNeighborsRank( std::vector< int > const & neighborsRank );
+
+ /**
+ * @brief Get the list of neighboring MPI ranks
+ *
+ * This is called by DomainPartition::setupBaseLevelMeshGlobalInfo() to configure
+ * ghost communication.
+ *
+ * @return Vector of neighbor rank IDs
+ *
+ * @pre Partitioner must be fully initialized (neighbors computed)
+ */
+ std::vector< int > const & getNeighborsRank() const { return m_neighborsRank; }
+
+ /**
+ * @brief Get the number of neighboring ranks
+ * @return Number of neighbors
+ */
+ int getNumNeighbors() const { return static_cast< int >( m_neighborsRank.size() ); }
+
+ /**
+ * @brief Compute graph coloring for communication scheduling
+ *
+ * Colors are assigned such that no two neighboring ranks have the same color.
+ * This enables non-blocking communication patterns.
+ *
+ * @post m_color and m_numColors are set
+ *
+ * @note Must be called after neighbors are determined
+ */
+ virtual void color() = 0;
+
+ /**
+ * @brief Get the color assigned to this rank
+ * @return Color ID [0, numColors)
+ */
+ int getColor() const { return m_color; }
+
+ /**
+ * @brief Get the total number of colors used
+ * @return Number of colors
+ */
+ int getNumColors() const { return m_numColors; }
+
+ /**
+ * @brief Get the total number of partitions
+ * @return Total partition count
+ */
+ int getNumPartitions() const { return m_numPartitions; }
+
+
+ /**
+ * @brief Print information about this partitioner to log
+ */
+ virtual void printInfo() const;
+
+ /**
+   * @brief Get a descriptive string about this partitioner
+   * @return Human-readable summary of the partitioner configuration
+ */
+ virtual string getInfoString() const;
+
+ /**
+ * @brief Accessor for the singleton Catalog object
+ *
+ * @return Reference to the Catalog object
+ */
+ using CatalogInterface = dataRepository::CatalogInterface< DomainPartitioner,
+ string const &,
+ dataRepository::Group * const >;
+
+ static CatalogInterface::CatalogType & getCatalog();
+
+protected:
+
+ /// MPI neighbor ranks (set after domain decomposition)
+ std::vector< int > m_neighborsRank;
+
+ /// Total number of partitions
+ int m_numPartitions;
+
+ /// Total number of colors used for communication scheduling
+ int m_numColors;
+
+ /// Color assigned to this rank [0, numColors)
+ int m_color;
+};
+
+} // namespace geos
+
+#endif // GEOS_MESH_MPICOMMUNICATIONS_DOMAINPARTITIONER_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/PartitionBase.cpp b/src/coreComponents/mesh/mpiCommunications/GeometricPartitioner.cpp
similarity index 70%
rename from src/coreComponents/mesh/mpiCommunications/PartitionBase.cpp
rename to src/coreComponents/mesh/mpiCommunications/GeometricPartitioner.cpp
index a1175c74b62..24e8cf669a9 100644
--- a/src/coreComponents/mesh/mpiCommunications/PartitionBase.cpp
+++ b/src/coreComponents/mesh/mpiCommunications/GeometricPartitioner.cpp
@@ -13,20 +13,21 @@
* ------------------------------------------------------------------------------------------------------------
*/
-#include "PartitionBase.hpp"
+/**
+ * @file GeometricPartitioner.cpp
+ */
+
+#include "GeometricPartitioner.hpp"
namespace geos
{
+using namespace dataRepository;
-PartitionBase::PartitionBase( const unsigned int numPartitions,
- const unsigned int thisPartition )
- :
- m_size( numPartitions ),
- m_rank( thisPartition ),
- m_numColors( 1 )
+GeometricPartitioner::GeometricPartitioner( string const & name, Group * const parent )
+ : DomainPartitioner( name, parent )
{}
-PartitionBase::~PartitionBase()
+GeometricPartitioner::~GeometricPartitioner()
{}
-}
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/GeometricPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/GeometricPartitioner.hpp
new file mode 100644
index 00000000000..bf8224044f6
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/GeometricPartitioner.hpp
@@ -0,0 +1,110 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file GeometricPartitioner.hpp
+ * @brief Base class for geometric (coordinate-based) domain partitioners
+ */
+
+#ifndef GEOS_MESH_MPICOMMUNICATIONS_GEOMETRICPARTITIONER_HPP_
+#define GEOS_MESH_MPICOMMUNICATIONS_GEOMETRICPARTITIONER_HPP_
+
+#include "DomainPartitioner.hpp"
+
+namespace geos
+{
+
+/**
+ * @class GeometricPartitioner
+ * @brief Base class for partitioners that use geometric decomposition
+ *
+ * Geometric partitioners decompose the domain based on spatial coordinates rather than
+ * mesh connectivity:
+ * - Don't require loaded meshes
+ * - Don't use graph partitioning algorithms
+ * - Compute neighbors from geometric topology
+ * - Can be used before mesh generation
+ * - Provide spatial queries (isCoordInPartition)
+ *
+ * Concrete implementations:
+ * - CartesianPartitioner: Regular X × Y × Z grid decomposition
+ *
+ * Workflow:
+ * 1. Create partitioner from XML
+ * 2. Call initializeDomain() with global bounding box
+ * 3. Partitioner computes local subdomain and neighbors
+ * 4. Use isCoordInPartition() to query point ownership
+ *
+ */
+class GeometricPartitioner : public DomainPartitioner
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this partitioner instance
+ * @param parent The parent group
+ */
+ explicit GeometricPartitioner( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~GeometricPartitioner() override;
+
+ /**
+ * @brief Initialize the partitioner with the global domain's bounding box
+ *
+ * This method:
+ * 1. Computes local subdomain bounds for this rank
+ * 2. Determines neighbor ranks from geometric topology
+ * 3. Computes communication coloring
+ *
+ * @param globalMin The minimum coordinates (x, y, z) of the global domain
+ * @param globalMax The maximum coordinates (x, y, z) of the global domain
+ *
+ * @post m_neighborsRank is set
+ * @post m_color and m_numColors are set
+ * @post Local subdomain bounds are computed
+ *
+ */
+ virtual void initializeDomain( R1Tensor const & globalMin,
+ R1Tensor const & globalMax ) = 0;
+
+ /**
+ * @brief Check if a coordinate falls within this rank's local partition
+ *
+ * @param coords The spatial coordinates (x, y, z) to test
+ * @return true if the coordinate is within the local partition's boundaries
+ *
+ * @pre initializeDomain() must have been called
+ */
+ virtual bool isCoordInPartition( R1Tensor const & coords ) const = 0;
+
+protected:
+
+ /**
+ * @brief Compute neighbors from geometric topology
+ *
+ * @post m_neighborsRank is populated
+ *
+ */
+ virtual void computeNeighborsFromTopology( MPI_Comm & comm ) = 0;
+};
+
+} // namespace geos
+
+#endif // GEOS_MESH_MPICOMMUNICATIONS_GEOMETRICPARTITIONER_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/GraphPartitionEngine.cpp b/src/coreComponents/mesh/mpiCommunications/GraphPartitionEngine.cpp
new file mode 100644
index 00000000000..c429eb113e4
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/GraphPartitionEngine.cpp
@@ -0,0 +1,54 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file GraphPartitionEngine.cpp
+ */
+
+#include "GraphPartitionEngine.hpp"
+#ifdef GEOS_USE_PARMETIS
+ #include "ParMetisEngine.hpp"
+#endif
+namespace geos
+{
+using namespace dataRepository;
+
+GraphPartitionEngine::GraphPartitionEngine( string const & name,
+ dataRepository::Group * const parent )
+ : Group( name, parent )
+{
+ // No wrappers to register
+ // Only set input flags if we have a parent (i.e., when used in production code)
+ // For unit tests with nullptr parent, skip this
+ if( parent != nullptr )
+ {
+ setInputFlags( dataRepository::InputFlags::FALSE );
+ setRestartFlags( RestartFlags::NO_WRITE );
+ }
+}
+
+GraphPartitionEngine::~GraphPartitionEngine()
+{
+ // Default destructor
+}
+
+GraphPartitionEngine::CatalogInterface::CatalogType &
+GraphPartitionEngine::getCatalog()
+{
+ static GraphPartitionEngine::CatalogInterface::CatalogType catalog;
+ return catalog;
+}
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/GraphPartitionEngine.hpp b/src/coreComponents/mesh/mpiCommunications/GraphPartitionEngine.hpp
new file mode 100644
index 00000000000..b49e241c260
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/GraphPartitionEngine.hpp
@@ -0,0 +1,147 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file GraphPartitionEngine.hpp
+ */
+
+#ifndef GEOS_PARTITIONER_GRAPHPARTITIONENGINE_HPP_
+#define GEOS_PARTITIONER_GRAPHPARTITIONENGINE_HPP_
+
+#include "dataRepository/Group.hpp"
+#include "common/DataTypes.hpp"
+#include "LvArray/src/ArrayOfArrays.hpp"
+
+namespace geos
+{
+
+#if defined(GEOS_USE_HIP) // still need int32 hypre for the current hip-capable build
+/// Typedef to allow us to specify required parmetis/scotch integer type.
+using pmet_idx_t = int32_t;
+#else
+/// Typedef to allow us to specify required parmetis/scotch integer type.
+using pmet_idx_t = int64_t;
+#endif
+
+/**
+ * @class GraphPartitionEngine
+ * @brief Abstract interface for low-level graph partitioning algorithms
+ *
+ * This is a LOW-LEVEL engine for pure algorithms: graph -> partition IDs
+ */
+class GraphPartitionEngine : public dataRepository::Group
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this engine instance
+ * @param parent The parent group
+ */
+ explicit GraphPartitionEngine( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~GraphPartitionEngine() override;
+
+ /**
+ * @brief Partition a distributed graph
+ *
+ * @param graph Edge connectivity (locally owned vertices).
+ * graph[i] contains the global indices of neighbors of local vertex i.
+ * @param vertDist Vertex distribution across ranks.
+ * vertDist[rank] = global index of first vertex on rank.
+ * Length: commSize + 1
+ * @param numParts Target number of partitions (typically = commSize)
+ * @param comm MPI communicator
+ * @return Array mapping each local vertex to its target partition ID [0, numParts)
+ *
+ * @pre graph.size() == vertDist[myRank+1] - vertDist[myRank]
+ * @post return.size() == graph.size()
+ * @post All return values in [0, numParts)
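+   *
+   * @note For example, with 2 ranks and 6 vertices split evenly,
+   *       vertDist = { 0, 3, 6 }: rank 0 owns global vertices 0-2 and its
+   *       graph has 3 rows, each listing the global ids of that vertex's neighbors.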
+ */
+ virtual array1d< pmet_idx_t > partition(
+ ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t numParts,
+ MPI_Comm comm ) = 0;
+
+ /**
+ * @brief Convert element-node connectivity to element-element dual graph
+ *
+ * Given a mesh represented by element-node connectivity, construct the dual graph
+ * where:
+ * - Vertices = elements
+ * - Edges = element pairs that share at least minCommonNodes nodes
+ * Each engine must provide an implementation (typically using ParMETIS as fallback).
+ *
+ * @param elemToNodes Element-to-node connectivity map.
+ * elemToNodes[i] = global node IDs of element i.
+ * @param elemDist Element distribution across ranks.
+ * elemDist[rank] = global index of first element on rank.
+ * @param comm MPI communicator
+ * @param minCommonNodes Minimum number of shared nodes to create an edge (typically 3 for 3D)
+ * @return Dual graph where vertices=elements, edges=element adjacency
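+   *
+   * @note With minCommonNodes = 3, two hexahedra sharing a quadrilateral face
+   *       (4 common nodes) become adjacent in the dual graph, whereas elements
+   *       touching only along an edge (2 common nodes) do not.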
+ */
+ virtual ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int minCommonNodes ) = 0;
+
+ /**
+ * @brief Get the number of refinement iterations
+ *
+ * @return Number of refinement iterations
+ */
+ virtual int getNumRefinements() const = 0;
+
+ /**
+ * @brief Set the number of refinement iterations
+ *
+ * @param numRefinements Number of refinement iterations
+ */
+ virtual void setNumRefinements( int const numRefinements ) = 0;
+
+ /**
+ * @brief Accessor for the singleton Catalog object
+ *
+ * @note This is an INTERNAL catalog, separate from the DomainPartitioner catalog.
+ *
+ * @return A reference to the Catalog object
+ */
+ using CatalogInterface = dataRepository::CatalogInterface< GraphPartitionEngine,
+ string const &,
+ dataRepository::Group * const >;
+
+ static CatalogInterface::CatalogType & getCatalog();
+
+ /**
+ * @brief Get the catalog name for this partitioner type
+   * @return The catalog name string
+ */
+ string getCatalogName() const
+ {
+ // Extract type name from the data context
+ return getDataContext().toString();
+ }
+
+};
+
+} // namespace geos
+
+#endif // GEOS_PARTITIONER_GRAPHPARTITIONENGINE_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/LayeredMeshPartitioner.cpp b/src/coreComponents/mesh/mpiCommunications/LayeredMeshPartitioner.cpp
new file mode 100644
index 00000000000..64edf50721a
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/LayeredMeshPartitioner.cpp
@@ -0,0 +1,120 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+ #include "LayeredMeshPartitioner.hpp"
+#include "mesh/generators/VTKUtilities.hpp"
+
+namespace geos
+{
+using namespace dataRepository;
+
+LayeredMeshPartitioner::LayeredMeshPartitioner( string const & name, Group * const parent )
+ : MeshPartitioner( name, parent ),
+ m_indexArrayName( "" ),
+ m_numPartZ( 1 )
+{
+ // Only register parameters specific to LayeredMeshPartitioner
+ registerWrapper( viewKeyStruct::indexArrayNameString(), &m_indexArrayName ).
+ setInputFlag( dataRepository::InputFlags::REQUIRED ).
+ setDescription( "Name of VTK cell data array containing [area, layer] indices" );
+
+ registerWrapper( viewKeyStruct::numPartZString(), &m_numPartZ ).
+ setApplyDefaultValue( 1 ).
+ setInputFlag( dataRepository::InputFlags::OPTIONAL ).
+ setDescription( "Number of partitions in layer Z-direction" );
+}
+
+LayeredMeshPartitioner::~LayeredMeshPartitioner() = default;
+
+void LayeredMeshPartitioner::postInputInitialization()
+{
+ // Call base class initialization (creates and sets engine)
+ MeshPartitioner::postInputInitialization();
+
+ // Validate LayeredMeshPartitioner-specific parameters
+ GEOS_THROW_IF( m_indexArrayName.empty(),
+ "LayeredMeshPartitioner requires 'indexArrayName' to be specified", InputError );
+
+ GEOS_THROW_IF_LE_MSG( m_numPartZ, 0,
+ GEOS_FMT( "LayeredMeshPartitioner requires 'numPartZ' > 0, got {}", m_numPartZ ), InputError );
+
+ int const mpiSize = MpiWrapper::commSize( MPI_COMM_GEOS );
+ GEOS_THROW_IF_NE_MSG( mpiSize % m_numPartZ, 0,
+ GEOS_FMT( "Total MPI ranks ( {} ) must be evenly divisible by numPartZ ({})", mpiSize, m_numPartZ ), InputError );
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "LayeredMeshPartitioner: {} ranks will be partitioned into {} area partitions x {} layer partitions",
+ mpiSize, mpiSize / m_numPartZ, m_numPartZ ) );
+}
+
+void LayeredMeshPartitioner::processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL )
+{
+ // For layered partitioner:
+ // - zparCL -> numPartZ (layer partitions)
+ // - xparCL * yparCL -> area partitions
+
+ if( zparCL != 0 )
+ {
+ m_numPartZ = zparCL;
+ GEOS_LOG_RANK_0( GEOS_FMT( "LayeredMeshPartitioner: Command-line override - numPartZ set to {}", m_numPartZ ) );
+ }
+
+ if( xparCL != 0 || yparCL != 0 )
+ {
+ unsigned int const xPart = ( xparCL != 0 ? xparCL : 1 );
+ unsigned int const yPart = ( yparCL != 0 ? yparCL : 1 );
+ unsigned int const areaPartitions = xPart * yPart;
+ unsigned int const totalPartitions = areaPartitions * m_numPartZ;
+
+ setPartitionCounts( xPart, yPart, m_numPartZ );
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "LayeredMeshPartitioner: Command-line override - "
+ "partitioning into {} total parts ({} area x {} layers)",
+ totalPartitions, areaPartitions, m_numPartZ ) );
+ }
+ else
+ {
+ // Default: partition into commSize parts
+ int const mpiSize = MpiWrapper::commSize( MPI_COMM_GEOS );
+
+ GEOS_THROW_IF_NE_MSG( mpiSize % m_numPartZ, 0,
+ GEOS_FMT( "Total MPI ranks ({}) must be evenly divisible by numPartZ ({})", mpiSize, m_numPartZ ), InputError );
+
+ int const areaPartitions = mpiSize / m_numPartZ;
+ setPartitionCounts( 1, areaPartitions, m_numPartZ );
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "LayeredMeshPartitioner: Partitioning into {} parts "
+ "({} area x {} layers)",
+ mpiSize, areaPartitions, m_numPartZ ) );
+ }
+}
+
+array1d< int64_t > LayeredMeshPartitioner::computePartitioning( vtk::AllMeshes & mesh,
+ MPI_Comm const comm )
+{
+ GEOS_MARK_FUNCTION;
+
+ GraphPartitionEngine & engine = getEngine();
+ return vtk::partitionByAreaAndLayer( mesh,
+ m_indexArrayName,
+ engine,
+ m_numPartZ,
+ comm );
+}
+
+REGISTER_CATALOG_ENTRY( DomainPartitioner, LayeredMeshPartitioner, string const &, Group * const )
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/LayeredMeshPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/LayeredMeshPartitioner.hpp
new file mode 100644
index 00000000000..8a1909505fa
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/LayeredMeshPartitioner.hpp
@@ -0,0 +1,105 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file LayeredMeshPartitioner.hpp
+ */
+
+#ifndef GEOS_PARTITIONER_LAYEREDMESHPARTITIONER_HPP_
+#define GEOS_PARTITIONER_LAYEREDMESHPARTITIONER_HPP_
+
+#include "MeshPartitioner.hpp"
+
+namespace geos
+{
+
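+/**
+ * @class LayeredMeshPartitioner
+ * @brief Partition a mesh using a per-cell [area, layer] index array
+ *
+ * The areal part of the decomposition is computed with the configured graph
+ * engine while the layer direction is split into numPartZ slices; the actual
+ * algorithm is delegated to vtk::partitionByAreaAndLayer() (see the .cpp).
+ */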
+class LayeredMeshPartitioner : public MeshPartitioner
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this partitioner instance
+ * @param parent The parent group
+ */
+ explicit LayeredMeshPartitioner( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~LayeredMeshPartitioner() override;
+
+ /**
+ * @brief Catalog name for factory registration
+ * @return The catalog key
+ */
+ static string catalogName() { return "LayeredMeshPartitioner"; }
+
+ /**
+ * @brief Process command-line partition count overrides
+ *
+ * For layered partitioner, Z-direction override sets numPartZ.
+ *
+ * @param xparCL X-direction partition count (0 = no override)
+ * @param yparCL Y-direction partition count (0 = no override)
+ * @param zparCL Z-direction partition count (0 = no override, sets numPartZ)
+ */
+ virtual void processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL ) override;
+
+ /**
+ * @brief Struct to serve as a container for variable strings and keys
+ */
+ struct viewKeyStruct : MeshPartitioner::viewKeyStruct
+ {
+ /// String key for the structured index array name
+ static constexpr char const * indexArrayNameString() { return "indexArrayName"; }
+
+ /// String key for the number of partitions in Z direction
+ static constexpr char const * numPartZString() { return "numPartZ"; }
+ };
+
+protected:
+
+ /**
+ * @brief Post-processing after input has been read
+ */
+ virtual void postInputInitialization() override;
+
+ /**
+ * @brief Compute partition assignment using area-layer algorithm
+ *
+ * @param mesh Input VTK mesh with structured index array
+ * @param comm MPI communicator
+ * @return Partition assignment for each element (main mesh only)
+ */
+ virtual array1d< int64_t > computePartitioning( vtk::AllMeshes & mesh,
+ MPI_Comm const comm ) override;
+
+private:
+
+ /// Name of VTK cell data array containing [area, layer] indices
+ string m_indexArrayName;
+
+ /// Number of partitions in layer Z-direction
+ int m_numPartZ;
+};
+
+} // namespace geos
+
+
+#endif // GEOS_PARTITIONER_LAYEREDMESHPARTITIONER_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/MeshPartitioner.cpp b/src/coreComponents/mesh/mpiCommunications/MeshPartitioner.cpp
new file mode 100644
index 00000000000..417ab780611
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/MeshPartitioner.cpp
@@ -0,0 +1,226 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file MeshPartitioner.cpp
+ */
+
+
+
+#include "MeshPartitioner.hpp"
+#include "GraphPartitionEngine.hpp"
+
+#include "mesh/generators/VTKMeshGeneratorTools.hpp"
+#include <algorithm>
+
+#ifdef GEOS_USE_TRILINOS
+#include "mesh/graphs/ZoltanGraphColoring.hpp"
+#else
+#include "mesh/graphs/RLFGraphColoringMPI.hpp"
+#endif
+
+#include "mesh/mpiCommunications/NoOpEngine.hpp"
+#ifdef GEOS_USE_PARMETIS
+#include "mesh/mpiCommunications/ParMetisEngine.hpp"
+#endif
+#ifdef GEOS_USE_PTSCOTCH
+#include "mesh/mpiCommunications/PTScotchEngine.hpp"
+#endif
+
+
+namespace geos
+{
+MeshPartitioner::MeshPartitioner( string const & name, Group * const parent )
+ : DomainPartitioner( name, parent ),
+ m_engineType( "parmetis" ),
+ m_numRefinements( 0 )
+{
+ // Register engine type
+ registerWrapper( MeshPartitioner::viewKeyStruct::engineTypeString(), &m_engineType ).
+ setApplyDefaultValue( "parmetis" ).
+ setInputFlag( dataRepository::InputFlags::OPTIONAL ).
+ setDescription( "Graph partitioning engine (parmetis, ptscotch, or noop)" );
+
+ // Register number of refinements
+ registerWrapper( MeshPartitioner::viewKeyStruct::numRefinementsString(), &m_numRefinements ).
+ setApplyDefaultValue( 0 ).
+ setInputFlag( dataRepository::InputFlags::OPTIONAL ).
+ setDescription( "Number of refinement iterations for graph partitioning (ParMETIS only, ignored by other engines)" );
+}
+
+
+
+GraphPartitionEngine & MeshPartitioner::getEngine()
+{
+ auto * engine = this->getGroupPointer< GraphPartitionEngine >(
+ viewKeyStruct::engineTypeString());
+
+ GEOS_ERROR_IF( engine == nullptr,
+ "Engine not initialized. Call postInputInitialization() first." );
+ return *engine;
+}
+
+GraphPartitionEngine const & MeshPartitioner::getEngine() const
+{
+ auto const * engine = this->getGroupPointer< GraphPartitionEngine >(
+ viewKeyStruct::engineTypeString());
+
+ GEOS_ERROR_IF( engine == nullptr,
+ "Engine not initialized. Call postInputInitialization() first." );
+ return *engine;
+}
+
+
+void MeshPartitioner::processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL )
+{
+ // If user provided command-line overrides for partition counts...
+ if( xparCL != 0 || yparCL != 0 || zparCL != 0 )
+ {
+ // ...warn that they are ignored for non-geometric partitioners
+ GEOS_WARNING( "Partition counts from the command line (-x, -y, -z) are ignored when using a mesh-based partitioner." );
+ }
+
+ int const mpiSize = MpiWrapper::commSize( MPI_COMM_GEOS );
+ setPartitionCounts( 1, 1, mpiSize );
+}
+
+
+vtk::AllMeshes MeshPartitioner::partitionMeshes( vtk::AllMeshes & mesh, MPI_Comm const comm )
+{
+ GEOS_MARK_FUNCTION;
+
+ // ====================================================================
+ // STEP 1: Compute partitioning, specific to subclass
+ // ====================================================================
+ array1d< int64_t > const partitioning = computePartitioning( mesh, comm );
+
+ // ====================================================================
+ // STEP 2: Apply partitioning to redistribute VTK meshes
+ // ====================================================================
+ vtk::AllMeshes redistributedMesh =
+ vtk::applyPartitioning( mesh, partitioning.toViewConst(), comm );
+
+ // ====================================================================
+ // STEP 3: Extract neighbor information from redistributed mesh
+ // ====================================================================
+ extractNeighborsFromMesh( redistributedMesh, comm );
+
+ // ====================================================================
+ // STEP 4: Compute graph coloring for communication scheduling
+ // ====================================================================
+ color();
+
+ return redistributedMesh;
+}
+
+
+
+void MeshPartitioner::extractNeighborsFromMesh( vtk::AllMeshes const & mesh, MPI_Comm const comm )
+{
+ GEOS_MARK_FUNCTION;
+
+ // Exchange bounding boxes to determine neighbors
+ stdVector< vtkBoundingBox > boxes =
+ vtk::exchangeBoundingBoxes( *mesh.getMainMesh(), comm );
+
+ // Find neighbor ranks based on bounding box overlaps
+ stdVector< int > neighbors = vtk::findNeighborRanks( std::move( boxes ) );
+
+ // Store the neighbor ranks for later communication setup
+ setNeighborsRank( std::move( neighbors ) );
+}
+
+
+
+void MeshPartitioner::color()
+{
+ GEOS_MARK_FUNCTION;
+
+ // Build adjacency list from neighbor ranks
+ std::vector< camp::idx_t > adjncy;
+ adjncy.reserve( m_neighborsRank.size() );
+ std::copy( m_neighborsRank.begin(), m_neighborsRank.end(), std::back_inserter( adjncy ) );
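+
+ // Example (illustrative): if this rank's neighbors are { 3, 7 }, a valid coloring gives
+ // this rank a color different from both rank 3 and rank 7, so no two neighboring
+ // partitions ever share a color.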
+
+#ifdef GEOS_USE_TRILINOS
+ geos::graph::ZoltanGraphColoring coloring;
+#else
+ geos::graph::RLFGraphColoringMPI coloring;
+#endif
+
+ m_color = coloring.colorGraph( adjncy );
+
+ // Verify that the coloring is valid (no two neighbors have the same color)
+ if( !coloring.isColoringValid( adjncy, m_color ) )
+ {
+ GEOS_ERROR( "Graph coloring failed: an adjacent partition has the same color." );
+ }
+
+ m_numColors = coloring.getNumberOfColors( m_color );
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "MeshPartitioner: Coloring complete. Using {} colors", m_numColors ) );
+}
+
+
+void MeshPartitioner::postInputInitialization()
+{
+ DomainPartitioner::postInputInitialization();
+
+ // Check if engine already exists as child group (from XML deserialization)
+ if( !this->hasGroup( viewKeyStruct::engineTypeString() ) )
+ {
+ // Not in XML - create based on m_engineType attribute
+ GraphPartitionEngine * engine = createEngine();
+ GEOS_ERROR_IF( engine == nullptr,
+ "Failed to create graph partition engine for " << getCatalogName() );
+ }
+
+ // Configure the engine (whether from XML or just created)
+ getEngine().setNumRefinements( m_numRefinements );
+}
+
+
+GraphPartitionEngine * MeshPartitioner::createEngine()
+{
+ GEOS_LOG_RANK_0( "Creating '" << m_engineType << "' engine for " << getName() );
+
+ string const engineTypeLower = stringutilities::toLower( m_engineType );
+
+ if( engineTypeLower == "noop" )
+ {
+ return &this->registerGroup< NoOpEngine >( viewKeyStruct::engineTypeString() );
+ }
+#ifdef GEOS_USE_PARMETIS
+ else if( engineTypeLower == "parmetis" )
+ {
+ return &this->registerGroup< ParMetisEngine >( viewKeyStruct::engineTypeString() );
+ }
+#endif
+#ifdef GEOS_USE_PTSCOTCH
+ else if( engineTypeLower == "ptscotch" )
+ {
+ return &this->registerGroup< PTScotchEngine >( viewKeyStruct::engineTypeString() );
+ }
+#endif
+ else
+ {
+ GEOS_THROW( GEOS_FMT( "Unknown graph partitioner engine type: '{}'", m_engineType ),
+ InputError );
+ }
+}
+
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/MeshPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/MeshPartitioner.hpp
new file mode 100644
index 00000000000..73456079c2e
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/MeshPartitioner.hpp
@@ -0,0 +1,140 @@
+#ifndef GEOS_MESH_MPICOMMUNICATIONS_MESHPARTITIONER_HPP
+#define GEOS_MESH_MPICOMMUNICATIONS_MESHPARTITIONER_HPP
+
+#include "DomainPartitioner.hpp"
+#include "mesh/generators/VTKUtilities.hpp"
+
+#include <mpi.h>
+
+namespace geos
+{
+
+class GraphPartitionEngine;
+
+/**
+ * @class MeshPartitioner
+ * @brief Base class for mesh-based partitioners that use graph partitioning
+ */
+class MeshPartitioner : public DomainPartitioner
+{
+public:
+
+ MeshPartitioner( string const & name, Group * const parent );
+
+ virtual ~MeshPartitioner() = default;
+
+ /**
+ * @brief Partition and redistribute a mesh
+ *
+ * This is the main workflow method that:
+ * 1. Calls computePartitioning() to get partition assignments (virtual, subclass-specific)
+ * 2. Redistributes the mesh using VTK utilities
+ * 3. Extracts neighbors from the redistributed mesh
+ * 4. Computes graph coloring for communication scheduling
+ *
+ * @param mesh Input mesh (loaded from file or generated)
+ * @param comm MPI communicator
+ * @return Redistributed mesh
+ *
+ * @post m_neighborsRank is set
+ * @post m_color and m_numColors are set
+ */
+ virtual vtk::AllMeshes partitionMeshes( vtk::AllMeshes & mesh, MPI_Comm const comm );
+
+ /**
+ * @brief Post-input initialization - creates default engine if needed
+ */
+ virtual void postInputInitialization() override;
+
+
+ /**
+ * @brief Get the graph partition engine (non-const version)
+ *
+ * @return Reference to the engine
+ * @throws If engine has not been set
+ */
+ GraphPartitionEngine & getEngine();
+
+ /**
+ * @brief Get the graph partition engine (const version)
+ *
+ * @return Const reference to the engine
+ * @throws If engine has not been set
+ */
+ GraphPartitionEngine const & getEngine() const;
+
+
+ /**
+ * @brief Process command-line overrides for partition counts
+ *
+ * @param xparCL X-direction partition count from command line (0 = not specified)
+ * @param yparCL Y-direction partition count from command line (0 = not specified)
+ * @param zparCL Z-direction partition count from command line (0 = not specified)
+ */
+ void processCommandLineOverrides( unsigned int const xparCL,
+ unsigned int const yparCL,
+ unsigned int const zparCL ) override;
+
+ /**
+ * @brief Compute graph coloring for communication scheduling
+ */
+ void color() override;
+
+ /**
+ * @brief Struct to serve as a container for variable strings and keys
+ */
+ struct viewKeyStruct : DomainPartitioner::viewKeyStruct
+ {
+ static constexpr char const * engineTypeString() { return "engine"; }
+ static constexpr char const * numRefinementsString() { return "numRefinements"; }
+ };
+
+ /// @brief ViewKeys for MeshPartitioner
+ struct viewKeysStruct
+ {
+ static constexpr char const * engineString() { return "engine"; }
+ dataRepository::ViewKey engine = { engineString() };
+ } viewKeys;
+
+protected:
+
+ /**
+ * @brief Compute partition assignment for mesh elements
+ *
+ * This is the algorithm-specific part that subclasses must implement.
+ * Different partitioners use different algorithms:
+ * - CellGraphPartitioner: Pure dual graph
+ * - LayeredMeshPartitioner: Area-layer decomposition
+ *
+ * @param mesh Input mesh
+ * @param comm MPI communicator
+ * @return Partition assignment for each element
+ */
+ virtual array1d< int64_t > computePartitioning( vtk::AllMeshes & mesh,
+ MPI_Comm const comm ) = 0;
+
+ /**
+ * @brief Extract neighbor ranks from redistributed mesh
+ *
+ * @param mesh Redistributed mesh
+ * @param comm MPI communicator
+ */
+ void extractNeighborsFromMesh( vtk::AllMeshes const & mesh, MPI_Comm const comm );
+
+ /**
+ * @brief Creates a graph partition engine
+ *
+ * @return Pointer to the created engine
+ */
+ GraphPartitionEngine * createEngine();
+
+ /// Engine type (e.g., "parmetis", "ptscotch", or "noop")
+ string m_engineType;
+
+ /// Number of refinement iterations (ParMETIS only, ignored by others)
+ int m_numRefinements;
+};
+
+} // namespace geos
+
+#endif // GEOS_MESH_MPICOMMUNICATIONS_MESHPARTITIONER_HPP
diff --git a/src/coreComponents/mesh/mpiCommunications/NoOpEngine.cpp b/src/coreComponents/mesh/mpiCommunications/NoOpEngine.cpp
new file mode 100644
index 00000000000..c5e3c01c962
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/NoOpEngine.cpp
@@ -0,0 +1,86 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file NoOpEngine.cpp
+ */
+
+#include "NoOpEngine.hpp"
+#include "common/MpiWrapper.hpp"
+
+#ifdef GEOS_USE_PARMETIS
+ #include "ParMetisEngine.hpp" // For fallback
+#endif
+
+namespace geos
+{
+
+NoOpEngine::NoOpEngine( string const & name, Group * const parent )
+ : GraphPartitionEngine( name, parent )
+{}
+
+array1d< pmet_idx_t >
+NoOpEngine::partition( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t const numParts,
+ MPI_Comm comm )
+{
+ GEOS_UNUSED_VAR( graph );
+ GEOS_UNUSED_VAR( numParts );
+
+ int const myRank = MpiWrapper::commRank( comm );
+
+ // Determine the number of local elements on this rank
+ localIndex const numLocalElements = vertDist.size() > 1
+ ? vertDist[myRank + 1] - vertDist[myRank]
+ : graph.size(); // Fallback for single-rank or uninitialized vertDist
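+
+ // Example (illustrative): with vertDist = { 0, 100, 250 }, rank 1 owns elements
+ // [100, 250), i.e. 150 local entries, all of which are assigned to rank 1 below.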
+
+ // Create partition array: assign all local elements to current rank
+ array1d< pmet_idx_t > partition( numLocalElements );
+ partition.setValues< serialPolicy >( static_cast< pmet_idx_t >( myRank ) );
+
+ return partition;
+}
+
+ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+NoOpEngine::meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int minCommonNodes )
+{
+#ifdef GEOS_USE_PARMETIS
+ // Fallback to ParMETIS implementation
+ return ParMetisEngine::parmetisMeshToDual( elemToNodes, elemDist, comm, minCommonNodes );
+#else
+
+ GEOS_UNUSED_VAR( elemToNodes );
+ GEOS_UNUSED_VAR( elemDist );
+ GEOS_UNUSED_VAR( comm );
+ GEOS_UNUSED_VAR( minCommonNodes );
+
+ GEOS_ERROR( "NoOpEngine::meshToDual requires ParMETIS for mesh-to-dual conversion. "
+ "Either build with GEOS_USE_PARMETIS=ON or provide the dual graph directly." );
+ return ArrayOfArrays< pmet_idx_t, pmet_idx_t >();
+
+#endif
+}
+
+// Register in the GraphPartitionEngine catalog
+REGISTER_CATALOG_ENTRY( GraphPartitionEngine,
+ NoOpEngine,
+ string const &,
+ dataRepository::Group * const )
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/NoOpEngine.hpp b/src/coreComponents/mesh/mpiCommunications/NoOpEngine.hpp
new file mode 100644
index 00000000000..e88867bbb40
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/NoOpEngine.hpp
@@ -0,0 +1,117 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file NoOpEngine.hpp
+ */
+
+#ifndef GEOS_PARTITIONER_NOOPENGINE_HPP_
+#define GEOS_PARTITIONER_NOOPENGINE_HPP_
+
+#include "GraphPartitionEngine.hpp"
+
+namespace geos
+{
+
+/**
+ * @class NoOpEngine
+ * @brief Graph partition engine that does not repartition
+ *
+ * NoOpEngine is a graph partitioning engine that:
+ * - Does NOT repartition the graph
+ * - Assigns each element to its current MPI rank
+ * - Useful for testing or when mesh is already well-partitioned
+ */
+class NoOpEngine : public GraphPartitionEngine
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this engine instance
+ * @param parent The parent group
+ */
+ explicit NoOpEngine( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~NoOpEngine() override = default;
+
+ /**
+ * @brief Catalog name for factory registration
+ * @return The catalog key
+ */
+ static string catalogName() { return "noop"; }
+
+ /**
+ * @brief Partition a graph (no-op: assigns each element to current rank)
+ *
+ * This method does NOT repartition. It simply assigns all local elements
+ * to the current MPI rank, effectively preserving the existing distribution.
+ *
+ * @param graph Dual graph adjacency (ignored)
+ * @param vertDist Vertex distribution across ranks
+ * @param numParts Target number of partitions (ignored)
+ * @param comm MPI communicator
+ * @return Partition assignment: all local elements → current rank
+ */
+ array1d< pmet_idx_t > partition(
+ ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t const numParts,
+ MPI_Comm const comm ) override;
+
+ /**
+ * @brief Build dual graph from mesh (fallback to ParMETIS)
+ *
+ * NoOpEngine does not implement its own mesh-to-dual conversion.
+ * Instead, it falls back to the ParMETIS implementation.
+ *
+ * @param elemToNodes Element-to-node connectivity
+ * @param elemDist Element distribution across ranks
+ * @param comm MPI communicator
+ * @param minCommonNodes Minimum shared nodes for dual edge
+ * @return Dual graph adjacency
+ *
+ * @throws If ParMETIS is not available (GEOS_USE_PARMETIS=OFF)
+ */
+ ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm const comm,
+ int const minCommonNodes ) override;
+
+
+ /**
+ * @brief Get the number of refinement iterations
+ */
+ int getNumRefinements() const override { return 0; }
+
+ /**
+ * @brief Set the number of refinement iterations (no-op for NoOpEngine)
+ * @param numRefinements Number of refinement iterations (ignored)
+ */
+ void setNumRefinements( int const numRefinements ) override
+ {
+ GEOS_UNUSED_VAR( numRefinements );
+ }
+};
+
+} // namespace geos
+
+#endif // GEOS_PARTITIONER_NOOPENGINE_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/PTScotchEngine.cpp b/src/coreComponents/mesh/mpiCommunications/PTScotchEngine.cpp
new file mode 100644
index 00000000000..b350704c9a0
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/PTScotchEngine.cpp
@@ -0,0 +1,186 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file PTScotchEngine.cpp
+ */
+
+#include "PTScotchEngine.hpp"
+#include "common/MpiWrapper.hpp"
+#include "common/TimingMacros.hpp"
+
+#ifdef GEOS_USE_PARMETIS
+ #include "ParMetisEngine.hpp" // For fallback
+#endif
+
+
+#ifdef GEOS_USE_PTSCOTCH
+extern "C"
+{
+#include <ptscotch.h>
+}
+
+#define GEOS_SCOTCH_CHECK( call ) \
+ do { \
+ auto const ierr = call; \
+ GEOS_ERROR_IF_NE_MSG( ierr, 0, "Error in call to PT-Scotch library:\n" << #call ); \
+ } while( false )
+
+namespace geos
+{
+
+using namespace dataRepository;
+
+static_assert( std::is_same< int64_t, SCOTCH_Num >::value,
+ "Non-matching index types. Scotch must be configured with 64-bit indices (SCOTCH_Num)." );
+static_assert( std::is_same< pmet_idx_t, SCOTCH_Num >::value,
+ "pmet_idx_t must match SCOTCH_Num when using PTScotchEngine." );
+
+#else
+
+namespace geos
+{
+
+using namespace dataRepository;
+
+#endif // GEOS_USE_PTSCOTCH
+
+PTScotchEngine::PTScotchEngine( string const & name,
+ dataRepository::Group * const parent )
+ : GraphPartitionEngine( name, parent ),
+ m_strategy( "default" )
+{
+ registerWrapper( viewKeyStruct::strategyString(), &m_strategy ).
+ setApplyDefaultValue( "default" ).
+ setInputFlag( dataRepository::InputFlags::OPTIONAL ).
+ setDescription( "PT-Scotch partitioning strategy. "
+ "Options: 'default', 'quality', 'speed', 'balance'. "
+ "Default: 'default'." );
+}
+
+PTScotchEngine::~PTScotchEngine()
+{}
+
+array1d< pmet_idx_t >
+PTScotchEngine::partition( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t const numParts,
+ MPI_Comm comm )
+{
+ GEOS_MARK_FUNCTION;
+
+#ifndef GEOS_USE_PTSCOTCH
+ GEOS_UNUSED_VAR( graph, vertDist, numParts, comm );
+ GEOS_ERROR( "GEOS was not built with PT-Scotch support. "
+ "Reconfigure with -DENABLE_PTSCOTCH=ON" );
+ return array1d< pmet_idx_t >();
+#else
+
+ GEOS_UNUSED_VAR( vertDist );
+
+ SCOTCH_Num const numLocalVerts = graph.size();
+ array1d< SCOTCH_Num > part( numLocalVerts );
+
+ // If only one partition is requested, no partitioning is needed.
+ // All elements are assigned to rank 0.
+ if( numParts == 1 )
+ {
+ part.setValues< serialPolicy >( 0 );
+ return part;
+ }
+
+ // Initialize the distributed graph structure for Scotch.
+ SCOTCH_Dgraph * const gr = SCOTCH_dgraphAlloc();
+ GEOS_SCOTCH_CHECK( SCOTCH_dgraphInit( gr, comm ) );
+
+ SCOTCH_Num const numLocalEdges = graph.getOffsets()[numLocalVerts];
+
+ // SCOTCH_dgraphBuild takes non-const pointers, so we must cast away constness.
+ // This is technically UB if the library were to write into these arrays, but matches how the API is used.
+ SCOTCH_Num * const offsets = const_cast< SCOTCH_Num * >( graph.getOffsets() );
+ SCOTCH_Num * const edges = const_cast< SCOTCH_Num * >( graph.getValues() );
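+
+ // Example (illustrative): for a local CSR graph with offsets = { 0, 2, 5 } and
+ // edges = { 1, 4, 0, 2, 3 }, local vertex 0 is adjacent to vertices { 1, 4 } and
+ // local vertex 1 is adjacent to { 0, 2, 3 }.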
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "PTScotchEngine: Partitioning {} local vertices into {} parts (strategy: {})",
+ numLocalVerts, numParts, m_strategy ) );
+
+ // Build the distributed graph from the local CSR representation.
+ GEOS_SCOTCH_CHECK( SCOTCH_dgraphBuild( gr, // graphptr
+ 0, // baseval (0-based indexing)
+ numLocalVerts,// vertlocnbr
+ numLocalVerts,// vertlocmax
+ offsets, // vertloctab
+ offsets + 1, // vendloctab
+ nullptr, // veloloctab (no vertex weights)
+ nullptr, // vlblloctab (no vertex labels)
+ numLocalEdges,// edgelocnbr
+ numLocalEdges,// edgelocsiz
+ edges, // edgeloctab
+ nullptr, // edgegsttab (no ghost edges)
+ nullptr // edloloctab (no edge weights)
+ ) );
+
+ // Verify the consistency of the distributed graph data structure.
+ GEOS_SCOTCH_CHECK( SCOTCH_dgraphCheck( gr ) );
+
+ // Use the default PT-Scotch strategy.
+ SCOTCH_Strat * const strategy = SCOTCH_stratAlloc();
+ GEOS_SCOTCH_CHECK( SCOTCH_stratInit( strategy ) );
+
+ // Perform the partitioning.
+ GEOS_SCOTCH_CHECK( SCOTCH_dgraphPart( gr, numParts, strategy, part.data() ) );
+
+ // Clean up Scotch data structures.
+ SCOTCH_stratExit( strategy );
+ SCOTCH_dgraphExit( gr );
+
+ GEOS_LOG_RANK_0( "PTScotchEngine: Partition complete" );
+
+ return part;
+
+#endif // GEOS_USE_PTSCOTCH
+}
+
+
+
+ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+PTScotchEngine::meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int const minCommonNodes )
+{
+#ifdef GEOS_USE_PARMETIS
+
+ // Fallback to ParMETIS implementation
+ return ParMetisEngine::parmetisMeshToDual( elemToNodes, elemDist, comm, minCommonNodes );
+
+#else
+
+ GEOS_UNUSED_VAR( elemToNodes );
+ GEOS_UNUSED_VAR( elemDist );
+ GEOS_UNUSED_VAR( comm );
+ GEOS_UNUSED_VAR( minCommonNodes );
+
+ GEOS_ERROR( "PTScotchEngine::meshToDual requires ParMETIS for mesh-to-dual conversion. "
+ "Either build with GEOS_USE_PARMETIS=ON or provide the dual graph directly." );
+ return ArrayOfArrays< pmet_idx_t, pmet_idx_t >();
+
+#endif
+}
+
+// Register in the GraphPartitionEngine catalog
+REGISTER_CATALOG_ENTRY( GraphPartitionEngine, PTScotchEngine,
+ string const &, dataRepository::Group * const )
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/PTScotchEngine.hpp b/src/coreComponents/mesh/mpiCommunications/PTScotchEngine.hpp
new file mode 100644
index 00000000000..505ec165797
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/PTScotchEngine.hpp
@@ -0,0 +1,138 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file PTScotchEngine.hpp
+ */
+
+#ifndef GEOS_PARTITIONER_PTSCOTCHENGINE_HPP_
+#define GEOS_PARTITIONER_PTSCOTCHENGINE_HPP_
+
+#include "GraphPartitionEngine.hpp"
+
+namespace geos
+{
+
+/**
+ * @class PTScotchEngine
+ * @brief Graph partitioning engine using the PT-Scotch library
+ *
+ * This engine wraps PT-Scotch (Parallel Scotch).
+ */
+class PTScotchEngine : public GraphPartitionEngine
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this engine instance
+ * @param parent The parent group
+ */
+ explicit PTScotchEngine( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~PTScotchEngine() override;
+
+ /**
+ * @brief Structure to hold XML input keys
+ */
+ struct viewKeyStruct
+ {
+ /// Partitioning strategy
+ static constexpr char const * strategyString() { return "strategy"; }
+ };
+
+ /**
+ * @brief Return the catalog name for factory registration
+ * @return The string "PTScotch"
+ */
+ static string catalogName() { return "PTScotch"; }
+
+ /**
+ * @brief Partition a distributed graph using PT-Scotch
+ *
+ * Calls SCOTCH_dgraphPart to perform parallel graph partitioning.
+ *
+ * @param graph Edge connectivity (locally owned vertices)
+ * @param vertDist Vertex distribution across ranks
+ * @param numParts Target number of partitions
+ * @param comm MPI communicator
+ * @return Array mapping each local vertex to its target partition
+ */
+ array1d< pmet_idx_t > partition(
+ ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t const numParts,
+ MPI_Comm comm ) override;
+
+
+ /**
+ * @brief Convert mesh to dual graph
+ *
+ * PT-Scotch doesn't have a native mesh-to-dual function, so we fall back
+ * to the ParMETIS implementation when available.
+ */
+ ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int const minCommonNodes ) override;
+
+ /**
+ * @brief Get the number of refinement iterations
+ *
+ * @note PT-Scotch doesn't expose refinements the same way as ParMETIS.
+ * This returns 0 to indicate it's not applicable.
+ *
+ * @return 0 (not applicable for PT-Scotch)
+ */
+ int getNumRefinements() const override { return 0; }
+
+ /**
+ * @brief Set the number of refinement iterations (no-op for PT-Scotch)
+ * @param numRefinements Number of refinement iterations (ignored)
+ */
+ void setNumRefinements( int const numRefinements ) override
+ {
+ GEOS_UNUSED_VAR( numRefinements );
+ }
+
+ /**
+ * @brief Set the partitioning strategy
+ * @param strategy Strategy string (e.g., "default", "quality", "speed")
+ */
+ void setStrategy( string const & strategy )
+ {
+ m_strategy = strategy;
+ }
+
+ /**
+ * @brief Get the partitioning strategy
+ * @return Strategy string
+ */
+ string const & getStrategy() const { return m_strategy; }
+
+private:
+
+ /// Partitioning strategy (default, quality, speed, etc.)
+ string m_strategy;
+};
+
+} // namespace geos
+
+#endif // GEOS_PARTITIONER_PTSCOTCHENGINE_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/ParMetisEngine.cpp b/src/coreComponents/mesh/mpiCommunications/ParMetisEngine.cpp
new file mode 100644
index 00000000000..4f5f4634f06
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/ParMetisEngine.cpp
@@ -0,0 +1,193 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file ParMetisEngine.cpp
+ */
+
+#include "ParMetisEngine.hpp"
+#include "common/MpiWrapper.hpp"
+#include "common/TimingMacros.hpp"
+#include <numeric>
+
+#ifdef GEOS_USE_PARMETIS
+extern "C"
+{
+#include <parmetis.h>
+}
+#endif
+
+#define GEOS_PARMETIS_CHECK( call ) \
+ do { \
+ auto const ierr = call; \
+ GEOS_ERROR_IF_NE_MSG( ierr, METIS_OK, "Error in call to:\n" << #call ); \
+ } while( false )
+
+namespace geos
+{
+
+using namespace dataRepository;
+using camp::idx_t;
+
+static_assert( std::is_same< idx_t, pmet_idx_t >::value,
+ "Non-matching index types. ParMETIS must be configured with 64-bit indices." );
+
+ParMetisEngine::ParMetisEngine( string const & name,
+ dataRepository::Group * const parent )
+ : GraphPartitionEngine( name, parent ),
+ m_numRefinements( 0 )
+{}
+
+ParMetisEngine::~ParMetisEngine()
+{}
+
+array1d< pmet_idx_t >
+ParMetisEngine::partition( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t const numParts,
+ MPI_Comm comm )
+{
+ GEOS_MARK_FUNCTION;
+
+#ifndef GEOS_USE_PARMETIS
+ GEOS_UNUSED_VAR( graph, vertDist, numParts, comm );
+ GEOS_ERROR( "GEOS was not built with ParMETIS support. "
+ "Reconfigure with -DENABLE_PARMETIS=ON" );
+ return array1d< pmet_idx_t >();
+#else
+
+ array1d< pmet_idx_t > part( graph.size() );
+
+ // If only one partition is requested, no partitioning is needed.
+ // All elements are assigned to rank 0.
+ if( numParts == 1 )
+ {
+ part.setValues< serialPolicy >( 0 );
+ return part;
+ }
+
+ // Set up ParMETIS parameters
+ idx_t wgtflag = 0; // No weights on vertices or edges
+ idx_t numflag = 0; // C-style numbering (starts at 0)
+ idx_t ncon = 1; // Number of constraints (1, for vertex count balance)
+ idx_t npart = numParts;
+ idx_t options[4] = { 1, 0, 2022, PARMETIS_PSR_UNCOUPLED };
+ idx_t edgecut = 0;
+ real_t ubvec = 1.05; // Imbalance tolerance
+
+ // Set target partition weights for even distribution
+ array1d< real_t > tpwgts( numParts );
+ tpwgts.setValues< serialPolicy >( 1.0f / static_cast< real_t >( numParts ) );
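+
+ // Example (illustrative): for numParts = 4, tpwgts = { 0.25, 0.25, 0.25, 0.25 }, i.e. each
+ // target partition should receive roughly a quarter of the vertices, within the 5%
+ // imbalance tolerance set by ubvec.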
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "ParMetisEngine: Partitioning {} local vertices into {} parts",
+ graph.size(), numParts ) );
+
+ // ParMETIS has an unusual API that requires non-const pointers for read-only data.
+ // We must cast away constness, which is technically UB but is how the library is used.
+ GEOS_PARMETIS_CHECK( ParMETIS_V3_PartKway( const_cast< idx_t * >( vertDist.data() ),
+ const_cast< idx_t * >( graph.getOffsets() ),
+ const_cast< idx_t * >( graph.getValues() ),
+ nullptr, nullptr, &wgtflag,
+ &numflag, &ncon, &npart, tpwgts.data(),
+ &ubvec, options, &edgecut, part.data(), &comm ) );
+
+ // Perform refinements if requested.
+ if( m_numRefinements > 0 )
+ {
+ GEOS_LOG_RANK_0( GEOS_FMT( "ParMetisEngine: Performing {} refinements", m_numRefinements ) );
+ GEOS_PARMETIS_CHECK( ParMETIS_V3_RefineKway( const_cast< idx_t * >( vertDist.data() ),
+ const_cast< idx_t * >( graph.getOffsets() ),
+ const_cast< idx_t * >( graph.getValues() ),
+ nullptr, nullptr, &wgtflag,
+ &numflag, &ncon, &npart, tpwgts.data(),
+ &ubvec, options, &edgecut, part.data(), &comm ) );
+ }
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "ParMetisEngine: Partition complete, edge-cut = {}", edgecut ) );
+
+ return part;
+
+#endif // GEOS_USE_PARMETIS
+}
+
+ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ParMetisEngine::meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int const minCommonNodes )
+{
+ return parmetisMeshToDual( elemToNodes, elemDist, comm, minCommonNodes );
+}
+
+ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ParMetisEngine::parmetisMeshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int const minCommonNodes )
+{
+ GEOS_MARK_FUNCTION;
+
+#ifndef GEOS_USE_PARMETIS
+ GEOS_UNUSED_VAR( elemToNodes, elemDist, comm, minCommonNodes );
+ GEOS_ERROR( "GEOS was not built with ParMETIS support. "
+ "Reconfigure with -DENABLE_PARMETIS=ON" );
+ return ArrayOfArrays< pmet_idx_t, pmet_idx_t >();
+#else
+
+ idx_t const numElems = elemToNodes.size();
+
+ // ParMETIS expects the data as two contiguous arrays: one for values, the other for offsets.
+ // Our `ArrayOfArrays` allows reserving extra space for further element insertion,
+ // but this is not compatible with what ParMETIS requires.
+ GEOS_ASSERT_EQ_MSG( std::accumulate( elemToNodes.getSizes(), elemToNodes.getSizes() + numElems, 0 ),
+ elemToNodes.valueCapacity(),
+ "Internal error. The element to nodes mapping must be strictly allocated for compatibility with a third party library." );
+
+ pmet_idx_t numflag = 0;
+ pmet_idx_t ncommonnodes = minCommonNodes;
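+
+ // Example (illustrative): with minCommonNodes = 3, two hexahedra sharing a quadrilateral
+ // face (4 common nodes) become adjacent in the dual graph, while two hexahedra touching
+ // only along an edge (2 common nodes) do not.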
+ pmet_idx_t * xadj;
+ pmet_idx_t * adjncy;
+
+ GEOS_LOG_RANK_0( GEOS_FMT( "ParMetisEngine: Converting mesh to dual graph ({} local elements, min common nodes = {})",
+ numElems, minCommonNodes ) );
+
+ // Technical UB if ParMETIS writes into these arrays; in practice we discard them right after
+ GEOS_PARMETIS_CHECK( ParMETIS_V3_Mesh2Dual( const_cast< pmet_idx_t * >( elemDist.data() ),
+ const_cast< pmet_idx_t * >( elemToNodes.getOffsets() ),
+ const_cast< pmet_idx_t * >( elemToNodes.getValues() ),
+ &numflag, &ncommonnodes, &xadj, &adjncy, &comm ) );
+
+ ArrayOfArrays< pmet_idx_t, pmet_idx_t > graph;
+ graph.resizeFromOffsets( numElems, xadj );
+
+ // There is no way to direct-copy values into ArrayOfArrays without UB (casting away const)
+ forAll< parallelHostPolicy >( numElems, [xadj, adjncy, graph = graph.toView()]( localIndex const k )
+ {
+ graph.appendToArray( k, adjncy + xadj[k], adjncy + xadj[k+1] );
+ } );
+
+ METIS_Free( xadj );
+ METIS_Free( adjncy );
+
+ return graph;
+
+#endif // GEOS_USE_PARMETIS
+}
+
+// Register in the GraphPartitionEngine catalog
+REGISTER_CATALOG_ENTRY( GraphPartitionEngine, ParMetisEngine,
+ string const &, dataRepository::Group * const )
+
+} // namespace geos
diff --git a/src/coreComponents/mesh/mpiCommunications/ParMetisEngine.hpp b/src/coreComponents/mesh/mpiCommunications/ParMetisEngine.hpp
new file mode 100644
index 00000000000..b0e55879d94
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/ParMetisEngine.hpp
@@ -0,0 +1,145 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file ParMetisEngine.hpp
+ */
+
+#ifndef GEOS_PARTITIONER_PARMETISENGINE_HPP_
+#define GEOS_PARTITIONER_PARMETISENGINE_HPP_
+
+#include "GraphPartitionEngine.hpp"
+
+namespace geos
+{
+
+/**
+ * @class ParMetisEngine
+ * @brief Graph partitioning engine using the ParMETIS library
+ */
+class ParMetisEngine : public GraphPartitionEngine
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this engine instance
+ * @param parent The parent group
+ */
+ explicit ParMetisEngine( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ virtual ~ParMetisEngine() override;
+
+ /**
+ * @brief Structure to hold XML input keys
+ */
+ struct viewKeyStruct
+ {
+ /// Number of refinement iterations
+ static constexpr char const * numRefinementsString() { return "numRefinements"; }
+ };
+
+ /**
+ * @brief Return the catalog name for factory registration
+ * @return The string "ParMetis"
+ */
+ static string catalogName() { return "ParMetis"; }
+
+ /**
+ * @brief Partition a distributed graph using ParMETIS
+ *
+ * Calls ParMETIS_V3_PartKway to perform multilevel k-way partitioning.
+ *
+ * @param graph Edge connectivity (locally owned vertices)
+ * @param vertDist Vertex distribution across ranks
+ * @param numParts Target number of partitions
+ * @param comm MPI communicator
+ * @return Array mapping each local vertex to its target partition
+ */
+ array1d< pmet_idx_t > partition(
+ ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & graph,
+ arrayView1d< pmet_idx_t const > const & vertDist,
+ pmet_idx_t const numParts,
+ MPI_Comm comm ) override;
+
+ /**
+ * @brief Convert element-node connectivity to element-element dual graph
+ *
+ * Given a mesh represented by element-node connectivity, construct the dual graph
+ * where:
+ * - Vertices = elements
+ * - Edges = element pairs that share at least minCommonNodes nodes
+ *
+ * This is used by MeshPartitioner to build the graph from a mesh.
+ *
+ * @param elemToNodes Element-to-node connectivity map.
+ * elemToNodes[i] = global node IDs of element i.
+ * @param elemDist Element distribution across ranks.
+ * elemDist[rank] = global index of first element on rank.
+ * @param comm MPI communicator
+ * @param minCommonNodes Minimum number of shared nodes to create an edge (typically 3 for 3D)
+ * @return Dual graph where vertices=elements, edges=element adjacency
+ */
+ ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ meshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int const minCommonNodes ) override;
+
+ /**
+ * @brief Static helper: ParMETIS mesh-to-dual implementation
+ *
+ * This can be called by other engines (e.g., PTScotch) as a fallback.
+ */
+ static ArrayOfArrays< pmet_idx_t, pmet_idx_t >
+ parmetisMeshToDual( ArrayOfArraysView< pmet_idx_t const, pmet_idx_t > const & elemToNodes,
+ arrayView1d< pmet_idx_t const > const & elemDist,
+ MPI_Comm comm,
+ int const minCommonNodes );
+
+ /**
+ * @brief Get the number of refinement iterations
+ * @return Number of refinement iterations
+ */
+ int getNumRefinements() const override { return m_numRefinements; }
+
+ /**
+ * @brief Set the number of refinement iterations
+ * @param numRefinements Number of refinement iterations
+ */
+ void setNumRefinements( int const numRefinements ) override
+ {
+ GEOS_THROW_IF_LT_MSG( numRefinements, 0, "Number of refinements must be non-negative", InputError );
+ m_numRefinements = numRefinements;
+ }
+
+private:
+
+ /// Number of refinement iterations
+ int m_numRefinements;
+};
+
+} // namespace geos
+
+#endif // GEOS_PARTITIONER_PARMETISENGINE_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/SpatialPartition.cpp b/src/coreComponents/mesh/mpiCommunications/ParticleCartesianPartitioner.cpp
similarity index 66%
rename from src/coreComponents/mesh/mpiCommunications/SpatialPartition.cpp
rename to src/coreComponents/mesh/mpiCommunications/ParticleCartesianPartitioner.cpp
index 5beecb1842c..311947e7385 100644
--- a/src/coreComponents/mesh/mpiCommunications/SpatialPartition.cpp
+++ b/src/coreComponents/mesh/mpiCommunications/ParticleCartesianPartitioner.cpp
@@ -13,332 +13,58 @@
* ------------------------------------------------------------------------------------------------------------
*/
-#include "SpatialPartition.hpp"
-#include "codingUtilities/Utilities.hpp"
-#include "LvArray/src/genericTensorOps.hpp"
-#include "mesh/mpiCommunications/MPI_iCommData.hpp"
-#ifdef GEOS_USE_TRILINOS
-#include "mesh/graphs/ZoltanGraphColoring.hpp"
-#else
-#include "mesh/graphs/RLFGraphColoringMPI.hpp"
-#endif
-
-#include
-
-namespace geos
-{
-
-namespace
-{
-
-// Modulo
-// returns a positive value regardless of the sign of numerator
-real64 Mod( real64 num, real64 denom )
-{
- if( fabs( denom ) adjncy;
- adjncy.reserve( m_metisNeighborList.size());
- std::copy( m_metisNeighborList.begin(), m_metisNeighborList.end(), std::back_inserter( adjncy ));
-#ifdef GEOS_USE_TRILINOS
- geos::graph::ZoltanGraphColoring coloring;
-#else
- geos::graph::RLFGraphColoringMPI coloring;
-#endif
- int color = coloring.colorGraph( adjncy );
-
- if( !coloring.isColoringValid( adjncy, color ))
- {
- GEOS_ERROR( "Invalid partition coloring: two neighboring partitions share the same color" );
- }
- m_numColors = coloring.getNumberOfColors( color );
- return color;
- }
-}
-
-void SpatialPartition::addNeighbors( const unsigned int idim,
- MPI_Comm & cartcomm,
- int * ncoords )
-{
-
- if( idim == m_nsdof )
- {
- bool me = true;
- for( int i = 0; i < m_nsdof; i++ )
- {
- if( ncoords[i] != this->m_coords( i ))
- {
- me = false;
- break;
- }
- }
- if( !me )
- {
- int const rank = MpiWrapper::cartRank( cartcomm, ncoords );
- m_neighbors.push_back( NeighborCommunicator( rank ) );
- }
- }
- else
- {
- const int dim = this->m_Partitions( LvArray::integerConversion< localIndex >( idim ) );
- const bool periodic = this->m_Periodic( LvArray::integerConversion< localIndex >( idim ) );
- for( int i = -1; i < 2; i++ )
- {
- ncoords[idim] = this->m_coords( LvArray::integerConversion< localIndex >( idim ) ) + i;
- bool ok = true;
- if( periodic )
- {
- if( ncoords[idim] < 0 )
- ncoords[idim] = dim - 1;
- else if( ncoords[idim] >= dim )
- ncoords[idim] = 0;
- }
- else
- {
- ok = ncoords[idim] >= 0 && ncoords[idim] < dim;
- }
- if( ok )
- {
- addNeighbors( idim + 1, cartcomm, ncoords );
- }
- }
- }
-}
-
-void SpatialPartition::updateSizes( arrayView1d< real64 > const domainL,
- real64 const dt )
+void ParticleCartesianPartitioner::updateSizes( arrayView1d< real64 > const domainL,
+ real64 const dt )
{
for( int i=0; i<3; i++ )
{
real64 ratio = 1.0 + domainL[i] * dt;
- m_min[i] *= ratio;
- m_max[i] *= ratio;
- //m_PartitionLocations[i] *= ratio; ?
- m_blockSize[i] *= ratio;
- m_gridSize[i] *= ratio;
- m_gridMin[i] *= ratio;
- m_gridMax[i] *= ratio;
- m_contactGhostMin[i] *= ratio;
- m_contactGhostMax[i] *= ratio;
+ m_localMin[i] *= ratio;
+ m_localMax[i] *= ratio;
+ m_localSize[i] *= ratio;
+ m_globalGridSize[i] *= ratio;
+ m_globalGridMin[i] *= ratio;
+ m_globalGridMax[i] *= ratio;
}
}
-void SpatialPartition::setSizes( real64 const ( &min )[ 3 ],
- real64 const ( &max )[ 3 ] )
-{
-
- {
- //get size of problem and decomposition
- m_size = MpiWrapper::commSize( MPI_COMM_GEOS );
-
- //check to make sure our dimensions agree
- {
- string_view partitionsLogMessage =
- "The total number of processes = {} does not correspond to the total number of partitions = {}.\n"
- "The number of cells in an axis cannot be lower that the partition count of this axis\n";
-
- int nbPartitions = 1;
- for( int i = 0; i < m_nsdof; i++ )
- {
- nbPartitions *= this->m_Partitions( i );
- }
- GEOS_ERROR_IF_NE_MSG( nbPartitions, m_size, GEOS_FMT( partitionsLogMessage, m_size, nbPartitions ) );
- }
-
- //get communicator, rank, and coordinates
- MPI_Comm cartcomm;
- {
- int reorder = 0;
- MpiWrapper::cartCreate( MPI_COMM_GEOS, m_nsdof, m_Partitions.data(), m_Periodic.data(), reorder, &cartcomm );
- }
- m_rank = MpiWrapper::commRank( cartcomm );
- MpiWrapper::cartCoords( cartcomm, m_rank, m_nsdof, m_coords.data());
-
- //add neighbors
- {
- int ncoords[m_nsdof];
- m_neighbors.clear();
- addNeighbors( 0, cartcomm, ncoords );
- }
-
- MpiWrapper::commFree( cartcomm );
- }
-
- // global values
- LvArray::tensorOps::copy< 3 >( m_gridMin, min );
- LvArray::tensorOps::copy< 3 >( m_gridMax, max );
- LvArray::tensorOps::copy< 3 >( m_gridSize, max );
- LvArray::tensorOps::subtract< 3 >( m_gridSize, min );
-
- // block values
- LvArray::tensorOps::copy< 3 >( m_blockSize, m_gridSize );
- LvArray::tensorOps::copy< 3 >( m_min, min );
- for( int i = 0; i < m_nsdof; ++i )
- {
- const int nloc = m_Partitions( i ) - 1;
- const localIndex nlocl = static_cast< localIndex >(nloc);
- if( m_PartitionLocations[i].empty() )
- {
- // the default "even" spacing
- m_blockSize[ i ] /= m_Partitions( i );
- m_min[ i ] += m_coords( i ) * m_blockSize[ i ];
- m_max[ i ] = min[ i ] + (m_coords( i ) + 1) * m_blockSize[ i ];
-
- m_PartitionLocations[i].resize( nlocl );
- for( localIndex j = 0; j < m_PartitionLocations[ i ].size(); ++j )
- {
- m_PartitionLocations[ i ][ j ] = (j+1) * m_blockSize[ i ];
- }
- }
- else if( nlocl == m_PartitionLocations[i].size() )
- {
- const int parIndex = m_coords[i];
- if( parIndex == 0 )
- {
- m_min[i] = min[i];
- m_max[i] = m_PartitionLocations[i][parIndex];
- }
- else if( parIndex == nloc )
- {
- m_min[i] = m_PartitionLocations[i][parIndex-1];
- m_max[i] = max[i];
- }
- else
- {
- m_min[i] = m_PartitionLocations[i][parIndex-1];
- m_max[i] = m_PartitionLocations[i][parIndex];
- }
- }
- else
- {
- GEOS_ERROR( "SpatialPartition::setSizes(): number of partition locations does not equal number of partitions - 1\n" );
- }
- }
-}
-
-bool SpatialPartition::isCoordInPartition( const real64 & coord, const int dir ) const
+bool ParticleCartesianPartitioner::isCoordInPartitionBoundingBox( const R1Tensor & elemCenter,
+ const real64 & boundaryRadius ) const
+// Test a point relative to a boundary box. If non-zero buffer specified, expand the box
{
- bool rval = true;
- const int i = dir;
- if( m_Periodic( i ))
- {
- if( m_Partitions( i ) != 1 )
- {
- real64 localCenter = MapValueToRange( coord, m_gridMin[ i ], m_gridMax[ i ] );
- rval = rval && localCenter >= m_min[ i ] && localCenter < m_max[ i ];
- }
-
- }
- else
- {
- rval = rval && (m_Partitions[ i ] == 1 || (coord >= m_min[ i ] && coord < m_max[ i ]));
- }
-
- return rval;
-}
-
-bool SpatialPartition::isCoordInPartitionBoundingBox( const R1Tensor & elemCenter,
- const real64 & boundaryRadius ) const
-// test a point relative to a boundary box. If non-zero buffer specified, expand the box.
-{
- for( int i = 0; i < m_nsdof; i++ )
+ for( int i = 0; i < m_ndim; i++ )
{
// Is particle already in bounds of partition?
- if( !(m_Partitions( i )==1 || ( elemCenter[i] >= (m_min[i] - boundaryRadius) && elemCenter[i] <= (m_max[i] + boundaryRadius) ) ) )
+ if( !(m_partitionCounts( i )==1 || ( elemCenter[i] >= (m_localMin[i] - boundaryRadius) && elemCenter[i] <= (m_localMax[i] + boundaryRadius) ) ) )
{
// Particle not in bounds, check if direction has a periodic boundary
- if( m_Periodic( i ) && (m_coords[i] == 0 || m_coords[i] == m_Partitions[i] - 1) )
+ if( m_periodic( i ) && (m_coords[i] == 0 || m_coords[i] == m_partitionCounts[i] - 1) )
{
// Partition minimum boundary is periodic
- if( m_coords[i] == 0 && ( (elemCenter[i] - m_gridSize[i]) < (m_min[i] - boundaryRadius) ) )
+ if( m_coords[i] == 0 && ( (elemCenter[i] - m_globalGridSize[i]) < (m_localMin[i] - boundaryRadius) ) )
{
return false;
}
// Partition maximum boundary is periodic
- if( m_coords[i] == m_Partitions[i] - 1 && ( (elemCenter[i] + m_gridSize[i]) > (m_max[i] + boundaryRadius) ) )
+ if( m_coords[i] == m_partitionCounts[i] - 1 && ( (elemCenter[i] + m_globalGridSize[i]) > (m_localMax[i] + boundaryRadius) ) )
{
return false;
}
@@ -353,17 +79,10 @@ bool SpatialPartition::isCoordInPartitionBoundingBox( const R1Tensor & elemCente
return true;
}
-void SpatialPartition::setContactGhostRange( const real64 bufferSize )
-{
- LvArray::tensorOps::copy< 3 >( m_contactGhostMin, m_min );
- LvArray::tensorOps::addScalar< 3 >( m_contactGhostMin, -bufferSize );
- LvArray::tensorOps::copy< 3 >( m_contactGhostMax, m_max );
- LvArray::tensorOps::addScalar< 3 >( m_contactGhostMax, bufferSize );
-}
-
-void SpatialPartition::repartitionMasterParticles( ParticleSubRegion & subRegion,
- MPI_iCommData & commData )
+void ParticleCartesianPartitioner::repartitionMasterParticles( DomainPartition & domain,
+ ParticleSubRegion & subRegion,
+ MPI_iCommData & commData )
{
/*
@@ -392,11 +111,15 @@ void SpatialPartition::repartitionMasterParticles( ParticleSubRegion & subRegion
// has a Rank=-1 at the end of this function is lost and needs to be deleted. This
// should only happen if it has left the global domain (hopefully at an outflow b.c.).
+ // Get first-order neighbors from the domain (we'll use numFirstOrderNeighbors as the limit for iteration)
+ int const numFirstOrderNeighbors = domain.getNumFirstOrderNeighbors();
+ stdVector< NeighborCommunicator > & neighbors = domain.getNeighbors();
+
arrayView2d< real64 > const particleCenter = subRegion.getParticleCenter();
arrayView1d< localIndex > const particleRank = subRegion.getParticleRank();
array1d< R1Tensor > outOfDomainParticleCoordinates;
stdVector< localIndex > outOfDomainParticleLocalIndices;
- unsigned int nn = m_neighbors.size(); // Number of partition neighbors.
+ unsigned int nn = numFirstOrderNeighbors; // Number of first-order partition neighbors.
forAll< serialPolicy >( subRegion.size(), [&, particleCenter, particleRank] GEOS_HOST ( localIndex const pp )
{
@@ -407,7 +130,7 @@ void SpatialPartition::repartitionMasterParticles( ParticleSubRegion & subRegion
p_x[i] = particleCenter[pp][i];
inPartition = inPartition && isCoordInPartition( p_x[i], i );
}
- if( particleRank[pp]==this->m_rank && !inPartition )
+ if( particleRank[pp]==this->m_cartRank && !inPartition )
{
outOfDomainParticleCoordinates.emplace_back( p_x ); // Store the coordinate of the out-of-domain particle
outOfDomainParticleLocalIndices.push_back( pp ); // Store the local index "pp" for the current coordinate.
@@ -423,8 +146,10 @@ void SpatialPartition::repartitionMasterParticles( ParticleSubRegion & subRegion
sendCoordinateListToNeighbors( outOfDomainParticleCoordinates.toView(), // input: Single list of coordinates sent to all neighbors
commData, // input: Solver MPI communicator
- particleCoordinatesReceivedFromNeighbors ); // output: List of lists of coordinates received from each
+ particleCoordinatesReceivedFromNeighbors, // output: List of lists of coordinates received from each
// neighbor.
+ neighbors, // input: Neighbors to communicate with
+ numFirstOrderNeighbors ); // input: Number of first-order neighbors
@@ -460,7 +185,9 @@ void SpatialPartition::repartitionMasterParticles( ParticleSubRegion & subRegion
sendListOfIndicesToNeighbors< localIndex >( particleListIndicesRequestingFromNeighbors,
commData,
- particleListIndicesRequestedFromNeighbors );
+ particleListIndicesRequestedFromNeighbors,
+ neighbors,
+ numFirstOrderNeighbors );
// (5) Update the ghost rank of the out-of-domain particles to be equal to the rank
@@ -487,20 +214,20 @@ void SpatialPartition::repartitionMasterParticles( ParticleSubRegion & subRegion
particleLocalIndicesRequestedFromNeighbors[n][k] = pp;
// Set ghost rank of the particle equal to neighbor rank.
- particleRank[pp] = m_neighbors[n].neighborRank();
+ particleRank[pp] = neighbors[n].neighborRank();
} );
}
// Check that there is exactly one processor requesting each out-of-domain particle.
if( numberOfRequestedParticles != outOfDomainParticleLocalIndices.size())
{
- std::cout << "Rank " << m_rank << " has requests for " << numberOfRequestedParticles << " out of " << outOfDomainParticleLocalIndices.size() << " out-of-domain particles" << std::endl;
+ std::cout << "Rank " << m_cartRank << " has requests for " << numberOfRequestedParticles << " out of " << outOfDomainParticleLocalIndices.size() << " out-of-domain particles" << std::endl;
}
for( size_t i=0; i( particleGlobalIndicesSendingToNeighbors,
commData,
- particleGlobalIndicesReceivedFromNeighbors );
+ particleGlobalIndicesReceivedFromNeighbors,
+ neighbors,
+ numFirstOrderNeighbors );
// (4) Look through the received coordinates and make a list of the particles that are within
@@ -677,7 +413,7 @@ void SpatialPartition::getGhostParticlesFromNeighboringPartitions( DomainPartiti
if( gI == particleGlobalID[p] )
{
// The particle already exists as a ghost on this partition, so we should update its rank.
- particleRank[p] = m_neighbors[n].neighborRank();
+ particleRank[p] = neighbors[n].neighborRank();
alreadyHere = true;
}
} );
@@ -698,14 +434,16 @@ void SpatialPartition::getGhostParticlesFromNeighboringPartitions( DomainPartiti
sendListOfIndicesToNeighbors< globalIndex >( particleGlobalIndicesRequestingFromNeighbors,
commData,
- particleGlobalIndicesRequestedFromNeighbors );
+ particleGlobalIndicesRequestedFromNeighbors,
+ neighbors,
+ numFirstOrderNeighbors );
// (6) Temporarily set the ghost rank of all ghosts to "-1". After ghosts are unpacked from the
// masters, the ghost rank will be overwritten. At the end of this function, any ghosts that
// still have ghostRank=-1 are orphans and need to be deleted.
- int partitionRank = this->m_rank;
+ int partitionRank = this->m_cartRank;
forAll< parallelHostPolicy >( subRegion.size(), [=] GEOS_HOST ( localIndex const p ) // TODO: Worth moving to device?
{
if( particleRank[p] != partitionRank )
@@ -753,7 +491,9 @@ void SpatialPartition::getGhostParticlesFromNeighboringPartitions( DomainPartiti
newParticleStartingIndices,
numberOfIncomingParticles,
commData,
- particleLocalIndicesRequestedFromNeighbors );
+ particleLocalIndicesRequestedFromNeighbors,
+ neighbors,
+ numFirstOrderNeighbors );
}
@@ -778,14 +518,16 @@ void SpatialPartition::getGhostParticlesFromNeighboringPartitions( DomainPartiti
* @param[in] particleCoordinatesSendingToNeighbors Single list of coordinates sent to all neighbors
* @param[in] commData Solver's MPI communicator
* @param[in] particleCoordinatesReceivedFromNeighbors List of lists of coordinates received from each neighbor
+ * @param[in] neighbors Neighbors to communicate with
+ * @param[in] numFirstOrderNeighbors Number of first-order neighbors to communicate with
*/
-void SpatialPartition::sendCoordinateListToNeighbors( arrayView1d< R1Tensor > const & particleCoordinatesSendingToNeighbors,
- MPI_iCommData & commData,
- stdVector< array1d< R1Tensor > > & particleCoordinatesReceivedFromNeighbors
- )
+void ParticleCartesianPartitioner::sendCoordinateListToNeighbors( arrayView1d< R1Tensor > const & particleCoordinatesSendingToNeighbors,
+ MPI_iCommData & commData,
+ stdVector< array1d< R1Tensor > > & particleCoordinatesReceivedFromNeighbors,
+ stdVector< NeighborCommunicator > & neighbors,
+ int numFirstOrderNeighbors )
{
- // Number of neighboring partitions
- unsigned int nn = m_neighbors.size();
+ // Number of first-order neighboring partitions
+ unsigned int nn = numFirstOrderNeighbors;
// The send buffer is identical for each neighbor, and contains the packed coordinate list.
unsigned int sizeToBePacked = 0; // size of the outgoing data with packing=false
@@ -822,14 +564,14 @@ void SpatialPartition::sendCoordinateListToNeighbors( arrayView1d< R1Tensor > co
receiveRequest[n] = MPI_REQUEST_NULL;
// Perform comms
- m_neighbors[n].mpiISendReceive( &sizeOfPacked,
- 1,
- sendRequest[n],
- &(sizeOfReceived[n]),
- 1,
- receiveRequest[n],
- commData.commID(),
- MPI_COMM_GEOS );
+ neighbors[n].mpiISendReceive( &sizeOfPacked,
+ 1,
+ sendRequest[n],
+ &(sizeOfReceived[n]),
+ 1,
+ receiveRequest[n],
+ commData.commID(),
+ MPI_COMM_GEOS );
}
MPI_Waitall( nn, sendRequest.data(), sendStatus.data());
MPI_Waitall( nn, receiveRequest.data(), receiveStatus.data());
@@ -850,15 +592,15 @@ void SpatialPartition::sendCoordinateListToNeighbors( arrayView1d< R1Tensor > co
// Perform comms
receiveBuffer[n].resize( sizeOfReceived[n] );
- m_neighbors[n].mpiISendReceive( sendBuffer.data(), // TODO: This can't be sendBufferPtr, why not? I guess cuz sendBufferPtr gets
- // incremented (moved) during packing.
- sizeOfPacked,
- sendRequest[n],
- receiveBuffer[n].data(),
- sizeOfReceived[n],
- receiveRequest[n],
- commData.commID(),
- MPI_COMM_GEOS );
+ neighbors[n].mpiISendReceive( sendBuffer.data(), // TODO: This can't be sendBufferPtr, presumably because sendBufferPtr gets
+ // incremented (moved) during packing.
+ sizeOfPacked,
+ sendRequest[n],
+ receiveBuffer[n].data(),
+ sizeOfReceived[n],
+ receiveRequest[n],
+ commData.commID(),
+ MPI_COMM_GEOS );
}
MPI_Waitall( nn, sendRequest.data(), sendStatus.data());
MPI_Waitall( nn, receiveRequest.data(), receiveStatus.data());
@@ -874,12 +616,14 @@ void SpatialPartition::sendCoordinateListToNeighbors( arrayView1d< R1Tensor > co
}
template< typename indexType >
-void SpatialPartition::sendListOfIndicesToNeighbors( stdVector< array1d< indexType > > & listSendingToEachNeighbor,
- MPI_iCommData & commData,
- stdVector< array1d< indexType > > & listReceivedFromEachNeighbor )
+void ParticleCartesianPartitioner::sendListOfIndicesToNeighbors( stdVector< array1d< indexType > > & listSendingToEachNeighbor,
+ MPI_iCommData & commData,
+ stdVector< array1d< indexType > > & listReceivedFromEachNeighbor,
+ stdVector< NeighborCommunicator > & neighbors,
+ int numFirstOrderNeighbors )
{
- // Number of neighboring partitions
- unsigned int nn = m_neighbors.size();
+ // Number of first-order neighboring partitions
+ unsigned int nn = numFirstOrderNeighbors;
// Pack the outgoing lists of local indices
stdVector< unsigned int > sizeOfPacked( nn );
@@ -921,14 +665,14 @@ void SpatialPartition::sendListOfIndicesToNeighbors( stdVector< array1d< indexTy
receiveRequest[n] = MPI_REQUEST_NULL;
// Perform comms
- m_neighbors[n].mpiISendReceive( &(sizeOfPacked[n]),
- 1,
- sendRequest[n],
- &(sizeOfReceived[n]),
- 1,
- receiveRequest[n],
- commData.commID(),
- MPI_COMM_GEOS );
+ neighbors[n].mpiISendReceive( &(sizeOfPacked[n]),
+ 1,
+ sendRequest[n],
+ &(sizeOfReceived[n]),
+ 1,
+ receiveRequest[n],
+ commData.commID(),
+ MPI_COMM_GEOS );
}
MPI_Waitall( nn, sendRequest.data(), sendStatus.data());
MPI_Waitall( nn, receiveRequest.data(), receiveStatus.data());
@@ -949,15 +693,15 @@ void SpatialPartition::sendListOfIndicesToNeighbors( stdVector< array1d< indexTy
// Perform comms
receiveBuffer[n].resize( sizeOfReceived[n] );
- m_neighbors[n].mpiISendReceive( sendBuffer[n].data(), // TODO: This can't be sendBufferPtr, why not? I guess cuz sendBufferPtr gets
- // incremented (moved) during packing.
- sizeOfPacked[n],
- sendRequest[n],
- receiveBuffer[n].data(),
- sizeOfReceived[n],
- receiveRequest[n],
- commData.commID(),
- MPI_COMM_GEOS );
+ neighbors[n].mpiISendReceive( sendBuffer[n].data(), // TODO: This can't be sendBufferPtr, presumably because sendBufferPtr gets
+ // incremented (moved) during packing.
+ sizeOfPacked[n],
+ sendRequest[n],
+ receiveBuffer[n].data(),
+ sizeOfReceived[n],
+ receiveRequest[n],
+ commData.commID(),
+ MPI_COMM_GEOS );
}
MPI_Waitall( nn, sendRequest.data(), sendStatus.data());
MPI_Waitall( nn, receiveRequest.data(), receiveStatus.data());
@@ -972,13 +716,15 @@ void SpatialPartition::sendListOfIndicesToNeighbors( stdVector< array1d< indexTy
}
}
-void SpatialPartition::sendParticlesToNeighbor( ParticleSubRegionBase & subRegion,
- stdVector< int > const & newParticleStartingIndices,
- stdVector< int > const & numberOfIncomingParticles,
- MPI_iCommData & commData,
- stdVector< array1d< localIndex > > const & particleLocalIndicesToSendToEachNeighbor )
+void ParticleCartesianPartitioner::sendParticlesToNeighbor( ParticleSubRegionBase & subRegion,
+ stdVector< int > const & newParticleStartingIndices,
+ stdVector< int > const & numberOfIncomingParticles,
+ MPI_iCommData & commData,
+ stdVector< array1d< localIndex > > const & particleLocalIndicesToSendToEachNeighbor,
+ stdVector< NeighborCommunicator > & neighbors,
+ int numFirstOrderNeighbors )
{
- unsigned int nn = m_neighbors.size();
+ unsigned int nn = numFirstOrderNeighbors;
// Pack the send buffer for the particles being sent to each neighbor
stdVector< buffer_type > sendBuffer( nn );
@@ -1011,14 +757,14 @@ void SpatialPartition::sendParticlesToNeighbor( ParticleSubRegionBase & subRegio
receiveRequest[n] = MPI_REQUEST_NULL;
// Perform comms
- m_neighbors[n].mpiISendReceive( &(sizeOfPacked[n]),
- 1,
- sendRequest[n],
- &(sizeOfReceived[n]),
- 1,
- receiveRequest[n],
- commData.commID(),
- MPI_COMM_GEOS );
+ neighbors[n].mpiISendReceive( &(sizeOfPacked[n]),
+ 1,
+ sendRequest[n],
+ &(sizeOfReceived[n]),
+ 1,
+ receiveRequest[n],
+ commData.commID(),
+ MPI_COMM_GEOS );
}
MPI_Waitall( nn, sendRequest.data(), sendStatus.data() );
MPI_Waitall( nn, receiveRequest.data(), receiveStatus.data() );
@@ -1039,14 +785,14 @@ void SpatialPartition::sendParticlesToNeighbor( ParticleSubRegionBase & subRegio
// Perform comms
receiveBuffer[n].resize( sizeOfReceived[n] );
- m_neighbors[n].mpiISendReceive( sendBuffer[n].data(),
- sizeOfPacked[n],
- sendRequest[n],
- receiveBuffer[n].data(),
- sizeOfReceived[n],
- receiveRequest[n],
- commData.commID(),
- MPI_COMM_GEOS );
+ neighbors[n].mpiISendReceive( sendBuffer[n].data(),
+ sizeOfPacked[n],
+ sendRequest[n],
+ receiveBuffer[n].data(),
+ sizeOfReceived[n],
+ receiveRequest[n],
+ commData.commID(),
+ MPI_COMM_GEOS );
}
MPI_Waitall( nn, sendRequest.data(), sendStatus.data());
MPI_Waitall( nn, receiveRequest.data(), receiveStatus.data());
@@ -1061,4 +807,6 @@ void SpatialPartition::sendParticlesToNeighbor( ParticleSubRegionBase & subRegio
}
+REGISTER_CATALOG_ENTRY( DomainPartitioner, ParticleCartesianPartitioner,
+ string const &, Group * const )
}
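The three send routines above share the same two-phase non-blocking exchange: each pair of neighbors first trades packed buffer sizes, then the payloads, with an MPI_Waitall closing each phase. Below is a minimal standalone sketch of that handshake in plain MPI, independent of the GEOS types; the function name and the char-buffer payload are illustrative assumptions, not code from this PR.

#include <mpi.h>
#include <cstddef>
#include <vector>

// Two-phase neighbor exchange: sizes first, then payloads.
void exchangeWithNeighbors( std::vector< int > const & neighborRanks,
                            std::vector< char > const & sendBuffer,
                            std::vector< std::vector< char > > & receiveBuffers,
                            MPI_Comm comm )
{
  std::size_t const nn = neighborRanks.size();
  std::vector< unsigned int > sizeOfReceived( nn, 0 );
  std::vector< MPI_Request > requests( 2 * nn, MPI_REQUEST_NULL );

  // Phase 1: exchange sizes so each receiver can allocate its buffer.
  unsigned int const sizeOfPacked = static_cast< unsigned int >( sendBuffer.size() );
  for( std::size_t n = 0; n < nn; ++n )
  {
    MPI_Isend( &sizeOfPacked, 1, MPI_UNSIGNED, neighborRanks[n], 0, comm, &requests[2 * n] );
    MPI_Irecv( &sizeOfReceived[n], 1, MPI_UNSIGNED, neighborRanks[n], 0, comm, &requests[2 * n + 1] );
  }
  MPI_Waitall( static_cast< int >( requests.size() ), requests.data(), MPI_STATUSES_IGNORE );

  // Phase 2: exchange the packed payloads now that sizes are known.
  receiveBuffers.assign( nn, {} );
  for( std::size_t n = 0; n < nn; ++n )
  {
    receiveBuffers[n].resize( sizeOfReceived[n] );
    MPI_Isend( sendBuffer.data(), static_cast< int >( sizeOfPacked ), MPI_CHAR,
               neighborRanks[n], 1, comm, &requests[2 * n] );
    MPI_Irecv( receiveBuffers[n].data(), static_cast< int >( sizeOfReceived[n] ), MPI_CHAR,
               neighborRanks[n], 1, comm, &requests[2 * n + 1] );
  }
  MPI_Waitall( static_cast< int >( requests.size() ), requests.data(), MPI_STATUSES_IGNORE );
}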
diff --git a/src/coreComponents/mesh/mpiCommunications/ParticleCartesianPartitioner.hpp b/src/coreComponents/mesh/mpiCommunications/ParticleCartesianPartitioner.hpp
new file mode 100644
index 00000000000..2045dfe810e
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/ParticleCartesianPartitioner.hpp
@@ -0,0 +1,102 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file ParticleCartesianPartitioner.hpp
+ */
+
+#ifndef GEOS_PARTITIONER_PARTICLECARTESIANPARTITIONER_HPP_
+#define GEOS_PARTITIONER_PARTICLECARTESIANPARTITIONER_HPP_
+
+#include "CartesianPartitioner.hpp"
+#include "NeighborCommunicator.hpp"
+
+#include "mesh/DomainPartition.hpp"
+
+
+namespace geos
+{
+
+class ParticleCartesianPartitioner : public CartesianPartitioner
+{
+public:
+
+
+ ParticleCartesianPartitioner( string const & name,
+ Group * const parent );
+
+ /**
+ * @brief Structure to hold scoped key names
+ */
+ struct viewKeyStruct
+ {
+ //constexpr static char const * m_partitionCountsString() { return "partitionCounts"; }
+ };
+
+ /**
+ * @brief Return the name of ParticleCartesianPartitioner in the object Catalog.
+ * @return string that contains the key name of ParticleCartesianPartitioner in the Catalog
+ */
+ static string catalogName() { return "ParticleCartesianPartitioner"; }
+
+
+ void updateSizes( arrayView1d< real64 > const domainL,
+ real64 const dt );
+
+ bool isCoordInPartitionBoundingBox( const R1Tensor & elemCenter,
+ const real64 & boundaryRadius ) const;
+ void repartitionMasterParticles( DomainPartition & domain,
+ ParticleSubRegion & subRegion,
+ MPI_iCommData & commData );
+
+ void getGhostParticlesFromNeighboringPartitions( DomainPartition & domain,
+ MPI_iCommData & commData,
+ const real64 & boundaryRadius );
+
+ /**
+ * @brief Send coordinates to neighbors as part of repartition.
+ * @param[in] particleCoordinatesSendingToNeighbors Single list of coordinates sent to all neighbors
+ * @param[in] commData Solver's MPI communicator
+ * @param[out] particleCoordinatesReceivedFromNeighbors List of lists of coordinates received from each neighbor
+ * @param[in] neighbors Neighbors to communicate with
+ * @param[in] numFirstOrderNeighbors Number of first-order neighbors to communicate with
+ */
+ void sendCoordinateListToNeighbors( arrayView1d< R1Tensor > const & particleCoordinatesSendingToNeighbors,
+ MPI_iCommData & commData,
+ stdVector< array1d< R1Tensor > > & particleCoordinatesReceivedFromNeighbors,
+ stdVector< NeighborCommunicator > & neighbors,
+ int numFirstOrderNeighbors );
+
+ template< typename indexType >
+ void sendListOfIndicesToNeighbors( stdVector< array1d< indexType > > & listSendingToEachNeighbor,
+ MPI_iCommData & commData,
+ stdVector< array1d< indexType > > & listReceivedFromEachNeighbor,
+ stdVector< NeighborCommunicator > & neighbors,
+ int numFirstOrderNeighbors );
+
+ void sendParticlesToNeighbor( ParticleSubRegionBase & subRegion,
+ stdVector< int > const & newParticleStartingIndices,
+ stdVector< int > const & numberOfIncomingParticles,
+ MPI_iCommData & commData,
+ stdVector< array1d< localIndex > > const & particleLocalIndicesToSendToEachNeighbor,
+ stdVector< NeighborCommunicator > & neighbors,
+ int numFirstOrderNeighbors );
+
+
+
+};
+
+} // namespace geos
+
+#endif // GEOS_PARTITIONER_PARTICLECARTESIANPARTITIONER_HPP_
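For orientation, a sketch of how a driver might call the two high-level entry points declared above; the surrounding objects (partitioner, domain, subRegion, commData, boundaryRadius) are assumed to be set up by the caller, and the helper name is hypothetical.

#include "mesh/mpiCommunications/ParticleCartesianPartitioner.hpp"

namespace geos
{
// Hypothetical helper: reassign out-of-domain particles, then refresh ghosts.
void updateParticleDecomposition( ParticleCartesianPartitioner & partitioner,
                                  DomainPartition & domain,
                                  ParticleSubRegion & subRegion,
                                  MPI_iCommData & commData,
                                  real64 const boundaryRadius )
{
  // (1) Hand off master particles that have moved outside this partition's domain.
  partitioner.repartitionMasterParticles( domain, subRegion, commData );

  // (2) Rebuild ghost copies of neighbor-owned particles lying within boundaryRadius
  //     of the partition boundary.
  partitioner.getGhostParticlesFromNeighboringPartitions( domain, commData, boundaryRadius );
}
} // namespace geos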
diff --git a/src/coreComponents/mesh/mpiCommunications/PartitionBase.hpp b/src/coreComponents/mesh/mpiCommunications/PartitionBase.hpp
deleted file mode 100644
index d06a329893e..00000000000
--- a/src/coreComponents/mesh/mpiCommunications/PartitionBase.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-#ifndef GEOS_MESH_MPICOMMUNICATIONS_PARTITIONBASE_HPP_
-#define GEOS_MESH_MPICOMMUNICATIONS_PARTITIONBASE_HPP_
-
-#include "mesh/mpiCommunications/NeighborCommunicator.hpp"
-#include "common/DataTypes.hpp"
-
-namespace geos
-{
-
-/**
- * @brief Base class for partitioning.
- */
-class PartitionBase
-{
-public:
-
- /**
- * @brief Virtual empty destructor for C++ inheritance reasons
- */
- virtual ~PartitionBase();
-
- /**
- * @brief Checks if the point located inside the current partition in the given direction dir.
- * @param coord The point coordinates.
- * @param dir The considered direction.
- * @return The predicate result.
- */
- virtual bool isCoordInPartition( const real64 & coord, const int dir ) const = 0;
-
- /**
- * @brief Defines the dimensions of the grid.
- * @param min Global minimum spatial dimensions.
- * @param max Global maximum spatial dimensions.
- */
- virtual void setSizes( real64 const ( &min )[ 3 ],
- real64 const ( &max )[ 3 ] ) = 0;
-
- /**
- * @brief Defines the number of partitions along the three (x, y, z) axis.
- * @param xPartitions Number of partitions along x.
- * @param yPartitions Number of partitions along y.
- * @param zPartitions Number of partitions along z.
- */
- virtual void setPartitions( unsigned int xPartitions,
- unsigned int yPartitions,
- unsigned int zPartitions ) = 0;
-
- /**
- * @brief Computes an associated color.
- * @return The color
- *
- * @note The other Color member function.
- */
- virtual int getColor() = 0;
-
- /**
- * @brief Returns the number of colors.
- * @return The number of associated colors.
- */
- int numColor() const
- { return m_numColors; }
-
-protected:
- /**
- * @brief Preventing dummy default constructor.
- */
- PartitionBase() = default;
-
- /**
- * @brief Builds from the size of partitions and the current rank of the partition
- * @param numPartitions Size of the partitions.
- * @param thisPartition The rank of the build partition.
- */
- PartitionBase( const unsigned int numPartitions,
- const unsigned int thisPartition );
-
- /**
- * @brief Array of neighbor communicators.
- */
- stdVector< NeighborCommunicator > m_neighbors;
-
- /// Size of the group associated with the MPI communicator
- int m_size;
- /// MPI rank of the current partition
- int m_rank;
-
- /**
- * @brief Number of colors
- */
- int m_numColors;
-};
-
-}
-
-#endif /* GEOS_MESH_MPICOMMUNICATIONS_PARTITIONBASE_HPP_ */
diff --git a/src/coreComponents/mesh/mpiCommunications/PartitionerManager.cpp b/src/coreComponents/mesh/mpiCommunications/PartitionerManager.cpp
new file mode 100644
index 00000000000..56a0bd47690
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/PartitionerManager.cpp
@@ -0,0 +1,98 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file PartitionerManager.cpp
+ */
+
+#include "PartitionerManager.hpp"
+
+namespace geos
+{
+
+using namespace dataRepository;
+
+PartitionerManager::PartitionerManager( string const & name,
+ Group * const parent )
+ : Group( name, parent )
+{
+ setInputFlags( InputFlags::OPTIONAL );
+}
+
+PartitionerManager::~PartitionerManager()
+{}
+
+Group * PartitionerManager::createChild( string const & childKey,
+ string const & childName )
+{
+ GEOS_LOG_RANK_0( GEOS_FMT( "PartitionerManager: adding {} '{}'", childKey, childName ) );
+
+ // Create partitioner from DomainPartitioner catalog
+ std::unique_ptr< DomainPartitioner > partitioner =
+ DomainPartitioner::CatalogInterface::factory( childKey,
+ getDataContext(),
+ childName,
+ this );
+
+ return &this->registerGroup< DomainPartitioner >( childName, std::move( partitioner ) );
+}
+
+void PartitionerManager::expandObjectCatalogs()
+{
+ // During schema generation, register one of each type derived from DomainPartitioner
+ for( auto & catalogIter : DomainPartitioner::getCatalog() )
+ {
+ createChild( catalogIter.first, catalogIter.first );
+ }
+}
+
+bool PartitionerManager::hasPartitioner() const
+{
+ return this->numSubGroups() > 0;
+}
+
+DomainPartitioner & PartitionerManager::getPartitioner()
+{
+ // Get the first (and should be only) partitioner defined in the XML
+ GEOS_ERROR_IF( !hasPartitioner(),
+ "No partitioner defined in XML. Please add a partitioner inside .\n"
+ "Examples:\n"
+ " \n"
+ " " );
+
+ GEOS_ERROR_IF( this->numSubGroups() > 1,
+ "Multiple partitioners defined. Only one partitioner can be active at a time." );
+
+ // Return the first child which should be a DomainPartitioner
+ return this->getGroup< DomainPartitioner >( 0 );
+}
+
+DomainPartitioner const & PartitionerManager::getPartitioner() const
+{
+ // Get the first (and should be only) partitioner defined in the XML
+ GEOS_ERROR_IF( this->numSubGroups() == 0,
+ "No partitioner defined in XML. Please add a partitioner inside .\n"
+ "Examples:\n"
+ " \n"
+ " " );
+
+ GEOS_ERROR_IF( this->numSubGroups() > 1,
+ "Multiple partitioners defined. Only one partitioner can be active at a time." );
+
+ // Return the first child which should be a DomainPartitioner
+ return this->getGroup< DomainPartitioner >( 0 );
+}
+
+} // namespace geos
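Callers are expected to guard getPartitioner() with hasPartitioner(), since the accessor errors out when zero or multiple partitioners are defined. A small sketch of that guard (the free-function name is an assumption, not part of this PR):

#include "mesh/mpiCommunications/PartitionerManager.hpp"

namespace geos
{
// Return the active partitioner, or nullptr when the optional <Partitioner> block is absent.
DomainPartitioner * tryGetActivePartitioner( PartitionerManager & manager )
{
  return manager.hasPartitioner() ? &manager.getPartitioner() : nullptr;
}
} // namespace geos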
diff --git a/src/coreComponents/mesh/mpiCommunications/PartitionerManager.hpp b/src/coreComponents/mesh/mpiCommunications/PartitionerManager.hpp
new file mode 100644
index 00000000000..a5334392daa
--- /dev/null
+++ b/src/coreComponents/mesh/mpiCommunications/PartitionerManager.hpp
@@ -0,0 +1,143 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file PartitionerManager.hpp
+ * @brief Manager for domain partitioning strategies
+ */
+
+#ifndef GEOS_PARTITIONER_PARTITIONERMANAGER_HPP_
+#define GEOS_PARTITIONER_PARTITIONERMANAGER_HPP_
+
+#include "dataRepository/Group.hpp"
+#include "DomainPartitioner.hpp"
+
+namespace geos
+{
+
+/**
+ * @class PartitionerManager
+ * @brief Manager for domain partitioning strategies
+ *
+ * PartitionerManager is a container for a single partitioner instance.
+ *
+ * The manager supports multiple partitioner types:
+ * - Geometric partitioners (CartesianPartitioner, ParticleCartesianPartitioner)
+ * - Mesh partitioners (CellGraphPartitioner, LayeredGraphPartitioner)
+ *
+ * Only one partitioner can be active at a time.
+ *
+ * XML Structure:
+ * @code{.xml}
+ * <Partitioner>
+ *   <CartesianPartitioner name="..."/>
+ * </Partitioner>
+ * @endcode
+ *
+ * or
+ *
+ * @code{.xml}
+ * <Partitioner>
+ *   <CellGraphPartitioner name="..."/>
+ * </Partitioner>
+ * @endcode
+ *
+ * Usage:
+ * @code
+ * PartitionerManager & partMgr = problemManager.getGroup< PartitionerManager >( "Partitioner" );
+ *
+ * if( partMgr.hasPartitioner() )
+ * {
+ * DomainPartitioner & part = partMgr.getPartitioner();
+ *
+ * // Query partitioner type
+ * if( auto * meshPart = dynamic_cast< ... * >( &part ) )
+ * {
+ * // Use mesh partitioner
+ * }
+ * else if( auto * geomPart = dynamic_cast< ... * >( &part ) )
+ * {
+ * // Use geometric partitioner
+ * }
+ * }
+ * @endcode
+ */
+class PartitionerManager : public dataRepository::Group
+{
+public:
+
+ /**
+ * @brief Constructor
+ * @param name The name of this manager (typically "Partitioner")
+ * @param parent The parent group
+ */
+ PartitionerManager( string const & name,
+ dataRepository::Group * const parent );
+
+ /**
+ * @brief Destructor
+ */
+ ~PartitionerManager() override;
+
+ /**
+ * @brief Create a child partitioner from XML
+ *
+ * Called by the XML parser when it encounters a partitioner element.
+ * Uses the DomainPartitioner catalog to create the appropriate concrete type.
+ *
+ * @param childKey The catalog key (partitioner type, e.g., "CellGraphPartitioner")
+ * @param childName The instance name (e.g., "cellPartitioner")
+ * @return Pointer to the created partitioner
+ */
+ virtual Group * createChild( string const & childKey,
+ string const & childName ) override;
+
+ /**
+ * @brief Expand catalogs for schema generation
+ *
+ * During schema generation, this creates one instance of each registered
+ * partitioner type to document available options.
+ */
+ virtual void expandObjectCatalogs() override;
+
+ /**
+ * @brief Check if a partitioner has been defined
+ * @return true if a partitioner exists
+ */
+ bool hasPartitioner() const;
+
+ /**
+ * @brief Get the active partitioner
+ *
+ * Returns the single partitioner defined in the XML.
+ *
+ * @return Reference to the partitioner
+ */
+ DomainPartitioner & getPartitioner();
+
+ /**
+ * @brief Get the active partitioner (const version)
+ *
+ * @return Const reference to the partitioner
+ */
+ DomainPartitioner const & getPartitioner() const;
+};
+
+} // namespace geos
+
+#endif // GEOS_PARTITIONER_PARTITIONERMANAGER_HPP_
diff --git a/src/coreComponents/mesh/mpiCommunications/SpatialPartition.hpp b/src/coreComponents/mesh/mpiCommunications/SpatialPartition.hpp
deleted file mode 100644
index 0ed1b526039..00000000000
--- a/src/coreComponents/mesh/mpiCommunications/SpatialPartition.hpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * ------------------------------------------------------------------------------------------------------------
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
- * Copyright (c) 2018-2024 TotalEnergies
- * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
- * Copyright (c) 2023-2024 Chevron
- * Copyright (c) 2019- GEOS/GEOSX Contributors
- * All rights reserved
- *
- * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
- * ------------------------------------------------------------------------------------------------------------
- */
-
-#ifndef GEOS_MESH_MPICOMMUNICATIONS_SPATIALPARTITION_HPP_
-#define GEOS_MESH_MPICOMMUNICATIONS_SPATIALPARTITION_HPP_
-
-
-#include "PartitionBase.hpp"
-#include "mesh/DomainPartition.hpp"
-
-
-#include