diff --git a/examples/dam_break.cpp b/examples/dam_break.cpp
index e137445..4399f23 100644
--- a/examples/dam_break.cpp
+++ b/examples/dam_break.cpp
@@ -66,7 +66,7 @@ struct ParticleInitFunc
 //---------------------------------------------------------------------------//
 void damBreak( const double cell_size, const int ppc, const int halo_size,
                const double delta_t, const double t_final, const int write_freq,
-               const std::string& device )
+               const std::string& device, const std::string& partitioner_type )
 {
     // The dam break domain is in a box on [0,1] in each dimension.
     Kokkos::Array<double, 6> global_box = { 0.0, 0.0, 0.0, 1.0, 1.0, 1.0 };
@@ -87,7 +87,6 @@ void damBreak( const double cell_size, const int ppc, const int halo_size,
     int comm_size;
     MPI_Comm_size( MPI_COMM_WORLD, &comm_size );
     std::array<int, 3> ranks_per_dim = { 1, comm_size, 1 };
-    Cajita::ManualPartitioner partitioner( ranks_per_dim );
 
     // Material properties.
     double bulk_modulus = 1.0e5;
@@ -110,8 +109,9 @@ void damBreak( const double cell_size, const int ppc, const int halo_size,
     // Solve the problem.
     auto solver = ExaMPM::createSolver(
         device, MPI_COMM_WORLD, global_box, global_num_cell, periodic,
-        partitioner, halo_size, ParticleInitFunc( cell_size, ppc, density ),
-        ppc, bulk_modulus, density, gamma, kappa, delta_t, gravity, bc );
+        halo_size, ParticleInitFunc( cell_size, ppc, density ), ppc,
+        bulk_modulus, density, gamma, kappa, delta_t, gravity, bc,
+        partitioner_type );
     solver->solve( t_final, write_freq );
 }
 
@@ -143,8 +143,12 @@ int main( int argc, char* argv[] )
     // device type
     std::string device( argv[7] );
 
+    // partitioner type
+    std::string partitioner_type( argv[8] );
+
     // run the problem.
-    damBreak( cell_size, ppc, halo_size, delta_t, t_final, write_freq, device );
+    damBreak( cell_size, ppc, halo_size, delta_t, t_final, write_freq, device,
+              partitioner_type );
 
     Kokkos::finalize();
 
diff --git a/examples/free_fall.cpp b/examples/free_fall.cpp
index 4b4eb19..af82fbc 100644
--- a/examples/free_fall.cpp
+++ b/examples/free_fall.cpp
@@ -65,7 +65,7 @@ struct ParticleInitFunc
 //---------------------------------------------------------------------------//
 void freeFall( const double cell_size, const int ppc, const int halo_size,
                const double delta_t, const double t_final, const int write_freq,
-               const std::string& device )
+               const std::string& device, const std::string& partitioner_type )
 {
     // The dam break domain is in a box on [0,1] in each dimension.
     Kokkos::Array<double, 6> global_box = { -0.5, -0.5, -0.5, 0.5, 0.5, 0.5 };
@@ -86,7 +86,6 @@ void freeFall( const double cell_size, const int ppc, const int halo_size,
     int comm_size;
     MPI_Comm_size( MPI_COMM_WORLD, &comm_size );
     std::array<int, 3> ranks_per_dim = { 1, comm_size, 1 };
-    Cajita::ManualPartitioner partitioner( ranks_per_dim );
 
     // Material properties.
     double bulk_modulus = 5.0e5;
@@ -109,8 +108,9 @@ void freeFall( const double cell_size, const int ppc, const int halo_size,
     // Solve the problem.
     auto solver = ExaMPM::createSolver(
         device, MPI_COMM_WORLD, global_box, global_num_cell, periodic,
-        partitioner, halo_size, ParticleInitFunc( cell_size, ppc, density ),
-        ppc, bulk_modulus, density, gamma, kappa, delta_t, gravity, bc );
+        halo_size, ParticleInitFunc( cell_size, ppc, density ), ppc,
+        bulk_modulus, density, gamma, kappa, delta_t, gravity, bc,
+        partitioner_type );
     solver->solve( t_final, write_freq );
 }
 
@@ -142,8 +142,12 @@ int main( int argc, char* argv[] )
     // device type
     std::string device( argv[7] );
 
+    // partitioner type
+    std::string partitioner_type( argv[8] );
+
     // run the problem.
-    freeFall( cell_size, ppc, halo_size, delta_t, t_final, write_freq, device );
+    freeFall( cell_size, ppc, halo_size, delta_t, t_final, write_freq, device,
+              partitioner_type );
 
     Kokkos::finalize();
 
diff --git a/src/ExaMPM_Solver.hpp b/src/ExaMPM_Solver.hpp
index 99505ff..9ec76e9 100644
--- a/src/ExaMPM_Solver.hpp
+++ b/src/ExaMPM_Solver.hpp
@@ -43,20 +43,37 @@ class Solver : public SolverBase
     template <class InitFunc>
     Solver( MPI_Comm comm, const Kokkos::Array<double, 6>& global_bounding_box,
             const std::array<int, 3>& global_num_cell,
-            const std::array<bool, 3>& periodic,
-            const Cajita::BlockPartitioner<3>& partitioner,
-            const int halo_cell_width, const InitFunc& create_functor,
-            const int particles_per_cell, const double bulk_modulus,
-            const double density, const double gamma, const double kappa,
-            const double delta_t, const double gravity,
-            const BoundaryCondition& bc )
+            const std::array<bool, 3>& periodic, const int halo_cell_width,
+            const InitFunc& create_functor, const int particles_per_cell,
+            const double bulk_modulus, const double density, const double gamma,
+            const double kappa, const double delta_t, const double gravity,
+            const BoundaryCondition& bc, const std::string& partitioner_type )
         : _dt( delta_t )
         , _gravity( gravity )
         , _bc( bc )
         , _halo_min( 3 )
     {
+        if ( 0 == partitioner_type.compare( "manual" ) )
+        {
+            int comm_size;
+            MPI_Comm_size( comm, &comm_size );
+            std::array<int, 3> ranks_per_dim = { 1, comm_size, 1 };
+            _partitioner =
+                std::make_shared<Cajita::ManualPartitioner>( ranks_per_dim );
+        }
+        else if ( 0 == partitioner_type.compare( "dynamic" ) )
+        {
+            _partitioner = std::make_shared<Cajita::DynamicPartitioner<
+                Kokkos::Device<ExecutionSpace, MemorySpace>>>(
+                comm, global_num_cell );
+        }
+        else
+        {
+            throw std::runtime_error( "invalid partitioner type" );
+        }
+
         _mesh = std::make_shared<Mesh<MemorySpace>>(
-            global_bounding_box, global_num_cell, periodic, partitioner,
+            global_bounding_box, global_num_cell, periodic, *_partitioner,
             halo_cell_width, _halo_min, comm );
 
         _bc.min = _mesh->minDomainGlobalNodeIndex();
@@ -112,6 +129,7 @@ class Solver : public SolverBase
     int _halo_min;
     std::shared_ptr<Mesh<MemorySpace>> _mesh;
    std::shared_ptr<ProblemManager<MemorySpace, ExecutionSpace>> _pm;
+    std::shared_ptr<Cajita::BlockPartitioner<3>> _partitioner;
     int _rank;
 };
 
@@ -122,22 +140,21 @@ template <class InitFunc>
 std::shared_ptr<SolverBase>
 createSolver( const std::string& device, MPI_Comm comm,
               const Kokkos::Array<double, 6>& global_bounding_box,
               const std::array<int, 3>& global_num_cell,
-              const std::array<bool, 3>& periodic,
-              const Cajita::BlockPartitioner<3>& partitioner,
-              const int halo_cell_width, const InitFunc& create_functor,
-              const int particles_per_cell, const double bulk_modulus,
-              const double density, const double gamma, const double kappa,
-              const double delta_t, const double gravity,
-              const BoundaryCondition& bc )
+              const std::array<bool, 3>& periodic, const int halo_cell_width,
+              const InitFunc& create_functor, const int particles_per_cell,
+              const double bulk_modulus, const double density,
+              const double gamma, const double kappa, const double delta_t,
+              const double gravity, const BoundaryCondition& bc,
+              const std::string& partitioner_type )
 {
     if ( 0 == device.compare( "serial" ) )
     {
 #ifdef KOKKOS_ENABLE_SERIAL
         return std::make_shared<
             ExaMPM::Solver<Kokkos::HostSpace, Kokkos::Serial>>(
-            comm, global_bounding_box, global_num_cell, periodic, partitioner,
+            comm, global_bounding_box, global_num_cell, periodic,
             halo_cell_width, create_functor, particles_per_cell, bulk_modulus,
-            density, gamma, kappa, delta_t, gravity, bc );
+            density, gamma, kappa, delta_t, gravity, bc, partitioner_type );
 #else
         throw std::runtime_error( "Serial Backend Not Enabled" );
 #endif
@@ -147,9 +164,9 @@ createSolver( const std::string& device, MPI_Comm comm,
 #ifdef KOKKOS_ENABLE_OPENMP
         return std::make_shared<
             ExaMPM::Solver<Kokkos::HostSpace, Kokkos::OpenMP>>(
-            comm, global_bounding_box, global_num_cell, periodic, partitioner,
+            comm, global_bounding_box, global_num_cell, periodic,
             halo_cell_width, create_functor, particles_per_cell, bulk_modulus,
-            density, gamma, kappa, delta_t, gravity, bc );
+            density, gamma, kappa, delta_t, gravity, bc, partitioner_type );
 #else
         throw std::runtime_error( "OpenMP Backend Not Enabled" );
 #endif
@@ -159,9 +176,9 @@ createSolver( const std::string& device, MPI_Comm comm,
 #ifdef KOKKOS_ENABLE_CUDA
         return std::make_shared<
             ExaMPM::Solver<Kokkos::CudaSpace, Kokkos::Cuda>>(
-            comm, global_bounding_box, global_num_cell, periodic, partitioner,
+            comm, global_bounding_box, global_num_cell, periodic,
             halo_cell_width, create_functor, particles_per_cell, bulk_modulus,
-            density, gamma, kappa, delta_t, gravity, bc );
+            density, gamma, kappa, delta_t, gravity, bc, partitioner_type );
 #else
         throw std::runtime_error( "CUDA Backend Not Enabled" );
 #endif
@@ -171,9 +188,9 @@ createSolver( const std::string& device, MPI_Comm comm,
 #ifdef KOKKOS_ENABLE_HIP
         return std::make_shared<ExaMPM::Solver<
             Kokkos::Experimental::HIPSpace, Kokkos::Experimental::HIP>>(
-            comm, global_bounding_box, global_num_cell, periodic, partitioner,
+            comm, global_bounding_box, global_num_cell, periodic,
             halo_cell_width, create_functor, particles_per_cell, bulk_modulus,
-            density, gamma, kappa, delta_t, gravity, bc );
+            density, gamma, kappa, delta_t, gravity, bc, partitioner_type );
 #else
         throw std::runtime_error( "HIP Backend Not Enabled" );
 #endif
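
The logic the `Solver` constructor gains is an ordinary string-to-polymorphic-type dispatch: both concrete partitioners are held behind the abstract `Cajita::BlockPartitioner<3>` interface, which is why the member is a `std::shared_ptr` and the mesh is now built from `*_partitioner`. Below is a minimal standalone sketch of the same pattern, assuming the Cajita spellings used in the patch; `makePartitioner` and `DeviceType` are illustrative names, not part of this change.

```cpp
// Sketch only: the selection pattern from the Solver constructor, pulled
// out as a free function. Assumes the Cajita types spelled in the patch;
// DeviceType stands in for Kokkos::Device<ExecutionSpace, MemorySpace>.
#include <Cajita.hpp>

#include <array>
#include <memory>
#include <stdexcept>
#include <string>

#include <mpi.h>

template <class DeviceType>
std::shared_ptr<Cajita::BlockPartitioner<3>>
makePartitioner( MPI_Comm comm, const std::string& partitioner_type,
                 const std::array<int, 3>& global_num_cell )
{
    if ( 0 == partitioner_type.compare( "manual" ) )
    {
        // Static 1 x N x 1 rank layout, as hard-coded in the examples.
        int comm_size;
        MPI_Comm_size( comm, &comm_size );
        std::array<int, 3> ranks_per_dim = { 1, comm_size, 1 };
        return std::make_shared<Cajita::ManualPartitioner>( ranks_per_dim );
    }
    if ( 0 == partitioner_type.compare( "dynamic" ) )
    {
        // Workload-driven decomposition sized from the global cell counts.
        return std::make_shared<Cajita::DynamicPartitioner<DeviceType>>(
            comm, global_num_cell );
    }
    throw std::runtime_error( "invalid partitioner type" );
}
```

Holding the result behind the `Cajita::BlockPartitioner<3>` base is what lets two unrelated partitioner types share one member; the dereference at the `Mesh` constructor call site keeps all downstream mesh code unchanged.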
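Both example drivers now read the partitioner choice as an eighth positional argument, after the device name, accepting `manual` or `dynamic`. Assuming the first six arguments follow the parameter order of `damBreak()`/`freeFall()` (cell size, particles per cell, halo width, time step, final time, write frequency; the parsing of `argv[1]` through `argv[6]` is outside this diff), a run might look like `mpirun -n 4 ./DamBreak 0.05 2 1 0.001 1.0 100 serial manual`. The executable name and numeric values here are illustrative only.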