diff --git a/CMakeLists.txt b/CMakeLists.txt index f0a9cad9384..901ae4802f2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -149,6 +149,7 @@ four_c_configure_dependency(Backtrace DEFAULT OFF) four_c_configure_dependency(ryml REQUIRED) four_c_configure_dependency(magic_enum REQUIRED) four_c_configure_dependency(ZLIB REQUIRED) +four_c_configure_dependency(CLI11 REQUIRED) four_c_configure_dependency(Python DEFAULT ON) # not required but useful for most developers four_c_configure_dependency(pybind11 DEFAULT OFF) diff --git a/apps/global_full/4C_global_full_io.cpp b/apps/global_full/4C_global_full_io.cpp index 2134745005e..f363ec4fb7a 100644 --- a/apps/global_full/4C_global_full_io.cpp +++ b/apps/global_full/4C_global_full_io.cpp @@ -9,16 +9,17 @@ #include "4C_global_full_io.hpp" +#include "4C_comm_utils.hpp" #include "4C_global_data_read.hpp" #include "4C_global_legacy_module.hpp" #include "4C_io_pstream.hpp" +#include "4C_utils_exceptions.hpp" #include #include FOUR_C_NAMESPACE_OPEN - Core::IO::InputFile setup_input_file(const MPI_Comm comm) { return Global::set_up_input_file(comm); @@ -33,7 +34,8 @@ void emit_general_metadata(const Core::IO::YamlNodeRef& root_ref) /** * \brief Sets up the parallel output environment. 
*/ -void setup_parallel_output(const CommandlineArguments& arguments) +void setup_parallel_output( + const CommandlineArguments& arguments, const Core::Communication::Communicators& communicators) { using namespace FourC; @@ -45,21 +47,22 @@ void setup_parallel_output(const CommandlineArguments& arguments) int oproc = io.get("LIMIT_OUTP_TO_PROC"); auto level = Teuchos::getIntegralValue(io, "VERBOSITY"); - Core::IO::cout.setup(screen, file, preGrpID, level, std::move(arguments.comms.local_comm()), - oproc, arguments.comms.group_id(), arguments.output_file_identifier); + Core::IO::cout.setup(screen, file, preGrpID, level, communicators.local_comm(), oproc, + communicators.group_id(), arguments.output_file_identifier); } -void setup_global_problem(Core::IO::InputFile& input_file, const CommandlineArguments& arguments) +void setup_global_problem(Core::IO::InputFile& input_file, const CommandlineArguments& arguments, + Core::Communication::Communicators& communicators) { Global::Problem* problem = Global::Problem::instance(); - problem->set_restart_step(arguments.restart_step); - problem->set_communicators(arguments.comms); + problem->set_restart_step(arguments.restart); + problem->set_communicators(communicators); Global::read_parameter(*problem, input_file); - setup_parallel_output(arguments); + setup_parallel_output(arguments, communicators); // create control file for output and read restart data if required - problem->open_control_file(arguments.comms.local_comm(), arguments.input_file_name, + problem->open_control_file(communicators.local_comm(), arguments.input_file_name, arguments.output_file_identifier, arguments.restart_file_identifier); // input of materials @@ -108,197 +111,6 @@ double walltime_in_seconds() 1.0e-3; } -void parse_commandline_arguments(CommandlineArguments& arguments) -{ - int group = arguments.comms.group_id(); - - int restart_group = 0; - int my_rank = Core::Communication::my_mpi_rank(arguments.comms.local_comm()); - - std::vector inout = - 
parse_input_output_files(arguments.argc, arguments.argv, my_rank); - - // number of input/output arguments specified by the user - auto inout_args = int(inout.size()); - - std::string input_filename; - std::string output_file_identifier; - std::string restart_file_identifier; - // set input file name in each group - switch (arguments.comms.np_type()) - { - case Core::Communication::NestedParallelismType::no_nested_parallelism: - input_filename = inout[0]; - output_file_identifier = inout[1]; - restart_group = 0; - break; - case Core::Communication::NestedParallelismType::every_group_read_input_file: - { - if (inout_args > 4) - FOUR_C_THROW( - "You specified too many arguments ({}). A maximum of four args is allowed", inout_args); - - input_filename = inout[0]; - // check whether output_file_identifier includes a dash and in case separate the number at the - // end - size_t pos = inout[1].rfind('-'); - if (pos != std::string::npos) - { - int number = atoi(inout[1].substr(pos + 1).c_str()); - inout[1] = inout[1].substr(0, pos); - output_file_identifier = std::format("{}_group_{}_{}", inout[1], group, number); - } - else - { - output_file_identifier = std::format("{}_group_{}", inout[1], group); - } - restart_group = 0; - } - break; - case Core::Communication::NestedParallelismType::separate_input_files: - if (inout_args % arguments.comms.num_groups() != 0) - FOUR_C_THROW("Each group needs the same number of arguments for input/output."); - inout_args /= arguments.comms.num_groups(); - input_filename = inout[group * inout_args]; - output_file_identifier = inout[group * inout_args + 1]; - restart_group = group; - break; - default: - FOUR_C_THROW( - "-nptype is not correct. 
Only everyGroupReadInputFile and separateInputFiles " - "are available"); - break; - } - - if (my_rank == 0) - { - std::cout << "input is read from " << input_filename << std::endl; - } - parse_restart_definition( - inout, inout_args, restart_file_identifier, output_file_identifier, restart_group, arguments); - - /// set IO file names and identifiers - arguments.input_file_name = input_filename; - arguments.output_file_identifier = output_file_identifier; - arguments.restart_file_identifier = restart_file_identifier; -} - - -std::vector parse_input_output_files(const int argc, char** argv, const int my_rank) -{ - if (argc <= 1) - { - if (my_rank == 0) - { - printf("You forgot to give the input and output file names!\n"); - printf("Try again!\n"); - } - MPI_Finalize(); - exit(EXIT_FAILURE); - } - else if (argc <= 2) - { - if (my_rank == 0) - { - printf("You forgot to give the output file name!\n"); - printf("Try again!\n"); - } - MPI_Finalize(); - exit(EXIT_FAILURE); - } - - - // parse command line and separate input/output arguments - std::vector inout; - for (int i = 1; i < argc; i++) - { - std::string temp = argv[i]; - if (temp.substr(0, 1) != "-") inout.push_back(temp); - } - return inout; -} - -void parse_restart_definition(const std::vector& inout, const int in_out_args, - std::string& restart_file_identifier, const std::string& outfile_identifier, - const int restart_group, CommandlineArguments& arguments) -{ - // Global::Problem* problem = Global::Problem::instance(); - // bool parameter defining if input argument is given - bool restartIsGiven = false; - bool restartfromIsGiven = false; - - // default case is an identical restartfile_identifier and outputfile_identifier - restart_file_identifier = outfile_identifier; - for (int i = 2; i < in_out_args; i++) - { - std::string restart = inout[restart_group * in_out_args + i]; - - if (restart.substr(0, 8) == "restart=") - { - const std::string option = restart.substr(8, std::string::npos); - int r; - if 
(option.compare("last_possible") == 0) - { - r = -1; // here we use a negative value to trigger the search in the control file in - // the later step. It does not mean a restart from a negative number is allowed - // from the user point of view. - } - else - { - r = atoi(option.c_str()); - if (r < 0) FOUR_C_THROW("Restart number must be a positive value"); - } - // tell the global problem about the restart step given in the command line - arguments.restart_step = r; - restartIsGiven = true; - } - else if (restart.substr(0, 12) == "restartfrom=") - { - restart_file_identifier = (restart.substr(12, std::string::npos).c_str()); - - switch (arguments.comms.np_type()) - { - case Core::Communication::NestedParallelismType::no_nested_parallelism: - case Core::Communication::NestedParallelismType::separate_input_files: - // nothing to add to restartfileidentifier - break; - case Core::Communication::NestedParallelismType::every_group_read_input_file: - { - // check whether restartfileidentifier includes a dash and in case separate the number - // at the end - size_t pos = restart_file_identifier.rfind('-'); - if (pos != std::string::npos) - { - int number = atoi(restart_file_identifier.substr(pos + 1).c_str()); - std::string identifier = restart_file_identifier.substr(0, pos); - restart_file_identifier = - std::format("{}_group_{}_-{}", identifier, arguments.comms.group_id(), number); - } - else - { - restart_file_identifier = - std::format("{}_group_{}", restart_file_identifier, arguments.comms.group_id()); - } - } - break; - default: - FOUR_C_THROW( - "-nptype is not correct. 
Only everyGroupReadInputFile and " - "separateInputFiles are available"); - break; - } - - restartfromIsGiven = true; - } - } - - // throw error in case restartfrom is given but no restart step is specified - if (restartfromIsGiven && !restartIsGiven) - { - FOUR_C_THROW("You need to specify a restart step when using restartfrom."); - } -} - void write_timemonitor(MPI_Comm comm) { std::shared_ptr> TeuchosComm = @@ -306,5 +118,4 @@ void write_timemonitor(MPI_Comm comm) Teuchos::TimeMonitor::summarize(Teuchos::Ptr(TeuchosComm.get()), std::cout, false, true, false); } - FOUR_C_NAMESPACE_CLOSE diff --git a/apps/global_full/4C_global_full_io.hpp b/apps/global_full/4C_global_full_io.hpp index ee88f1550b7..297331736ee 100644 --- a/apps/global_full/4C_global_full_io.hpp +++ b/apps/global_full/4C_global_full_io.hpp @@ -11,27 +11,11 @@ #include "4C_config.hpp" #include "4C_comm_utils.hpp" +#include "4C_io_command_line_helpers.hpp" #include "4C_io_input_file.hpp" FOUR_C_NAMESPACE_OPEN -/** - * Gather some arguments from the command line and store them in a struct. This should be - * changed to a proper parser - */ -struct CommandlineArguments -{ - int argc; - char** argv; - - std::string input_file_name; - std::string output_file_identifier; - std::string restart_file_identifier; - int restart_step = 0; - - Core::Communication::Communicators comms; -}; - /** * \brief Initializes the input file for reading. * \note Currently, this function is a wrapper around Global::set_up_input_file to keep the main @@ -52,29 +36,14 @@ void emit_general_metadata(const Core::IO::YamlNodeRef& root_ref); * \brief Sets up the Global::Problem instance and puts all the parameters from the input file * there. 
*/ -void setup_global_problem(Core::IO::InputFile& input_file, const CommandlineArguments& arguments); - +void setup_global_problem(Core::IO::InputFile& input_file, const CommandlineArguments& arguments, + Core::Communication::Communicators& communicators); /** * \brief Returns the wall time in seconds. */ double walltime_in_seconds(); -/** - * \brief Parses command line arguments and sets input, output, and restart file identifiers. - */ -void parse_commandline_arguments(CommandlineArguments& arguments); - -/** - * \brief Parses input and output files from command line arguments. - */ -std::vector parse_input_output_files(const int argc, char** argv, const int my_rank); -/** - * \brief Parses the restart definition from command line arguments. - */ -void parse_restart_definition(const std::vector& inout, int in_out_args, - std::string& restart_file_identifier, const std::string& outfile_identifier, int restart_group, - CommandlineArguments& arguments); /** * \brief Writes the Teuchos::TimeMonitor information to std::cout diff --git a/apps/global_full/4C_global_full_main.cpp b/apps/global_full/4C_global_full_main.cpp index dca97d7d15f..e9237048eac 100644 --- a/apps/global_full/4C_global_full_main.cpp +++ b/apps/global_full/4C_global_full_main.cpp @@ -11,12 +11,13 @@ #include "4C_comm_utils.hpp" #include "4C_global_full_io.hpp" #include "4C_global_legacy_module.hpp" +#include "4C_io_command_line_helpers.hpp" #include "4C_io_input_file_utils.hpp" -#include "4C_io_input_spec_builders.hpp" #include "4C_io_pstream.hpp" #include "4C_utils_exceptions.hpp" #include "4C_utils_singleton_owner.hpp" +#include #include #include @@ -33,72 +34,22 @@ using namespace FourC; namespace { - void print_help_message() + // Custom CLI11 formatter to add extra spacing between options + class SpacedFormatter : public CLI::Formatter { - std::cout - << "NAME\n" - << "\t" - << "4C - simulate just about anything\n" - << "\n" - << "SYNOPSIS\n" - << "\t" - << "4C [-h | --help] [-p | 
--parameters] [-d | --datfile] [-ngroup=] \\ " - "\n" - "\t\t[-glayout=a,b,c,...] [-nptype=] \\ \n" - << "\t\t [restart=] [restartfrom=restart_file_name] \\ \n" - "\t\t[ [restart=] [restartfrom=restart_file_name] ... " - "] \\ \n" - "\t\t[--interactive]\n" - << "\n" - << "DESCRIPTION\n" - << "\tThe am besten simulation tool in the world.\n" - << "\n" - << "OPTIONS\n" - << "\t--help or -h\n" - << "\t\tPrint this message.\n" - << "\n" - << "\t--parameters or -p\n" - << "\t\tDumps information about the parameters for consumption by additional tools.\n" - << "\n" - << "\t-ngroup=\n" - << "\t\tSpecify the number of groups for nested parallelism. (default: 1)\n" - << "\n" - << "\t-glayout=,,,...\n" - << "\t\tSpecify the number of processors per group. \n" - "\t\tArgument \"-ngroup\" is mandatory and must be preceding. \n" - "\t\t(default: equal distribution)\n" - << "\n" - << "\t-nptype=\n" - << "\t\tAvailable options: \"separateInputFiles\" and \"everyGroupReadInputFile\"; \n" - "\t\tMust be set if \"-ngroup\" > 1.\n" - << "\t\t\"diffgroupx\" can be used to compare results from separate but parallel 4C " - "runs; \n" - "\t\tx must be 0 and 1 for the respective run\n" - << "\n" - << "\t\n" - << "\t\tName of the input file, including the suffix\n" - << "\n" - << "\t\n" - << "\t\tPrefix of your output files.\n" - << "\n" - << "\trestart=\n" - << "\t\tRestart the simulation from step . \n" - "\t\tIt always refers to the previously defined and . \n" - "\t\t(default: 0 or from )\n" - "\t\tIf y=last_possible, it will restart from the last restart step defined in the " - "control file.\n" - << "\n" - << "\trestartfrom=\n" - << "\t\tRestart the simulation from the files prefixed with . \n" - "\t\t(default: )\n" - << "\n" - << "\t--interactive\n" - << "\t\t4C waits at the beginning for keyboard input. \n" - "\t\tHelpful for parallel debugging when attaching to a single job. 
\n" - "\t\tMust be specified at the end in the command line.\n" - << "\n"; - } + public: + SpacedFormatter() : Formatter() {} + std::string make_option(const CLI::Option* opt, bool in_sub) const override + { + std::string s = Formatter::make_option(opt, in_sub); + if (!s.empty() && s.back() == '\n') + s += '\n'; + else + s += "\n\n"; + return s; + } + }; /** Collect and print data on memory high water mark of this run * * 1. Ask the operating system for memory usage. @@ -256,10 +207,234 @@ namespace } // namespace +CommandlineArguments parse_command_line(int argc, char** argv) +{ + CLI::App cli_app{"4C - Multiphysics \nComprehensive Computational Community Code"}; + cli_app.formatter(std::make_shared()); + CommandlineArguments arguments; + cli_app.add_flag("-p,--parameters", arguments.parameters, + "Dumps information about the parameters for consumption by additional tools."); + cli_app.add_flag("-i,--interactive", arguments.interactive, + "4C waits at the beginning for keyboard input. " + "Helpful for parallel debugging when attaching to a single job."); + cli_app + .add_option("--ngroup", arguments.n_groups, + "Specify the number of groups for nested parallelism. 
(default: 1)") + ->check(CLI::PositiveNumber); + cli_app + .add_option( + "--glayout", + [&arguments](const std::vector& tokens) -> bool + { + if (tokens.empty()) return false; + arguments.group_layout.clear(); + + for (const auto& tok : tokens) + { + std::stringstream ss(tok); + std::string part; + while (std::getline(ss, part, ',')) + { + if (part.empty()) continue; + try + { + size_t pos = 0; + int val = std::stoi(part, &pos); + if (pos != part.size() || val <= 0) + { + throw CLI::ValidationError("glayout", "Entries must be positive integers."); + } + arguments.group_layout.push_back(val); + } + catch (const std::invalid_argument&) + { + throw CLI::ValidationError("glayout", "Entries must be positive integers."); + } + catch (const std::out_of_range&) + { + throw CLI::ValidationError("glayout", "glayout entry out of range."); + } + } + } + return true; + }, + "Specify the number of processors per group. Comma-separated list without spaces, e.g. " + "--glayout=,.\n" + "Argument --ngroup is mandatory if a glayout is provided. 
(default: equal distribution)") + ->allow_extra_args(false); + cli_app.add_option( + "--nptype", + [&arguments](const std::vector& tokens) -> bool + { + if (tokens.empty()) return false; + const std::string& input = tokens.front(); + if (input == "separateInputFiles") + { + arguments.nptype = Core::Communication::NestedParallelismType::separate_input_files; + return true; + } + else if (input == "everyGroupReadInputFile") + { + arguments.nptype = + Core::Communication::NestedParallelismType::every_group_read_input_file; + return true; + } + else if (input == "nestedMultiscale") + { + arguments.nptype = Core::Communication::NestedParallelismType::nested_multiscale; + return true; + } + else if (input.rfind("diffgroup", 0) == 0) + { + // Expect exactly an integer suffix after "diffgroup", and only allow 0 or 1 + const std::string suffix = input.substr(9); + if (suffix.empty()) + { + throw CLI::ValidationError( + "nptype", "Missing suffix for 'diffgroup'; expected 'diffgroup0' or 'diffgroup1'."); + } + for (char c : suffix) + { + if (!std::isdigit(static_cast(c))) + { + throw CLI::ValidationError( + "nptype", "Invalid diffgroup suffix; expected integer after 'diffgroup'."); + } + } + int val = std::stoi(suffix); + if (val != 0 && val != 1) + { + throw CLI::ValidationError("nptype", "Only diffgroup0 and diffgroup1 are allowed."); + } + arguments.nptype = Core::Communication::NestedParallelismType::no_nested_parallelism; + arguments.diffgroup = val; + return true; + } + else + { + throw CLI::ValidationError("nptype", + "Only 'everyGroupReadInputFile', 'separateInputFiles', 'nestedMultiscale', and " + "'diffgroupx' are available for nptype."); + } + }, + "Specify nested parallelism type: " + "separateInputFiles|everyGroupReadInputFile|\nnestedMultiscale|diffgroup \n" + "Must be set if --ngroup > 1. 
\n" + "'diffgroupx' can be used to compare results from separate but parallel 4C runs; " + "x must be 0 and 1 for the respective run"); + cli_app.add_option( + "--restart", + [&arguments](const std::vector& tokens) -> bool + { + if (tokens.empty()) return false; + for (const auto& tok : tokens) + { + std::string s = tok; + // allow comma-separated lists in a single token (e.g. "--restart=3,4") + std::stringstream ss(s); + std::string part; + while (std::getline(ss, part, ',')) + { + if (part.empty()) continue; + if (part == "last_possible") + { + arguments.restart_per_group.push_back(-1); // legacy sentinel for last_possible + continue; + } + try + { + size_t pos = 0; + int val = std::stoi(part, &pos); + if (pos != part.size() || val < 0) + { + throw CLI::ValidationError( + "restart", "Restart step must be a non-negative integer or 'last_possible'."); + } + arguments.restart_per_group.push_back(val); + } + catch (const std::invalid_argument&) + { + throw CLI::ValidationError( + "restart", "Restart step must be a non-negative integer or 'last_possible'."); + } + catch (const std::out_of_range&) + { + throw CLI::ValidationError("restart", "Restart step out of range."); + } + } + } + return true; + }, + "Restart the simulation from step . 
Accepts a non-negative integer or 'last_possible'.\n" + "If nested parallelism with separate input files is used, each group can have a different " + "restart step defined as a comma-separated list, e.g., --restart=,."); + cli_app.add_option( + "--restartfrom", + [&arguments](const std::vector& tokens) -> bool + { + if (tokens.empty()) return false; + for (const auto& tok : tokens) + { + std::stringstream ss(tok); + std::string part; + while (std::getline(ss, part, ',')) + { + if (part.empty()) continue; + arguments.restart_identifier_per_group.push_back(part); + } + } + return true; + }, + "Restart the simulation from the files prefixed with .\n If nested " + "parallelism with separate input files is used, each group can have a different file prefix " + "defined as a comma-separated list."); + std::string primary_input; + std::string primary_output; + cli_app.add_option("input", primary_input, "Name of the input file, including the suffix"); + cli_app.add_option("output", primary_output, "Prefix of your output files."); + + std::vector io_pairs; + cli_app + .add_option("io_pairs", io_pairs, + "More pairs of simulation and names. 
Only necessary when using nested " + "parallelism with multiple groups and separate input files.") + ->expected(-1); + + + std::vector raw_args; + raw_args.reserve(argc); + for (int i = 1; i < argc; ++i) raw_args.emplace_back(argv[i]); + + LegacyCliOptions legacy_options = {.single_dash_legacy_names = {"ngroup", "glayout", "nptype"}, + .nodash_legacy_names = {"restart", "restartfrom"}}; + std::vector sanitized_args = adapt_legacy_cli_arguments(raw_args, legacy_options); + + // Reversed order required when parsing std::vector with CLI11 + std::reverse(sanitized_args.begin(), sanitized_args.end()); + try + { + cli_app.parse(sanitized_args); + } + catch (const CLI::ParseError& e) + { + std::exit(cli_app.exit(e)); + } + + if (!arguments.parameters) + { + if (primary_input.empty() || primary_output.empty()) + { + FOUR_C_THROW("Please provide both and arguments."); + } + } + arguments.io_pairs = build_io_pairs(io_pairs, primary_input, primary_output); + validate_argument_cross_compatibility(arguments); + return arguments; +} /*----------------------------------------------------------------------* *----------------------------------------------------------------------*/ void entrypoint_switch(); -void run(CommandlineArguments& arguments); +void run(CommandlineArguments& cli_args, Core::Communication::Communicators& communicators); /** * @brief The main function of the central 4C executable. @@ -287,26 +462,23 @@ int main(int argc, char* argv[]) // Initialize our own singleton registry to ensure we clean up all singletons properly. 
Core::Utils::SingletonOwnerRegistry::ScopeGuard singleton_owner_guard{}; - Core::Communication::Communicators communicators = - Core::Communication::create_comm(std::vector(argv, argv + argc)); - - CommandlineArguments arguments{ - .argc = argc, - .argv = argv, - .input_file_name = "", - .output_file_identifier = "", - .restart_file_identifier = "", - .restart_step = 0, - .comms = communicators, + auto arguments = parse_command_line(argc, argv); + + Core::Communication::CommConfig config{ + .ngroup = arguments.n_groups, + .group_layout = arguments.group_layout, + .np_type = arguments.nptype, + .diffgroup = arguments.diffgroup, }; + Core::Communication::Communicators communicators = Core::Communication::create_comm(config); - if (strcmp(argv[argc - 1], "--interactive") == 0) + if (arguments.interactive) { char hostname[256]; gethostname(hostname, sizeof(hostname)); printf("Global rank %d with PID %d on %s is ready for attach\n", - Core::Communication::my_mpi_rank(arguments.comms.global_comm()), getpid(), hostname); - if (Core::Communication::my_mpi_rank(arguments.comms.global_comm()) == 0) + Core::Communication::my_mpi_rank(communicators.global_comm()), getpid(), hostname); + if (Core::Communication::my_mpi_rank(communicators.global_comm()) == 0) { printf("\n** Enter a character to continue > \n"); fflush(stdout); @@ -318,20 +490,11 @@ int main(int argc, char* argv[]) } } - Core::Communication::barrier(arguments.comms.global_comm()); + Core::Communication::barrier(communicators.global_comm()); - if ((argc == 2) && ((strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "--help") == 0))) + if (arguments.parameters) { - if (Core::Communication::my_mpi_rank(arguments.comms.local_comm()) == 0) - { - printf("\n\n"); - print_help_message(); - printf("\n\n"); - } - } - else if ((argc == 2) && ((strcmp(argv[1], "-p") == 0) || (strcmp(argv[1], "--parameters") == 0))) - { - if (Core::Communication::my_mpi_rank(arguments.comms.local_comm()) == 0) + if 
(Core::Communication::my_mpi_rank(communicators.local_comm()) == 0) { ryml::Tree tree = Core::IO::init_yaml_tree_with_exceptions(); ryml::NodeRef root = tree.rootref(); @@ -342,7 +505,7 @@ int main(int argc, char* argv[]) emit_general_metadata(root_ref); // Write the user input defined for various physics module. - Core::IO::InputFile input_file = setup_input_file(arguments.comms.local_comm()); + Core::IO::InputFile input_file = setup_input_file(communicators.local_comm()); input_file.emit_metadata(root_ref); // Finally, dump everything. @@ -351,7 +514,7 @@ int main(int argc, char* argv[]) } else { - if (Core::Communication::my_mpi_rank(arguments.comms.global_comm()) == 0) + if (Core::Communication::my_mpi_rank(communicators.global_comm()) == 0) { constexpr int box_width = 54; @@ -378,7 +541,7 @@ int main(int argc, char* argv[]) std::cout << "Trilinos Version: " << FOUR_C_TRILINOS_HASH << " (git SHA1)\n"; std::cout << "Total number of MPI ranks: " - << Core::Communication::num_mpi_ranks(arguments.comms.global_comm()) << '\n'; + << Core::Communication::num_mpi_ranks(communicators.global_comm()) << '\n'; } /* Here we turn the NaN and INF numbers off. 
No need to calculate @@ -411,21 +574,18 @@ int main(int argc, char* argv[]) #else try { - run(arguments); + run(arguments, communicators); } catch (Core::Exception& err) { char line[] = "=========================================================================\n"; - std::cout << "\n\n" - << line << err.what_with_stacktrace() << "\n" - << line << "\n" - << std::endl; + std::cout << "\n\n" << line << err.what_with_stacktrace() << "\n" << line << "\n" << '\n'; - if (arguments.comms.num_groups() > 1) + if (communicators.num_groups() > 1) { printf("Global processor %d has thrown an error and is waiting for the remaining procs\n\n", - Core::Communication::my_mpi_rank(arguments.comms.global_comm())); - Core::Communication::barrier(arguments.comms.global_comm()); + Core::Communication::my_mpi_rank(communicators.global_comm())); + Core::Communication::barrier(communicators.global_comm()); } MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); @@ -433,21 +593,21 @@ int main(int argc, char* argv[]) #endif /*----------------------------------------------------------------------*/ - get_memory_high_water_mark(arguments.comms.global_comm()); + get_memory_high_water_mark(communicators.global_comm()); - Core::Communication::barrier(arguments.comms.local_comm()); - if (arguments.comms.num_groups() > 1) + Core::Communication::barrier(communicators.local_comm()); + if (communicators.num_groups() > 1) { printf("Global processor %d with local rank %d finished normally\n", - Core::Communication::my_mpi_rank(arguments.comms.global_comm()), - Core::Communication::my_mpi_rank(arguments.comms.local_comm())); - Core::Communication::barrier(arguments.comms.global_comm()); + Core::Communication::my_mpi_rank(communicators.global_comm()), + Core::Communication::my_mpi_rank(communicators.local_comm())); + Core::Communication::barrier(communicators.global_comm()); } else { - Core::Communication::barrier(arguments.comms.global_comm()); + Core::Communication::barrier(communicators.global_comm()); 
printf("processor %d finished normally\n", - Core::Communication::my_mpi_rank(arguments.comms.local_comm())); + Core::Communication::my_mpi_rank(communicators.local_comm())); } } @@ -455,27 +615,27 @@ int main(int argc, char* argv[]) return (0); } -void run(CommandlineArguments& arguments) +void run(CommandlineArguments& cli_args, Core::Communication::Communicators& communicators) { - parse_commandline_arguments(arguments); + update_io_identifiers(cli_args, communicators.group_id()); /* input phase, input of all information */ global_legacy_module_callbacks().RegisterParObjectTypes(); double t0 = walltime_in_seconds(); // and now the actual reading - Core::IO::InputFile input_file = setup_input_file(arguments.comms.local_comm()); - input_file.read(arguments.input_file_name); - setup_global_problem(input_file, arguments); + Core::IO::InputFile input_file = setup_input_file(communicators.local_comm()); + input_file.read(cli_args.input_file_name); + setup_global_problem(input_file, cli_args, communicators); // we wait till all procs are here. Otherwise a hang up might occur where // one proc ended with FOUR_C_THROW but other procs were not finished and waited... // we also want to have the printing above being finished. 
- Core::Communication::barrier(arguments.comms.local_comm()); + Core::Communication::barrier(communicators.local_comm()); const double ti = walltime_in_seconds() - t0; - if (Core::Communication::my_mpi_rank(arguments.comms.global_comm()) == 0) + if (Core::Communication::my_mpi_rank(communicators.global_comm()) == 0) { Core::IO::cout << "\nTotal wall time for INPUT: " << std::setw(10) << std::setprecision(3) << std::scientific << ti << " sec \n\n"; @@ -486,10 +646,10 @@ void run(CommandlineArguments& arguments) entrypoint_switch(); - write_timemonitor(arguments.comms.local_comm()); + write_timemonitor(communicators.local_comm()); const double tc = walltime_in_seconds() - t0; - if (Core::Communication::my_mpi_rank(arguments.comms.global_comm()) == 0) + if (Core::Communication::my_mpi_rank(communicators.global_comm()) == 0) { Core::IO::cout << "\nTotal wall time for CALCULATION: " << std::setw(10) << std::setprecision(3) << std::scientific << tc << " sec \n\n"; diff --git a/cmake/configure/configure_CLI11.cmake b/cmake/configure/configure_CLI11.cmake new file mode 100644 index 00000000000..12dd8a8cc83 --- /dev/null +++ b/cmake/configure/configure_CLI11.cmake @@ -0,0 +1,44 @@ +# This file is part of 4C multiphysics licensed under the +# GNU Lesser General Public License v3.0 or later. +# +# See the LICENSE.md file in the top-level for license information. 
+# +# SPDX-License-Identifier: LGPL-3.0-or-later + +message(STATUS "Fetch content for CLI11") +fetchcontent_declare( + CLI11 + GIT_REPOSITORY https://github.com/CLIUtils/CLI11.git + GIT_TAG bfffd37e1f804ca4fae1caae106935791696b6a9 # version 2.6.1 + ) + +# Temporarily change project name to trick CLI11 into believing it is the main project +# This is required because CLI11 only sets up the install rules when it is the main project +set(_old ${CMAKE_PROJECT_NAME}) +set(CMAKE_PROJECT_NAME "CLI11") + +set(CLI11_INSTALL + ON + CACHE BOOL "" + ) +set(CLI11_BUILD_DOCS + OFF + CACHE BOOL "" + ) +set(CLI11_BUILD_TESTS + OFF + CACHE BOOL "" + ) +set(CLI11_BUILD_EXAMPLES + OFF + CACHE BOOL "" + ) + +fetchcontent_makeavailable(CLI11) + +set(CMAKE_PROJECT_NAME "${_old}") + +set(FOUR_C_CLI11_ROOT "${CMAKE_INSTALL_PREFIX}") + +four_c_add_external_dependency(four_c_all_enabled_external_dependencies CLI11::CLI11) +four_c_remember_variable_for_install(FOUR_C_CLI11_ROOT) diff --git a/cmake/functions/four_c_testing_functions.cmake b/cmake/functions/four_c_testing_functions.cmake index 31222ec5da7..3f061b2a4dd 100644 --- a/cmake/functions/four_c_testing_functions.cmake +++ b/cmake/functions/four_c_testing_functions.cmake @@ -422,7 +422,7 @@ function(four_c_test_restart) set(name_of_test "${_parsed_BASED_ON}-restart_${_parsed_RESTART_STEP}-p${_parsed_NP}") get_test_property(${_parsed_BASED_ON} _internal_INPUT_FILE test_file_full_path) get_test_property(${_parsed_BASED_ON} _internal_OUTPUT_DIR test_directory) - set(restart_arguments "restart=${_parsed_RESTART_STEP}") + set(restart_arguments "--restart=${_parsed_RESTART_STEP}") else() # Restart from a different testfile set(name_of_test @@ -431,7 +431,7 @@ function(four_c_test_restart) set(test_file_full_path "${PROJECT_SOURCE_DIR}/tests/input_files/${_parsed_TEST_FILE}") set(test_directory ${PROJECT_BINARY_DIR}/framework_test_output/${name_of_test}) get_test_property(${_parsed_BASED_ON} _internal_OUTPUT_DIR base_directory) - 
set(restart_arguments "restartfrom=${base_directory}/xxx restart=${_parsed_RESTART_STEP}") + set(restart_arguments "--restartfrom=${base_directory}/xxx --restart=${_parsed_RESTART_STEP}") if(NOT EXISTS ${test_file_full_path}) message(FATAL_ERROR "Test source file ${test_file_full_path} does not exist") @@ -570,7 +570,7 @@ function(four_c_test_nested_parallelism name_of_input_file_1 name_of_input_file_ NAME ${name_of_input_file_1}-nestedPar COMMAND bash -c - "mkdir -p ${test_directory} && ${MPIEXEC_EXECUTABLE} ${_mpiexec_all_args_for_testing} -np 3 $ -ngroup=2 -glayout=1,2 -nptype=separateInputFiles ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_1} ${test_directory}/xxx ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_2} ${test_directory}/xxxAdditional" + "mkdir -p ${test_directory} && ${MPIEXEC_EXECUTABLE} ${_mpiexec_all_args_for_testing} -np 3 $ --ngroup=2 --glayout=1,2 --nptype=separateInputFiles ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_1} ${test_directory}/xxx ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_2} ${test_directory}/xxxAdditional" ) require_fixture(${name_of_input_file_1}-nestedPar test_cleanup) @@ -583,7 +583,7 @@ function(four_c_test_nested_parallelism name_of_input_file_1 name_of_input_file_ NAME ${name_of_input_file_1}-nestedPar-restart COMMAND bash -c - "${MPIEXEC_EXECUTABLE} ${_mpiexec_all_args_for_testing} -np 3 $ -ngroup=2 -glayout=1,2 -nptype=separateInputFiles ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_1} ${test_directory}/xxx restart=${restart_step} ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_2} ${test_directory}/xxxAdditional restart=${restart_step}" + "${MPIEXEC_EXECUTABLE} ${_mpiexec_all_args_for_testing} -np 3 $ --ngroup=2 --glayout=1,2 --nptype=separateInputFiles ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_1} ${test_directory}/xxx ${PROJECT_SOURCE_DIR}/tests/input_files/${name_of_input_file_2} ${test_directory}/xxxAdditional 
--restart=${restart_step},${restart_step}" ) require_fixture( diff --git a/cmake/setup_install.cmake b/cmake/setup_install.cmake index 57fff241d53..d9e90640fae 100644 --- a/cmake/setup_install.cmake +++ b/cmake/setup_install.cmake @@ -78,6 +78,7 @@ _add_dependency_to_config(ryml) _add_dependency_to_config(magic_enum) _add_dependency_to_config(ZLIB) _add_dependency_to_config(pybind11) +_add_dependency_to_config(CLI11) # install the Find modules install( diff --git a/cmake/templates/4CConfig.cmake.in b/cmake/templates/4CConfig.cmake.in index 9315bccdc28..cae5848b1c4 100644 --- a/cmake/templates/4CConfig.cmake.in +++ b/cmake/templates/4CConfig.cmake.in @@ -82,6 +82,10 @@ if(FOUR_C_WITH_MAGIC_ENUM) find_package(magic_enum CONFIG HINTS ${FOUR_C_MAGIC_ENUM_ROOT}) endif() +if(FOUR_C_WITH_CLI11) + find_package(CLI11 CONFIG HINTS ${FOUR_C_CLI11_ROOT}) +endif() + if(FOUR_C_WITH_PYBIND11) find_package(pybind11 CONFIG HINTS ${FOUR_C_PYBIND11_ROOT}) endif() diff --git a/doc/documentation/src/analysis_guide_templates/4Csimulation.rst b/doc/documentation/src/analysis_guide_templates/4Csimulation.rst index 5c5d2a8fcb6..43de884ec3a 100755 --- a/doc/documentation/src/analysis_guide_templates/4Csimulation.rst +++ b/doc/documentation/src/analysis_guide_templates/4Csimulation.rst @@ -55,10 +55,10 @@ given on the command line: :: - ./4C [restartfrom=] restart= + ./4C [--restartfrom=] --restart= Here, one has to provide the step, at which the restart is started from the previous simulation. -If the parameter ``restartfrom`` is given, the initial configuration is read from this file, +If the parameter ``--restartfrom`` is given, the initial configuration is read from this file, otherwise it is read from ````. In the latter case the filename of the new output is the same with an appended number, e.g., ``outfile-1``. Note that the value for ``step`` must be given in the file ``.control`` in one of the step lines: ``step = ``. 
diff --git a/doc/documentation/src/developer_guide/debugging_profiling.rst b/doc/documentation/src/developer_guide/debugging_profiling.rst index 30cddda2f13..8be4d825fca 100644 --- a/doc/documentation/src/developer_guide/debugging_profiling.rst +++ b/doc/documentation/src/developer_guide/debugging_profiling.rst @@ -183,7 +183,7 @@ In addition to the usual |FOURC| output, ``valgrind`` writes output for each mpi Follow the steps as described below:: mpirun -np /<4C-execdir>/4C - mpirun -np valgrind --tool=callgrind /<4C-execdir>/4C restart= + mpirun -np valgrind --tool=callgrind /<4C-execdir>/4C --restart= Examine results with ``kcachegrind`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/documentation/src/installation/installation.rst b/doc/documentation/src/installation/installation.rst index 6fef5d4e4b4..c9df3873891 100644 --- a/doc/documentation/src/installation/installation.rst +++ b/doc/documentation/src/installation/installation.rst @@ -389,8 +389,8 @@ They have to be placed in .vscode/launch.json in the configurations-list. 
"args": [ "/path/to/inputfile", "<4C-problemdir>/xxxx" - "restart=1", - "restartfrom=<4C-problemdir>/xxx" + "--restart=1", + "--restartfrom=<4C-problemdir>/xxx" ], "cwd": "<4C-problemdir>", "setupCommands": [ diff --git a/src/core/comm/src/4C_comm_utils.cpp b/src/core/comm/src/4C_comm_utils.cpp index 76f362f2838..2a886f1a059 100644 --- a/src/core/comm/src/4C_comm_utils.cpp +++ b/src/core/comm/src/4C_comm_utils.cpp @@ -13,6 +13,7 @@ #include "4C_linalg_transfer.hpp" #include "4C_linalg_utils_densematrix_communication.hpp" #include "4C_linalg_utils_sparse_algebra_manipulation.hpp" +#include "4C_utils_exceptions.hpp" #include #include @@ -26,7 +27,7 @@ namespace Core::Communication /*----------------------------------------------------------------------* | create communicator ghamm 02/12 | *----------------------------------------------------------------------*/ - Communicators create_comm(std::vector argv) + Communicators create_comm(CommConfig& config) { // for coupled simulations: color = 1 for 4C and color = 0 for other programs // so far: either nested parallelism within 4C or coupling with further @@ -37,199 +38,84 @@ namespace Core::Communication MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Comm_size(MPI_COMM_WORLD, &size); int color = 0; - int ngroup = 1; - NestedParallelismType npType = NestedParallelismType::no_nested_parallelism; - - // parse command line and separate configuration arguments - std::vector conf; - for (std::size_t i = 1; i < argv.size(); i++) + if (config.ngroup > 1) { - std::string temp = argv[i]; - if (temp.substr(0, 1) == "-") - { - conf.push_back(argv[i]); - } - } - - // grouplayout will be filled accordingly to the user given input - std::vector grouplayout; - bool ngroupisset = false; - bool nptypeisset = false; - for (std::size_t i = 0; i < conf.size(); i++) - { - // fill std::string with current argument - std::string argument(conf[i]); - //---------------------------------------------------------------- - // determine number of 
groups and the proc distribution - //---------------------------------------------------------------- - if (argument.substr(0, 8) == "-ngroup=") - { - ngroupisset = true; - ngroup = atoi(argument.substr(8, std::string::npos).c_str()); - - // read out argument after ngroup= - std::string glayout; - if (i + 1 < conf.size()) - glayout = conf[i + 1]; - else - glayout = "dummy"; - - // case with given group layout - if (glayout.substr(0, 9) == "-glayout=") - { - glayout = glayout.substr(9, std::string::npos).c_str(); - - std::istringstream layout(glayout); - int sumprocs = 0; - - while (layout) - { - std::string s; - if (!getline(layout, s, ',')) break; - grouplayout.push_back(atoi(s.c_str())); - sumprocs += atoi(s.c_str()); - } - - // final check whether a correct group layout is specified - if (ngroup != int(grouplayout.size()) or size != sumprocs or ngroup < 1) - { - if (myrank == (size - 1)) // myrank == 0 is eventually not within 4C (i.e. coupling - // to external codes) - { - printf( - "\n\nNumber of procs (%d) and number of groups (%d) does not match given group " - "layout! \n", - size, ngroup); - printf("Example mpirun -np 4 ./4C -ngroup=2 -glayout=1,3 \n"); - printf("Try again!\n"); - } - MPI_Finalize(); - exit(EXIT_FAILURE); - } - } - // case without given group layout - else - { - if (myrank == (size - 1)) // myrank == 0 is eventually not within 4C (i.e. coupling to - // external codes) - { - printf( - "\n\n\nINFO: Group layout is not specified. 
Default is equal size of the " - "groups.\n"); - } - if ((size % ngroup) != 0) - { - if (myrank == (size - 1)) - { - printf( - "\n\nNumber of processors (%d) cannot be divided by the number of groups (%d)!\n", - size, ngroup); - printf("Try again!\n"); - } - MPI_Finalize(); - exit(EXIT_FAILURE); - } - - // equal size of the groups - for (int k = 0; k < ngroup; k++) - { - grouplayout.push_back(size / ngroup); - } - } - - // the color is specified: procs are distributed to the groups with increasing global rank - color = -1; - int gsum = 0; - do - { - color++; - gsum += grouplayout[color]; - } while (gsum <= myrank); - -#ifdef FOUR_C_ENABLE_ASSERTIONS - std::cout << "Nested parallelism layout: Global rank: " << myrank - << " is in group: " << color << std::endl; -#endif - - } // end if (argument.substr( 0, 8 ) == "-ngroup=") - - //---------------------------------------------------------------- - // nested parallelism type - //---------------------------------------------------------------- - else if (argument.substr(0, 8) == "-nptype=") + if (config.group_layout.size() == 0) { - nptypeisset = true; - argument = argument.substr(8, std::string::npos).c_str(); - if (argument == "everyGroupReadInputFile") - npType = NestedParallelismType::every_group_read_input_file; - else if (argument == "separateInputFiles") - npType = NestedParallelismType::separate_input_files; - else if (argument == "nestedMultiscale") - { - npType = NestedParallelismType::separate_input_files; - // the color is specified: only two groups and group one (macro problem) is distributed - // over all processors - color = -1; - if (myrank % (int)(size / grouplayout[0]) == 0 and - myrank < (grouplayout[0] * (int)(size / grouplayout[0]))) - color = 0; - else - color = 1; - } - else if (argument.substr(0, 9) == "diffgroup") + if (myrank == (size - 1)) // myrank == 0 is eventually not within 4C (i.e. 
coupling to + // external codes) { - npType = NestedParallelismType::no_nested_parallelism; - ngroup = 2; - color = atoi(argument.substr(9, std::string::npos).c_str()); + printf( + "\n\n\nINFO: Group layout is not specified. Default is equal size of the " + "groups.\n"); } - else + if ((size % config.ngroup) != 0) { - if (myrank == (size - 1)) // myrank == 0 is eventually not within 4C (i.e. coupling to - // external codes) + if (myrank == (size - 1)) { printf( - "\n\nOnly everyGroupReadInputFile and separateInputFiles is available for " - "nptype= \n\n"); + "\n\nNumber of processors (%d) cannot be divided by the number of groups (%d)!\n", + size, config.ngroup); printf("Try again!\n"); } MPI_Finalize(); exit(EXIT_FAILURE); } + + // equal size of the groups + for (int k = 0; k < config.ngroup; k++) + { + config.group_layout.push_back(size / config.ngroup); + } } - //---------------------------------------------------------------- - // check for valid arguments that can be used in 4C.cpp - //---------------------------------------------------------------- - else if ((argument.substr(0, 9) != "-glayout=") and (argument.substr(0, 2) != "-v") and - (argument.substr(0, 2) != "-h") and (argument.substr(0, 6) != "--help") and - (argument.substr(0, 2) != "-p") and (argument.substr(0, 12) != "--parameters") and - (argument.substr(0, 2) != "-d") and (argument.substr(0, 9) != "--datfile") and - (argument.substr(0, 13) != "--interactive")) + int sum_layout = 0; + for (size_t k = 0; k < config.group_layout.size(); k++) { - printf( - "\n\n You have specified an argument ( %s ) for 4C starting with a \"-\" that is not " - "valid!\n", - argument.c_str()); - printf("Please refer to ./4C --help and try again!\n"); + sum_layout += config.group_layout[k]; + } + if (sum_layout != size) + { + if (myrank == (size - 1)) + { + printf("Error: Group layout sum (%d) does not equal total MPI ranks (%d).\n", sum_layout, + size); + } MPI_Finalize(); exit(EXIT_FAILURE); } - } // end for(int i=0; 
i 1) and (ngroupisset == false or nptypeisset == false)) + if (config.np_type == NestedParallelismType::nested_multiscale) { - if (myrank == - (size - 1)) // myrank == 0 is eventually not within 4C (i.e. coupling to external codes) - { - printf( - "\n\nAt least -nptype= and -ngroup= must be specified for nested parallelism. -glayout " - "is optional (behind -ngroup). \n\n"); - printf("Try again!\n"); - } - MPI_Finalize(); - exit(EXIT_FAILURE); + // the color is specified: only two groups and group one (macro problem) is distributed + // over all processors + color = -1; + if (myrank % (int)(size / config.group_layout[0]) == 0 and + myrank < (config.group_layout[0] * (int)(size / config.group_layout[0]))) + color = 0; + else + color = 1; + } + else if (config.np_type == NestedParallelismType::no_nested_parallelism && + config.diffgroup != -1) + { + config.ngroup = 2; + color = config.diffgroup; } // do the splitting of the communicator @@ -239,7 +125,7 @@ namespace Core::Communication // the global communicator is created MPI_Comm gcomm; - if (ngroup == 1) + if (config.ngroup == 1) { gcomm = lcomm; } @@ -267,10 +153,10 @@ namespace Core::Communication } // nested parallelism group is created - Communicators communicators(color, ngroup, lpidgpid, lcomm, gcomm, npType); + Communicators communicators(color, config.ngroup, lpidgpid, lcomm, gcomm, config.np_type); // info for the nested parallelism user - if (Core::Communication::my_mpi_rank(lcomm) == 0 && ngroup > 1) + if (Core::Communication::my_mpi_rank(lcomm) == 0 && config.ngroup > 1) printf("Nested parallelism layout: Group %d has %d processors.\n ", color, Core::Communication::num_mpi_ranks(lcomm)); fflush(stdout); diff --git a/src/core/comm/src/4C_comm_utils.hpp b/src/core/comm/src/4C_comm_utils.hpp index 4308ab729f7..c10ee24174a 100644 --- a/src/core/comm/src/4C_comm_utils.hpp +++ b/src/core/comm/src/4C_comm_utils.hpp @@ -31,27 +31,39 @@ namespace Core::Communication { every_group_read_input_file, 
separate_input_files, + nested_multiscale, no_nested_parallelism }; + /** + * Configuration for communicators. + */ + struct CommConfig + { + int ngroup = 1; + std::vector group_layout; + NestedParallelismType np_type; + int diffgroup = -1; + }; + //! create a local and a global communicator for the problem - Communicators create_comm(std::vector argv); + Communicators create_comm(CommConfig& config); /*! \brief debug routine to compare vectors from different parallel 4C runs * - * You can add Core::Communication::AreDistributedVectorsIdentical in your code which will lead to - * a comparison of the given vector for different executables and/or configurations. Command for - * using this feature: + * You can add Core::Communication::are_distributed_vectors_identical in your code which will lead + * to a comparison of the given vector for different executables and/or configurations. Command + * for using this feature: * @code - * mpirun -np 1 ./4C -nptype=diffgroup0 xxx_set \ - * : -np 3 ./other-4C -nptype=diffgroup1 xxx_par + * mpirun -np 1 ./4C --nptype=diffgroup0 xxx_set \ + * : -np 3 ./other-4C --nptype=diffgroup1 xxx_par * @endcode * * A further nice option is to compare results from different executables used for * running the same simulation. * - * \note You need to add the AreDistributedVectorsIdentical method in both executables at the same - * position in the code + * \note You need to add the are_distributed_vectors_identical method in both executables at the + * same position in the code * * \param communicators (in): communicators containing local and global comm * \param vec (in): vector to compare @@ -64,19 +76,19 @@ namespace Core::Communication /*! 
\brief debug routine to compare sparse matrices from different parallel 4C runs * - * You can add Core::Communication::AreDistributedSparseMatricesIdentical in your code which will - * lead to a comparison of the given sparse matrices for different executables and/or + * You can add Core::Communication::are_distributed_sparse_matrices_identical in your code which + * will lead to a comparison of the given sparse matrices for different executables and/or * configurations. Command for using this feature: * @code - * mpirun -np 1 ./4C -nptype=diffgroup0 xxx_set \ - * : -np 3 ./other-4C -nptype=diffgroup1 xxx_par + * mpirun -np 1 ./4C --nptype=diffgroup0 xxx_set \ + * : -np 3 ./other-4C --nptype=diffgroup1 xxx_par * @endcode * * A further nice option is to compare results from different executables used for * running the same simulation. * - * \note You need to add the AreDistributedSparseMatricesIdentical method in both executables at - * the same position in the code. + * \note You need to add the are_distributed_sparse_matrices_identical method in both executables + * at the same position in the code. 
* * \param communicators (in): communicators containing local and global comm * \param matrix (in): matrix to compare diff --git a/src/core/comm/tests/4C_comm_utils_test.np3.cpp b/src/core/comm/tests/4C_comm_utils_test.np3.cpp index ca4a473f54b..0113f837987 100644 --- a/src/core/comm/tests/4C_comm_utils_test.np3.cpp +++ b/src/core/comm/tests/4C_comm_utils_test.np3.cpp @@ -24,10 +24,13 @@ namespace Core::Communication::Communicators mock_up_communicators() { // mock up for command line to create communicators - std::vector argv{ - "dummyEntryInputFile", "-nptype=separateInputFiles", "-ngroup=2", "-glayout=1,2"}; + Core::Communication::CommConfig config = { + .ngroup = 2, + .group_layout = {1, 2}, + .np_type = Core::Communication::NestedParallelismType::separate_input_files, + }; - return Core::Communication::create_comm(argv); + return Core::Communication::create_comm(config); }; /** diff --git a/src/core/io/src/4C_io_command_line_helpers.cpp b/src/core/io/src/4C_io_command_line_helpers.cpp new file mode 100644 index 00000000000..9e533175579 --- /dev/null +++ b/src/core/io/src/4C_io_command_line_helpers.cpp @@ -0,0 +1,281 @@ +// This file is part of 4C multiphysics licensed under the +// GNU Lesser General Public License v3.0 or later. +// +// See the LICENSE.md file in the top-level for license information. 
+// +// SPDX-License-Identifier: LGPL-3.0-or-later + +#include "4C_config.hpp" + +#include "4C_io_command_line_helpers.hpp" + + +FOUR_C_NAMESPACE_OPEN + + +std::vector> build_io_pairs( + std::vector io_pairs, const std::filesystem::path& primary_input, + const std::string& primary_output) +{ + std::vector> io_pairs_new; + + io_pairs_new.emplace_back(primary_input, primary_output); + + if (!io_pairs.empty()) + { + if (io_pairs.size() % 2 != 0) + { + FOUR_C_THROW("Positional arguments must be provided as pairs: .\n"); + } + for (size_t i = 0; i < io_pairs.size(); i += 2) + io_pairs_new.emplace_back(std::filesystem::path(io_pairs[i]), io_pairs[i + 1]); + } + return io_pairs_new; +} + +using NPT = Core::Communication::NestedParallelismType; +void validate_argument_cross_compatibility(const CommandlineArguments& arguments) +{ + if (!arguments.group_layout.empty()) + { + const int layout_len = static_cast(arguments.group_layout.size()); + if (arguments.n_groups != layout_len) + { + FOUR_C_THROW( + "When --glayout is provided its number of entries must equal --ngroup.\n " + "Example mpirun -np 4 ./4C --ngroup=2 --glayout=1,3 \n"); + } + } + + if (arguments.n_groups > 1 && arguments.nptype == NPT::no_nested_parallelism) + { + FOUR_C_THROW("when --ngroup > 1, a nested parallelism type must be specified via --nptype.\n"); + } + + if (!arguments.parameters) + { + const size_t num_pairs = arguments.io_pairs.size(); + if (arguments.nptype == NPT::no_nested_parallelism || + arguments.nptype == NPT::every_group_read_input_file) + { + if (num_pairs != 1) + { + FOUR_C_THROW( + "when using 'no_nested_parallelism' or 'everyGroupReadInputFile' the " + "number of pairs must be exactly 1.\n"); + } + } + else if (arguments.nptype == NPT::separate_input_files || + arguments.nptype == NPT::nested_multiscale) + { + if (static_cast(num_pairs) != arguments.n_groups) + { + FOUR_C_THROW( + "when using 'separateInputFiles' or 'nestedMultiscale' the number of " + " pairs must equal --ngroup 
{}.\n", + arguments.n_groups); + } + } + } + + if (arguments.nptype != NPT::separate_input_files && + (arguments.restart_per_group.size() > 1 || arguments.restart_identifier_per_group.size() > 1)) + { + FOUR_C_THROW( + "When using --nptype other than 'separateInputFiles', only one restart step and one " + "restartfrom identifier must be given."); + } + + for (size_t i = 0; i < arguments.restart_identifier_per_group.size(); ++i) + { + if (i >= arguments.restart_per_group.size()) + { + FOUR_C_THROW("You need to specify a restart step when using restartfrom."); + } + } +} + +std::vector adapt_legacy_cli_arguments( + const std::vector& args, LegacyCliOptions& legacy_options) +{ + if (args.empty()) return {}; + + std::vector new_args; + new_args.reserve(args.size()); + std::vector> pending_vals(legacy_options.nodash_legacy_names.size()); + + auto warn = [](const std::string& name, const std::string& to) + { + std::cerr << "DEPRECATION WARNING: Legacy argument '" << name << "' has been converted to '" + << to + << "'. Please update your command line arguments. This legacy form will be removed " + "in a future release.\n"; + }; + + auto combine_and_warn = [&warn](std::vector& out_args, const std::string& name, + std::vector& vals) + { + if (!vals.empty()) + { + std::string combined = std::string("--") + name + "="; + for (size_t i = 0; i < vals.size(); ++i) + { + if (i) combined += ","; + combined += vals[i]; + } + out_args.push_back(combined); + warn(name, combined); + vals.clear(); + } + }; + + for (size_t i = 0; i < args.size(); ++i) + { + const std::string& arg = args[i]; + + bool handled = false; + + // Check nodash legacy names first (e.g., "restart=1") and collect values. 
+ for (size_t j = 0; j < legacy_options.nodash_legacy_names.size(); ++j) + { + const std::string& name = legacy_options.nodash_legacy_names[j]; + std::string prefix = name + "="; + if (arg.rfind(prefix, 0) == 0) + { + pending_vals[j].push_back(arg.substr(prefix.size())); + handled = true; + break; + } + } + if (handled) continue; + + // Convert known single-dash legacy options to double-dash (e.g., "-ngroup=2" -> "--ngroup=2"). + for (const auto& name : legacy_options.single_dash_legacy_names) + { + std::string prefix = std::string("-") + name + "="; + if (arg.rfind(prefix, 0) == 0) + { + std::string new_arg = std::string("-") + arg; + warn(arg, new_arg); + new_args.push_back(new_arg); + handled = true; + break; + } + } + if (handled) continue; + + // Already a long option with two dashes: keep as-is + if (arg.rfind("--", 0) == 0) + { + new_args.push_back(arg); + continue; + } + + // Keep everything else unchanged (positional args, -p, -h, etc.) + new_args.push_back(arg); + } + + for (size_t j = 0; j < legacy_options.nodash_legacy_names.size(); ++j) + { + combine_and_warn(new_args, legacy_options.nodash_legacy_names[j], pending_vals[j]); + } + + return new_args; +} + +void update_io_identifiers(CommandlineArguments& arguments, int group) +{ + std::filesystem::path input_filename; + std::string output_file_identifier; + + int restart_input_index = (arguments.nptype == NPT::separate_input_files) ? group : 0; + + arguments.restart = + arguments.restart_per_group.empty() ? 0 : arguments.restart_per_group[restart_input_index]; + std::string restart_file_identifier = + arguments.restart_identifier_per_group.empty() + ? 
"" + : arguments.restart_identifier_per_group[restart_input_index]; + + switch (arguments.nptype) + { + case NPT::no_nested_parallelism: + input_filename = arguments.io_pairs[0].first; + output_file_identifier = arguments.io_pairs[0].second; + if (restart_file_identifier == "") + { + restart_file_identifier = output_file_identifier; + } + break; + case NPT::every_group_read_input_file: + { + input_filename = arguments.io_pairs[0].first; + std::string output_file_identifier_temp = arguments.io_pairs[0].second; + // check whether output_file_identifier includes a dash and in case separate the number at the + // end + size_t pos = output_file_identifier_temp.rfind('-'); + auto extract_number_and_identifier = [](const std::string& str, size_t pos) + { + std::string number_str = str.substr(pos + 1); + std::string identifier = str.substr(0, pos); + int number = 0; + try + { + size_t idx = 0; + number = std::stoi(number_str, &idx); + if (idx != number_str.size()) + { + FOUR_C_THROW("Invalid numeric value in output identifier: '{}'", number_str); + } + } + catch (const std::exception& e) + { + FOUR_C_THROW( + "Failed to parse number in output identifier '{}': {}", number_str, e.what()); + } + return std::make_pair(identifier, number); + }; + if (pos != std::string::npos) + { + auto [identifier, number] = extract_number_and_identifier(output_file_identifier_temp, pos); + output_file_identifier = std::format("{}_group_{}_{}", identifier, group, number); + } + else + { + output_file_identifier = std::format("{}_group_{}", output_file_identifier_temp, group); + } + size_t pos_r = restart_file_identifier.rfind('-'); + if (restart_file_identifier == "") + { + restart_file_identifier = output_file_identifier; + } + else if (pos_r != std::string::npos) + { + auto [identifier, number] = extract_number_and_identifier(restart_file_identifier, pos_r); + restart_file_identifier = std::format("{}_group_{}-{}", identifier, group, number); + } + else + { + restart_file_identifier = 
std::format("{}_group_{}", restart_file_identifier, group); + } + break; + } + case NPT::separate_input_files: + case NPT::nested_multiscale: + input_filename = arguments.io_pairs[group].first; + output_file_identifier = arguments.io_pairs[group].second; + if (restart_file_identifier == "") + { + restart_file_identifier = output_file_identifier; + } + break; + default: + FOUR_C_THROW("-nptype value {} is not valid.", static_cast(arguments.nptype)); + break; + } + arguments.input_file_name = input_filename; + arguments.output_file_identifier = output_file_identifier; + arguments.restart_file_identifier = restart_file_identifier; +} + +FOUR_C_NAMESPACE_CLOSE \ No newline at end of file diff --git a/src/core/io/src/4C_io_command_line_helpers.hpp b/src/core/io/src/4C_io_command_line_helpers.hpp new file mode 100644 index 00000000000..bd2bf212513 --- /dev/null +++ b/src/core/io/src/4C_io_command_line_helpers.hpp @@ -0,0 +1,94 @@ +// This file is part of 4C multiphysics licensed under the +// GNU Lesser General Public License v3.0 or later. +// +// See the LICENSE.md file in the top-level for license information. +// +// SPDX-License-Identifier: LGPL-3.0-or-later + +#ifndef FOUR_C_IO_COMMAND_LINE_HELPERS_HPP +#define FOUR_C_IO_COMMAND_LINE_HELPERS_HPP + +#include "4C_config.hpp" + +#include "4C_comm_utils.hpp" + +#include + +FOUR_C_NAMESPACE_OPEN + +/** + * \brief Structure to hold command line arguments. 
+ */ +struct CommandlineArguments +{ + bool help = false; + int n_groups = 1; + bool parameters = false; + std::vector group_layout = {}; + Core::Communication::NestedParallelismType nptype = + Core::Communication::NestedParallelismType::no_nested_parallelism; + int diffgroup = -1; + int restart = 0; + std::string restart_file_identifier = ""; + std::vector restart_per_group = {}; + std::vector restart_identifier_per_group = {}; + bool interactive = false; + std::vector> io_pairs; + std::filesystem::path input_file_name = ""; + std::string output_file_identifier = ""; +}; + +/** + * \brief Build canonical input/output pairs from positional command line arguments. + * Due to the legacy argument structure, we separate between the primary input and output (first two + * positional arguments) and the rest (io_pairs). The latter are only required when using nested + * parallelism with separate input files. + * \param io_pairs Vector of strings from the command line representing input/output pairs. + * \param primary_input The primary input file name (first positional argument). + * \param primary_output The primary output file identifier (second positional argument). + * \return A vector of pairs of input file paths and output file identifiers. + */ +std::vector> build_io_pairs( + std::vector io_pairs, const std::filesystem::path& primary_input, + const std::string& primary_output); + +/** + * \brief Validates cross-compatibility of command line options. + * \param arguments The parsed command line arguments. + */ +void validate_argument_cross_compatibility(const CommandlineArguments& arguments); + +/** + * \brief Structure to hold legacy CLI option names. + */ +struct LegacyCliOptions +{ + std::vector single_dash_legacy_names; + std::vector nodash_legacy_names; +}; +/** + * \brief Adapt legacy command line arguments. + * This function converts legacy single-dash options (e.g. 
"-ngroup=2") into + * their new form ("--ngroup=2") and combines legacy dashless positional + * options (e.g. "restart=1") into comma-separated lists ("--restart=1,2"). + * \param args Input arguments (no program-name expected). + * \param legacy_options Structure containing names of legacy options: + * - single_dash_legacy_names: Options that used a single dash and + * should be converted to the double-dash form (e.g. {"ngroup", "glayout"}). + * - nodash_legacy_names: Legacy dashless options that should be + * collected/combined (e.g. {"restart", "restartfrom"}). + * \return Sanitized vector of arguments. + */ +std::vector adapt_legacy_cli_arguments( + const std::vector& args, LegacyCliOptions& legacy_options); + +/** + * \brief Updates input/output identifiers based on group id and nested parallelism type. + * \param arguments The command line arguments to update. + * \param group The group id of the current process. + */ +void update_io_identifiers(CommandlineArguments& arguments, int group); + +FOUR_C_NAMESPACE_CLOSE + +#endif \ No newline at end of file diff --git a/src/core/io/tests/4C_io_command_line_helpers_test.cpp b/src/core/io/tests/4C_io_command_line_helpers_test.cpp new file mode 100644 index 00000000000..0d7a5abdcfe --- /dev/null +++ b/src/core/io/tests/4C_io_command_line_helpers_test.cpp @@ -0,0 +1,263 @@ +// This file is part of 4C multiphysics licensed under the +// GNU Lesser General Public License v3.0 or later. +// +// See the LICENSE.md file in the top-level for license information. 
+// +// SPDX-License-Identifier: LGPL-3.0-or-later + +#include + +#include "4C_io_command_line_helpers.hpp" + +#include "4C_unittest_utils_assertions_test.hpp" + +#include + +namespace +{ + using namespace FourC; + + TEST(BuildIoPairs, PrimaryOnly) + { + std::vector extra = {}; + auto pairs = build_io_pairs(extra, std::filesystem::path("prim_in.4C.yaml"), "prim_out"); + ASSERT_EQ(pairs.size(), 1); + EXPECT_EQ(pairs[0].first, std::filesystem::path("prim_in.4C.yaml")); + EXPECT_EQ(pairs[0].second, "prim_out"); + } + + TEST(BuildIoPairs, WithAdditionalPairs) + { + std::vector extra = {"inB.4C.yaml", "outB", "inC.4C.yaml", "outC"}; + auto pairs = build_io_pairs(extra, std::filesystem::path("prim_in.4C.yaml"), "prim_out"); + ASSERT_EQ(pairs.size(), 3); + EXPECT_EQ(pairs[0].first, std::filesystem::path("prim_in.4C.yaml")); + EXPECT_EQ(pairs[0].second, "prim_out"); + EXPECT_EQ(pairs[1].first, std::filesystem::path("inB.4C.yaml")); + EXPECT_EQ(pairs[1].second, "outB"); + EXPECT_EQ(pairs[2].first, std::filesystem::path("inC.4C.yaml")); + EXPECT_EQ(pairs[2].second, "outC"); + } + + TEST(ValidateArgumentCrossCompatibility, ValidArgumentsNoThrow) + { + CommandlineArguments args; + args.n_groups = 2; + args.nptype = Core::Communication::NestedParallelismType::separate_input_files; + args.io_pairs = {{std::filesystem::path("a"), "A"}, {std::filesystem::path("b"), "B"}}; + // should not throw for valid configuration + EXPECT_NO_THROW(validate_argument_cross_compatibility(args)); + } + + TEST(ValidateArgumentCrossCompatibility, ThrowsIfTooManyIoPairs) + { + CommandlineArguments args; + args.group_layout = {2, 2}; + args.n_groups = 3; // mismatch + FOUR_C_EXPECT_THROW_WITH_MESSAGE( + validate_argument_cross_compatibility(args), Core::Exception, "When --glayout is provided"); + } + + TEST(ValidateArgumentCrossCompatibility, ThrowsIfNoNptypeWithMultipleGroups) + { + CommandlineArguments args; + args.n_groups = 2; + args.nptype = 
Core::Communication::NestedParallelismType::no_nested_parallelism; // invalid + FOUR_C_EXPECT_THROW_WITH_MESSAGE(validate_argument_cross_compatibility(args), Core::Exception, + "--ngroup > 1, a nested parallelism type must be specified"); + } + + TEST(ValidateArgumentCrossCompatibility, ThrowsIfTooManyIoPairsForNoNestedParallelism) + { + CommandlineArguments args; + args.nptype = Core::Communication::NestedParallelismType::no_nested_parallelism; + args.io_pairs = {{std::filesystem::path("a"), "A"}, {std::filesystem::path("b"), "B"}}; + FOUR_C_EXPECT_THROW_WITH_MESSAGE(validate_argument_cross_compatibility(args), Core::Exception, + "number of pairs must be exactly 1"); + } + + TEST(ValidateArgumentCrossCompatibility, ThrowsIfTooFewIoPairsForSeparateInputFiles) + { + CommandlineArguments args; + args.n_groups = 2; + args.nptype = Core::Communication::NestedParallelismType::separate_input_files; + args.io_pairs = {{std::filesystem::path("a"), "A"}}; + // Not enough io pairs for number of groups + FOUR_C_EXPECT_THROW_WITH_MESSAGE(validate_argument_cross_compatibility(args), Core::Exception, + " pairs must equal --ngroup 2."); + } + + TEST(ValidateArgumentCrossCompatibility, ThrowsIfNoNestedParallelismWithTooManyRestarts) + { + CommandlineArguments args; + args.n_groups = 1; + args.nptype = Core::Communication::NestedParallelismType::no_nested_parallelism; + args.io_pairs = {{std::filesystem::path("a"), "A"}}; + args.restart_per_group = {1, 2}; // only one restart for no nested parallelism + FOUR_C_EXPECT_THROW_WITH_MESSAGE(validate_argument_cross_compatibility(args), Core::Exception, + "only one restart step and one"); + + args.restart_per_group = {1}; + args.restart_identifier_per_group = { + "a", "b"}; // only one restart identifier for no nested parallelism + FOUR_C_EXPECT_THROW_WITH_MESSAGE(validate_argument_cross_compatibility(args), Core::Exception, + "only one restart step and one"); + } + + TEST(ValidateArgumentCrossCompatibility, 
ThrowsIfRestartFromWithoutRestart) + { + CommandlineArguments args; + args.n_groups = 1; + args.nptype = Core::Communication::NestedParallelismType::no_nested_parallelism; + args.io_pairs = {{std::filesystem::path("a"), "A"}}; + args.restart_per_group = {}; // empty + args.restart_identifier_per_group = {"prefix1"}; + FOUR_C_EXPECT_THROW_WITH_MESSAGE(validate_argument_cross_compatibility(args), Core::Exception, + "You need to specify a restart step"); + } + + TEST(AdaptLegacyCliArguments, ConvertsAndCombines) + { + std::vector in = {"-ngroup=2", "-glayout=3,2", "-nptype=separateInputFiles", + "inp1", "out1", "restart=1", "restartfrom=xxx", "inp2", "out2", "restart=2", + "restartfrom=yyy"}; + + + LegacyCliOptions legacy_options = {.single_dash_legacy_names = {"ngroup", "glayout", "nptype"}, + .nodash_legacy_names = {"restart", "restartfrom"}}; + std::vector out = adapt_legacy_cli_arguments(in, legacy_options); + + // Expected: converted --ngroup, combined --restart, combined --restartfrom + std::vector expect = {"--ngroup=2", "--glayout=3,2", "--nptype=separateInputFiles", + "inp1", "out1", "inp2", "out2", "--restart=1,2", "--restartfrom=xxx,yyy"}; + EXPECT_EQ(out, expect); + } + + std::vector> single_io = { + {std::filesystem::path("inputA.4C.yaml"), "outA"}}; + CommandlineArguments args_single = { + .nptype = Core::Communication::NestedParallelismType::no_nested_parallelism, + .restart_per_group = std::vector{}, // empty + .restart_identifier_per_group = std::vector{}, + .io_pairs = single_io}; + + TEST(UpdateIoIdentifiers, NoNestedParallelismNoRestart) + { + update_io_identifiers(args_single, 0); + + EXPECT_EQ(args_single.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args_single.output_file_identifier, "outA"); + EXPECT_EQ(args_single.restart, 0); + EXPECT_EQ(args_single.restart_file_identifier, "outA"); + } + + TEST(UpdateIoIdentifiers, NoNestedParallelismWithRestart) + { + CommandlineArguments args = args_single; + 
args.restart_per_group = std::vector{5}; + args.restart_identifier_per_group = std::vector{"restart_prefix"}; + + update_io_identifiers(args, 0); + + EXPECT_EQ(args.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args.output_file_identifier, "outA"); + EXPECT_EQ(args.restart, 5); + EXPECT_EQ(args.restart_file_identifier, "restart_prefix"); + } + + TEST(UpdateIoIdentifiers, EveryGroupReadInputFileNoRestart) + { + CommandlineArguments args = args_single; + args.nptype = Core::Communication::NestedParallelismType::every_group_read_input_file; + update_io_identifiers(args, 1); + + EXPECT_EQ(args.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args.output_file_identifier, "outA_group_1"); + EXPECT_EQ(args.restart, 0); + EXPECT_EQ(args.restart_file_identifier, "outA_group_1"); + } + + TEST(UpdateIoIdentifiers, EveryGroupReadInputFileWithRestart) + { + CommandlineArguments args = args_single; + args.nptype = Core::Communication::NestedParallelismType::every_group_read_input_file; + args.restart_per_group = std::vector{10}; + args.restart_identifier_per_group = std::vector{"restart_prefix"}; + + update_io_identifiers(args, 0); + + EXPECT_EQ(args.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args.output_file_identifier, "outA_group_0"); + EXPECT_EQ(args.restart, 10); + EXPECT_EQ(args.restart_file_identifier, "restart_prefix_group_0"); + } + + TEST(UpdateIoIdentifiers, EveryGroupReadInputFileWithRestartAndNumber) + { + CommandlineArguments args = args_single; + args.io_pairs[0].second = "outA-42"; + args.nptype = Core::Communication::NestedParallelismType::every_group_read_input_file; + args.restart_per_group = std::vector{10}; + args.restart_identifier_per_group = std::vector{"restart_prefix-43"}; + + update_io_identifiers(args, 0); + + EXPECT_EQ(args.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args.output_file_identifier, "outA_group_0_42"); + EXPECT_EQ(args.restart, 10); + 
EXPECT_EQ(args.restart_file_identifier, "restart_prefix_group_0-43"); + } + + std::vector> double_io = { + {std::filesystem::path("inputA.4C.yaml"), "outA"}, + {std::filesystem::path("inputB.4C.yaml"), "outB"}}; + CommandlineArguments args_double = { + .nptype = Core::Communication::NestedParallelismType::separate_input_files, + .restart_per_group = std::vector{}, // empty + .restart_identifier_per_group = std::vector{}, + .io_pairs = double_io}; + + TEST(UpdateIoIdentifiers, SeparateInputFilesNoRestart) + { + CommandlineArguments args0 = args_double; + update_io_identifiers(args0, 0); + + EXPECT_EQ(args0.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args0.output_file_identifier, "outA"); + EXPECT_EQ(args0.restart, 0); + EXPECT_EQ(args0.restart_file_identifier, "outA"); + + CommandlineArguments args1 = args_double; + update_io_identifiers(args1, 1); + + EXPECT_EQ(args1.input_file_name, std::filesystem::path("inputB.4C.yaml")); + EXPECT_EQ(args1.output_file_identifier, "outB"); + EXPECT_EQ(args1.restart, 0); + EXPECT_EQ(args1.restart_file_identifier, "outB"); + } + + TEST(UpdateIoIdentifiers, SeparateInputFilesWithRestart) + { + CommandlineArguments args0 = args_double; + args0.restart_per_group = std::vector{3, 7}; + args0.restart_identifier_per_group = std::vector{"restartA", "restartB"}; + + update_io_identifiers(args0, 0); + + EXPECT_EQ(args0.input_file_name, std::filesystem::path("inputA.4C.yaml")); + EXPECT_EQ(args0.output_file_identifier, "outA"); + EXPECT_EQ(args0.restart, 3); + EXPECT_EQ(args0.restart_file_identifier, "restartA"); + + CommandlineArguments args1 = args_double; + args1.restart_per_group = std::vector{3, 7}; + args1.restart_identifier_per_group = std::vector{"restartA", "restartB"}; + + update_io_identifiers(args1, 1); + + EXPECT_EQ(args1.input_file_name, std::filesystem::path("inputB.4C.yaml")); + EXPECT_EQ(args1.output_file_identifier, "outB"); + EXPECT_EQ(args1.restart, 7); + EXPECT_EQ(args1.restart_file_identifier, 
"restartB"); + } +} // namespace \ No newline at end of file diff --git a/tests/benchmark_tests/solid_3D_ele/4C_solid_3D_ele_evaluation_benchmark.cpp b/tests/benchmark_tests/solid_3D_ele/4C_solid_3D_ele_evaluation_benchmark.cpp index b2659930dad..beecf165ef6 100644 --- a/tests/benchmark_tests/solid_3D_ele/4C_solid_3D_ele_evaluation_benchmark.cpp +++ b/tests/benchmark_tests/solid_3D_ele/4C_solid_3D_ele_evaluation_benchmark.cpp @@ -172,7 +172,8 @@ template set_communicators(Core::Communication::create_comm({})); + Core::Communication::CommConfig config; + Global::Problem::instance()->set_communicators(Core::Communication::create_comm(config)); setup_material_in_global_problem(); auto dis = make_discretization(ele_tech, kinem_type); diff --git a/tests/input_files/sohex8_multiscale_macro.4C.yaml b/tests/input_files/sohex8_multiscale_macro.4C.yaml index cfc5115ee99..4abdd086205 100644 --- a/tests/input_files/sohex8_multiscale_macro.4C.yaml +++ b/tests/input_files/sohex8_multiscale_macro.4C.yaml @@ -4,12 +4,12 @@ TITLE: - corresponding file name. The micro file (here sohex8_multiscale_micro.mat.4C.yaml) - is expected in the same directory. 
- The test case can either run as usual: - - 'mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro.4C.yaml output_name' + - "mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro.4C.yaml output_name" - or in nested parallelism mode via: - - 'mpirun -np 5 ./4C -nptype=separateInputFiles -ngroup=2 -glayout=2,3 /path/to/sohex8_multiscale_macro.4C.yaml - output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name' - - The nested mode command uses 2 procs (see 1st argument in -glayout) to - - solve the macro scale and 3 procs (see 2nd argument in -glayout) to support + - "mpirun -np 5 ./4C --nptype=separateInputFiles --ngroup=2 --glayout=2,3 /path/to/sohex8_multiscale_macro.4C.yaml + output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name" + - The nested mode command uses 2 procs (see 1st argument in --glayout) to + - solve the macro scale and 3 procs (see 2nd argument in --glayout) to support - while solving the microscale. The sum of the two proc numbers must - correspond to the argument -np 5 of mpirun. The additional input file - (sohex8_multiscale_npsupport.4C.yaml) for the supporting procs does not have diff --git a/tests/input_files/sohex8_multiscale_macro_2micro.4C.yaml b/tests/input_files/sohex8_multiscale_macro_2micro.4C.yaml index a36196f4df6..29985a4c709 100644 --- a/tests/input_files/sohex8_multiscale_macro_2micro.4C.yaml +++ b/tests/input_files/sohex8_multiscale_macro_2micro.4C.yaml @@ -6,12 +6,12 @@ TITLE: - elements. The microscale files (*.mat.4C.yaml) are expected in the same directory - as the macro file. 
- The test case can either run as usual: - - 'mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro_2micro.4C.yaml output_name' + - "mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro_2micro.4C.yaml output_name" - or in nested parallelism mode via: - - 'mpirun -np 5 ./4C -nptype=separateInputFiles -ngroup=2 -glayout=2,3 /path/to/sohex8_multiscale_macro_2micro.4C.yaml - output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name' - - The nested mode command uses 2 procs (see 1st argument in -glayout) to - - solve the macro scale and 3 procs (see 2nd argument in -glayout) to support + - "mpirun -np 5 ./4C --nptype=separateInputFiles --ngroup=2 --glayout=2,3 /path/to/sohex8_multiscale_macro_2micro.4C.yaml + output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name" + - The nested mode command uses 2 procs (see 1st argument in --glayout) to + - solve the macro scale and 3 procs (see 2nd argument in --glayout) to support - while solving the microscale. The sum of the two proc numbers must - correspond to the argument -np 5 of mpirun. The additional input file - (sohex8_multiscale_npsupport.4C.yaml) for the supporting procs does not have diff --git a/tests/input_files/sohex8_multiscale_macro_eas.4C.yaml b/tests/input_files/sohex8_multiscale_macro_eas.4C.yaml index 5685d29c10e..3051ce3cba6 100644 --- a/tests/input_files/sohex8_multiscale_macro_eas.4C.yaml +++ b/tests/input_files/sohex8_multiscale_macro_eas.4C.yaml @@ -4,12 +4,12 @@ TITLE: - corresponding file name. The micro file (here sohex8_multiscale_micro_eas.mat.4C.yaml) - is expected in the same directory. 
- The test case can either run as usual: - - 'mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro_eas.4C.yaml output_name' + - "mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro_eas.4C.yaml output_name" - or in nested parallelism mode via: - - 'mpirun -np 5 ./4C -nptype=separateInputFiles -ngroup=2 -glayout=2,3 /path/to/sohex8_multiscale_macro_eas.4C.yaml - output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name' - - The nested mode command uses 2 procs (see 1st argument in -glayout) to - - solve the macro scale and 3 procs (see 2nd argument in -glayout) to support + - "mpirun -np 5 ./4C --nptype=separateInputFiles --ngroup=2 --glayout=2,3 /path/to/sohex8_multiscale_macro_eas.4C.yaml + output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name" + - The nested mode command uses 2 procs (see 1st argument in --glayout) to + - solve the macro scale and 3 procs (see 2nd argument in --glayout) to support - while solving the microscale. The sum of the two proc numbers must - correspond to the argument -np 5 of mpirun. The additional input file - (sohex8_multiscale_npsupport.4C.yaml) for the supporting procs does not have diff --git a/tests/input_files/sohex8_multiscale_macro_plastic.4C.yaml b/tests/input_files/sohex8_multiscale_macro_plastic.4C.yaml index 5a8ef879453..e7bedf3b003 100644 --- a/tests/input_files/sohex8_multiscale_macro_plastic.4C.yaml +++ b/tests/input_files/sohex8_multiscale_macro_plastic.4C.yaml @@ -4,12 +4,12 @@ TITLE: - corresponding file name. The micro file (here sohex8_multiscale_micro_plastic.mat.4C.yaml) - is expected in the same directory. 
- The test case can either run as usual: - - 'mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro_plastic.4C.yaml output_name' + - "mpirun -np 2 ./4C /path/to/sohex8_multiscale_macro_plastic.4C.yaml output_name" - or in nested parallelism mode via: - - 'mpirun -np 5 ./4C -nptype=separateInputFiles -ngroup=2 -glayout=2,3 /path/to/sohex8_multiscale_macro_plastic.4C.yaml - output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name' - - The nested mode command uses 2 procs (see 1st argument in -glayout) to - - solve the macro scale and 3 procs (see 2nd argument in -glayout) to support + - "mpirun -np 5 ./4C --nptype=separateInputFiles --ngroup=2 --glayout=2,3 /path/to/sohex8_multiscale_macro_plastic.4C.yaml + output_name /path/to/sohex8_multiscale_npsupport.4C.yaml dummy_output_name" + - The nested mode command uses 2 procs (see 1st argument in --glayout) to + - solve the macro scale and 3 procs (see 2nd argument in --glayout) to support - while solving the microscale. The sum of the two proc numbers must - correspond to the argument -np 5 of mpirun. The additional input file - (sohex8_multiscale_npsupport.4C.yaml) for the supporting procs does not have diff --git a/unittests/mat/4C_electrode_test.cpp b/unittests/mat/4C_electrode_test.cpp index 894d967b43e..1b0efad535b 100644 --- a/unittests/mat/4C_electrode_test.cpp +++ b/unittests/mat/4C_electrode_test.cpp @@ -380,8 +380,9 @@ namespace { Global::Problem* problem = Global::Problem::instance(); + Core::Communication::CommConfig config; // create default communicators - Core::Communication::Communicators communicators = Core::Communication::create_comm({}); + Core::Communication::Communicators communicators = Core::Communication::create_comm(config); problem->set_communicators(communicators); MPI_Comm comm = communicators.global_comm();