diff --git a/.clang-format b/.clang-format index fac02f27be..6ecc400b55 100644 --- a/.clang-format +++ b/.clang-format @@ -1,22 +1,22 @@ -Language: Cpp -Standard: c++20 -BasedOnStyle: Google -ColumnLimit: 120 -UseTab: Never -AllowShortFunctionsOnASingleLine: Empty -IndentPPDirectives: AfterHash -SortIncludes: true -FixNamespaceComments: true -InsertBraces: true -QualifierAlignment: Left -PointerAlignment: Right -ReferenceAlignment: Right -SortUsingDeclarations: LexicographicNumeric -InsertNewlineAtEOF: true -LambdaBodyIndentation: OuterScope -MaxEmptyLinesToKeep: 1 -KeepEmptyLines: - AtStartOfFile: false - AtStartOfBlock: false - AtEndOfFile: false -LineEnding: LF +Language: Cpp +Standard: c++20 +BasedOnStyle: Google +ColumnLimit: 120 +UseTab: Never +AllowShortFunctionsOnASingleLine: Empty +IndentPPDirectives: AfterHash +SortIncludes: true +FixNamespaceComments: true +InsertBraces: true +QualifierAlignment: Left +PointerAlignment: Right +ReferenceAlignment: Right +SortUsingDeclarations: LexicographicNumeric +InsertNewlineAtEOF: true +LambdaBodyIndentation: OuterScope +MaxEmptyLinesToKeep: 1 +KeepEmptyLines: + AtStartOfFile: false + AtStartOfBlock: false + AtEndOfFile: false +LineEnding: LF diff --git a/.clang-tidy b/.clang-tidy index 314b69d130..f72208304d 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,104 +1,104 @@ -Checks: > - bugprone-*, - cert-dcl50-cpp, - cert-dcl58-cpp, - cert-env33-c, - cert-err34-c, - cert-err52-cpp, - cert-err60-cpp, - cert-flp30-c, - cert-mem57-cpp, - cert-msc50-cpp, - cert-msc51-cpp, - cert-oop57-cpp, - cert-oop58-cpp, - concurrency-*, - cppcoreguidelines-*, - google-*, - llvm-include-order, - llvm-namespace-comment, - misc-*, - modernize-*, - mpi-*, - openmp-*, - performance-*, - portability-*, - readability-*, - -bugprone-casting-through-void, - -bugprone-easily-swappable-parameters, - -cppcoreguidelines-avoid-magic-numbers, - -cppcoreguidelines-non-private-member-variables-in-classes, - -cppcoreguidelines-owning-memory, - 
-cppcoreguidelines-pro-bounds-pointer-arithmetic, - -cppcoreguidelines-pro-type-reinterpret-cast, - -cppcoreguidelines-pro-type-vararg, - -cppcoreguidelines-special-member-functions, - -misc-const-correctness, - -misc-non-private-member-variables-in-classes, - -modernize-avoid-c-arrays, - -modernize-use-trailing-return-type, - -portability-avoid-pragma-once, - -portability-template-virtual-member-function, - -readability-magic-numbers - -WarningsAsErrors: "*" -HeaderFilterRegex: '.*/(modules|tasks)/.*' - -CheckOptions: - - key: readability-identifier-naming.ClassCase - value: CamelCase - - key: readability-identifier-naming.ClassMemberCase - value: lower_case - - key: readability-identifier-naming.ConstexprVariableCase - value: CamelCase - - key: readability-identifier-naming.ConstexprVariablePrefix - value: k - - key: readability-identifier-naming.EnumCase - value: CamelCase - - key: readability-identifier-naming.EnumConstantCase - value: CamelCase - - key: readability-identifier-naming.EnumConstantPrefix - value: k - - key: readability-identifier-naming.FunctionCase - value: CamelCase - - key: readability-identifier-naming.GlobalConstantCase - value: CamelCase - - key: readability-identifier-naming.GlobalConstantPrefix - value: k - - key: readability-identifier-naming.StaticConstantCase - value: CamelCase - - key: readability-identifier-naming.StaticConstantPrefix - value: k - - key: readability-identifier-naming.StaticVariableCase - value: lower_case - - key: readability-identifier-naming.MacroDefinitionCase - value: UPPER_CASE - - key: readability-identifier-naming.MacroDefinitionIgnoredRegexp - value: '^[A-Z]+(_[A-Z]+)*_$' - - key: readability-identifier-naming.MemberCase - value: lower_case - - key: readability-identifier-naming.PrivateMemberSuffix - value: _ - - key: readability-identifier-naming.PublicMemberSuffix - value: '' - - key: readability-identifier-naming.NamespaceCase - value: lower_case - - key: readability-identifier-naming.ParameterCase - 
value: lower_case - - key: readability-identifier-naming.TypeAliasCase - value: CamelCase - - key: readability-identifier-naming.TypedefCase - value: CamelCase - - key: readability-identifier-naming.VariableCase - value: lower_case - - key: readability-identifier-naming.IgnoreMainLikeFunctions - value: 1 - # Functions with scores beyond 15 are typically flagged as potentially problematic (empirically) - - key: readability-function-cognitive-complexity.Threshold - value: 15 # default: 25 - - key: readability-identifier-length.MinimumVariableNameLength - value: 1 - - key: readability-identifier-length.MinimumParameterNameLength - value: 1 - - key: misc-include-cleaner.IgnoreHeaders - value: '(__chrono/.*|stdlib\.h|3rdparty/.*)' +Checks: > + bugprone-*, + cert-dcl50-cpp, + cert-dcl58-cpp, + cert-env33-c, + cert-err34-c, + cert-err52-cpp, + cert-err60-cpp, + cert-flp30-c, + cert-mem57-cpp, + cert-msc50-cpp, + cert-msc51-cpp, + cert-oop57-cpp, + cert-oop58-cpp, + concurrency-*, + cppcoreguidelines-*, + google-*, + llvm-include-order, + llvm-namespace-comment, + misc-*, + modernize-*, + mpi-*, + openmp-*, + performance-*, + portability-*, + readability-*, + -bugprone-casting-through-void, + -bugprone-easily-swappable-parameters, + -cppcoreguidelines-avoid-magic-numbers, + -cppcoreguidelines-non-private-member-variables-in-classes, + -cppcoreguidelines-owning-memory, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -cppcoreguidelines-pro-type-reinterpret-cast, + -cppcoreguidelines-pro-type-vararg, + -cppcoreguidelines-special-member-functions, + -misc-const-correctness, + -misc-non-private-member-variables-in-classes, + -modernize-avoid-c-arrays, + -modernize-use-trailing-return-type, + -portability-avoid-pragma-once, + -portability-template-virtual-member-function, + -readability-magic-numbers + +WarningsAsErrors: "*" +HeaderFilterRegex: '.*/(modules|tasks)/.*' + +CheckOptions: + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: 
readability-identifier-naming.ClassMemberCase + value: lower_case + - key: readability-identifier-naming.ConstexprVariableCase + value: CamelCase + - key: readability-identifier-naming.ConstexprVariablePrefix + value: k + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.EnumConstantCase + value: CamelCase + - key: readability-identifier-naming.EnumConstantPrefix + value: k + - key: readability-identifier-naming.FunctionCase + value: CamelCase + - key: readability-identifier-naming.GlobalConstantCase + value: CamelCase + - key: readability-identifier-naming.GlobalConstantPrefix + value: k + - key: readability-identifier-naming.StaticConstantCase + value: CamelCase + - key: readability-identifier-naming.StaticConstantPrefix + value: k + - key: readability-identifier-naming.StaticVariableCase + value: lower_case + - key: readability-identifier-naming.MacroDefinitionCase + value: UPPER_CASE + - key: readability-identifier-naming.MacroDefinitionIgnoredRegexp + value: '^[A-Z]+(_[A-Z]+)*_$' + - key: readability-identifier-naming.MemberCase + value: lower_case + - key: readability-identifier-naming.PrivateMemberSuffix + value: _ + - key: readability-identifier-naming.PublicMemberSuffix + value: '' + - key: readability-identifier-naming.NamespaceCase + value: lower_case + - key: readability-identifier-naming.ParameterCase + value: lower_case + - key: readability-identifier-naming.TypeAliasCase + value: CamelCase + - key: readability-identifier-naming.TypedefCase + value: CamelCase + - key: readability-identifier-naming.VariableCase + value: lower_case + - key: readability-identifier-naming.IgnoreMainLikeFunctions + value: 1 + # Functions with scores beyond 15 are typically flagged as potentially problematic (empirically) + - key: readability-function-cognitive-complexity.Threshold + value: 15 # default: 25 + - key: readability-identifier-length.MinimumVariableNameLength + value: 1 + - key: 
readability-identifier-length.MinimumParameterNameLength + value: 1 + - key: misc-include-cleaner.IgnoreHeaders + value: '(__chrono/.*|stdlib\.h|3rdparty/.*)' diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 91ee2175da..90c8b86727 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,18 +1,18 @@ -{ - "name": "Parallel Programming Course", - "image": "ghcr.io/learning-process/ppc-ubuntu:1.1", - "customizations": { - "vscode": { - "extensions": [ - "ms-vscode.cpptools-extension-pack", - "ms-vscode.cmake-tools", - "ms-python.python" - ], - "settings": { - "cmake.configureOnOpen": true, - "C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools" - } - } - }, - "postCreateCommand": "python3 -m pip install -r requirements.txt" -} +{ + "name": "Parallel Programming Course", + "image": "ghcr.io/learning-process/ppc-ubuntu:1.1", + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.cpptools-extension-pack", + "ms-vscode.cmake-tools", + "ms-python.python" + ], + "settings": { + "cmake.configureOnOpen": true, + "C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools" + } + } + }, + "postCreateCommand": "python3 -m pip install -r requirements.txt" +} diff --git a/.editorconfig b/.editorconfig index 6e37ad64bd..95589f6412 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,27 +1,27 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true -indent_style = space -indent_size = 4 - -[*.{cpp,h,hpp,c}] -indent_size = 2 - -[*.{yml,yaml}] -indent_size = 2 - -[*.py] -indent_size = 4 - -[Makefile] -indent_style = tab - -[{CMakeLists.txt,*.cmake}] -indent_size = 2 - -[*.{md,rst}] -trim_trailing_whitespace = false +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_style = space +indent_size = 4 + +[*.{cpp,h,hpp,c}] +indent_size = 2 + +[*.{yml,yaml}] 
+indent_size = 2 + +[*.py] +indent_size = 4 + +[Makefile] +indent_style = tab + +[{CMakeLists.txt,*.cmake}] +indent_size = 2 + +[*.{md,rst}] +trim_trailing_whitespace = false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ed7b99d898..58d4a9ccdb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @aobolensk @allnes +* @aobolensk @allnes diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index 18c9147181..d28f7f9101 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -1,128 +1,128 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. 
- -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. 
Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/.github/PULL_REQUEST_TEMPLATE/task_submission_en.md b/.github/PULL_REQUEST_TEMPLATE/task_submission_en.md index 59ecfe046a..d849ec6ed8 100644 --- a/.github/PULL_REQUEST_TEMPLATE/task_submission_en.md +++ b/.github/PULL_REQUEST_TEMPLATE/task_submission_en.md @@ -1,39 +1,39 @@ - - -## Description - - -- **Task**: _Enter the full task name here_ -- **Variant**: _Enter the variant number here_ -- **Technology**: _Enter technology (e.g., SEQ, OMP, TBB, STL, MPI)_ -- **Description** of your implementation and report. - _Provide a concise summary of your implementation and report here._ - ---- - -## Checklist - - -- [ ] **CI Status**: All CI jobs (build, tests, report generation) are passing on my branch in my fork -- [ ] **Task Directory & Naming**: I have created a directory named `__` -- [ ] **Full Task Definition**: I have provided the complete task description in the pull request body. 
-- [ ] **clang-format**: My changes pass `clang-format` locally in my fork (no formatting errors) -- [ ] **clang-tidy**: My changes pass `clang-tidy` locally in my fork (no warnings/errors) -- [ ] **Functional Tests**: All functional tests are passing locally on my machine -- [ ] **Performance Tests**: All performance tests are passing locally on my machine -- [ ] **Branch**: I am working on a branch named exactly as my task directory (e.g., `nesterov_a_vector_sum`), not on `master`. -- [ ] **Truthful Content**: I confirm that every detail provided in this pull request is accurate and truthful to the best of my knowledge. - - + + +## Description + + +- **Task**: _Enter the full task name here_ +- **Variant**: _Enter the variant number here_ +- **Technology**: _Enter technology (e.g., SEQ, OMP, TBB, STL, MPI)_ +- **Description** of your implementation and report. + _Provide a concise summary of your implementation and report here._ + +--- + +## Checklist + + +- [ ] **CI Status**: All CI jobs (build, tests, report generation) are passing on my branch in my fork +- [ ] **Task Directory & Naming**: I have created a directory named `__` +- [ ] **Full Task Definition**: I have provided the complete task description in the pull request body. +- [ ] **clang-format**: My changes pass `clang-format` locally in my fork (no formatting errors) +- [ ] **clang-tidy**: My changes pass `clang-tidy` locally in my fork (no warnings/errors) +- [ ] **Functional Tests**: All functional tests are passing locally on my machine +- [ ] **Performance Tests**: All performance tests are passing locally on my machine +- [ ] **Branch**: I am working on a branch named exactly as my task directory (e.g., `nesterov_a_vector_sum`), not on `master`. +- [ ] **Truthful Content**: I confirm that every detail provided in this pull request is accurate and truthful to the best of my knowledge. 
+ + diff --git a/.github/PULL_REQUEST_TEMPLATE/task_submission_ru.md b/.github/PULL_REQUEST_TEMPLATE/task_submission_ru.md index 714ddc5efc..6a112c65a6 100644 --- a/.github/PULL_REQUEST_TEMPLATE/task_submission_ru.md +++ b/.github/PULL_REQUEST_TEMPLATE/task_submission_ru.md @@ -1,39 +1,39 @@ - - -## Описание - - -- **Задача**: _Введите здесь полное название задачи_ -- **Вариант**: _Введите здесь номер варианта_ -- **Технология**: _Введите технологию (например, SEQ, OMP, TBB, STL, MPI)_ -- **Описание** вашей реализации и отчёта. - _Кратко опишите вашу реализацию и содержание отчёта здесь._ - ---- - -## Чек-лист - - -- [ ] **Статус CI**: Все CI-задачи (сборка, тесты, генерация отчёта) успешно проходят на моей ветке в моем форке -- [ ] **Директория и именование задачи**: Я создал директорию с именем `<фамилия>_<первая_буква_имени>_<короткое_название_задачи>` -- [ ] **Полное описание задачи**: Я предоставил полное описание задачи в теле pull request -- [ ] **clang-format**: Мои изменения успешно проходят `clang-format` локально в моем форке (нет ошибок форматирования) -- [ ] **clang-tidy**: Мои изменения успешно проходят `clang-tidy` локально в моем форке (нет предупреждений/ошибок) -- [ ] **Функциональные тесты**: Все функциональные тесты успешно проходят локально на моей машине -- [ ] **Тесты производительности**: Все тесты производительности успешно проходят локально на моей машине -- [ ] **Ветка**: Я работаю в ветке, названной точно так же, как директория моей задачи (например, `nesterov_a_vector_sum`), а не в `master` -- [ ] **Правдивое содержание**: Я подтверждаю, что все сведения, указанные в этом pull request, являются точными и достоверными - - + + +## Описание + + +- **Задача**: _Введите здесь полное название задачи_ +- **Вариант**: _Введите здесь номер варианта_ +- **Технология**: _Введите технологию (например, SEQ, OMP, TBB, STL, MPI)_ +- **Описание** вашей реализации и отчёта. 
+ _Кратко опишите вашу реализацию и содержание отчёта здесь._ + +--- + +## Чек-лист + + +- [ ] **Статус CI**: Все CI-задачи (сборка, тесты, генерация отчёта) успешно проходят на моей ветке в моем форке +- [ ] **Директория и именование задачи**: Я создал директорию с именем `<фамилия>_<первая_буква_имени>_<короткое_название_задачи>` +- [ ] **Полное описание задачи**: Я предоставил полное описание задачи в теле pull request +- [ ] **clang-format**: Мои изменения успешно проходят `clang-format` локально в моем форке (нет ошибок форматирования) +- [ ] **clang-tidy**: Мои изменения успешно проходят `clang-tidy` локально в моем форке (нет предупреждений/ошибок) +- [ ] **Функциональные тесты**: Все функциональные тесты успешно проходят локально на моей машине +- [ ] **Тесты производительности**: Все тесты производительности успешно проходят локально на моей машине +- [ ] **Ветка**: Я работаю в ветке, названной точно так же, как директория моей задачи (например, `nesterov_a_vector_sum`), а не в `master` +- [ ] **Правдивое содержание**: Я подтверждаю, что все сведения, указанные в этом pull request, являются точными и достоверными + + diff --git a/.github/actions/archive-install/action.yml b/.github/actions/archive-install/action.yml index aba56eab62..13d82c6cec 100644 --- a/.github/actions/archive-install/action.yml +++ b/.github/actions/archive-install/action.yml @@ -1,32 +1,32 @@ -name: Archive installed package -inputs: - path: - description: Directory to archive - required: true - name: - description: Artifact name - required: true -runs: - using: composite - steps: - - id: set-archive - shell: bash - run: | - if [ "${RUNNER_OS}" = "Windows" ]; then - echo "archive=${{ inputs.name }}.zip" >> "$GITHUB_OUTPUT" - else - echo "archive=${{ inputs.name }}.tar.gz" >> "$GITHUB_OUTPUT" - fi - - name: Compress directory (unix) - if: runner.os != 'Windows' - shell: bash - run: tar -czvf "${{ steps.set-archive.outputs.archive }}" -C "${{ inputs.path }}" . 
- - name: Compress directory (windows) - if: runner.os == 'Windows' - shell: pwsh - run: Compress-Archive -Path ${{ inputs.path }} -DestinationPath ${{ steps.set-archive.outputs.archive }} - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: ${{ inputs.name }} - path: ${{ steps.set-archive.outputs.archive }} +name: Archive installed package +inputs: + path: + description: Directory to archive + required: true + name: + description: Artifact name + required: true +runs: + using: composite + steps: + - id: set-archive + shell: bash + run: | + if [ "${RUNNER_OS}" = "Windows" ]; then + echo "archive=${{ inputs.name }}.zip" >> "$GITHUB_OUTPUT" + else + echo "archive=${{ inputs.name }}.tar.gz" >> "$GITHUB_OUTPUT" + fi + - name: Compress directory (unix) + if: runner.os != 'Windows' + shell: bash + run: tar -czvf "${{ steps.set-archive.outputs.archive }}" -C "${{ inputs.path }}" . + - name: Compress directory (windows) + if: runner.os == 'Windows' + shell: pwsh + run: Compress-Archive -Path ${{ inputs.path }} -DestinationPath ${{ steps.set-archive.outputs.archive }} + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.name }} + path: ${{ steps.set-archive.outputs.archive }} diff --git a/.github/actions/clang-tidy-native/action.yml b/.github/actions/clang-tidy-native/action.yml index 5858552e81..5da5d42e22 100644 --- a/.github/actions/clang-tidy-native/action.yml +++ b/.github/actions/clang-tidy-native/action.yml @@ -1,114 +1,114 @@ -name: 'Native Clang-Tidy Analysis' -description: 'Run clang-tidy analysis without Docker' -inputs: - build_dir: - description: 'Build directory for CMake' - required: false - default: 'build' - exclude: - description: 'Directories to exclude from analysis (space-separated)' - required: false - default: '3rdparty' - clang_tidy_version: - description: 'Clang-tidy version to use' - required: false - default: '21' -outputs: - total_comments: - description: 'Total number of clang-tidy 
issues found' - value: ${{ steps.analyze.outputs.total_comments }} -runs: - using: 'composite' - steps: - - name: Verify clang-tidy installation - shell: bash - run: | - clang-tidy-${{ inputs.clang_tidy_version }} --version - - - name: Get changed files - id: changed-files - shell: bash - run: | - git config --global --add safe.directory $GITHUB_WORKSPACE - git fetch origin ${{ github.event.pull_request.base.ref }} - - # Check if .clang-tidy files were changed - CLANG_TIDY_CHANGED=$(git diff --name-only \ - origin/${{ github.event.pull_request.base.ref }}...HEAD \ - -- '**/.clang-tidy' || true) - - if [ -n "$CLANG_TIDY_CHANGED" ]; then - echo "::notice::.clang-tidy configuration changed, analyzing all source files" - # Find all source files in the repository (excluding specified directories) - CHANGED_FILES=$(find . -name "*.cpp" -o -name "*.hpp" -o -name "*.c" -o -name "*.h") - # Filter out excluded directories - for exclude_dir in ${{ inputs.exclude }}; do - CHANGED_FILES=$(echo "$CHANGED_FILES" | grep -v "^./${exclude_dir}/" || true) - done - else - # Only analyze changed source files - CHANGED_FILES=$(git diff --name-only \ - origin/${{ github.event.pull_request.base.ref }}...HEAD \ - -- '*.cpp' '*.hpp' '*.c' '*.h') - # Filter out excluded directories - for exclude_dir in ${{ inputs.exclude }}; do - CHANGED_FILES=$(echo "$CHANGED_FILES" | grep -v "^${exclude_dir}/" || true) - done - fi - - echo "changed_files<> $GITHUB_OUTPUT - echo "$CHANGED_FILES" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - if [ -z "$CHANGED_FILES" ]; then - echo "has_changes=false" >> $GITHUB_OUTPUT - else - echo "has_changes=true" >> $GITHUB_OUTPUT - fi - - - name: Run clang-tidy analysis - id: analyze - shell: bash - if: steps.changed-files.outputs.has_changes == 'true' - run: | - COMMENTS_FILE=$(mktemp) - TOTAL_ISSUES=0 - - while IFS= read -r file; do - if [ -n "$file" ] && [ -f "$file" ]; then - echo "Analyzing $file..." 
- FILE_OUTPUT=$(mktemp) - if clang-tidy-${{ inputs.clang_tidy_version }} "$file" \ - -p ${{ inputs.build_dir }} --format-style=file 2>&1 | \ - tee "$FILE_OUTPUT"; then - ISSUES=$(grep -c "warning:\|error:" "$FILE_OUTPUT" 2>/dev/null || echo "0") - ISSUES=$(echo "$ISSUES" | tr -d '[:space:]') - TOTAL_ISSUES=$((TOTAL_ISSUES + ${ISSUES:-0})) - cat "$FILE_OUTPUT" >> "$COMMENTS_FILE" - else - echo "::error::Failed to analyze $file" - TOTAL_ISSUES=$((TOTAL_ISSUES + 1)) - fi - rm -f "$FILE_OUTPUT" - fi - done <<< "${{ steps.changed-files.outputs.changed_files }}" - - echo "total_comments=$TOTAL_ISSUES" >> $GITHUB_OUTPUT - - if [ -f "$COMMENTS_FILE" ] && [ -s "$COMMENTS_FILE" ]; then - echo "::group::Clang-tidy Analysis Results" - cat "$COMMENTS_FILE" - echo "::endgroup::" - fi - - if [ "$TOTAL_ISSUES" -gt 0 ]; then - echo "::error::Found $TOTAL_ISSUES clang-tidy issues" - else - echo "No clang-tidy issues found" - fi - - - name: Set output for no changes - shell: bash - if: steps.changed-files.outputs.has_changes == 'false' - run: | - echo "total_comments=0" >> $GITHUB_OUTPUT +name: 'Native Clang-Tidy Analysis' +description: 'Run clang-tidy analysis without Docker' +inputs: + build_dir: + description: 'Build directory for CMake' + required: false + default: 'build' + exclude: + description: 'Directories to exclude from analysis (space-separated)' + required: false + default: '3rdparty' + clang_tidy_version: + description: 'Clang-tidy version to use' + required: false + default: '21' +outputs: + total_comments: + description: 'Total number of clang-tidy issues found' + value: ${{ steps.analyze.outputs.total_comments }} +runs: + using: 'composite' + steps: + - name: Verify clang-tidy installation + shell: bash + run: | + clang-tidy-${{ inputs.clang_tidy_version }} --version + + - name: Get changed files + id: changed-files + shell: bash + run: | + git config --global --add safe.directory $GITHUB_WORKSPACE + git fetch origin ${{ github.event.pull_request.base.ref }} + + # 
Check if .clang-tidy files were changed + CLANG_TIDY_CHANGED=$(git diff --name-only \ + origin/${{ github.event.pull_request.base.ref }}...HEAD \ + -- '**/.clang-tidy' || true) + + if [ -n "$CLANG_TIDY_CHANGED" ]; then + echo "::notice::.clang-tidy configuration changed, analyzing all source files" + # Find all source files in the repository (excluding specified directories) + CHANGED_FILES=$(find . -name "*.cpp" -o -name "*.hpp" -o -name "*.c" -o -name "*.h") + # Filter out excluded directories + for exclude_dir in ${{ inputs.exclude }}; do + CHANGED_FILES=$(echo "$CHANGED_FILES" | grep -v "^./${exclude_dir}/" || true) + done + else + # Only analyze changed source files + CHANGED_FILES=$(git diff --name-only \ + origin/${{ github.event.pull_request.base.ref }}...HEAD \ + -- '*.cpp' '*.hpp' '*.c' '*.h') + # Filter out excluded directories + for exclude_dir in ${{ inputs.exclude }}; do + CHANGED_FILES=$(echo "$CHANGED_FILES" | grep -v "^${exclude_dir}/" || true) + done + fi + + echo "changed_files<<EOF" >> $GITHUB_OUTPUT + echo "$CHANGED_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + if [ -z "$CHANGED_FILES" ]; then + echo "has_changes=false" >> $GITHUB_OUTPUT + else + echo "has_changes=true" >> $GITHUB_OUTPUT + fi + + - name: Run clang-tidy analysis + id: analyze + shell: bash + if: steps.changed-files.outputs.has_changes == 'true' + run: | + COMMENTS_FILE=$(mktemp) + TOTAL_ISSUES=0 + + while IFS= read -r file; do + if [ -n "$file" ] && [ -f "$file" ]; then + echo "Analyzing $file..." 
+ FILE_OUTPUT=$(mktemp) + if clang-tidy-${{ inputs.clang_tidy_version }} "$file" \ + -p ${{ inputs.build_dir }} --format-style=file 2>&1 | \ + tee "$FILE_OUTPUT"; then + ISSUES=$(grep -c "warning:\|error:" "$FILE_OUTPUT" 2>/dev/null || echo "0") + ISSUES=$(echo "$ISSUES" | tr -d '[:space:]') + TOTAL_ISSUES=$((TOTAL_ISSUES + ${ISSUES:-0})) + cat "$FILE_OUTPUT" >> "$COMMENTS_FILE" + else + echo "::error::Failed to analyze $file" + TOTAL_ISSUES=$((TOTAL_ISSUES + 1)) + fi + rm -f "$FILE_OUTPUT" + fi + done <<< "${{ steps.changed-files.outputs.changed_files }}" + + echo "total_comments=$TOTAL_ISSUES" >> $GITHUB_OUTPUT + + if [ -f "$COMMENTS_FILE" ] && [ -s "$COMMENTS_FILE" ]; then + echo "::group::Clang-tidy Analysis Results" + cat "$COMMENTS_FILE" + echo "::endgroup::" + fi + + if [ "$TOTAL_ISSUES" -gt 0 ]; then + echo "::error::Found $TOTAL_ISSUES clang-tidy issues" + else + echo "No clang-tidy issues found" + fi + + - name: Set output for no changes + shell: bash + if: steps.changed-files.outputs.has_changes == 'false' + run: | + echo "total_comments=0" >> $GITHUB_OUTPUT diff --git a/.github/actions/setup-windows-toolchain/action.yml b/.github/actions/setup-windows-toolchain/action.yml index e0b936aa17..7bf530509f 100644 --- a/.github/actions/setup-windows-toolchain/action.yml +++ b/.github/actions/setup-windows-toolchain/action.yml @@ -1,21 +1,21 @@ -name: 'Setup Windows Toolchain' -description: 'Configure msbuild, MPI, ccache, ninja, and MSVC' -runs: - using: 'composite' - steps: - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@v2 - with: - vs-version: 'latest' - - name: Setup MPI - uses: mpi4py/setup-mpi@v1 - with: - mpi: msmpi - - name: Setup ccache - uses: Chocobo1/setup-ccache-action@v1 - with: - windows_compile_environment: msvc - - name: Setup ninja - uses: seanmiddleditch/gha-setup-ninja@v6 - - name: Setup MSVC for Ninja again - uses: ilammy/msvc-dev-cmd@v1 +name: 'Setup Windows Toolchain' +description: 'Configure msbuild, MPI, ccache, ninja, 
and MSVC' +runs: + using: 'composite' + steps: + - name: Add msbuild to PATH + uses: microsoft/setup-msbuild@v2 + with: + vs-version: 'latest' + - name: Setup MPI + uses: mpi4py/setup-mpi@v1 + with: + mpi: msmpi + - name: Setup ccache + uses: Chocobo1/setup-ccache-action@v1 + with: + windows_compile_environment: msvc + - name: Setup ninja + uses: seanmiddleditch/gha-setup-ninja@v6 + - name: Setup MSVC for Ninja again + uses: ilammy/msvc-dev-cmd@v1 diff --git a/.github/actions/update-precommit/action.yml b/.github/actions/update-precommit/action.yml index d15ca18a1f..42211b78ef 100644 --- a/.github/actions/update-precommit/action.yml +++ b/.github/actions/update-precommit/action.yml @@ -1,51 +1,51 @@ -name: 'Update Pre-commit Hooks' -description: 'Updates pre-commit hook versions and creates a PR if changes are detected' -inputs: - token: - description: 'GitHub token for creating PRs' - required: true - python-version: - description: 'Python version to use' - required: false - default: '3.13' - -runs: - using: composite - steps: - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: ${{ inputs.python-version }} - - name: Install pre-commit - shell: bash - run: pip install pre-commit - - name: Update pre-commit hooks - shell: bash - run: | - pre-commit autoupdate > /tmp/autoupdate.log 2>&1 - cat /tmp/autoupdate.log - - name: Check for changes - id: changes - shell: bash - run: | - if git diff --quiet .pre-commit-config.yaml; then - echo "changed=false" >> $GITHUB_OUTPUT - else - echo "changed=true" >> $GITHUB_OUTPUT - fi - - name: Create Pull Request - if: steps.changes.outputs.changed == 'true' - uses: peter-evans/create-pull-request@v7 - with: - token: ${{ inputs.token }} - commit-message: "[pre-commit] Update hooks versions" - title: "[pre-commit] Update hooks versions" - committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> - author: github-actions[bot] 
<41898282+github-actions[bot]@users.noreply.github.com> - body: | - 🤖 This PR was created automatically by the update-pre-commit workflow. - branch: update-pre-commit-hooks - base: master - delete-branch: true - labels: | - dependencies +name: 'Update Pre-commit Hooks' +description: 'Updates pre-commit hook versions and creates a PR if changes are detected' +inputs: + token: + description: 'GitHub token for creating PRs' + required: true + python-version: + description: 'Python version to use' + required: false + default: '3.13' + +runs: + using: composite + steps: + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + - name: Install pre-commit + shell: bash + run: pip install pre-commit + - name: Update pre-commit hooks + shell: bash + run: | + pre-commit autoupdate > /tmp/autoupdate.log 2>&1 + cat /tmp/autoupdate.log + - name: Check for changes + id: changes + shell: bash + run: | + if git diff --quiet .pre-commit-config.yaml; then + echo "changed=false" >> $GITHUB_OUTPUT + else + echo "changed=true" >> $GITHUB_OUTPUT + fi + - name: Create Pull Request + if: steps.changes.outputs.changed == 'true' + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ inputs.token }} + commit-message: "[pre-commit] Update hooks versions" + title: "[pre-commit] Update hooks versions" + committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> + author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> + body: | + 🤖 This PR was created automatically by the update-pre-commit workflow. 
+ branch: update-pre-commit-hooks + base: master + delete-branch: true + labels: | + dependencies diff --git a/.github/codecov.yml b/.github/codecov.yml index 5f0d268b2a..3aebfdc1a9 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -1,18 +1,18 @@ -ignore: - - "tasks/**/tests/**" - - "modules/**/tests/**" - - "tasks/common/runners/**" - - "modules/runners/**" - - "modules/util/include/perf_test_util.hpp" - - "modules/util/include/func_test_util.hpp" - - "modules/util/src/func_test_util.cpp" -coverage: - status: - project: - default: - target: auto - threshold: 1% - patch: - default: - target: 95% - threshold: 5% +ignore: + - "tasks/**/tests/**" + - "modules/**/tests/**" + - "tasks/common/runners/**" + - "modules/runners/**" + - "modules/util/include/perf_test_util.hpp" + - "modules/util/include/func_test_util.hpp" + - "modules/util/src/func_test_util.cpp" +coverage: + status: + project: + default: + target: auto + threshold: 1% + patch: + default: + target: 95% + threshold: 5% diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 64b9c0b95c..d255f28bbc 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,14 +1,14 @@ -version: 2 -updates: - - package-ecosystem: "pip" - directory: "/" - schedule: - interval: "weekly" - day: "friday" - open-pull-requests-limit: 10 - - package-ecosystem: "github-actions" - directory: "/" - schedule: - interval: "weekly" - day: "friday" - open-pull-requests-limit: 10 +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + day: "friday" + open-pull-requests-limit: 10 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + day: "friday" + open-pull-requests-limit: 10 diff --git a/.github/labeler.yml b/.github/labeler.yml index 03b817c10f..60dd1526c5 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,94 +1,94 @@ -"task:all": - - changed-files: - - any-glob-to-any-file: - - "tasks/**/all" - -"task:mpi": 
- - changed-files: - - any-glob-to-any-file: - - "tasks/**/mpi" - -"task:omp": - - changed-files: - - any-glob-to-any-file: - - "tasks/**/omp" - -"task:seq": - - changed-files: - - any-glob-to-any-file: - - "tasks/**/seq" - -"task:stl": - - changed-files: - - any-glob-to-any-file: - - "tasks/**/stl" - -"task:tbb": - - changed-files: - - any-glob-to-any-file: - - "tasks/**/tbb" - -task: - - changed-files: - - any-glob-to-any-file: - - "tasks/**" - -ci: - - changed-files: - - any-glob-to-any-file: - - ".github/**" - - "appveyor.yml" - -"modules:performance": - - changed-files: - - any-glob-to-any-file: - - "modules/performance/**" - -"modules:runners": - - changed-files: - - any-glob-to-any-file: - - "modules/runners/**" - -"modules:task": - - changed-files: - - any-glob-to-any-file: - - "modules/task/**" - -"modules:util": - - changed-files: - - any-glob-to-any-file: - - "modules/util/**" - -"3rdparty": - - changed-files: - - any-glob-to-any-file: - - "3rdparty/**" - -infrastructure: - - changed-files: - - any-glob-to-any-file: - - "scripts/**" - - "**/CMakeLists.txt" - - "cmake/**" - - ".clang-format" - - ".clang-tidy" - - ".gitignore" - - ".gitmodules" - - "codecov.yml" - - "setup.cfg" - -python: - - changed-files: - - any-glob-to-any-file: - - "**/requirements.txt" - -docs: - - changed-files: - - any-glob-to-any-file: - - "docs/**" - - "README.md" - -scoreboard: - - changed-files: - - any-glob-to-any-file: - - "scoreboard/**" +"task:all": + - changed-files: + - any-glob-to-any-file: + - "tasks/**/all" + +"task:mpi": + - changed-files: + - any-glob-to-any-file: + - "tasks/**/mpi" + +"task:omp": + - changed-files: + - any-glob-to-any-file: + - "tasks/**/omp" + +"task:seq": + - changed-files: + - any-glob-to-any-file: + - "tasks/**/seq" + +"task:stl": + - changed-files: + - any-glob-to-any-file: + - "tasks/**/stl" + +"task:tbb": + - changed-files: + - any-glob-to-any-file: + - "tasks/**/tbb" + +task: + - changed-files: + - any-glob-to-any-file: + - "tasks/**" + +ci: 
+ - changed-files: + - any-glob-to-any-file: + - ".github/**" + - "appveyor.yml" + +"modules:performance": + - changed-files: + - any-glob-to-any-file: + - "modules/performance/**" + +"modules:runners": + - changed-files: + - any-glob-to-any-file: + - "modules/runners/**" + +"modules:task": + - changed-files: + - any-glob-to-any-file: + - "modules/task/**" + +"modules:util": + - changed-files: + - any-glob-to-any-file: + - "modules/util/**" + +"3rdparty": + - changed-files: + - any-glob-to-any-file: + - "3rdparty/**" + +infrastructure: + - changed-files: + - any-glob-to-any-file: + - "scripts/**" + - "**/CMakeLists.txt" + - "cmake/**" + - ".clang-format" + - ".clang-tidy" + - ".gitignore" + - ".gitmodules" + - "codecov.yml" + - "setup.cfg" + +python: + - changed-files: + - any-glob-to-any-file: + - "**/requirements.txt" + +docs: + - changed-files: + - any-glob-to-any-file: + - "docs/**" + - "README.md" + +scoreboard: + - changed-files: + - any-glob-to-any-file: + - "scoreboard/**" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b6f96f76c6..4873bc5229 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ - - -Please go to the `Preview` tab and select the appropriate template: - -* [Submit Student task (English)](?expand=1&template=task_submission_en.md) -* [Submit Student task (Russian)](?expand=1&template=task_submission_ru.md) + + +Please go to the `Preview` tab and select the appropriate template: + +* [Submit Student task (English)](?expand=1&template=task_submission_en.md) +* [Submit Student task (Russian)](?expand=1&template=task_submission_ru.md) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 13747aaec3..1511d8d5e6 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,60 +1,60 @@ -name: "CodeQL" - -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-24.04 
- permissions: - actions: read - contents: read - security-events: write - strategy: - fail-fast: false - matrix: - language: - - cpp - - python - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - name: Setup environment - run: | - sudo apt-get update - sudo apt-get install -y gcc-14 g++-14 ninja-build mpich libomp-dev valgrind - python3 -m pip install -r requirements.txt - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-gcc - create-symlink: true - max-size: 1G - - name: Initialize CodeQL - uses: github/codeql-action/init@v4 - with: - languages: ${{ matrix.language }} - - name: CMake configure - if: matrix.language == 'cpp' - run: > - cmake -S . -B build -G Ninja - -D CMAKE_BUILD_TYPE=RELEASE - env: - CC: gcc-14 - CXX: g++-14 - - name: Build project - if: matrix.language == 'cpp' - run: | - cmake --build build --parallel -- --quiet - env: - CC: gcc-14 - CXX: g++-14 - - name: Show ccache stats - if: matrix.language == 'cpp' - run: ccache --show-stats - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v4 +name: "CodeQL" + +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-24.04 + permissions: + actions: read + contents: read + security-events: write + strategy: + fail-fast: false + matrix: + language: + - cpp + - python + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - name: Setup environment + run: | + sudo apt-get update + sudo apt-get install -y gcc-14 g++-14 ninja-build mpich libomp-dev valgrind + python3 -m pip install -r requirements.txt + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-gcc + create-symlink: true + max-size: 1G + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: ${{ matrix.language }} + - name: CMake configure + if: matrix.language == 'cpp' + run: > + cmake -S . 
-B build -G Ninja + -D CMAKE_BUILD_TYPE=RELEASE + env: + CC: gcc-14 + CXX: g++-14 + - name: Build project + if: matrix.language == 'cpp' + run: | + cmake --build build --parallel -- --quiet + env: + CC: gcc-14 + CXX: g++-14 + - name: Show ccache stats + if: matrix.language == 'cpp' + run: ccache --show-stats + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index b4591a1af4..98a1d1c26e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,51 +1,51 @@ -on: - push: - branches: - - master - paths: - - 'docker/**' - - '.github/workflows/docker.yml' - pull_request: - paths: - - 'docker/**' - - '.github/workflows/docker.yml' - workflow_dispatch: - -permissions: - contents: read - packages: write - -jobs: - build-and-push: - name: Build & Push Docker Image - runs-on: ubuntu-latest - if: github.repository == 'learning-process/parallel_programming_course' - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - with: - platforms: linux/amd64,linux/arm64 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Log in to GHCR - if: github.event_name == 'push' - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.DOCKER_TOKEN }} - - - name: Build and push multi-arch image - uses: docker/build-push-action@v6 - with: - context: . 
- file: ./docker/ubuntu.Dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.ref == 'refs/heads/master' }} - tags: ghcr.io/learning-process/ppc-ubuntu:1.1 +on: + push: + branches: + - master + paths: + - 'docker/**' + - '.github/workflows/docker.yml' + pull_request: + paths: + - 'docker/**' + - '.github/workflows/docker.yml' + workflow_dispatch: + +permissions: + contents: read + packages: write + +jobs: + build-and-push: + name: Build & Push Docker Image + runs-on: ubuntu-latest + if: github.repository == 'learning-process/parallel_programming_course' + + steps: + - name: Check out code + uses: actions/checkout@v5 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: linux/amd64,linux/arm64 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GHCR + if: github.event_name == 'push' + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Build and push multi-arch image + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./docker/ubuntu.Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.ref == 'refs/heads/master' }} + tags: ghcr.io/learning-process/ppc-ubuntu:1.1 diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index bcd35e6ece..5c72913800 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -1,19 +1,19 @@ -name: "Label PRs" - -on: - - pull_request_target - -jobs: - label-pull-requests: - runs-on: ubuntu-24.04 - permissions: - contents: read - issues: write - pull-requests: write - steps: - - name: Apply labels based on changed files - uses: actions/labeler@v6 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - configuration-path: ".github/labeler.yml" - dot: true +name: "Label PRs" + +on: + - pull_request_target + +jobs: + label-pull-requests: + runs-on: ubuntu-24.04 + permissions: + contents: read + issues: write + pull-requests: write + steps: + - name: Apply labels based on changed files + uses: actions/labeler@v6 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: ".github/labeler.yml" + dot: true diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 7b04a6b6ec..bc04fb7175 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -1,105 +1,105 @@ -on: - workflow_call: - -jobs: - clang-build: - runs-on: macOS-latest - strategy: - matrix: - build_type: [Release, Debug] - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - name: Install Xcode - uses: maxim-lobanov/setup-xcode@v1 - with: - xcode-version: 'latest-stable' - - name: Setup environment - run: | - brew update - brew install ninja mpich llvm libomp openssl - brew link libomp --overwrite --force - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-clang - create-symlink: true - max-size: 1G - - name: CMake configure - run: > - cmake -S . 
-B build -G Ninja - -DCMAKE_C_FLAGS="-I$(brew --prefix)/opt/libomp/include" - -DCMAKE_CXX_FLAGS="-I$(brew --prefix)/opt/libomp/include" - -D CMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_INSTALL_PREFIX=install - - name: Build project - run: | - cmake --build build --parallel -- --quiet - - name: Install project - run: | - cmake --build build --target install -- --quiet - - name: Archive installed package - uses: ./.github/actions/archive-install - with: - path: install - name: ${{ matrix.build_type == 'Debug' && 'macos-clang-debug-install' || 'macos-clang-install' }} - - name: Show ccache stats - run: ccache --show-stats - clang-test: - needs: - - clang-build - runs-on: macOS-latest - steps: - - uses: actions/checkout@v5 - - name: Install Xcode - uses: maxim-lobanov/setup-xcode@v1 - with: - xcode-version: 'latest-stable' - - name: Setup environment - run: | - brew update - brew install ninja mpich llvm libomp openssl - brew link libomp --overwrite --force - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: macos-clang-install - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf macos-clang-install.tar.gz -C install - - name: Run func tests (MPI) - run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 - env: - PPC_NUM_THREADS: 1 - - name: Run tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - clang-test-extended: - needs: - - clang-test - runs-on: macOS-latest - steps: - - uses: actions/checkout@v5 - - name: Install Xcode - uses: maxim-lobanov/setup-xcode@v1 - with: - xcode-version: 'latest-stable' - - name: Setup environment - run: | - brew update - brew install ninja mpich llvm libomp openssl - brew link libomp --overwrite --force - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: macos-clang-install - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf 
macos-clang-install.tar.gz -C install - - name: Run tests (threads extended) - run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 - env: - PPC_NUM_PROC: 1 +on: + workflow_call: + +jobs: + clang-build: + runs-on: macOS-latest + strategy: + matrix: + build_type: [Release, Debug] + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - name: Install Xcode + uses: maxim-lobanov/setup-xcode@v1 + with: + xcode-version: 'latest-stable' + - name: Setup environment + run: | + brew update + brew install ninja mpich llvm libomp openssl + brew link libomp --overwrite --force + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-clang + create-symlink: true + max-size: 1G + - name: CMake configure + run: > + cmake -S . -B build -G Ninja + -DCMAKE_C_FLAGS="-I$(brew --prefix)/opt/libomp/include" + -DCMAKE_CXX_FLAGS="-I$(brew --prefix)/opt/libomp/include" + -D CMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_INSTALL_PREFIX=install + - name: Build project + run: | + cmake --build build --parallel -- --quiet + - name: Install project + run: | + cmake --build build --target install -- --quiet + - name: Archive installed package + uses: ./.github/actions/archive-install + with: + path: install + name: ${{ matrix.build_type == 'Debug' && 'macos-clang-debug-install' || 'macos-clang-install' }} + - name: Show ccache stats + run: ccache --show-stats + clang-test: + needs: + - clang-build + runs-on: macOS-latest + steps: + - uses: actions/checkout@v5 + - name: Install Xcode + uses: maxim-lobanov/setup-xcode@v1 + with: + xcode-version: 'latest-stable' + - name: Setup environment + run: | + brew update + brew install ninja mpich llvm libomp openssl + brew link libomp --overwrite --force + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: macos-clang-install + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf macos-clang-install.tar.gz -C install + - name: Run 
func tests (MPI) + run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 + env: + PPC_NUM_THREADS: 1 + - name: Run tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + clang-test-extended: + needs: + - clang-test + runs-on: macOS-latest + steps: + - uses: actions/checkout@v5 + - name: Install Xcode + uses: maxim-lobanov/setup-xcode@v1 + with: + xcode-version: 'latest-stable' + - name: Setup environment + run: | + brew update + brew install ninja mpich llvm libomp openssl + brew link libomp --overwrite --force + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: macos-clang-install + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf macos-clang-install.tar.gz -C install + - name: Run tests (threads extended) + run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 + env: + PPC_NUM_PROC: 1 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b410037502..c4e2429fdb 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,43 +1,43 @@ -name: Build application - -on: - push: - pull_request: - merge_group: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: >- - ${{ github.ref != 'refs/heads/master' && - github.event_name != 'merge_group' && - !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }} - -jobs: - pre-commit: - uses: ./.github/workflows/pre-commit.yml - ubuntu: - needs: - - pre-commit - uses: ./.github/workflows/ubuntu.yml - mac: - needs: - - pre-commit - uses: ./.github/workflows/mac.yml - windows: - needs: - - pre-commit - uses: ./.github/workflows/windows.yml - perf: - needs: - - ubuntu - - mac - - windows - uses: ./.github/workflows/perf.yml - - pages: - needs: - - perf - uses: ./.github/workflows/pages.yml +name: Build application + 
+on: + push: + pull_request: + merge_group: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: >- + ${{ github.ref != 'refs/heads/master' && + github.event_name != 'merge_group' && + !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }} + +jobs: + pre-commit: + uses: ./.github/workflows/pre-commit.yml + ubuntu: + needs: + - pre-commit + uses: ./.github/workflows/ubuntu.yml + mac: + needs: + - pre-commit + uses: ./.github/workflows/mac.yml + windows: + needs: + - pre-commit + uses: ./.github/workflows/windows.yml + perf: + needs: + - ubuntu + - mac + - windows + uses: ./.github/workflows/perf.yml + + pages: + needs: + - perf + uses: ./.github/workflows/pages.yml diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 938ecec156..d54639ea7f 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -1,25 +1,25 @@ -name: Nightly routine - -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -permissions: - issues: write - pull-requests: write - -jobs: - close-stale-prs: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v10 - with: - stale-pr-message: 'This pull request is stale for 2 weeks and is going to be closed in a week' - days-before-pr-stale: 14 - days-before-issue-stale: -1 - days-before-pr-close: 7 - days-before-issue-close: -1 - stale-pr-label: 'stale' - operations-per-run: 100 - exempt-pr-labels: 'no_stale' +name: Nightly routine + +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + +jobs: + close-stale-prs: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v10 + with: + stale-pr-message: 'This pull request is stale for 2 weeks and is going to be closed in a week' + days-before-pr-stale: 14 + days-before-issue-stale: -1 + days-before-pr-close: 7 + days-before-issue-close: -1 + 
stale-pr-label: 'stale' + operations-per-run: 100 + exempt-pr-labels: 'no_stale' diff --git a/.github/workflows/openssf-scorecard.yml b/.github/workflows/openssf-scorecard.yml index 136d7d7597..1009c4bf00 100644 --- a/.github/workflows/openssf-scorecard.yml +++ b/.github/workflows/openssf-scorecard.yml @@ -1,37 +1,37 @@ -name: OpenSSF Scorecard - -on: - schedule: - - cron: '0 0 * * 0' - workflow_dispatch: - -permissions: read-all - -jobs: - analysis: - name: Scorecard analysis - runs-on: ubuntu-latest - permissions: - security-events: write - id-token: write - steps: - - name: Checkout code - uses: actions/checkout@v5 - with: - persist-credentials: false - - name: Run analysis - uses: ossf/scorecard-action@v2.4.3 - with: - results_file: results.sarif - results_format: sarif - publish_results: true - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: SARIF file - path: results.sarif - retention-days: 5 - - name: Upload to code-scanning - uses: github/codeql-action/upload-sarif@v4 - with: - sarif_file: results.sarif +name: OpenSSF Scorecard + +on: + schedule: + - cron: '0 0 * * 0' + workflow_dispatch: + +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + security-events: write + id-token: write + steps: + - name: Checkout code + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Run analysis + uses: ossf/scorecard-action@v2.4.3 + with: + results_file: results.sarif + results_format: sarif + publish_results: true + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + - name: Upload to code-scanning + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: results.sarif diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml index 259446c26a..c65b49b546 100644 --- a/.github/workflows/pages.yml +++ b/.github/workflows/pages.yml @@ -1,138 +1,138 @@ -name: Pages - 
-on: - workflow_call: - workflow_dispatch: - -permissions: - id-token: write - contents: read - pages: write - -jobs: - build-doxygen-xml: - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - name: Install Doxygen - run: | - sudo apt-get update - sudo apt-get install --no-install-recommends -y doxygen - - name: Run Doxygen - run: doxygen Doxyfile - - name: Upload Doxygen documentation - uses: actions/upload-artifact@v4 - with: - name: doxygen-documentation-xml - path: xml - build-sphinx: - runs-on: ubuntu-24.04 - needs: - - build-doxygen-xml - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.13' - - name: Install Doxygen - run: | - sudo apt-get update - sudo apt-get install --no-install-recommends -y doxygen - - name: Install dependencies - run: | - python3 -m pip install -r docs/requirements.txt - - name: Download artifact - uses: actions/download-artifact@v5 - with: - name: doxygen-documentation-xml - path: xml - - name: Configure project - run: > - cmake -S . 
-B build -D USE_DOCS=ON - - name: Build i18n - run: | - cmake --build build -t docs_gettext -- --quiet - cmake --build build -t docs_update -- --quiet - - name: Build documentation - run: | - cmake --build build -t docs_html -- --quiet - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: sphinx-documentation - path: ./build/docs/_build/html - build-scoreboard: - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.13' - - name: Install dependencies - run: | - python3 -m pip install -r requirements.txt - python3 -m pip install -r scoreboard/requirements.txt - - name: Download performance data - uses: actions/download-artifact@v5 - with: - name: perf-stat - - name: Extract performance data - run: | - mkdir -p build/perf_stat_dir - # The uploaded artifact contains a nested perf-stat.zip inside. - # First unzip extracts the inner archive; the second extracts perf_stat_dir/*. - unzip -o perf-stat.zip -d . - if [ -f "perf-stat.zip" ]; then - mv -f perf-stat.zip perf-stat-inner.zip - unzip -o perf-stat-inner.zip -d . - fi - - name: CMake configure - run: | - cmake -S . 
-B build -DUSE_SCOREBOARD=ON - - name: CMake build - run: | - cmake --build build --parallel -- --quiet - - name: Upload artifact - uses: actions/upload-artifact@v4 - with: - name: scoreboard - path: ./build/scoreboard/html/ - deploy-pages: - if: github.ref == 'refs/heads/master' - needs: - - build-sphinx - - build-scoreboard - runs-on: ubuntu-24.04 - environment: - name: github-pages - concurrency: - group: github-pages-deploy - cancel-in-progress: false - steps: - - name: Download artifact - uses: actions/download-artifact@v5 - with: - name: sphinx-documentation - path: ./ - - name: Download artifact - uses: actions/download-artifact@v5 - with: - name: scoreboard - path: ./scoreboard/ - - name: Download coverage artifact - uses: actions/download-artifact@v5 - with: - name: cov-report - path: ./coverage/ - - name: Upload artifact - uses: actions/upload-pages-artifact@v4 - with: - path: ./ - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 +name: Pages + +on: + workflow_call: + workflow_dispatch: + +permissions: + id-token: write + contents: read + pages: write + +jobs: + build-doxygen-xml: + runs-on: ubuntu-24.04 + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Install Doxygen + run: | + sudo apt-get update + sudo apt-get install --no-install-recommends -y doxygen + - name: Run Doxygen + run: doxygen Doxyfile + - name: Upload Doxygen documentation + uses: actions/upload-artifact@v4 + with: + name: doxygen-documentation-xml + path: xml + build-sphinx: + runs-on: ubuntu-24.04 + needs: + - build-doxygen-xml + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.13' + - name: Install Doxygen + run: | + sudo apt-get update + sudo apt-get install --no-install-recommends -y doxygen + - name: Install dependencies + run: | + python3 -m pip install -r docs/requirements.txt + - name: Download artifact + uses: 
actions/download-artifact@v5 + with: + name: doxygen-documentation-xml + path: xml + - name: Configure project + run: > + cmake -S . -B build -D USE_DOCS=ON + - name: Build i18n + run: | + cmake --build build -t docs_gettext -- --quiet + cmake --build build -t docs_update -- --quiet + - name: Build documentation + run: | + cmake --build build -t docs_html -- --quiet + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: sphinx-documentation + path: ./build/docs/_build/html + build-scoreboard: + runs-on: ubuntu-24.04 + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.13' + - name: Install dependencies + run: | + python3 -m pip install -r requirements.txt + python3 -m pip install -r scoreboard/requirements.txt + - name: Download performance data + uses: actions/download-artifact@v5 + with: + name: perf-stat + - name: Extract performance data + run: | + mkdir -p build/perf_stat_dir + # The uploaded artifact contains a nested perf-stat.zip inside. + # First unzip extracts the inner archive; the second extracts perf_stat_dir/*. + unzip -o perf-stat.zip -d . + if [ -f "perf-stat.zip" ]; then + mv -f perf-stat.zip perf-stat-inner.zip + unzip -o perf-stat-inner.zip -d . + fi + - name: CMake configure + run: | + cmake -S . 
-B build -DUSE_SCOREBOARD=ON + - name: CMake build + run: | + cmake --build build --parallel -- --quiet + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: scoreboard + path: ./build/scoreboard/html/ + deploy-pages: + if: github.ref == 'refs/heads/master' + needs: + - build-sphinx + - build-scoreboard + runs-on: ubuntu-24.04 + environment: + name: github-pages + concurrency: + group: github-pages-deploy + cancel-in-progress: false + steps: + - name: Download artifact + uses: actions/download-artifact@v5 + with: + name: sphinx-documentation + path: ./ + - name: Download artifact + uses: actions/download-artifact@v5 + with: + name: scoreboard + path: ./scoreboard/ + - name: Download coverage artifact + uses: actions/download-artifact@v5 + with: + name: cov-report + path: ./coverage/ + - name: Upload artifact + uses: actions/upload-pages-artifact@v4 + with: + path: ./ + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/perf.yml b/.github/workflows/perf.yml index ba87df9b59..c1e79cad4d 100644 --- a/.github/workflows/perf.yml +++ b/.github/workflows/perf.yml @@ -1,76 +1,76 @@ -on: - workflow_call: - -jobs: - ubuntu-gcc-build-perf-stats: - runs-on: ubuntu-24.04 - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v5 - - name: Setup environment - run: | - python3 -m pip install -r requirements.txt --break-system-packages --ignore-installed - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-gcc-install-ubuntu-24.04 - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf ubuntu-gcc-install-ubuntu-24.04.tar.gz -C install - - name: Run perf tests - run: | - bash -e scripts/generate_perf_results.sh - env: - PPC_NUM_PROC: 2 - PPC_NUM_THREADS: 2 - OMPI_ALLOW_RUN_AS_ROOT: 1 - 
OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 - - name: Archive results - working-directory: build - run: zip -r ../perf-stat.zip perf_stat_dir - - name: Upload results - uses: actions/upload-artifact@v4 - with: - name: perf-stat - path: perf-stat.zip - macos-clang-build-perf-stats: - runs-on: macOS-latest - steps: - - uses: actions/checkout@v5 - - name: Install Xcode - uses: maxim-lobanov/setup-xcode@v1 - with: - xcode-version: 'latest-stable' - - name: Setup environment - run: | - brew update - brew install ninja mpich llvm libomp openssl - brew link libomp --overwrite --force - python3 -m pip install -r requirements.txt --break-system-packages - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: macos-clang-install - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf macos-clang-install.tar.gz -C install - - name: Run perf tests - run: | - bash -e scripts/generate_perf_results.sh - env: - PPC_NUM_PROC: 1 - PPC_NUM_THREADS: 2 - - name: Archive results - working-directory: build - run: zip -r perf-stat-macos.zip perf_stat_dir - - name: Upload results - uses: actions/upload-artifact@v4 - with: - name: perf-stat-macos - path: perf-stat-macos.zip +on: + workflow_call: + +jobs: + ubuntu-gcc-build-perf-stats: + runs-on: ubuntu-24.04 + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v5 + - name: Setup environment + run: | + python3 -m pip install -r requirements.txt --break-system-packages --ignore-installed + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-gcc-install-ubuntu-24.04 + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf ubuntu-gcc-install-ubuntu-24.04.tar.gz -C install + - name: Run perf tests + run: | + bash -e scripts/generate_perf_results.sh + env: + PPC_NUM_PROC: 2 + PPC_NUM_THREADS: 2 + 
OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + - name: Archive results + working-directory: build + run: zip -r ../perf-stat.zip perf_stat_dir + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: perf-stat + path: perf-stat.zip + macos-clang-build-perf-stats: + runs-on: macOS-latest + steps: + - uses: actions/checkout@v5 + - name: Install Xcode + uses: maxim-lobanov/setup-xcode@v1 + with: + xcode-version: 'latest-stable' + - name: Setup environment + run: | + brew update + brew install ninja mpich llvm libomp openssl + brew link libomp --overwrite --force + python3 -m pip install -r requirements.txt --break-system-packages + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: macos-clang-install + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf macos-clang-install.tar.gz -C install + - name: Run perf tests + run: | + bash -e scripts/generate_perf_results.sh + env: + PPC_NUM_PROC: 1 + PPC_NUM_THREADS: 2 + - name: Archive results + working-directory: build + run: zip -r perf-stat-macos.zip perf_stat_dir + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: perf-stat-macos + path: perf-stat-macos.zip diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 6fa2ed6b30..c2a51167f0 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -1,31 +1,31 @@ -name: Pre-commit checks - -on: - push: - pull_request: - workflow_call: - -jobs: - pre-commit: - runs-on: ubuntu-24.04 - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - fetch-depth: 0 - - name: Setup environment - run: | - python3 -m pip install -r requirements.txt --break-system-packages --ignore-installed - - name: Configure git safe directory - run: | - git config 
--global --add safe.directory '*' - - name: Run pre-commit checks - run: | - FROM_REF="${{ github.base_ref || 'HEAD~1' }}" - git fetch origin $FROM_REF:$FROM_REF || true - pre-commit run --from-ref $FROM_REF --to-ref HEAD +name: Pre-commit checks + +on: + push: + pull_request: + workflow_call: + +jobs: + pre-commit: + runs-on: ubuntu-24.04 + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + fetch-depth: 0 + - name: Setup environment + run: | + python3 -m pip install -r requirements.txt --break-system-packages --ignore-installed + - name: Configure git safe directory + run: | + git config --global --add safe.directory '*' + - name: Run pre-commit checks + run: | + FROM_REF="${{ github.base_ref || 'HEAD~1' }}" + git fetch origin $FROM_REF:$FROM_REF || true + pre-commit run --from-ref $FROM_REF --to-ref HEAD diff --git a/.github/workflows/static-analysis-pr.yml b/.github/workflows/static-analysis-pr.yml index 4747944387..5aa86b4a13 100644 --- a/.github/workflows/static-analysis-pr.yml +++ b/.github/workflows/static-analysis-pr.yml @@ -1,141 +1,141 @@ -name: Static analysis - -on: - pull_request: - paths: - - '**/*.cpp' - - '**/*.hpp' - - '**/*.c' - - '**/*.h' - - '**/CMakeLists.txt' - - '**/*.cmake' - - '**/.clang-tidy' - - '.github/workflows/static-analysis-pr.yml' - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: >- - ${{ github.ref != 'refs/heads/master' && - github.event_name != 'merge_group' && - !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }} - -jobs: - clang-tidy: - runs-on: ubuntu-24.04 - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - 
fetch-depth: 0 - - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-clang - create-symlink: true - max-size: 1G - - - name: CMake configure - run: > - cmake -S . -B build -G Ninja - -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - env: - CC: clang-21 - CXX: clang++-21 - - - name: Build project - run: | - cmake --build build --parallel -- --quiet - env: - CC: clang-21 - CXX: clang++-21 - - - name: Show ccache stats - run: ccache --show-stats - - name: Run clang-tidy - uses: ./.github/actions/clang-tidy-native - id: review - with: - exclude: "3rdparty build" - clang_tidy_version: "21" - - if: steps.review.outputs.total_comments > 0 - run: | - echo "clang-tidy run has failed. See previous 'Run clang-tidy' stage logs" - exit 1 - clang-tidy-for-gcc-build: - needs: - - clang-tidy - runs-on: ubuntu-24.04 - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - fetch-depth: 0 - - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-gcc - create-symlink: true - max-size: 1G - - - name: CMake configure - run: > - cmake -S . -B build -G Ninja - -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - env: - CC: gcc-14 - CXX: g++-14 - - - name: Build project - run: | - cmake --build build --parallel -- --quiet - env: - CC: gcc-14 - CXX: g++-14 - - - name: Show ccache stats - run: ccache --show-stats - - name: Run clang-tidy - uses: ./.github/actions/clang-tidy-native - id: review - with: - exclude: "3rdparty build docs_venv .git .pytest_cache .ruff_cache xml" - clang_tidy_version: "21" - - if: steps.review.outputs.total_comments > 0 - run: | - echo "clang-tidy run has failed. 
See previous 'Run clang-tidy' stage logs" - exit 1 - nolint-check: - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v5 - - name: Search for linter suppression markers - run: | - export BASE_REF=${{ github.event.pull_request.base.ref }} - export CHANGED_FILES="$(git diff --name-only origin/$BASE_REF HEAD | grep '^tasks/')" - if [ -z "$CHANGED_FILES" ]; then - echo "No changed files in tasks directory." - exit 0 - fi - for file in $CHANGED_FILES; do - if grep -n "NOLINT" "$file"; then - echo "::error::Found 'NOLINT' in $file." - exit 1 - fi - if grep -En 'IWYU[[:space:]]+pragma' "$file"; then - echo "::error::Found 'IWYU pragma' in $file." - exit 1 - fi - done - echo "No linter suppression markers found in changed files." +name: Static analysis + +on: + pull_request: + paths: + - '**/*.cpp' + - '**/*.hpp' + - '**/*.c' + - '**/*.h' + - '**/CMakeLists.txt' + - '**/*.cmake' + - '**/.clang-tidy' + - '.github/workflows/static-analysis-pr.yml' + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: >- + ${{ github.ref != 'refs/heads/master' && + github.event_name != 'merge_group' && + !startsWith(github.ref, 'refs/heads/gh-readonly-queue') }} + +jobs: + clang-tidy: + runs-on: ubuntu-24.04 + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + fetch-depth: 0 + + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-clang + create-symlink: true + max-size: 1G + + - name: CMake configure + run: > + cmake -S . 
-B build -G Ninja + -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + env: + CC: clang-21 + CXX: clang++-21 + + - name: Build project + run: | + cmake --build build --parallel -- --quiet + env: + CC: clang-21 + CXX: clang++-21 + + - name: Show ccache stats + run: ccache --show-stats + - name: Run clang-tidy + uses: ./.github/actions/clang-tidy-native + id: review + with: + exclude: "3rdparty build" + clang_tidy_version: "21" + - if: steps.review.outputs.total_comments > 0 + run: | + echo "clang-tidy run has failed. See previous 'Run clang-tidy' stage logs" + exit 1 + clang-tidy-for-gcc-build: + needs: + - clang-tidy + runs-on: ubuntu-24.04 + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + fetch-depth: 0 + + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-gcc + create-symlink: true + max-size: 1G + + - name: CMake configure + run: > + cmake -S . -B build -G Ninja + -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + env: + CC: gcc-14 + CXX: g++-14 + + - name: Build project + run: | + cmake --build build --parallel -- --quiet + env: + CC: gcc-14 + CXX: g++-14 + + - name: Show ccache stats + run: ccache --show-stats + - name: Run clang-tidy + uses: ./.github/actions/clang-tidy-native + id: review + with: + exclude: "3rdparty build docs_venv .git .pytest_cache .ruff_cache xml" + clang_tidy_version: "21" + - if: steps.review.outputs.total_comments > 0 + run: | + echo "clang-tidy run has failed. 
See previous 'Run clang-tidy' stage logs" + exit 1 + nolint-check: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - name: Search for linter suppression markers + run: | + export BASE_REF=${{ github.event.pull_request.base.ref }} + export CHANGED_FILES="$(git diff --name-only origin/$BASE_REF HEAD | grep '^tasks/')" + if [ -z "$CHANGED_FILES" ]; then + echo "No changed files in tasks directory." + exit 0 + fi + for file in $CHANGED_FILES; do + if grep -n "NOLINT" "$file"; then + echo "::error::Found 'NOLINT' in $file." + exit 1 + fi + if grep -En 'IWYU[[:space:]]+pragma' "$file"; then + echo "::error::Found 'IWYU pragma' in $file." + exit 1 + fi + done + echo "No linter suppression markers found in changed files." diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 598e367190..5f084b4ffe 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -1,406 +1,406 @@ -on: - workflow_call: - -jobs: - gcc-build: - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04", "ubuntu-24.04-arm"] - build_type: [Release, Debug] - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-gcc - create-symlink: true - max-size: 1G - - name: CMake configure - run: > - cmake -S . 
-B build -G Ninja - -D CMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_INSTALL_PREFIX=install - env: - CC: gcc-14 - CXX: g++-14 - - name: Archive revert list - uses: actions/upload-artifact@v4 - if: ${{ matrix.os == 'ubuntu-24.04' && matrix.build_type == 'Release' }} - with: - name: revert-list - path: build/revert-list.txt - - name: Build project - run: | - cmake --build build --parallel -- --quiet - env: - CC: gcc-14 - CXX: g++-14 - - name: Install project - run: | - cmake --build build --target install -- --quiet - - name: Archive installed package - uses: ./.github/actions/archive-install - with: - path: install - name: ${{ matrix.build_type == 'Debug' && - format('ubuntu-gcc-debug-install-{0}', matrix.os) || - format('ubuntu-gcc-install-{0}', matrix.os) }} - - name: Show ccache stats - run: ccache --show-stats - gcc-test: - needs: - - gcc-build - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04", "ubuntu-24.04-arm"] - steps: - - uses: actions/checkout@v5 - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-gcc-install-${{ matrix.os }} - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf ubuntu-gcc-install-${{ matrix.os }}.tar.gz -C install - - name: Run func tests (MPI) - run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 --additional-mpi-args="--oversubscribe" - env: - PPC_NUM_THREADS: 1 - OMPI_ALLOW_RUN_AS_ROOT: 1 - OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 - - name: Run func tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - gcc-test-extended: - needs: - - gcc-test - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - 
matrix: - os: ["ubuntu-24.04", "ubuntu-24.04-arm"] - steps: - - uses: actions/checkout@v5 - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-gcc-install-${{ matrix.os }} - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf ubuntu-gcc-install-${{ matrix.os }}.tar.gz -C install - - name: Run func tests (threads extended) - run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 - env: - PPC_NUM_PROC: 1 - clang-build: - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04", "ubuntu-24.04-arm"] - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-clang - create-symlink: true - max-size: 1G - - name: CMake configure - run: > - cmake -S . -B build -G Ninja - -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=install - env: - CC: clang-21 - CXX: clang++-21 - - name: Build project - run: | - cmake --build build --parallel -- --quiet - env: - CC: clang-21 - CXX: clang++-21 - - name: Install project - run: | - cmake --build build --target install -- --quiet - - name: Archive installed package - uses: ./.github/actions/archive-install - with: - path: install - name: ubuntu-clang-install-${{ matrix.os }} - - name: Show ccache stats - run: ccache --show-stats - clang-test: - needs: - - clang-build - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04", "ubuntu-24.04-arm"] - steps: - - uses: actions/checkout@v5 - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-clang-install-${{ matrix.os }} - - name: Extract installed 
package - run: | - mkdir -p install - tar -xzvf ubuntu-clang-install-${{ matrix.os }}.tar.gz -C install - - name: Run func tests (MPI) - run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 --additional-mpi-args="--oversubscribe" - env: - PPC_NUM_THREADS: 1 - OMPI_ALLOW_RUN_AS_ROOT: 1 - OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 - - name: Run tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - clang-test-extended: - needs: - - clang-test - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04", "ubuntu-24.04-arm"] - steps: - - uses: actions/checkout@v5 - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-clang-install-${{ matrix.os }} - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf ubuntu-clang-install-${{ matrix.os }}.tar.gz -C install - - name: Run tests (threads extended) - run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 - env: - PPC_NUM_PROC: 1 - clang-sanitizer-build: - needs: - - clang-build - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04"] - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-clang - create-symlink: true - max-size: 1G - - name: CMake configure - run: > - cmake -S . 
-B build -G Ninja - -D CMAKE_BUILD_TYPE=RelWithDebInfo - -D ENABLE_ADDRESS_SANITIZER=ON -D ENABLE_UB_SANITIZER=ON -D ENABLE_LEAK_SANITIZER=ON - -D CMAKE_INSTALL_PREFIX=install - env: - CC: clang-21 - CXX: clang++-21 - - name: Build project - run: | - cmake --build build --parallel -- --quiet - env: - CC: clang-21 - CXX: clang++-21 - - name: Install project - run: | - cmake --build build --target install -- --quiet - - name: Archive installed package - uses: ./.github/actions/archive-install - with: - path: install - name: ubuntu-clang-sanitizer-install-${{ matrix.os }} - - name: Show ccache stats - run: ccache --show-stats - clang-sanitizer-test: - needs: - - clang-sanitizer-build - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - strategy: - matrix: - os: ["ubuntu-24.04"] - steps: - - uses: actions/checkout@v5 - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-clang-sanitizer-install-${{ matrix.os }} - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf ubuntu-clang-sanitizer-install-${{ matrix.os }}.tar.gz -C install - - name: Run tests (MPI) - run: scripts/run_tests.py --running-type="processes" --counts 2 --additional-mpi-args="--oversubscribe" - env: - PPC_NUM_THREADS: 2 - PPC_ASAN_RUN: 1 - ASAN_OPTIONS: abort_on_error=1 - UBSAN_OPTIONS: halt_on_error=1 - OMPI_ALLOW_RUN_AS_ROOT: 1 - OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 - - name: Run tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - PPC_ASAN_RUN: 1 - ASAN_OPTIONS: abort_on_error=1 - UBSAN_OPTIONS: halt_on_error=1 - clang-sanitizer-test-extended: - needs: - - clang-sanitizer-test - runs-on: ${{ matrix.os }} - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - 
strategy: - matrix: - os: ["ubuntu-24.04"] - steps: - - uses: actions/checkout@v5 - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: ubuntu-clang-sanitizer-install-${{ matrix.os }} - - name: Extract installed package - run: | - mkdir -p install - tar -xzvf ubuntu-clang-sanitizer-install-${{ matrix.os }}.tar.gz -C install - - name: Run tests (threads extended) - run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 - env: - PPC_NUM_PROC: 1 - PPC_ASAN_RUN: 1 - gcc-build-codecov: - needs: - - gcc-test-extended - - clang-test-extended - runs-on: ubuntu-24.04 - container: - image: ghcr.io/learning-process/ppc-ubuntu:1.1 - credentials: - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - name: ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ runner.os }}-gcc - create-symlink: true - max-size: 1G - - name: CMake configure - run: > - cmake -S . 
-B build -G Ninja - -D CMAKE_BUILD_TYPE=RELEASE - -D CMAKE_VERBOSE_MAKEFILE=ON -D USE_COVERAGE=ON - - name: Build project - run: | - cmake --build build --parallel -- --quiet - - name: Run tests (MPI) - run: scripts/run_tests.py --running-type="processes" --additional-mpi-args="--oversubscribe" - env: - PPC_NUM_PROC: 2 - PPC_NUM_THREADS: 2 - OMPI_ALLOW_RUN_AS_ROOT: 1 - OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 - - name: Run tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - - name: Generate gcovr Coverage Data - run: | - mkdir cov-report - cd build - gcovr --gcov-executable `which gcov-14` \ - -r ../ \ - --exclude '.*3rdparty/.*' \ - --exclude '/usr/.*' \ - --exclude '.*tasks/.*/tests/.*' \ - --exclude '.*modules/.*/tests/.*' \ - --exclude '.*tasks/common/runners/.*' \ - --exclude '.*modules/runners/.*' \ - --exclude '.*modules/util/include/perf_test_util.hpp' \ - --exclude '.*modules/util/include/func_test_util.hpp' \ - --exclude '.*modules/util/src/func_test_util.cpp' \ - --xml --output ../coverage.xml \ - --html=../cov-report/index.html --html-details - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v5.5.1 - with: - files: coverage.xml - token: ${{ secrets.CODECOV_TOKEN }} - - name: Upload coverage report artifact - id: upload-cov - uses: actions/upload-artifact@v4 - with: - name: cov-report - path: 'cov-report' - - name: Comment coverage report link - # TODO: Support PRs from forks and handle cases with insufficient write permissions - continue-on-error: true - uses: peter-evans/create-or-update-comment@v5 - with: - token: ${{ secrets.GITHUB_TOKEN }} - issue-number: ${{ github.event.pull_request.number }} - body: | - Coverage report is available for download - [here](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) - - name: Show ccache stats - run: ccache --show-stats +on: + workflow_call: + +jobs: + gcc-build: + runs-on: ${{ matrix.os }} + container: 
+ image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04", "ubuntu-24.04-arm"] + build_type: [Release, Debug] + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-gcc + create-symlink: true + max-size: 1G + - name: CMake configure + run: > + cmake -S . -B build -G Ninja + -D CMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_INSTALL_PREFIX=install + env: + CC: gcc-14 + CXX: g++-14 + - name: Archive revert list + uses: actions/upload-artifact@v4 + if: ${{ matrix.os == 'ubuntu-24.04' && matrix.build_type == 'Release' }} + with: + name: revert-list + path: build/revert-list.txt + - name: Build project + run: | + cmake --build build --parallel -- --quiet + env: + CC: gcc-14 + CXX: g++-14 + - name: Install project + run: | + cmake --build build --target install -- --quiet + - name: Archive installed package + uses: ./.github/actions/archive-install + with: + path: install + name: ${{ matrix.build_type == 'Debug' && + format('ubuntu-gcc-debug-install-{0}', matrix.os) || + format('ubuntu-gcc-install-{0}', matrix.os) }} + - name: Show ccache stats + run: ccache --show-stats + gcc-test: + needs: + - gcc-build + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04", "ubuntu-24.04-arm"] + steps: + - uses: actions/checkout@v5 + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-gcc-install-${{ matrix.os }} + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf ubuntu-gcc-install-${{ matrix.os }}.tar.gz -C install + - name: Run func tests (MPI) + run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 
--additional-mpi-args="--oversubscribe" + env: + PPC_NUM_THREADS: 1 + OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + - name: Run func tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + gcc-test-extended: + needs: + - gcc-test + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04", "ubuntu-24.04-arm"] + steps: + - uses: actions/checkout@v5 + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-gcc-install-${{ matrix.os }} + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf ubuntu-gcc-install-${{ matrix.os }}.tar.gz -C install + - name: Run func tests (threads extended) + run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 + env: + PPC_NUM_PROC: 1 + clang-build: + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04", "ubuntu-24.04-arm"] + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-clang + create-symlink: true + max-size: 1G + - name: CMake configure + run: > + cmake -S . 
-B build -G Ninja + -D CMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=install + env: + CC: clang-21 + CXX: clang++-21 + - name: Build project + run: | + cmake --build build --parallel -- --quiet + env: + CC: clang-21 + CXX: clang++-21 + - name: Install project + run: | + cmake --build build --target install -- --quiet + - name: Archive installed package + uses: ./.github/actions/archive-install + with: + path: install + name: ubuntu-clang-install-${{ matrix.os }} + - name: Show ccache stats + run: ccache --show-stats + clang-test: + needs: + - clang-build + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04", "ubuntu-24.04-arm"] + steps: + - uses: actions/checkout@v5 + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-clang-install-${{ matrix.os }} + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf ubuntu-clang-install-${{ matrix.os }}.tar.gz -C install + - name: Run func tests (MPI) + run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 --additional-mpi-args="--oversubscribe" + env: + PPC_NUM_THREADS: 1 + OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + - name: Run tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + clang-test-extended: + needs: + - clang-test + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04", "ubuntu-24.04-arm"] + steps: + - uses: actions/checkout@v5 + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-clang-install-${{ matrix.os }} + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf 
ubuntu-clang-install-${{ matrix.os }}.tar.gz -C install + - name: Run tests (threads extended) + run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 + env: + PPC_NUM_PROC: 1 + clang-sanitizer-build: + needs: + - clang-build + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04"] + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-clang + create-symlink: true + max-size: 1G + - name: CMake configure + run: > + cmake -S . -B build -G Ninja + -D CMAKE_BUILD_TYPE=RelWithDebInfo + -D ENABLE_ADDRESS_SANITIZER=ON -D ENABLE_UB_SANITIZER=ON -D ENABLE_LEAK_SANITIZER=ON + -D CMAKE_INSTALL_PREFIX=install + env: + CC: clang-21 + CXX: clang++-21 + - name: Build project + run: | + cmake --build build --parallel -- --quiet + env: + CC: clang-21 + CXX: clang++-21 + - name: Install project + run: | + cmake --build build --target install -- --quiet + - name: Archive installed package + uses: ./.github/actions/archive-install + with: + path: install + name: ubuntu-clang-sanitizer-install-${{ matrix.os }} + - name: Show ccache stats + run: ccache --show-stats + clang-sanitizer-test: + needs: + - clang-sanitizer-build + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04"] + steps: + - uses: actions/checkout@v5 + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-clang-sanitizer-install-${{ matrix.os }} + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf ubuntu-clang-sanitizer-install-${{ matrix.os }}.tar.gz -C install + - name: Run tests (MPI) + run: scripts/run_tests.py 
--running-type="processes" --counts 2 --additional-mpi-args="--oversubscribe" + env: + PPC_NUM_THREADS: 2 + PPC_ASAN_RUN: 1 + ASAN_OPTIONS: abort_on_error=1 + UBSAN_OPTIONS: halt_on_error=1 + OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + - name: Run tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + PPC_ASAN_RUN: 1 + ASAN_OPTIONS: abort_on_error=1 + UBSAN_OPTIONS: halt_on_error=1 + clang-sanitizer-test-extended: + needs: + - clang-sanitizer-test + runs-on: ${{ matrix.os }} + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + os: ["ubuntu-24.04"] + steps: + - uses: actions/checkout@v5 + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: ubuntu-clang-sanitizer-install-${{ matrix.os }} + - name: Extract installed package + run: | + mkdir -p install + tar -xzvf ubuntu-clang-sanitizer-install-${{ matrix.os }}.tar.gz -C install + - name: Run tests (threads extended) + run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 + env: + PPC_NUM_PROC: 1 + PPC_ASAN_RUN: 1 + gcc-build-codecov: + needs: + - gcc-test-extended + - clang-test-extended + runs-on: ubuntu-24.04 + container: + image: ghcr.io/learning-process/ppc-ubuntu:1.1 + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ runner.os }}-gcc + create-symlink: true + max-size: 1G + - name: CMake configure + run: > + cmake -S . 
-B build -G Ninja + -D CMAKE_BUILD_TYPE=RELEASE + -D CMAKE_VERBOSE_MAKEFILE=ON -D USE_COVERAGE=ON + - name: Build project + run: | + cmake --build build --parallel -- --quiet + - name: Run tests (MPI) + run: scripts/run_tests.py --running-type="processes" --additional-mpi-args="--oversubscribe" + env: + PPC_NUM_PROC: 2 + PPC_NUM_THREADS: 2 + OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + - name: Run tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + - name: Generate gcovr Coverage Data + run: | + mkdir cov-report + cd build + gcovr --gcov-executable `which gcov-14` \ + -r ../ \ + --exclude '.*3rdparty/.*' \ + --exclude '/usr/.*' \ + --exclude '.*tasks/.*/tests/.*' \ + --exclude '.*modules/.*/tests/.*' \ + --exclude '.*tasks/common/runners/.*' \ + --exclude '.*modules/runners/.*' \ + --exclude '.*modules/util/include/perf_test_util.hpp' \ + --exclude '.*modules/util/include/func_test_util.hpp' \ + --exclude '.*modules/util/src/func_test_util.cpp' \ + --xml --output ../coverage.xml \ + --html=../cov-report/index.html --html-details + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v5.5.1 + with: + files: coverage.xml + token: ${{ secrets.CODECOV_TOKEN }} + - name: Upload coverage report artifact + id: upload-cov + uses: actions/upload-artifact@v4 + with: + name: cov-report + path: 'cov-report' + - name: Comment coverage report link + # TODO: Support PRs from forks and handle cases with insufficient write permissions + continue-on-error: true + uses: peter-evans/create-or-update-comment@v5 + with: + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ github.event.pull_request.number }} + body: | + Coverage report is available for download + [here](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) + - name: Show ccache stats + run: ccache --show-stats diff --git a/.github/workflows/update-pre-commit.yml 
b/.github/workflows/update-pre-commit.yml index f350b7d33a..f6f21f5115 100644 --- a/.github/workflows/update-pre-commit.yml +++ b/.github/workflows/update-pre-commit.yml @@ -1,24 +1,24 @@ -name: Update pre-commit hooks - -on: - schedule: - - cron: '0 0 * * 5' - workflow_dispatch: - -permissions: - contents: write - pull-requests: write - actions: write - checks: write - repository-projects: write - -jobs: - update-pre-commit: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - name: Update pre-commit hooks - uses: ./.github/actions/update-precommit - with: - token: ${{ secrets.GITHUB_TOKEN }} +name: Update pre-commit hooks + +on: + schedule: + - cron: '0 0 * * 5' + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + actions: write + checks: write + repository-projects: write + +jobs: + update-pre-commit: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v5 + - name: Update pre-commit hooks + uses: ./.github/actions/update-precommit + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index eb067fb0eb..93ec35ba7f 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,167 +1,167 @@ -on: - workflow_call: - -jobs: - msvc-build: - runs-on: windows-latest - strategy: - matrix: - build_type: [Release, Debug] - defaults: - run: - shell: bash - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - uses: ./.github/actions/setup-windows-toolchain - - name: CMake configure - shell: bash - run: > - cmake -S . 
-B build -G Ninja -D CMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=cl - -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache - -D CMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_INSTALL_PREFIX=install - - name: Build project - shell: bash - run: | - cmake --build build --config ${{ matrix.build_type }} --parallel -- --quiet - - name: Install project - run: | - cmake --build build --target install -- --quiet - - name: Archive installed package - uses: ./.github/actions/archive-install - with: - path: install - name: ${{ matrix.build_type == 'Debug' && 'windows-msvc-debug-install' || 'windows-msvc-install' }} - - name: Show ccache stats - shell: bash - run: ccache --show-stats - msvc-test: - needs: - - msvc-build - runs-on: windows-latest - defaults: - run: - shell: bash - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/setup-windows-toolchain - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: windows-msvc-install - - name: Extract installed package - run: Expand-Archive -Path .\windows-msvc-install.zip -DestinationPath . -Force - shell: pwsh - - name: Run func tests (MPI) - run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 - env: - PPC_NUM_THREADS: 1 - - name: Run tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - msvc-test-extended: - needs: - - msvc-test - runs-on: windows-latest - defaults: - run: - shell: bash - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/setup-windows-toolchain - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: windows-msvc-install - - name: Extract installed package - run: Expand-Archive -Path .\windows-msvc-install.zip -DestinationPath . 
-Force - shell: pwsh - - name: Run tests (threads extended) - run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 - env: - PPC_NUM_PROC: 1 - clang-build: - runs-on: windows-latest - defaults: - run: - shell: bash - steps: - - uses: actions/checkout@v5 - with: - submodules: recursive - - uses: ./.github/actions/setup-windows-toolchain - - name: Setup LLVM - uses: KyleMayes/install-llvm-action@v2 - with: - version: "20.1.4" - - name: CMake configure - run: > - cmake -S . -B build -G Ninja - -D CMAKE_C_COMPILER=clang-cl -D CMAKE_CXX_COMPILER=clang-cl - -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache - -D CMAKE_BUILD_TYPE=Release -D CMAKE_INSTALL_PREFIX=install - -D CMAKE_PREFIX_PATH="C:/Program Files/LLVM" - env: - CC: clang-cl - CXX: clang-cl - - name: Build project - run: | - cmake --build build --config Release --parallel - env: - CC: clang-cl - CXX: clang-cl - - name: Install project - run: | - cmake --install build - - name: Archive installed package - uses: ./.github/actions/archive-install - with: - path: install - name: windows-clang-install - - name: Show ccache stats - shell: bash - run: ccache --show-stats - clang-test: - needs: - - clang-build - runs-on: windows-latest - defaults: - run: - shell: bash - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/setup-windows-toolchain - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: windows-clang-install - - name: Extract installed package - run: Expand-Archive -Path .\windows-clang-install.zip -DestinationPath . 
-Force - shell: pwsh - - name: Run tests (threads) - run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 - env: - PPC_NUM_PROC: 1 - clang-test-extended: - needs: - - clang-test - runs-on: windows-latest - defaults: - run: - shell: bash - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/setup-windows-toolchain - - name: Download installed package - uses: actions/download-artifact@v5 - with: - name: windows-clang-install - - name: Extract installed package - run: Expand-Archive -Path .\windows-clang-install.zip -DestinationPath . -Force - shell: pwsh - - name: Run tests (threads extended) - run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 - env: - PPC_NUM_PROC: 1 +on: + workflow_call: + +jobs: + msvc-build: + runs-on: windows-latest + strategy: + matrix: + build_type: [Release, Debug] + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - uses: ./.github/actions/setup-windows-toolchain + - name: CMake configure + shell: bash + run: > + cmake -S . 
-B build -G Ninja -D CMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=cl + -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache + -D CMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_INSTALL_PREFIX=install + - name: Build project + shell: bash + run: | + cmake --build build --config ${{ matrix.build_type }} --parallel -- --quiet + - name: Install project + run: | + cmake --build build --target install -- --quiet + - name: Archive installed package + uses: ./.github/actions/archive-install + with: + path: install + name: ${{ matrix.build_type == 'Debug' && 'windows-msvc-debug-install' || 'windows-msvc-install' }} + - name: Show ccache stats + shell: bash + run: ccache --show-stats + msvc-test: + needs: + - msvc-build + runs-on: windows-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + - uses: ./.github/actions/setup-windows-toolchain + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: windows-msvc-install + - name: Extract installed package + run: Expand-Archive -Path .\windows-msvc-install.zip -DestinationPath . -Force + shell: pwsh + - name: Run func tests (MPI) + run: scripts/run_tests.py --running-type="processes" --counts 1 2 3 4 + env: + PPC_NUM_THREADS: 1 + - name: Run tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + msvc-test-extended: + needs: + - msvc-test + runs-on: windows-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + - uses: ./.github/actions/setup-windows-toolchain + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: windows-msvc-install + - name: Extract installed package + run: Expand-Archive -Path .\windows-msvc-install.zip -DestinationPath . 
-Force + shell: pwsh + - name: Run tests (threads extended) + run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 + env: + PPC_NUM_PROC: 1 + clang-build: + runs-on: windows-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + with: + submodules: recursive + - uses: ./.github/actions/setup-windows-toolchain + - name: Setup LLVM + uses: KyleMayes/install-llvm-action@v2 + with: + version: "20.1.4" + - name: CMake configure + run: > + cmake -S . -B build -G Ninja + -D CMAKE_C_COMPILER=clang-cl -D CMAKE_CXX_COMPILER=clang-cl + -D CMAKE_C_COMPILER_LAUNCHER=ccache -D CMAKE_CXX_COMPILER_LAUNCHER=ccache + -D CMAKE_BUILD_TYPE=Release -D CMAKE_INSTALL_PREFIX=install + -D CMAKE_PREFIX_PATH="C:/Program Files/LLVM" + env: + CC: clang-cl + CXX: clang-cl + - name: Build project + run: | + cmake --build build --config Release --parallel + env: + CC: clang-cl + CXX: clang-cl + - name: Install project + run: | + cmake --install build + - name: Archive installed package + uses: ./.github/actions/archive-install + with: + path: install + name: windows-clang-install + - name: Show ccache stats + shell: bash + run: ccache --show-stats + clang-test: + needs: + - clang-build + runs-on: windows-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + - uses: ./.github/actions/setup-windows-toolchain + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: windows-clang-install + - name: Extract installed package + run: Expand-Archive -Path .\windows-clang-install.zip -DestinationPath . 
-Force + shell: pwsh + - name: Run tests (threads) + run: scripts/run_tests.py --running-type="threads" --counts 1 2 3 4 + env: + PPC_NUM_PROC: 1 + clang-test-extended: + needs: + - clang-test + runs-on: windows-latest + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v5 + - uses: ./.github/actions/setup-windows-toolchain + - name: Download installed package + uses: actions/download-artifact@v5 + with: + name: windows-clang-install + - name: Extract installed package + run: Expand-Archive -Path .\windows-clang-install.zip -DestinationPath . -Force + shell: pwsh + - name: Run tests (threads extended) + run: scripts/run_tests.py --running-type="threads" --counts 5 7 11 13 + env: + PPC_NUM_PROC: 1 diff --git a/.gitignore b/.gitignore index 5de0996c8b..6bee984eaf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,19 @@ -/build* -docs/build* -/xml -docs/xml -out -mpich -cmake-build-* -.idea/ -.vs/ -.vscode/ -scripts/variants*.csv -scripts/variants*.xlsx -*venv* -sln/ -CMakeSettings.json -.DS_Store -.cache -install -*.pyc +/build* +docs/build* +/xml +docs/xml +out +mpich +cmake-build-* +.idea/ +.vs/ +.vscode/ +scripts/variants*.csv +scripts/variants*.xlsx +*venv* +sln/ +CMakeSettings.json +.DS_Store +.cache +install +*.pyc diff --git a/.gitmodules b/.gitmodules index 6ef00628f4..98b312ef00 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,15 +1,15 @@ -[submodule "3rdparty/googletest"] - path = 3rdparty/googletest - url = https://github.com/google/googletest -[submodule "3rdparty/onetbb"] - path = 3rdparty/onetbb - url = https://github.com/uxlfoundation/oneTBB -[submodule "3rdparty/stb"] - path = 3rdparty/stb - url = https://github.com/nothings/stb -[submodule "3rdparty/json"] - path = 3rdparty/json - url = https://github.com/nlohmann/json -[submodule "3rdparty/libenvpp"] - path = 3rdparty/libenvpp - url = https://github.com/ph3at/libenvpp +[submodule "3rdparty/googletest"] + path = 3rdparty/googletest + url = https://github.com/google/googletest 
+[submodule "3rdparty/onetbb"] + path = 3rdparty/onetbb + url = https://github.com/uxlfoundation/oneTBB +[submodule "3rdparty/stb"] + path = 3rdparty/stb + url = https://github.com/nothings/stb +[submodule "3rdparty/json"] + path = 3rdparty/json + url = https://github.com/nlohmann/json +[submodule "3rdparty/libenvpp"] + path = 3rdparty/libenvpp + url = https://github.com/ph3at/libenvpp diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b70f3d31c9..eeb9844077 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,52 +1,52 @@ -# Pre-commit hooks for automated formatting and linting -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks - -repos: - # C++ formatting with clang-format - - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v21.1.2 - hooks: - - id: clang-format - files: \.(cpp|hpp|c|h)$ - exclude: ^(3rdparty/|build.*/|install/) - args: [--style=file] - - # CMake formatting - - repo: https://github.com/cheshirekow/cmake-format-precommit - rev: v0.6.13 - hooks: - - id: cmake-format - files: \.(cmake|CMakeLists\.txt)$ - exclude: ^(3rdparty/|build.*/|install/) - - # Ruff Python linter - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.14.0 - hooks: - - id: ruff - args: [--fix] - - id: ruff-format - - # Flake8 Python style/lint checker (supplemental to Ruff) - - repo: https://github.com/pycqa/flake8 - rev: 7.3.0 - hooks: - - id: flake8 - - # YAML linting - - repo: https://github.com/adrienverge/yamllint.git - rev: v1.37.1 - hooks: - - id: yamllint - - # Shell script linting with shellcheck - - repo: https://github.com/koalaman/shellcheck-precommit - rev: v0.11.0 - hooks: - - id: shellcheck - files: \.sh$ - -# Configuration -default_stages: [pre-commit] -fail_fast: false +# Pre-commit hooks for automated formatting and linting +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks + +repos: + 
# C++ formatting with clang-format + - repo: https://github.com/pre-commit/mirrors-clang-format + rev: v21.1.2 + hooks: + - id: clang-format + files: \.(cpp|hpp|c|h)$ + exclude: ^(3rdparty/|build.*/|install/) + args: [--style=file] + + # CMake formatting + - repo: https://github.com/cheshirekow/cmake-format-precommit + rev: v0.6.13 + hooks: + - id: cmake-format + files: \.(cmake|CMakeLists\.txt)$ + exclude: ^(3rdparty/|build.*/|install/) + + # Ruff Python linter + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.14.0 + hooks: + - id: ruff + args: [--fix] + - id: ruff-format + + # Flake8 Python style/lint checker (supplemental to Ruff) + - repo: https://github.com/pycqa/flake8 + rev: 7.3.0 + hooks: + - id: flake8 + + # YAML linting + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.37.1 + hooks: + - id: yamllint + + # Shell script linting with shellcheck + - repo: https://github.com/koalaman/shellcheck-precommit + rev: v0.11.0 + hooks: + - id: shellcheck + files: \.sh$ + +# Configuration +default_stages: [pre-commit] +fail_fast: false diff --git a/.yamllint b/.yamllint index 268a9dac6d..30bf003e80 100644 --- a/.yamllint +++ b/.yamllint @@ -1,9 +1,9 @@ -extends: default -ignore: | - .git -rules: - line-length: - max: 120 - level: error - truthy: disable - document-start: disable +extends: default +ignore: | + .git +rules: + line-length: + max: 120 + level: error + truthy: disable + document-start: disable diff --git a/3rdparty/stb_image_wrapper.cpp b/3rdparty/stb_image_wrapper.cpp index 6cba77fbfc..1eb267d2ce 100644 --- a/3rdparty/stb_image_wrapper.cpp +++ b/3rdparty/stb_image_wrapper.cpp @@ -1,3 +1,3 @@ -#define STB_IMAGE_IMPLEMENTATION - -#include "stb/stb_image.h" +#define STB_IMAGE_IMPLEMENTATION + +#include "stb/stb_image.h" diff --git a/CMakeLists.txt b/CMakeLists.txt index 37584c9c5f..b1cb0b1c87 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,52 +1,52 @@ -cmake_minimum_required( VERSION 3.25 ) - -if(DEFINED 
CMAKE_OSX_SYSROOT AND NOT EXISTS "${CMAKE_OSX_SYSROOT}") - unset(CMAKE_OSX_SYSROOT CACHE) -endif() - -message( STATUS "Parallel Programming Course (PPC)" ) -project(parallel_programming_course) - -############################ Scoreboard ############################# - -message( STATUS "PPC step: Setup scoreboard generator" ) -include(cmake/scoreboard.cmake) -add_subdirectory(scoreboard) - -########################### Documentation ########################### - -message( STATUS "PPC step: Setup documentation generation" ) -include(cmake/sphinx.cmake) -add_subdirectory(docs) - -if( USE_SCOREBOARD OR USE_DOCS ) - return() -endif() - -############################ Configures ############################# - -message( STATUS "PPC step: First configures" ) -include(cmake/configure.cmake) -include(cmake/modes.cmake) -include(cmake/sanitizers.cmake) -foreach(dep json libenvpp stb) - include(cmake/${dep}.cmake) -endforeach() - -################# Parallel programming technologies ################# - -message( STATUS "PPC step: Setup parallel programming technologies" ) -foreach(dep mpi openmp onetbb) - include(cmake/${dep}.cmake) -endforeach() - -######################### External projects ######################### - -message( STATUS "PPC step: Setup external projects" ) -include(cmake/gtest.cmake) - -############################## Modules ############################## - -message( STATUS "PPC step: Setup modules" ) -add_subdirectory(modules) -add_subdirectory(tasks) +cmake_minimum_required( VERSION 3.25 ) + +if(DEFINED CMAKE_OSX_SYSROOT AND NOT EXISTS "${CMAKE_OSX_SYSROOT}") + unset(CMAKE_OSX_SYSROOT CACHE) +endif() + +message( STATUS "Parallel Programming Course (PPC)" ) +project(parallel_programming_course) + +############################ Scoreboard ############################# + +message( STATUS "PPC step: Setup scoreboard generator" ) +include(cmake/scoreboard.cmake) +add_subdirectory(scoreboard) + +########################### Documentation ########################### + 
+message( STATUS "PPC step: Setup documentation generation" ) +include(cmake/sphinx.cmake) +add_subdirectory(docs) + +if( USE_SCOREBOARD OR USE_DOCS ) + return() +endif() + +############################ Configures ############################# + +message( STATUS "PPC step: First configures" ) +include(cmake/configure.cmake) +include(cmake/modes.cmake) +include(cmake/sanitizers.cmake) +foreach(dep json libenvpp stb) + include(cmake/${dep}.cmake) +endforeach() + +################# Parallel programming technologies ################# + +message( STATUS "PPC step: Setup parallel programming technologies" ) +foreach(dep mpi openmp onetbb) + include(cmake/${dep}.cmake) +endforeach() + +######################### External projects ######################### + +message( STATUS "PPC step: Setup external projects" ) +include(cmake/gtest.cmake) + +############################## Modules ############################## + +message( STATUS "PPC step: Setup modules" ) +add_subdirectory(modules) +add_subdirectory(tasks) diff --git a/Doxyfile b/Doxyfile index bca4229401..0586b80f0c 100644 --- a/Doxyfile +++ b/Doxyfile @@ -1,27 +1,27 @@ -# Project identity -PROJECT_NAME = "Parallel Programming Course" -PROJECT_BRIEF = "Parallel Programming Course" - -# Input -INPUT = modules/task/include \ - modules/util/include \ - modules/util/src \ - modules/performance/include \ - modules/runners/include \ - modules/runners/src -FILE_PATTERNS = *.h *.c *.hpp *.cpp -RECURSIVE = YES - -# Build -GENERATE_HTML = NO -GENERATE_LATEX = NO -GENERATE_XML = YES -XML_OUTPUT = xml - -# Docs -EXTRACT_ALL = YES -EXTRACT_PRIVATE = YES -EXTRACT_TEMPLATE_PARAMS = YES -ENABLE_PREPROCESSING = YES -MACRO_EXPANSION = YES +# Project identity +PROJECT_NAME = "Parallel Programming Course" +PROJECT_BRIEF = "Parallel Programming Course" + +# Input +INPUT = modules/task/include \ + modules/util/include \ + modules/util/src \ + modules/performance/include \ + modules/runners/include \ + modules/runners/src +FILE_PATTERNS = *.h 
*.c *.hpp *.cpp +RECURSIVE = YES + +# Build +GENERATE_HTML = NO +GENERATE_LATEX = NO +GENERATE_XML = YES +XML_OUTPUT = xml + +# Docs +EXTRACT_ALL = YES +EXTRACT_PRIVATE = YES +EXTRACT_TEMPLATE_PARAMS = YES +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = NO \ No newline at end of file diff --git a/LICENSE b/LICENSE index a6546558d2..17062522e9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,29 +1,29 @@ -BSD 3-Clause License - -Copyright (c) 2019-2025, Learning Process developers -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+BSD 3-Clause License + +Copyright (c) 2019-2025, Learning Process developers +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/README.md b/README.md index 3d19b6ca76..54100979b5 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,27 @@ -[![Build application](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/main.yml/badge.svg?branch=master)](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/main.yml) -[![Pages](https://github.com/learning-process/parallel_programming_course/actions/workflows/pages.yml/badge.svg?branch=master)](https://github.com/learning-process/parallel_programming_course/actions/workflows/pages.yml) -[![CodeQL](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/codeql.yml/badge.svg?branch=master)](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/codeql.yml) -[![codecov](https://codecov.io/gh/learning-process/ppc-2025-processes-engineers/graph/badge.svg?token=qCOtqeFyIz)](https://codecov.io/gh/learning-process/ppc-2025-processes-engineers) -[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/learning-process/ppc-2025-processes-engineers/badge)](https://scorecard.dev/viewer/?uri=github.com/learning-process/ppc-2025-processes-engineers) - -# Parallel Programming Course - -Welcome to the Parallel Programming Course! - -For more detailed documentation and resources, please visit documentation pages: [en](https://learning-process.github.io/parallel_programming_course/en/), [ru](https://learning-process.github.io/parallel_programming_course/ru/). - -Course scoreboard is available [here](https://learning-process.github.io/ppc-2025-processes-engineers/scoreboard/). - -Coverage report is available [here](https://learning-process.github.io/ppc-2025-processes-engineers/coverage/). 
- -### Parallel programming technologies: - The following parallel programming technologies are considered in practice: - * `Message Passing Interface (MPI)` - * `OpenMP (Open Multi-Processing)` - * `oneAPI Threading Building Blocks (oneTBB)` - * `Multithreading in C++ (std::thread)` - -### Rules for submissions: -1. You are not supposed to trigger CI jobs by frequent updates of your pull request. First you should test you work locally with all the scripts (code style). - * Respect others time and don't slow down the job queue -2. Carefully check if the program can hang. +[![Build application](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/main.yml/badge.svg?branch=master)](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/main.yml) +[![Pages](https://github.com/learning-process/parallel_programming_course/actions/workflows/pages.yml/badge.svg?branch=master)](https://github.com/learning-process/parallel_programming_course/actions/workflows/pages.yml) +[![CodeQL](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/codeql.yml/badge.svg?branch=master)](https://github.com/learning-process/ppc-2025-processes-engineers/actions/workflows/codeql.yml) +[![codecov](https://codecov.io/gh/learning-process/ppc-2025-processes-engineers/graph/badge.svg?token=qCOtqeFyIz)](https://codecov.io/gh/learning-process/ppc-2025-processes-engineers) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/learning-process/ppc-2025-processes-engineers/badge)](https://scorecard.dev/viewer/?uri=github.com/learning-process/ppc-2025-processes-engineers) + +# Parallel Programming Course + +Welcome to the Parallel Programming Course! + +For more detailed documentation and resources, please visit documentation pages: [en](https://learning-process.github.io/parallel_programming_course/en/), [ru](https://learning-process.github.io/parallel_programming_course/ru/). 
+ +Course scoreboard is available [here](https://learning-process.github.io/ppc-2025-processes-engineers/scoreboard/). + +Coverage report is available [here](https://learning-process.github.io/ppc-2025-processes-engineers/coverage/). + +### Parallel programming technologies: + The following parallel programming technologies are considered in practice: + * `Message Passing Interface (MPI)` + * `OpenMP (Open Multi-Processing)` + * `oneAPI Threading Building Blocks (oneTBB)` + * `Multithreading in C++ (std::thread)` + +### Rules for submissions: +1. You are not supposed to trigger CI jobs by frequent updates of your pull request. First you should test you work locally with all the scripts (code style). + * Respect others time and don't slow down the job queue +2. Carefully check if the program can hang. diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 57f6334a53..bd615d395c 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -1,74 +1,74 @@ -if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE "Release") -endif(NOT CMAKE_BUILD_TYPE) - -if(MSVC) - add_compile_options("/utf-8") -endif() - -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG "${CMAKE_BINARY_DIR}/arch") -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG "${CMAKE_BINARY_DIR}/lib") -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG "${CMAKE_BINARY_DIR}/bin") - -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE "${CMAKE_BINARY_DIR}/arch") -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE "${CMAKE_BINARY_DIR}/lib") -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE "${CMAKE_BINARY_DIR}/bin") - -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/arch") -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin") - -set(CMAKE_CXX_STANDARD 23) - -set(CMAKE_CXX_STANDARD_REQUIRED ON) - -set(CMAKE_COMPILE_WARNING_AS_ERROR ON) - -option(USE_COVERAGE "Enable coverage instrumentation" OFF) - -if(USE_COVERAGE) - set(CMAKE_INSTALL_RPATH 
"${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib") -else() - set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") -endif() -set(CMAKE_BUILD_RPATH "${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib") - -set(CMAKE_BUILD_WITH_INSTALL_RPATH OFF) -set(CMAKE_INSTALL_RPATH_USE_LINK_PATH ON) -set(CMAKE_SKIP_BUILD_RPATH OFF) - -if(UNIX) - add_compile_options(-Wall -Wextra -Wsign-compare) - - if(NOT APPLE) - add_compile_options( - -Wpedantic - -Wpointer-arith - -Wwrite-strings - -Wswitch-enum - -Wnull-dereference - -Wformat=2 - -Wmissing-declarations - -Wno-c11-extensions - -Wno-cast-function-type) - endif(NOT APPLE) - add_compile_options($<$:-Wold-style-definition>) - add_compile_options($<$:-Wmissing-prototypes>) - - if("${ENABLE_ADDRESS_SANITIZER}" - OR "${ENABLE_UB_SANITIZER}" - OR "${ENABLE_LEAK_SANITIZER}") - add_compile_options(-Wno-cast-align -Wno-cast-function-type) - endif() - - if(USE_COVERAGE) - add_compile_options(--coverage) - add_link_options(--coverage) - endif(USE_COVERAGE) -endif() - -if(MSVC) - add_compile_options(/W4 /wd4267 /wd4244) -endif(MSVC) - -find_package(Threads REQUIRED) +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") +endif(NOT CMAKE_BUILD_TYPE) + +if(MSVC) + add_compile_options("/utf-8") +endif() + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG "${CMAKE_BINARY_DIR}/arch") +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG "${CMAKE_BINARY_DIR}/lib") +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG "${CMAKE_BINARY_DIR}/bin") + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE "${CMAKE_BINARY_DIR}/arch") +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE "${CMAKE_BINARY_DIR}/lib") +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE "${CMAKE_BINARY_DIR}/bin") + +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/arch") +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin") + +set(CMAKE_CXX_STANDARD 23) + +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +set(CMAKE_COMPILE_WARNING_AS_ERROR ON) + +option(USE_COVERAGE "Enable 
coverage instrumentation" OFF) + +if(USE_COVERAGE) + set(CMAKE_INSTALL_RPATH "${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib") +else() + set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") +endif() +set(CMAKE_BUILD_RPATH "${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib") + +set(CMAKE_BUILD_WITH_INSTALL_RPATH OFF) +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH ON) +set(CMAKE_SKIP_BUILD_RPATH OFF) + +if(UNIX) + add_compile_options(-Wall -Wextra -Wsign-compare) + + if(NOT APPLE) + add_compile_options( + -Wpedantic + -Wpointer-arith + -Wwrite-strings + -Wswitch-enum + -Wnull-dereference + -Wformat=2 + -Wmissing-declarations + -Wno-c11-extensions + -Wno-cast-function-type) + endif(NOT APPLE) + add_compile_options($<$:-Wold-style-definition>) + add_compile_options($<$:-Wmissing-prototypes>) + + if("${ENABLE_ADDRESS_SANITIZER}" + OR "${ENABLE_UB_SANITIZER}" + OR "${ENABLE_LEAK_SANITIZER}") + add_compile_options(-Wno-cast-align -Wno-cast-function-type) + endif() + + if(USE_COVERAGE) + add_compile_options(--coverage) + add_link_options(--coverage) + endif(USE_COVERAGE) +endif() + +if(MSVC) + add_compile_options(/W4 /wd4267 /wd4244) +endif(MSVC) + +find_package(Threads REQUIRED) diff --git a/cmake/functions.cmake b/cmake/functions.cmake index e7672113b5..bcd86b5ed5 100644 --- a/cmake/functions.cmake +++ b/cmake/functions.cmake @@ -1,97 +1,97 @@ -# ——— Helper function to add & register tests ————————————————————————— -function(ppc_add_test test_name test_src USE_FLAG) - if(${USE_FLAG}) - add_executable(${test_name} "${PROJECT_SOURCE_DIR}/${test_src}") - enable_testing() - add_test(NAME ${test_name} COMMAND ${test_name}) - install(TARGETS ${test_name} RUNTIME DESTINATION bin) - endif() -endfunction() - -# Function to configure tests -function(add_tests test_flag exec_target subdir) - if(${test_flag}) - # Gather all source files under tests/ - file(GLOB_RECURSE src_files "${TEST_DIR}/${subdir}/*.cpp" - "${TEST_DIR}/${subdir}/*.cxx" "${TEST_DIR}/${subdir}/*.cc") - 
target_sources(${exec_target} PRIVATE ${src_files}) - list(APPEND TEST_EXECUTABLES ${exec_target}) - set(TEST_EXECUTABLES - "${TEST_EXECUTABLES}" - PARENT_SCOPE) - endif() -endfunction() - -# ============================================================================ -# Function: setup_implementation - NAME: implementation sub‐directory name -# (e.g. “mpi”) - PROJ_NAME: project base name - BASE_DIR: root source -# directory - TESTS: list of test executables to link against -# ============================================================================ -function(setup_implementation) - # parse named args: NAME, PROJ_NAME, BASE_DIR; multi‐value: TESTS - cmake_parse_arguments(SETUP "" # no plain options - "NAME;PROJ_NAME;BASE_DIR" "TESTS" ${ARGN}) - - # skip if impl dir doesn't exist - set(IMP_DIR "${SETUP_BASE_DIR}/${SETUP_NAME}") - if(NOT EXISTS "${IMP_DIR}") - return() - endif() - message(STATUS " -- ${SETUP_NAME}") - - # collect sources - file(GLOB_RECURSE CPP_SOURCES "${IMP_DIR}/src/*.cpp") - file(GLOB_RECURSE ALL_SOURCES "${IMP_DIR}/include/*.h" - "${IMP_DIR}/include/*.hpp" "${IMP_DIR}/src/*.cpp") - - # create library (STATIC if .cpp exist, otherwise INTERFACE) - set(LIB_NAME "${SETUP_PROJ_NAME}_${SETUP_NAME}") - if(CPP_SOURCES) - add_library(${LIB_NAME} STATIC ${ALL_SOURCES}) - else() - add_library(${LIB_NAME} INTERFACE ${ALL_SOURCES}) - endif() - - # link core module - target_link_libraries(${LIB_NAME} PUBLIC core_module_lib) - - # and link into each enabled test executable - foreach(test_exec ${SETUP_TESTS}) - target_link_libraries(${test_exec} PUBLIC ${LIB_NAME}) - endforeach() -endfunction() - -# Function to configure each subproject -function(ppc_configure_subproject SUBDIR) - # Module-specific compile-time definitions - add_compile_definitions( - PPC_SETTINGS_${SUBDIR}="${CMAKE_CURRENT_SOURCE_DIR}/${SUBDIR}/settings.json" - PPC_ID_${SUBDIR}="${SUBDIR}") - - # Switch project context to the subproject - project(${SUBDIR}) - - # Directory with tests and 
list of test executables (populated by - # setup_implementation) - set(TEST_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${SUBDIR}/tests") - set(TEST_EXECUTABLES "") - - # Register functional and performance test runners - add_tests(USE_FUNC_TESTS ${FUNC_TEST_EXEC} functional) - add_tests(USE_PERF_TESTS ${PERF_TEST_EXEC} performance) - - message(STATUS "${SUBDIR}") - - # List of implementations to configure - foreach(IMPL IN LISTS PPC_IMPLEMENTATIONS) - setup_implementation( - NAME - ${IMPL} - PROJ_NAME - ${SUBDIR} - TESTS - "${TEST_EXECUTABLES}" - BASE_DIR - "${CMAKE_CURRENT_SOURCE_DIR}/${SUBDIR}") - endforeach() -endfunction() +# ——— Helper function to add & register tests ————————————————————————— +function(ppc_add_test test_name test_src USE_FLAG) + if(${USE_FLAG}) + add_executable(${test_name} "${PROJECT_SOURCE_DIR}/${test_src}") + enable_testing() + add_test(NAME ${test_name} COMMAND ${test_name}) + install(TARGETS ${test_name} RUNTIME DESTINATION bin) + endif() +endfunction() + +# Function to configure tests +function(add_tests test_flag exec_target subdir) + if(${test_flag}) + # Gather all source files under tests/ + file(GLOB_RECURSE src_files "${TEST_DIR}/${subdir}/*.cpp" + "${TEST_DIR}/${subdir}/*.cxx" "${TEST_DIR}/${subdir}/*.cc") + target_sources(${exec_target} PRIVATE ${src_files}) + list(APPEND TEST_EXECUTABLES ${exec_target}) + set(TEST_EXECUTABLES + "${TEST_EXECUTABLES}" + PARENT_SCOPE) + endif() +endfunction() + +# ============================================================================ +# Function: setup_implementation - NAME: implementation sub‐directory name +# (e.g. 
“mpi”) - PROJ_NAME: project base name - BASE_DIR: root source +# directory - TESTS: list of test executables to link against +# ============================================================================ +function(setup_implementation) + # parse named args: NAME, PROJ_NAME, BASE_DIR; multi‐value: TESTS + cmake_parse_arguments(SETUP "" # no plain options + "NAME;PROJ_NAME;BASE_DIR" "TESTS" ${ARGN}) + + # skip if impl dir doesn't exist + set(IMP_DIR "${SETUP_BASE_DIR}/${SETUP_NAME}") + if(NOT EXISTS "${IMP_DIR}") + return() + endif() + message(STATUS " -- ${SETUP_NAME}") + + # collect sources + file(GLOB_RECURSE CPP_SOURCES "${IMP_DIR}/src/*.cpp") + file(GLOB_RECURSE ALL_SOURCES "${IMP_DIR}/include/*.h" + "${IMP_DIR}/include/*.hpp" "${IMP_DIR}/src/*.cpp") + + # create library (STATIC if .cpp exist, otherwise INTERFACE) + set(LIB_NAME "${SETUP_PROJ_NAME}_${SETUP_NAME}") + if(CPP_SOURCES) + add_library(${LIB_NAME} STATIC ${ALL_SOURCES}) + else() + add_library(${LIB_NAME} INTERFACE ${ALL_SOURCES}) + endif() + + # link core module + target_link_libraries(${LIB_NAME} PUBLIC core_module_lib) + + # and link into each enabled test executable + foreach(test_exec ${SETUP_TESTS}) + target_link_libraries(${test_exec} PUBLIC ${LIB_NAME}) + endforeach() +endfunction() + +# Function to configure each subproject +function(ppc_configure_subproject SUBDIR) + # Module-specific compile-time definitions + add_compile_definitions( + PPC_SETTINGS_${SUBDIR}="${CMAKE_CURRENT_SOURCE_DIR}/${SUBDIR}/settings.json" + PPC_ID_${SUBDIR}="${SUBDIR}") + + # Switch project context to the subproject + project(${SUBDIR}) + + # Directory with tests and list of test executables (populated by + # setup_implementation) + set(TEST_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${SUBDIR}/tests") + set(TEST_EXECUTABLES "") + + # Register functional and performance test runners + add_tests(USE_FUNC_TESTS ${FUNC_TEST_EXEC} functional) + add_tests(USE_PERF_TESTS ${PERF_TEST_EXEC} performance) + + message(STATUS "${SUBDIR}") + 
+ # List of implementations to configure + foreach(IMPL IN LISTS PPC_IMPLEMENTATIONS) + setup_implementation( + NAME + ${IMPL} + PROJ_NAME + ${SUBDIR} + TESTS + "${TEST_EXECUTABLES}" + BASE_DIR + "${CMAKE_CURRENT_SOURCE_DIR}/${SUBDIR}") + endforeach() +endfunction() diff --git a/cmake/gtest.cmake b/cmake/gtest.cmake index 7f0f7d441c..cf12890741 100644 --- a/cmake/gtest.cmake +++ b/cmake/gtest.cmake @@ -1,39 +1,39 @@ -include(ExternalProject) - -ExternalProject_Add( - ppc_googletest - SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/googletest" - PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest" - BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" - INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/install" - CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} - -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} - -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} - -DCMAKE_CXX_STANDARD_REQUIRED=${CMAKE_CXX_STANDARD_REQUIRED} - -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - -DCMAKE_C_FLAGS=-w - -DCMAKE_CXX_FLAGS=-w - -DBUILD_GMOCK=OFF - $<$:-Dgtest_force_shared_crt=ON> - BUILD_COMMAND - "${CMAKE_COMMAND}" --build - "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --config $ - --parallel - INSTALL_COMMAND - "${CMAKE_COMMAND}" --install - "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --config $ - --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/install") - -function(ppc_link_gtest exec_func_lib) - # Add external project include directories - target_include_directories( - ${exec_func_lib} - PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/googletest/googletest/include) - - add_dependencies(${exec_func_lib} ppc_googletest) - target_link_directories(${exec_func_lib} PUBLIC - "${CMAKE_BINARY_DIR}/ppc_googletest/install/lib") - target_link_libraries(${exec_func_lib} PUBLIC gtest gtest_main) -endfunction() +include(ExternalProject) + +ExternalProject_Add( + 
ppc_googletest + SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/googletest" + PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest" + BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" + INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/install" + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} + -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} + -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} + -DCMAKE_CXX_STANDARD_REQUIRED=${CMAKE_CXX_STANDARD_REQUIRED} + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCMAKE_C_FLAGS=-w + -DCMAKE_CXX_FLAGS=-w + -DBUILD_GMOCK=OFF + $<$:-Dgtest_force_shared_crt=ON> + BUILD_COMMAND + "${CMAKE_COMMAND}" --build + "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --config $ + --parallel + INSTALL_COMMAND + "${CMAKE_COMMAND}" --install + "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --config $ + --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/install") + +function(ppc_link_gtest exec_func_lib) + # Add external project include directories + target_include_directories( + ${exec_func_lib} + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/googletest/googletest/include) + + add_dependencies(${exec_func_lib} ppc_googletest) + target_link_directories(${exec_func_lib} PUBLIC + "${CMAKE_BINARY_DIR}/ppc_googletest/install/lib") + target_link_libraries(${exec_func_lib} PUBLIC gtest gtest_main) +endfunction() diff --git a/cmake/json.cmake b/cmake/json.cmake index 30706b1563..a104063121 100644 --- a/cmake/json.cmake +++ b/cmake/json.cmake @@ -1,32 +1,32 @@ -include(ExternalProject) - -ExternalProject_Add( - ppc_json - SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/json" - PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_json" - BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" - INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/install" - CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - 
-DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} - -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} - -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} - -DCMAKE_CXX_STANDARD_REQUIRED=ON - -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - -DJSON_BuildTests=OFF - BUILD_COMMAND - "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" - --config $ --parallel - INSTALL_COMMAND - "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" - --config $ --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/install") - -function(ppc_link_json exec_func_lib) - # Add external project include directories - target_include_directories(${exec_func_lib} - PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/json/include) - - add_dependencies(${exec_func_lib} ppc_json) - target_include_directories( - ${exec_func_lib} INTERFACE "${CMAKE_BINARY_DIR}/ppc_json/install/include") -endfunction() +include(ExternalProject) + +ExternalProject_Add( + ppc_json + SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/json" + PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_json" + BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" + INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/install" + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} + -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} + -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} + -DCMAKE_CXX_STANDARD_REQUIRED=ON + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DJSON_BuildTests=OFF + BUILD_COMMAND + "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" + --config $ --parallel + INSTALL_COMMAND + "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" + --config $ --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/install") + +function(ppc_link_json exec_func_lib) + # Add external project include directories + target_include_directories(${exec_func_lib} + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/json/include) 
+ + add_dependencies(${exec_func_lib} ppc_json) + target_include_directories( + ${exec_func_lib} INTERFACE "${CMAKE_BINARY_DIR}/ppc_json/install/include") +endfunction() diff --git a/cmake/libenvpp.cmake b/cmake/libenvpp.cmake index c7217a8475..e82aaff268 100644 --- a/cmake/libenvpp.cmake +++ b/cmake/libenvpp.cmake @@ -1,53 +1,53 @@ -include(ExternalProject) -ExternalProject_Add( - ppc_libenvpp - SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/libenvpp" - PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp" - BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/build" - INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/install" - CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} - -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} - -DCMAKE_CXX_STANDARD_REQUIRED=ON - -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - -DLIBENVPP_TESTS=OFF - -DLIBENVPP_EXAMPLES=OFF - BUILD_COMMAND - "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/build" - --config $ --parallel - INSTALL_COMMAND - "${CMAKE_COMMAND}" --install - "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/build" --config $ - --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/install") - -string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_lower) -if(cmake_build_type_lower STREQUAL "debug") - set(PPC_FMT_LIB_NAME fmtd) -else() - set(PPC_FMT_LIB_NAME fmt) -endif() - -if(WIN32) - set(PPC_ENVPP_LIB_NAME libenvpp) -else() - set(PPC_ENVPP_LIB_NAME envpp) -endif() - -function(ppc_link_envpp exec_func_lib) - # Add external project include directories - target_include_directories( - ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/include) - target_include_directories( - ${exec_func_lib} SYSTEM - PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/external/fmt/include) - - add_dependencies(${exec_func_lib} ppc_libenvpp) - target_link_directories(${exec_func_lib} PUBLIC - 
"${CMAKE_BINARY_DIR}/ppc_libenvpp/install/lib") - target_link_directories(${exec_func_lib} PUBLIC - "${CMAKE_BINARY_DIR}/ppc_libenvpp/build") - target_link_libraries(${exec_func_lib} PUBLIC ${PPC_ENVPP_LIB_NAME}) - target_link_libraries(${exec_func_lib} PUBLIC $<$:fmtd> - $<$>:fmt>) -endfunction() +include(ExternalProject) +ExternalProject_Add( + ppc_libenvpp + SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/libenvpp" + PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp" + BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/build" + INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/install" + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} + -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} + -DCMAKE_CXX_STANDARD_REQUIRED=ON + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DLIBENVPP_TESTS=OFF + -DLIBENVPP_EXAMPLES=OFF + BUILD_COMMAND + "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/build" + --config $ --parallel + INSTALL_COMMAND + "${CMAKE_COMMAND}" --install + "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/build" --config $ + --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_libenvpp/install") + +string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_lower) +if(cmake_build_type_lower STREQUAL "debug") + set(PPC_FMT_LIB_NAME fmtd) +else() + set(PPC_FMT_LIB_NAME fmt) +endif() + +if(WIN32) + set(PPC_ENVPP_LIB_NAME libenvpp) +else() + set(PPC_ENVPP_LIB_NAME envpp) +endif() + +function(ppc_link_envpp exec_func_lib) + # Add external project include directories + target_include_directories( + ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/include) + target_include_directories( + ${exec_func_lib} SYSTEM + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/external/fmt/include) + + add_dependencies(${exec_func_lib} ppc_libenvpp) + target_link_directories(${exec_func_lib} PUBLIC + "${CMAKE_BINARY_DIR}/ppc_libenvpp/install/lib") + 
target_link_directories(${exec_func_lib} PUBLIC + "${CMAKE_BINARY_DIR}/ppc_libenvpp/build") + target_link_libraries(${exec_func_lib} PUBLIC ${PPC_ENVPP_LIB_NAME}) + target_link_libraries(${exec_func_lib} PUBLIC $<$:fmtd> + $<$>:fmt>) +endfunction() diff --git a/cmake/modes.cmake b/cmake/modes.cmake index 946b171fa7..2c6c7526ac 100644 --- a/cmake/modes.cmake +++ b/cmake/modes.cmake @@ -1,27 +1,27 @@ -add_compile_definitions(PPC_PATH_TO_PROJECT="${CMAKE_CURRENT_SOURCE_DIR}") - -macro(SUBDIRLIST result curdir) - file( - GLOB children - RELATIVE ${curdir} - ${curdir}/*) - set(dirlist "") - foreach(child ${children}) - if(IS_DIRECTORY ${curdir}/${child}) - list(APPEND dirlist ${child}) - endif() - endforeach() - set(${result} ${dirlist}) -endmacro() - -option(USE_FUNC_TESTS "Enable functional tests" ON) -if(USE_FUNC_TESTS) - message(STATUS "Enable functional tests") - add_compile_definitions(USE_FUNC_TESTS) -endif(USE_FUNC_TESTS) - -option(USE_PERF_TESTS "Enable performance tests" ON) -if(USE_PERF_TESTS) - message(STATUS "Enable performance tests") - add_compile_definitions(USE_PERF_TESTS) -endif(USE_PERF_TESTS) +add_compile_definitions(PPC_PATH_TO_PROJECT="${CMAKE_CURRENT_SOURCE_DIR}") + +macro(SUBDIRLIST result curdir) + file( + GLOB children + RELATIVE ${curdir} + ${curdir}/*) + set(dirlist "") + foreach(child ${children}) + if(IS_DIRECTORY ${curdir}/${child}) + list(APPEND dirlist ${child}) + endif() + endforeach() + set(${result} ${dirlist}) +endmacro() + +option(USE_FUNC_TESTS "Enable functional tests" ON) +if(USE_FUNC_TESTS) + message(STATUS "Enable functional tests") + add_compile_definitions(USE_FUNC_TESTS) +endif(USE_FUNC_TESTS) + +option(USE_PERF_TESTS "Enable performance tests" ON) +if(USE_PERF_TESTS) + message(STATUS "Enable performance tests") + add_compile_definitions(USE_PERF_TESTS) +endif(USE_PERF_TESTS) diff --git a/cmake/mpi.cmake b/cmake/mpi.cmake index 4922f23b26..697bb28a4c 100644 --- a/cmake/mpi.cmake +++ b/cmake/mpi.cmake @@ -1,8 +1,8 @@ 
-find_package(MPI REQUIRED COMPONENTS CXX) -if(NOT MPI_FOUND) - message(FATAL_ERROR "MPI NOT FOUND") -endif() - -function(ppc_link_mpi exec_func_lib) - target_link_libraries(${exec_func_lib} PUBLIC MPI::MPI_CXX) -endfunction() +find_package(MPI REQUIRED COMPONENTS CXX) +if(NOT MPI_FOUND) + message(FATAL_ERROR "MPI NOT FOUND") +endif() + +function(ppc_link_mpi exec_func_lib) + target_link_libraries(${exec_func_lib} PUBLIC MPI::MPI_CXX) +endfunction() diff --git a/cmake/onetbb.cmake b/cmake/onetbb.cmake index 08ef716c90..ac93e93e3b 100644 --- a/cmake/onetbb.cmake +++ b/cmake/onetbb.cmake @@ -1,68 +1,68 @@ -include(ExternalProject) - -option(ENABLE_SYSTEM_TBB "Use system TBB instead of bundled version" OFF) - -if(NOT ENABLE_SYSTEM_TBB) - if(WIN32) - set(ppc_onetbb_TEST_COMMAND - "${CMAKE_COMMAND}" -E copy_directory - "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install/bin" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}") - else() - set(ppc_onetbb_TEST_COMMAND "") - endif() - - ExternalProject_Add( - ppc_onetbb - SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/onetbb" - PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb" - BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" - INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install" - CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} - -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} - -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} - -DCMAKE_CXX_STANDARD_REQUIRED=ON - -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - -DTBB_STRICT=OFF - -DTBB_TEST=OFF - BUILD_COMMAND - "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" - --config $ --parallel - INSTALL_COMMAND - "${CMAKE_COMMAND}" --install - "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" --config $ --prefix - "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install" - TEST_COMMAND ${ppc_onetbb_TEST_COMMAND}) - - install(DIRECTORY 
"${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install/" - DESTINATION "${CMAKE_INSTALL_PREFIX}") -else() - find_package(TBB REQUIRED) -endif() - -string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_lower) -if(cmake_build_type_lower STREQUAL "debug") - set(PPC_TBB_LIB_NAME tbb_debug) -else() - set(PPC_TBB_LIB_NAME tbb) -endif() - -function(ppc_link_tbb exec_func_lib) - if(NOT ENABLE_SYSTEM_TBB) - # Add external project include directories - target_include_directories( - ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/onetbb/include) - - add_dependencies(${exec_func_lib} ppc_onetbb) - target_link_directories(${exec_func_lib} PUBLIC - ${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib) - if(NOT MSVC) - target_link_libraries(${exec_func_lib} PUBLIC ${PPC_TBB_LIB_NAME}) - endif() - else() - # Use system TBB - target_link_libraries(${exec_func_lib} PUBLIC TBB::tbb) - endif() -endfunction() +include(ExternalProject) + +option(ENABLE_SYSTEM_TBB "Use system TBB instead of bundled version" OFF) + +if(NOT ENABLE_SYSTEM_TBB) + if(WIN32) + set(ppc_onetbb_TEST_COMMAND + "${CMAKE_COMMAND}" -E copy_directory + "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install/bin" + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}") + else() + set(ppc_onetbb_TEST_COMMAND "") + endif() + + ExternalProject_Add( + ppc_onetbb + SOURCE_DIR "${CMAKE_SOURCE_DIR}/3rdparty/onetbb" + PREFIX "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb" + BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" + INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install" + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} + -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} + -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} + -DCMAKE_CXX_STANDARD_REQUIRED=ON + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DTBB_STRICT=OFF + -DTBB_TEST=OFF + BUILD_COMMAND + "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" + --config $ 
--parallel + INSTALL_COMMAND + "${CMAKE_COMMAND}" --install + "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" --config $ --prefix + "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install" + TEST_COMMAND ${ppc_onetbb_TEST_COMMAND}) + + install(DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install/" + DESTINATION "${CMAKE_INSTALL_PREFIX}") +else() + find_package(TBB REQUIRED) +endif() + +string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_lower) +if(cmake_build_type_lower STREQUAL "debug") + set(PPC_TBB_LIB_NAME tbb_debug) +else() + set(PPC_TBB_LIB_NAME tbb) +endif() + +function(ppc_link_tbb exec_func_lib) + if(NOT ENABLE_SYSTEM_TBB) + # Add external project include directories + target_include_directories( + ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/onetbb/include) + + add_dependencies(${exec_func_lib} ppc_onetbb) + target_link_directories(${exec_func_lib} PUBLIC + ${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib) + if(NOT MSVC) + target_link_libraries(${exec_func_lib} PUBLIC ${PPC_TBB_LIB_NAME}) + endif() + else() + # Use system TBB + target_link_libraries(${exec_func_lib} PUBLIC TBB::tbb) + endif() +endfunction() diff --git a/cmake/openmp.cmake b/cmake/openmp.cmake index a254a88fff..eb4109809e 100644 --- a/cmake/openmp.cmake +++ b/cmake/openmp.cmake @@ -1,25 +1,25 @@ -if(MSVC AND NOT (CMAKE_CXX_COMPILER_ID MATCHES "Clang")) - set(OpenMP_C_FLAGS - "/openmp:llvm" - CACHE STRING "OpenMP C flags" FORCE) - set(OpenMP_CXX_FLAGS - "/openmp:llvm" - CACHE STRING "OpenMP CXX flags" FORCE) - # Pretend Clang-style OpenMP 5.0 support so CMake reports a newer version - set(OpenMP_C_SPEC_DATE - 201811 - CACHE STRING "OpenMP C specification date" FORCE) - set(OpenMP_CXX_SPEC_DATE - 201811 - CACHE STRING "OpenMP CXX specification date" FORCE) -endif() - -function(ppc_link_threads exec_func_lib) - target_link_libraries(${exec_func_lib} PUBLIC Threads::Threads) -endfunction() - -function(ppc_link_openmp exec_func_lib) - find_package(OpenMP REQUIRED) - 
target_link_libraries(${exec_func_lib} PUBLIC ${OpenMP_libomp_LIBRARY} - OpenMP::OpenMP_CXX) -endfunction() +if(MSVC AND NOT (CMAKE_CXX_COMPILER_ID MATCHES "Clang")) + set(OpenMP_C_FLAGS + "/openmp:llvm" + CACHE STRING "OpenMP C flags" FORCE) + set(OpenMP_CXX_FLAGS + "/openmp:llvm" + CACHE STRING "OpenMP CXX flags" FORCE) + # Pretend Clang-style OpenMP 5.0 support so CMake reports a newer version + set(OpenMP_C_SPEC_DATE + 201811 + CACHE STRING "OpenMP C specification date" FORCE) + set(OpenMP_CXX_SPEC_DATE + 201811 + CACHE STRING "OpenMP CXX specification date" FORCE) +endif() + +function(ppc_link_threads exec_func_lib) + target_link_libraries(${exec_func_lib} PUBLIC Threads::Threads) +endfunction() + +function(ppc_link_openmp exec_func_lib) + find_package(OpenMP REQUIRED) + target_link_libraries(${exec_func_lib} PUBLIC ${OpenMP_libomp_LIBRARY} + OpenMP::OpenMP_CXX) +endfunction() diff --git a/cmake/sanitizers.cmake b/cmake/sanitizers.cmake index b57359ba5a..a0759759a0 100644 --- a/cmake/sanitizers.cmake +++ b/cmake/sanitizers.cmake @@ -1,23 +1,23 @@ -option(ENABLE_ADDRESS_SANITIZER OFF) -option(ENABLE_UB_SANITIZER OFF) -option(ENABLE_LEAK_SANITIZER OFF) -if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - if(ENABLE_ADDRESS_SANITIZER) - add_compile_options(-fsanitize=address) - add_link_options(-fsanitize=address) - message(STATUS "Enabled address sanitizer") - endif() - if(ENABLE_UB_SANITIZER) - add_compile_options(-fsanitize=undefined - -fno-sanitize=signed-integer-overflow) - add_link_options(-fsanitize=undefined -fno-sanitize=signed-integer-overflow) - message(STATUS "Enabled UB sanitizer") - endif() - if(ENABLE_LEAK_SANITIZER) - add_compile_options(-fsanitize=leak) - add_link_options(-fsanitize=leak) - message(STATUS "Enabled leak sanitizer") - endif() -else() - message(WARNING "Sanitizers are supported on gcc and clang compilers only!") -endif() +option(ENABLE_ADDRESS_SANITIZER OFF) +option(ENABLE_UB_SANITIZER OFF) 
+option(ENABLE_LEAK_SANITIZER OFF) +if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") + if(ENABLE_ADDRESS_SANITIZER) + add_compile_options(-fsanitize=address) + add_link_options(-fsanitize=address) + message(STATUS "Enabled address sanitizer") + endif() + if(ENABLE_UB_SANITIZER) + add_compile_options(-fsanitize=undefined + -fno-sanitize=signed-integer-overflow) + add_link_options(-fsanitize=undefined -fno-sanitize=signed-integer-overflow) + message(STATUS "Enabled UB sanitizer") + endif() + if(ENABLE_LEAK_SANITIZER) + add_compile_options(-fsanitize=leak) + add_link_options(-fsanitize=leak) + message(STATUS "Enabled leak sanitizer") + endif() +else() + message(WARNING "Sanitizers are supported on gcc and clang compilers only!") +endif() diff --git a/cmake/scoreboard.cmake b/cmake/scoreboard.cmake index a9f3dc3d86..277fb41ea9 100644 --- a/cmake/scoreboard.cmake +++ b/cmake/scoreboard.cmake @@ -1,4 +1,4 @@ -option(USE_SCOREBOARD OFF) -if(USE_SCOREBOARD) - find_package(Python REQUIRED COMPONENTS Interpreter) -endif(USE_SCOREBOARD) +option(USE_SCOREBOARD OFF) +if(USE_SCOREBOARD) + find_package(Python REQUIRED COMPONENTS Interpreter) +endif(USE_SCOREBOARD) diff --git a/cmake/sphinx.cmake b/cmake/sphinx.cmake index a94b11a423..e3abd77712 100644 --- a/cmake/sphinx.cmake +++ b/cmake/sphinx.cmake @@ -1,6 +1,6 @@ -option(USE_DOCS OFF) -if(USE_DOCS) - set(SPHINXBUILD "sphinx-build") - set(SPHINXINTL "sphinx-intl") - set(SPHINXOPTS "-W" "--keep-going" "-n") -endif(USE_DOCS) +option(USE_DOCS OFF) +if(USE_DOCS) + set(SPHINXBUILD "sphinx-build") + set(SPHINXINTL "sphinx-intl") + set(SPHINXOPTS "-W" "--keep-going" "-n") +endif(USE_DOCS) diff --git a/cmake/stb.cmake b/cmake/stb.cmake index c737f2f1ee..1d85104650 100644 --- a/cmake/stb.cmake +++ b/cmake/stb.cmake @@ -1,6 +1,6 @@ -function(ppc_link_stb exec_func_lib) - add_library(stb_image STATIC - ${CMAKE_SOURCE_DIR}/3rdparty/stb_image_wrapper.cpp) - target_include_directories(stb_image PUBLIC 
${CMAKE_SOURCE_DIR}/3rdparty/stb) - target_link_libraries(${exec_func_lib} PUBLIC stb_image) -endfunction() +function(ppc_link_stb exec_func_lib) + add_library(stb_image STATIC + ${CMAKE_SOURCE_DIR}/3rdparty/stb_image_wrapper.cpp) + target_include_directories(stb_image PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/stb) + target_link_libraries(${exec_func_lib} PUBLIC stb_image) +endfunction() diff --git a/docker/README.md b/docker/README.md index c7efccac31..574ce030d4 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,13 +1,13 @@ -# Docker - -## Prerequisites - -- Docker with buildx support - -## Build - -Build multi-architecture Ubuntu 24.04 development image: - -```bash -docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/learning-process/ppc-ubuntu:latest -f ./ubuntu.Dockerfile . -``` +# Docker + +## Prerequisites + +- Docker with buildx support + +## Build + +Build multi-architecture Ubuntu 24.04 development image: + +```bash +docker buildx build --platform linux/amd64,linux/arm64 -t ghcr.io/learning-process/ppc-ubuntu:latest -f ./ubuntu.Dockerfile . 
+``` diff --git a/docker/ubuntu.Dockerfile b/docker/ubuntu.Dockerfile index 225a822851..fc2df6935d 100644 --- a/docker/ubuntu.Dockerfile +++ b/docker/ubuntu.Dockerfile @@ -1,29 +1,29 @@ -FROM ubuntu:24.04 - -ENV DEBIAN_FRONTEND=noninteractive - -RUN set -e \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential \ - git \ - ca-certificates curl wget gnupg lsb-release software-properties-common \ - python3 python3-pip \ - ninja-build cmake make \ - ccache \ - valgrind \ - libmpich-dev mpich \ - openmpi-bin openmpi-common libopenmpi-dev \ - libomp-dev \ - gcc-14 g++-14 \ - gcovr zip \ - && wget -q https://apt.llvm.org/llvm.sh \ - && chmod +x llvm.sh \ - && ./llvm.sh 21 all \ - && rm llvm.sh \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -ENV CC=gcc-14 CXX=g++-14 - -CMD ["bash"] +FROM ubuntu:24.04 + +ENV DEBIAN_FRONTEND=noninteractive + +RUN set -e \ + && apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential \ + git \ + ca-certificates curl wget gnupg lsb-release software-properties-common \ + python3 python3-pip \ + ninja-build cmake make \ + ccache \ + valgrind \ + libmpich-dev mpich \ + openmpi-bin openmpi-common libopenmpi-dev \ + libomp-dev \ + gcc-14 g++-14 \ + gcovr zip \ + && wget -q https://apt.llvm.org/llvm.sh \ + && chmod +x llvm.sh \ + && ./llvm.sh 21 all \ + && rm llvm.sh \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +ENV CC=gcc-14 CXX=g++-14 + +CMD ["bash"] diff --git a/docs/.gitignore b/docs/.gitignore index 0003061435..a6e52fedbd 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,2 +1,2 @@ -_build -*.mo +_build +*.mo diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 7270262774..dc70210a23 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -1,39 +1,39 @@ -if(NOT USE_DOCS) - return() -endif() - -set(SOURCEDIR "${CMAKE_CURRENT_SOURCE_DIR}") -set(BUILDDIR "${CMAKE_CURRENT_BINARY_DIR}/_build/html") -set(GETTEXTDIR 
"${CMAKE_CURRENT_BINARY_DIR}/_build/gettext") - -add_custom_target( - docs_help - COMMAND ${SPHINXBUILD} -M help "${SOURCEDIR}" "${BUILDDIR}/en" ${SPHINXOPTS} - WORKING_DIRECTORY "${SOURCEDIR}" - COMMENT "Displaying Sphinx Makefile help") - -# 'gettext' target: generate gettext catalogs. -add_custom_target( - docs_gettext - COMMAND ${SPHINXBUILD} -b gettext "${SOURCEDIR}" "${GETTEXTDIR}" ${SPHINXOPTS} - WORKING_DIRECTORY "${SOURCEDIR}" - COMMENT "Generating gettext output") - -# 'update' target: update translations for the specified languages. -add_custom_target( - docs_update - COMMAND ${SPHINXINTL} update -p "${GETTEXTDIR}" -l en -l ru - WORKING_DIRECTORY "${SOURCEDIR}" - COMMENT "Updating Sphinx translations") - -# 'html' target: build the HTML documentation in both English and Russian. -add_custom_target( - docs_html - # Build English documentation. - COMMAND ${SPHINXBUILD} -b html -D language=en "${SOURCEDIR}" "${BUILDDIR}/en" - ${SPHINXOPTS} - # Build Russian documentation. - COMMAND ${SPHINXBUILD} -b html -D language=ru "${SOURCEDIR}" "${BUILDDIR}/ru" - ${SPHINXOPTS} - WORKING_DIRECTORY "${SOURCEDIR}" - COMMENT "Building HTML documentation for English and Russian") +if(NOT USE_DOCS) + return() +endif() + +set(SOURCEDIR "${CMAKE_CURRENT_SOURCE_DIR}") +set(BUILDDIR "${CMAKE_CURRENT_BINARY_DIR}/_build/html") +set(GETTEXTDIR "${CMAKE_CURRENT_BINARY_DIR}/_build/gettext") + +add_custom_target( + docs_help + COMMAND ${SPHINXBUILD} -M help "${SOURCEDIR}" "${BUILDDIR}/en" ${SPHINXOPTS} + WORKING_DIRECTORY "${SOURCEDIR}" + COMMENT "Displaying Sphinx Makefile help") + +# 'gettext' target: generate gettext catalogs. +add_custom_target( + docs_gettext + COMMAND ${SPHINXBUILD} -b gettext "${SOURCEDIR}" "${GETTEXTDIR}" ${SPHINXOPTS} + WORKING_DIRECTORY "${SOURCEDIR}" + COMMENT "Generating gettext output") + +# 'update' target: update translations for the specified languages. 
+add_custom_target( + docs_update + COMMAND ${SPHINXINTL} update -p "${GETTEXTDIR}" -l en -l ru + WORKING_DIRECTORY "${SOURCEDIR}" + COMMENT "Updating Sphinx translations") + +# 'html' target: build the HTML documentation in both English and Russian. +add_custom_target( + docs_html + # Build English documentation. + COMMAND ${SPHINXBUILD} -b html -D language=en "${SOURCEDIR}" "${BUILDDIR}/en" + ${SPHINXOPTS} + # Build Russian documentation. + COMMAND ${SPHINXBUILD} -b html -D language=ru "${SOURCEDIR}" "${BUILDDIR}/ru" + ${SPHINXOPTS} + WORKING_DIRECTORY "${SOURCEDIR}" + COMMENT "Building HTML documentation for English and Russian") diff --git a/docs/README.md b/docs/README.md index 763790e284..356c5e1312 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,63 +1,63 @@ -# Parallel Programming Course Documentation - -### How to make and test your changes - -1. Navigate to the project root directory: -```bash -cd parallel_programming_course -``` - -2. (Optional) Create a virtual environment: -```bash -python3 -m venv venv -``` - -3. Install the required dependencies (for the projects and the docs): -```bash -pip install -r requirements.txt -pip install -r docs/requirements.txt -``` - -4. Configure the documentation build: -```bash -cmake -S . -B build -DUSE_DOCS=ON -``` - -5. Generate API documentation with Doxygen: -```bash -doxygen Doxyfile -``` - -6. Build the documentation: -```bash -cmake --build build -t docs_html -``` - -7. Update the documentation: -```bash -cmake --build build -t docs_gettext -# update documentation -cmake --build build -t docs_update -``` - -8. Re-build the documentation: -```bash -cmake --build build -t docs_html -``` - -9. Make local deployment of the changes: -```bash -cd docs/_build/html -python3 -m http.server 8080 -``` - -10. Open the documentation in your browser. 
Depending on your platform use `open` (macOS), `xdg-open` (Linux), or `start` (Windows): -```bash -open http://localhost:8080/en # macOS -xdg-open http://localhost:8080/en # Linux -start http://localhost:8080/en # Windows - -open http://localhost:8080/ru # macOS -xdg-open http://localhost:8080/ru # Linux -start http://localhost:8080/ru # Windows -``` +# Parallel Programming Course Documentation + +### How to make and test your changes + +1. Navigate to the project root directory: +```bash +cd parallel_programming_course +``` + +2. (Optional) Create a virtual environment: +```bash +python3 -m venv venv +``` + +3. Install the required dependencies (for the projects and the docs): +```bash +pip install -r requirements.txt +pip install -r docs/requirements.txt +``` + +4. Configure the documentation build: +```bash +cmake -S . -B build -DUSE_DOCS=ON +``` + +5. Generate API documentation with Doxygen: +```bash +doxygen Doxyfile +``` + +6. Build the documentation: +```bash +cmake --build build -t docs_html +``` + +7. Update the documentation: +```bash +cmake --build build -t docs_gettext +# update documentation +cmake --build build -t docs_update +``` + +8. Re-build the documentation: +```bash +cmake --build build -t docs_html +``` + +9. Make local deployment of the changes: +```bash +cd docs/_build/html +python3 -m http.server 8080 +``` + +10. Open the documentation in your browser. 
Depending on your platform use `open` (macOS), `xdg-open` (Linux), or `start` (Windows): +```bash +open http://localhost:8080/en # macOS +xdg-open http://localhost:8080/en # Linux +start http://localhost:8080/en # Windows + +open http://localhost:8080/ru # macOS +xdg-open http://localhost:8080/ru # Linux +start http://localhost:8080/ru # Windows +``` diff --git a/docs/_static/ci_graph.svg b/docs/_static/ci_graph.svg index ed812dddfc..ff246fd466 100644 --- a/docs/_static/ci_graph.svg +++ b/docs/_static/ci_graph.svg @@ -1,114 +1,114 @@ - - - - - - - - - -cpp-lint - -cpp-lint - - - -ubuntu - -ubuntu - - - -cpp-lint->ubuntu - - - - - -mac - -mac - - - -cpp-lint->mac - - - - - -windows - -windows - - - -cpp-lint->windows - - - - - -shell-lint - -shell-lint - - - -shell-lint->ubuntu - - - - - -shell-lint->mac - - - - - -shell-lint->windows - - - - - -perf - -perf - - - -ubuntu->perf - - - - - -mac->perf - - - - - -windows->perf - - - - - -pages - -pages - - - -perf->pages - - - - - + + + + + + + + + +cpp-lint + +cpp-lint + + + +ubuntu + +ubuntu + + + +cpp-lint->ubuntu + + + + + +mac + +mac + + + +cpp-lint->mac + + + + + +windows + +windows + + + +cpp-lint->windows + + + + + +shell-lint + +shell-lint + + + +shell-lint->ubuntu + + + + + +shell-lint->mac + + + + + +shell-lint->windows + + + + + +perf + +perf + + + +ubuntu->perf + + + + + +mac->perf + + + + + +windows->perf + + + + + +pages + +pages + + + +perf->pages + + + + + diff --git a/docs/_static/custom.css b/docs/_static/custom.css index a93b276265..65c84667d6 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,5 +1,5 @@ -@import url('https://fonts.googleapis.com/css2?family=PT+Sans:wght@400;700&display=swap'); - -html[lang="ru"] body { - font-family: 'PT Sans', Arial, sans-serif; -} +@import url('https://fonts.googleapis.com/css2?family=PT+Sans:wght@400;700&display=swap'); + +html[lang="ru"] body { + font-family: 'PT Sans', Arial, sans-serif; +} diff --git a/docs/_templates/footer.html 
b/docs/_templates/footer.html index c72f5b8a8a..ee10300801 100644 --- a/docs/_templates/footer.html +++ b/docs/_templates/footer.html @@ -1,35 +1,35 @@ -{%- extends "!footer.html" %} - -{% block extrafooter %} - -
- {% set langs = { "en": "English", "ru": "Русский" } %} - {% for code, name in langs.items() %} - {% if code == language %} - {{ name }} - {% else %} - {{ name }} - {% endif %} - {% if not loop.last %} | {% endif %} - {% endfor %} -
-{% endblock %} +{%- extends "!footer.html" %} + +{% block extrafooter %} + +
+ {% set langs = { "en": "English", "ru": "Русский" } %} + {% for code, name in langs.items() %} + {% if code == language %} + {{ name }} + {% else %} + {{ name }} + {% endif %} + {% if not loop.last %} | {% endif %} + {% endfor %} +
+{% endblock %} diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index 8306423695..033cb26357 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -1,7 +1,7 @@ -{%- extends "!layout.html" %} - -{% block metatags %} - {{ super() }} - {# point every “/foo” URL at the real site root #} - -{% endblock %} +{%- extends "!layout.html" %} + +{% block metatags %} + {{ super() }} + {# point every “/foo” URL at the real site root #} + +{% endblock %} diff --git a/docs/common_information/introduction.rst b/docs/common_information/introduction.rst index 23e4d4a79b..ef1e9b73b5 100644 --- a/docs/common_information/introduction.rst +++ b/docs/common_information/introduction.rst @@ -1,42 +1,42 @@ -Introduction -============ - -Practice -~~~~~~~~ - -- We work online - - - Use GitHub repository - - Use Pull Requests - - Merge into the master branch - - Test verification - -- Task distribution is random for each student. -- An example for each technology can be found in the corresponding directory: ``tasks//example``. -- In each repository, the README.md contains a link to the course documentation (**read it fully!!!**). -- Additionally, each repository includes an example of a properly formatted PULL REQUEST. -- Submission of all tasks is mandatory to pass the course. -- A task that has been merged into the master branch continues to be monitored. - If a task fails in the master, it is disabled, and a record of this is added to the score table. - All disabled tasks will result in a zero points result for those tasks at the end of the semester. 
- It can be seen that your task is disabled due to the following reason: the directory of your task has been renamed - from ``seq/nesterov_a_vector_sum`` to ``seq/nesterov_a_vector_sum_disabled`` -- All resources for using the repository will be provided here: - - - `Git for half an hour: A Beginner’s Guide `__ - - `Getting Started with Git and GitHub: A Beginner’s Guide `__ - - `Git: A Quick Start Guide to Using Core Operations with Explanations `__ - - `Conflicts resolving in Git `__ - - `Google testing framework (gtest) `__ - - `GoogleTest Primer `__ - - `GitHub Actions documentation `__ - - `Parallel Programming Technologies. Message Passing Interface (MPI) `__ - - `Typing and Layout in the System LaTeX `__ - - `LaTeX for the beginners `__ - - `What is OpenMP? `__ - - `TBB-1 `__ - - `Writing Multithreaded Applications in C++ `__ - - `Multithreading: New Features of the C++11 Standard `__ - - `Introduction to Parallel Computing `__ - -\* *All instructions, repositories, and tables may be updated during the learning process for better usability. Be prepared for changes, check and update them periodically!!!* +Introduction +============ + +Practice +~~~~~~~~ + +- We work online + + - Use GitHub repository + - Use Pull Requests + - Merge into the master branch + - Test verification + +- Task distribution is random for each student. +- An example for each technology can be found in the corresponding directory: ``tasks//example``. +- In each repository, the README.md contains a link to the course documentation (**read it fully!!!**). +- Additionally, each repository includes an example of a properly formatted PULL REQUEST. +- Submission of all tasks is mandatory to pass the course. +- A task that has been merged into the master branch continues to be monitored. + If a task fails in the master, it is disabled, and a record of this is added to the score table. + All disabled tasks will result in a zero points result for those tasks at the end of the semester. 
+ It can be seen that your task is disabled due to the following reason: the directory of your task has been renamed + from ``seq/nesterov_a_vector_sum`` to ``seq/nesterov_a_vector_sum_disabled`` +- All resources for using the repository will be provided here: + + - `Git for half an hour: A Beginner’s Guide `__ + - `Getting Started with Git and GitHub: A Beginner’s Guide `__ + - `Git: A Quick Start Guide to Using Core Operations with Explanations `__ + - `Conflicts resolving in Git `__ + - `Google testing framework (gtest) `__ + - `GoogleTest Primer `__ + - `GitHub Actions documentation `__ + - `Parallel Programming Technologies. Message Passing Interface (MPI) `__ + - `Typing and Layout in the System LaTeX `__ + - `LaTeX for the beginners `__ + - `What is OpenMP? `__ + - `TBB-1 `__ + - `Writing Multithreaded Applications in C++ `__ + - `Multithreading: New Features of the C++11 Standard `__ + - `Introduction to Parallel Computing `__ + +\* *All instructions, repositories, and tables may be updated during the learning process for better usability. 
Be prepared for changes, check and update them periodically!!!* diff --git a/docs/common_information/points.rst b/docs/common_information/points.rst index c9ca9602f4..acc870bca7 100644 --- a/docs/common_information/points.rst +++ b/docs/common_information/points.rst @@ -1,153 +1,153 @@ -Points -====== - -- For “process parallelism” semester - - - 1st MPI task - - +----------+ - | Solution | - +==========+ - | 10 | - +----------+ - - - 2nd MPI task - - ======== =========== - Solution Performance - ======== =========== - 15 5 - ======== =========== - - - 3rd MPI task - - ======== =========== - Solution Performance - ======== =========== - 20 10 - ======== =========== - - - **Total : 60 points** - -- For “thread parallelism” semester - - - Sequential version - - +----------+ - | Solution | - +==========+ - | 4 | - +----------+ - - - OpenMP version - - ======== =========== - Solution Performance - ======== =========== - 6 3 - ======== =========== - - - TBB version - - ======== =========== - Solution Performance - ======== =========== - 6 3 - ======== =========== - - - std::thread version - - ======== =========== - Solution Performance - ======== =========== - 8 6 - ======== =========== - - - “MPI + threads” version (The threading technology is chosen - randomly) - - ======== =========== - Solution Performance - ======== =========== - 10 8 - ======== =========== - - - **Total : 54 points** - -- The rule for earning performance points. - - The ratio of efficiency percentage to points percentage, where the maximum points is 100% and the minimum points is 0%. - - If the score is not an integer, it is rounded up. 
- - +----------------+-------------------+ - | Efficiency (%) | Points percentage | - +================+===================+ - | >= 50% | 100% | - +----------------+-------------------+ - | [45, 50) | 90% | - +----------------+-------------------+ - | [42, 45) | 80% | - +----------------+-------------------+ - | [40, 42) | 70% | - +----------------+-------------------+ - | [37, 40) | 60% | - +----------------+-------------------+ - | [35, 37) | 50% | - +----------------+-------------------+ - | [32, 35) | 40% | - +----------------+-------------------+ - | [30, 32) | 30% | - +----------------+-------------------+ - | [27, 30) | 20% | - +----------------+-------------------+ - | [25, 27) | 10% | - +----------------+-------------------+ - | < 25% | 0% | - +----------------+-------------------+ - -- Report - - +--------------+--------------+--------------------+-------+ - | Completeness | Text Quality | Formatting Quality | Total | - +==============+==============+====================+=======+ - | 5 | 2.5 | 2.5 | 10 | - +--------------+--------------+--------------------+-------+ - -- Conversion of points into exam assessment or pass/fail - - - For 5-point grading system - - ============ =============== ============ - Points range Exam Assessment Student Pass - ============ =============== ============ - [87, 100] 5 Passed - [70, 87) 4 Passed - [50, 70) 3 Passed - < 50 2 Not Passed - ============ =============== ============ - - - For 7-point grading system **(our current system)** - - ============ =============== ============ - Points range Exam Assessment Student Pass - ============ =============== ============ - [99, 100] 5.5 Passed - [92, 99) 5 Passed - [82, 92) 4.5 Passed - [70, 82) 4 Passed - [50, 70) 3 Passed - < 50 2 Not Passed - ============ =============== ============ - -- Penalties: - - - A deadline will be set for each version. - - 1 point is deducted from the version’s score for each day of delay in submission. 
- - The task is considered submitted when it is merged into the master/main branch. - - The submission time is defined as the timestamp of the last commit that successfully passes the CI pipeline. - -- Comments: - - - It is forbidden to write the report if all tasks are not completed. - - Please keep in mind that one week before the end of the semester, the repository will be closed for final assessment. +Points +====== + +- For “process parallelism” semester + + - 1st MPI task + + +----------+ + | Solution | + +==========+ + | 10 | + +----------+ + + - 2nd MPI task + + ======== =========== + Solution Performance + ======== =========== + 15 5 + ======== =========== + + - 3rd MPI task + + ======== =========== + Solution Performance + ======== =========== + 20 10 + ======== =========== + + - **Total : 60 points** + +- For “thread parallelism” semester + + - Sequential version + + +----------+ + | Solution | + +==========+ + | 4 | + +----------+ + + - OpenMP version + + ======== =========== + Solution Performance + ======== =========== + 6 3 + ======== =========== + + - TBB version + + ======== =========== + Solution Performance + ======== =========== + 6 3 + ======== =========== + + - std::thread version + + ======== =========== + Solution Performance + ======== =========== + 8 6 + ======== =========== + + - “MPI + threads” version (The threading technology is chosen + randomly) + + ======== =========== + Solution Performance + ======== =========== + 10 8 + ======== =========== + + - **Total : 54 points** + +- The rule for earning performance points. + + The ratio of efficiency percentage to points percentage, where the maximum points is 100% and the minimum points is 0%. + + If the score is not an integer, it is rounded up. 
+ + +----------------+-------------------+ + | Efficiency (%) | Points percentage | + +================+===================+ + | >= 50% | 100% | + +----------------+-------------------+ + | [45, 50) | 90% | + +----------------+-------------------+ + | [42, 45) | 80% | + +----------------+-------------------+ + | [40, 42) | 70% | + +----------------+-------------------+ + | [37, 40) | 60% | + +----------------+-------------------+ + | [35, 37) | 50% | + +----------------+-------------------+ + | [32, 35) | 40% | + +----------------+-------------------+ + | [30, 32) | 30% | + +----------------+-------------------+ + | [27, 30) | 20% | + +----------------+-------------------+ + | [25, 27) | 10% | + +----------------+-------------------+ + | < 25% | 0% | + +----------------+-------------------+ + +- Report + + +--------------+--------------+--------------------+-------+ + | Completeness | Text Quality | Formatting Quality | Total | + +==============+==============+====================+=======+ + | 5 | 2.5 | 2.5 | 10 | + +--------------+--------------+--------------------+-------+ + +- Conversion of points into exam assessment or pass/fail + + - For 5-point grading system + + ============ =============== ============ + Points range Exam Assessment Student Pass + ============ =============== ============ + [87, 100] 5 Passed + [70, 87) 4 Passed + [50, 70) 3 Passed + < 50 2 Not Passed + ============ =============== ============ + + - For 7-point grading system **(our current system)** + + ============ =============== ============ + Points range Exam Assessment Student Pass + ============ =============== ============ + [99, 100] 5.5 Passed + [92, 99) 5 Passed + [82, 92) 4.5 Passed + [70, 82) 4 Passed + [50, 70) 3 Passed + < 50 2 Not Passed + ============ =============== ============ + +- Penalties: + + - A deadline will be set for each version. + - 1 point is deducted from the version’s score for each day of delay in submission. 
+ - The task is considered submitted when it is merged into the master/main branch. + - The submission time is defined as the timestamp of the last commit that successfully passes the CI pipeline. + +- Comments: + + - It is forbidden to write the report if all tasks are not completed. + - Please keep in mind that one week before the end of the semester, the repository will be closed for final assessment. diff --git a/docs/common_information/processes_tasks.rst b/docs/common_information/processes_tasks.rst index f3e20965aa..0899d4dbee 100644 --- a/docs/common_information/processes_tasks.rst +++ b/docs/common_information/processes_tasks.rst @@ -1,279 +1,279 @@ -Processes parallelism tasks -=========================== - -First task ----------- - -+----------------+---------------------------------------------------------------------------------+ -| Variant Number | Task | -+================+=================================================================================+ -| 1 | Sum of vector elements | -+----------------+---------------------------------------------------------------------------------+ -| 2 | Calculating the average value of vector elements | -+----------------+---------------------------------------------------------------------------------+ -| 3 | Maximum value of vector elements | -+----------------+---------------------------------------------------------------------------------+ -| 4 | Minimum value of vector elements | -+----------------+---------------------------------------------------------------------------------+ -| 5 | Finding the number of sign alternations between adjacent elements of the vector | -+----------------+---------------------------------------------------------------------------------+ -| 6 | Finding the number of order violations between adjacent elements of the vector | -+----------------+---------------------------------------------------------------------------------+ -| 7 | Finding the most similar adjacent elements of 
the vector | -+----------------+---------------------------------------------------------------------------------+ -| 8 | Finding the most different adjacent elements of the vector | -+----------------+---------------------------------------------------------------------------------+ -| 9 | Scalar product of vectors | -+----------------+---------------------------------------------------------------------------------+ -| 10 | Sum of matrix elements | -+----------------+---------------------------------------------------------------------------------+ -| 11 | Sum of values by rows in the matrix | -+----------------+---------------------------------------------------------------------------------+ -| 12 | Sum of values by columns in the matrix | -+----------------+---------------------------------------------------------------------------------+ -| 13 | Maximum value of matrix elements | -+----------------+---------------------------------------------------------------------------------+ -| 14 | Minimum value of matrix elements | -+----------------+---------------------------------------------------------------------------------+ -| 15 | Finding maximum values by rows in the matrix | -+----------------+---------------------------------------------------------------------------------+ -| 16 | Finding maximum values by columns in the matrix | -+----------------+---------------------------------------------------------------------------------+ -| 17 | Finding minimum values by rows in the matrix | -+----------------+---------------------------------------------------------------------------------+ -| 18 | Finding minimum values by columns in the matrix | -+----------------+---------------------------------------------------------------------------------+ -| 19 | Integration – rectangle method | -+----------------+---------------------------------------------------------------------------------+ -| 20 | Integration – trapezoidal method | 
-+----------------+---------------------------------------------------------------------------------+ -| 21 | Integration – Monte Carlo method | -+----------------+---------------------------------------------------------------------------------+ -| 22 | Counting the number of alphabetical characters in a string | -+----------------+---------------------------------------------------------------------------------+ -| 23 | Counting the frequency of a character in a string | -+----------------+---------------------------------------------------------------------------------+ -| 24 | Counting the number of words in a string | -+----------------+---------------------------------------------------------------------------------+ -| 25 | Counting the number of sentences in a string | -+----------------+---------------------------------------------------------------------------------+ -| 26 | Checking lexicographical order of two strings | -+----------------+---------------------------------------------------------------------------------+ -| 27 | Counting the number of differing characters between two strings | -+----------------+---------------------------------------------------------------------------------+ - -Second task ------------ - -+----------------+-------------------------------------------------------------------------------------+ -| Variant Number | Task | -+================+=====================================================================================+ -| 1 | Broadcast (one to all transfer) | -+----------------+-------------------------------------------------------------------------------------+ -| 2 | Reduce (all to one transfer) | -+----------------+-------------------------------------------------------------------------------------+ -| 3 | Allreduce (all to one and broadcast) | -+----------------+-------------------------------------------------------------------------------------+ -| 4 | Scatter (one to all transfer) | 
-+----------------+-------------------------------------------------------------------------------------+ -| 5 | Gather (all to one transfer) | -+----------------+-------------------------------------------------------------------------------------+ -| 6 | Line | -+----------------+-------------------------------------------------------------------------------------+ -| 7 | Ring | -+----------------+-------------------------------------------------------------------------------------+ -| 8 | Star | -+----------------+-------------------------------------------------------------------------------------+ -| 9 | Torus Grid | -+----------------+-------------------------------------------------------------------------------------+ -| 10 | Hypercube | -+----------------+-------------------------------------------------------------------------------------+ -| 11 | Horizontal strip scheme - matrix-vector multiplication | -+----------------+-------------------------------------------------------------------------------------+ -| 12 | Vertical strip scheme - matrix-vector multiplication | -+----------------+-------------------------------------------------------------------------------------+ -| 13 | Horizontal strip scheme – partitioning only matrix A - matrix-matrix multiplication | -+----------------+-------------------------------------------------------------------------------------+ -| 14 | Horizontal strip scheme A, vertical strip scheme B - matrix-matrix multiplication | -+----------------+-------------------------------------------------------------------------------------+ -| 15 | Gaussian method – horizontal strip scheme | -+----------------+-------------------------------------------------------------------------------------+ -| 16 | Gaussian method – vertical strip scheme | -+----------------+-------------------------------------------------------------------------------------+ -| 17 | Gauss-Jordan method | 
-+----------------+-------------------------------------------------------------------------------------+ -| 18 | Iterative methods (Jacobi) | -+----------------+-------------------------------------------------------------------------------------+ -| 19 | Iterative methods (Gauss-Seidel) | -+----------------+-------------------------------------------------------------------------------------+ -| 20 | Iterative methods (Simple) | -+----------------+-------------------------------------------------------------------------------------+ -| 21 | Bubble sort (odd-even transposition algorithm) | -+----------------+-------------------------------------------------------------------------------------+ -| 22 | Image smoothing | -+----------------+-------------------------------------------------------------------------------------+ -| 23 | Contrast enhancement | -+----------------+-------------------------------------------------------------------------------------+ - -Third task ----------- - -+----------------+----------------------------------------------------------------------------------------------------------+ -| Variant Number | Task | -+================+==========================================================================================================+ -| 1 | Dense matrix multiplication. Elements of data type double. Block scheme, Cannon's algorithm. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 2 | Dense matrix multiplication. Elements of data type double. Block scheme, Fox's algorithm. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 3 | Dense matrix multiplication. Elements of data type double. Strassen's algorithm. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 4 | Sparse matrix multiplication. 
Elements of data type double. Matrix storage format – row format (CRS). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 5 | Sparse matrix multiplication. Elements of data type double. Matrix storage format – column format (CCS). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 6 | Solving systems of linear equations using the conjugate gradient method. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 7 | Computing multidimensional integrals using a multistep scheme (rectangle method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 8 | Computing multidimensional integrals using a multistep scheme (trapezoidal method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 9 | Computing multidimensional integrals using a multistep scheme (Simpson's method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 10 | Computing multidimensional integrals using the Monte Carlo method. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 11 | Global search algorithm (Strongin's) for one-dimensional optimization problems. Parallelization by | -| | characteristics. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 12 | Multistep scheme for solving two-dimensional global optimization problems. Parallelization by dividing | -| | the search area. 
| -+----------------+----------------------------------------------------------------------------------------------------------+ -| 13 | Multistep scheme for solving two-dimensional global optimization problems. Parallelization by | -| | characteristics. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 14 | Quick sort with simple merging. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 15 | Quick sort with odd-even merging (Batcher's method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 16 | Shell sort with simple merging. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 17 | Shell sort with odd-even merging (Batcher's method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 18 | Radix sort for integers with simple merging. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 19 | Radix sort for integers with odd-even merging (Batcher's method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 20 | Radix sort for floating-point numbers (type double) with simple merging. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 21 | Radix sort for floating-point numbers (type double) with odd-even merging (Batcher's method). | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 22 | Shortest path search from one vertex (Dijkstra's algorithm). 
With CRS graphs. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 23 | Shortest path search from one vertex (Bellman-Ford algorithm). With CRS graphs. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 24 | Convex hull construction – Graham's scan. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 25 | Convex hull construction – Jarvis's march. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 26 | Linear image filtering (horizontal partition). Gaussian kernel 3x3. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 27 | Linear image filtering (vertical partition). Gaussian kernel 3x3. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 28 | Linear image filtering (block partition). Gaussian kernel 3x3. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 29 | Edge detection in an image using the Sobel operator. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 30 | Contrast enhancement of grayscale image using linear histogram stretching. | -+----------------+----------------------------------------------------------------------------------------------------------+ -| 31 | Labeling components on a binary image (black areas correspond to objects, white to background). 
| -+----------------+----------------------------------------------------------------------------------------------------------+ -| 32 | Convex hull construction for components of a binary image. | -+----------------+----------------------------------------------------------------------------------------------------------+ - -Comments for tasks 2 and 3: -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- MESSAGE PASSING METHODS “You need to implement the specified methods - using only the Send and Recv functions. The implemented function - should have the same prototype as the corresponding MPI function. The - test program should allow selecting the root process number and - perform array transmission (broadcast, gather) for at least the - following types: MPI_INT, MPI_FLOAT, MPI_DOUBLE. In all operations, - the transmission should be carried out using the ‘tree’ of processes.” - - Comments relevant for: - - =================== =================== - Variants for task 2 Variants for task 3 - =================== =================== - 1 - 5 x - =================== =================== - -- DATA COMMUNICATION NETWORK TOPOLOGIES “You need to implement the - virtual topology specified in the task using MPI capabilities for - working with communicators and topologies, and ensure the ability to - transfer data from any selected process to any other process. (Do not - use MPI_Cart_Create and MPI_Graph_Create)” - - Comments relevant for: - - =================== =================== - Varinats for task 2 Varinats for task 3 - =================== =================== - 6 - 10 x - =================== =================== - -- MATRIX COMPUTATIONS “In the horizontal scheme, the matrix is divided - among processes by rows. 
In the vertical scheme, it is divided by - columns, and in this case, the vector is also divided among - processes.” - - Comments relevant for: - - =================== =================== - Varinats for task 2 Varinats for task 3 - =================== =================== - 11 - 14 1 - 5 - =================== =================== - -- COMPUTER GRAPHICS AND IMAGE PROCESSING “It is assumed that the image - is given in color or grayscale, with the input data being a - one-dimensional array. Loading a real image is not required, but is - allowed.” - - Comments relevant for: - - =================== =================== - Varinats for task 2 Varinats for task 3 - =================== =================== - 26 - 27 24 - 32 - =================== =================== - -- SOLUTION OF A SYSTEM OF LINEAR ALGEBRAIC EQUATIONS - - Comments relevant for: - - =================== =================== - Varinats for task 2 Varinats for task 3 - =================== =================== - 15 - 20 6 - =================== =================== - -- SORT ALGORITHMS - - Comments relevant for: - - =================== =================== - Varinats for task 2 Varinats for task 3 - =================== =================== - 21 14 - 21 - =================== =================== - -- GRAPH PROCESSING ALGORITHMS - - Comments relevant for: - - =================== =================== - Varinats for task 2 Varinats for task 3 - =================== =================== - x 22 - 23 - =================== =================== +Processes parallelism tasks +=========================== + +First task +---------- + ++----------------+---------------------------------------------------------------------------------+ +| Variant Number | Task | ++================+=================================================================================+ +| 1 | Sum of vector elements | ++----------------+---------------------------------------------------------------------------------+ +| 2 | Calculating the average value of vector 
elements | ++----------------+---------------------------------------------------------------------------------+ +| 3 | Maximum value of vector elements | ++----------------+---------------------------------------------------------------------------------+ +| 4 | Minimum value of vector elements | ++----------------+---------------------------------------------------------------------------------+ +| 5 | Finding the number of sign alternations between adjacent elements of the vector | ++----------------+---------------------------------------------------------------------------------+ +| 6 | Finding the number of order violations between adjacent elements of the vector | ++----------------+---------------------------------------------------------------------------------+ +| 7 | Finding the most similar adjacent elements of the vector | ++----------------+---------------------------------------------------------------------------------+ +| 8 | Finding the most different adjacent elements of the vector | ++----------------+---------------------------------------------------------------------------------+ +| 9 | Scalar product of vectors | ++----------------+---------------------------------------------------------------------------------+ +| 10 | Sum of matrix elements | ++----------------+---------------------------------------------------------------------------------+ +| 11 | Sum of values by rows in the matrix | ++----------------+---------------------------------------------------------------------------------+ +| 12 | Sum of values by columns in the matrix | ++----------------+---------------------------------------------------------------------------------+ +| 13 | Maximum value of matrix elements | ++----------------+---------------------------------------------------------------------------------+ +| 14 | Minimum value of matrix elements | ++----------------+---------------------------------------------------------------------------------+ +| 15 | Finding 
maximum values by rows in the matrix | ++----------------+---------------------------------------------------------------------------------+ +| 16 | Finding maximum values by columns in the matrix | ++----------------+---------------------------------------------------------------------------------+ +| 17 | Finding minimum values by rows in the matrix | ++----------------+---------------------------------------------------------------------------------+ +| 18 | Finding minimum values by columns in the matrix | ++----------------+---------------------------------------------------------------------------------+ +| 19 | Integration – rectangle method | ++----------------+---------------------------------------------------------------------------------+ +| 20 | Integration – trapezoidal method | ++----------------+---------------------------------------------------------------------------------+ +| 21 | Integration – Monte Carlo method | ++----------------+---------------------------------------------------------------------------------+ +| 22 | Counting the number of alphabetical characters in a string | ++----------------+---------------------------------------------------------------------------------+ +| 23 | Counting the frequency of a character in a string | ++----------------+---------------------------------------------------------------------------------+ +| 24 | Counting the number of words in a string | ++----------------+---------------------------------------------------------------------------------+ +| 25 | Counting the number of sentences in a string | ++----------------+---------------------------------------------------------------------------------+ +| 26 | Checking lexicographical order of two strings | ++----------------+---------------------------------------------------------------------------------+ +| 27 | Counting the number of differing characters between two strings | 
++----------------+---------------------------------------------------------------------------------+ + +Second task +----------- + ++----------------+-------------------------------------------------------------------------------------+ +| Variant Number | Task | ++================+=====================================================================================+ +| 1 | Broadcast (one to all transfer) | ++----------------+-------------------------------------------------------------------------------------+ +| 2 | Reduce (all to one transfer) | ++----------------+-------------------------------------------------------------------------------------+ +| 3 | Allreduce (all to one and broadcast) | ++----------------+-------------------------------------------------------------------------------------+ +| 4 | Scatter (one to all transfer) | ++----------------+-------------------------------------------------------------------------------------+ +| 5 | Gather (all to one transfer) | ++----------------+-------------------------------------------------------------------------------------+ +| 6 | Line | ++----------------+-------------------------------------------------------------------------------------+ +| 7 | Ring | ++----------------+-------------------------------------------------------------------------------------+ +| 8 | Star | ++----------------+-------------------------------------------------------------------------------------+ +| 9 | Torus Grid | ++----------------+-------------------------------------------------------------------------------------+ +| 10 | Hypercube | ++----------------+-------------------------------------------------------------------------------------+ +| 11 | Horizontal strip scheme - matrix-vector multiplication | ++----------------+-------------------------------------------------------------------------------------+ +| 12 | Vertical strip scheme - matrix-vector multiplication | 
++----------------+-------------------------------------------------------------------------------------+ +| 13 | Horizontal strip scheme – partitioning only matrix A - matrix-matrix multiplication | ++----------------+-------------------------------------------------------------------------------------+ +| 14 | Horizontal strip scheme A, vertical strip scheme B - matrix-matrix multiplication | ++----------------+-------------------------------------------------------------------------------------+ +| 15 | Gaussian method – horizontal strip scheme | ++----------------+-------------------------------------------------------------------------------------+ +| 16 | Gaussian method – vertical strip scheme | ++----------------+-------------------------------------------------------------------------------------+ +| 17 | Gauss-Jordan method | ++----------------+-------------------------------------------------------------------------------------+ +| 18 | Iterative methods (Jacobi) | ++----------------+-------------------------------------------------------------------------------------+ +| 19 | Iterative methods (Gauss-Seidel) | ++----------------+-------------------------------------------------------------------------------------+ +| 20 | Iterative methods (Simple) | ++----------------+-------------------------------------------------------------------------------------+ +| 21 | Bubble sort (odd-even transposition algorithm) | ++----------------+-------------------------------------------------------------------------------------+ +| 22 | Image smoothing | ++----------------+-------------------------------------------------------------------------------------+ +| 23 | Contrast enhancement | ++----------------+-------------------------------------------------------------------------------------+ + +Third task +---------- + ++----------------+----------------------------------------------------------------------------------------------------------+ +| Variant Number | 
Task | ++================+==========================================================================================================+ +| 1 | Dense matrix multiplication. Elements of data type double. Block scheme, Cannon's algorithm. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 2 | Dense matrix multiplication. Elements of data type double. Block scheme, Fox's algorithm. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 3 | Dense matrix multiplication. Elements of data type double. Strassen's algorithm. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 4 | Sparse matrix multiplication. Elements of data type double. Matrix storage format – row format (CRS). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 5 | Sparse matrix multiplication. Elements of data type double. Matrix storage format – column format (CCS). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 6 | Solving systems of linear equations using the conjugate gradient method. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 7 | Computing multidimensional integrals using a multistep scheme (rectangle method). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 8 | Computing multidimensional integrals using a multistep scheme (trapezoidal method). 
| ++----------------+----------------------------------------------------------------------------------------------------------+ +| 9 | Computing multidimensional integrals using a multistep scheme (Simpson's method). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 10 | Computing multidimensional integrals using the Monte Carlo method. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 11 | Global search algorithm (Strongin's) for one-dimensional optimization problems. Parallelization by | +| | characteristics. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 12 | Multistep scheme for solving two-dimensional global optimization problems. Parallelization by dividing | +| | the search area. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 13 | Multistep scheme for solving two-dimensional global optimization problems. Parallelization by | +| | characteristics. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 14 | Quick sort with simple merging. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 15 | Quick sort with odd-even merging (Batcher's method). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 16 | Shell sort with simple merging. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 17 | Shell sort with odd-even merging (Batcher's method). 
| ++----------------+----------------------------------------------------------------------------------------------------------+ +| 18 | Radix sort for integers with simple merging. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 19 | Radix sort for integers with odd-even merging (Batcher's method). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 20 | Radix sort for floating-point numbers (type double) with simple merging. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 21 | Radix sort for floating-point numbers (type double) with odd-even merging (Batcher's method). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 22 | Shortest path search from one vertex (Dijkstra's algorithm). With CRS graphs. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 23 | Shortest path search from one vertex (Bellman-Ford algorithm). With CRS graphs. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 24 | Convex hull construction – Graham's scan. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 25 | Convex hull construction – Jarvis's march. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 26 | Linear image filtering (horizontal partition). Gaussian kernel 3x3. 
| ++----------------+----------------------------------------------------------------------------------------------------------+ +| 27 | Linear image filtering (vertical partition). Gaussian kernel 3x3. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 28 | Linear image filtering (block partition). Gaussian kernel 3x3. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 29 | Edge detection in an image using the Sobel operator. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 30 | Contrast enhancement of grayscale image using linear histogram stretching. | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 31 | Labeling components on a binary image (black areas correspond to objects, white to background). | ++----------------+----------------------------------------------------------------------------------------------------------+ +| 32 | Convex hull construction for components of a binary image. | ++----------------+----------------------------------------------------------------------------------------------------------+ + +Comments for tasks 2 and 3: +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- MESSAGE PASSING METHODS “You need to implement the specified methods + using only the Send and Recv functions. The implemented function + should have the same prototype as the corresponding MPI function. The + test program should allow selecting the root process number and + perform array transmission (broadcast, gather) for at least the + following types: MPI_INT, MPI_FLOAT, MPI_DOUBLE. 
In all operations, + the transmission should be carried out using the ‘tree’ of processes.” + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + 1 - 5 x + =================== =================== + +- DATA COMMUNICATION NETWORK TOPOLOGIES “You need to implement the + virtual topology specified in the task using MPI capabilities for + working with communicators and topologies, and ensure the ability to + transfer data from any selected process to any other process. (Do not + use MPI_Cart_Create and MPI_Graph_Create)” + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + 6 - 10 x + =================== =================== + +- MATRIX COMPUTATIONS “In the horizontal scheme, the matrix is divided + among processes by rows. In the vertical scheme, it is divided by + columns, and in this case, the vector is also divided among + processes.” + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + 11 - 14 1 - 5 + =================== =================== + +- COMPUTER GRAPHICS AND IMAGE PROCESSING “It is assumed that the image + is given in color or grayscale, with the input data being a + one-dimensional array. 
Loading a real image is not required, but is + allowed.” + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + 26 - 27 24 - 32 + =================== =================== + +- SOLUTION OF A SYSTEM OF LINEAR ALGEBRAIC EQUATIONS + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + 15 - 20 6 + =================== =================== + +- SORT ALGORITHMS + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + 21 14 - 21 + =================== =================== + +- GRAPH PROCESSING ALGORITHMS + + Comments relevant for: + + =================== =================== + Variants for task 2 Variants for task 3 + =================== =================== + x 22 - 23 + =================== =================== diff --git a/docs/common_information/report.rst b/docs/common_information/report.rst index dee9afa569..03807dd6c2 100644 --- a/docs/common_information/report.rst +++ b/docs/common_information/report.rst @@ -1,49 +1,49 @@ -Report -====== - -- Report points - - +--------------+--------------+--------------------+-------+ - | Completeness | Text Quality | Formatting Quality | Total | - +==============+==============+====================+=======+ - | 5 | 2.5 | 2.5 | 10 | - +--------------+--------------+--------------------+-------+ - -- Requirements for Criteria - - - Completeness - - - Introduction (can be a short paragraph) - - Problem Statement (descriptive) - - Algorithm Description - - Description of the Parallel Algorithm Scheme - - Description of the MPI, OpenMP, TBB, std::threads, all versions (depending on the semester) – part of the software implementation description - - Experimental Results (execution time and algorithm quality assessment), description of 
correctness verification - - Conclusions from the Results - - Conclusion - - References - - Appendix (include code, ensuring readability) - - - Text Quality - - - Meaningfulness - - Coherence - - Clarity - - Quality of language (only the most obvious mistakes are considered) - - - Formatting Quality - - - Requirements for Headings - - Requirements for Text Alignment - - Requirements for Paragraph Indentation - - Requirements for the Formatting of Figures, Graphs, and Tables - - The “Teacher” field must include the full name, position, and title of the lecturer - -- Comments - - - Failure to meet the requirements will result in a deduction of points. - - The request will include points and comments regarding any requirement violations (if applicable). - - The report will be checked only once, and the grade will be assigned based on the submitted version according to the requirements. - - The report is reviewed online; the entire review process takes place in the request. - - If a student falls into the **blue zone** for the task, the report points will also be nullified at the end of the semester. - The report will be finally accepted and merged into the master branch only after both the **online** parts of the corresponding lab work are fully completed. 
+Report +====== + +- Report points + + +--------------+--------------+--------------------+-------+ + | Completeness | Text Quality | Formatting Quality | Total | + +==============+==============+====================+=======+ + | 5 | 2.5 | 2.5 | 10 | + +--------------+--------------+--------------------+-------+ + +- Requirements for Criteria + + - Completeness + + - Introduction (can be a short paragraph) + - Problem Statement (descriptive) + - Algorithm Description + - Description of the Parallel Algorithm Scheme + - Description of the MPI, OpenMP, TBB, std::threads, all versions (depending on the semester) – part of the software implementation description + - Experimental Results (execution time and algorithm quality assessment), description of correctness verification + - Conclusions from the Results + - Conclusion + - References + - Appendix (include code, ensuring readability) + + - Text Quality + + - Meaningfulness + - Coherence + - Clarity + - Quality of language (only the most obvious mistakes are considered) + + - Formatting Quality + + - Requirements for Headings + - Requirements for Text Alignment + - Requirements for Paragraph Indentation + - Requirements for the Formatting of Figures, Graphs, and Tables + - The “Teacher” field must include the full name, position, and title of the lecturer + +- Comments + + - Failure to meet the requirements will result in a deduction of points. + - The request will include points and comments regarding any requirement violations (if applicable). + - The report will be checked only once, and the grade will be assigned based on the submitted version according to the requirements. + - The report is reviewed online; the entire review process takes place in the request. + - If a student falls into the **blue zone** for the task, the report points will also be nullified at the end of the semester. 
+ The report will be finally accepted and merged into the master branch only after both the **online** parts of the corresponding lab work are fully completed. diff --git a/docs/common_information/threading_tasks.rst b/docs/common_information/threading_tasks.rst index 03a85c67ae..e735be4ed3 100644 --- a/docs/common_information/threading_tasks.rst +++ b/docs/common_information/threading_tasks.rst @@ -1,66 +1,66 @@ -Thread parallelism tasks -======================== - -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| Variant Number | Tasks | -+================+===================================================================================================================================================+ -| 1 | Dense matrix multiplication. Elements of type double. Block scheme, Cannon's algorithm. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 2 | Dense matrix multiplication. Elements of type double. Block scheme, Fox's algorithm. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 3 | Dense matrix multiplication. Elements of type double. Strassen's algorithm. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 4 | Sparse matrix multiplication. Elements of type double. Matrix storage format – row format (Compressed Row Storage). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 5 | Sparse matrix multiplication. Elements of type double. 
Matrix storage format – column format (Compressed Column Storage). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 6 | Sparse matrix multiplication. Complex type elements. Matrix storage format – row format (Compressed Row Storage). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 7 | Sparse matrix multiplication. Complex type elements. Matrix storage format – column format (Compressed Column Storage). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 8 | Solving systems of linear equations using the conjugate gradient method. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 9 | Computing multidimensional integrals using a multistep scheme (rectangle method). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 10 | Computing multidimensional integrals using a multistep scheme (trapezoidal method). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 11 | Computing multidimensional integrals using a multistep scheme (Simpson's method). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 12 | Computing multidimensional integrals using the Monte Carlo method. 
| -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 13 | Quick sort with simple merging. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 14 | Quick sort with odd-even merging (Batcher's method). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 15 | Shell sort with simple merging. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 16 | Shell sort with odd-even merging (Batcher's method). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 17 | Radix sort for integers with simple merging. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 18 | Radix sort for integers with odd-even merging (Batcher's method). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 19 | Radix sort for floating-point numbers (type double) with simple merging. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 20 | Radix sort for floating-point numbers (type double) with odd-even merging (Batcher's method). 
| -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 21 | Shortest path search from one vertex (Dijkstra's algorithm). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 22 | Convex hull construction – Graham's scan. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 23 | Convex hull construction – Jarvis's march. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 24 | Linear image filtering (horizontal partition). Gaussian kernel 3x3. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 25 | Linear image filtering (vertical partition). Gaussian kernel 3x3. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 26 | Linear image filtering (block partition). Gaussian kernel 3x3. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 27 | Edge detection in an image using the Sobel operator. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 28 | Contrast enhancement of grayscale image using linear histogram stretching. 
| -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 29 | Labeling components on a binary image (black areas correspond to objects, white to background). | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ -| 30 | Convex hull construction for components of a binary image. | -+----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +Thread parallelism tasks +======================== + ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| Variant Number | Tasks | ++================+===================================================================================================================================================+ +| 1 | Dense matrix multiplication. Elements of type double. Block scheme, Cannon's algorithm. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 2 | Dense matrix multiplication. Elements of type double. Block scheme, Fox's algorithm. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 3 | Dense matrix multiplication. Elements of type double. Strassen's algorithm. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 4 | Sparse matrix multiplication. Elements of type double. 
Matrix storage format – row format (Compressed Row Storage). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 5 | Sparse matrix multiplication. Elements of type double. Matrix storage format – column format (Compressed Column Storage). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 6 | Sparse matrix multiplication. Complex type elements. Matrix storage format – row format (Compressed Row Storage). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 7 | Sparse matrix multiplication. Complex type elements. Matrix storage format – column format (Compressed Column Storage). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 8 | Solving systems of linear equations using the conjugate gradient method. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 9 | Computing multidimensional integrals using a multistep scheme (rectangle method). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 10 | Computing multidimensional integrals using a multistep scheme (trapezoidal method). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 11 | Computing multidimensional integrals using a multistep scheme (Simpson's method). 
| ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 12 | Computing multidimensional integrals using the Monte Carlo method. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 13 | Quick sort with simple merging. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 14 | Quick sort with odd-even merging (Batcher's method). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 15 | Shell sort with simple merging. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 16 | Shell sort with odd-even merging (Batcher's method). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 17 | Radix sort for integers with simple merging. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 18 | Radix sort for integers with odd-even merging (Batcher's method). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 19 | Radix sort for floating-point numbers (type double) with simple merging. 
| ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 20 | Radix sort for floating-point numbers (type double) with odd-even merging (Batcher's method). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 21 | Shortest path search from one vertex (Dijkstra's algorithm). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 22 | Convex hull construction – Graham's scan. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 23 | Convex hull construction – Jarvis's march. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 24 | Linear image filtering (horizontal partition). Gaussian kernel 3x3. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 25 | Linear image filtering (vertical partition). Gaussian kernel 3x3. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 26 | Linear image filtering (block partition). Gaussian kernel 3x3. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 27 | Edge detection in an image using the Sobel operator. 
| ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 28 | Contrast enhancement of grayscale image using linear histogram stretching. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 29 | Labeling components on a binary image (black areas correspond to objects, white to background). | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ +| 30 | Convex hull construction for components of a binary image. | ++----------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/docs/conf.py b/docs/conf.py index 88353ff806..f7c9bff91a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,61 +1,61 @@ -# Configuration file for the Sphinx documentation builder. 
-# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -import os - -project = "Parallel Programming Course" -copyright = "2025, Learning Process" -author = "Learning Process" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -extensions = [ - "breathe", -] - -breathe_projects = { - "ParallelProgrammingCourse": os.path.join(os.path.dirname(__file__), "..", "xml"), -} -breathe_default_project = "ParallelProgrammingCourse" - -# Suppress specific warnings for API documentation -suppress_warnings = [ - "ref.ref", # undefined label warnings - "ref.identifier", # cpp:identifier reference target not found -] - -templates_path = ["_templates"] -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] -locale_dirs = ["locale"] -gettext_compact = False - -# -- Options for HTML output ------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output - -html_theme = "sphinx_rtd_theme" -html_static_path = ["_static"] - -html_sidebars = { - "**": [ - "globaltoc.html", - "sourcelink.html", - "searchbox.html", - "language_switcher.html", - ], -} - -html_theme_options = { - "collapse_navigation": False, - "navigation_depth": 2, -} - - -def setup(app): - """Add custom CSS files.""" - app.add_css_file("custom.css") +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +import os + +project = "Parallel Programming Course" +copyright = "2025, Learning Process" +author = "Learning Process" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "breathe", +] + +breathe_projects = { + "ParallelProgrammingCourse": os.path.join(os.path.dirname(__file__), "..", "xml"), +} +breathe_default_project = "ParallelProgrammingCourse" + +# Suppress specific warnings for API documentation +suppress_warnings = [ + "ref.ref", # undefined label warnings + "ref.identifier", # cpp:identifier reference target not found +] + +templates_path = ["_templates"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +locale_dirs = ["locale"] +gettext_compact = False + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] + +html_sidebars = { + "**": [ + "globaltoc.html", + "sourcelink.html", + "searchbox.html", + "language_switcher.html", + ], +} + +html_theme_options = { + "collapse_navigation": False, + "navigation_depth": 2, +} + + +def setup(app): + """Add custom CSS files.""" + app.add_css_file("custom.css") diff --git a/docs/index.rst b/docs/index.rst index bc5770d75f..6c4f502ab7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,26 +1,26 @@ -Parallel Programming Course documentation -========================================= - -Below is the table of contents for the Parallel Programming Course documentation. 
Follow the links to learn more about each topic. - -.. toctree:: - :maxdepth: 2 - :caption: User Guide: - - user_guide/environment - user_guide/download - user_guide/build - user_guide/submit_work - user_guide/ci - user_guide/api - user_guide/environment_variables - -.. toctree:: - :maxdepth: 2 - :caption: Course Details: - - common_information/introduction - common_information/points - common_information/processes_tasks - common_information/report - common_information/threading_tasks +Parallel Programming Course documentation +========================================= + +Below is the table of contents for the Parallel Programming Course documentation. Follow the links to learn more about each topic. + +.. toctree:: + :maxdepth: 2 + :caption: User Guide: + + user_guide/environment + user_guide/download + user_guide/build + user_guide/submit_work + user_guide/ci + user_guide/api + user_guide/environment_variables + +.. toctree:: + :maxdepth: 2 + :caption: Course Details: + + common_information/introduction + common_information/points + common_information/processes_tasks + common_information/report + common_information/threading_tasks diff --git a/docs/locale/en/LC_MESSAGES/common_information/introduction.po b/docs/locale/en/LC_MESSAGES/common_information/introduction.po index c5efcd4974..1ccf0dd2aa 100644 --- a/docs/locale/en/LC_MESSAGES/common_information/introduction.po +++ b/docs/locale/en/LC_MESSAGES/common_information/introduction.po @@ -1,184 +1,184 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-09 00:23+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/introduction.rst:2 -msgid "Introduction" -msgstr "" - -#: ../../common_information/introduction.rst:5 -msgid "Practice" -msgstr "" - -#: ../../common_information/introduction.rst:7 -msgid "We work online" -msgstr "" - -#: ../../common_information/introduction.rst:9 -msgid "Use GitHub repository" -msgstr "" - -#: ../../common_information/introduction.rst:10 -msgid "Use Pull Requests" -msgstr "" - -#: ../../common_information/introduction.rst:11 -msgid "Merge into the master branch" -msgstr "" - -#: ../../common_information/introduction.rst:12 -msgid "Test verification" -msgstr "" - -#: ../../common_information/introduction.rst:14 -msgid "Task distribution is random for each student." -msgstr "" - -#: ../../common_information/introduction.rst:15 -msgid "" -"An example for each technology can be found in the corresponding " -"directory: ``tasks//example``." -msgstr "" - -#: ../../common_information/introduction.rst:16 -msgid "" -"In each repository, the README.md contains a link to the course " -"documentation (**read it fully!!!**)." -msgstr "" - -#: ../../common_information/introduction.rst:17 -msgid "" -"Additionally, each repository includes an example of a properly formatted" -" PULL REQUEST." -msgstr "" - -#: ../../common_information/introduction.rst:18 -msgid "Submission of all tasks is mandatory to pass the course." 
-msgstr "" - -#: ../../common_information/introduction.rst:19 -msgid "" -"A task that has been merged into the master branch continues to be " -"monitored. If a task fails in the master, it is disabled, and a record of" -" this is added to the score table. All disabled tasks will result in a " -"zero points result for those tasks at the end of the semester. It can be " -"seen that your task is disabled due to the following reason: the " -"directory of your task has been renamed from " -"``seq/nesterov_a_vector_sum`` to ``seq/nesterov_a_vector_sum_disabled``" -msgstr "" - -#: ../../common_information/introduction.rst:24 -msgid "All resources for using the repository will be provided here:" -msgstr "" - -#: ../../common_information/introduction.rst:26 -msgid "" -"`Git for half an hour: A Beginner’s Guide `__" -msgstr "" - -#: ../../common_information/introduction.rst:27 -#, python-format -msgid "" -"`Getting Started with Git and GitHub: A Beginner’s Guide " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:28 -msgid "" -"`Git: A Quick Start Guide to Using Core Operations with Explanations " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:29 -msgid "" -"`Conflicts resolving in Git " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:30 -msgid "`Google testing framework (gtest) `__" -msgstr "" - -#: ../../common_information/introduction.rst:31 -msgid "" -"`GoogleTest Primer " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:32 -msgid "`GitHub Actions documentation `__" -msgstr "" - -#: ../../common_information/introduction.rst:33 -msgid "" -"`Parallel Programming Technologies. 
Message Passing Interface (MPI) " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:34 -msgid "" -"`Typing and Layout in the System LaTeX `__" -msgstr "" - -#: ../../common_information/introduction.rst:35 -msgid "" -"`LaTeX for the beginners " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:36 -msgid "`What is OpenMP? `__" -msgstr "" - -#: ../../common_information/introduction.rst:37 -msgid "" -"`TBB-1 " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:38 -msgid "" -"`Writing Multithreaded Applications in C++ `__" -msgstr "" - -#: ../../common_information/introduction.rst:39 -msgid "" -"`Multithreading: New Features of the C++11 Standard " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:40 -msgid "" -"`Introduction to Parallel Computing " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:42 -msgid "" -"\\* *All instructions, repositories, and tables may be updated during the" -" learning process for better usability. Be prepared for changes, check " -"and update them periodically!!!*" -msgstr "" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-09 00:23+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/introduction.rst:2 +msgid "Introduction" +msgstr "" + +#: ../../common_information/introduction.rst:5 +msgid "Practice" +msgstr "" + +#: ../../common_information/introduction.rst:7 +msgid "We work online" +msgstr "" + +#: ../../common_information/introduction.rst:9 +msgid "Use GitHub repository" +msgstr "" + +#: ../../common_information/introduction.rst:10 +msgid "Use Pull Requests" +msgstr "" + +#: ../../common_information/introduction.rst:11 +msgid "Merge into the master branch" +msgstr "" + +#: ../../common_information/introduction.rst:12 +msgid "Test verification" +msgstr "" + +#: ../../common_information/introduction.rst:14 +msgid "Task distribution is random for each student." +msgstr "" + +#: ../../common_information/introduction.rst:15 +msgid "" +"An example for each technology can be found in the corresponding " +"directory: ``tasks//example``." +msgstr "" + +#: ../../common_information/introduction.rst:16 +msgid "" +"In each repository, the README.md contains a link to the course " +"documentation (**read it fully!!!**)." +msgstr "" + +#: ../../common_information/introduction.rst:17 +msgid "" +"Additionally, each repository includes an example of a properly formatted" +" PULL REQUEST." +msgstr "" + +#: ../../common_information/introduction.rst:18 +msgid "Submission of all tasks is mandatory to pass the course." 
+msgstr "" + +#: ../../common_information/introduction.rst:19 +msgid "" +"A task that has been merged into the master branch continues to be " +"monitored. If a task fails in the master, it is disabled, and a record of" +" this is added to the score table. All disabled tasks will result in a " +"zero points result for those tasks at the end of the semester. It can be " +"seen that your task is disabled due to the following reason: the " +"directory of your task has been renamed from " +"``seq/nesterov_a_vector_sum`` to ``seq/nesterov_a_vector_sum_disabled``" +msgstr "" + +#: ../../common_information/introduction.rst:24 +msgid "All resources for using the repository will be provided here:" +msgstr "" + +#: ../../common_information/introduction.rst:26 +msgid "" +"`Git for half an hour: A Beginner’s Guide `__" +msgstr "" + +#: ../../common_information/introduction.rst:27 +#, python-format +msgid "" +"`Getting Started with Git and GitHub: A Beginner’s Guide " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:28 +msgid "" +"`Git: A Quick Start Guide to Using Core Operations with Explanations " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:29 +msgid "" +"`Conflicts resolving in Git " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:30 +msgid "`Google testing framework (gtest) `__" +msgstr "" + +#: ../../common_information/introduction.rst:31 +msgid "" +"`GoogleTest Primer " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:32 +msgid "`GitHub Actions documentation `__" +msgstr "" + +#: ../../common_information/introduction.rst:33 +msgid "" +"`Parallel Programming Technologies. 
Message Passing Interface (MPI) " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:34 +msgid "" +"`Typing and Layout in the System LaTeX `__" +msgstr "" + +#: ../../common_information/introduction.rst:35 +msgid "" +"`LaTeX for the beginners " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:36 +msgid "`What is OpenMP? `__" +msgstr "" + +#: ../../common_information/introduction.rst:37 +msgid "" +"`TBB-1 " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:38 +msgid "" +"`Writing Multithreaded Applications in C++ `__" +msgstr "" + +#: ../../common_information/introduction.rst:39 +msgid "" +"`Multithreading: New Features of the C++11 Standard " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:40 +msgid "" +"`Introduction to Parallel Computing " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:42 +msgid "" +"\\* *All instructions, repositories, and tables may be updated during the" +" learning process for better usability. Be prepared for changes, check " +"and update them periodically!!!*" +msgstr "" diff --git a/docs/locale/en/LC_MESSAGES/common_information/points.po b/docs/locale/en/LC_MESSAGES/common_information/points.po index 40fefadaf5..0582241b6d 100644 --- a/docs/locale/en/LC_MESSAGES/common_information/points.po +++ b/docs/locale/en/LC_MESSAGES/common_information/points.po @@ -1,402 +1,402 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-09 00:58+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/points.rst:2 -msgid "Points" -msgstr "" - -#: ../../common_information/points.rst:4 -msgid "For “process parallelism” semester" -msgstr "" - -#: ../../common_information/points.rst:6 -msgid "1st MPI task" -msgstr "" - -#: ../../common_information/points.rst:9 ../../common_information/points.rst:17 -#: ../../common_information/points.rst:25 -#: ../../common_information/points.rst:37 -#: ../../common_information/points.rst:45 -#: ../../common_information/points.rst:53 -#: ../../common_information/points.rst:61 -#: ../../common_information/points.rst:70 -msgid "Solution" -msgstr "" - -#: ../../common_information/points.rst:11 -#: ../../common_information/points.rst:27 -#: ../../common_information/points.rst:72 -#: ../../common_information/points.rst:114 -msgid "10" -msgstr "" - -#: ../../common_information/points.rst:14 -msgid "2nd MPI task" -msgstr "" - -#: ../../common_information/points.rst:17 -#: ../../common_information/points.rst:25 -#: ../../common_information/points.rst:45 -#: ../../common_information/points.rst:53 -#: ../../common_information/points.rst:61 -#: ../../common_information/points.rst:70 -msgid "Performance" -msgstr "" - -#: ../../common_information/points.rst:19 -msgid "15" -msgstr "" - -#: ../../common_information/points.rst:19 -#: ../../common_information/points.rst:114 -#: ../../common_information/points.rst:124 -#: ../../common_information/points.rst:136 -msgid "5" -msgstr 
"" - -#: ../../common_information/points.rst:22 -msgid "3rd MPI task" -msgstr "" - -#: ../../common_information/points.rst:27 -msgid "20" -msgstr "" - -#: ../../common_information/points.rst:30 -msgid "**Total : 60 points**" -msgstr "" - -#: ../../common_information/points.rst:32 -msgid "For “thread parallelism” semester" -msgstr "" - -#: ../../common_information/points.rst:34 -msgid "Sequential version" -msgstr "" - -#: ../../common_information/points.rst:39 -#: ../../common_information/points.rst:125 -#: ../../common_information/points.rst:138 -msgid "4" -msgstr "" - -#: ../../common_information/points.rst:42 -msgid "OpenMP version" -msgstr "" - -#: ../../common_information/points.rst:47 -#: ../../common_information/points.rst:55 -#: ../../common_information/points.rst:63 -msgid "6" -msgstr "" - -#: ../../common_information/points.rst:47 -#: ../../common_information/points.rst:55 -#: ../../common_information/points.rst:126 -#: ../../common_information/points.rst:139 -msgid "3" -msgstr "" - -#: ../../common_information/points.rst:50 -msgid "TBB version" -msgstr "" - -#: ../../common_information/points.rst:58 -msgid "std::thread version" -msgstr "" - -#: ../../common_information/points.rst:63 -#: ../../common_information/points.rst:72 -msgid "8" -msgstr "" - -#: ../../common_information/points.rst:66 -msgid "“MPI + threads” version (The threading technology is chosen randomly)" -msgstr "" - -#: ../../common_information/points.rst:75 -msgid "**Total : 54 points**" -msgstr "" - -#: ../../common_information/points.rst:77 -msgid "The rule for earning performance points." -msgstr "" - -#: ../../common_information/points.rst:79 -msgid "" -"The ratio of efficiency percentage to points percentage, where the " -"maximum points is 100% and the minimum points is 0%." -msgstr "" - -#: ../../common_information/points.rst:81 -msgid "If the score is not an integer, it is rounded up." 
-msgstr "" - -#: ../../common_information/points.rst:84 -msgid "Efficiency (%)" -msgstr "" - -#: ../../common_information/points.rst:84 -msgid "Points percentage" -msgstr "" - -#: ../../common_information/points.rst:86 -msgid ">= 50%" -msgstr "" - -#: ../../common_information/points.rst:86 -msgid "100%" -msgstr "" - -#: ../../common_information/points.rst:88 -msgid "[45, 50)" -msgstr "" - -#: ../../common_information/points.rst:88 -msgid "90%" -msgstr "" - -#: ../../common_information/points.rst:90 -msgid "[42, 45)" -msgstr "" - -#: ../../common_information/points.rst:90 -msgid "80%" -msgstr "" - -#: ../../common_information/points.rst:92 -msgid "[40, 42)" -msgstr "" - -#: ../../common_information/points.rst:92 -msgid "70%" -msgstr "" - -#: ../../common_information/points.rst:94 -msgid "[37, 40)" -msgstr "" - -#: ../../common_information/points.rst:94 -msgid "60%" -msgstr "" - -#: ../../common_information/points.rst:96 -msgid "[35, 37)" -msgstr "" - -#: ../../common_information/points.rst:96 -msgid "50%" -msgstr "" - -#: ../../common_information/points.rst:98 -msgid "[32, 35)" -msgstr "" - -#: ../../common_information/points.rst:98 -msgid "40%" -msgstr "" - -#: ../../common_information/points.rst:100 -msgid "[30, 32)" -msgstr "" - -#: ../../common_information/points.rst:100 -msgid "30%" -msgstr "" - -#: ../../common_information/points.rst:102 -msgid "[27, 30)" -msgstr "" - -#: ../../common_information/points.rst:102 -msgid "20%" -msgstr "" - -#: ../../common_information/points.rst:104 -msgid "[25, 27)" -msgstr "" - -#: ../../common_information/points.rst:104 -msgid "10%" -msgstr "" - -#: ../../common_information/points.rst:106 -msgid "< 25%" -msgstr "" - -#: ../../common_information/points.rst:106 -msgid "0%" -msgstr "" - -#: ../../common_information/points.rst:109 -msgid "Report" -msgstr "" - -#: ../../common_information/points.rst:112 -msgid "Completeness" -msgstr "" - -#: ../../common_information/points.rst:112 -msgid "Text Quality" -msgstr "" - -#: 
../../common_information/points.rst:112 -msgid "Formatting Quality" -msgstr "" - -#: ../../common_information/points.rst:112 -msgid "Total" -msgstr "" - -#: ../../common_information/points.rst:114 -msgid "2.5" -msgstr "" - -#: ../../common_information/points.rst:117 -msgid "Conversion of points into exam assessment or pass/fail" -msgstr "" - -#: ../../common_information/points.rst:119 -msgid "For 5-point grading system" -msgstr "" - -#: ../../common_information/points.rst:122 -#: ../../common_information/points.rst:133 -msgid "Points range" -msgstr "" - -#: ../../common_information/points.rst:122 -#: ../../common_information/points.rst:133 -msgid "Exam Assessment" -msgstr "" - -#: ../../common_information/points.rst:122 -#: ../../common_information/points.rst:133 -msgid "Student Pass" -msgstr "" - -#: ../../common_information/points.rst:124 -msgid "[87, 100]" -msgstr "" - -#: ../../common_information/points.rst:124 -#: ../../common_information/points.rst:125 -#: ../../common_information/points.rst:126 -#: ../../common_information/points.rst:135 -#: ../../common_information/points.rst:136 -#: ../../common_information/points.rst:137 -#: ../../common_information/points.rst:138 -#: ../../common_information/points.rst:139 -msgid "Passed" -msgstr "" - -#: ../../common_information/points.rst:125 -msgid "[70, 87)" -msgstr "" - -#: ../../common_information/points.rst:126 -#: ../../common_information/points.rst:139 -msgid "[50, 70)" -msgstr "" - -#: ../../common_information/points.rst:127 -#: ../../common_information/points.rst:140 -msgid "< 50" -msgstr "" - -#: ../../common_information/points.rst:127 -#: ../../common_information/points.rst:140 -msgid "2" -msgstr "" - -#: ../../common_information/points.rst:127 -#: ../../common_information/points.rst:140 -msgid "Not Passed" -msgstr "" - -#: ../../common_information/points.rst:130 -msgid "For 7-point grading system **(our current system)**" -msgstr "" - -#: ../../common_information/points.rst:135 -msgid "[99, 100]" -msgstr "" 
- -#: ../../common_information/points.rst:135 -msgid "5.5" -msgstr "" - -#: ../../common_information/points.rst:136 -msgid "[92, 99)" -msgstr "" - -#: ../../common_information/points.rst:137 -msgid "[82, 92)" -msgstr "" - -#: ../../common_information/points.rst:137 -msgid "4.5" -msgstr "" - -#: ../../common_information/points.rst:138 -msgid "[70, 82)" -msgstr "" - -#: ../../common_information/points.rst:143 -msgid "Penalties:" -msgstr "" - -#: ../../common_information/points.rst:145 -msgid "A deadline will be set for each version." -msgstr "" - -#: ../../common_information/points.rst:146 -msgid "" -"1 point is deducted from the version’s score for each day of delay in " -"submission." -msgstr "" - -#: ../../common_information/points.rst:147 -msgid "" -"The task is considered submitted when it is merged into the master/main " -"branch." -msgstr "" - -#: ../../common_information/points.rst:148 -msgid "" -"The submission time is defined as the timestamp of the last commit that " -"successfully passes the CI pipeline." -msgstr "" - -#: ../../common_information/points.rst:150 -msgid "Comments:" -msgstr "" - -#: ../../common_information/points.rst:152 -msgid "It is forbidden to write the report if all tasks are not completed." -msgstr "" - -#: ../../common_information/points.rst:153 -msgid "" -"Please keep in mind that one week before the end of the semester, the " -"repository will be closed for final assessment." -msgstr "" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-09 00:58+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/points.rst:2 +msgid "Points" +msgstr "" + +#: ../../common_information/points.rst:4 +msgid "For “process parallelism” semester" +msgstr "" + +#: ../../common_information/points.rst:6 +msgid "1st MPI task" +msgstr "" + +#: ../../common_information/points.rst:9 ../../common_information/points.rst:17 +#: ../../common_information/points.rst:25 +#: ../../common_information/points.rst:37 +#: ../../common_information/points.rst:45 +#: ../../common_information/points.rst:53 +#: ../../common_information/points.rst:61 +#: ../../common_information/points.rst:70 +msgid "Solution" +msgstr "" + +#: ../../common_information/points.rst:11 +#: ../../common_information/points.rst:27 +#: ../../common_information/points.rst:72 +#: ../../common_information/points.rst:114 +msgid "10" +msgstr "" + +#: ../../common_information/points.rst:14 +msgid "2nd MPI task" +msgstr "" + +#: ../../common_information/points.rst:17 +#: ../../common_information/points.rst:25 +#: ../../common_information/points.rst:45 +#: ../../common_information/points.rst:53 +#: ../../common_information/points.rst:61 +#: ../../common_information/points.rst:70 +msgid "Performance" +msgstr "" + +#: ../../common_information/points.rst:19 +msgid "15" +msgstr "" + +#: ../../common_information/points.rst:19 +#: ../../common_information/points.rst:114 +#: ../../common_information/points.rst:124 +#: ../../common_information/points.rst:136 +msgid "5" +msgstr 
"" + +#: ../../common_information/points.rst:22 +msgid "3rd MPI task" +msgstr "" + +#: ../../common_information/points.rst:27 +msgid "20" +msgstr "" + +#: ../../common_information/points.rst:30 +msgid "**Total : 60 points**" +msgstr "" + +#: ../../common_information/points.rst:32 +msgid "For “thread parallelism” semester" +msgstr "" + +#: ../../common_information/points.rst:34 +msgid "Sequential version" +msgstr "" + +#: ../../common_information/points.rst:39 +#: ../../common_information/points.rst:125 +#: ../../common_information/points.rst:138 +msgid "4" +msgstr "" + +#: ../../common_information/points.rst:42 +msgid "OpenMP version" +msgstr "" + +#: ../../common_information/points.rst:47 +#: ../../common_information/points.rst:55 +#: ../../common_information/points.rst:63 +msgid "6" +msgstr "" + +#: ../../common_information/points.rst:47 +#: ../../common_information/points.rst:55 +#: ../../common_information/points.rst:126 +#: ../../common_information/points.rst:139 +msgid "3" +msgstr "" + +#: ../../common_information/points.rst:50 +msgid "TBB version" +msgstr "" + +#: ../../common_information/points.rst:58 +msgid "std::thread version" +msgstr "" + +#: ../../common_information/points.rst:63 +#: ../../common_information/points.rst:72 +msgid "8" +msgstr "" + +#: ../../common_information/points.rst:66 +msgid "“MPI + threads” version (The threading technology is chosen randomly)" +msgstr "" + +#: ../../common_information/points.rst:75 +msgid "**Total : 54 points**" +msgstr "" + +#: ../../common_information/points.rst:77 +msgid "The rule for earning performance points." +msgstr "" + +#: ../../common_information/points.rst:79 +msgid "" +"The ratio of efficiency percentage to points percentage, where the " +"maximum points is 100% and the minimum points is 0%." +msgstr "" + +#: ../../common_information/points.rst:81 +msgid "If the score is not an integer, it is rounded up." 
+msgstr "" + +#: ../../common_information/points.rst:84 +msgid "Efficiency (%)" +msgstr "" + +#: ../../common_information/points.rst:84 +msgid "Points percentage" +msgstr "" + +#: ../../common_information/points.rst:86 +msgid ">= 50%" +msgstr "" + +#: ../../common_information/points.rst:86 +msgid "100%" +msgstr "" + +#: ../../common_information/points.rst:88 +msgid "[45, 50)" +msgstr "" + +#: ../../common_information/points.rst:88 +msgid "90%" +msgstr "" + +#: ../../common_information/points.rst:90 +msgid "[42, 45)" +msgstr "" + +#: ../../common_information/points.rst:90 +msgid "80%" +msgstr "" + +#: ../../common_information/points.rst:92 +msgid "[40, 42)" +msgstr "" + +#: ../../common_information/points.rst:92 +msgid "70%" +msgstr "" + +#: ../../common_information/points.rst:94 +msgid "[37, 40)" +msgstr "" + +#: ../../common_information/points.rst:94 +msgid "60%" +msgstr "" + +#: ../../common_information/points.rst:96 +msgid "[35, 37)" +msgstr "" + +#: ../../common_information/points.rst:96 +msgid "50%" +msgstr "" + +#: ../../common_information/points.rst:98 +msgid "[32, 35)" +msgstr "" + +#: ../../common_information/points.rst:98 +msgid "40%" +msgstr "" + +#: ../../common_information/points.rst:100 +msgid "[30, 32)" +msgstr "" + +#: ../../common_information/points.rst:100 +msgid "30%" +msgstr "" + +#: ../../common_information/points.rst:102 +msgid "[27, 30)" +msgstr "" + +#: ../../common_information/points.rst:102 +msgid "20%" +msgstr "" + +#: ../../common_information/points.rst:104 +msgid "[25, 27)" +msgstr "" + +#: ../../common_information/points.rst:104 +msgid "10%" +msgstr "" + +#: ../../common_information/points.rst:106 +msgid "< 25%" +msgstr "" + +#: ../../common_information/points.rst:106 +msgid "0%" +msgstr "" + +#: ../../common_information/points.rst:109 +msgid "Report" +msgstr "" + +#: ../../common_information/points.rst:112 +msgid "Completeness" +msgstr "" + +#: ../../common_information/points.rst:112 +msgid "Text Quality" +msgstr "" + +#: 
../../common_information/points.rst:112 +msgid "Formatting Quality" +msgstr "" + +#: ../../common_information/points.rst:112 +msgid "Total" +msgstr "" + +#: ../../common_information/points.rst:114 +msgid "2.5" +msgstr "" + +#: ../../common_information/points.rst:117 +msgid "Conversion of points into exam assessment or pass/fail" +msgstr "" + +#: ../../common_information/points.rst:119 +msgid "For 5-point grading system" +msgstr "" + +#: ../../common_information/points.rst:122 +#: ../../common_information/points.rst:133 +msgid "Points range" +msgstr "" + +#: ../../common_information/points.rst:122 +#: ../../common_information/points.rst:133 +msgid "Exam Assessment" +msgstr "" + +#: ../../common_information/points.rst:122 +#: ../../common_information/points.rst:133 +msgid "Student Pass" +msgstr "" + +#: ../../common_information/points.rst:124 +msgid "[87, 100]" +msgstr "" + +#: ../../common_information/points.rst:124 +#: ../../common_information/points.rst:125 +#: ../../common_information/points.rst:126 +#: ../../common_information/points.rst:135 +#: ../../common_information/points.rst:136 +#: ../../common_information/points.rst:137 +#: ../../common_information/points.rst:138 +#: ../../common_information/points.rst:139 +msgid "Passed" +msgstr "" + +#: ../../common_information/points.rst:125 +msgid "[70, 87)" +msgstr "" + +#: ../../common_information/points.rst:126 +#: ../../common_information/points.rst:139 +msgid "[50, 70)" +msgstr "" + +#: ../../common_information/points.rst:127 +#: ../../common_information/points.rst:140 +msgid "< 50" +msgstr "" + +#: ../../common_information/points.rst:127 +#: ../../common_information/points.rst:140 +msgid "2" +msgstr "" + +#: ../../common_information/points.rst:127 +#: ../../common_information/points.rst:140 +msgid "Not Passed" +msgstr "" + +#: ../../common_information/points.rst:130 +msgid "For 7-point grading system **(our current system)**" +msgstr "" + +#: ../../common_information/points.rst:135 +msgid "[99, 100]" +msgstr "" 
+ +#: ../../common_information/points.rst:135 +msgid "5.5" +msgstr "" + +#: ../../common_information/points.rst:136 +msgid "[92, 99)" +msgstr "" + +#: ../../common_information/points.rst:137 +msgid "[82, 92)" +msgstr "" + +#: ../../common_information/points.rst:137 +msgid "4.5" +msgstr "" + +#: ../../common_information/points.rst:138 +msgid "[70, 82)" +msgstr "" + +#: ../../common_information/points.rst:143 +msgid "Penalties:" +msgstr "" + +#: ../../common_information/points.rst:145 +msgid "A deadline will be set for each version." +msgstr "" + +#: ../../common_information/points.rst:146 +msgid "" +"1 point is deducted from the version’s score for each day of delay in " +"submission." +msgstr "" + +#: ../../common_information/points.rst:147 +msgid "" +"The task is considered submitted when it is merged into the master/main " +"branch." +msgstr "" + +#: ../../common_information/points.rst:148 +msgid "" +"The submission time is defined as the timestamp of the last commit that " +"successfully passes the CI pipeline." +msgstr "" + +#: ../../common_information/points.rst:150 +msgid "Comments:" +msgstr "" + +#: ../../common_information/points.rst:152 +msgid "It is forbidden to write the report if all tasks are not completed." +msgstr "" + +#: ../../common_information/points.rst:153 +msgid "" +"Please keep in mind that one week before the end of the semester, the " +"repository will be closed for final assessment." +msgstr "" diff --git a/docs/locale/en/LC_MESSAGES/common_information/processes_tasks.po b/docs/locale/en/LC_MESSAGES/common_information/processes_tasks.po index ff73c90f45..194cdaf9d9 100644 --- a/docs/locale/en/LC_MESSAGES/common_information/processes_tasks.po +++ b/docs/locale/en/LC_MESSAGES/common_information/processes_tasks.po @@ -1,721 +1,721 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. 
-# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-05 13:29+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/processes_tasks.rst:2 -msgid "Processes parallelism tasks" -msgstr "" - -#: ../../common_information/processes_tasks.rst:5 -msgid "First task" -msgstr "" - -#: ../../common_information/processes_tasks.rst:8 -#: ../../common_information/processes_tasks.rst:69 -#: ../../common_information/processes_tasks.rst:122 -msgid "Variant Number" -msgstr "" - -#: ../../common_information/processes_tasks.rst:8 -#: ../../common_information/processes_tasks.rst:69 -#: ../../common_information/processes_tasks.rst:122 -msgid "Task" -msgstr "" - -#: ../../common_information/processes_tasks.rst:10 -#: ../../common_information/processes_tasks.rst:71 -#: ../../common_information/processes_tasks.rst:124 -msgid "1" -msgstr "" - -#: ../../common_information/processes_tasks.rst:10 -msgid "Sum of vector elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:12 -#: ../../common_information/processes_tasks.rst:73 -#: ../../common_information/processes_tasks.rst:126 -msgid "2" -msgstr "" - -#: ../../common_information/processes_tasks.rst:12 -msgid "Calculating the average value of vector elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:14 -#: ../../common_information/processes_tasks.rst:75 -#: ../../common_information/processes_tasks.rst:128 -msgid "3" -msgstr "" - -#: ../../common_information/processes_tasks.rst:14 -msgid "Maximum value of vector 
elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:16 -#: ../../common_information/processes_tasks.rst:77 -#: ../../common_information/processes_tasks.rst:130 -msgid "4" -msgstr "" - -#: ../../common_information/processes_tasks.rst:16 -msgid "Minimum value of vector elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:18 -#: ../../common_information/processes_tasks.rst:79 -#: ../../common_information/processes_tasks.rst:132 -msgid "5" -msgstr "" - -#: ../../common_information/processes_tasks.rst:18 -msgid "" -"Finding the number of sign alternations between adjacent elements of the " -"vector" -msgstr "" - -#: ../../common_information/processes_tasks.rst:20 -#: ../../common_information/processes_tasks.rst:81 -#: ../../common_information/processes_tasks.rst:134 -#: ../../common_information/processes_tasks.rst:258 -msgid "6" -msgstr "" - -#: ../../common_information/processes_tasks.rst:20 -msgid "" -"Finding the number of order violations between adjacent elements of the " -"vector" -msgstr "" - -#: ../../common_information/processes_tasks.rst:22 -#: ../../common_information/processes_tasks.rst:83 -#: ../../common_information/processes_tasks.rst:136 -msgid "7" -msgstr "" - -#: ../../common_information/processes_tasks.rst:22 -msgid "Finding the most similar adjacent elements of the vector" -msgstr "" - -#: ../../common_information/processes_tasks.rst:24 -#: ../../common_information/processes_tasks.rst:85 -#: ../../common_information/processes_tasks.rst:138 -msgid "8" -msgstr "" - -#: ../../common_information/processes_tasks.rst:24 -msgid "Finding the most different adjacent elements of the vector" -msgstr "" - -#: ../../common_information/processes_tasks.rst:26 -#: ../../common_information/processes_tasks.rst:87 -#: ../../common_information/processes_tasks.rst:140 -msgid "9" -msgstr "" - -#: ../../common_information/processes_tasks.rst:26 -msgid "Scalar product of vectors" -msgstr "" - -#: 
../../common_information/processes_tasks.rst:28 -#: ../../common_information/processes_tasks.rst:89 -#: ../../common_information/processes_tasks.rst:142 -msgid "10" -msgstr "" - -#: ../../common_information/processes_tasks.rst:28 -msgid "Sum of matrix elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:30 -#: ../../common_information/processes_tasks.rst:91 -#: ../../common_information/processes_tasks.rst:144 -msgid "11" -msgstr "" - -#: ../../common_information/processes_tasks.rst:30 -msgid "Sum of values by rows in the matrix" -msgstr "" - -#: ../../common_information/processes_tasks.rst:32 -#: ../../common_information/processes_tasks.rst:93 -#: ../../common_information/processes_tasks.rst:147 -msgid "12" -msgstr "" - -#: ../../common_information/processes_tasks.rst:32 -msgid "Sum of values by columns in the matrix" -msgstr "" - -#: ../../common_information/processes_tasks.rst:34 -#: ../../common_information/processes_tasks.rst:95 -#: ../../common_information/processes_tasks.rst:150 -msgid "13" -msgstr "" - -#: ../../common_information/processes_tasks.rst:34 -msgid "Maximum value of matrix elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:36 -#: ../../common_information/processes_tasks.rst:97 -#: ../../common_information/processes_tasks.rst:153 -msgid "14" -msgstr "" - -#: ../../common_information/processes_tasks.rst:36 -msgid "Minimum value of matrix elements" -msgstr "" - -#: ../../common_information/processes_tasks.rst:38 -#: ../../common_information/processes_tasks.rst:99 -#: ../../common_information/processes_tasks.rst:155 -msgid "15" -msgstr "" - -#: ../../common_information/processes_tasks.rst:38 -msgid "Finding maximum values by rows in the matrix" -msgstr "" - -#: ../../common_information/processes_tasks.rst:40 -#: ../../common_information/processes_tasks.rst:101 -#: ../../common_information/processes_tasks.rst:157 -msgid "16" -msgstr "" - -#: ../../common_information/processes_tasks.rst:40 -msgid "Finding maximum 
values by columns in the matrix" -msgstr "" - -#: ../../common_information/processes_tasks.rst:42 -#: ../../common_information/processes_tasks.rst:103 -#: ../../common_information/processes_tasks.rst:159 -msgid "17" -msgstr "" - -#: ../../common_information/processes_tasks.rst:42 -msgid "Finding minimum values by rows in the matrix" -msgstr "" - -#: ../../common_information/processes_tasks.rst:44 -#: ../../common_information/processes_tasks.rst:105 -#: ../../common_information/processes_tasks.rst:161 -msgid "18" -msgstr "" - -#: ../../common_information/processes_tasks.rst:44 -msgid "Finding minimum values by columns in the matrix" -msgstr "" - -#: ../../common_information/processes_tasks.rst:46 -#: ../../common_information/processes_tasks.rst:107 -#: ../../common_information/processes_tasks.rst:163 -msgid "19" -msgstr "" - -#: ../../common_information/processes_tasks.rst:46 -msgid "Integration – rectangle method" -msgstr "" - -#: ../../common_information/processes_tasks.rst:48 -#: ../../common_information/processes_tasks.rst:109 -#: ../../common_information/processes_tasks.rst:165 -msgid "20" -msgstr "" - -#: ../../common_information/processes_tasks.rst:48 -msgid "Integration – trapezoidal method" -msgstr "" - -#: ../../common_information/processes_tasks.rst:50 -#: ../../common_information/processes_tasks.rst:111 -#: ../../common_information/processes_tasks.rst:167 -#: ../../common_information/processes_tasks.rst:268 -msgid "21" -msgstr "" - -#: ../../common_information/processes_tasks.rst:50 -msgid "Integration – Monte Carlo method" -msgstr "" - -#: ../../common_information/processes_tasks.rst:52 -#: ../../common_information/processes_tasks.rst:113 -#: ../../common_information/processes_tasks.rst:169 -msgid "22" -msgstr "" - -#: ../../common_information/processes_tasks.rst:52 -msgid "Counting the number of alphabetical characters in a string" -msgstr "" - -#: ../../common_information/processes_tasks.rst:54 -#: ../../common_information/processes_tasks.rst:115 -#: 
../../common_information/processes_tasks.rst:171 -msgid "23" -msgstr "" - -#: ../../common_information/processes_tasks.rst:54 -msgid "Counting the frequency of a character in a string" -msgstr "" - -#: ../../common_information/processes_tasks.rst:56 -#: ../../common_information/processes_tasks.rst:173 -msgid "24" -msgstr "" - -#: ../../common_information/processes_tasks.rst:56 -msgid "Counting the number of words in a string" -msgstr "" - -#: ../../common_information/processes_tasks.rst:58 -#: ../../common_information/processes_tasks.rst:175 -msgid "25" -msgstr "" - -#: ../../common_information/processes_tasks.rst:58 -msgid "Counting the number of sentences in a string" -msgstr "" - -#: ../../common_information/processes_tasks.rst:60 -#: ../../common_information/processes_tasks.rst:177 -msgid "26" -msgstr "" - -#: ../../common_information/processes_tasks.rst:60 -msgid "Checking lexicographical order of two strings" -msgstr "" - -#: ../../common_information/processes_tasks.rst:62 -#: ../../common_information/processes_tasks.rst:179 -msgid "27" -msgstr "" - -#: ../../common_information/processes_tasks.rst:62 -msgid "Counting the number of differing characters between two strings" -msgstr "" - -#: ../../common_information/processes_tasks.rst:66 -msgid "Second task" -msgstr "" - -#: ../../common_information/processes_tasks.rst:71 -msgid "Broadcast (one to all transfer)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:73 -msgid "Reduce (all to one transfer)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:75 -msgid "Allreduce (all to one and broadcast)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:77 -msgid "Scatter (one to all transfer)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:79 -msgid "Gather (all to one transfer)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:81 -msgid "Line" -msgstr "" - -#: ../../common_information/processes_tasks.rst:83 -msgid "Ring" -msgstr "" - -#: 
../../common_information/processes_tasks.rst:85 -msgid "Star" -msgstr "" - -#: ../../common_information/processes_tasks.rst:87 -msgid "Torus Grid" -msgstr "" - -#: ../../common_information/processes_tasks.rst:89 -msgid "Hypercube" -msgstr "" - -#: ../../common_information/processes_tasks.rst:91 -msgid "Horizontal strip scheme - matrix-vector multiplication" -msgstr "" - -#: ../../common_information/processes_tasks.rst:93 -msgid "Vertical strip scheme - matrix-vector multiplication" -msgstr "" - -#: ../../common_information/processes_tasks.rst:95 -msgid "" -"Horizontal strip scheme – partitioning only matrix A - matrix-matrix " -"multiplication" -msgstr "" - -#: ../../common_information/processes_tasks.rst:97 -msgid "" -"Horizontal strip scheme A, vertical strip scheme B - matrix-matrix " -"multiplication" -msgstr "" - -#: ../../common_information/processes_tasks.rst:99 -msgid "Gaussian method – horizontal strip scheme" -msgstr "" - -#: ../../common_information/processes_tasks.rst:101 -msgid "Gaussian method – vertical strip scheme" -msgstr "" - -#: ../../common_information/processes_tasks.rst:103 -msgid "Gauss-Jordan method" -msgstr "" - -#: ../../common_information/processes_tasks.rst:105 -msgid "Iterative methods (Jacobi)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:107 -msgid "Iterative methods (Gauss-Seidel)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:109 -msgid "Iterative methods (Simple)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:111 -msgid "Bubble sort (odd-even transposition algorithm)" -msgstr "" - -#: ../../common_information/processes_tasks.rst:113 -msgid "Image smoothing" -msgstr "" - -#: ../../common_information/processes_tasks.rst:115 -msgid "Contrast enhancement" -msgstr "" - -#: ../../common_information/processes_tasks.rst:119 -msgid "Third task" -msgstr "" - -#: ../../common_information/processes_tasks.rst:124 -msgid "" -"Dense matrix multiplication. Elements of data type double. 
Block scheme, " -"Cannon's algorithm." -msgstr "" - -#: ../../common_information/processes_tasks.rst:126 -msgid "" -"Dense matrix multiplication. Elements of data type double. Block scheme, " -"Fox's algorithm." -msgstr "" - -#: ../../common_information/processes_tasks.rst:128 -msgid "" -"Dense matrix multiplication. Elements of data type double. Strassen's " -"algorithm." -msgstr "" - -#: ../../common_information/processes_tasks.rst:130 -msgid "" -"Sparse matrix multiplication. Elements of data type double. Matrix " -"storage format – row format (CRS)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:132 -msgid "" -"Sparse matrix multiplication. Elements of data type double. Matrix " -"storage format – column format (CCS)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:134 -msgid "Solving systems of linear equations using the conjugate gradient method." -msgstr "" - -#: ../../common_information/processes_tasks.rst:136 -msgid "" -"Computing multidimensional integrals using a multistep scheme (rectangle " -"method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:138 -msgid "" -"Computing multidimensional integrals using a multistep scheme " -"(trapezoidal method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:140 -msgid "" -"Computing multidimensional integrals using a multistep scheme (Simpson's " -"method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:142 -msgid "Computing multidimensional integrals using the Monte Carlo method." -msgstr "" - -#: ../../common_information/processes_tasks.rst:144 -msgid "" -"Global search algorithm (Strongin's) for one-dimensional optimization " -"problems. Parallelization by characteristics." -msgstr "" - -#: ../../common_information/processes_tasks.rst:147 -msgid "" -"Multistep scheme for solving two-dimensional global optimization " -"problems. Parallelization by dividing the search area." 
-msgstr "" - -#: ../../common_information/processes_tasks.rst:150 -msgid "" -"Multistep scheme for solving two-dimensional global optimization " -"problems. Parallelization by characteristics." -msgstr "" - -#: ../../common_information/processes_tasks.rst:153 -msgid "Quick sort with simple merging." -msgstr "" - -#: ../../common_information/processes_tasks.rst:155 -msgid "Quick sort with odd-even merging (Batcher's method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:157 -msgid "Shell sort with simple merging." -msgstr "" - -#: ../../common_information/processes_tasks.rst:159 -msgid "Shell sort with odd-even merging (Batcher's method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:161 -msgid "Radix sort for integers with simple merging." -msgstr "" - -#: ../../common_information/processes_tasks.rst:163 -msgid "Radix sort for integers with odd-even merging (Batcher's method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:165 -msgid "Radix sort for floating-point numbers (type double) with simple merging." -msgstr "" - -#: ../../common_information/processes_tasks.rst:167 -msgid "" -"Radix sort for floating-point numbers (type double) with odd-even merging" -" (Batcher's method)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:169 -msgid "" -"Shortest path search from one vertex (Dijkstra's algorithm). With CRS " -"graphs." -msgstr "" - -#: ../../common_information/processes_tasks.rst:171 -msgid "" -"Shortest path search from one vertex (Bellman-Ford algorithm). With CRS " -"graphs." -msgstr "" - -#: ../../common_information/processes_tasks.rst:173 -msgid "Convex hull construction – Graham's scan." -msgstr "" - -#: ../../common_information/processes_tasks.rst:175 -msgid "Convex hull construction – Jarvis's march." -msgstr "" - -#: ../../common_information/processes_tasks.rst:177 -msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." 
-msgstr "" - -#: ../../common_information/processes_tasks.rst:179 -msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." -msgstr "" - -#: ../../common_information/processes_tasks.rst:181 -msgid "28" -msgstr "" - -#: ../../common_information/processes_tasks.rst:181 -msgid "Linear image filtering (block partition). Gaussian kernel 3x3." -msgstr "" - -#: ../../common_information/processes_tasks.rst:183 -msgid "29" -msgstr "" - -#: ../../common_information/processes_tasks.rst:183 -msgid "Edge detection in an image using the Sobel operator." -msgstr "" - -#: ../../common_information/processes_tasks.rst:185 -msgid "30" -msgstr "" - -#: ../../common_information/processes_tasks.rst:185 -msgid "Contrast enhancement of grayscale image using linear histogram stretching." -msgstr "" - -#: ../../common_information/processes_tasks.rst:187 -msgid "31" -msgstr "" - -#: ../../common_information/processes_tasks.rst:187 -msgid "" -"Labeling components on a binary image (black areas correspond to objects," -" white to background)." -msgstr "" - -#: ../../common_information/processes_tasks.rst:189 -msgid "32" -msgstr "" - -#: ../../common_information/processes_tasks.rst:189 -msgid "Convex hull construction for components of a binary image." -msgstr "" - -#: ../../common_information/processes_tasks.rst:193 -msgid "Comments for tasks 2 and 3:" -msgstr "" - -#: ../../common_information/processes_tasks.rst:195 -msgid "" -"MESSAGE PASSING METHODS “You need to implement the specified methods " -"using only the Send and Recv functions. The implemented function should " -"have the same prototype as the corresponding MPI function. The test " -"program should allow selecting the root process number and perform array " -"transmission (broadcast, gather) for at least the following types: " -"MPI_INT, MPI_FLOAT, MPI_DOUBLE. 
In all operations, the transmission " -"should be carried out using the ‘tree’ of processes.”" -msgstr "" - -#: ../../common_information/processes_tasks.rst:203 -#: ../../common_information/processes_tasks.rst:217 -#: ../../common_information/processes_tasks.rst:230 -#: ../../common_information/processes_tasks.rst:243 -#: ../../common_information/processes_tasks.rst:253 -#: ../../common_information/processes_tasks.rst:263 -#: ../../common_information/processes_tasks.rst:273 -msgid "Comments relevant for:" -msgstr "" - -#: ../../common_information/processes_tasks.rst:206 -msgid "Variants for task 2" -msgstr "" - -#: ../../common_information/processes_tasks.rst:206 -msgid "Variants for task 3" -msgstr "" - -#: ../../common_information/processes_tasks.rst:208 -#: ../../common_information/processes_tasks.rst:235 -msgid "1 - 5" -msgstr "" - -#: ../../common_information/processes_tasks.rst:208 -#: ../../common_information/processes_tasks.rst:222 -#: ../../common_information/processes_tasks.rst:278 -msgid "x" -msgstr "" - -#: ../../common_information/processes_tasks.rst:211 -msgid "" -"DATA COMMUNICATION NETWORK TOPOLOGIES “You need to implement the virtual " -"topology specified in the task using MPI capabilities for working with " -"communicators and topologies, and ensure the ability to transfer data " -"from any selected process to any other process. 
(Do not use " -"MPI_Cart_Create and MPI_Graph_Create)”" -msgstr "" - -#: ../../common_information/processes_tasks.rst:220 -#: ../../common_information/processes_tasks.rst:233 -#: ../../common_information/processes_tasks.rst:246 -#: ../../common_information/processes_tasks.rst:256 -#: ../../common_information/processes_tasks.rst:266 -#: ../../common_information/processes_tasks.rst:276 -msgid "Varinats for task 2" -msgstr "" - -#: ../../common_information/processes_tasks.rst:220 -#: ../../common_information/processes_tasks.rst:233 -#: ../../common_information/processes_tasks.rst:246 -#: ../../common_information/processes_tasks.rst:256 -#: ../../common_information/processes_tasks.rst:266 -#: ../../common_information/processes_tasks.rst:276 -msgid "Varinats for task 3" -msgstr "" - -#: ../../common_information/processes_tasks.rst:222 -msgid "6 - 10" -msgstr "" - -#: ../../common_information/processes_tasks.rst:225 -msgid "" -"MATRIX COMPUTATIONS “In the horizontal scheme, the matrix is divided " -"among processes by rows. In the vertical scheme, it is divided by " -"columns, and in this case, the vector is also divided among processes.”" -msgstr "" - -#: ../../common_information/processes_tasks.rst:235 -msgid "11 - 14" -msgstr "" - -#: ../../common_information/processes_tasks.rst:238 -msgid "" -"COMPUTER GRAPHICS AND IMAGE PROCESSING “It is assumed that the image is " -"given in color or grayscale, with the input data being a one-dimensional " -"array. 
Loading a real image is not required, but is allowed.”" -msgstr "" - -#: ../../common_information/processes_tasks.rst:248 -msgid "26 - 27" -msgstr "" - -#: ../../common_information/processes_tasks.rst:248 -msgid "24 - 32" -msgstr "" - -#: ../../common_information/processes_tasks.rst:251 -msgid "SOLUTION OF A SYSTEM OF LINEAR ALGEBRAIC EQUATIONS" -msgstr "" - -#: ../../common_information/processes_tasks.rst:258 -msgid "15 - 20" -msgstr "" - -#: ../../common_information/processes_tasks.rst:261 -msgid "SORT ALGORITHMS" -msgstr "" - -#: ../../common_information/processes_tasks.rst:268 -msgid "14 - 21" -msgstr "" - -#: ../../common_information/processes_tasks.rst:271 -msgid "GRAPH PROCESSING ALGORITHMS" -msgstr "" - -#: ../../common_information/processes_tasks.rst:278 -msgid "22 - 23" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-05 13:29+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/processes_tasks.rst:2 +msgid "Processes parallelism tasks" +msgstr "" + +#: ../../common_information/processes_tasks.rst:5 +msgid "First task" +msgstr "" + +#: ../../common_information/processes_tasks.rst:8 +#: ../../common_information/processes_tasks.rst:69 +#: ../../common_information/processes_tasks.rst:122 +msgid "Variant Number" +msgstr "" + +#: ../../common_information/processes_tasks.rst:8 +#: ../../common_information/processes_tasks.rst:69 +#: ../../common_information/processes_tasks.rst:122 +msgid "Task" +msgstr "" + +#: ../../common_information/processes_tasks.rst:10 +#: ../../common_information/processes_tasks.rst:71 +#: ../../common_information/processes_tasks.rst:124 +msgid "1" +msgstr "" + +#: ../../common_information/processes_tasks.rst:10 +msgid "Sum of vector elements" +msgstr "" + +#: ../../common_information/processes_tasks.rst:12 +#: ../../common_information/processes_tasks.rst:73 +#: ../../common_information/processes_tasks.rst:126 +msgid "2" +msgstr "" + +#: ../../common_information/processes_tasks.rst:12 +msgid "Calculating the average value of vector elements" +msgstr "" + +#: ../../common_information/processes_tasks.rst:14 +#: ../../common_information/processes_tasks.rst:75 +#: ../../common_information/processes_tasks.rst:128 +msgid "3" +msgstr "" + +#: ../../common_information/processes_tasks.rst:14 +msgid "Maximum value of vector elements" +msgstr "" + +#: 
../../common_information/processes_tasks.rst:16 +#: ../../common_information/processes_tasks.rst:77 +#: ../../common_information/processes_tasks.rst:130 +msgid "4" +msgstr "" + +#: ../../common_information/processes_tasks.rst:16 +msgid "Minimum value of vector elements" +msgstr "" + +#: ../../common_information/processes_tasks.rst:18 +#: ../../common_information/processes_tasks.rst:79 +#: ../../common_information/processes_tasks.rst:132 +msgid "5" +msgstr "" + +#: ../../common_information/processes_tasks.rst:18 +msgid "" +"Finding the number of sign alternations between adjacent elements of the " +"vector" +msgstr "" + +#: ../../common_information/processes_tasks.rst:20 +#: ../../common_information/processes_tasks.rst:81 +#: ../../common_information/processes_tasks.rst:134 +#: ../../common_information/processes_tasks.rst:258 +msgid "6" +msgstr "" + +#: ../../common_information/processes_tasks.rst:20 +msgid "" +"Finding the number of order violations between adjacent elements of the " +"vector" +msgstr "" + +#: ../../common_information/processes_tasks.rst:22 +#: ../../common_information/processes_tasks.rst:83 +#: ../../common_information/processes_tasks.rst:136 +msgid "7" +msgstr "" + +#: ../../common_information/processes_tasks.rst:22 +msgid "Finding the most similar adjacent elements of the vector" +msgstr "" + +#: ../../common_information/processes_tasks.rst:24 +#: ../../common_information/processes_tasks.rst:85 +#: ../../common_information/processes_tasks.rst:138 +msgid "8" +msgstr "" + +#: ../../common_information/processes_tasks.rst:24 +msgid "Finding the most different adjacent elements of the vector" +msgstr "" + +#: ../../common_information/processes_tasks.rst:26 +#: ../../common_information/processes_tasks.rst:87 +#: ../../common_information/processes_tasks.rst:140 +msgid "9" +msgstr "" + +#: ../../common_information/processes_tasks.rst:26 +msgid "Scalar product of vectors" +msgstr "" + +#: ../../common_information/processes_tasks.rst:28 +#: 
../../common_information/processes_tasks.rst:89 +#: ../../common_information/processes_tasks.rst:142 +msgid "10" +msgstr "" + +#: ../../common_information/processes_tasks.rst:28 +msgid "Sum of matrix elements" +msgstr "" + +#: ../../common_information/processes_tasks.rst:30 +#: ../../common_information/processes_tasks.rst:91 +#: ../../common_information/processes_tasks.rst:144 +msgid "11" +msgstr "" + +#: ../../common_information/processes_tasks.rst:30 +msgid "Sum of values by rows in the matrix" +msgstr "" + +#: ../../common_information/processes_tasks.rst:32 +#: ../../common_information/processes_tasks.rst:93 +#: ../../common_information/processes_tasks.rst:147 +msgid "12" +msgstr "" + +#: ../../common_information/processes_tasks.rst:32 +msgid "Sum of values by columns in the matrix" +msgstr "" + +#: ../../common_information/processes_tasks.rst:34 +#: ../../common_information/processes_tasks.rst:95 +#: ../../common_information/processes_tasks.rst:150 +msgid "13" +msgstr "" + +#: ../../common_information/processes_tasks.rst:34 +msgid "Maximum value of matrix elements" +msgstr "" + +#: ../../common_information/processes_tasks.rst:36 +#: ../../common_information/processes_tasks.rst:97 +#: ../../common_information/processes_tasks.rst:153 +msgid "14" +msgstr "" + +#: ../../common_information/processes_tasks.rst:36 +msgid "Minimum value of matrix elements" +msgstr "" + +#: ../../common_information/processes_tasks.rst:38 +#: ../../common_information/processes_tasks.rst:99 +#: ../../common_information/processes_tasks.rst:155 +msgid "15" +msgstr "" + +#: ../../common_information/processes_tasks.rst:38 +msgid "Finding maximum values by rows in the matrix" +msgstr "" + +#: ../../common_information/processes_tasks.rst:40 +#: ../../common_information/processes_tasks.rst:101 +#: ../../common_information/processes_tasks.rst:157 +msgid "16" +msgstr "" + +#: ../../common_information/processes_tasks.rst:40 +msgid "Finding maximum values by columns in the matrix" +msgstr "" + +#: 
../../common_information/processes_tasks.rst:42 +#: ../../common_information/processes_tasks.rst:103 +#: ../../common_information/processes_tasks.rst:159 +msgid "17" +msgstr "" + +#: ../../common_information/processes_tasks.rst:42 +msgid "Finding minimum values by rows in the matrix" +msgstr "" + +#: ../../common_information/processes_tasks.rst:44 +#: ../../common_information/processes_tasks.rst:105 +#: ../../common_information/processes_tasks.rst:161 +msgid "18" +msgstr "" + +#: ../../common_information/processes_tasks.rst:44 +msgid "Finding minimum values by columns in the matrix" +msgstr "" + +#: ../../common_information/processes_tasks.rst:46 +#: ../../common_information/processes_tasks.rst:107 +#: ../../common_information/processes_tasks.rst:163 +msgid "19" +msgstr "" + +#: ../../common_information/processes_tasks.rst:46 +msgid "Integration – rectangle method" +msgstr "" + +#: ../../common_information/processes_tasks.rst:48 +#: ../../common_information/processes_tasks.rst:109 +#: ../../common_information/processes_tasks.rst:165 +msgid "20" +msgstr "" + +#: ../../common_information/processes_tasks.rst:48 +msgid "Integration – trapezoidal method" +msgstr "" + +#: ../../common_information/processes_tasks.rst:50 +#: ../../common_information/processes_tasks.rst:111 +#: ../../common_information/processes_tasks.rst:167 +#: ../../common_information/processes_tasks.rst:268 +msgid "21" +msgstr "" + +#: ../../common_information/processes_tasks.rst:50 +msgid "Integration – Monte Carlo method" +msgstr "" + +#: ../../common_information/processes_tasks.rst:52 +#: ../../common_information/processes_tasks.rst:113 +#: ../../common_information/processes_tasks.rst:169 +msgid "22" +msgstr "" + +#: ../../common_information/processes_tasks.rst:52 +msgid "Counting the number of alphabetical characters in a string" +msgstr "" + +#: ../../common_information/processes_tasks.rst:54 +#: ../../common_information/processes_tasks.rst:115 +#: ../../common_information/processes_tasks.rst:171 
+msgid "23" +msgstr "" + +#: ../../common_information/processes_tasks.rst:54 +msgid "Counting the frequency of a character in a string" +msgstr "" + +#: ../../common_information/processes_tasks.rst:56 +#: ../../common_information/processes_tasks.rst:173 +msgid "24" +msgstr "" + +#: ../../common_information/processes_tasks.rst:56 +msgid "Counting the number of words in a string" +msgstr "" + +#: ../../common_information/processes_tasks.rst:58 +#: ../../common_information/processes_tasks.rst:175 +msgid "25" +msgstr "" + +#: ../../common_information/processes_tasks.rst:58 +msgid "Counting the number of sentences in a string" +msgstr "" + +#: ../../common_information/processes_tasks.rst:60 +#: ../../common_information/processes_tasks.rst:177 +msgid "26" +msgstr "" + +#: ../../common_information/processes_tasks.rst:60 +msgid "Checking lexicographical order of two strings" +msgstr "" + +#: ../../common_information/processes_tasks.rst:62 +#: ../../common_information/processes_tasks.rst:179 +msgid "27" +msgstr "" + +#: ../../common_information/processes_tasks.rst:62 +msgid "Counting the number of differing characters between two strings" +msgstr "" + +#: ../../common_information/processes_tasks.rst:66 +msgid "Second task" +msgstr "" + +#: ../../common_information/processes_tasks.rst:71 +msgid "Broadcast (one to all transfer)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:73 +msgid "Reduce (all to one transfer)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:75 +msgid "Allreduce (all to one and broadcast)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:77 +msgid "Scatter (one to all transfer)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:79 +msgid "Gather (all to one transfer)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:81 +msgid "Line" +msgstr "" + +#: ../../common_information/processes_tasks.rst:83 +msgid "Ring" +msgstr "" + +#: ../../common_information/processes_tasks.rst:85 +msgid "Star" 
+msgstr "" + +#: ../../common_information/processes_tasks.rst:87 +msgid "Torus Grid" +msgstr "" + +#: ../../common_information/processes_tasks.rst:89 +msgid "Hypercube" +msgstr "" + +#: ../../common_information/processes_tasks.rst:91 +msgid "Horizontal strip scheme - matrix-vector multiplication" +msgstr "" + +#: ../../common_information/processes_tasks.rst:93 +msgid "Vertical strip scheme - matrix-vector multiplication" +msgstr "" + +#: ../../common_information/processes_tasks.rst:95 +msgid "" +"Horizontal strip scheme – partitioning only matrix A - matrix-matrix " +"multiplication" +msgstr "" + +#: ../../common_information/processes_tasks.rst:97 +msgid "" +"Horizontal strip scheme A, vertical strip scheme B - matrix-matrix " +"multiplication" +msgstr "" + +#: ../../common_information/processes_tasks.rst:99 +msgid "Gaussian method – horizontal strip scheme" +msgstr "" + +#: ../../common_information/processes_tasks.rst:101 +msgid "Gaussian method – vertical strip scheme" +msgstr "" + +#: ../../common_information/processes_tasks.rst:103 +msgid "Gauss-Jordan method" +msgstr "" + +#: ../../common_information/processes_tasks.rst:105 +msgid "Iterative methods (Jacobi)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:107 +msgid "Iterative methods (Gauss-Seidel)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:109 +msgid "Iterative methods (Simple)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:111 +msgid "Bubble sort (odd-even transposition algorithm)" +msgstr "" + +#: ../../common_information/processes_tasks.rst:113 +msgid "Image smoothing" +msgstr "" + +#: ../../common_information/processes_tasks.rst:115 +msgid "Contrast enhancement" +msgstr "" + +#: ../../common_information/processes_tasks.rst:119 +msgid "Third task" +msgstr "" + +#: ../../common_information/processes_tasks.rst:124 +msgid "" +"Dense matrix multiplication. Elements of data type double. Block scheme, " +"Cannon's algorithm." 
+msgstr "" + +#: ../../common_information/processes_tasks.rst:126 +msgid "" +"Dense matrix multiplication. Elements of data type double. Block scheme, " +"Fox's algorithm." +msgstr "" + +#: ../../common_information/processes_tasks.rst:128 +msgid "" +"Dense matrix multiplication. Elements of data type double. Strassen's " +"algorithm." +msgstr "" + +#: ../../common_information/processes_tasks.rst:130 +msgid "" +"Sparse matrix multiplication. Elements of data type double. Matrix " +"storage format – row format (CRS)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:132 +msgid "" +"Sparse matrix multiplication. Elements of data type double. Matrix " +"storage format – column format (CCS)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:134 +msgid "Solving systems of linear equations using the conjugate gradient method." +msgstr "" + +#: ../../common_information/processes_tasks.rst:136 +msgid "" +"Computing multidimensional integrals using a multistep scheme (rectangle " +"method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:138 +msgid "" +"Computing multidimensional integrals using a multistep scheme " +"(trapezoidal method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:140 +msgid "" +"Computing multidimensional integrals using a multistep scheme (Simpson's " +"method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:142 +msgid "Computing multidimensional integrals using the Monte Carlo method." +msgstr "" + +#: ../../common_information/processes_tasks.rst:144 +msgid "" +"Global search algorithm (Strongin's) for one-dimensional optimization " +"problems. Parallelization by characteristics." +msgstr "" + +#: ../../common_information/processes_tasks.rst:147 +msgid "" +"Multistep scheme for solving two-dimensional global optimization " +"problems. Parallelization by dividing the search area." 
+msgstr "" + +#: ../../common_information/processes_tasks.rst:150 +msgid "" +"Multistep scheme for solving two-dimensional global optimization " +"problems. Parallelization by characteristics." +msgstr "" + +#: ../../common_information/processes_tasks.rst:153 +msgid "Quick sort with simple merging." +msgstr "" + +#: ../../common_information/processes_tasks.rst:155 +msgid "Quick sort with odd-even merging (Batcher's method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:157 +msgid "Shell sort with simple merging." +msgstr "" + +#: ../../common_information/processes_tasks.rst:159 +msgid "Shell sort with odd-even merging (Batcher's method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:161 +msgid "Radix sort for integers with simple merging." +msgstr "" + +#: ../../common_information/processes_tasks.rst:163 +msgid "Radix sort for integers with odd-even merging (Batcher's method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:165 +msgid "Radix sort for floating-point numbers (type double) with simple merging." +msgstr "" + +#: ../../common_information/processes_tasks.rst:167 +msgid "" +"Radix sort for floating-point numbers (type double) with odd-even merging" +" (Batcher's method)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:169 +msgid "" +"Shortest path search from one vertex (Dijkstra's algorithm). With CRS " +"graphs." +msgstr "" + +#: ../../common_information/processes_tasks.rst:171 +msgid "" +"Shortest path search from one vertex (Bellman-Ford algorithm). With CRS " +"graphs." +msgstr "" + +#: ../../common_information/processes_tasks.rst:173 +msgid "Convex hull construction – Graham's scan." +msgstr "" + +#: ../../common_information/processes_tasks.rst:175 +msgid "Convex hull construction – Jarvis's march." +msgstr "" + +#: ../../common_information/processes_tasks.rst:177 +msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." 
+msgstr "" + +#: ../../common_information/processes_tasks.rst:179 +msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." +msgstr "" + +#: ../../common_information/processes_tasks.rst:181 +msgid "28" +msgstr "" + +#: ../../common_information/processes_tasks.rst:181 +msgid "Linear image filtering (block partition). Gaussian kernel 3x3." +msgstr "" + +#: ../../common_information/processes_tasks.rst:183 +msgid "29" +msgstr "" + +#: ../../common_information/processes_tasks.rst:183 +msgid "Edge detection in an image using the Sobel operator." +msgstr "" + +#: ../../common_information/processes_tasks.rst:185 +msgid "30" +msgstr "" + +#: ../../common_information/processes_tasks.rst:185 +msgid "Contrast enhancement of grayscale image using linear histogram stretching." +msgstr "" + +#: ../../common_information/processes_tasks.rst:187 +msgid "31" +msgstr "" + +#: ../../common_information/processes_tasks.rst:187 +msgid "" +"Labeling components on a binary image (black areas correspond to objects," +" white to background)." +msgstr "" + +#: ../../common_information/processes_tasks.rst:189 +msgid "32" +msgstr "" + +#: ../../common_information/processes_tasks.rst:189 +msgid "Convex hull construction for components of a binary image." +msgstr "" + +#: ../../common_information/processes_tasks.rst:193 +msgid "Comments for tasks 2 and 3:" +msgstr "" + +#: ../../common_information/processes_tasks.rst:195 +msgid "" +"MESSAGE PASSING METHODS “You need to implement the specified methods " +"using only the Send and Recv functions. The implemented function should " +"have the same prototype as the corresponding MPI function. The test " +"program should allow selecting the root process number and perform array " +"transmission (broadcast, gather) for at least the following types: " +"MPI_INT, MPI_FLOAT, MPI_DOUBLE. 
In all operations, the transmission " +"should be carried out using the ‘tree’ of processes.”" +msgstr "" + +#: ../../common_information/processes_tasks.rst:203 +#: ../../common_information/processes_tasks.rst:217 +#: ../../common_information/processes_tasks.rst:230 +#: ../../common_information/processes_tasks.rst:243 +#: ../../common_information/processes_tasks.rst:253 +#: ../../common_information/processes_tasks.rst:263 +#: ../../common_information/processes_tasks.rst:273 +msgid "Comments relevant for:" +msgstr "" + +#: ../../common_information/processes_tasks.rst:206 +msgid "Variants for task 2" +msgstr "" + +#: ../../common_information/processes_tasks.rst:206 +msgid "Variants for task 3" +msgstr "" + +#: ../../common_information/processes_tasks.rst:208 +#: ../../common_information/processes_tasks.rst:235 +msgid "1 - 5" +msgstr "" + +#: ../../common_information/processes_tasks.rst:208 +#: ../../common_information/processes_tasks.rst:222 +#: ../../common_information/processes_tasks.rst:278 +msgid "x" +msgstr "" + +#: ../../common_information/processes_tasks.rst:211 +msgid "" +"DATA COMMUNICATION NETWORK TOPOLOGIES “You need to implement the virtual " +"topology specified in the task using MPI capabilities for working with " +"communicators and topologies, and ensure the ability to transfer data " +"from any selected process to any other process. 
(Do not use " +"MPI_Cart_Create and MPI_Graph_Create)”" +msgstr "" + +#: ../../common_information/processes_tasks.rst:220 +#: ../../common_information/processes_tasks.rst:233 +#: ../../common_information/processes_tasks.rst:246 +#: ../../common_information/processes_tasks.rst:256 +#: ../../common_information/processes_tasks.rst:266 +#: ../../common_information/processes_tasks.rst:276 +msgid "Variants for task 2" +msgstr "" + +#: ../../common_information/processes_tasks.rst:220 +#: ../../common_information/processes_tasks.rst:233 +#: ../../common_information/processes_tasks.rst:246 +#: ../../common_information/processes_tasks.rst:256 +#: ../../common_information/processes_tasks.rst:266 +#: ../../common_information/processes_tasks.rst:276 +msgid "Variants for task 3" +msgstr "" + +#: ../../common_information/processes_tasks.rst:222 +msgid "6 - 10" +msgstr "" + +#: ../../common_information/processes_tasks.rst:225 +msgid "" +"MATRIX COMPUTATIONS “In the horizontal scheme, the matrix is divided " +"among processes by rows. In the vertical scheme, it is divided by " +"columns, and in this case, the vector is also divided among processes.”" +msgstr "" + +#: ../../common_information/processes_tasks.rst:235 +msgid "11 - 14" +msgstr "" + +#: ../../common_information/processes_tasks.rst:238 +msgid "" +"COMPUTER GRAPHICS AND IMAGE PROCESSING “It is assumed that the image is " +"given in color or grayscale, with the input data being a one-dimensional " +"array. 
Loading a real image is not required, but is allowed.”" +msgstr "" + +#: ../../common_information/processes_tasks.rst:248 +msgid "26 - 27" +msgstr "" + +#: ../../common_information/processes_tasks.rst:248 +msgid "24 - 32" +msgstr "" + +#: ../../common_information/processes_tasks.rst:251 +msgid "SOLUTION OF A SYSTEM OF LINEAR ALGEBRAIC EQUATIONS" +msgstr "" + +#: ../../common_information/processes_tasks.rst:258 +msgid "15 - 20" +msgstr "" + +#: ../../common_information/processes_tasks.rst:261 +msgid "SORT ALGORITHMS" +msgstr "" + +#: ../../common_information/processes_tasks.rst:268 +msgid "14 - 21" +msgstr "" + +#: ../../common_information/processes_tasks.rst:271 +msgid "GRAPH PROCESSING ALGORITHMS" +msgstr "" + +#: ../../common_information/processes_tasks.rst:278 +msgid "22 - 23" +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/common_information/report.po b/docs/locale/en/LC_MESSAGES/common_information/report.po index d85fc9104c..dc149d7c2c 100644 --- a/docs/locale/en/LC_MESSAGES/common_information/report.po +++ b/docs/locale/en/LC_MESSAGES/common_information/report.po @@ -1,178 +1,178 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-08 23:43+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/report.rst:2 -msgid "Report" -msgstr "" - -#: ../../common_information/report.rst:4 -msgid "Report points" -msgstr "" - -#: ../../common_information/report.rst:7 ../../common_information/report.rst:14 -msgid "Completeness" -msgstr "" - -#: ../../common_information/report.rst:7 ../../common_information/report.rst:27 -msgid "Text Quality" -msgstr "" - -#: ../../common_information/report.rst:7 ../../common_information/report.rst:34 -msgid "Formatting Quality" -msgstr "" - -#: ../../common_information/report.rst:7 -msgid "Total" -msgstr "" - -#: ../../common_information/report.rst:9 -msgid "5" -msgstr "" - -#: ../../common_information/report.rst:9 -msgid "2.5" -msgstr "" - -#: ../../common_information/report.rst:9 -msgid "10" -msgstr "" - -#: ../../common_information/report.rst:12 -msgid "Requirements for Criteria" -msgstr "" - -#: ../../common_information/report.rst:16 -msgid "Introduction (can be a short paragraph)" -msgstr "" - -#: ../../common_information/report.rst:17 -msgid "Problem Statement (descriptive)" -msgstr "" - -#: ../../common_information/report.rst:18 -msgid "Algorithm Description" -msgstr "" - -#: ../../common_information/report.rst:19 -msgid "Description of the Parallel Algorithm Scheme" -msgstr "" - -#: ../../common_information/report.rst:20 -msgid "" -"Description of the MPI, OpenMP, TBB, std::threads, all versions " -"(depending on the semester) – part of the software 
implementation " -"description" -msgstr "" - -#: ../../common_information/report.rst:21 -msgid "" -"Experimental Results (execution time and algorithm quality assessment), " -"description of correctness verification" -msgstr "" - -#: ../../common_information/report.rst:22 -msgid "Conclusions from the Results" -msgstr "" - -#: ../../common_information/report.rst:23 -msgid "Conclusion" -msgstr "" - -#: ../../common_information/report.rst:24 -msgid "References" -msgstr "" - -#: ../../common_information/report.rst:25 -msgid "Appendix (include code, ensuring readability)" -msgstr "" - -#: ../../common_information/report.rst:29 -msgid "Meaningfulness" -msgstr "" - -#: ../../common_information/report.rst:30 -msgid "Coherence" -msgstr "" - -#: ../../common_information/report.rst:31 -msgid "Clarity" -msgstr "" - -#: ../../common_information/report.rst:32 -msgid "Quality of language (only the most obvious mistakes are considered)" -msgstr "" - -#: ../../common_information/report.rst:36 -msgid "Requirements for Headings" -msgstr "" - -#: ../../common_information/report.rst:37 -msgid "Requirements for Text Alignment" -msgstr "" - -#: ../../common_information/report.rst:38 -msgid "Requirements for Paragraph Indentation" -msgstr "" - -#: ../../common_information/report.rst:39 -msgid "Requirements for the Formatting of Figures, Graphs, and Tables" -msgstr "" - -#: ../../common_information/report.rst:40 -msgid "" -"The “Teacher” field must include the full name, position, and title of " -"the lecturer" -msgstr "" - -#: ../../common_information/report.rst:42 -msgid "Comments" -msgstr "" - -#: ../../common_information/report.rst:44 -msgid "Failure to meet the requirements will result in a deduction of points." -msgstr "" - -#: ../../common_information/report.rst:45 -msgid "" -"The request will include points and comments regarding any requirement " -"violations (if applicable)." 
-msgstr "" - -#: ../../common_information/report.rst:46 -msgid "" -"The report will be checked only once, and the grade will be assigned " -"based on the submitted version according to the requirements." -msgstr "" - -#: ../../common_information/report.rst:47 -msgid "" -"The report is reviewed online; the entire review process takes place in " -"the request." -msgstr "" - -#: ../../common_information/report.rst:48 -msgid "" -"If a student falls into the **blue zone** for the task, the report points" -" will also be nullified at the end of the semester. The report will be " -"finally accepted and merged into the master branch only after both the " -"**online** parts of the corresponding lab work are fully completed." -msgstr "" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-08 23:43+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/report.rst:2 +msgid "Report" +msgstr "" + +#: ../../common_information/report.rst:4 +msgid "Report points" +msgstr "" + +#: ../../common_information/report.rst:7 ../../common_information/report.rst:14 +msgid "Completeness" +msgstr "" + +#: ../../common_information/report.rst:7 ../../common_information/report.rst:27 +msgid "Text Quality" +msgstr "" + +#: ../../common_information/report.rst:7 ../../common_information/report.rst:34 +msgid "Formatting Quality" 
+msgstr "" + +#: ../../common_information/report.rst:7 +msgid "Total" +msgstr "" + +#: ../../common_information/report.rst:9 +msgid "5" +msgstr "" + +#: ../../common_information/report.rst:9 +msgid "2.5" +msgstr "" + +#: ../../common_information/report.rst:9 +msgid "10" +msgstr "" + +#: ../../common_information/report.rst:12 +msgid "Requirements for Criteria" +msgstr "" + +#: ../../common_information/report.rst:16 +msgid "Introduction (can be a short paragraph)" +msgstr "" + +#: ../../common_information/report.rst:17 +msgid "Problem Statement (descriptive)" +msgstr "" + +#: ../../common_information/report.rst:18 +msgid "Algorithm Description" +msgstr "" + +#: ../../common_information/report.rst:19 +msgid "Description of the Parallel Algorithm Scheme" +msgstr "" + +#: ../../common_information/report.rst:20 +msgid "" +"Description of the MPI, OpenMP, TBB, std::threads, all versions " +"(depending on the semester) – part of the software implementation " +"description" +msgstr "" + +#: ../../common_information/report.rst:21 +msgid "" +"Experimental Results (execution time and algorithm quality assessment), " +"description of correctness verification" +msgstr "" + +#: ../../common_information/report.rst:22 +msgid "Conclusions from the Results" +msgstr "" + +#: ../../common_information/report.rst:23 +msgid "Conclusion" +msgstr "" + +#: ../../common_information/report.rst:24 +msgid "References" +msgstr "" + +#: ../../common_information/report.rst:25 +msgid "Appendix (include code, ensuring readability)" +msgstr "" + +#: ../../common_information/report.rst:29 +msgid "Meaningfulness" +msgstr "" + +#: ../../common_information/report.rst:30 +msgid "Coherence" +msgstr "" + +#: ../../common_information/report.rst:31 +msgid "Clarity" +msgstr "" + +#: ../../common_information/report.rst:32 +msgid "Quality of language (only the most obvious mistakes are considered)" +msgstr "" + +#: ../../common_information/report.rst:36 +msgid "Requirements for Headings" +msgstr "" + +#: 
../../common_information/report.rst:37 +msgid "Requirements for Text Alignment" +msgstr "" + +#: ../../common_information/report.rst:38 +msgid "Requirements for Paragraph Indentation" +msgstr "" + +#: ../../common_information/report.rst:39 +msgid "Requirements for the Formatting of Figures, Graphs, and Tables" +msgstr "" + +#: ../../common_information/report.rst:40 +msgid "" +"The “Teacher” field must include the full name, position, and title of " +"the lecturer" +msgstr "" + +#: ../../common_information/report.rst:42 +msgid "Comments" +msgstr "" + +#: ../../common_information/report.rst:44 +msgid "Failure to meet the requirements will result in a deduction of points." +msgstr "" + +#: ../../common_information/report.rst:45 +msgid "" +"The request will include points and comments regarding any requirement " +"violations (if applicable)." +msgstr "" + +#: ../../common_information/report.rst:46 +msgid "" +"The report will be checked only once, and the grade will be assigned " +"based on the submitted version according to the requirements." +msgstr "" + +#: ../../common_information/report.rst:47 +msgid "" +"The report is reviewed online; the entire review process takes place in " +"the request." +msgstr "" + +#: ../../common_information/report.rst:48 +msgid "" +"If a student falls into the **blue zone** for the task, the report points" +" will also be nullified at the end of the semester. The report will be " +"finally accepted and merged into the master branch only after both the " +"**online** parts of the corresponding lab work are fully completed." 
+msgstr "" diff --git a/docs/locale/en/LC_MESSAGES/common_information/threading_tasks.po b/docs/locale/en/LC_MESSAGES/common_information/threading_tasks.po index a7c45a7fa9..f2ab9c2e71 100644 --- a/docs/locale/en/LC_MESSAGES/common_information/threading_tasks.po +++ b/docs/locale/en/LC_MESSAGES/common_information/threading_tasks.po @@ -1,298 +1,298 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-05 13:29+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/threading_tasks.rst:2 -msgid "Thread parallelism tasks" -msgstr "" - -#: ../../common_information/threading_tasks.rst:5 -msgid "Variant Number" -msgstr "" - -#: ../../common_information/threading_tasks.rst:5 -msgid "Tasks" -msgstr "" - -#: ../../common_information/threading_tasks.rst:7 -msgid "1" -msgstr "" - -#: ../../common_information/threading_tasks.rst:7 -msgid "" -"Dense matrix multiplication. Elements of type double. Block scheme, " -"Cannon's algorithm." -msgstr "" - -#: ../../common_information/threading_tasks.rst:9 -msgid "2" -msgstr "" - -#: ../../common_information/threading_tasks.rst:9 -msgid "" -"Dense matrix multiplication. Elements of type double. Block scheme, Fox's" -" algorithm." 
-msgstr "" - -#: ../../common_information/threading_tasks.rst:11 -msgid "3" -msgstr "" - -#: ../../common_information/threading_tasks.rst:11 -msgid "" -"Dense matrix multiplication. Elements of type double. Strassen's " -"algorithm." -msgstr "" - -#: ../../common_information/threading_tasks.rst:13 -msgid "4" -msgstr "" - -#: ../../common_information/threading_tasks.rst:13 -msgid "" -"Sparse matrix multiplication. Elements of type double. Matrix storage " -"format – row format (Compressed Row Storage)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:15 -msgid "5" -msgstr "" - -#: ../../common_information/threading_tasks.rst:15 -msgid "" -"Sparse matrix multiplication. Elements of type double. Matrix storage " -"format – column format (Compressed Column Storage)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:17 -msgid "6" -msgstr "" - -#: ../../common_information/threading_tasks.rst:17 -msgid "" -"Sparse matrix multiplication. Complex type elements. Matrix storage " -"format – row format (Compressed Row Storage)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:19 -msgid "7" -msgstr "" - -#: ../../common_information/threading_tasks.rst:19 -msgid "" -"Sparse matrix multiplication. Complex type elements. Matrix storage " -"format – column format (Compressed Column Storage)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:21 -msgid "8" -msgstr "" - -#: ../../common_information/threading_tasks.rst:21 -msgid "Solving systems of linear equations using the conjugate gradient method." -msgstr "" - -#: ../../common_information/threading_tasks.rst:23 -msgid "9" -msgstr "" - -#: ../../common_information/threading_tasks.rst:23 -msgid "" -"Computing multidimensional integrals using a multistep scheme (rectangle " -"method)." 
-msgstr "" - -#: ../../common_information/threading_tasks.rst:25 -msgid "10" -msgstr "" - -#: ../../common_information/threading_tasks.rst:25 -msgid "" -"Computing multidimensional integrals using a multistep scheme " -"(trapezoidal method)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:27 -msgid "11" -msgstr "" - -#: ../../common_information/threading_tasks.rst:27 -msgid "" -"Computing multidimensional integrals using a multistep scheme (Simpson's " -"method)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:29 -msgid "12" -msgstr "" - -#: ../../common_information/threading_tasks.rst:29 -msgid "Computing multidimensional integrals using the Monte Carlo method." -msgstr "" - -#: ../../common_information/threading_tasks.rst:31 -msgid "13" -msgstr "" - -#: ../../common_information/threading_tasks.rst:31 -msgid "Quick sort with simple merging." -msgstr "" - -#: ../../common_information/threading_tasks.rst:33 -msgid "14" -msgstr "" - -#: ../../common_information/threading_tasks.rst:33 -msgid "Quick sort with odd-even merging (Batcher's method)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:35 -msgid "15" -msgstr "" - -#: ../../common_information/threading_tasks.rst:35 -msgid "Shell sort with simple merging." -msgstr "" - -#: ../../common_information/threading_tasks.rst:37 -msgid "16" -msgstr "" - -#: ../../common_information/threading_tasks.rst:37 -msgid "Shell sort with odd-even merging (Batcher's method)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:39 -msgid "17" -msgstr "" - -#: ../../common_information/threading_tasks.rst:39 -msgid "Radix sort for integers with simple merging." -msgstr "" - -#: ../../common_information/threading_tasks.rst:41 -msgid "18" -msgstr "" - -#: ../../common_information/threading_tasks.rst:41 -msgid "Radix sort for integers with odd-even merging (Batcher's method)." 
-msgstr "" - -#: ../../common_information/threading_tasks.rst:43 -msgid "19" -msgstr "" - -#: ../../common_information/threading_tasks.rst:43 -msgid "Radix sort for floating-point numbers (type double) with simple merging." -msgstr "" - -#: ../../common_information/threading_tasks.rst:45 -msgid "20" -msgstr "" - -#: ../../common_information/threading_tasks.rst:45 -msgid "" -"Radix sort for floating-point numbers (type double) with odd-even merging" -" (Batcher's method)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:47 -msgid "21" -msgstr "" - -#: ../../common_information/threading_tasks.rst:47 -msgid "Shortest path search from one vertex (Dijkstra's algorithm)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:49 -msgid "22" -msgstr "" - -#: ../../common_information/threading_tasks.rst:49 -msgid "Convex hull construction – Graham's scan." -msgstr "" - -#: ../../common_information/threading_tasks.rst:51 -msgid "23" -msgstr "" - -#: ../../common_information/threading_tasks.rst:51 -msgid "Convex hull construction – Jarvis's march." -msgstr "" - -#: ../../common_information/threading_tasks.rst:53 -msgid "24" -msgstr "" - -#: ../../common_information/threading_tasks.rst:53 -msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." -msgstr "" - -#: ../../common_information/threading_tasks.rst:55 -msgid "25" -msgstr "" - -#: ../../common_information/threading_tasks.rst:55 -msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." -msgstr "" - -#: ../../common_information/threading_tasks.rst:57 -msgid "26" -msgstr "" - -#: ../../common_information/threading_tasks.rst:57 -msgid "Linear image filtering (block partition). Gaussian kernel 3x3." -msgstr "" - -#: ../../common_information/threading_tasks.rst:59 -msgid "27" -msgstr "" - -#: ../../common_information/threading_tasks.rst:59 -msgid "Edge detection in an image using the Sobel operator." 
-msgstr "" - -#: ../../common_information/threading_tasks.rst:61 -msgid "28" -msgstr "" - -#: ../../common_information/threading_tasks.rst:61 -msgid "Contrast enhancement of grayscale image using linear histogram stretching." -msgstr "" - -#: ../../common_information/threading_tasks.rst:63 -msgid "29" -msgstr "" - -#: ../../common_information/threading_tasks.rst:63 -msgid "" -"Labeling components on a binary image (black areas correspond to objects," -" white to background)." -msgstr "" - -#: ../../common_information/threading_tasks.rst:65 -msgid "30" -msgstr "" - -#: ../../common_information/threading_tasks.rst:65 -msgid "Convex hull construction for components of a binary image." -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-05 13:29+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/threading_tasks.rst:2 +msgid "Thread parallelism tasks" +msgstr "" + +#: ../../common_information/threading_tasks.rst:5 +msgid "Variant Number" +msgstr "" + +#: ../../common_information/threading_tasks.rst:5 +msgid "Tasks" +msgstr "" + +#: ../../common_information/threading_tasks.rst:7 +msgid "1" +msgstr "" + +#: ../../common_information/threading_tasks.rst:7 +msgid "" +"Dense matrix multiplication. Elements of type double. Block scheme, " +"Cannon's algorithm." 
+msgstr "" + +#: ../../common_information/threading_tasks.rst:9 +msgid "2" +msgstr "" + +#: ../../common_information/threading_tasks.rst:9 +msgid "" +"Dense matrix multiplication. Elements of type double. Block scheme, Fox's" +" algorithm." +msgstr "" + +#: ../../common_information/threading_tasks.rst:11 +msgid "3" +msgstr "" + +#: ../../common_information/threading_tasks.rst:11 +msgid "" +"Dense matrix multiplication. Elements of type double. Strassen's " +"algorithm." +msgstr "" + +#: ../../common_information/threading_tasks.rst:13 +msgid "4" +msgstr "" + +#: ../../common_information/threading_tasks.rst:13 +msgid "" +"Sparse matrix multiplication. Elements of type double. Matrix storage " +"format – row format (Compressed Row Storage)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:15 +msgid "5" +msgstr "" + +#: ../../common_information/threading_tasks.rst:15 +msgid "" +"Sparse matrix multiplication. Elements of type double. Matrix storage " +"format – column format (Compressed Column Storage)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:17 +msgid "6" +msgstr "" + +#: ../../common_information/threading_tasks.rst:17 +msgid "" +"Sparse matrix multiplication. Complex type elements. Matrix storage " +"format – row format (Compressed Row Storage)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:19 +msgid "7" +msgstr "" + +#: ../../common_information/threading_tasks.rst:19 +msgid "" +"Sparse matrix multiplication. Complex type elements. Matrix storage " +"format – column format (Compressed Column Storage)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:21 +msgid "8" +msgstr "" + +#: ../../common_information/threading_tasks.rst:21 +msgid "Solving systems of linear equations using the conjugate gradient method." 
+msgstr "" + +#: ../../common_information/threading_tasks.rst:23 +msgid "9" +msgstr "" + +#: ../../common_information/threading_tasks.rst:23 +msgid "" +"Computing multidimensional integrals using a multistep scheme (rectangle " +"method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:25 +msgid "10" +msgstr "" + +#: ../../common_information/threading_tasks.rst:25 +msgid "" +"Computing multidimensional integrals using a multistep scheme " +"(trapezoidal method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:27 +msgid "11" +msgstr "" + +#: ../../common_information/threading_tasks.rst:27 +msgid "" +"Computing multidimensional integrals using a multistep scheme (Simpson's " +"method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:29 +msgid "12" +msgstr "" + +#: ../../common_information/threading_tasks.rst:29 +msgid "Computing multidimensional integrals using the Monte Carlo method." +msgstr "" + +#: ../../common_information/threading_tasks.rst:31 +msgid "13" +msgstr "" + +#: ../../common_information/threading_tasks.rst:31 +msgid "Quick sort with simple merging." +msgstr "" + +#: ../../common_information/threading_tasks.rst:33 +msgid "14" +msgstr "" + +#: ../../common_information/threading_tasks.rst:33 +msgid "Quick sort with odd-even merging (Batcher's method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:35 +msgid "15" +msgstr "" + +#: ../../common_information/threading_tasks.rst:35 +msgid "Shell sort with simple merging." +msgstr "" + +#: ../../common_information/threading_tasks.rst:37 +msgid "16" +msgstr "" + +#: ../../common_information/threading_tasks.rst:37 +msgid "Shell sort with odd-even merging (Batcher's method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:39 +msgid "17" +msgstr "" + +#: ../../common_information/threading_tasks.rst:39 +msgid "Radix sort for integers with simple merging." 
+msgstr "" + +#: ../../common_information/threading_tasks.rst:41 +msgid "18" +msgstr "" + +#: ../../common_information/threading_tasks.rst:41 +msgid "Radix sort for integers with odd-even merging (Batcher's method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:43 +msgid "19" +msgstr "" + +#: ../../common_information/threading_tasks.rst:43 +msgid "Radix sort for floating-point numbers (type double) with simple merging." +msgstr "" + +#: ../../common_information/threading_tasks.rst:45 +msgid "20" +msgstr "" + +#: ../../common_information/threading_tasks.rst:45 +msgid "" +"Radix sort for floating-point numbers (type double) with odd-even merging" +" (Batcher's method)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:47 +msgid "21" +msgstr "" + +#: ../../common_information/threading_tasks.rst:47 +msgid "Shortest path search from one vertex (Dijkstra's algorithm)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:49 +msgid "22" +msgstr "" + +#: ../../common_information/threading_tasks.rst:49 +msgid "Convex hull construction – Graham's scan." +msgstr "" + +#: ../../common_information/threading_tasks.rst:51 +msgid "23" +msgstr "" + +#: ../../common_information/threading_tasks.rst:51 +msgid "Convex hull construction – Jarvis's march." +msgstr "" + +#: ../../common_information/threading_tasks.rst:53 +msgid "24" +msgstr "" + +#: ../../common_information/threading_tasks.rst:53 +msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." +msgstr "" + +#: ../../common_information/threading_tasks.rst:55 +msgid "25" +msgstr "" + +#: ../../common_information/threading_tasks.rst:55 +msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." +msgstr "" + +#: ../../common_information/threading_tasks.rst:57 +msgid "26" +msgstr "" + +#: ../../common_information/threading_tasks.rst:57 +msgid "Linear image filtering (block partition). Gaussian kernel 3x3." 
+msgstr "" + +#: ../../common_information/threading_tasks.rst:59 +msgid "27" +msgstr "" + +#: ../../common_information/threading_tasks.rst:59 +msgid "Edge detection in an image using the Sobel operator." +msgstr "" + +#: ../../common_information/threading_tasks.rst:61 +msgid "28" +msgstr "" + +#: ../../common_information/threading_tasks.rst:61 +msgid "Contrast enhancement of grayscale image using linear histogram stretching." +msgstr "" + +#: ../../common_information/threading_tasks.rst:63 +msgid "29" +msgstr "" + +#: ../../common_information/threading_tasks.rst:63 +msgid "" +"Labeling components on a binary image (black areas correspond to objects," +" white to background)." +msgstr "" + +#: ../../common_information/threading_tasks.rst:65 +msgid "30" +msgstr "" + +#: ../../common_information/threading_tasks.rst:65 +msgid "Convex hull construction for components of a binary image." +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/index.po b/docs/locale/en/LC_MESSAGES/index.po index c62b1aaab5..66596c3ef5 100644 --- a/docs/locale/en/LC_MESSAGES/index.po +++ b/docs/locale/en/LC_MESSAGES/index.po @@ -1,43 +1,43 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-05 13:28+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../index.rst:6 -msgid "User Guide:" -msgstr "" - -#: ../../index.rst:16 -msgid "Course Details:" -msgstr "" - -#: ../../index.rst:2 -msgid "Parallel Programming Course documentation" -msgstr "" - -#: ../../index.rst:4 -msgid "" -"Below is the table of contents for the Parallel Programming Course " -"documentation. Follow the links to learn more about each topic." -msgstr "" - -#~ msgid "Common Information:" -#~ msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-05 13:28+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../index.rst:6 +msgid "User Guide:" +msgstr "" + +#: ../../index.rst:16 +msgid "Course Details:" +msgstr "" + +#: ../../index.rst:2 +msgid "Parallel Programming Course documentation" +msgstr "" + +#: ../../index.rst:4 +msgid "" +"Below is the table of contents for the Parallel Programming Course " +"documentation. Follow the links to learn more about each topic." +msgstr "" + +#~ msgid "Common Information:" +#~ msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/api.po b/docs/locale/en/LC_MESSAGES/user_guide/api.po index 88a83db8a4..4df7f2f88d 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/api.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/api.po @@ -1,73 +1,73 @@ -# Parallel Programming Course Documentation. -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-08-22 21:38+0200\n" -"PO-Revision-Date: 2025-08-22 21:45+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../../../docs/user_guide/api.rst:2 -msgid "API Reference" -msgstr "" - -#: ../../../../docs/user_guide/api.rst:8 -msgid "Runners Module" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Functions" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Parameters" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Returns" -msgstr "" - -#: ../../../../docs/user_guide/api.rst:14 -msgid "Task Module" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Typedefs" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Template Parameters" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Enums" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Throws" -msgstr "" - -#: ../../../../docs/user_guide/api.rst -msgid "Variables" -msgstr "" - -#: ../../../../docs/user_guide/api.rst:20 -msgid "Utility Module" -msgstr "" - -#: ../../../../docs/user_guide/api.rst:26 -msgid "Performance Module" -msgstr "" - +# Parallel Programming Course Documentation. +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-08-22 21:38+0200\n" +"PO-Revision-Date: 2025-08-22 21:45+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../../../docs/user_guide/api.rst:2 +msgid "API Reference" +msgstr "" + +#: ../../../../docs/user_guide/api.rst:8 +msgid "Runners Module" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Functions" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Parameters" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Returns" +msgstr "" + +#: ../../../../docs/user_guide/api.rst:14 +msgid "Task Module" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Typedefs" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Template Parameters" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Enums" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Throws" +msgstr "" + +#: ../../../../docs/user_guide/api.rst +msgid "Variables" +msgstr "" + +#: ../../../../docs/user_guide/api.rst:20 +msgid "Utility Module" +msgstr "" + +#: ../../../../docs/user_guide/api.rst:26 +msgid "Performance Module" +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/build.po b/docs/locale/en/LC_MESSAGES/user_guide/build.po index fa14545907..f4bf59a726 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/build.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/build.po @@ -1,74 +1,74 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. 
-# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-20 23:19+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/build.rst:2 -msgid "Build the Project with ``CMake``" -msgstr "" - -#: ../../user_guide/build.rst:4 -msgid "Navigate to a source code folder." -msgstr "" - -#: ../../user_guide/build.rst:6 -msgid "**Configure the build**: ``Makefile``, ``.sln``, etc." -msgstr "" - -#: ../../user_guide/build.rst:13 -msgid "*Help on CMake keys:*" -msgstr "" - -#: ../../user_guide/build.rst:20 -msgid "``-D USE_FUNC_TESTS=ON`` enable functional tests." -msgstr "" - -#: ../../user_guide/build.rst:21 -msgid "``-D USE_PERF_TESTS=ON`` enable performance tests." -msgstr "" - -#: ../../user_guide/build.rst:22 -msgid "``-D CMAKE_BUILD_TYPE=Release`` normal build (default)." -msgstr "" - -#: ../../user_guide/build.rst:23 -msgid "``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` recommended when using sanitizers or running ``valgrind`` to keep debug information." -msgstr "" - -#: ../../user_guide/build.rst:24 -msgid "``-D CMAKE_BUILD_TYPE=Debug`` for debugging sessions." 
-msgstr "" - -#: ../../user_guide/build.rst:24 -msgid "*A corresponding flag can be omitted if it's not needed.*" -msgstr "" - -#: ../../user_guide/build.rst:26 -msgid "**Build the project**:" -msgstr "" - -#: ../../user_guide/build.rst:32 -msgid "**Check the task**:" -msgstr "" - -#: ../../user_guide/build.rst:34 -msgid "Run ``/build/bin``" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-20 23:19+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/build.rst:2 +msgid "Build the Project with ``CMake``" +msgstr "" + +#: ../../user_guide/build.rst:4 +msgid "Navigate to a source code folder." +msgstr "" + +#: ../../user_guide/build.rst:6 +msgid "**Configure the build**: ``Makefile``, ``.sln``, etc." +msgstr "" + +#: ../../user_guide/build.rst:13 +msgid "*Help on CMake keys:*" +msgstr "" + +#: ../../user_guide/build.rst:20 +msgid "``-D USE_FUNC_TESTS=ON`` enable functional tests." +msgstr "" + +#: ../../user_guide/build.rst:21 +msgid "``-D USE_PERF_TESTS=ON`` enable performance tests." +msgstr "" + +#: ../../user_guide/build.rst:22 +msgid "``-D CMAKE_BUILD_TYPE=Release`` normal build (default)." +msgstr "" + +#: ../../user_guide/build.rst:23 +msgid "``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` recommended when using sanitizers or running ``valgrind`` to keep debug information." 
+msgstr "" + +#: ../../user_guide/build.rst:24 +msgid "``-D CMAKE_BUILD_TYPE=Debug`` for debugging sessions." +msgstr "" + +#: ../../user_guide/build.rst:24 +msgid "*A corresponding flag can be omitted if it's not needed.*" +msgstr "" + +#: ../../user_guide/build.rst:26 +msgid "**Build the project**:" +msgstr "" + +#: ../../user_guide/build.rst:32 +msgid "**Check the task**:" +msgstr "" + +#: ../../user_guide/build.rst:34 +msgid "Run ``/build/bin``" +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/ci.po b/docs/locale/en/LC_MESSAGES/user_guide/ci.po index 60a135e565..ae8c10c4a8 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/ci.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/ci.po @@ -1,46 +1,46 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-25 16:54+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/ci.rst:2 -msgid "Continuous Integration (CI)" -msgstr "" - -#: ../../user_guide/ci.rst:4 -msgid "" -"Students need to pass all the checks in the CI pipeline before their work" -" can be considered for submission. This includes successful code " -"checkout, build ans testing stages. Each integration is verified by an " -"automated build and automated tests." 
-msgstr "" - -#: ../../user_guide/ci.rst:9 -msgid "CI Pipeline" -msgstr "" - -#: ../../user_guide/ci.rst:11 -msgid "The CI pipeline for this project is illustrated in the following diagram:" -msgstr "" - -#: ../../user_guide/ci.rst:13 -msgid "CI Pipeline Diagram" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-25 16:54+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/ci.rst:2 +msgid "Continuous Integration (CI)" +msgstr "" + +#: ../../user_guide/ci.rst:4 +msgid "" +"Students need to pass all the checks in the CI pipeline before their work" +" can be considered for submission. This includes successful code " +"checkout, build ans testing stages. Each integration is verified by an " +"automated build and automated tests." 
+msgstr "" + +#: ../../user_guide/ci.rst:9 +msgid "CI Pipeline" +msgstr "" + +#: ../../user_guide/ci.rst:11 +msgid "The CI pipeline for this project is illustrated in the following diagram:" +msgstr "" + +#: ../../user_guide/ci.rst:13 +msgid "CI Pipeline Diagram" +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/download.po b/docs/locale/en/LC_MESSAGES/user_guide/download.po index a66fc73ddb..bba506ca5b 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/download.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/download.po @@ -1,26 +1,26 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-20 23:19+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/download.rst:2 -msgid "Download all submodules" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-20 23:19+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/download.rst:2 +msgid "Download all submodules" +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/environment.po b/docs/locale/en/LC_MESSAGES/user_guide/environment.po index 36751ae21f..1177d7f3fc 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/environment.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/environment.po @@ -1,120 +1,120 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-05-19 16:44+0200\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/environment.rst:2 -msgid "Set Up Your Environment" -msgstr "" - -#: ../../user_guide/environment.rst:5 -msgid "Build prerequisites" -msgstr "" - -#: ../../user_guide/environment.rst:6 -msgid "" -"**Windows**: Download and install CMake from https://cmake.org/download " -"(select the Windows installer) or install using Chocolatey:" -msgstr "" - -#: ../../user_guide/environment.rst:12 -msgid "**Linux (Ubuntu/Debian)**: Install using package manager:" -msgstr "" - -#: ../../user_guide/environment.rst:19 -msgid "**macOS**: Install using Homebrew:" -msgstr "" - -#: ../../user_guide/environment.rst:27 -msgid "Code Style Analysis" -msgstr "" - -#: ../../user_guide/environment.rst:28 -msgid "" -"Please follow the `Google C++ Style Guide " -"`_." -msgstr "" - -#: ../../user_guide/environment.rst:30 -msgid "" -"Code style is checked using the `clang-format " -"`_ tool." -msgstr "" - -#: ../../user_guide/environment.rst:33 -msgid "Parallel Programming Technologies" -msgstr "" - -#: ../../user_guide/environment.rst:36 -msgid "``MPI``" -msgstr "" - -#: ../../user_guide/environment.rst:37 -msgid "**Windows (MSVC)**:" -msgstr "" - -#: ../../user_guide/environment.rst:39 -msgid "" -"`Installers link `_. You have to install " -"``msmpisdk.msi`` and ``msmpisetup.exe``." 
-msgstr "" - -#: ../../user_guide/environment.rst:41 -#: ../../user_guide/environment.rst:57 -msgid "**Linux (gcc and clang)**:" -msgstr "" - -#: ../../user_guide/environment.rst:47 -msgid "**MacOS (apple clang)**:" -msgstr "" - -#: ../../user_guide/environment.rst:54 -msgid "``OpenMP``" -msgstr "" - -#: ../../user_guide/environment.rst:55 -msgid "" -"``OpenMP`` is included in ``gcc`` and ``msvc``, but some components " -"should be installed additionally:" -msgstr "" - -#: ../../user_guide/environment.rst:63 -msgid "**MacOS (llvm)**:" -msgstr "" - -#: ../../user_guide/environment.rst:71 -msgid "``TBB``" -msgstr "" - -#: ../../user_guide/environment.rst:72 -msgid "" -"**Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: " -"Build as 3rdparty in the current project." -msgstr "" - -#: ../../user_guide/environment.rst:76 -msgid "``std::thread``" -msgstr "" - -#: ../../user_guide/environment.rst:77 -msgid "``std::thread`` is included in STL libraries." -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-05-19 16:44+0200\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/environment.rst:2 +msgid "Set Up Your Environment" +msgstr "" + +#: ../../user_guide/environment.rst:5 +msgid "Build prerequisites" +msgstr "" + +#: ../../user_guide/environment.rst:6 +msgid "" +"**Windows**: Download and install CMake from https://cmake.org/download " +"(select the Windows installer) or install using Chocolatey:" +msgstr "" + +#: ../../user_guide/environment.rst:12 +msgid "**Linux (Ubuntu/Debian)**: Install using package manager:" +msgstr "" + +#: ../../user_guide/environment.rst:19 +msgid "**macOS**: Install using Homebrew:" +msgstr "" + +#: ../../user_guide/environment.rst:27 +msgid "Code Style Analysis" +msgstr "" + +#: ../../user_guide/environment.rst:28 +msgid "" +"Please follow the `Google C++ Style Guide " +"`_." +msgstr "" + +#: ../../user_guide/environment.rst:30 +msgid "" +"Code style is checked using the `clang-format " +"`_ tool." +msgstr "" + +#: ../../user_guide/environment.rst:33 +msgid "Parallel Programming Technologies" +msgstr "" + +#: ../../user_guide/environment.rst:36 +msgid "``MPI``" +msgstr "" + +#: ../../user_guide/environment.rst:37 +msgid "**Windows (MSVC)**:" +msgstr "" + +#: ../../user_guide/environment.rst:39 +msgid "" +"`Installers link `_. You have to install " +"``msmpisdk.msi`` and ``msmpisetup.exe``." 
+msgstr "" + +#: ../../user_guide/environment.rst:41 +#: ../../user_guide/environment.rst:57 +msgid "**Linux (gcc and clang)**:" +msgstr "" + +#: ../../user_guide/environment.rst:47 +msgid "**MacOS (apple clang)**:" +msgstr "" + +#: ../../user_guide/environment.rst:54 +msgid "``OpenMP``" +msgstr "" + +#: ../../user_guide/environment.rst:55 +msgid "" +"``OpenMP`` is included in ``gcc`` and ``msvc``, but some components " +"should be installed additionally:" +msgstr "" + +#: ../../user_guide/environment.rst:63 +msgid "**MacOS (llvm)**:" +msgstr "" + +#: ../../user_guide/environment.rst:71 +msgid "``TBB``" +msgstr "" + +#: ../../user_guide/environment.rst:72 +msgid "" +"**Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: " +"Build as 3rdparty in the current project." +msgstr "" + +#: ../../user_guide/environment.rst:76 +msgid "``std::thread``" +msgstr "" + +#: ../../user_guide/environment.rst:77 +msgid "``std::thread`` is included in STL libraries." +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/environment_variables.po b/docs/locale/en/LC_MESSAGES/user_guide/environment_variables.po index 3347c68772..3db5eec0b5 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/environment_variables.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/environment_variables.po @@ -1,58 +1,58 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-05-29 19:31+0200\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../../../docs/user_guide/environment_variables.rst:2 -msgid "Environment Variables" -msgstr "" - -#: ../../../../docs/user_guide/environment_variables.rst:4 -msgid "" -"The following environment variables can be used to configure the " -"project's runtime behavior:" -msgstr "" - -#: ../../../../docs/user_guide/environment_variables.rst:6 -msgid "" -"``PPC_NUM_PROC``: Specifies the number of processes to launch. Default: " -"``1``" -msgstr "" - -#: ../../../../docs/user_guide/environment_variables.rst:9 -msgid "" -"``PPC_NUM_THREADS``: Specifies the number of threads to use. Default: " -"``1``" -msgstr "" - -#: ../../../../docs/user_guide/environment_variables.rst:12 -msgid "" -"``PPC_ASAN_RUN``: Specifies that application is compiler with sanitizers." -" Used by ``scripts/run_tests.py`` to skip ``valgrind`` runs. Default: " -"``0``" -msgstr "" - -#: ../../../../docs/user_guide/environment_variables.rst:15 -msgid "" -"``PPC_IGNORE_TEST_TIME_LIMIT``: Specifies that test time limits are " -"ignored. Used by ``scripts/run_tests.py`` to disable time limit " -"enforcement. Default: ``0``" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-05-29 19:31+0200\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../../../docs/user_guide/environment_variables.rst:2 +msgid "Environment Variables" +msgstr "" + +#: ../../../../docs/user_guide/environment_variables.rst:4 +msgid "" +"The following environment variables can be used to configure the " +"project's runtime behavior:" +msgstr "" + +#: ../../../../docs/user_guide/environment_variables.rst:6 +msgid "" +"``PPC_NUM_PROC``: Specifies the number of processes to launch. Default: " +"``1``" +msgstr "" + +#: ../../../../docs/user_guide/environment_variables.rst:9 +msgid "" +"``PPC_NUM_THREADS``: Specifies the number of threads to use. Default: " +"``1``" +msgstr "" + +#: ../../../../docs/user_guide/environment_variables.rst:12 +msgid "" +"``PPC_ASAN_RUN``: Specifies that application is compiler with sanitizers." +" Used by ``scripts/run_tests.py`` to skip ``valgrind`` runs. Default: " +"``0``" +msgstr "" + +#: ../../../../docs/user_guide/environment_variables.rst:15 +msgid "" +"``PPC_IGNORE_TEST_TIME_LIMIT``: Specifies that test time limits are " +"ignored. Used by ``scripts/run_tests.py`` to disable time limit " +"enforcement. 
Default: ``0``" +msgstr "" + diff --git a/docs/locale/en/LC_MESSAGES/user_guide/submit_work.po b/docs/locale/en/LC_MESSAGES/user_guide/submit_work.po index 43087b2e77..382c7cc0d1 100644 --- a/docs/locale/en/LC_MESSAGES/user_guide/submit_work.po +++ b/docs/locale/en/LC_MESSAGES/user_guide/submit_work.po @@ -1,133 +1,133 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-20 23:19+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: en\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/submit_work.rst:2 -msgid "How to submit your work" -msgstr "" - -#: ../../user_guide/submit_work.rst:4 -msgid "" -"There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in" -" the ``tasks`` directory. Move to a folder of your task. Create a " -"directory named ``__``." -msgstr "" - -#: ../../user_guide/submit_work.rst:6 -msgid "" -"Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the " -"same** name directory. If the ``seq`` task is named " -"``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named " -"``omp/nesterov_a_vector_sum``." -msgstr "" - -#: ../../user_guide/submit_work.rst:8 -msgid "" -"Navigate into the newly created folder and begin your work on the task. 
" -"The folder must contain only 4 directories with files:" -msgstr "" - -#: ../../user_guide/submit_work.rst:10 -msgid "" -"``data`` - Directory with own data files for functional testing of " -"the task." -msgstr "" - -#: ../../user_guide/submit_work.rst:11 -msgid "" -"``func_tests`` - Directory with Google tests for functional testing of " -"the task." -msgstr "" - -#: ../../user_guide/submit_work.rst:12 -msgid "``include`` - Directory for header files with function prototypes." -msgstr "" - -#: ../../user_guide/submit_work.rst:13 -msgid "" -"``perf_tests`` - Directory with Google tests for performance testing. The" -" number of tests must be 2: ``run_task`` and ``run_pipeline``." -msgstr "" - -#: ../../user_guide/submit_work.rst:14 -msgid "" -"``src`` - Directory with source files containing the function " -"implementations." -msgstr "" - -#: ../../user_guide/submit_work.rst:16 -msgid "There must be 10 executable files for running:" -msgstr "" - -#: ../../user_guide/submit_work.rst:18 -msgid "" -"``__tests``. For example, " -"``omp_perf_tests`` - an executable file for performance tests of OpenMP " -"practice tasks." -msgstr "" - -#: ../../user_guide/submit_work.rst:20 -msgid "" -"All prototypes and classes in the ``include`` directory must be " -"namespace-escaped. 
Name your namespace as follows:" -msgstr "" - -#: ../../user_guide/submit_work.rst:36 -msgid "Name your group of tests and individual test cases as follows:" -msgstr "" - -#: ../../user_guide/submit_work.rst:38 -msgid "For functional tests (for maximum coverage):" -msgstr "" - -#: ../../user_guide/submit_work.rst:50 -msgid "" -"For performance tests (only 2 tests - ``pipeline`` and ``task`` - no more" -" no less):" -msgstr "" - -#: ../../user_guide/submit_work.rst:65 -msgid "Name your pull request as follows:" -msgstr "" - -#: ../../user_guide/submit_work.rst:67 -msgid "For tasks:" -msgstr "" - -#: ../../user_guide/submit_work.rst:74 -msgid "Provide the full task definition in the pull request's description." -msgstr "" - -#: ../../user_guide/submit_work.rst:76 -msgid "Example pull request can be found in the repository's pull requests." -msgstr "" - -#: ../../user_guide/submit_work.rst:78 -msgid "" -"Work on your forked repository. Keep your work on a separate branch (not " -"on ``master``)!!! Name your branch the same as your task's folder. To " -"create a branch, run:" -msgstr "" - -#: ../../user_guide/submit_work.rst:84 -msgid "**Failing to follow the rules will result in a red project build.**" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-20 23:19+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: en\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/submit_work.rst:2 +msgid "How to submit your work" +msgstr "" + +#: ../../user_guide/submit_work.rst:4 +msgid "" +"There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in" +" the ``tasks`` directory. Move to a folder of your task. Create a " +"directory named ``__``." +msgstr "" + +#: ../../user_guide/submit_work.rst:6 +msgid "" +"Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the " +"same** name directory. If the ``seq`` task is named " +"``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named " +"``omp/nesterov_a_vector_sum``." +msgstr "" + +#: ../../user_guide/submit_work.rst:8 +msgid "" +"Navigate into the newly created folder and begin your work on the task. " +"The folder must contain only 4 directories with files:" +msgstr "" + +#: ../../user_guide/submit_work.rst:10 +msgid "" +"``data`` - Directory with own data files for functional testing of " +"the task." +msgstr "" + +#: ../../user_guide/submit_work.rst:11 +msgid "" +"``func_tests`` - Directory with Google tests for functional testing of " +"the task." +msgstr "" + +#: ../../user_guide/submit_work.rst:12 +msgid "``include`` - Directory for header files with function prototypes." +msgstr "" + +#: ../../user_guide/submit_work.rst:13 +msgid "" +"``perf_tests`` - Directory with Google tests for performance testing. The" +" number of tests must be 2: ``run_task`` and ``run_pipeline``." 
+msgstr "" + +#: ../../user_guide/submit_work.rst:14 +msgid "" +"``src`` - Directory with source files containing the function " +"implementations." +msgstr "" + +#: ../../user_guide/submit_work.rst:16 +msgid "There must be 10 executable files for running:" +msgstr "" + +#: ../../user_guide/submit_work.rst:18 +msgid "" +"``__tests``. For example, " +"``omp_perf_tests`` - an executable file for performance tests of OpenMP " +"practice tasks." +msgstr "" + +#: ../../user_guide/submit_work.rst:20 +msgid "" +"All prototypes and classes in the ``include`` directory must be " +"namespace-escaped. Name your namespace as follows:" +msgstr "" + +#: ../../user_guide/submit_work.rst:36 +msgid "Name your group of tests and individual test cases as follows:" +msgstr "" + +#: ../../user_guide/submit_work.rst:38 +msgid "For functional tests (for maximum coverage):" +msgstr "" + +#: ../../user_guide/submit_work.rst:50 +msgid "" +"For performance tests (only 2 tests - ``pipeline`` and ``task`` - no more" +" no less):" +msgstr "" + +#: ../../user_guide/submit_work.rst:65 +msgid "Name your pull request as follows:" +msgstr "" + +#: ../../user_guide/submit_work.rst:67 +msgid "For tasks:" +msgstr "" + +#: ../../user_guide/submit_work.rst:74 +msgid "Provide the full task definition in the pull request's description." +msgstr "" + +#: ../../user_guide/submit_work.rst:76 +msgid "Example pull request can be found in the repository's pull requests." +msgstr "" + +#: ../../user_guide/submit_work.rst:78 +msgid "" +"Work on your forked repository. Keep your work on a separate branch (not " +"on ``master``)!!! Name your branch the same as your task's folder. 
To " +"create a branch, run:" +msgstr "" + +#: ../../user_guide/submit_work.rst:84 +msgid "**Failing to follow the rules will result in a red project build.**" +msgstr "" + diff --git a/docs/locale/ru/LC_MESSAGES/common_information/introduction.po b/docs/locale/ru/LC_MESSAGES/common_information/introduction.po index 093279aa23..9e727a6d33 100644 --- a/docs/locale/ru/LC_MESSAGES/common_information/introduction.po +++ b/docs/locale/ru/LC_MESSAGES/common_information/introduction.po @@ -1,225 +1,225 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-09 00:23+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/introduction.rst:2 -msgid "Introduction" -msgstr "Вводная информация" - -#: ../../common_information/introduction.rst:5 -msgid "Practice" -msgstr "Практика" - -#: ../../common_information/introduction.rst:7 -msgid "We work online" -msgstr "Работа происходит в режиме online" - -#: ../../common_information/introduction.rst:9 -msgid "Use GitHub repository" -msgstr "Используется GitHub репозиторий" - -#: ../../common_information/introduction.rst:10 -msgid "Use Pull Requests" -msgstr "Для задач задействуются Pull Request'ы" - -#: ../../common_information/introduction.rst:11 -msgid "Merge into the master branch" -msgstr "Рабочая git-ветка курса - master" - -#: ../../common_information/introduction.rst:12 -msgid "Test verification" -msgstr "Используются различные виды тестирования" - -#: ../../common_information/introduction.rst:14 -msgid "Task distribution is random for each student." -msgstr "Распределение задач производится случайным образом для каждого человека" - -#: ../../common_information/introduction.rst:15 -msgid "" -"An example for each technology can be found in the corresponding " -"directory: ``tasks//example``." -msgstr "" -"Пример для каждой технологии находится в директории: " -"``tasks//example``." - -#: ../../common_information/introduction.rst:16 -msgid "" -"In each repository, the README.md contains a link to the course " -"documentation (**read it fully!!!**)." -msgstr "" -"В каждом из репозиториев в README.md находится документация курса " -"(просьба читать полностью!!!)" - -#: ../../common_information/introduction.rst:17 -msgid "" -"Additionally, each repository includes an example of a properly formatted" -" PULL REQUEST." 
-msgstr "Также в каждом репозитории представлен пример оформления PULL REQUEST'а" - -#: ../../common_information/introduction.rst:18 -msgid "Submission of all tasks is mandatory to pass the course." -msgstr "Все задачи обязательны к выполнению" - -#: ../../common_information/introduction.rst:19 -msgid "" -"A task that has been merged into the master branch continues to be " -"monitored. If a task fails in the master, it is disabled, and a record of" -" this is added to the score table. All disabled tasks will result in a " -"zero points result for those tasks at the end of the semester. It can be " -"seen that your task is disabled due to the following reason: the " -"directory of your task has been renamed from " -"``seq/nesterov_a_vector_sum`` to ``seq/nesterov_a_vector_sum_disabled``" -msgstr "" -"Задача, которая оказывается в мастер-ветке, продолжает проверяться. Если " -"задача упала (падение теста) в мастере, она отключается, и запись об этом" -" добавляется в таблицу результатов. Все отключенные (не исправленные) " -"задания в конце семестра приведут к обнулению баллов за их выполнение. 
" -"Можно увидеть, что ваша задача является отключенной, по следующему признаку: " -"имя директории вашей задачи будет изменено с " -"``seq/nesterov_a_vector_sum`` на ``seq/nesterov_a_vector_sum_disabled``" - -#: ../../common_information/introduction.rst:24 -msgid "All resources for using the repository will be provided here:" -msgstr "Все материалы по использованию репозитория будут представлены здесь:" - -#: ../../common_information/introduction.rst:26 -msgid "" -"`Git for half an hour: A Beginner’s Guide `__" -msgstr "" -"`Git за полчаса: руководство для начинающих `__" - -#: ../../common_information/introduction.rst:27 -#, python-format -msgid "" -"`Getting Started with Git and GitHub: A Beginner’s Guide " -"`__" -msgstr "" -"`Знакомство с Git и GitHub: руководство для начинающих " -"`__" - -#: ../../common_information/introduction.rst:28 -msgid "" -"`Git: A Quick Start Guide to Using Core Operations with Explanations " -"`__" -msgstr "" -"`Git. Быстрый старт по использованию основных операций с объяснениями " -"`__" - -#: ../../common_information/introduction.rst:29 -msgid "" -"`Conflicts resolving in Git " -"`__" -msgstr "" -"`Разрешение конфликтов в Git " -"`__" - -#: ../../common_information/introduction.rst:30 -msgid "`Google testing framework (gtest) `__" -msgstr "" - -#: ../../common_information/introduction.rst:31 -msgid "" -"`GoogleTest Primer " -"`__" -msgstr "" - -#: ../../common_information/introduction.rst:32 -msgid "`GitHub Actions documentation `__" -msgstr "`GitHub Actions документация `__" - -#: ../../common_information/introduction.rst:33 -msgid "" -"`Parallel Programming Technologies. Message Passing Interface (MPI) " -"`__" -msgstr "" -"`Технологии параллельного программирования. 
Message Passing Interface " -"(MPI) `__" - -#: ../../common_information/introduction.rst:34 -msgid "" -"`Typing and Layout in the System LaTeX `__" -msgstr "" -"`Набор и вёрстка в системе LaTeX `__" - -#: ../../common_information/introduction.rst:35 -msgid "" -"`LaTeX for the beginners " -"`__" -msgstr "" -"`LaTeX для начинающих " -"`__" - -#: ../../common_information/introduction.rst:36 -msgid "`What is OpenMP? `__" -msgstr "`Что такое OpenMP? `__" - -#: ../../common_information/introduction.rst:37 -msgid "" -"`TBB-1 " -"`__" -msgstr "" -"`Средства разработки параллельных программ для систем с общей памятью. " -"Библиотека Intel Threading Building Blocks " -"`__" - -#: ../../common_information/introduction.rst:38 -msgid "" -"`Writing Multithreaded Applications in C++ `__" -msgstr "" -"`Написание многопоточных приложений на C++ `__" - -#: ../../common_information/introduction.rst:39 -msgid "" -"`Multithreading: New Features of the C++11 Standard " -"`__" -msgstr "" -"`Многопоточность, новые возможности стандарта C++11 " -"`__" - -#: ../../common_information/introduction.rst:40 -msgid "" -"`Introduction to Parallel Computing " -"`__" -msgstr "" -"`Введение в параллельные вычисления " -"`__" - -#: ../../common_information/introduction.rst:42 -msgid "" -"\\* *All instructions, repositories, and tables may be updated during the" -" learning process for better usability. Be prepared for changes, check " -"and update them periodically!!!*" -msgstr "" -"\\* *Все инструкции, репозитории и таблицы могут быть изменены в ходе " -"учебного процесса для более удобного использования, будьте готовы к " -"изменениям и периодически проверяйте это!!!!!!*" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-09 00:23+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/introduction.rst:2 +msgid "Introduction" +msgstr "Вводная информация" + +#: ../../common_information/introduction.rst:5 +msgid "Practice" +msgstr "Практика" + +#: ../../common_information/introduction.rst:7 +msgid "We work online" +msgstr "Работа происходит в режиме online" + +#: ../../common_information/introduction.rst:9 +msgid "Use GitHub repository" +msgstr "Используется GitHub репозиторий" + +#: ../../common_information/introduction.rst:10 +msgid "Use Pull Requests" +msgstr "Для задач задействуются Pull Request'ы" + +#: ../../common_information/introduction.rst:11 +msgid "Merge into the master branch" +msgstr "Рабочая git-ветка курса - master" + +#: ../../common_information/introduction.rst:12 +msgid "Test verification" +msgstr "Используются различные виды тестирования" + +#: ../../common_information/introduction.rst:14 +msgid "Task distribution is random for each student." +msgstr "Распределение задач производится случайным образом для каждого человека" + +#: ../../common_information/introduction.rst:15 +msgid "" +"An example for each technology can be found in the corresponding " +"directory: ``tasks//example``." +msgstr "" +"Пример для каждой технологии находится в директории: " +"``tasks//example``." 
+ +#: ../../common_information/introduction.rst:16 +msgid "" +"In each repository, the README.md contains a link to the course " +"documentation (**read it fully!!!**)." +msgstr "" +"В каждом из репозиториев в README.md находится документация курса " +"(просьба читать полностью!!!)" + +#: ../../common_information/introduction.rst:17 +msgid "" +"Additionally, each repository includes an example of a properly formatted" +" PULL REQUEST." +msgstr "Также в каждом репозитории представлен пример оформления PULL REQUEST'а" + +#: ../../common_information/introduction.rst:18 +msgid "Submission of all tasks is mandatory to pass the course." +msgstr "Все задачи обязательны к выполнению" + +#: ../../common_information/introduction.rst:19 +msgid "" +"A task that has been merged into the master branch continues to be " +"monitored. If a task fails in the master, it is disabled, and a record of" +" this is added to the score table. All disabled tasks will result in a " +"zero points result for those tasks at the end of the semester. It can be " +"seen that your task is disabled due to the following reason: the " +"directory of your task has been renamed from " +"``seq/nesterov_a_vector_sum`` to ``seq/nesterov_a_vector_sum_disabled``" +msgstr "" +"Задача, которая оказывается в мастер-ветке, продолжает проверяться. Если " +"задача упала (падение теста) в мастере, она отключается, и запись об этом" +" добавляется в таблицу результатов. Все отключенные (не исправленные) " +"задания в конце семестра приведут к обнулению баллов за их выполнение. 
" +"Можно увидеть, что ваша задача является отключенной, по следующему признаку: " +"имя директории вашей задачи будет изменено с " +"``seq/nesterov_a_vector_sum`` на ``seq/nesterov_a_vector_sum_disabled``" + +#: ../../common_information/introduction.rst:24 +msgid "All resources for using the repository will be provided here:" +msgstr "Все материалы по использованию репозитория будут представлены здесь:" + +#: ../../common_information/introduction.rst:26 +msgid "" +"`Git for half an hour: A Beginner’s Guide `__" +msgstr "" +"`Git за полчаса: руководство для начинающих `__" + +#: ../../common_information/introduction.rst:27 +#, python-format +msgid "" +"`Getting Started with Git and GitHub: A Beginner’s Guide " +"`__" +msgstr "" +"`Знакомство с Git и GitHub: руководство для начинающих " +"`__" + +#: ../../common_information/introduction.rst:28 +msgid "" +"`Git: A Quick Start Guide to Using Core Operations with Explanations " +"`__" +msgstr "" +"`Git. Быстрый старт по использованию основных операций с объяснениями " +"`__" + +#: ../../common_information/introduction.rst:29 +msgid "" +"`Conflicts resolving in Git " +"`__" +msgstr "" +"`Разрешение конфликтов в Git " +"`__" + +#: ../../common_information/introduction.rst:30 +msgid "`Google testing framework (gtest) `__" +msgstr "" + +#: ../../common_information/introduction.rst:31 +msgid "" +"`GoogleTest Primer " +"`__" +msgstr "" + +#: ../../common_information/introduction.rst:32 +msgid "`GitHub Actions documentation `__" +msgstr "`GitHub Actions документация `__" + +#: ../../common_information/introduction.rst:33 +msgid "" +"`Parallel Programming Technologies. Message Passing Interface (MPI) " +"`__" +msgstr "" +"`Технологии параллельного программирования. 
Message Passing Interface " +"(MPI) `__" + +#: ../../common_information/introduction.rst:34 +msgid "" +"`Typing and Layout in the System LaTeX `__" +msgstr "" +"`Набор и вёрстка в системе LaTeX `__" + +#: ../../common_information/introduction.rst:35 +msgid "" +"`LaTeX for the beginners " +"`__" +msgstr "" +"`LaTeX для начинающих " +"`__" + +#: ../../common_information/introduction.rst:36 +msgid "`What is OpenMP? `__" +msgstr "`Что такое OpenMP? `__" + +#: ../../common_information/introduction.rst:37 +msgid "" +"`TBB-1 " +"`__" +msgstr "" +"`Средства разработки параллельных программ для систем с общей памятью. " +"Библиотека Intel Threading Building Blocks " +"`__" + +#: ../../common_information/introduction.rst:38 +msgid "" +"`Writing Multithreaded Applications in C++ `__" +msgstr "" +"`Написание многопоточных приложений на C++ `__" + +#: ../../common_information/introduction.rst:39 +msgid "" +"`Multithreading: New Features of the C++11 Standard " +"`__" +msgstr "" +"`Многопоточность, новые возможности стандарта C++11 " +"`__" + +#: ../../common_information/introduction.rst:40 +msgid "" +"`Introduction to Parallel Computing " +"`__" +msgstr "" +"`Введение в параллельные вычисления " +"`__" + +#: ../../common_information/introduction.rst:42 +msgid "" +"\\* *All instructions, repositories, and tables may be updated during the" +" learning process for better usability. 
Be prepared for changes, check " +"and update them periodically!!!*" +msgstr "" +"\\* *Все инструкции, репозитории и таблицы могут быть изменены в ходе " +"учебного процесса для более удобного использования, будьте готовы к " +"изменениям и периодически проверяйте это!!!!!!*" + diff --git a/docs/locale/ru/LC_MESSAGES/common_information/points.po b/docs/locale/ru/LC_MESSAGES/common_information/points.po index 3a6096d659..b145d32266 100644 --- a/docs/locale/ru/LC_MESSAGES/common_information/points.po +++ b/docs/locale/ru/LC_MESSAGES/common_information/points.po @@ -1,413 +1,413 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-09 00:58+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/points.rst:2 -msgid "Points" -msgstr "Балльно-рейтинговая система" - -#: ../../common_information/points.rst:4 -msgid "For “process parallelism” semester" -msgstr "Для семестра, где студенты изучают параллелизм на процессах" - -#: ../../common_information/points.rst:6 -msgid "1st MPI task" -msgstr "Первая MPI задача" - -#: ../../common_information/points.rst:9 ../../common_information/points.rst:17 -#: ../../common_information/points.rst:25 -#: ../../common_information/points.rst:37 -#: ../../common_information/points.rst:45 -#: ../../common_information/points.rst:53 -#: ../../common_information/points.rst:61 -#: ../../common_information/points.rst:70 -msgid "Solution" -msgstr "Решение" - -#: ../../common_information/points.rst:11 -#: ../../common_information/points.rst:27 -#: ../../common_information/points.rst:72 -#: ../../common_information/points.rst:114 -msgid "10" -msgstr "" - -#: ../../common_information/points.rst:14 -msgid "2nd MPI task" -msgstr "Вторая MPI задача" - -#: ../../common_information/points.rst:17 -#: ../../common_information/points.rst:25 -#: ../../common_information/points.rst:45 -#: ../../common_information/points.rst:53 -#: ../../common_information/points.rst:61 -#: ../../common_information/points.rst:70 -msgid "Performance" -msgstr "Производительность" - -#: ../../common_information/points.rst:19 -msgid "15" -msgstr "" - -#: ../../common_information/points.rst:19 -#: ../../common_information/points.rst:114 -#: ../../common_information/points.rst:124 -#: ../../common_information/points.rst:136 -msgid "5" -msgstr "" - -#: ../../common_information/points.rst:22 -msgid "3rd MPI task" -msgstr "Третья MPI задача" - -#: ../../common_information/points.rst:27 -msgid "20" -msgstr "" - -#: ../../common_information/points.rst:30 -msgid "**Total : 60 points**" -msgstr 
"**Всего : 60 баллов**" - -#: ../../common_information/points.rst:32 -msgid "For “thread parallelism” semester" -msgstr "Для семестра, где студенты изучают параллелизм на потоках" - -#: ../../common_information/points.rst:34 -msgid "Sequential version" -msgstr "Последовательная версия" - -#: ../../common_information/points.rst:39 -#: ../../common_information/points.rst:125 -#: ../../common_information/points.rst:138 -msgid "4" -msgstr "" - -#: ../../common_information/points.rst:42 -msgid "OpenMP version" -msgstr "OpenMP версия" - -#: ../../common_information/points.rst:47 -#: ../../common_information/points.rst:55 -#: ../../common_information/points.rst:63 -msgid "6" -msgstr "" - -#: ../../common_information/points.rst:47 -#: ../../common_information/points.rst:55 -#: ../../common_information/points.rst:126 -#: ../../common_information/points.rst:139 -msgid "3" -msgstr "" - -#: ../../common_information/points.rst:50 -msgid "TBB version" -msgstr "TBB версия" - -#: ../../common_information/points.rst:58 -msgid "std::thread version" -msgstr "std::thread версия" - -#: ../../common_information/points.rst:63 -#: ../../common_information/points.rst:72 -msgid "8" -msgstr "" - -#: ../../common_information/points.rst:66 -msgid "“MPI + threads” version (The threading technology is chosen randomly)" -msgstr "" -"“MPI + threads” версия (Технология параллелизма на потоках будет выбрана " -"случайным образом)" - -#: ../../common_information/points.rst:75 -msgid "**Total : 54 points**" -msgstr "**Всего : 54 балла**" - -#: ../../common_information/points.rst:77 -msgid "The rule for earning performance points." -msgstr "Правило получения баллов за производительность" - -#: ../../common_information/points.rst:79 -msgid "" -"The ratio of efficiency percentage to points percentage, where the " -"maximum points is 100% and the minimum points is 0%." 
-msgstr "" -"Соотношение процента эффективности к проценту баллов за " -"производительность, где максимум баллов - 100%, а минимум баллов - 0% " - -#: ../../common_information/points.rst:81 -msgid "If the score is not an integer, it is rounded up." -msgstr "Если получается не целочисленное значение баллов, то баллы округляются в большую сторону" - -#: ../../common_information/points.rst:84 -msgid "Efficiency (%)" -msgstr "Эффективность" - -#: ../../common_information/points.rst:84 -msgid "Points percentage" -msgstr "Процент баллов за производительность" - -#: ../../common_information/points.rst:86 -msgid ">= 50%" -msgstr "" - -#: ../../common_information/points.rst:86 -msgid "100%" -msgstr "" - -#: ../../common_information/points.rst:88 -msgid "[45, 50)" -msgstr "" - -#: ../../common_information/points.rst:88 -msgid "90%" -msgstr "" - -#: ../../common_information/points.rst:90 -msgid "[42, 45)" -msgstr "" - -#: ../../common_information/points.rst:90 -msgid "80%" -msgstr "" - -#: ../../common_information/points.rst:92 -msgid "[40, 42)" -msgstr "" - -#: ../../common_information/points.rst:92 -msgid "70%" -msgstr "" - -#: ../../common_information/points.rst:94 -msgid "[37, 40)" -msgstr "" - -#: ../../common_information/points.rst:94 -msgid "60%" -msgstr "" - -#: ../../common_information/points.rst:96 -msgid "[35, 37)" -msgstr "" - -#: ../../common_information/points.rst:96 -msgid "50%" -msgstr "" - -#: ../../common_information/points.rst:98 -msgid "[32, 35)" -msgstr "" - -#: ../../common_information/points.rst:98 -msgid "40%" -msgstr "" - -#: ../../common_information/points.rst:100 -msgid "[30, 32)" -msgstr "" - -#: ../../common_information/points.rst:100 -msgid "30%" -msgstr "" - -#: ../../common_information/points.rst:102 -msgid "[27, 30)" -msgstr "" - -#: ../../common_information/points.rst:102 -msgid "20%" -msgstr "" - -#: ../../common_information/points.rst:104 -msgid "[25, 27)" -msgstr "" - -#: ../../common_information/points.rst:104 -msgid "10%" -msgstr "" - 
-#: ../../common_information/points.rst:106 -msgid "< 25%" -msgstr "" - -#: ../../common_information/points.rst:106 -msgid "0%" -msgstr "" - -#: ../../common_information/points.rst:109 -msgid "Report" -msgstr "Отчет" - -#: ../../common_information/points.rst:112 -msgid "Completeness" -msgstr "Наличие всех требуемых пунктов" - -#: ../../common_information/points.rst:112 -msgid "Text Quality" -msgstr "Качество текста" - -#: ../../common_information/points.rst:112 -msgid "Formatting Quality" -msgstr "Качество оформления" - -#: ../../common_information/points.rst:112 -msgid "Total" -msgstr "Итог" - -#: ../../common_information/points.rst:114 -msgid "2.5" -msgstr "" - -#: ../../common_information/points.rst:117 -msgid "Conversion of points into exam assessment or pass/fail" -msgstr "Перевод баллов в экзаменационную оценку или зачет/незачет" - -#: ../../common_information/points.rst:119 -msgid "For 5-point grading system" -msgstr "5-ти балльная система оценки" - -#: ../../common_information/points.rst:122 -#: ../../common_information/points.rst:133 -msgid "Points range" -msgstr "Диапозон баллов" - -#: ../../common_information/points.rst:122 -#: ../../common_information/points.rst:133 -msgid "Exam Assessment" -msgstr "Экзаменационная оценка" - -#: ../../common_information/points.rst:122 -#: ../../common_information/points.rst:133 -msgid "Student Pass" -msgstr "Зачет" - -#: ../../common_information/points.rst:124 -msgid "[87, 100]" -msgstr "" - -#: ../../common_information/points.rst:124 -#: ../../common_information/points.rst:125 -#: ../../common_information/points.rst:126 -#: ../../common_information/points.rst:135 -#: ../../common_information/points.rst:136 -#: ../../common_information/points.rst:137 -#: ../../common_information/points.rst:138 -#: ../../common_information/points.rst:139 -msgid "Passed" -msgstr "Зачет" - -#: ../../common_information/points.rst:125 -msgid "[70, 87)" -msgstr "" - -#: ../../common_information/points.rst:126 -#: 
../../common_information/points.rst:139 -msgid "[50, 70)" -msgstr "" - -#: ../../common_information/points.rst:127 -#: ../../common_information/points.rst:140 -msgid "< 50" -msgstr "" - -#: ../../common_information/points.rst:127 -#: ../../common_information/points.rst:140 -msgid "2" -msgstr "" - -#: ../../common_information/points.rst:127 -#: ../../common_information/points.rst:140 -msgid "Not Passed" -msgstr "Незачет" - -#: ../../common_information/points.rst:130 -msgid "For 7-point grading system **(our current system)**" -msgstr "7-ми балльная система оценки **(наша текущая система)**" - -#: ../../common_information/points.rst:135 -msgid "[99, 100]" -msgstr "" - -#: ../../common_information/points.rst:135 -msgid "5.5" -msgstr "" - -#: ../../common_information/points.rst:136 -msgid "[92, 99)" -msgstr "" - -#: ../../common_information/points.rst:137 -msgid "[82, 92)" -msgstr "" - -#: ../../common_information/points.rst:137 -msgid "4.5" -msgstr "" - -#: ../../common_information/points.rst:138 -msgid "[70, 82)" -msgstr "" - -#: ../../common_information/points.rst:143 -msgid "Penalties:" -msgstr "Штрафные санкции:" - -#: ../../common_information/points.rst:145 -msgid "A deadline will be set for each version." -msgstr "Для каждой версии будет установлен свой крайний срок сдачи." - -#: ../../common_information/points.rst:146 -msgid "" -"1 point is deducted from the version’s score for each day of delay in " -"submission." -msgstr "За каждый день просрочки из оценки версии вычитается 1 балл" - -#: ../../common_information/points.rst:147 -msgid "" -"The task is considered submitted when it is merged into the master/main " -"branch." -msgstr "Задание считается выполненным, когда оно попал в master/main ветку." - -#: ../../common_information/points.rst:148 -msgid "" -"The submission time is defined as the timestamp of the last commit that " -"successfully passes the CI pipeline." 
-msgstr "" -"Фиксированное время сдачи работы определяется как время последнего " -"коммита, который успешно прошел CI." - -#: ../../common_information/points.rst:150 -msgid "Comments:" -msgstr "Комментарии" - -#: ../../common_information/points.rst:152 -msgid "It is forbidden to write the report if all tasks are not completed." -msgstr "" -"Запрещено писать отчет, если не выполнены все задания, которые связаны с " -"этим отчетом." - -#: ../../common_information/points.rst:153 -msgid "" -"Please keep in mind that one week before the end of the semester, the " -"repository will be closed for final assessment." -msgstr "" -"Просьба иметь ввиду, что за неделю до конца семестра репозиторий будет " -"закрыт для подведения окончательных итогов." +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-09 00:58+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/points.rst:2 +msgid "Points" +msgstr "Балльно-рейтинговая система" + +#: ../../common_information/points.rst:4 +msgid "For “process parallelism” semester" +msgstr "Для семестра, где студенты изучают параллелизм на процессах" + +#: ../../common_information/points.rst:6 +msgid "1st MPI task" +msgstr "Первая MPI задача" + +#: ../../common_information/points.rst:9 ../../common_information/points.rst:17 +#: ../../common_information/points.rst:25 +#: ../../common_information/points.rst:37 +#: ../../common_information/points.rst:45 +#: ../../common_information/points.rst:53 +#: ../../common_information/points.rst:61 +#: ../../common_information/points.rst:70 +msgid "Solution" +msgstr "Решение" + +#: ../../common_information/points.rst:11 +#: ../../common_information/points.rst:27 +#: ../../common_information/points.rst:72 +#: ../../common_information/points.rst:114 +msgid "10" +msgstr "" + +#: ../../common_information/points.rst:14 +msgid "2nd MPI task" +msgstr "Вторая MPI задача" + +#: ../../common_information/points.rst:17 +#: ../../common_information/points.rst:25 +#: ../../common_information/points.rst:45 +#: ../../common_information/points.rst:53 +#: ../../common_information/points.rst:61 +#: ../../common_information/points.rst:70 +msgid "Performance" +msgstr "Производительность" + +#: ../../common_information/points.rst:19 +msgid "15" +msgstr "" + +#: ../../common_information/points.rst:19 +#: ../../common_information/points.rst:114 +#: ../../common_information/points.rst:124 +#: ../../common_information/points.rst:136 +msgid "5" +msgstr "" + +#: ../../common_information/points.rst:22 +msgid "3rd MPI task" +msgstr "Третья MPI задача" + +#: ../../common_information/points.rst:27 +msgid "20" +msgstr "" + +#: ../../common_information/points.rst:30 +msgid "**Total : 60 points**" +msgstr 
"**Всего : 60 баллов**" + +#: ../../common_information/points.rst:32 +msgid "For “thread parallelism” semester" +msgstr "Для семестра, где студенты изучают параллелизм на потоках" + +#: ../../common_information/points.rst:34 +msgid "Sequential version" +msgstr "Последовательная версия" + +#: ../../common_information/points.rst:39 +#: ../../common_information/points.rst:125 +#: ../../common_information/points.rst:138 +msgid "4" +msgstr "" + +#: ../../common_information/points.rst:42 +msgid "OpenMP version" +msgstr "OpenMP версия" + +#: ../../common_information/points.rst:47 +#: ../../common_information/points.rst:55 +#: ../../common_information/points.rst:63 +msgid "6" +msgstr "" + +#: ../../common_information/points.rst:47 +#: ../../common_information/points.rst:55 +#: ../../common_information/points.rst:126 +#: ../../common_information/points.rst:139 +msgid "3" +msgstr "" + +#: ../../common_information/points.rst:50 +msgid "TBB version" +msgstr "TBB версия" + +#: ../../common_information/points.rst:58 +msgid "std::thread version" +msgstr "std::thread версия" + +#: ../../common_information/points.rst:63 +#: ../../common_information/points.rst:72 +msgid "8" +msgstr "" + +#: ../../common_information/points.rst:66 +msgid "“MPI + threads” version (The threading technology is chosen randomly)" +msgstr "" +"“MPI + threads” версия (Технология параллелизма на потоках будет выбрана " +"случайным образом)" + +#: ../../common_information/points.rst:75 +msgid "**Total : 54 points**" +msgstr "**Всего : 54 балла**" + +#: ../../common_information/points.rst:77 +msgid "The rule for earning performance points." +msgstr "Правило получения баллов за производительность" + +#: ../../common_information/points.rst:79 +msgid "" +"The ratio of efficiency percentage to points percentage, where the " +"maximum points is 100% and the minimum points is 0%." 
+msgstr "" +"Соотношение процента эффективности к проценту баллов за " +"производительность, где максимум баллов - 100%, а минимум баллов - 0% " + +#: ../../common_information/points.rst:81 +msgid "If the score is not an integer, it is rounded up." +msgstr "Если получается не целочисленное значение баллов, то баллы округляются в большую сторону" + +#: ../../common_information/points.rst:84 +msgid "Efficiency (%)" +msgstr "Эффективность" + +#: ../../common_information/points.rst:84 +msgid "Points percentage" +msgstr "Процент баллов за производительность" + +#: ../../common_information/points.rst:86 +msgid ">= 50%" +msgstr "" + +#: ../../common_information/points.rst:86 +msgid "100%" +msgstr "" + +#: ../../common_information/points.rst:88 +msgid "[45, 50)" +msgstr "" + +#: ../../common_information/points.rst:88 +msgid "90%" +msgstr "" + +#: ../../common_information/points.rst:90 +msgid "[42, 45)" +msgstr "" + +#: ../../common_information/points.rst:90 +msgid "80%" +msgstr "" + +#: ../../common_information/points.rst:92 +msgid "[40, 42)" +msgstr "" + +#: ../../common_information/points.rst:92 +msgid "70%" +msgstr "" + +#: ../../common_information/points.rst:94 +msgid "[37, 40)" +msgstr "" + +#: ../../common_information/points.rst:94 +msgid "60%" +msgstr "" + +#: ../../common_information/points.rst:96 +msgid "[35, 37)" +msgstr "" + +#: ../../common_information/points.rst:96 +msgid "50%" +msgstr "" + +#: ../../common_information/points.rst:98 +msgid "[32, 35)" +msgstr "" + +#: ../../common_information/points.rst:98 +msgid "40%" +msgstr "" + +#: ../../common_information/points.rst:100 +msgid "[30, 32)" +msgstr "" + +#: ../../common_information/points.rst:100 +msgid "30%" +msgstr "" + +#: ../../common_information/points.rst:102 +msgid "[27, 30)" +msgstr "" + +#: ../../common_information/points.rst:102 +msgid "20%" +msgstr "" + +#: ../../common_information/points.rst:104 +msgid "[25, 27)" +msgstr "" + +#: ../../common_information/points.rst:104 +msgid "10%" +msgstr "" + 
+#: ../../common_information/points.rst:106 +msgid "< 25%" +msgstr "" + +#: ../../common_information/points.rst:106 +msgid "0%" +msgstr "" + +#: ../../common_information/points.rst:109 +msgid "Report" +msgstr "Отчет" + +#: ../../common_information/points.rst:112 +msgid "Completeness" +msgstr "Наличие всех требуемых пунктов" + +#: ../../common_information/points.rst:112 +msgid "Text Quality" +msgstr "Качество текста" + +#: ../../common_information/points.rst:112 +msgid "Formatting Quality" +msgstr "Качество оформления" + +#: ../../common_information/points.rst:112 +msgid "Total" +msgstr "Итог" + +#: ../../common_information/points.rst:114 +msgid "2.5" +msgstr "" + +#: ../../common_information/points.rst:117 +msgid "Conversion of points into exam assessment or pass/fail" +msgstr "Перевод баллов в экзаменационную оценку или зачет/незачет" + +#: ../../common_information/points.rst:119 +msgid "For 5-point grading system" +msgstr "5-ти балльная система оценки" + +#: ../../common_information/points.rst:122 +#: ../../common_information/points.rst:133 +msgid "Points range" +msgstr "Диапазон баллов" + +#: ../../common_information/points.rst:122 +#: ../../common_information/points.rst:133 +msgid "Exam Assessment" +msgstr "Экзаменационная оценка" + +#: ../../common_information/points.rst:122 +#: ../../common_information/points.rst:133 +msgid "Student Pass" +msgstr "Зачет" + +#: ../../common_information/points.rst:124 +msgid "[87, 100]" +msgstr "" + +#: ../../common_information/points.rst:124 +#: ../../common_information/points.rst:125 +#: ../../common_information/points.rst:126 +#: ../../common_information/points.rst:135 +#: ../../common_information/points.rst:136 +#: ../../common_information/points.rst:137 +#: ../../common_information/points.rst:138 +#: ../../common_information/points.rst:139 +msgid "Passed" +msgstr "Зачет" + +#: ../../common_information/points.rst:125 +msgid "[70, 87)" +msgstr "" + +#: ../../common_information/points.rst:126 +#: 
../../common_information/points.rst:139 +msgid "[50, 70)" +msgstr "" + +#: ../../common_information/points.rst:127 +#: ../../common_information/points.rst:140 +msgid "< 50" +msgstr "" + +#: ../../common_information/points.rst:127 +#: ../../common_information/points.rst:140 +msgid "2" +msgstr "" + +#: ../../common_information/points.rst:127 +#: ../../common_information/points.rst:140 +msgid "Not Passed" +msgstr "Незачет" + +#: ../../common_information/points.rst:130 +msgid "For 7-point grading system **(our current system)**" +msgstr "7-ми балльная система оценки **(наша текущая система)**" + +#: ../../common_information/points.rst:135 +msgid "[99, 100]" +msgstr "" + +#: ../../common_information/points.rst:135 +msgid "5.5" +msgstr "" + +#: ../../common_information/points.rst:136 +msgid "[92, 99)" +msgstr "" + +#: ../../common_information/points.rst:137 +msgid "[82, 92)" +msgstr "" + +#: ../../common_information/points.rst:137 +msgid "4.5" +msgstr "" + +#: ../../common_information/points.rst:138 +msgid "[70, 82)" +msgstr "" + +#: ../../common_information/points.rst:143 +msgid "Penalties:" +msgstr "Штрафные санкции:" + +#: ../../common_information/points.rst:145 +msgid "A deadline will be set for each version." +msgstr "Для каждой версии будет установлен свой крайний срок сдачи." + +#: ../../common_information/points.rst:146 +msgid "" +"1 point is deducted from the version’s score for each day of delay in " +"submission." +msgstr "За каждый день просрочки из оценки версии вычитается 1 балл." + +#: ../../common_information/points.rst:147 +msgid "" +"The task is considered submitted when it is merged into the master/main " +"branch." +msgstr "Задание считается выполненным, когда оно попало в master/main ветку." + +#: ../../common_information/points.rst:148 +msgid "" +"The submission time is defined as the timestamp of the last commit that " +"successfully passes the CI pipeline." 
+msgstr "" +"Фиксированное время сдачи работы определяется как время последнего " +"коммита, который успешно прошел CI." + +#: ../../common_information/points.rst:150 +msgid "Comments:" +msgstr "Комментарии" + +#: ../../common_information/points.rst:152 +msgid "It is forbidden to write the report if all tasks are not completed." +msgstr "" +"Запрещено писать отчет, если не выполнены все задания, которые связаны с " +"этим отчетом." + +#: ../../common_information/points.rst:153 +msgid "" +"Please keep in mind that one week before the end of the semester, the " +"repository will be closed for final assessment." +msgstr "" +"Просьба иметь ввиду, что за неделю до конца семестра репозиторий будет " +"закрыт для подведения окончательных итогов." diff --git a/docs/locale/ru/LC_MESSAGES/common_information/processes_tasks.po b/docs/locale/ru/LC_MESSAGES/common_information/processes_tasks.po index 3653104aaf..25f33ba86f 100644 --- a/docs/locale/ru/LC_MESSAGES/common_information/processes_tasks.po +++ b/docs/locale/ru/LC_MESSAGES/common_information/processes_tasks.po @@ -1,731 +1,731 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-05 13:29+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/processes_tasks.rst:2 -msgid "Processes parallelism tasks" -msgstr "Задачи параллелизма на процессах" - -#: ../../common_information/processes_tasks.rst:5 -msgid "First task" -msgstr "Первая задача" - -#: ../../common_information/processes_tasks.rst:8 -#: ../../common_information/processes_tasks.rst:69 -#: ../../common_information/processes_tasks.rst:122 -msgid "Variant Number" -msgstr "Номер варианта" - -#: ../../common_information/processes_tasks.rst:8 -#: ../../common_information/processes_tasks.rst:69 -#: ../../common_information/processes_tasks.rst:122 -msgid "Task" -msgstr "Задача" - -#: ../../common_information/processes_tasks.rst:10 -#: ../../common_information/processes_tasks.rst:71 -#: ../../common_information/processes_tasks.rst:124 -msgid "1" -msgstr "" - -#: ../../common_information/processes_tasks.rst:10 -msgid "Sum of vector elements" -msgstr "Сумма элементов вектора" - -#: ../../common_information/processes_tasks.rst:12 -#: ../../common_information/processes_tasks.rst:73 -#: ../../common_information/processes_tasks.rst:126 -msgid "2" -msgstr "" - -#: ../../common_information/processes_tasks.rst:12 -msgid "Calculating the average value of vector elements" -msgstr "Вычисление среднего значения элементов вектора" - -#: ../../common_information/processes_tasks.rst:14 -#: ../../common_information/processes_tasks.rst:75 -#: ../../common_information/processes_tasks.rst:128 -msgid "3" -msgstr "" - -#: ../../common_information/processes_tasks.rst:14 -msgid "Maximum value of vector elements" -msgstr "Максимальное значение элементов вектора" - -#: ../../common_information/processes_tasks.rst:16 -#: ../../common_information/processes_tasks.rst:77 -#: ../../common_information/processes_tasks.rst:130 -msgid "4" -msgstr "" - -#: ../../common_information/processes_tasks.rst:16 -msgid 
"Minimum value of vector elements" -msgstr "Минимальное значение элементов вектора" - -#: ../../common_information/processes_tasks.rst:18 -#: ../../common_information/processes_tasks.rst:79 -#: ../../common_information/processes_tasks.rst:132 -msgid "5" -msgstr "" - -#: ../../common_information/processes_tasks.rst:18 -msgid "" -"Finding the number of sign alternations between adjacent elements of the " -"vector" -msgstr "Нахождение числа чередований знаков значений соседних элементов вектора" - -#: ../../common_information/processes_tasks.rst:20 -#: ../../common_information/processes_tasks.rst:81 -#: ../../common_information/processes_tasks.rst:134 -#: ../../common_information/processes_tasks.rst:258 -msgid "6" -msgstr "" - -#: ../../common_information/processes_tasks.rst:20 -msgid "" -"Finding the number of order violations between adjacent elements of the " -"vector" -msgstr "Нахождение числа нарушений упорядоченности соседних элементов вектора" - -#: ../../common_information/processes_tasks.rst:22 -#: ../../common_information/processes_tasks.rst:83 -#: ../../common_information/processes_tasks.rst:136 -msgid "7" -msgstr "" - -#: ../../common_information/processes_tasks.rst:22 -msgid "Finding the most similar adjacent elements of the vector" -msgstr "Нахождение наиболее близких соседних элементов вектора" - -#: ../../common_information/processes_tasks.rst:24 -#: ../../common_information/processes_tasks.rst:85 -#: ../../common_information/processes_tasks.rst:138 -msgid "8" -msgstr "" - -#: ../../common_information/processes_tasks.rst:24 -msgid "Finding the most different adjacent elements of the vector" -msgstr "Нахождение наиболее отличающихся по значению соседних элементов вектора" - -#: ../../common_information/processes_tasks.rst:26 -#: ../../common_information/processes_tasks.rst:87 -#: ../../common_information/processes_tasks.rst:140 -msgid "9" -msgstr "" - -#: ../../common_information/processes_tasks.rst:26 -msgid "Scalar product of vectors" -msgstr 
"Скалярное произведение векторов" - -#: ../../common_information/processes_tasks.rst:28 -#: ../../common_information/processes_tasks.rst:89 -#: ../../common_information/processes_tasks.rst:142 -msgid "10" -msgstr "" - -#: ../../common_information/processes_tasks.rst:28 -msgid "Sum of matrix elements" -msgstr "Сумма элементов матрицы" - -#: ../../common_information/processes_tasks.rst:30 -#: ../../common_information/processes_tasks.rst:91 -#: ../../common_information/processes_tasks.rst:144 -msgid "11" -msgstr "" - -#: ../../common_information/processes_tasks.rst:30 -msgid "Sum of values by rows in the matrix" -msgstr "Сумма значений по строкам матрицы" - -#: ../../common_information/processes_tasks.rst:32 -#: ../../common_information/processes_tasks.rst:93 -#: ../../common_information/processes_tasks.rst:147 -msgid "12" -msgstr "" - -#: ../../common_information/processes_tasks.rst:32 -msgid "Sum of values by columns in the matrix" -msgstr "Сумма значений по столбцам матрицы" - -#: ../../common_information/processes_tasks.rst:34 -#: ../../common_information/processes_tasks.rst:95 -#: ../../common_information/processes_tasks.rst:150 -msgid "13" -msgstr "" - -#: ../../common_information/processes_tasks.rst:34 -msgid "Maximum value of matrix elements" -msgstr "Максимальное значение элементов матрицы" - -#: ../../common_information/processes_tasks.rst:36 -#: ../../common_information/processes_tasks.rst:97 -#: ../../common_information/processes_tasks.rst:153 -msgid "14" -msgstr "" - -#: ../../common_information/processes_tasks.rst:36 -msgid "Minimum value of matrix elements" -msgstr "Минимальное значение элементов матрицы" - -#: ../../common_information/processes_tasks.rst:38 -#: ../../common_information/processes_tasks.rst:99 -#: ../../common_information/processes_tasks.rst:155 -msgid "15" -msgstr "" - -#: ../../common_information/processes_tasks.rst:38 -msgid "Finding maximum values by rows in the matrix" -msgstr "Нахождение максимальных значений по строкам матрицы" - 
-#: ../../common_information/processes_tasks.rst:40 -#: ../../common_information/processes_tasks.rst:101 -#: ../../common_information/processes_tasks.rst:157 -msgid "16" -msgstr "" - -#: ../../common_information/processes_tasks.rst:40 -msgid "Finding maximum values by columns in the matrix" -msgstr "Нахождение максимальных значений по столбцам матрицы" - -#: ../../common_information/processes_tasks.rst:42 -#: ../../common_information/processes_tasks.rst:103 -#: ../../common_information/processes_tasks.rst:159 -msgid "17" -msgstr "" - -#: ../../common_information/processes_tasks.rst:42 -msgid "Finding minimum values by rows in the matrix" -msgstr "Нахождение минимальных значений по строкам матрицы" - -#: ../../common_information/processes_tasks.rst:44 -#: ../../common_information/processes_tasks.rst:105 -#: ../../common_information/processes_tasks.rst:161 -msgid "18" -msgstr "" - -#: ../../common_information/processes_tasks.rst:44 -msgid "Finding minimum values by columns in the matrix" -msgstr "Нахождение минимальных значений по столбцам матрицы" - -#: ../../common_information/processes_tasks.rst:46 -#: ../../common_information/processes_tasks.rst:107 -#: ../../common_information/processes_tasks.rst:163 -msgid "19" -msgstr "" - -#: ../../common_information/processes_tasks.rst:46 -msgid "Integration – rectangle method" -msgstr "Интегрирование – метод прямоугольников" - -#: ../../common_information/processes_tasks.rst:48 -#: ../../common_information/processes_tasks.rst:109 -#: ../../common_information/processes_tasks.rst:165 -msgid "20" -msgstr "" - -#: ../../common_information/processes_tasks.rst:48 -msgid "Integration – trapezoidal method" -msgstr "Интегрирование – метод трапеций" - -#: ../../common_information/processes_tasks.rst:50 -#: ../../common_information/processes_tasks.rst:111 -#: ../../common_information/processes_tasks.rst:167 -#: ../../common_information/processes_tasks.rst:268 -msgid "21" -msgstr "" - -#: ../../common_information/processes_tasks.rst:50 
-msgid "Integration – Monte Carlo method" -msgstr "Интегрирование – метод Монте-Карло" - -#: ../../common_information/processes_tasks.rst:52 -#: ../../common_information/processes_tasks.rst:113 -#: ../../common_information/processes_tasks.rst:169 -msgid "22" -msgstr "" - -#: ../../common_information/processes_tasks.rst:52 -msgid "Counting the number of alphabetical characters in a string" -msgstr "Подсчет числа буквенных символов в строке" - -#: ../../common_information/processes_tasks.rst:54 -#: ../../common_information/processes_tasks.rst:115 -#: ../../common_information/processes_tasks.rst:171 -msgid "23" -msgstr "" - -#: ../../common_information/processes_tasks.rst:54 -msgid "Counting the frequency of a character in a string" -msgstr "Подсчет частоты символа в строке" - -#: ../../common_information/processes_tasks.rst:56 -#: ../../common_information/processes_tasks.rst:173 -msgid "24" -msgstr "" - -#: ../../common_information/processes_tasks.rst:56 -msgid "Counting the number of words in a string" -msgstr "Подсчет числа слов в строке" - -#: ../../common_information/processes_tasks.rst:58 -#: ../../common_information/processes_tasks.rst:175 -msgid "25" -msgstr "" - -#: ../../common_information/processes_tasks.rst:58 -msgid "Counting the number of sentences in a string" -msgstr "Подсчет числа предложений в строке" - -#: ../../common_information/processes_tasks.rst:60 -#: ../../common_information/processes_tasks.rst:177 -msgid "26" -msgstr "" - -#: ../../common_information/processes_tasks.rst:60 -msgid "Checking lexicographical order of two strings" -msgstr "Проверка лексикографической упорядоченности двух строк" - -#: ../../common_information/processes_tasks.rst:62 -#: ../../common_information/processes_tasks.rst:179 -msgid "27" -msgstr "" - -#: ../../common_information/processes_tasks.rst:62 -msgid "Counting the number of differing characters between two strings" -msgstr "Подсчет числа несовпадающих символов двух строк" - -#: 
../../common_information/processes_tasks.rst:66 -msgid "Second task" -msgstr "Вторая задача" - -#: ../../common_information/processes_tasks.rst:71 -msgid "Broadcast (one to all transfer)" -msgstr "Передача от одного всем (broadcast)" - -#: ../../common_information/processes_tasks.rst:73 -msgid "Reduce (all to one transfer)" -msgstr "Передача от всех одному (reduce)" - -#: ../../common_information/processes_tasks.rst:75 -msgid "Allreduce (all to one and broadcast)" -msgstr "Передача от всех одному и рассылка (allreduce)" - -#: ../../common_information/processes_tasks.rst:77 -msgid "Scatter (one to all transfer)" -msgstr "Обобщенная передача от одного всем (scatter)" - -#: ../../common_information/processes_tasks.rst:79 -msgid "Gather (all to one transfer)" -msgstr "Обобщенная передача от всех одному (gather)" - -#: ../../common_information/processes_tasks.rst:81 -msgid "Line" -msgstr "Линейка" - -#: ../../common_information/processes_tasks.rst:83 -msgid "Ring" -msgstr "Кольцо" - -#: ../../common_information/processes_tasks.rst:85 -msgid "Star" -msgstr "Звезда" - -#: ../../common_information/processes_tasks.rst:87 -msgid "Torus Grid" -msgstr "Решетка-тор" - -#: ../../common_information/processes_tasks.rst:89 -msgid "Hypercube" -msgstr "Гиперкуб" - -#: ../../common_information/processes_tasks.rst:91 -msgid "Horizontal strip scheme - matrix-vector multiplication" -msgstr "Ленточная горизонтальная схема - умножение матрицы на вектор" - -#: ../../common_information/processes_tasks.rst:93 -msgid "Vertical strip scheme - matrix-vector multiplication" -msgstr "Ленточная вертикальная схема - умножение матрицы на вектор" - -#: ../../common_information/processes_tasks.rst:95 -msgid "" -"Horizontal strip scheme – partitioning only matrix A - matrix-matrix " -"multiplication" -msgstr "Ленточная горизонтальная схема - разбиение только матрицы А - умножение матрицы на матрицу" - -#: ../../common_information/processes_tasks.rst:97 -msgid "" -"Horizontal strip scheme A, vertical 
strip scheme B - matrix-matrix " -"multiplication" -msgstr "Ленточная горизонтальная схема А, вертикальное В - умножение матрицы на матрицу" - -#: ../../common_information/processes_tasks.rst:99 -msgid "Gaussian method – horizontal strip scheme" -msgstr "Метод Гаусса – ленточная горизонтальная схема" - -#: ../../common_information/processes_tasks.rst:101 -msgid "Gaussian method – vertical strip scheme" -msgstr "Метод Гаусса – ленточная вертикальная схема" - -#: ../../common_information/processes_tasks.rst:103 -msgid "Gauss-Jordan method" -msgstr "Метод Гаусса-Жордана" - -#: ../../common_information/processes_tasks.rst:105 -msgid "Iterative methods (Jacobi)" -msgstr "Итеративные методы (Якоби)" - -#: ../../common_information/processes_tasks.rst:107 -msgid "Iterative methods (Gauss-Seidel)" -msgstr "Итеративные методы (Зейделя)" - -#: ../../common_information/processes_tasks.rst:109 -msgid "Iterative methods (Simple)" -msgstr "Метод простой итерации" - -#: ../../common_information/processes_tasks.rst:111 -msgid "Bubble sort (odd-even transposition algorithm)" -msgstr "Сортировка пузырьком (алгоритм чет-нечетной перестановки)" - -#: ../../common_information/processes_tasks.rst:113 -msgid "Image smoothing" -msgstr "Сглаживание изображения" - -#: ../../common_information/processes_tasks.rst:115 -msgid "Contrast enhancement" -msgstr "Повышение контраста" - -#: ../../common_information/processes_tasks.rst:119 -msgid "Third task" -msgstr "Третья задача" - -#: ../../common_information/processes_tasks.rst:124 -msgid "" -"Dense matrix multiplication. Elements of data type double. Block scheme, " -"Cannon's algorithm." -msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Кэннона." - -#: ../../common_information/processes_tasks.rst:126 -msgid "" -"Dense matrix multiplication. Elements of data type double. Block scheme, " -"Fox's algorithm." -msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Фокса." 
- -#: ../../common_information/processes_tasks.rst:128 -msgid "" -"Dense matrix multiplication. Elements of data type double. Strassen's " -"algorithm." -msgstr "Умножение плотных матриц. Элементы типа double. Алгоритм Штрассена." - -#: ../../common_information/processes_tasks.rst:130 -msgid "" -"Sparse matrix multiplication. Elements of data type double. Matrix " -"storage format – row format (CRS)." -msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – строковый (CRS)." - -#: ../../common_information/processes_tasks.rst:132 -msgid "" -"Sparse matrix multiplication. Elements of data type double. Matrix " -"storage format – column format (CCS)." -msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – столбцовый (CCS)." - -#: ../../common_information/processes_tasks.rst:134 -msgid "Solving systems of linear equations using the conjugate gradient method." -msgstr "Решение систем линейных уравнений методом сопряженных градиентов." - -#: ../../common_information/processes_tasks.rst:136 -msgid "" -"Computing multidimensional integrals using a multistep scheme (rectangle " -"method)." -msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод прямоугольников)." - -#: ../../common_information/processes_tasks.rst:138 -msgid "" -"Computing multidimensional integrals using a multistep scheme " -"(trapezoidal method)." -msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод трапеций)." - -#: ../../common_information/processes_tasks.rst:140 -msgid "" -"Computing multidimensional integrals using a multistep scheme (Simpson's " -"method)." -msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод Симпсона)." - -#: ../../common_information/processes_tasks.rst:142 -msgid "Computing multidimensional integrals using the Monte Carlo method." -msgstr "Вычисление многомерных интегралов методом Монте-Карло." 
- -#: ../../common_information/processes_tasks.rst:144 -msgid "" -"Global search algorithm (Strongin's) for one-dimensional optimization " -"problems. Parallelization by characteristics." -msgstr "Алгоритм глобального поиска (Стронгина) для одномерных задач оптимизации. Распараллеливание по характеристикам." - -#: ../../common_information/processes_tasks.rst:147 -msgid "" -"Multistep scheme for solving two-dimensional global optimization " -"problems. Parallelization by dividing the search area." -msgstr "Многошаговая схема решения двумерных задач глобальной оптимизации. Распараллеливание путем разделения области поиска." - -#: ../../common_information/processes_tasks.rst:150 -msgid "" -"Multistep scheme for solving two-dimensional global optimization " -"problems. Parallelization by characteristics." -msgstr "Многошаговая схема решения двумерных задач глобальной оптимизации. Распараллеливание по характеристикам." - -#: ../../common_information/processes_tasks.rst:153 -msgid "Quick sort with simple merging." -msgstr "Быстрая сортировка с простым слиянием." - -#: ../../common_information/processes_tasks.rst:155 -msgid "Quick sort with odd-even merging (Batcher's method)." -msgstr "Быстрая сортировка с четно-нечетным слиянием Бэтчера." - -#: ../../common_information/processes_tasks.rst:157 -msgid "Shell sort with simple merging." -msgstr "Сортировка Шелла с простым слиянием." - -#: ../../common_information/processes_tasks.rst:159 -msgid "Shell sort with odd-even merging (Batcher's method)." -msgstr "Сортировка Шелла с четно-нечетным слиянием Бэтчера." - -#: ../../common_information/processes_tasks.rst:161 -msgid "Radix sort for integers with simple merging." -msgstr "Поразрядная сортировка для целых чисел с простым слиянием." - -#: ../../common_information/processes_tasks.rst:163 -msgid "Radix sort for integers with odd-even merging (Batcher's method)." -msgstr "Поразрядная сортировка для целых чисел с четно-нечетным слиянием Бэтчера." 
- -#: ../../common_information/processes_tasks.rst:165 -msgid "Radix sort for floating-point numbers (type double) with simple merging." -msgstr "Поразрядная сортировка для вещественных чисел (тип double) с простым слиянием." - -#: ../../common_information/processes_tasks.rst:167 -msgid "" -"Radix sort for floating-point numbers (type double) with odd-even merging" -" (Batcher's method)." -msgstr "Поразрядная сортировка для вещественных чисел (тип double) с четно-нечетным слиянием Бэтчера." - -#: ../../common_information/processes_tasks.rst:169 -msgid "" -"Shortest path search from one vertex (Dijkstra's algorithm). With CRS " -"graphs." -msgstr "Поиск кратчайших путей из одной вершины (алгоритм Дейкстры). С CRS формой хранения графа." - -#: ../../common_information/processes_tasks.rst:171 -msgid "" -"Shortest path search from one vertex (Bellman-Ford algorithm). With CRS " -"graphs." -msgstr "Поиск кратчайших путей из одной вершины (алгоритм Беллмана-Форда). С CRS формой хранения графа." - -#: ../../common_information/processes_tasks.rst:173 -msgid "Convex hull construction – Graham's scan." -msgstr "Построение выпуклой оболочки – проход Грэхема." - -#: ../../common_information/processes_tasks.rst:175 -msgid "Convex hull construction – Jarvis's march." -msgstr "Построение выпуклой оболочки – проход Джарвиса." - -#: ../../common_information/processes_tasks.rst:177 -msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." -msgstr "Линейная фильтрация изображений (горизонтальное разбиение). Ядро Гаусса 3x3." - -#: ../../common_information/processes_tasks.rst:179 -msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." -msgstr "Линейная фильтрация изображений (вертикальное разбиение). Ядро Гаусса 3x3." - -#: ../../common_information/processes_tasks.rst:181 -msgid "28" -msgstr "" - -#: ../../common_information/processes_tasks.rst:181 -msgid "Linear image filtering (block partition). Gaussian kernel 3x3." 
-msgstr "Линейная фильтрация изображений (блочное разбиение). Ядро Гаусса 3x3." - -#: ../../common_information/processes_tasks.rst:183 -msgid "29" -msgstr "" - -#: ../../common_information/processes_tasks.rst:183 -msgid "Edge detection in an image using the Sobel operator." -msgstr "Выделение ребер на изображении с использованием оператора Собеля." - -#: ../../common_information/processes_tasks.rst:185 -msgid "30" -msgstr "" - -#: ../../common_information/processes_tasks.rst:185 -msgid "Contrast enhancement of grayscale image using linear histogram stretching." -msgstr "Повышение контраста полутонового изображения посредством линейной растяжки гистограммы" - -#: ../../common_information/processes_tasks.rst:187 -msgid "31" -msgstr "" - -#: ../../common_information/processes_tasks.rst:187 -msgid "" -"Labeling components on a binary image (black areas correspond to objects," -" white to background)." -msgstr "Маркировка компонент на бинарном изображении (черные области соответствуют объектам, белые – фону)." - -#: ../../common_information/processes_tasks.rst:189 -msgid "32" -msgstr "" - -#: ../../common_information/processes_tasks.rst:189 -msgid "Convex hull construction for components of a binary image." -msgstr "Построение выпуклой оболочки для компонент бинарного изображения." - -#: ../../common_information/processes_tasks.rst:193 -msgid "Comments for tasks 2 and 3:" -msgstr "Комментарии для 2-ой и 3-й задач:" - -#: ../../common_information/processes_tasks.rst:195 -msgid "" -"MESSAGE PASSING METHODS “You need to implement the specified methods " -"using only the Send and Recv functions. The implemented function should " -"have the same prototype as the corresponding MPI function. The test " -"program should allow selecting the root process number and perform array " -"transmission (broadcast, gather) for at least the following types: " -"MPI_INT, MPI_FLOAT, MPI_DOUBLE. 
In all operations, the transmission " -"should be carried out using the ‘tree’ of processes.”" -msgstr "МЕТОДЫ ПЕРЕДАЧИ СООБЩЕНИЙ. Нужно реализовать указанные методы, используя только функции Send и Recv. " -"Реализованная функция должна иметь тот же прототип, что и соответствующая функция MPI. " -"Тестовая программа должна позволять выбрать номер процесса root и выполнять пересылку " -"(рассылку, сбор) массива как минимум следующих типов: MPI_INT, MPI_FLOAT, MPI_DOUBLE. " -"Во всех операциях передача должна выполняться с использованием \"дерева\" процессов." - -#: ../../common_information/processes_tasks.rst:203 -#: ../../common_information/processes_tasks.rst:217 -#: ../../common_information/processes_tasks.rst:230 -#: ../../common_information/processes_tasks.rst:243 -#: ../../common_information/processes_tasks.rst:253 -#: ../../common_information/processes_tasks.rst:263 -#: ../../common_information/processes_tasks.rst:273 -msgid "Comments relevant for:" -msgstr "Комментарии относятся к:" - -#: ../../common_information/processes_tasks.rst:206 -msgid "Variants for task 2" -msgstr "Варинты для второй задачи" - -#: ../../common_information/processes_tasks.rst:206 -msgid "Variants for task 3" -msgstr "Варинты для третьей задачи" - -#: ../../common_information/processes_tasks.rst:208 -#: ../../common_information/processes_tasks.rst:235 -msgid "1 - 5" -msgstr "" - -#: ../../common_information/processes_tasks.rst:208 -#: ../../common_information/processes_tasks.rst:222 -#: ../../common_information/processes_tasks.rst:278 -msgid "x" -msgstr "" - -#: ../../common_information/processes_tasks.rst:211 -msgid "" -"DATA COMMUNICATION NETWORK TOPOLOGIES “You need to implement the virtual " -"topology specified in the task using MPI capabilities for working with " -"communicators and topologies, and ensure the ability to transfer data " -"from any selected process to any other process. 
(Do not use " -"MPI_Cart_Create and MPI_Graph_Create)”" -msgstr "ТОПОЛОГИИ СЕТЕЙ ПЕРЕДАЧИ ДАННЫХ. Нужно реализовать указанную в задаче виртуальную топологию, используя возможности MPI " -"по работе с коммуникаторами и топологиями и обеспечить возможность передачи данных " -"от любого выбранного процесса любому другому процессу. " -"(Не используя MPI_Cart_Create и MPI_Graph_Create)" - -#: ../../common_information/processes_tasks.rst:220 -#: ../../common_information/processes_tasks.rst:233 -#: ../../common_information/processes_tasks.rst:246 -#: ../../common_information/processes_tasks.rst:256 -#: ../../common_information/processes_tasks.rst:266 -#: ../../common_information/processes_tasks.rst:276 -msgid "Varinats for task 2" -msgstr "Варинты для второй задачи" - -#: ../../common_information/processes_tasks.rst:220 -#: ../../common_information/processes_tasks.rst:233 -#: ../../common_information/processes_tasks.rst:246 -#: ../../common_information/processes_tasks.rst:256 -#: ../../common_information/processes_tasks.rst:266 -#: ../../common_information/processes_tasks.rst:276 -msgid "Varinats for task 3" -msgstr "Варинты для третьей задачи" - -#: ../../common_information/processes_tasks.rst:222 -msgid "6 - 10" -msgstr "" - -#: ../../common_information/processes_tasks.rst:225 -msgid "" -"MATRIX COMPUTATIONS “In the horizontal scheme, the matrix is divided " -"among processes by rows. In the vertical scheme, it is divided by " -"columns, and in this case, the vector is also divided among processes.”" -msgstr "МАТРИЧНЫЕ ВЫЧИСЛЕНИЯ. В горизонтальной схеме матрица делится между процессами по строкам. " -"В вертикальной - по столбцам, вектор в этом случае также делится между процессами." 
- -#: ../../common_information/processes_tasks.rst:235 -msgid "11 - 14" -msgstr "" - -#: ../../common_information/processes_tasks.rst:238 -msgid "" -"COMPUTER GRAPHICS AND IMAGE PROCESSING “It is assumed that the image is " -"given in color or grayscale, with the input data being a one-dimensional " -"array. Loading a real image is not required, but is allowed.”" -msgstr "МАШИННАЯ ГРАФИКА И ОБРАБОТКА ИЗОБРАЖЕНИЙ. Считается, что изображение задано в цветном виде или оттенках серого, " -"входные данные - одномерный массив. " -"Загружать реальное изображение не требуется, но и не возбраняется." - -#: ../../common_information/processes_tasks.rst:248 -msgid "26 - 27" -msgstr "" - -#: ../../common_information/processes_tasks.rst:248 -msgid "24 - 32" -msgstr "" - -#: ../../common_information/processes_tasks.rst:251 -msgid "SOLUTION OF A SYSTEM OF LINEAR ALGEBRAIC EQUATIONS" -msgstr "РЕШЕНИЕ СИСТЕМЫ ЛИНЕЙНЫХ АЛГЕБРАИЧЕСКИХ УРАВНЕНИЙ" - -#: ../../common_information/processes_tasks.rst:258 -msgid "15 - 20" -msgstr "" - -#: ../../common_information/processes_tasks.rst:261 -msgid "SORT ALGORITHMS" -msgstr "АЛГОРИТМЫ СОРТИРОВКИ" - -#: ../../common_information/processes_tasks.rst:268 -msgid "14 - 21" -msgstr "" - -#: ../../common_information/processes_tasks.rst:271 -msgid "GRAPH PROCESSING ALGORITHMS" -msgstr "АЛГОРИТМЫ НА ГРАФАХ" - -#: ../../common_information/processes_tasks.rst:278 -msgid "22 - 23" -msgstr "" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-05 13:29+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/processes_tasks.rst:2 +msgid "Processes parallelism tasks" +msgstr "Задачи параллелизма на процессах" + +#: ../../common_information/processes_tasks.rst:5 +msgid "First task" +msgstr "Первая задача" + +#: ../../common_information/processes_tasks.rst:8 +#: ../../common_information/processes_tasks.rst:69 +#: ../../common_information/processes_tasks.rst:122 +msgid "Variant Number" +msgstr "Номер варианта" + +#: ../../common_information/processes_tasks.rst:8 +#: ../../common_information/processes_tasks.rst:69 +#: ../../common_information/processes_tasks.rst:122 +msgid "Task" +msgstr "Задача" + +#: ../../common_information/processes_tasks.rst:10 +#: ../../common_information/processes_tasks.rst:71 +#: ../../common_information/processes_tasks.rst:124 +msgid "1" +msgstr "" + +#: ../../common_information/processes_tasks.rst:10 +msgid "Sum of vector elements" +msgstr "Сумма элементов вектора" + +#: ../../common_information/processes_tasks.rst:12 +#: ../../common_information/processes_tasks.rst:73 +#: ../../common_information/processes_tasks.rst:126 +msgid "2" +msgstr "" + +#: ../../common_information/processes_tasks.rst:12 +msgid "Calculating the average value of vector elements" +msgstr "Вычисление среднего значения элементов вектора" + +#: ../../common_information/processes_tasks.rst:14 +#: 
../../common_information/processes_tasks.rst:75 +#: ../../common_information/processes_tasks.rst:128 +msgid "3" +msgstr "" + +#: ../../common_information/processes_tasks.rst:14 +msgid "Maximum value of vector elements" +msgstr "Максимальное значение элементов вектора" + +#: ../../common_information/processes_tasks.rst:16 +#: ../../common_information/processes_tasks.rst:77 +#: ../../common_information/processes_tasks.rst:130 +msgid "4" +msgstr "" + +#: ../../common_information/processes_tasks.rst:16 +msgid "Minimum value of vector elements" +msgstr "Минимальное значение элементов вектора" + +#: ../../common_information/processes_tasks.rst:18 +#: ../../common_information/processes_tasks.rst:79 +#: ../../common_information/processes_tasks.rst:132 +msgid "5" +msgstr "" + +#: ../../common_information/processes_tasks.rst:18 +msgid "" +"Finding the number of sign alternations between adjacent elements of the " +"vector" +msgstr "Нахождение числа чередований знаков значений соседних элементов вектора" + +#: ../../common_information/processes_tasks.rst:20 +#: ../../common_information/processes_tasks.rst:81 +#: ../../common_information/processes_tasks.rst:134 +#: ../../common_information/processes_tasks.rst:258 +msgid "6" +msgstr "" + +#: ../../common_information/processes_tasks.rst:20 +msgid "" +"Finding the number of order violations between adjacent elements of the " +"vector" +msgstr "Нахождение числа нарушений упорядоченности соседних элементов вектора" + +#: ../../common_information/processes_tasks.rst:22 +#: ../../common_information/processes_tasks.rst:83 +#: ../../common_information/processes_tasks.rst:136 +msgid "7" +msgstr "" + +#: ../../common_information/processes_tasks.rst:22 +msgid "Finding the most similar adjacent elements of the vector" +msgstr "Нахождение наиболее близких соседних элементов вектора" + +#: ../../common_information/processes_tasks.rst:24 +#: ../../common_information/processes_tasks.rst:85 +#: ../../common_information/processes_tasks.rst:138 
+msgid "8" +msgstr "" + +#: ../../common_information/processes_tasks.rst:24 +msgid "Finding the most different adjacent elements of the vector" +msgstr "Нахождение наиболее отличающихся по значению соседних элементов вектора" + +#: ../../common_information/processes_tasks.rst:26 +#: ../../common_information/processes_tasks.rst:87 +#: ../../common_information/processes_tasks.rst:140 +msgid "9" +msgstr "" + +#: ../../common_information/processes_tasks.rst:26 +msgid "Scalar product of vectors" +msgstr "Скалярное произведение векторов" + +#: ../../common_information/processes_tasks.rst:28 +#: ../../common_information/processes_tasks.rst:89 +#: ../../common_information/processes_tasks.rst:142 +msgid "10" +msgstr "" + +#: ../../common_information/processes_tasks.rst:28 +msgid "Sum of matrix elements" +msgstr "Сумма элементов матрицы" + +#: ../../common_information/processes_tasks.rst:30 +#: ../../common_information/processes_tasks.rst:91 +#: ../../common_information/processes_tasks.rst:144 +msgid "11" +msgstr "" + +#: ../../common_information/processes_tasks.rst:30 +msgid "Sum of values by rows in the matrix" +msgstr "Сумма значений по строкам матрицы" + +#: ../../common_information/processes_tasks.rst:32 +#: ../../common_information/processes_tasks.rst:93 +#: ../../common_information/processes_tasks.rst:147 +msgid "12" +msgstr "" + +#: ../../common_information/processes_tasks.rst:32 +msgid "Sum of values by columns in the matrix" +msgstr "Сумма значений по столбцам матрицы" + +#: ../../common_information/processes_tasks.rst:34 +#: ../../common_information/processes_tasks.rst:95 +#: ../../common_information/processes_tasks.rst:150 +msgid "13" +msgstr "" + +#: ../../common_information/processes_tasks.rst:34 +msgid "Maximum value of matrix elements" +msgstr "Максимальное значение элементов матрицы" + +#: ../../common_information/processes_tasks.rst:36 +#: ../../common_information/processes_tasks.rst:97 +#: ../../common_information/processes_tasks.rst:153 +msgid "14" 
+msgstr "" + +#: ../../common_information/processes_tasks.rst:36 +msgid "Minimum value of matrix elements" +msgstr "Минимальное значение элементов матрицы" + +#: ../../common_information/processes_tasks.rst:38 +#: ../../common_information/processes_tasks.rst:99 +#: ../../common_information/processes_tasks.rst:155 +msgid "15" +msgstr "" + +#: ../../common_information/processes_tasks.rst:38 +msgid "Finding maximum values by rows in the matrix" +msgstr "Нахождение максимальных значений по строкам матрицы" + +#: ../../common_information/processes_tasks.rst:40 +#: ../../common_information/processes_tasks.rst:101 +#: ../../common_information/processes_tasks.rst:157 +msgid "16" +msgstr "" + +#: ../../common_information/processes_tasks.rst:40 +msgid "Finding maximum values by columns in the matrix" +msgstr "Нахождение максимальных значений по столбцам матрицы" + +#: ../../common_information/processes_tasks.rst:42 +#: ../../common_information/processes_tasks.rst:103 +#: ../../common_information/processes_tasks.rst:159 +msgid "17" +msgstr "" + +#: ../../common_information/processes_tasks.rst:42 +msgid "Finding minimum values by rows in the matrix" +msgstr "Нахождение минимальных значений по строкам матрицы" + +#: ../../common_information/processes_tasks.rst:44 +#: ../../common_information/processes_tasks.rst:105 +#: ../../common_information/processes_tasks.rst:161 +msgid "18" +msgstr "" + +#: ../../common_information/processes_tasks.rst:44 +msgid "Finding minimum values by columns in the matrix" +msgstr "Нахождение минимальных значений по столбцам матрицы" + +#: ../../common_information/processes_tasks.rst:46 +#: ../../common_information/processes_tasks.rst:107 +#: ../../common_information/processes_tasks.rst:163 +msgid "19" +msgstr "" + +#: ../../common_information/processes_tasks.rst:46 +msgid "Integration – rectangle method" +msgstr "Интегрирование – метод прямоугольников" + +#: ../../common_information/processes_tasks.rst:48 +#: 
../../common_information/processes_tasks.rst:109 +#: ../../common_information/processes_tasks.rst:165 +msgid "20" +msgstr "" + +#: ../../common_information/processes_tasks.rst:48 +msgid "Integration – trapezoidal method" +msgstr "Интегрирование – метод трапеций" + +#: ../../common_information/processes_tasks.rst:50 +#: ../../common_information/processes_tasks.rst:111 +#: ../../common_information/processes_tasks.rst:167 +#: ../../common_information/processes_tasks.rst:268 +msgid "21" +msgstr "" + +#: ../../common_information/processes_tasks.rst:50 +msgid "Integration – Monte Carlo method" +msgstr "Интегрирование – метод Монте-Карло" + +#: ../../common_information/processes_tasks.rst:52 +#: ../../common_information/processes_tasks.rst:113 +#: ../../common_information/processes_tasks.rst:169 +msgid "22" +msgstr "" + +#: ../../common_information/processes_tasks.rst:52 +msgid "Counting the number of alphabetical characters in a string" +msgstr "Подсчет числа буквенных символов в строке" + +#: ../../common_information/processes_tasks.rst:54 +#: ../../common_information/processes_tasks.rst:115 +#: ../../common_information/processes_tasks.rst:171 +msgid "23" +msgstr "" + +#: ../../common_information/processes_tasks.rst:54 +msgid "Counting the frequency of a character in a string" +msgstr "Подсчет частоты символа в строке" + +#: ../../common_information/processes_tasks.rst:56 +#: ../../common_information/processes_tasks.rst:173 +msgid "24" +msgstr "" + +#: ../../common_information/processes_tasks.rst:56 +msgid "Counting the number of words in a string" +msgstr "Подсчет числа слов в строке" + +#: ../../common_information/processes_tasks.rst:58 +#: ../../common_information/processes_tasks.rst:175 +msgid "25" +msgstr "" + +#: ../../common_information/processes_tasks.rst:58 +msgid "Counting the number of sentences in a string" +msgstr "Подсчет числа предложений в строке" + +#: ../../common_information/processes_tasks.rst:60 +#: ../../common_information/processes_tasks.rst:177 
+msgid "26" +msgstr "" + +#: ../../common_information/processes_tasks.rst:60 +msgid "Checking lexicographical order of two strings" +msgstr "Проверка лексикографической упорядоченности двух строк" + +#: ../../common_information/processes_tasks.rst:62 +#: ../../common_information/processes_tasks.rst:179 +msgid "27" +msgstr "" + +#: ../../common_information/processes_tasks.rst:62 +msgid "Counting the number of differing characters between two strings" +msgstr "Подсчет числа несовпадающих символов двух строк" + +#: ../../common_information/processes_tasks.rst:66 +msgid "Second task" +msgstr "Вторая задача" + +#: ../../common_information/processes_tasks.rst:71 +msgid "Broadcast (one to all transfer)" +msgstr "Передача от одного всем (broadcast)" + +#: ../../common_information/processes_tasks.rst:73 +msgid "Reduce (all to one transfer)" +msgstr "Передача от всех одному (reduce)" + +#: ../../common_information/processes_tasks.rst:75 +msgid "Allreduce (all to one and broadcast)" +msgstr "Передача от всех одному и рассылка (allreduce)" + +#: ../../common_information/processes_tasks.rst:77 +msgid "Scatter (one to all transfer)" +msgstr "Обобщенная передача от одного всем (scatter)" + +#: ../../common_information/processes_tasks.rst:79 +msgid "Gather (all to one transfer)" +msgstr "Обобщенная передача от всех одному (gather)" + +#: ../../common_information/processes_tasks.rst:81 +msgid "Line" +msgstr "Линейка" + +#: ../../common_information/processes_tasks.rst:83 +msgid "Ring" +msgstr "Кольцо" + +#: ../../common_information/processes_tasks.rst:85 +msgid "Star" +msgstr "Звезда" + +#: ../../common_information/processes_tasks.rst:87 +msgid "Torus Grid" +msgstr "Решетка-тор" + +#: ../../common_information/processes_tasks.rst:89 +msgid "Hypercube" +msgstr "Гиперкуб" + +#: ../../common_information/processes_tasks.rst:91 +msgid "Horizontal strip scheme - matrix-vector multiplication" +msgstr "Ленточная горизонтальная схема - умножение матрицы на вектор" + +#: 
../../common_information/processes_tasks.rst:93 +msgid "Vertical strip scheme - matrix-vector multiplication" +msgstr "Ленточная вертикальная схема - умножение матрицы на вектор" + +#: ../../common_information/processes_tasks.rst:95 +msgid "" +"Horizontal strip scheme – partitioning only matrix A - matrix-matrix " +"multiplication" +msgstr "Ленточная горизонтальная схема - разбиение только матрицы А - умножение матрицы на матрицу" + +#: ../../common_information/processes_tasks.rst:97 +msgid "" +"Horizontal strip scheme A, vertical strip scheme B - matrix-matrix " +"multiplication" +msgstr "Ленточная горизонтальная схема А, вертикальное В - умножение матрицы на матрицу" + +#: ../../common_information/processes_tasks.rst:99 +msgid "Gaussian method – horizontal strip scheme" +msgstr "Метод Гаусса – ленточная горизонтальная схема" + +#: ../../common_information/processes_tasks.rst:101 +msgid "Gaussian method – vertical strip scheme" +msgstr "Метод Гаусса – ленточная вертикальная схема" + +#: ../../common_information/processes_tasks.rst:103 +msgid "Gauss-Jordan method" +msgstr "Метод Гаусса-Жордана" + +#: ../../common_information/processes_tasks.rst:105 +msgid "Iterative methods (Jacobi)" +msgstr "Итеративные методы (Якоби)" + +#: ../../common_information/processes_tasks.rst:107 +msgid "Iterative methods (Gauss-Seidel)" +msgstr "Итеративные методы (Зейделя)" + +#: ../../common_information/processes_tasks.rst:109 +msgid "Iterative methods (Simple)" +msgstr "Метод простой итерации" + +#: ../../common_information/processes_tasks.rst:111 +msgid "Bubble sort (odd-even transposition algorithm)" +msgstr "Сортировка пузырьком (алгоритм чет-нечетной перестановки)" + +#: ../../common_information/processes_tasks.rst:113 +msgid "Image smoothing" +msgstr "Сглаживание изображения" + +#: ../../common_information/processes_tasks.rst:115 +msgid "Contrast enhancement" +msgstr "Повышение контраста" + +#: ../../common_information/processes_tasks.rst:119 +msgid "Third task" +msgstr "Третья 
задача" + +#: ../../common_information/processes_tasks.rst:124 +msgid "" +"Dense matrix multiplication. Elements of data type double. Block scheme, " +"Cannon's algorithm." +msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Кэннона." + +#: ../../common_information/processes_tasks.rst:126 +msgid "" +"Dense matrix multiplication. Elements of data type double. Block scheme, " +"Fox's algorithm." +msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Фокса." + +#: ../../common_information/processes_tasks.rst:128 +msgid "" +"Dense matrix multiplication. Elements of data type double. Strassen's " +"algorithm." +msgstr "Умножение плотных матриц. Элементы типа double. Алгоритм Штрассена." + +#: ../../common_information/processes_tasks.rst:130 +msgid "" +"Sparse matrix multiplication. Elements of data type double. Matrix " +"storage format – row format (CRS)." +msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – строковый (CRS)." + +#: ../../common_information/processes_tasks.rst:132 +msgid "" +"Sparse matrix multiplication. Elements of data type double. Matrix " +"storage format – column format (CCS)." +msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – столбцовый (CCS)." + +#: ../../common_information/processes_tasks.rst:134 +msgid "Solving systems of linear equations using the conjugate gradient method." +msgstr "Решение систем линейных уравнений методом сопряженных градиентов." + +#: ../../common_information/processes_tasks.rst:136 +msgid "" +"Computing multidimensional integrals using a multistep scheme (rectangle " +"method)." +msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод прямоугольников)." + +#: ../../common_information/processes_tasks.rst:138 +msgid "" +"Computing multidimensional integrals using a multistep scheme " +"(trapezoidal method)." 
+msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод трапеций)." + +#: ../../common_information/processes_tasks.rst:140 +msgid "" +"Computing multidimensional integrals using a multistep scheme (Simpson's " +"method)." +msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод Симпсона)." + +#: ../../common_information/processes_tasks.rst:142 +msgid "Computing multidimensional integrals using the Monte Carlo method." +msgstr "Вычисление многомерных интегралов методом Монте-Карло." + +#: ../../common_information/processes_tasks.rst:144 +msgid "" +"Global search algorithm (Strongin's) for one-dimensional optimization " +"problems. Parallelization by characteristics." +msgstr "Алгоритм глобального поиска (Стронгина) для одномерных задач оптимизации. Распараллеливание по характеристикам." + +#: ../../common_information/processes_tasks.rst:147 +msgid "" +"Multistep scheme for solving two-dimensional global optimization " +"problems. Parallelization by dividing the search area." +msgstr "Многошаговая схема решения двумерных задач глобальной оптимизации. Распараллеливание путем разделения области поиска." + +#: ../../common_information/processes_tasks.rst:150 +msgid "" +"Multistep scheme for solving two-dimensional global optimization " +"problems. Parallelization by characteristics." +msgstr "Многошаговая схема решения двумерных задач глобальной оптимизации. Распараллеливание по характеристикам." + +#: ../../common_information/processes_tasks.rst:153 +msgid "Quick sort with simple merging." +msgstr "Быстрая сортировка с простым слиянием." + +#: ../../common_information/processes_tasks.rst:155 +msgid "Quick sort with odd-even merging (Batcher's method)." +msgstr "Быстрая сортировка с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/processes_tasks.rst:157 +msgid "Shell sort with simple merging." +msgstr "Сортировка Шелла с простым слиянием." 
+ +#: ../../common_information/processes_tasks.rst:159 +msgid "Shell sort with odd-even merging (Batcher's method)." +msgstr "Сортировка Шелла с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/processes_tasks.rst:161 +msgid "Radix sort for integers with simple merging." +msgstr "Поразрядная сортировка для целых чисел с простым слиянием." + +#: ../../common_information/processes_tasks.rst:163 +msgid "Radix sort for integers with odd-even merging (Batcher's method)." +msgstr "Поразрядная сортировка для целых чисел с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/processes_tasks.rst:165 +msgid "Radix sort for floating-point numbers (type double) with simple merging." +msgstr "Поразрядная сортировка для вещественных чисел (тип double) с простым слиянием." + +#: ../../common_information/processes_tasks.rst:167 +msgid "" +"Radix sort for floating-point numbers (type double) with odd-even merging" +" (Batcher's method)." +msgstr "Поразрядная сортировка для вещественных чисел (тип double) с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/processes_tasks.rst:169 +msgid "" +"Shortest path search from one vertex (Dijkstra's algorithm). With CRS " +"graphs." +msgstr "Поиск кратчайших путей из одной вершины (алгоритм Дейкстры). С CRS формой хранения графа." + +#: ../../common_information/processes_tasks.rst:171 +msgid "" +"Shortest path search from one vertex (Bellman-Ford algorithm). With CRS " +"graphs." +msgstr "Поиск кратчайших путей из одной вершины (алгоритм Беллмана-Форда). С CRS формой хранения графа." + +#: ../../common_information/processes_tasks.rst:173 +msgid "Convex hull construction – Graham's scan." +msgstr "Построение выпуклой оболочки – проход Грэхема." + +#: ../../common_information/processes_tasks.rst:175 +msgid "Convex hull construction – Jarvis's march." +msgstr "Построение выпуклой оболочки – проход Джарвиса." 
+ +#: ../../common_information/processes_tasks.rst:177 +msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." +msgstr "Линейная фильтрация изображений (горизонтальное разбиение). Ядро Гаусса 3x3." + +#: ../../common_information/processes_tasks.rst:179 +msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." +msgstr "Линейная фильтрация изображений (вертикальное разбиение). Ядро Гаусса 3x3." + +#: ../../common_information/processes_tasks.rst:181 +msgid "28" +msgstr "" + +#: ../../common_information/processes_tasks.rst:181 +msgid "Linear image filtering (block partition). Gaussian kernel 3x3." +msgstr "Линейная фильтрация изображений (блочное разбиение). Ядро Гаусса 3x3." + +#: ../../common_information/processes_tasks.rst:183 +msgid "29" +msgstr "" + +#: ../../common_information/processes_tasks.rst:183 +msgid "Edge detection in an image using the Sobel operator." +msgstr "Выделение ребер на изображении с использованием оператора Собеля." + +#: ../../common_information/processes_tasks.rst:185 +msgid "30" +msgstr "" + +#: ../../common_information/processes_tasks.rst:185 +msgid "Contrast enhancement of grayscale image using linear histogram stretching." +msgstr "Повышение контраста полутонового изображения посредством линейной растяжки гистограммы" + +#: ../../common_information/processes_tasks.rst:187 +msgid "31" +msgstr "" + +#: ../../common_information/processes_tasks.rst:187 +msgid "" +"Labeling components on a binary image (black areas correspond to objects," +" white to background)." +msgstr "Маркировка компонент на бинарном изображении (черные области соответствуют объектам, белые – фону)." + +#: ../../common_information/processes_tasks.rst:189 +msgid "32" +msgstr "" + +#: ../../common_information/processes_tasks.rst:189 +msgid "Convex hull construction for components of a binary image." +msgstr "Построение выпуклой оболочки для компонент бинарного изображения." 
+ +#: ../../common_information/processes_tasks.rst:193 +msgid "Comments for tasks 2 and 3:" +msgstr "Комментарии для 2-ой и 3-й задач:" + +#: ../../common_information/processes_tasks.rst:195 +msgid "" +"MESSAGE PASSING METHODS “You need to implement the specified methods " +"using only the Send and Recv functions. The implemented function should " +"have the same prototype as the corresponding MPI function. The test " +"program should allow selecting the root process number and perform array " +"transmission (broadcast, gather) for at least the following types: " +"MPI_INT, MPI_FLOAT, MPI_DOUBLE. In all operations, the transmission " +"should be carried out using the ‘tree’ of processes.”" +msgstr "МЕТОДЫ ПЕРЕДАЧИ СООБЩЕНИЙ. Нужно реализовать указанные методы, используя только функции Send и Recv. " +"Реализованная функция должна иметь тот же прототип, что и соответствующая функция MPI. " +"Тестовая программа должна позволять выбрать номер процесса root и выполнять пересылку " +"(рассылку, сбор) массива как минимум следующих типов: MPI_INT, MPI_FLOAT, MPI_DOUBLE. " +"Во всех операциях передача должна выполняться с использованием \"дерева\" процессов." 
+ +#: ../../common_information/processes_tasks.rst:203 +#: ../../common_information/processes_tasks.rst:217 +#: ../../common_information/processes_tasks.rst:230 +#: ../../common_information/processes_tasks.rst:243 +#: ../../common_information/processes_tasks.rst:253 +#: ../../common_information/processes_tasks.rst:263 +#: ../../common_information/processes_tasks.rst:273 +msgid "Comments relevant for:" +msgstr "Комментарии относятся к:" + +#: ../../common_information/processes_tasks.rst:206 +msgid "Variants for task 2" +msgstr "Варианты для второй задачи" + +#: ../../common_information/processes_tasks.rst:206 +msgid "Variants for task 3" +msgstr "Варианты для третьей задачи" + +#: ../../common_information/processes_tasks.rst:208 +#: ../../common_information/processes_tasks.rst:235 +msgid "1 - 5" +msgstr "" + +#: ../../common_information/processes_tasks.rst:208 +#: ../../common_information/processes_tasks.rst:222 +#: ../../common_information/processes_tasks.rst:278 +msgid "x" +msgstr "" + +#: ../../common_information/processes_tasks.rst:211 +msgid "" +"DATA COMMUNICATION NETWORK TOPOLOGIES “You need to implement the virtual " +"topology specified in the task using MPI capabilities for working with " +"communicators and topologies, and ensure the ability to transfer data " +"from any selected process to any other process. (Do not use " +"MPI_Cart_Create and MPI_Graph_Create)”" +msgstr "ТОПОЛОГИИ СЕТЕЙ ПЕРЕДАЧИ ДАННЫХ. Нужно реализовать указанную в задаче виртуальную топологию, используя возможности MPI " +"по работе с коммуникаторами и топологиями и обеспечить возможность передачи данных " +"от любого выбранного процесса любому другому процессу. 
" +"(Не используя MPI_Cart_Create и MPI_Graph_Create)" + +#: ../../common_information/processes_tasks.rst:220 +#: ../../common_information/processes_tasks.rst:233 +#: ../../common_information/processes_tasks.rst:246 +#: ../../common_information/processes_tasks.rst:256 +#: ../../common_information/processes_tasks.rst:266 +#: ../../common_information/processes_tasks.rst:276 +msgid "Varinats for task 2" +msgstr "Варинты для второй задачи" + +#: ../../common_information/processes_tasks.rst:220 +#: ../../common_information/processes_tasks.rst:233 +#: ../../common_information/processes_tasks.rst:246 +#: ../../common_information/processes_tasks.rst:256 +#: ../../common_information/processes_tasks.rst:266 +#: ../../common_information/processes_tasks.rst:276 +msgid "Varinats for task 3" +msgstr "Варинты для третьей задачи" + +#: ../../common_information/processes_tasks.rst:222 +msgid "6 - 10" +msgstr "" + +#: ../../common_information/processes_tasks.rst:225 +msgid "" +"MATRIX COMPUTATIONS “In the horizontal scheme, the matrix is divided " +"among processes by rows. In the vertical scheme, it is divided by " +"columns, and in this case, the vector is also divided among processes.”" +msgstr "МАТРИЧНЫЕ ВЫЧИСЛЕНИЯ. В горизонтальной схеме матрица делится между процессами по строкам. " +"В вертикальной - по столбцам, вектор в этом случае также делится между процессами." + +#: ../../common_information/processes_tasks.rst:235 +msgid "11 - 14" +msgstr "" + +#: ../../common_information/processes_tasks.rst:238 +msgid "" +"COMPUTER GRAPHICS AND IMAGE PROCESSING “It is assumed that the image is " +"given in color or grayscale, with the input data being a one-dimensional " +"array. Loading a real image is not required, but is allowed.”" +msgstr "МАШИННАЯ ГРАФИКА И ОБРАБОТКА ИЗОБРАЖЕНИЙ. Считается, что изображение задано в цветном виде или оттенках серого, " +"входные данные - одномерный массив. " +"Загружать реальное изображение не требуется, но и не возбраняется." 
+ +#: ../../common_information/processes_tasks.rst:248 +msgid "26 - 27" +msgstr "" + +#: ../../common_information/processes_tasks.rst:248 +msgid "24 - 32" +msgstr "" + +#: ../../common_information/processes_tasks.rst:251 +msgid "SOLUTION OF A SYSTEM OF LINEAR ALGEBRAIC EQUATIONS" +msgstr "РЕШЕНИЕ СИСТЕМЫ ЛИНЕЙНЫХ АЛГЕБРАИЧЕСКИХ УРАВНЕНИЙ" + +#: ../../common_information/processes_tasks.rst:258 +msgid "15 - 20" +msgstr "" + +#: ../../common_information/processes_tasks.rst:261 +msgid "SORT ALGORITHMS" +msgstr "АЛГОРИТМЫ СОРТИРОВКИ" + +#: ../../common_information/processes_tasks.rst:268 +msgid "14 - 21" +msgstr "" + +#: ../../common_information/processes_tasks.rst:271 +msgid "GRAPH PROCESSING ALGORITHMS" +msgstr "АЛГОРИТМЫ НА ГРАФАХ" + +#: ../../common_information/processes_tasks.rst:278 +msgid "22 - 23" +msgstr "" diff --git a/docs/locale/ru/LC_MESSAGES/common_information/report.po b/docs/locale/ru/LC_MESSAGES/common_information/report.po index a32a3b5880..01ba2c6631 100644 --- a/docs/locale/ru/LC_MESSAGES/common_information/report.po +++ b/docs/locale/ru/LC_MESSAGES/common_information/report.po @@ -1,183 +1,183 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-08 23:43+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/report.rst:2 -msgid "Report" -msgstr "Отчет" - -#: ../../common_information/report.rst:4 -msgid "Report points" -msgstr "Баллы за отчет" - -#: ../../common_information/report.rst:7 ../../common_information/report.rst:14 -msgid "Completeness" -msgstr "Наличие всех требуемых пунктов" - -#: ../../common_information/report.rst:7 ../../common_information/report.rst:27 -msgid "Text Quality" -msgstr "Качество текста" - -#: ../../common_information/report.rst:7 ../../common_information/report.rst:34 -msgid "Formatting Quality" -msgstr "Качество оформления" - -#: ../../common_information/report.rst:7 -msgid "Total" -msgstr "Всего" - -#: ../../common_information/report.rst:9 -msgid "5" -msgstr "" - -#: ../../common_information/report.rst:9 -msgid "2.5" -msgstr "" - -#: ../../common_information/report.rst:9 -msgid "10" -msgstr "" - -#: ../../common_information/report.rst:12 -msgid "Requirements for Criteria" -msgstr "Требования к критериям" - -#: ../../common_information/report.rst:16 -msgid "Introduction (can be a short paragraph)" -msgstr "Введение (допустимо на абзац)" - -#: ../../common_information/report.rst:17 -msgid "Problem Statement (descriptive)" -msgstr "Постановка задачи (содержательная)" - -#: ../../common_information/report.rst:18 -msgid "Algorithm Description" -msgstr "Описание алгоритма" - -#: ../../common_information/report.rst:19 -msgid "Description of the Parallel Algorithm Scheme" -msgstr "Описание схемы параллельного алгоритма" - -#: ../../common_information/report.rst:20 -msgid "" -"Description of the MPI, OpenMP, TBB, std::threads, all versions " -"(depending on the semester) – part of the software implementation " -"description" -msgstr "Описание OpenMP-версии и TBB-версии или MPI-версии (смотря какой семестр) - пункт описания программной реализации" - -#: 
../../common_information/report.rst:21 -msgid "" -"Experimental Results (execution time and algorithm quality assessment), " -"description of correctness verification" -msgstr "Результаты экспериментов (по времени работы и оценке качества работы алогритма), описание подтверждения корректности" - -#: ../../common_information/report.rst:22 -msgid "Conclusions from the Results" -msgstr "Выводы из результатов" - -#: ../../common_information/report.rst:23 -msgid "Conclusion" -msgstr "Заключение" - -#: ../../common_information/report.rst:24 -msgid "References" -msgstr "Список литературы" - -#: ../../common_information/report.rst:25 -msgid "Appendix (include code, ensuring readability)" -msgstr "Приложение (вставить код, сохранить его читаемость)" - -#: ../../common_information/report.rst:29 -msgid "Meaningfulness" -msgstr "Осмысленность" - -#: ../../common_information/report.rst:30 -msgid "Coherence" -msgstr "Связность" - -#: ../../common_information/report.rst:31 -msgid "Clarity" -msgstr "Понятность" - -#: ../../common_information/report.rst:32 -msgid "Quality of language (only the most obvious mistakes are considered)" -msgstr "Качество языка (рассматриваются только самые очевидные ошибки)" - -#: ../../common_information/report.rst:36 -msgid "Requirements for Headings" -msgstr "Требования к заголовкам" - -#: ../../common_information/report.rst:37 -msgid "Requirements for Text Alignment" -msgstr "Требования к выравниванию текста" - -#: ../../common_information/report.rst:38 -msgid "Requirements for Paragraph Indentation" -msgstr "Требования к абзацным отступам" - -#: ../../common_information/report.rst:39 -msgid "Requirements for the Formatting of Figures, Graphs, and Tables" -msgstr "Требования к оформлению рисунков, графиков и таблиц" - -#: ../../common_information/report.rst:40 -msgid "" -"The “Teacher” field must include the full name, position, and title of " -"the lecturer" -msgstr "В графе преподаватель должны стоять ФИО, должность и звание лектора" - -#: 
../../common_information/report.rst:42 -msgid "Comments" -msgstr "Комментарии" - -#: ../../common_information/report.rst:44 -msgid "Failure to meet the requirements will result in a deduction of points." -msgstr "За невыполнение требований следует снижение баллов." - -#: ../../common_information/report.rst:45 -msgid "" -"The request will include points and comments regarding any requirement " -"violations (if applicable)." -msgstr "В реквесте будут писаться баллы и комментарии по поводу нарушения требований (если таковые имеются)" - -#: ../../common_information/report.rst:46 -msgid "" -"The report will be checked only once, and the grade will be assigned " -"based on the submitted version according to the requirements." -msgstr "Отчет будет проверяться один раз, по версии отчета будет выставлена оценка согласно требованиям" - -#: ../../common_information/report.rst:47 -msgid "" -"The report is reviewed online; the entire review process takes place in " -"the request." -msgstr "Отчет проверяется заочно, вся проверка проходит в реквесте, очно отчет НЕ сдается." - -#: ../../common_information/report.rst:48 -msgid "" -"If a student falls into the **blue zone** for the task, the report points" -" will also be nullified at the end of the semester. The report will be " -"finally accepted and merged into the master branch only after both the " -"**online** parts of the corresponding lab work are fully completed." -msgstr "Если студент попал в синюю зону по задаче, баллы за отчет также " -"обнуляются в конце семестра. Отчет окончательно будет " -"принят и попадет в мастер, когда будет полностью сдана " -"очно и заочно лабораторная работа, " -"по которой был сделан отчет." +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-08 23:43+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/report.rst:2 +msgid "Report" +msgstr "Отчет" + +#: ../../common_information/report.rst:4 +msgid "Report points" +msgstr "Баллы за отчет" + +#: ../../common_information/report.rst:7 ../../common_information/report.rst:14 +msgid "Completeness" +msgstr "Наличие всех требуемых пунктов" + +#: ../../common_information/report.rst:7 ../../common_information/report.rst:27 +msgid "Text Quality" +msgstr "Качество текста" + +#: ../../common_information/report.rst:7 ../../common_information/report.rst:34 +msgid "Formatting Quality" +msgstr "Качество оформления" + +#: ../../common_information/report.rst:7 +msgid "Total" +msgstr "Всего" + +#: ../../common_information/report.rst:9 +msgid "5" +msgstr "" + +#: ../../common_information/report.rst:9 +msgid "2.5" +msgstr "" + +#: ../../common_information/report.rst:9 +msgid "10" +msgstr "" + +#: ../../common_information/report.rst:12 +msgid "Requirements for Criteria" +msgstr "Требования к критериям" + +#: ../../common_information/report.rst:16 +msgid "Introduction (can be a short paragraph)" +msgstr "Введение (допустимо на абзац)" + +#: ../../common_information/report.rst:17 +msgid "Problem Statement (descriptive)" +msgstr "Постановка задачи (содержательная)" + +#: ../../common_information/report.rst:18 +msgid "Algorithm Description" +msgstr "Описание алгоритма" + +#: 
../../common_information/report.rst:19 +msgid "Description of the Parallel Algorithm Scheme" +msgstr "Описание схемы параллельного алгоритма" + +#: ../../common_information/report.rst:20 +msgid "" +"Description of the MPI, OpenMP, TBB, std::threads, all versions " +"(depending on the semester) – part of the software implementation " +"description" +msgstr "Описание OpenMP-версии и TBB-версии или MPI-версии (смотря какой семестр) - пункт описания программной реализации" + +#: ../../common_information/report.rst:21 +msgid "" +"Experimental Results (execution time and algorithm quality assessment), " +"description of correctness verification" +msgstr "Результаты экспериментов (по времени работы и оценке качества работы алгоритма), описание подтверждения корректности" + +#: ../../common_information/report.rst:22 +msgid "Conclusions from the Results" +msgstr "Выводы из результатов" + +#: ../../common_information/report.rst:23 +msgid "Conclusion" +msgstr "Заключение" + +#: ../../common_information/report.rst:24 +msgid "References" +msgstr "Список литературы" + +#: ../../common_information/report.rst:25 +msgid "Appendix (include code, ensuring readability)" +msgstr "Приложение (вставить код, сохранить его читаемость)" + +#: ../../common_information/report.rst:29 +msgid "Meaningfulness" +msgstr "Осмысленность" + +#: ../../common_information/report.rst:30 +msgid "Coherence" +msgstr "Связность" + +#: ../../common_information/report.rst:31 +msgid "Clarity" +msgstr "Понятность" + +#: ../../common_information/report.rst:32 +msgid "Quality of language (only the most obvious mistakes are considered)" +msgstr "Качество языка (рассматриваются только самые очевидные ошибки)" + +#: ../../common_information/report.rst:36 +msgid "Requirements for Headings" +msgstr "Требования к заголовкам" + +#: ../../common_information/report.rst:37 +msgid "Requirements for Text Alignment" +msgstr "Требования к выравниванию текста" + +#: ../../common_information/report.rst:38 +msgid "Requirements for 
Paragraph Indentation" +msgstr "Требования к абзацным отступам" + +#: ../../common_information/report.rst:39 +msgid "Requirements for the Formatting of Figures, Graphs, and Tables" +msgstr "Требования к оформлению рисунков, графиков и таблиц" + +#: ../../common_information/report.rst:40 +msgid "" +"The “Teacher” field must include the full name, position, and title of " +"the lecturer" +msgstr "В графе преподаватель должны стоять ФИО, должность и звание лектора" + +#: ../../common_information/report.rst:42 +msgid "Comments" +msgstr "Комментарии" + +#: ../../common_information/report.rst:44 +msgid "Failure to meet the requirements will result in a deduction of points." +msgstr "За невыполнение требований следует снижение баллов." + +#: ../../common_information/report.rst:45 +msgid "" +"The request will include points and comments regarding any requirement " +"violations (if applicable)." +msgstr "В реквесте будут писаться баллы и комментарии по поводу нарушения требований (если таковые имеются)" + +#: ../../common_information/report.rst:46 +msgid "" +"The report will be checked only once, and the grade will be assigned " +"based on the submitted version according to the requirements." +msgstr "Отчет будет проверяться один раз, по версии отчета будет выставлена оценка согласно требованиям" + +#: ../../common_information/report.rst:47 +msgid "" +"The report is reviewed online; the entire review process takes place in " +"the request." +msgstr "Отчет проверяется заочно, вся проверка проходит в реквесте, очно отчет НЕ сдается." + +#: ../../common_information/report.rst:48 +msgid "" +"If a student falls into the **blue zone** for the task, the report points" +" will also be nullified at the end of the semester. The report will be " +"finally accepted and merged into the master branch only after both the " +"**online** parts of the corresponding lab work are fully completed." 
+msgstr "Если студент попал в синюю зону по задаче, баллы за отчет также " +"обнуляются в конце семестра. Отчет окончательно будет " +"принят и попадет в мастер, когда будет полностью сдана " +"очно и заочно лабораторная работа, " +"по которой был сделан отчет." diff --git a/docs/locale/ru/LC_MESSAGES/common_information/threading_tasks.po b/docs/locale/ru/LC_MESSAGES/common_information/threading_tasks.po index 9d938ac4ea..d72933c9b4 100644 --- a/docs/locale/ru/LC_MESSAGES/common_information/threading_tasks.po +++ b/docs/locale/ru/LC_MESSAGES/common_information/threading_tasks.po @@ -1,298 +1,298 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-05 13:29+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../common_information/threading_tasks.rst:2 -msgid "Thread parallelism tasks" -msgstr "Задачи параллелизма на потоках" - -#: ../../common_information/threading_tasks.rst:5 -msgid "Variant Number" -msgstr "Номер варианта" - -#: ../../common_information/threading_tasks.rst:5 -msgid "Tasks" -msgstr "Задача" - -#: ../../common_information/threading_tasks.rst:7 -msgid "1" -msgstr "" - -#: ../../common_information/threading_tasks.rst:7 -msgid "" -"Dense matrix multiplication. Elements of type double. Block scheme, " -"Cannon's algorithm." 
-msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Кэннона." - -#: ../../common_information/threading_tasks.rst:9 -msgid "2" -msgstr "" - -#: ../../common_information/threading_tasks.rst:9 -msgid "" -"Dense matrix multiplication. Elements of type double. Block scheme, Fox's" -" algorithm." -msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Фокса." - -#: ../../common_information/threading_tasks.rst:11 -msgid "3" -msgstr "" - -#: ../../common_information/threading_tasks.rst:11 -msgid "" -"Dense matrix multiplication. Elements of type double. Strassen's " -"algorithm." -msgstr "Умножение плотных матриц. Элементы типа double. Алгоритм Штрассена." - -#: ../../common_information/threading_tasks.rst:13 -msgid "4" -msgstr "" - -#: ../../common_information/threading_tasks.rst:13 -msgid "" -"Sparse matrix multiplication. Elements of type double. Matrix storage " -"format – row format (Compressed Row Storage)." -msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – строковый (CRS)." - -#: ../../common_information/threading_tasks.rst:15 -msgid "5" -msgstr "" - -#: ../../common_information/threading_tasks.rst:15 -msgid "" -"Sparse matrix multiplication. Elements of type double. Matrix storage " -"format – column format (Compressed Column Storage)." -msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – столбцовый (CCS)." - -#: ../../common_information/threading_tasks.rst:17 -msgid "6" -msgstr "" - -#: ../../common_information/threading_tasks.rst:17 -msgid "" -"Sparse matrix multiplication. Complex type elements. Matrix storage " -"format – row format (Compressed Row Storage)." -msgstr "Умножение разреженных матриц. Элементы комплексного типа. Формат хранения матрицы – строковый (CRS)." - -#: ../../common_information/threading_tasks.rst:19 -msgid "7" -msgstr "" - -#: ../../common_information/threading_tasks.rst:19 -msgid "" -"Sparse matrix multiplication. 
Complex type elements. Matrix storage " -"format – column format (Compressed Column Storage)." -msgstr "Умножение разреженных матриц. Элементы комплексного типа. Формат хранения матрицы – столбцовый (CCS)." - -#: ../../common_information/threading_tasks.rst:21 -msgid "8" -msgstr "" - -#: ../../common_information/threading_tasks.rst:21 -msgid "Solving systems of linear equations using the conjugate gradient method." -msgstr "Решение систем линейных уравнений методом сопряженных градиентов." - -#: ../../common_information/threading_tasks.rst:23 -msgid "9" -msgstr "" - -#: ../../common_information/threading_tasks.rst:23 -msgid "" -"Computing multidimensional integrals using a multistep scheme (rectangle " -"method)." -msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод прямоугольников)." - -#: ../../common_information/threading_tasks.rst:25 -msgid "10" -msgstr "" - -#: ../../common_information/threading_tasks.rst:25 -msgid "" -"Computing multidimensional integrals using a multistep scheme " -"(trapezoidal method)." -msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод трапеций)." - -#: ../../common_information/threading_tasks.rst:27 -msgid "11" -msgstr "" - -#: ../../common_information/threading_tasks.rst:27 -msgid "" -"Computing multidimensional integrals using a multistep scheme (Simpson's " -"method)." -msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод Симпсона)." - -#: ../../common_information/threading_tasks.rst:29 -msgid "12" -msgstr "" - -#: ../../common_information/threading_tasks.rst:29 -msgid "Computing multidimensional integrals using the Monte Carlo method." -msgstr "Вычисление многомерных интегралов методом Монте-Карло." - -#: ../../common_information/threading_tasks.rst:31 -msgid "13" -msgstr "" - -#: ../../common_information/threading_tasks.rst:31 -msgid "Quick sort with simple merging." -msgstr "Сортировка Хоара с простым слиянием." 
- -#: ../../common_information/threading_tasks.rst:33 -msgid "14" -msgstr "" - -#: ../../common_information/threading_tasks.rst:33 -msgid "Quick sort with odd-even merging (Batcher's method)." -msgstr "Сортировка Хоара с четно-нечетным слиянием Бэтчера." - -#: ../../common_information/threading_tasks.rst:35 -msgid "15" -msgstr "" - -#: ../../common_information/threading_tasks.rst:35 -msgid "Shell sort with simple merging." -msgstr "Сортировка Шелла с простым слиянием." - -#: ../../common_information/threading_tasks.rst:37 -msgid "16" -msgstr "" - -#: ../../common_information/threading_tasks.rst:37 -msgid "Shell sort with odd-even merging (Batcher's method)." -msgstr "Сортировка Шелла с четно-нечетным слиянием Бэтчера." - -#: ../../common_information/threading_tasks.rst:39 -msgid "17" -msgstr "" - -#: ../../common_information/threading_tasks.rst:39 -msgid "Radix sort for integers with simple merging." -msgstr "Поразрядная сортировка для целых чисел с простым слиянием." - -#: ../../common_information/threading_tasks.rst:41 -msgid "18" -msgstr "" - -#: ../../common_information/threading_tasks.rst:41 -msgid "Radix sort for integers with odd-even merging (Batcher's method)." -msgstr "Поразрядная сортировка для целых чисел с четно-нечетным слиянием Бэтчера." - -#: ../../common_information/threading_tasks.rst:43 -msgid "19" -msgstr "" - -#: ../../common_information/threading_tasks.rst:43 -msgid "Radix sort for floating-point numbers (type double) with simple merging." -msgstr "Поразрядная сортировка для вещественных чисел (тип double) с простым слиянием." - -#: ../../common_information/threading_tasks.rst:45 -msgid "20" -msgstr "" - -#: ../../common_information/threading_tasks.rst:45 -msgid "" -"Radix sort for floating-point numbers (type double) with odd-even merging" -" (Batcher's method)." -msgstr "Поразрядная сортировка для вещественных чисел (тип double) с четно-нечетным слиянием Бэтчера." 
- -#: ../../common_information/threading_tasks.rst:47 -msgid "21" -msgstr "" - -#: ../../common_information/threading_tasks.rst:47 -msgid "Shortest path search from one vertex (Dijkstra's algorithm)." -msgstr "Поиск кратчайших путей из одной вершины (алгоритм Дейкстры)." - -#: ../../common_information/threading_tasks.rst:49 -msgid "22" -msgstr "" - -#: ../../common_information/threading_tasks.rst:49 -msgid "Convex hull construction – Graham's scan." -msgstr "Построение выпуклой оболочки – проход Грэхема." - -#: ../../common_information/threading_tasks.rst:51 -msgid "23" -msgstr "" - -#: ../../common_information/threading_tasks.rst:51 -msgid "Convex hull construction – Jarvis's march." -msgstr "Построение выпуклой оболочки – проход Джарвиса." - -#: ../../common_information/threading_tasks.rst:53 -msgid "24" -msgstr "" - -#: ../../common_information/threading_tasks.rst:53 -msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." -msgstr "Линейная фильтрация изображений (горизонтальное разбиение). Ядро Гаусса 3x3." - -#: ../../common_information/threading_tasks.rst:55 -msgid "25" -msgstr "" - -#: ../../common_information/threading_tasks.rst:55 -msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." -msgstr "Линейная фильтрация изображений (вертикальное разбиение). Ядро Гаусса 3x3." - -#: ../../common_information/threading_tasks.rst:57 -msgid "26" -msgstr "" - -#: ../../common_information/threading_tasks.rst:57 -msgid "Linear image filtering (block partition). Gaussian kernel 3x3." -msgstr "Линейная фильтрация изображений (блочное разбиение). Ядро Гаусса 3x3." - -#: ../../common_information/threading_tasks.rst:59 -msgid "27" -msgstr "" - -#: ../../common_information/threading_tasks.rst:59 -msgid "Edge detection in an image using the Sobel operator." -msgstr "Выделение ребер на изображении с использованием оператора Собеля." 
- -#: ../../common_information/threading_tasks.rst:61 -msgid "28" -msgstr "" - -#: ../../common_information/threading_tasks.rst:61 -msgid "Contrast enhancement of grayscale image using linear histogram stretching." -msgstr "Повышение контраста полутонового изображения посредством линейной растяжки гистограммы." - -#: ../../common_information/threading_tasks.rst:63 -msgid "29" -msgstr "" - -#: ../../common_information/threading_tasks.rst:63 -msgid "" -"Labeling components on a binary image (black areas correspond to objects," -" white to background)." -msgstr "Маркировка компонент на бинарном изображении (черные области соответствуют объектам, белые – фону)." - -#: ../../common_information/threading_tasks.rst:65 -msgid "30" -msgstr "" - -#: ../../common_information/threading_tasks.rst:65 -msgid "Convex hull construction for components of a binary image." -msgstr "Построение выпуклой оболочки для компонент бинарного изображения." +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-05 13:29+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../common_information/threading_tasks.rst:2 +msgid "Thread parallelism tasks" +msgstr "Задачи параллелизма на потоках" + +#: ../../common_information/threading_tasks.rst:5 +msgid "Variant Number" +msgstr "Номер варианта" + +#: ../../common_information/threading_tasks.rst:5 +msgid "Tasks" +msgstr "Задача" + +#: ../../common_information/threading_tasks.rst:7 +msgid "1" +msgstr "" + +#: ../../common_information/threading_tasks.rst:7 +msgid "" +"Dense matrix multiplication. Elements of type double. Block scheme, " +"Cannon's algorithm." +msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Кэннона." + +#: ../../common_information/threading_tasks.rst:9 +msgid "2" +msgstr "" + +#: ../../common_information/threading_tasks.rst:9 +msgid "" +"Dense matrix multiplication. Elements of type double. Block scheme, Fox's" +" algorithm." +msgstr "Умножение плотных матриц. Элементы типа double. Блочная схема, алгоритм Фокса." + +#: ../../common_information/threading_tasks.rst:11 +msgid "3" +msgstr "" + +#: ../../common_information/threading_tasks.rst:11 +msgid "" +"Dense matrix multiplication. Elements of type double. Strassen's " +"algorithm." +msgstr "Умножение плотных матриц. Элементы типа double. Алгоритм Штрассена." + +#: ../../common_information/threading_tasks.rst:13 +msgid "4" +msgstr "" + +#: ../../common_information/threading_tasks.rst:13 +msgid "" +"Sparse matrix multiplication. Elements of type double. Matrix storage " +"format – row format (Compressed Row Storage)." +msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – строковый (CRS)." + +#: ../../common_information/threading_tasks.rst:15 +msgid "5" +msgstr "" + +#: ../../common_information/threading_tasks.rst:15 +msgid "" +"Sparse matrix multiplication. Elements of type double. 
Matrix storage " +"format – column format (Compressed Column Storage)." +msgstr "Умножение разреженных матриц. Элементы типа double. Формат хранения матрицы – столбцовый (CCS)." + +#: ../../common_information/threading_tasks.rst:17 +msgid "6" +msgstr "" + +#: ../../common_information/threading_tasks.rst:17 +msgid "" +"Sparse matrix multiplication. Complex type elements. Matrix storage " +"format – row format (Compressed Row Storage)." +msgstr "Умножение разреженных матриц. Элементы комплексного типа. Формат хранения матрицы – строковый (CRS)." + +#: ../../common_information/threading_tasks.rst:19 +msgid "7" +msgstr "" + +#: ../../common_information/threading_tasks.rst:19 +msgid "" +"Sparse matrix multiplication. Complex type elements. Matrix storage " +"format – column format (Compressed Column Storage)." +msgstr "Умножение разреженных матриц. Элементы комплексного типа. Формат хранения матрицы – столбцовый (CCS)." + +#: ../../common_information/threading_tasks.rst:21 +msgid "8" +msgstr "" + +#: ../../common_information/threading_tasks.rst:21 +msgid "Solving systems of linear equations using the conjugate gradient method." +msgstr "Решение систем линейных уравнений методом сопряженных градиентов." + +#: ../../common_information/threading_tasks.rst:23 +msgid "9" +msgstr "" + +#: ../../common_information/threading_tasks.rst:23 +msgid "" +"Computing multidimensional integrals using a multistep scheme (rectangle " +"method)." +msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод прямоугольников)." + +#: ../../common_information/threading_tasks.rst:25 +msgid "10" +msgstr "" + +#: ../../common_information/threading_tasks.rst:25 +msgid "" +"Computing multidimensional integrals using a multistep scheme " +"(trapezoidal method)." +msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод трапеций)." 
+ +#: ../../common_information/threading_tasks.rst:27 +msgid "11" +msgstr "" + +#: ../../common_information/threading_tasks.rst:27 +msgid "" +"Computing multidimensional integrals using a multistep scheme (Simpson's " +"method)." +msgstr "Вычисление многомерных интегралов с использованием многошаговой схемы (метод Симпсона)." + +#: ../../common_information/threading_tasks.rst:29 +msgid "12" +msgstr "" + +#: ../../common_information/threading_tasks.rst:29 +msgid "Computing multidimensional integrals using the Monte Carlo method." +msgstr "Вычисление многомерных интегралов методом Монте-Карло." + +#: ../../common_information/threading_tasks.rst:31 +msgid "13" +msgstr "" + +#: ../../common_information/threading_tasks.rst:31 +msgid "Quick sort with simple merging." +msgstr "Сортировка Хоара с простым слиянием." + +#: ../../common_information/threading_tasks.rst:33 +msgid "14" +msgstr "" + +#: ../../common_information/threading_tasks.rst:33 +msgid "Quick sort with odd-even merging (Batcher's method)." +msgstr "Сортировка Хоара с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/threading_tasks.rst:35 +msgid "15" +msgstr "" + +#: ../../common_information/threading_tasks.rst:35 +msgid "Shell sort with simple merging." +msgstr "Сортировка Шелла с простым слиянием." + +#: ../../common_information/threading_tasks.rst:37 +msgid "16" +msgstr "" + +#: ../../common_information/threading_tasks.rst:37 +msgid "Shell sort with odd-even merging (Batcher's method)." +msgstr "Сортировка Шелла с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/threading_tasks.rst:39 +msgid "17" +msgstr "" + +#: ../../common_information/threading_tasks.rst:39 +msgid "Radix sort for integers with simple merging." +msgstr "Поразрядная сортировка для целых чисел с простым слиянием." 
+ +#: ../../common_information/threading_tasks.rst:41 +msgid "18" +msgstr "" + +#: ../../common_information/threading_tasks.rst:41 +msgid "Radix sort for integers with odd-even merging (Batcher's method)." +msgstr "Поразрядная сортировка для целых чисел с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/threading_tasks.rst:43 +msgid "19" +msgstr "" + +#: ../../common_information/threading_tasks.rst:43 +msgid "Radix sort for floating-point numbers (type double) with simple merging." +msgstr "Поразрядная сортировка для вещественных чисел (тип double) с простым слиянием." + +#: ../../common_information/threading_tasks.rst:45 +msgid "20" +msgstr "" + +#: ../../common_information/threading_tasks.rst:45 +msgid "" +"Radix sort for floating-point numbers (type double) with odd-even merging" +" (Batcher's method)." +msgstr "Поразрядная сортировка для вещественных чисел (тип double) с четно-нечетным слиянием Бэтчера." + +#: ../../common_information/threading_tasks.rst:47 +msgid "21" +msgstr "" + +#: ../../common_information/threading_tasks.rst:47 +msgid "Shortest path search from one vertex (Dijkstra's algorithm)." +msgstr "Поиск кратчайших путей из одной вершины (алгоритм Дейкстры)." + +#: ../../common_information/threading_tasks.rst:49 +msgid "22" +msgstr "" + +#: ../../common_information/threading_tasks.rst:49 +msgid "Convex hull construction – Graham's scan." +msgstr "Построение выпуклой оболочки – проход Грэхема." + +#: ../../common_information/threading_tasks.rst:51 +msgid "23" +msgstr "" + +#: ../../common_information/threading_tasks.rst:51 +msgid "Convex hull construction – Jarvis's march." +msgstr "Построение выпуклой оболочки – проход Джарвиса." + +#: ../../common_information/threading_tasks.rst:53 +msgid "24" +msgstr "" + +#: ../../common_information/threading_tasks.rst:53 +msgid "Linear image filtering (horizontal partition). Gaussian kernel 3x3." +msgstr "Линейная фильтрация изображений (горизонтальное разбиение). Ядро Гаусса 3x3." 
+ +#: ../../common_information/threading_tasks.rst:55 +msgid "25" +msgstr "" + +#: ../../common_information/threading_tasks.rst:55 +msgid "Linear image filtering (vertical partition). Gaussian kernel 3x3." +msgstr "Линейная фильтрация изображений (вертикальное разбиение). Ядро Гаусса 3x3." + +#: ../../common_information/threading_tasks.rst:57 +msgid "26" +msgstr "" + +#: ../../common_information/threading_tasks.rst:57 +msgid "Linear image filtering (block partition). Gaussian kernel 3x3." +msgstr "Линейная фильтрация изображений (блочное разбиение). Ядро Гаусса 3x3." + +#: ../../common_information/threading_tasks.rst:59 +msgid "27" +msgstr "" + +#: ../../common_information/threading_tasks.rst:59 +msgid "Edge detection in an image using the Sobel operator." +msgstr "Выделение ребер на изображении с использованием оператора Собеля." + +#: ../../common_information/threading_tasks.rst:61 +msgid "28" +msgstr "" + +#: ../../common_information/threading_tasks.rst:61 +msgid "Contrast enhancement of grayscale image using linear histogram stretching." +msgstr "Повышение контраста полутонового изображения посредством линейной растяжки гистограммы." + +#: ../../common_information/threading_tasks.rst:63 +msgid "29" +msgstr "" + +#: ../../common_information/threading_tasks.rst:63 +msgid "" +"Labeling components on a binary image (black areas correspond to objects," +" white to background)." +msgstr "Маркировка компонент на бинарном изображении (черные области соответствуют объектам, белые – фону)." + +#: ../../common_information/threading_tasks.rst:65 +msgid "30" +msgstr "" + +#: ../../common_information/threading_tasks.rst:65 +msgid "Convex hull construction for components of a binary image." +msgstr "Построение выпуклой оболочки для компонент бинарного изображения." 
diff --git a/docs/locale/ru/LC_MESSAGES/index.po b/docs/locale/ru/LC_MESSAGES/index.po index c594e6ba15..f90f995c5b 100644 --- a/docs/locale/ru/LC_MESSAGES/index.po +++ b/docs/locale/ru/LC_MESSAGES/index.po @@ -1,47 +1,47 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-02-05 13:28+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../index.rst:6 -msgid "User Guide:" -msgstr "Инструкция по выполнению работы" - -#: ../../index.rst:16 -msgid "Course Details:" -msgstr "Детали курса" - -#: ../../index.rst:2 -msgid "Parallel Programming Course documentation" -msgstr "Документация по курсу «Параллельное программирование»" - -#: ../../index.rst:4 -msgid "" -"Below is the table of contents for the Parallel Programming Course " -"documentation. Follow the links to learn more about each topic." -msgstr "" -"Ниже приведено оглавление документации по курсу «Параллельное " -"программирование». Вы можете перейти по ссылкам, чтобы узнать больше о " -"каждой теме." - -#~ msgid "Common Information:" -#~ msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-02-05 13:28+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../index.rst:6 +msgid "User Guide:" +msgstr "Инструкция по выполнению работы" + +#: ../../index.rst:16 +msgid "Course Details:" +msgstr "Детали курса" + +#: ../../index.rst:2 +msgid "Parallel Programming Course documentation" +msgstr "Документация по курсу «Параллельное программирование»" + +#: ../../index.rst:4 +msgid "" +"Below is the table of contents for the Parallel Programming Course " +"documentation. Follow the links to learn more about each topic." +msgstr "" +"Ниже приведено оглавление документации по курсу «Параллельное " +"программирование». Вы можете перейти по ссылкам, чтобы узнать больше о " +"каждой теме." + +#~ msgid "Common Information:" +#~ msgstr "" + diff --git a/docs/locale/ru/LC_MESSAGES/user_guide/api.po b/docs/locale/ru/LC_MESSAGES/user_guide/api.po index 7379ae5056..afc1f0853c 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/api.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/api.po @@ -1,74 +1,74 @@ -# Parallel Programming Course Documentation. -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. 
-# -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-08-22 21:38+0200\n" -"PO-Revision-Date: 2025-08-22 21:45+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../../../docs/user_guide/api.rst:2 -msgid "API Reference" -msgstr "Справочник API" - -#: ../../../../docs/user_guide/api.rst:8 -msgid "Runners Module" -msgstr "Модуль выполнения" - -#: ../../../../docs/user_guide/api.rst -msgid "Functions" -msgstr "Функции" - -#: ../../../../docs/user_guide/api.rst -msgid "Parameters" -msgstr "Параметры" - -#: ../../../../docs/user_guide/api.rst -msgid "Returns" -msgstr "Возвращаемые значения" - -#: ../../../../docs/user_guide/api.rst:14 -msgid "Task Module" -msgstr "Модуль задач" - -#: ../../../../docs/user_guide/api.rst -msgid "Typedefs" -msgstr "Псевдонимы типов" - -#: ../../../../docs/user_guide/api.rst -msgid "Template Parameters" -msgstr "Параметры шаблона" - -#: ../../../../docs/user_guide/api.rst -msgid "Enums" -msgstr "Перечисления" - -#: ../../../../docs/user_guide/api.rst -msgid "Throws" -msgstr "Исключения" - -#: ../../../../docs/user_guide/api.rst -msgid "Variables" -msgstr "Переменные" - -#: ../../../../docs/user_guide/api.rst:20 -msgid "Utility Module" -msgstr "Вспомогательный модуль (модуль с утилитами)" - -#: ../../../../docs/user_guide/api.rst:26 -msgid "Performance Module" -msgstr "Модуль измерения производительности" - +# Parallel Programming Course Documentation. 
+# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-08-22 21:38+0200\n" +"PO-Revision-Date: 2025-08-22 21:45+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../../../docs/user_guide/api.rst:2 +msgid "API Reference" +msgstr "Справочник API" + +#: ../../../../docs/user_guide/api.rst:8 +msgid "Runners Module" +msgstr "Модуль выполнения" + +#: ../../../../docs/user_guide/api.rst +msgid "Functions" +msgstr "Функции" + +#: ../../../../docs/user_guide/api.rst +msgid "Parameters" +msgstr "Параметры" + +#: ../../../../docs/user_guide/api.rst +msgid "Returns" +msgstr "Возвращаемые значения" + +#: ../../../../docs/user_guide/api.rst:14 +msgid "Task Module" +msgstr "Модуль задач" + +#: ../../../../docs/user_guide/api.rst +msgid "Typedefs" +msgstr "Псевдонимы типов" + +#: ../../../../docs/user_guide/api.rst +msgid "Template Parameters" +msgstr "Параметры шаблона" + +#: ../../../../docs/user_guide/api.rst +msgid "Enums" +msgstr "Перечисления" + +#: ../../../../docs/user_guide/api.rst +msgid "Throws" +msgstr "Исключения" + +#: ../../../../docs/user_guide/api.rst +msgid "Variables" +msgstr "Переменные" + +#: ../../../../docs/user_guide/api.rst:20 +msgid "Utility Module" +msgstr "Вспомогательный модуль (модуль с утилитами)" + +#: ../../../../docs/user_guide/api.rst:26 +msgid "Performance Module" +msgstr "Модуль измерения производительности" + diff 
--git a/docs/locale/ru/LC_MESSAGES/user_guide/build.po b/docs/locale/ru/LC_MESSAGES/user_guide/build.po index a6cc4131ef..9f7a446592 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/build.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/build.po @@ -1,74 +1,74 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-20 23:19+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/build.rst:2 -msgid "Build the Project with ``CMake``" -msgstr "Построение проекта с помощью системы сборки ``CMake``" - -#: ../../user_guide/build.rst:4 -msgid "Navigate to a source code folder." -msgstr "Перейдите в корень директории проекта" - -#: ../../user_guide/build.rst:6 -msgid "**Configure the build**: ``Makefile``, ``.sln``, etc." -msgstr "**Конфигурация проекта**: ``Makefile``, ``.sln``, и т.д." - -#: ../../user_guide/build.rst:13 -msgid "*Help on CMake keys:*" -msgstr "*Важные CMake ключи для конфигурации проекта:*" - -#: ../../user_guide/build.rst:20 -msgid "``-D USE_FUNC_TESTS=ON`` enable functional tests." -msgstr "``-D USE_FUNC_TESTS=ON`` включает функциональные тесты." - -#: ../../user_guide/build.rst:21 -msgid "``-D USE_PERF_TESTS=ON`` enable performance tests." -msgstr "``-D USE_PERF_TESTS=ON`` включает тесты на производительность." 
- -#: ../../user_guide/build.rst:22 -msgid "``-D CMAKE_BUILD_TYPE=Release`` normal build (default)." -msgstr "``-D CMAKE_BUILD_TYPE=Release`` нормальная сборка (по умолчанию)." - -#: ../../user_guide/build.rst:23 -msgid "``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` recommended when using sanitizers or running ``valgrind`` to keep debug information." -msgstr "``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` рекомендуется при использовании санитайзеров или запуске ``valgrind`` для сохранения отладочной информации." - -#: ../../user_guide/build.rst:24 -msgid "``-D CMAKE_BUILD_TYPE=Debug`` for debugging sessions." -msgstr "``-D CMAKE_BUILD_TYPE=Debug`` используется при отладке." - -#: ../../user_guide/build.rst:24 -msgid "*A corresponding flag can be omitted if it's not needed.*" -msgstr "*Ряд CMake флагов может быть выключен, если они не требуются для выполнения работы.*" - -#: ../../user_guide/build.rst:26 -msgid "**Build the project**:" -msgstr "**Построение проекта**:" - -#: ../../user_guide/build.rst:32 -msgid "**Check the task**:" -msgstr "**Проверка задач**:" - -#: ../../user_guide/build.rst:34 -msgid "Run ``/build/bin``" -msgstr "Запустите ``/build/bin``" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-20 23:19+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/build.rst:2 +msgid "Build the Project with ``CMake``" +msgstr "Построение проекта с помощью системы сборки ``CMake``" + +#: ../../user_guide/build.rst:4 +msgid "Navigate to a source code folder." +msgstr "Перейдите в корень директории проекта" + +#: ../../user_guide/build.rst:6 +msgid "**Configure the build**: ``Makefile``, ``.sln``, etc." +msgstr "**Конфигурация проекта**: ``Makefile``, ``.sln``, и т.д." + +#: ../../user_guide/build.rst:13 +msgid "*Help on CMake keys:*" +msgstr "*Важные CMake ключи для конфигурации проекта:*" + +#: ../../user_guide/build.rst:20 +msgid "``-D USE_FUNC_TESTS=ON`` enable functional tests." +msgstr "``-D USE_FUNC_TESTS=ON`` включает функциональные тесты." + +#: ../../user_guide/build.rst:21 +msgid "``-D USE_PERF_TESTS=ON`` enable performance tests." +msgstr "``-D USE_PERF_TESTS=ON`` включает тесты на производительность." + +#: ../../user_guide/build.rst:22 +msgid "``-D CMAKE_BUILD_TYPE=Release`` normal build (default)." +msgstr "``-D CMAKE_BUILD_TYPE=Release`` нормальная сборка (по умолчанию)." + +#: ../../user_guide/build.rst:23 +msgid "``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` recommended when using sanitizers or running ``valgrind`` to keep debug information." +msgstr "``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` рекомендуется при использовании санитайзеров или запуске ``valgrind`` для сохранения отладочной информации." + +#: ../../user_guide/build.rst:24 +msgid "``-D CMAKE_BUILD_TYPE=Debug`` for debugging sessions." +msgstr "``-D CMAKE_BUILD_TYPE=Debug`` используется при отладке." 
+ +#: ../../user_guide/build.rst:24 +msgid "*A corresponding flag can be omitted if it's not needed.*" +msgstr "*Ряд CMake флагов может быть выключен, если они не требуются для выполнения работы.*" + +#: ../../user_guide/build.rst:26 +msgid "**Build the project**:" +msgstr "**Построение проекта**:" + +#: ../../user_guide/build.rst:32 +msgid "**Check the task**:" +msgstr "**Проверка задач**:" + +#: ../../user_guide/build.rst:34 +msgid "Run ``/build/bin``" +msgstr "Запустите ``/build/bin``" diff --git a/docs/locale/ru/LC_MESSAGES/user_guide/ci.po b/docs/locale/ru/LC_MESSAGES/user_guide/ci.po index 494f28d3eb..3b75c1b6ff 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/ci.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/ci.po @@ -1,151 +1,151 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-07-27 12:32+0200\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../user_guide/ci.rst:2 -msgid "Continuous Integration (CI)" -msgstr "Непрерывная интеграция" - -#: ../../user_guide/ci.rst:4 -msgid "" -"Students need to pass all the checks in the CI pipeline before their work" -" can be considered for submission. This includes successful code " -"checkout, build ans testing stages. 
Each integration is verified by an " -"automated build and automated tests." -msgstr "" -"Студенты должны пройти все проверки в конвейере CI, прежде чем начнется " -"проверка работы студентами и преподавателями. Это включает в себя " -"успешную проверку кода, а также стадии построения и тестирования. Каждая" -" интеграция проверяется автоматизированной сборкой и автоматизированными " -"тестами." - -#: ../../user_guide/ci.rst:9 -msgid "CI Pipeline" -msgstr "CI конвейер" - -#: ../../user_guide/ci.rst:11 -msgid "The CI pipeline for this project is illustrated in the following diagram:" -msgstr "CI конвейер для данного проекта выражается с помощью наглядной схемы:" - -#: ../../user_guide/ci.rst:13 -msgid "CI Pipeline Diagram" -msgstr "Схема CI конвейера" - -#: ../../user_guide/ci.rst:18 -msgid "Running ``scripts/run_tests.py``" -msgstr "Запуск ``scripts/run_tests.py``" - -#: ../../user_guide/ci.rst:20 -msgid "" -"Automated tests are executed through the ``scripts/run_tests.py`` helper." -" The script requires several environment variables to be defined:" -msgstr "" -"Автоматические тесты запускаются с помощью вспомогательного скрипта " -"``scripts/run_tests.py``. Для его работы следует задать несколько переменных " -"окружения:" - -#: ../../user_guide/ci.rst:23 -msgid "``PPC_NUM_THREADS``" -msgstr "" - -#: ../../user_guide/ci.rst:24 -msgid "" -"Number of threads to use. The value is also exported as " -"``OMP_NUM_THREADS``." -msgstr "" -"Количество потоков для запуска. Это же значение экспортируется в " -"``OMP_NUM_THREADS``." - -#: ../../user_guide/ci.rst:27 -msgid "``PPC_NUM_PROC``" -msgstr "" - -#: ../../user_guide/ci.rst:28 -msgid "Number of MPI processes to launch." -msgstr "Количество процессов MPI, которые нужно запустить." - -#: ../../user_guide/ci.rst:30 -msgid "``PPC_ASAN_RUN``" -msgstr "" - -#: ../../user_guide/ci.rst:31 -msgid "" -"Set to ``1`` when sanitizers are enabled to skip ``valgrind`` runs " -"(optional, default ``0``)." 
-msgstr "" -"Установите ``1``, если включены санитайзеры, чтобы пропустить запуск " -"``valgrind`` (необязательно, по умолчанию ``0``)." - -#: ../../user_guide/ci.rst:34 -msgid "``PPC_IGNORE_TEST_TIME_LIMIT``" -msgstr "" - -#: ../../user_guide/ci.rst:35 -msgid "Set to ``1`` to disable test time limits (optional, default ``0``)." -msgstr "" -"Установите ``1``, чтобы отключить ограничение времени тестов " -"(необязательно, по умолчанию ``0``)." - -#: ../../user_guide/ci.rst:37 -msgid "" -"The execution mode is selected with ``--running-type``. The most common " -"modes are ``threads`` for shared-memory backends and ``processes`` for " -"MPI based tests. ``performance`` mode runs performance benchmarks." -msgstr "" -"Режим выполнения выбирается параметром ``--running-type``. Чаще всего " -"используются режимы ``threads`` для бекендов с общей памятью и " -"``processes`` для тестов на основе MPI. Режим ``performance`` запускает " -"бенчмарки производительности." - -#: ../../user_guide/ci.rst:41 -msgid "Example usage:" -msgstr "Пример использования:" - -#: ../../user_guide/ci.rst:57 -msgid "" -"Additional MPI arguments can be supplied with ``--additional-mpi-args`` " -"when running in ``processes`` mode." -msgstr "" -"Дополнительные аргументы MPI можно передать через ``--additional-mpi-args`` " -"при запуске в режиме ``processes``." - -#: ../../user_guide/ci.rst:60 -msgid "" -"The ``--counts`` option allows sequential execution of tests with several" -" thread/process counts. When specified, the script will iterate over the" -" provided values, updating ``PPC_NUM_THREADS`` or ``PPC_NUM_PROC`` " -"accordingly before each run." -msgstr "" -"Параметр ``--counts`` позволяет последовательно запускать тесты с несколькими " -"значениями числа потоков или процессов. При его указании скрипт перебирает " -"переданные значения, перед каждым запуском обновляя ``PPC_NUM_THREADS`` или " -"``PPC_NUM_PROC``." 
- -#: ../../user_guide/ci.rst:65 -msgid "" -"Use ``--verbose`` to print every command executed by ``run_tests.py``. " -"This can be helpful for debugging CI failures or verifying the exact " -"arguments passed to the test binaries." -msgstr "" -"Используйте ``--verbose``, чтобы вывести каждую команду, выполняемую " -"``run_tests.py``. Это помогает отлаживать сбои в CI или проверять точные " -"аргументы, передаваемые тестовым бинарникам." +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-07-27 12:32+0200\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../user_guide/ci.rst:2 +msgid "Continuous Integration (CI)" +msgstr "Непрерывная интеграция" + +#: ../../user_guide/ci.rst:4 +msgid "" +"Students need to pass all the checks in the CI pipeline before their work" +" can be considered for submission. This includes successful code " +"checkout, build ans testing stages. Each integration is verified by an " +"automated build and automated tests." +msgstr "" +"Студенты должны пройти все проверки в конвейере CI, прежде чем начнется " +"проверка работы студентами и преподавателями. Это включает в себя " +"успешную проверку кода, а также стадии построения и тестирования. 
Каждая" +" интеграция проверяется автоматизированной сборкой и автоматизированными " +"тестами." + +#: ../../user_guide/ci.rst:9 +msgid "CI Pipeline" +msgstr "CI конвейер" + +#: ../../user_guide/ci.rst:11 +msgid "The CI pipeline for this project is illustrated in the following diagram:" +msgstr "CI конвейер для данного проекта выражается с помощью наглядной схемы:" + +#: ../../user_guide/ci.rst:13 +msgid "CI Pipeline Diagram" +msgstr "Схема CI конвейера" + +#: ../../user_guide/ci.rst:18 +msgid "Running ``scripts/run_tests.py``" +msgstr "Запуск ``scripts/run_tests.py``" + +#: ../../user_guide/ci.rst:20 +msgid "" +"Automated tests are executed through the ``scripts/run_tests.py`` helper." +" The script requires several environment variables to be defined:" +msgstr "" +"Автоматические тесты запускаются с помощью вспомогательного скрипта " +"``scripts/run_tests.py``. Для его работы следует задать несколько переменных " +"окружения:" + +#: ../../user_guide/ci.rst:23 +msgid "``PPC_NUM_THREADS``" +msgstr "" + +#: ../../user_guide/ci.rst:24 +msgid "" +"Number of threads to use. The value is also exported as " +"``OMP_NUM_THREADS``." +msgstr "" +"Количество потоков для запуска. Это же значение экспортируется в " +"``OMP_NUM_THREADS``." + +#: ../../user_guide/ci.rst:27 +msgid "``PPC_NUM_PROC``" +msgstr "" + +#: ../../user_guide/ci.rst:28 +msgid "Number of MPI processes to launch." +msgstr "Количество процессов MPI, которые нужно запустить." + +#: ../../user_guide/ci.rst:30 +msgid "``PPC_ASAN_RUN``" +msgstr "" + +#: ../../user_guide/ci.rst:31 +msgid "" +"Set to ``1`` when sanitizers are enabled to skip ``valgrind`` runs " +"(optional, default ``0``)." +msgstr "" +"Установите ``1``, если включены санитайзеры, чтобы пропустить запуск " +"``valgrind`` (необязательно, по умолчанию ``0``)." + +#: ../../user_guide/ci.rst:34 +msgid "``PPC_IGNORE_TEST_TIME_LIMIT``" +msgstr "" + +#: ../../user_guide/ci.rst:35 +msgid "Set to ``1`` to disable test time limits (optional, default ``0``)." 
+msgstr "" +"Установите ``1``, чтобы отключить ограничение времени тестов " +"(необязательно, по умолчанию ``0``)." + +#: ../../user_guide/ci.rst:37 +msgid "" +"The execution mode is selected with ``--running-type``. The most common " +"modes are ``threads`` for shared-memory backends and ``processes`` for " +"MPI based tests. ``performance`` mode runs performance benchmarks." +msgstr "" +"Режим выполнения выбирается параметром ``--running-type``. Чаще всего " +"используются режимы ``threads`` для бекендов с общей памятью и " +"``processes`` для тестов на основе MPI. Режим ``performance`` запускает " +"бенчмарки производительности." + +#: ../../user_guide/ci.rst:41 +msgid "Example usage:" +msgstr "Пример использования:" + +#: ../../user_guide/ci.rst:57 +msgid "" +"Additional MPI arguments can be supplied with ``--additional-mpi-args`` " +"when running in ``processes`` mode." +msgstr "" +"Дополнительные аргументы MPI можно передать через ``--additional-mpi-args`` " +"при запуске в режиме ``processes``." + +#: ../../user_guide/ci.rst:60 +msgid "" +"The ``--counts`` option allows sequential execution of tests with several" +" thread/process counts. When specified, the script will iterate over the" +" provided values, updating ``PPC_NUM_THREADS`` or ``PPC_NUM_PROC`` " +"accordingly before each run." +msgstr "" +"Параметр ``--counts`` позволяет последовательно запускать тесты с несколькими " +"значениями числа потоков или процессов. При его указании скрипт перебирает " +"переданные значения, перед каждым запуском обновляя ``PPC_NUM_THREADS`` или " +"``PPC_NUM_PROC``." + +#: ../../user_guide/ci.rst:65 +msgid "" +"Use ``--verbose`` to print every command executed by ``run_tests.py``. " +"This can be helpful for debugging CI failures or verifying the exact " +"arguments passed to the test binaries." +msgstr "" +"Используйте ``--verbose``, чтобы вывести каждую команду, выполняемую " +"``run_tests.py``. 
Это помогает отлаживать сбои в CI или проверять точные " +"аргументы, передаваемые тестовым бинарникам." diff --git a/docs/locale/ru/LC_MESSAGES/user_guide/download.po b/docs/locale/ru/LC_MESSAGES/user_guide/download.po index c4430876d3..b0e70bd2ff 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/download.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/download.po @@ -1,26 +1,26 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-20 23:19+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/download.rst:2 -msgid "Download all submodules" -msgstr "Скачивание всех внешних библиотек" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-20 23:19+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 
0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/download.rst:2 +msgid "Download all submodules" +msgstr "Скачивание всех внешних библиотек" diff --git a/docs/locale/ru/LC_MESSAGES/user_guide/environment.po b/docs/locale/ru/LC_MESSAGES/user_guide/environment.po index a70ae2cbe4..9235f1ea51 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/environment.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/environment.po @@ -1,228 +1,228 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-07-27 12:55+0200\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../user_guide/environment.rst:2 -msgid "Set Up Your Environment" -msgstr "Настройка окружения" - -#: ../../user_guide/environment.rst:5 -msgid "Development Container (Recommended)" -msgstr "Контейнер разработки (Рекомендуется)" - -#: ../../user_guide/environment.rst:6 -msgid "" -"The easiest way to set up your development environment is using the " -"provided ``.devcontainer`` configuration with VS Code and Docker." 
-msgstr "" -"Самый простой способ настроить среду разработки - использовать " -"предоставленную конфигурацию ``.devcontainer`` с VS Code и Docker." - -#: ../../user_guide/environment.rst:8 -msgid "**Prerequisites:**" -msgstr "**Требования:**" - -#: ../../user_guide/environment.rst:10 -msgid "`Visual Studio Code `_" -msgstr "`Visual Studio Code `_" - -#: ../../user_guide/environment.rst:11 -msgid "`Docker Desktop `_" -msgstr "`Docker Desktop `_" - -#: ../../user_guide/environment.rst:12 -msgid "" -"`Dev Containers extension " -"`_" -msgstr "" -"`Расширение Dev Containers " -"`_" - -#: ../../user_guide/environment.rst:14 -msgid "**Setup:**" -msgstr "**Настройка:**" - -#: ../../user_guide/environment.rst:16 -msgid "Clone the repository and open it in VS Code" -msgstr "Клонируйте репозиторий и откройте его в VS Code" - -#: ../../user_guide/environment.rst:17 -msgid "" -"When prompted, click \"Reopen in Container\" or use Command Palette: " -"``Dev Containers: Reopen in Container``" -msgstr "" -"При появлении запроса нажмите \"Reopen in Container\" или используйте " -"палитру команд: ``Dev Containers: Reopen in Container``" - -#: ../../user_guide/environment.rst:18 -msgid "" -"VS Code will automatically build the container with all dependencies pre-" -"installed" -msgstr "" -"VS Code автоматически соберет контейнер со всеми предустановленными " -"зависимостями" - -#: ../../user_guide/environment.rst:19 -msgid "The container includes:" -msgstr "Контейнер включает:" - -#: ../../user_guide/environment.rst:21 -msgid "Ubuntu environment with gcc-14, CMake, MPI, OpenMP" -msgstr "Окружение Ubuntu с gcc-14, CMake, MPI, OpenMP" - -#: ../../user_guide/environment.rst:22 -msgid "Pre-configured C++ and Python development tools" -msgstr "Предварительно настроенные инструменты разработки C++ и Python" - -#: ../../user_guide/environment.rst:23 -msgid "All project dependencies ready to use" -msgstr "Все зависимости проекта готовы к использованию" - -#: 
../../user_guide/environment.rst:25 -msgid "" -"This provides a consistent development environment across all platforms " -"without manual dependency installation." -msgstr "" -"Это обеспечивает единообразную среду разработки на всех платформах без " -"ручной установки зависимостей." - -#: ../../user_guide/environment.rst:28 -msgid "Manual Setup" -msgstr "Ручная настройка" - -#: ../../user_guide/environment.rst:30 -msgid "" -"If you prefer manual setup or cannot use containers, follow the " -"instructions below." -msgstr "" -"Если вы предпочитаете ручную настройку или не можете использовать " -"контейнеры, следуйте инструкциям ниже." - -#: ../../user_guide/environment.rst:33 -msgid "Build prerequisites" -msgstr "Требования к сборке" - -#: ../../user_guide/environment.rst:34 -msgid "" -"**Windows**: Download and install CMake from https://cmake.org/download " -"(select the Windows installer) or install using Chocolatey:" -msgstr "" -"**Windows**: Загрузите и установите CMake с https://cmake.org/download " -"(выберите установщик для Windows) или установите с помощью Chocolatey:" - -#: ../../user_guide/environment.rst:40 -msgid "**Linux (Ubuntu/Debian)**: Install using package manager:" -msgstr "**Linux (Ubuntu/Debian)**: Установите с помощью менеджера пакетов:" - -#: ../../user_guide/environment.rst:47 -msgid "**macOS**: Install using Homebrew:" -msgstr "**macOS**: Установите с помощью Homebrew:" - -#: ../../user_guide/environment.rst:55 -msgid "Code Style Analysis" -msgstr "Анализ стиля кодирования" - -#: ../../user_guide/environment.rst:56 -msgid "" -"Please follow the `Google C++ Style Guide " -"`_." -msgstr "" -"Пожалуйста пройдите по ссылке для изучения стиля кодирования - `Google " -"C++ Style Guide `_." - -#: ../../user_guide/environment.rst:58 -msgid "" -"Code style is checked using the `clang-format " -"`_ tool." -msgstr "" -"Проверка стиля кода выполняется с помощью инструмента `clang-format " -"`_." 
- -#: ../../user_guide/environment.rst:61 -msgid "Parallel Programming Technologies" -msgstr "Технологии параллельного программирования" - -#: ../../user_guide/environment.rst:64 -msgid "``MPI``" -msgstr "``MPI``" - -#: ../../user_guide/environment.rst:65 -msgid "**Windows (MSVC)**:" -msgstr "**Windows (MSVC)**:" - -#: ../../user_guide/environment.rst:67 -msgid "" -"`Installers link `_. You have to install " -"``msmpisdk.msi`` and ``msmpisetup.exe``." -msgstr "" -"`Ссылка на установку `_. Вы должны установить 2 файла - " -"``msmpisdk.msi`` и ``msmpisetup.exe``." - -#: ../../user_guide/environment.rst:69 ../../user_guide/environment.rst:85 -msgid "**Linux (gcc and clang)**:" -msgstr "**Linux (gcc and clang)**:" - -#: ../../user_guide/environment.rst:75 -msgid "**MacOS (apple clang)**:" -msgstr "**MacOS (apple clang)**:" - -#: ../../user_guide/environment.rst:82 -msgid "``OpenMP``" -msgstr "``OpenMP``" - -#: ../../user_guide/environment.rst:83 -msgid "" -"``OpenMP`` is included in ``gcc`` and ``msvc``, but some components " -"should be installed additionally:" -msgstr "" -"``OpenMP`` в большинстве случаев включен в компиляторы ``gcc`` и " -"``msvc``, но ряд компонент все равно должны быть установлены " -"дополнительно:" - -#: ../../user_guide/environment.rst:91 -msgid "**MacOS (llvm)**:" -msgstr "**MacOS (llvm)**:" - -#: ../../user_guide/environment.rst:99 -msgid "``TBB``" -msgstr "``TBB``" - -#: ../../user_guide/environment.rst:100 -msgid "" -"**Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: " -"Build as 3rdparty in the current project." -msgstr "" -"**Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: " -"Данная библиотека строится как внешняя в составе текущего проекта и не " -"требует дополнительных операций." - -#: ../../user_guide/environment.rst:104 -msgid "``std::thread``" -msgstr "``std::thread``" - -#: ../../user_guide/environment.rst:105 -msgid "``std::thread`` is included in STL libraries." 
-msgstr "``std::thread`` включена в состав STL библиотек." +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-07-27 12:55+0200\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../user_guide/environment.rst:2 +msgid "Set Up Your Environment" +msgstr "Настройка окружения" + +#: ../../user_guide/environment.rst:5 +msgid "Development Container (Recommended)" +msgstr "Контейнер разработки (Рекомендуется)" + +#: ../../user_guide/environment.rst:6 +msgid "" +"The easiest way to set up your development environment is using the " +"provided ``.devcontainer`` configuration with VS Code and Docker." +msgstr "" +"Самый простой способ настроить среду разработки - использовать " +"предоставленную конфигурацию ``.devcontainer`` с VS Code и Docker." 
+ +#: ../../user_guide/environment.rst:8 +msgid "**Prerequisites:**" +msgstr "**Требования:**" + +#: ../../user_guide/environment.rst:10 +msgid "`Visual Studio Code `_" +msgstr "`Visual Studio Code `_" + +#: ../../user_guide/environment.rst:11 +msgid "`Docker Desktop `_" +msgstr "`Docker Desktop `_" + +#: ../../user_guide/environment.rst:12 +msgid "" +"`Dev Containers extension " +"`_" +msgstr "" +"`Расширение Dev Containers " +"`_" + +#: ../../user_guide/environment.rst:14 +msgid "**Setup:**" +msgstr "**Настройка:**" + +#: ../../user_guide/environment.rst:16 +msgid "Clone the repository and open it in VS Code" +msgstr "Клонируйте репозиторий и откройте его в VS Code" + +#: ../../user_guide/environment.rst:17 +msgid "" +"When prompted, click \"Reopen in Container\" or use Command Palette: " +"``Dev Containers: Reopen in Container``" +msgstr "" +"При появлении запроса нажмите \"Reopen in Container\" или используйте " +"палитру команд: ``Dev Containers: Reopen in Container``" + +#: ../../user_guide/environment.rst:18 +msgid "" +"VS Code will automatically build the container with all dependencies pre-" +"installed" +msgstr "" +"VS Code автоматически соберет контейнер со всеми предустановленными " +"зависимостями" + +#: ../../user_guide/environment.rst:19 +msgid "The container includes:" +msgstr "Контейнер включает:" + +#: ../../user_guide/environment.rst:21 +msgid "Ubuntu environment with gcc-14, CMake, MPI, OpenMP" +msgstr "Окружение Ubuntu с gcc-14, CMake, MPI, OpenMP" + +#: ../../user_guide/environment.rst:22 +msgid "Pre-configured C++ and Python development tools" +msgstr "Предварительно настроенные инструменты разработки C++ и Python" + +#: ../../user_guide/environment.rst:23 +msgid "All project dependencies ready to use" +msgstr "Все зависимости проекта готовы к использованию" + +#: ../../user_guide/environment.rst:25 +msgid "" +"This provides a consistent development environment across all platforms " +"without manual dependency installation." 
+msgstr "" +"Это обеспечивает единообразную среду разработки на всех платформах без " +"ручной установки зависимостей." + +#: ../../user_guide/environment.rst:28 +msgid "Manual Setup" +msgstr "Ручная настройка" + +#: ../../user_guide/environment.rst:30 +msgid "" +"If you prefer manual setup or cannot use containers, follow the " +"instructions below." +msgstr "" +"Если вы предпочитаете ручную настройку или не можете использовать " +"контейнеры, следуйте инструкциям ниже." + +#: ../../user_guide/environment.rst:33 +msgid "Build prerequisites" +msgstr "Требования к сборке" + +#: ../../user_guide/environment.rst:34 +msgid "" +"**Windows**: Download and install CMake from https://cmake.org/download " +"(select the Windows installer) or install using Chocolatey:" +msgstr "" +"**Windows**: Загрузите и установите CMake с https://cmake.org/download " +"(выберите установщик для Windows) или установите с помощью Chocolatey:" + +#: ../../user_guide/environment.rst:40 +msgid "**Linux (Ubuntu/Debian)**: Install using package manager:" +msgstr "**Linux (Ubuntu/Debian)**: Установите с помощью менеджера пакетов:" + +#: ../../user_guide/environment.rst:47 +msgid "**macOS**: Install using Homebrew:" +msgstr "**macOS**: Установите с помощью Homebrew:" + +#: ../../user_guide/environment.rst:55 +msgid "Code Style Analysis" +msgstr "Анализ стиля кодирования" + +#: ../../user_guide/environment.rst:56 +msgid "" +"Please follow the `Google C++ Style Guide " +"`_." +msgstr "" +"Пожалуйста пройдите по ссылке для изучения стиля кодирования - `Google " +"C++ Style Guide `_." + +#: ../../user_guide/environment.rst:58 +msgid "" +"Code style is checked using the `clang-format " +"`_ tool." +msgstr "" +"Проверка стиля кода выполняется с помощью инструмента `clang-format " +"`_." 
+ +#: ../../user_guide/environment.rst:61 +msgid "Parallel Programming Technologies" +msgstr "Технологии параллельного программирования" + +#: ../../user_guide/environment.rst:64 +msgid "``MPI``" +msgstr "``MPI``" + +#: ../../user_guide/environment.rst:65 +msgid "**Windows (MSVC)**:" +msgstr "**Windows (MSVC)**:" + +#: ../../user_guide/environment.rst:67 +msgid "" +"`Installers link `_. You have to install " +"``msmpisdk.msi`` and ``msmpisetup.exe``." +msgstr "" +"`Ссылка на установку `_. Вы должны установить 2 файла - " +"``msmpisdk.msi`` и ``msmpisetup.exe``." + +#: ../../user_guide/environment.rst:69 ../../user_guide/environment.rst:85 +msgid "**Linux (gcc and clang)**:" +msgstr "**Linux (gcc and clang)**:" + +#: ../../user_guide/environment.rst:75 +msgid "**MacOS (apple clang)**:" +msgstr "**MacOS (apple clang)**:" + +#: ../../user_guide/environment.rst:82 +msgid "``OpenMP``" +msgstr "``OpenMP``" + +#: ../../user_guide/environment.rst:83 +msgid "" +"``OpenMP`` is included in ``gcc`` and ``msvc``, but some components " +"should be installed additionally:" +msgstr "" +"``OpenMP`` в большинстве случаев включен в компиляторы ``gcc`` и " +"``msvc``, но ряд компонент все равно должны быть установлены " +"дополнительно:" + +#: ../../user_guide/environment.rst:91 +msgid "**MacOS (llvm)**:" +msgstr "**MacOS (llvm)**:" + +#: ../../user_guide/environment.rst:99 +msgid "``TBB``" +msgstr "``TBB``" + +#: ../../user_guide/environment.rst:100 +msgid "" +"**Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: " +"Build as 3rdparty in the current project." +msgstr "" +"**Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: " +"Данная библиотека строится как внешняя в составе текущего проекта и не " +"требует дополнительных операций." + +#: ../../user_guide/environment.rst:104 +msgid "``std::thread``" +msgstr "``std::thread``" + +#: ../../user_guide/environment.rst:105 +msgid "``std::thread`` is included in STL libraries." 
+msgstr "``std::thread`` включена в состав STL библиотек." diff --git a/docs/locale/ru/LC_MESSAGES/user_guide/environment_variables.po b/docs/locale/ru/LC_MESSAGES/user_guide/environment_variables.po index fd2ecb1a66..be782904dd 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/environment_variables.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/environment_variables.po @@ -1,85 +1,85 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-07-27 12:32+0200\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.17.0\n" - -#: ../../user_guide/environment_variables.rst:2 -msgid "Environment Variables" -msgstr "Переменные окружения" - -#: ../../user_guide/environment_variables.rst:4 -msgid "" -"The following environment variables can be used to configure the " -"project's runtime behavior:" -msgstr "" -"Следующие переменные окружения могут использоваться для настройки " -"поведения программы во время выполнения:" - -#: ../../user_guide/environment_variables.rst:6 -#, fuzzy -msgid "" -"``PPC_NUM_PROC``: Specifies the number of processes to launch. Default: " -"``1`` Can be queried from C++ with ``ppc::util::GetNumProc()``." -msgstr "" -"``PPC_NUM_PROC``: задаёт количество запускаемых процессов. 
По умолчанию: " -"``1``" - -#: ../../user_guide/environment_variables.rst:10 -msgid "" -"``PPC_NUM_THREADS``: Specifies the number of threads to use. Default: " -"``1``" -msgstr "" -"``PPC_NUM_THREADS``: задаёт количество используемых потоков. По " -"умолчанию: ``1``" - -#: ../../user_guide/environment_variables.rst:13 -msgid "" -"``PPC_ASAN_RUN``: Specifies that application is compiler with sanitizers." -" Used by ``scripts/run_tests.py`` to skip ``valgrind`` runs. Default: " -"``0``" -msgstr "" -"``PPC_ASAN_RUN``: указывает, что приложение собрано с санитайзерами. " -"Используется в ``scripts/run_tests.py`` для пропуска запусков под " -"``valgrind``. По умолчанию: ``0``" - -#: ../../user_guide/environment_variables.rst:16 -#, fuzzy -msgid "" -"``PPC_IGNORE_TEST_TIME_LIMIT``: Specifies that test time limits are " -"ignored. Used by ``scripts/run_tests.py`` to disable time limit " -"enforcement. Default: ``0``" -msgstr "" -"``PPC_IGNORE_TEST_TIME_LIMIT``: указывает, что ограничения по времени " -"выполнения тестов игнорируются. Используется в ``scripts/run_tests.py`` " -"для отключения контроля ограничений по времени. По умолчанию: ``0``" - -#: ../../user_guide/environment_variables.rst:18 -msgid "" -"``PPC_TASK_MAX_TIME``: Maximum allowed execution time in seconds for " -"functional tests. Default: ``1.0``" -msgstr "" - -#: ../../user_guide/environment_variables.rst:20 -msgid "" -"``PPC_PERF_MAX_TIME``: Maximum allowed execution time in seconds for " -"performance tests. Default: ``10.0``" -msgstr "" - +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-07-27 12:32+0200\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.17.0\n" + +#: ../../user_guide/environment_variables.rst:2 +msgid "Environment Variables" +msgstr "Переменные окружения" + +#: ../../user_guide/environment_variables.rst:4 +msgid "" +"The following environment variables can be used to configure the " +"project's runtime behavior:" +msgstr "" +"Следующие переменные окружения могут использоваться для настройки " +"поведения программы во время выполнения:" + +#: ../../user_guide/environment_variables.rst:6 +#, fuzzy +msgid "" +"``PPC_NUM_PROC``: Specifies the number of processes to launch. Default: " +"``1`` Can be queried from C++ with ``ppc::util::GetNumProc()``." +msgstr "" +"``PPC_NUM_PROC``: задаёт количество запускаемых процессов. По умолчанию: " +"``1``" + +#: ../../user_guide/environment_variables.rst:10 +msgid "" +"``PPC_NUM_THREADS``: Specifies the number of threads to use. Default: " +"``1``" +msgstr "" +"``PPC_NUM_THREADS``: задаёт количество используемых потоков. По " +"умолчанию: ``1``" + +#: ../../user_guide/environment_variables.rst:13 +msgid "" +"``PPC_ASAN_RUN``: Specifies that application is compiler with sanitizers." +" Used by ``scripts/run_tests.py`` to skip ``valgrind`` runs. Default: " +"``0``" +msgstr "" +"``PPC_ASAN_RUN``: указывает, что приложение собрано с санитайзерами. " +"Используется в ``scripts/run_tests.py`` для пропуска запусков под " +"``valgrind``. 
По умолчанию: ``0``" + +#: ../../user_guide/environment_variables.rst:16 +#, fuzzy +msgid "" +"``PPC_IGNORE_TEST_TIME_LIMIT``: Specifies that test time limits are " +"ignored. Used by ``scripts/run_tests.py`` to disable time limit " +"enforcement. Default: ``0``" +msgstr "" +"``PPC_IGNORE_TEST_TIME_LIMIT``: указывает, что ограничения по времени " +"выполнения тестов игнорируются. Используется в ``scripts/run_tests.py`` " +"для отключения контроля ограничений по времени. По умолчанию: ``0``" + +#: ../../user_guide/environment_variables.rst:18 +msgid "" +"``PPC_TASK_MAX_TIME``: Maximum allowed execution time in seconds for " +"functional tests. Default: ``1.0``" +msgstr "" + +#: ../../user_guide/environment_variables.rst:20 +msgid "" +"``PPC_PERF_MAX_TIME``: Maximum allowed execution time in seconds for " +"performance tests. Default: ``10.0``" +msgstr "" + diff --git a/docs/locale/ru/LC_MESSAGES/user_guide/submit_work.po b/docs/locale/ru/LC_MESSAGES/user_guide/submit_work.po index 1775ab7c6d..7631510d15 100644 --- a/docs/locale/ru/LC_MESSAGES/user_guide/submit_work.po +++ b/docs/locale/ru/LC_MESSAGES/user_guide/submit_work.po @@ -1,154 +1,154 @@ -# Parallel Programming Course Documentation -# Copyright (C) 2025, Learning Process -# This file is distributed under the same license as the Parallel -# Programming Course package. -# Learning Process Team , 2025. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: Parallel Programming Course \n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-01-20 23:19+0100\n" -"PO-Revision-Date: 2025-07-27 18:21+0200\n" -"Last-Translator: Learning Process Team \n" -"Language: ru\n" -"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " -"n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.16.0\n" - -#: ../../user_guide/submit_work.rst:2 -msgid "How to submit your work" -msgstr "Как создать, открыть и отправить на проверку Вашу работу" - -#: ../../user_guide/submit_work.rst:4 -msgid "" -"There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in" -" the ``tasks`` directory. Move to a folder of your task. Create a " -"directory named ``__``." -msgstr "" -"Существуют следующие типы ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` поддиректорий (типов задач) в" -" ``tasks`` директории. Ваша директория будет размещаться в директории согласно типу Вашей задачи. Создайте " -"поддиректорию и назовите ее латинскими буквами следующим образом - ``<фамилия>_<первая буква имени>_<краткое название задачи>``." - -#: ../../user_guide/submit_work.rst:6 -msgid "" -"Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the " -"same** name directory. If the ``seq`` task is named " -"``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named " -"``omp/nesterov_a_vector_sum``." -msgstr "" -"Пример: ``seq/nesterov_a_vector_sum``. Пожалуйста называйте все задачи **одинаково**, если у вас одна задача и несколько технологий. " -"Если Ваша задача ``seq`` то задача будет размещена следующим образом - " -"``seq/nesterov_a_vector_sum``, если ``omp`` то так " -"``omp/nesterov_a_vector_sum``." - -#: ../../user_guide/submit_work.rst:8 -msgid "" -"Navigate into the newly created folder and begin your work on the task. " -"The folder must contain only 4 directories with files:" -msgstr "" -"Перейдите в созданную Вами, директорию и начните работу. " -"Директория должна содержать 4 обязательных поддиректории с файлами и 1 опциональную:" - -#: ../../user_guide/submit_work.rst:10 -msgid "" -"``data`` - Directory with own data files for functional testing of " -"the task." 
-msgstr "" -"``data`` - Опциональная директория с тестовыми файлами для экспериментов и тестов" - -#: ../../user_guide/submit_work.rst:11 -msgid "" -"``func_tests`` - Directory with Google tests for functional testing of " -"the task." -msgstr "" -"``func_tests`` - Директория с Google тестами для функционального тестирования." - -#: ../../user_guide/submit_work.rst:12 -msgid "``include`` - Directory for header files with function prototypes." -msgstr "``include`` - Директория с header файлами, в которых содержатся чаще всего прототипы ваших функций и классов." - -#: ../../user_guide/submit_work.rst:13 -msgid "" -"``perf_tests`` - Directory with Google tests for performance testing. The" -" number of tests must be 2: ``run_task`` and ``run_pipeline``." -msgstr "" -"``perf_tests`` - Директория с Google тестами для тестов на производительность. " -"Должно быть всего 2 теста: ``run_task`` и ``run_pipeline``." - -#: ../../user_guide/submit_work.rst:14 -msgid "" -"``src`` - Directory with source files containing the function " -"implementations." -msgstr "" -"``src`` - Директория с исходными кодами, содержащая основную реализацию задач." - -#: ../../user_guide/submit_work.rst:16 -msgid "There must be 10 executable files for running:" -msgstr "При максимальной конфигурации должно быть 12 исполняемых файлов для запуска:" - -#: ../../user_guide/submit_work.rst:18 -msgid "" -"``__tests``. For example, " -"``omp_perf_tests`` - an executable file for performance tests of OpenMP " -"practice tasks." -msgstr "" -"``__tests``. К примеру, " -"``omp_perf_tests`` - исполняемый файл запуска тестов на производительность, задач, связанных с технологией OpenMP." - -#: ../../user_guide/submit_work.rst:20 -msgid "" -"All prototypes and classes in the ``include`` directory must be " -"namespace-escaped. Name your namespace as follows:" -msgstr "" -"Все Ваши прототипы и классы в ``include`` директории должны быть экранированы с помощью namespace. 
" -"Имя namespace должно соотвествовать следующему примеру:" - -#: ../../user_guide/submit_work.rst:36 -msgid "Name your group of tests and individual test cases as follows:" -msgstr "Имя вашей группы тестов и одного ндивидуального тест кейса должно именоваться так:" - -#: ../../user_guide/submit_work.rst:38 -msgid "For functional tests (for maximum coverage):" -msgstr "Для функциональных тестов (для максимального покрытия):" - -#: ../../user_guide/submit_work.rst:50 -msgid "" -"For performance tests (only 2 tests - ``pipeline`` and ``task`` - no more" -" no less):" -msgstr "Для тестов на производительность (только 2 теста - ``pipeline`` and ``task`` - ни больше, ни меньше):" - -#: ../../user_guide/submit_work.rst:65 -msgid "Name your pull request as follows:" -msgstr "Имя вашего pull request должно называться следующим образом:" - -#: ../../user_guide/submit_work.rst:67 -msgid "For tasks:" -msgstr "Для задач:" - -#: ../../user_guide/submit_work.rst:74 -msgid "Provide the full task definition in the pull request's description." -msgstr "Приведите полную формулировку задачи в описании pull request." - -#: ../../user_guide/submit_work.rst:76 -msgid "Example pull request can be found in the repository's pull requests." -msgstr "Пример pull request может быть найден в основном репозитории в разделе pull requests." - -#: ../../user_guide/submit_work.rst:78 -msgid "" -"Work on your forked repository. Keep your work on a separate branch (not " -"on ``master``)!!! Name your branch the same as your task's folder. To " -"create a branch, run:" -msgstr "" -"Работайте в своем собственном fork-репозитории. Каждую задачу нужно делать на новой ветке (не " -"на ``master`` ветке)!!! Имя ветки такое же как и имя директории Вашей задачи. 
" -"Для создания ветки воспользуйтесь следующей командой:" - -#: ../../user_guide/submit_work.rst:84 -msgid "**Failing to follow the rules will result in a red project build.**" -msgstr "**Несоблюдение правил приведет к падению сборки проекта.**" +# Parallel Programming Course Documentation +# Copyright (C) 2025, Learning Process +# This file is distributed under the same license as the Parallel +# Programming Course package. +# Learning Process Team , 2025. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Parallel Programming Course \n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-20 23:19+0100\n" +"PO-Revision-Date: 2025-07-27 18:21+0200\n" +"Last-Translator: Learning Process Team \n" +"Language: ru\n" +"Language-Team: Learning Process Team (gooddoog@student.su, nesterov.alexander@outlook.com)\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.16.0\n" + +#: ../../user_guide/submit_work.rst:2 +msgid "How to submit your work" +msgstr "Как создать, открыть и отправить на проверку Вашу работу" + +#: ../../user_guide/submit_work.rst:4 +msgid "" +"There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in" +" the ``tasks`` directory. Move to a folder of your task. Create a " +"directory named ``__``." +msgstr "" +"Существуют следующие типы ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` поддиректорий (типов задач) в" +" ``tasks`` директории. Ваша директория будет размещаться в директории согласно типу Вашей задачи. Создайте " +"поддиректорию и назовите ее латинскими буквами следующим образом - ``<фамилия>_<первая буква имени>_<краткое название задачи>``." + +#: ../../user_guide/submit_work.rst:6 +msgid "" +"Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the " +"same** name directory. 
If the ``seq`` task is named " +"``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named " +"``omp/nesterov_a_vector_sum``." +msgstr "" +"Пример: ``seq/nesterov_a_vector_sum``. Пожалуйста называйте все задачи **одинаково**, если у вас одна задача и несколько технологий. " +"Если Ваша задача ``seq`` то задача будет размещена следующим образом - " +"``seq/nesterov_a_vector_sum``, если ``omp`` то так " +"``omp/nesterov_a_vector_sum``." + +#: ../../user_guide/submit_work.rst:8 +msgid "" +"Navigate into the newly created folder and begin your work on the task. " +"The folder must contain only 4 directories with files:" +msgstr "" +"Перейдите в созданную Вами, директорию и начните работу. " +"Директория должна содержать 4 обязательных поддиректории с файлами и 1 опциональную:" + +#: ../../user_guide/submit_work.rst:10 +msgid "" +"``data`` - Directory with own data files for functional testing of " +"the task." +msgstr "" +"``data`` - Опциональная директория с тестовыми файлами для экспериментов и тестов" + +#: ../../user_guide/submit_work.rst:11 +msgid "" +"``func_tests`` - Directory with Google tests for functional testing of " +"the task." +msgstr "" +"``func_tests`` - Директория с Google тестами для функционального тестирования." + +#: ../../user_guide/submit_work.rst:12 +msgid "``include`` - Directory for header files with function prototypes." +msgstr "``include`` - Директория с header файлами, в которых содержатся чаще всего прототипы ваших функций и классов." + +#: ../../user_guide/submit_work.rst:13 +msgid "" +"``perf_tests`` - Directory with Google tests for performance testing. The" +" number of tests must be 2: ``run_task`` and ``run_pipeline``." +msgstr "" +"``perf_tests`` - Директория с Google тестами для тестов на производительность. " +"Должно быть всего 2 теста: ``run_task`` и ``run_pipeline``." + +#: ../../user_guide/submit_work.rst:14 +msgid "" +"``src`` - Directory with source files containing the function " +"implementations." 
+msgstr "" +"``src`` - Директория с исходными кодами, содержащая основную реализацию задач." + +#: ../../user_guide/submit_work.rst:16 +msgid "There must be 10 executable files for running:" +msgstr "При максимальной конфигурации должно быть 12 исполняемых файлов для запуска:" + +#: ../../user_guide/submit_work.rst:18 +msgid "" +"``__tests``. For example, " +"``omp_perf_tests`` - an executable file for performance tests of OpenMP " +"practice tasks." +msgstr "" +"``__tests``. К примеру, " +"``omp_perf_tests`` - исполняемый файл запуска тестов на производительность, задач, связанных с технологией OpenMP." + +#: ../../user_guide/submit_work.rst:20 +msgid "" +"All prototypes and classes in the ``include`` directory must be " +"namespace-escaped. Name your namespace as follows:" +msgstr "" +"Все Ваши прототипы и классы в ``include`` директории должны быть экранированы с помощью namespace. " +"Имя namespace должно соотвествовать следующему примеру:" + +#: ../../user_guide/submit_work.rst:36 +msgid "Name your group of tests and individual test cases as follows:" +msgstr "Имя вашей группы тестов и одного ндивидуального тест кейса должно именоваться так:" + +#: ../../user_guide/submit_work.rst:38 +msgid "For functional tests (for maximum coverage):" +msgstr "Для функциональных тестов (для максимального покрытия):" + +#: ../../user_guide/submit_work.rst:50 +msgid "" +"For performance tests (only 2 tests - ``pipeline`` and ``task`` - no more" +" no less):" +msgstr "Для тестов на производительность (только 2 теста - ``pipeline`` and ``task`` - ни больше, ни меньше):" + +#: ../../user_guide/submit_work.rst:65 +msgid "Name your pull request as follows:" +msgstr "Имя вашего pull request должно называться следующим образом:" + +#: ../../user_guide/submit_work.rst:67 +msgid "For tasks:" +msgstr "Для задач:" + +#: ../../user_guide/submit_work.rst:74 +msgid "Provide the full task definition in the pull request's description." 
+msgstr "Приведите полную формулировку задачи в описании pull request." + +#: ../../user_guide/submit_work.rst:76 +msgid "Example pull request can be found in the repository's pull requests." +msgstr "Пример pull request может быть найден в основном репозитории в разделе pull requests." + +#: ../../user_guide/submit_work.rst:78 +msgid "" +"Work on your forked repository. Keep your work on a separate branch (not " +"on ``master``)!!! Name your branch the same as your task's folder. To " +"create a branch, run:" +msgstr "" +"Работайте в своем собственном fork-репозитории. Каждую задачу нужно делать на новой ветке (не " +"на ``master`` ветке)!!! Имя ветки такое же как и имя директории Вашей задачи. " +"Для создания ветки воспользуйтесь следующей командой:" + +#: ../../user_guide/submit_work.rst:84 +msgid "**Failing to follow the rules will result in a red project build.**" +msgstr "**Несоблюдение правил приведет к падению сборки проекта.**" diff --git a/docs/requirements.txt b/docs/requirements.txt index fb156cb038..61677cab57 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -Sphinx==8.2.3 -sphinx-rtd-theme==3.0.2 -sphinx-intl==2.3.2 -breathe==4.36.0 +Sphinx==8.2.3 +sphinx-rtd-theme==3.0.2 +sphinx-intl==2.3.2 +breathe==4.36.0 diff --git a/docs/user_guide/api.rst b/docs/user_guide/api.rst index 178c3f4014..5e6d6a18ff 100644 --- a/docs/user_guide/api.rst +++ b/docs/user_guide/api.rst @@ -1,29 +1,29 @@ -API Reference -============= - -.. toctree:: - :maxdepth: 1 - -Runners Module --------------- - -.. doxygennamespace:: ppc::runners - :project: ParallelProgrammingCourse - -Task Module ------------ - -.. doxygennamespace:: ppc::task - :project: ParallelProgrammingCourse - -Utility Module --------------- - -.. doxygennamespace:: ppc::util - :project: ParallelProgrammingCourse - -Performance Module ------------------- - -.. doxygennamespace:: ppc::performance - :project: ParallelProgrammingCourse +API Reference +============= + +.. 
toctree:: + :maxdepth: 1 + +Runners Module +-------------- + +.. doxygennamespace:: ppc::runners + :project: ParallelProgrammingCourse + +Task Module +----------- + +.. doxygennamespace:: ppc::task + :project: ParallelProgrammingCourse + +Utility Module +-------------- + +.. doxygennamespace:: ppc::util + :project: ParallelProgrammingCourse + +Performance Module +------------------ + +.. doxygennamespace:: ppc::performance + :project: ParallelProgrammingCourse diff --git a/docs/user_guide/build.rst b/docs/user_guide/build.rst index e1ac0ceea5..857678f907 100644 --- a/docs/user_guide/build.rst +++ b/docs/user_guide/build.rst @@ -1,33 +1,33 @@ -Build the Project with ``CMake`` -================================ - -Navigate to a source code folder. - -1. **Configure the build**: ``Makefile``, ``.sln``, etc. - - .. code-block:: bash - - mkdir build && cd build - cmake -D USE_FUNC_TESTS=ON -D USE_PERF_TESTS=ON -D CMAKE_BUILD_TYPE=Release .. - - *Help on CMake keys:* - - - - ``-D USE_FUNC_TESTS=ON`` enable functional tests. - - ``-D USE_PERF_TESTS=ON`` enable performance tests. - - ``-D CMAKE_BUILD_TYPE=Release`` normal build (default). - - ``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` recommended when using sanitizers or - running ``valgrind`` to keep debug information. - - ``-D CMAKE_BUILD_TYPE=Debug`` for debugging sessions. - - *A corresponding flag can be omitted if it's not needed.* - -2. **Build the project**: - - .. code-block:: bash - - cmake --build . --config Release --parallel - -3. **Check the task**: - - * Run ``/build/bin`` +Build the Project with ``CMake`` +================================ + +Navigate to a source code folder. + +1. **Configure the build**: ``Makefile``, ``.sln``, etc. + + .. code-block:: bash + + mkdir build && cd build + cmake -D USE_FUNC_TESTS=ON -D USE_PERF_TESTS=ON -D CMAKE_BUILD_TYPE=Release .. + + *Help on CMake keys:* + + + - ``-D USE_FUNC_TESTS=ON`` enable functional tests. + - ``-D USE_PERF_TESTS=ON`` enable performance tests. 
+ - ``-D CMAKE_BUILD_TYPE=Release`` normal build (default). + - ``-D CMAKE_BUILD_TYPE=RelWithDebInfo`` recommended when using sanitizers or + running ``valgrind`` to keep debug information. + - ``-D CMAKE_BUILD_TYPE=Debug`` for debugging sessions. + + *A corresponding flag can be omitted if it's not needed.* + +2. **Build the project**: + + .. code-block:: bash + + cmake --build . --config Release --parallel + +3. **Check the task**: + + * Run ``/build/bin`` diff --git a/docs/user_guide/ci.rst b/docs/user_guide/ci.rst index fea751f6ac..d6cc9745ba 100644 --- a/docs/user_guide/ci.rst +++ b/docs/user_guide/ci.rst @@ -1,67 +1,67 @@ -Continuous Integration (CI) -============================ - -Students need to pass all the checks in the CI pipeline before their work can be considered for submission. -This includes successful code checkout, build ans testing stages. -Each integration is verified by an automated build and automated tests. - -CI Pipeline ------------- - -The CI pipeline for this project is illustrated in the following diagram: - -.. image:: ../_static/ci_graph.svg - :alt: CI Pipeline Diagram - :align: center - -Running ``scripts/run_tests.py`` --------------------------------- - -Automated tests are executed through the ``scripts/run_tests.py`` helper. The -script requires several environment variables to be defined: - -``PPC_NUM_THREADS`` - Number of threads to use. The value is also exported as - ``OMP_NUM_THREADS``. - -``PPC_NUM_PROC`` - Number of MPI processes to launch. - -``PPC_ASAN_RUN`` - Set to ``1`` when sanitizers are enabled to skip ``valgrind`` runs (optional, - default ``0``). - -``PPC_IGNORE_TEST_TIME_LIMIT`` - Set to ``1`` to disable test time limits (optional, default ``0``). - -The execution mode is selected with ``--running-type``. The most common modes -are ``threads`` for shared-memory backends and ``processes`` for MPI based -tests. ``performance`` mode runs performance benchmarks. - -Example usage: - -.. 
code-block:: bash - - export PPC_NUM_THREADS=4 - export PPC_NUM_PROC=2 - - # Multithreaded functional tests - scripts/run_tests.py --running-type="threads" - - # MPI functional tests - scripts/run_tests.py --running-type="processes" - - # Performance benchmarks - scripts/run_tests.py --running-type="performance" - -Additional MPI arguments can be supplied with ``--additional-mpi-args`` when -running in ``processes`` mode. - -The ``--counts`` option allows sequential execution of tests with several -thread/process counts. When specified, the script will iterate over the provided -values, updating ``PPC_NUM_THREADS`` or ``PPC_NUM_PROC`` accordingly before each -run. - -Use ``--verbose`` to print every command executed by ``run_tests.py``. This can -be helpful for debugging CI failures or verifying the exact arguments passed to -the test binaries. +Continuous Integration (CI) +============================ + +Students need to pass all the checks in the CI pipeline before their work can be considered for submission. +This includes successful code checkout, build ans testing stages. +Each integration is verified by an automated build and automated tests. + +CI Pipeline +------------ + +The CI pipeline for this project is illustrated in the following diagram: + +.. image:: ../_static/ci_graph.svg + :alt: CI Pipeline Diagram + :align: center + +Running ``scripts/run_tests.py`` +-------------------------------- + +Automated tests are executed through the ``scripts/run_tests.py`` helper. The +script requires several environment variables to be defined: + +``PPC_NUM_THREADS`` + Number of threads to use. The value is also exported as + ``OMP_NUM_THREADS``. + +``PPC_NUM_PROC`` + Number of MPI processes to launch. + +``PPC_ASAN_RUN`` + Set to ``1`` when sanitizers are enabled to skip ``valgrind`` runs (optional, + default ``0``). + +``PPC_IGNORE_TEST_TIME_LIMIT`` + Set to ``1`` to disable test time limits (optional, default ``0``). 
+ +The execution mode is selected with ``--running-type``. The most common modes +are ``threads`` for shared-memory backends and ``processes`` for MPI based +tests. ``performance`` mode runs performance benchmarks. + +Example usage: + +.. code-block:: bash + + export PPC_NUM_THREADS=4 + export PPC_NUM_PROC=2 + + # Multithreaded functional tests + scripts/run_tests.py --running-type="threads" + + # MPI functional tests + scripts/run_tests.py --running-type="processes" + + # Performance benchmarks + scripts/run_tests.py --running-type="performance" + +Additional MPI arguments can be supplied with ``--additional-mpi-args`` when +running in ``processes`` mode. + +The ``--counts`` option allows sequential execution of tests with several +thread/process counts. When specified, the script will iterate over the provided +values, updating ``PPC_NUM_THREADS`` or ``PPC_NUM_PROC`` accordingly before each +run. + +Use ``--verbose`` to print every command executed by ``run_tests.py``. This can +be helpful for debugging CI failures or verifying the exact arguments passed to +the test binaries. diff --git a/docs/user_guide/download.rst b/docs/user_guide/download.rst index c36b4cf39e..68f8c2ec8d 100644 --- a/docs/user_guide/download.rst +++ b/docs/user_guide/download.rst @@ -1,6 +1,6 @@ -Download all submodules -======================= - -.. code-block:: bash - - git submodule update --init --recursive --depth=1 +Download all submodules +======================= + +.. 
code-block:: bash + + git submodule update --init --recursive --depth=1 diff --git a/docs/user_guide/environment.rst b/docs/user_guide/environment.rst index 67b5233c4e..0bc89045c7 100644 --- a/docs/user_guide/environment.rst +++ b/docs/user_guide/environment.rst @@ -1,105 +1,105 @@ -Set Up Your Environment -======================== - -Development Container (Recommended) ------------------------------------- -The easiest way to set up your development environment is using the provided ``.devcontainer`` configuration with VS Code and Docker. - -**Prerequisites:** - -- `Visual Studio Code `_ -- `Docker Desktop `_ -- `Dev Containers extension `_ - -**Setup:** - -1. Clone the repository and open it in VS Code -2. When prompted, click "Reopen in Container" or use Command Palette: ``Dev Containers: Reopen in Container`` -3. VS Code will automatically build the container with all dependencies pre-installed -4. The container includes: - - - Ubuntu environment with gcc-14, CMake, MPI, OpenMP - - Pre-configured C++ and Python development tools - - All project dependencies ready to use - -This provides a consistent development environment across all platforms without manual dependency installation. - -Manual Setup ------------- - -If you prefer manual setup or cannot use containers, follow the instructions below. - -Build prerequisites -------------------- -- **Windows**: Download and install CMake from https://cmake.org/download (select the Windows installer) or install using Chocolatey: - - .. code-block:: powershell - - choco install cmake - -- **Linux (Ubuntu/Debian)**: Install using package manager: - - .. code-block:: bash - - sudo apt update - sudo apt install -y cmake - -- **macOS**: Install using Homebrew: - - .. code-block:: bash - - brew update - brew install cmake - -Code Style Analysis --------------------- -Please follow the `Google C++ Style Guide `_. - -Code style is checked using the `clang-format `_ tool. 
- -Parallel Programming Technologies ---------------------------------- - -``MPI`` -~~~~~~~ -- **Windows (MSVC)**: - - `Installers link `_. You have to install ``msmpisdk.msi`` and ``msmpisetup.exe``. - -- **Linux (gcc and clang)**: - - .. code-block:: bash - - sudo apt install -y mpich openmpi-bin libopenmpi-dev - -- **MacOS (apple clang)**: - - .. code-block:: bash - - brew install open-mpi - -``OpenMP`` -~~~~~~~~~~ -``OpenMP`` is included in ``gcc`` and ``msvc``, but some components should be installed additionally: - -- **Linux (gcc and clang)**: - - .. code-block:: bash - - sudo apt install -y libomp-dev - -- **MacOS (llvm)**: - - .. code-block:: bash - - brew install llvm - brew install libomp - -``TBB`` -~~~~~~~ -- **Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: - Build as 3rdparty in the current project. - -``std::thread`` -~~~~~~~~~~~~~~~ -``std::thread`` is included in STL libraries. +Set Up Your Environment +======================== + +Development Container (Recommended) +------------------------------------ +The easiest way to set up your development environment is using the provided ``.devcontainer`` configuration with VS Code and Docker. + +**Prerequisites:** + +- `Visual Studio Code `_ +- `Docker Desktop `_ +- `Dev Containers extension `_ + +**Setup:** + +1. Clone the repository and open it in VS Code +2. When prompted, click "Reopen in Container" or use Command Palette: ``Dev Containers: Reopen in Container`` +3. VS Code will automatically build the container with all dependencies pre-installed +4. The container includes: + + - Ubuntu environment with gcc-14, CMake, MPI, OpenMP + - Pre-configured C++ and Python development tools + - All project dependencies ready to use + +This provides a consistent development environment across all platforms without manual dependency installation. + +Manual Setup +------------ + +If you prefer manual setup or cannot use containers, follow the instructions below. 
+ +Build prerequisites +------------------- +- **Windows**: Download and install CMake from https://cmake.org/download (select the Windows installer) or install using Chocolatey: + + .. code-block:: powershell + + choco install cmake + +- **Linux (Ubuntu/Debian)**: Install using package manager: + + .. code-block:: bash + + sudo apt update + sudo apt install -y cmake + +- **macOS**: Install using Homebrew: + + .. code-block:: bash + + brew update + brew install cmake + +Code Style Analysis +-------------------- +Please follow the `Google C++ Style Guide `_. + +Code style is checked using the `clang-format `_ tool. + +Parallel Programming Technologies +--------------------------------- + +``MPI`` +~~~~~~~ +- **Windows (MSVC)**: + + `Installers link `_. You have to install ``msmpisdk.msi`` and ``msmpisetup.exe``. + +- **Linux (gcc and clang)**: + + .. code-block:: bash + + sudo apt install -y mpich openmpi-bin libopenmpi-dev + +- **MacOS (apple clang)**: + + .. code-block:: bash + + brew install open-mpi + +``OpenMP`` +~~~~~~~~~~ +``OpenMP`` is included in ``gcc`` and ``msvc``, but some components should be installed additionally: + +- **Linux (gcc and clang)**: + + .. code-block:: bash + + sudo apt install -y libomp-dev + +- **MacOS (llvm)**: + + .. code-block:: bash + + brew install llvm + brew install libomp + +``TBB`` +~~~~~~~ +- **Windows (MSVC)**, **Linux (gcc and clang)**, **MacOS (apple clang)**: + Build as 3rdparty in the current project. + +``std::thread`` +~~~~~~~~~~~~~~~ +``std::thread`` is included in STL libraries. 
diff --git a/docs/user_guide/environment_variables.rst b/docs/user_guide/environment_variables.rst index 6a896dcb86..d7dd2746d1 100644 --- a/docs/user_guide/environment_variables.rst +++ b/docs/user_guide/environment_variables.rst @@ -1,21 +1,21 @@ -Environment Variables -===================== - -The following environment variables can be used to configure the project's runtime behavior: - -- ``PPC_NUM_PROC``: Specifies the number of processes to launch. - Default: ``1`` - Can be queried from C++ with ``ppc::util::GetNumProc()``. - -- ``PPC_NUM_THREADS``: Specifies the number of threads to use. - Default: ``1`` - -- ``PPC_ASAN_RUN``: Specifies that application is compiler with sanitizers. Used by ``scripts/run_tests.py`` to skip ``valgrind`` runs. - Default: ``0`` - -- ``PPC_IGNORE_TEST_TIME_LIMIT``: Specifies that test time limits are ignored. Used by ``scripts/run_tests.py`` to disable time limit enforcement. - Default: ``0`` -- ``PPC_TASK_MAX_TIME``: Maximum allowed execution time in seconds for functional tests. - Default: ``1.0`` -- ``PPC_PERF_MAX_TIME``: Maximum allowed execution time in seconds for performance tests. - Default: ``10.0`` +Environment Variables +===================== + +The following environment variables can be used to configure the project's runtime behavior: + +- ``PPC_NUM_PROC``: Specifies the number of processes to launch. + Default: ``1`` + Can be queried from C++ with ``ppc::util::GetNumProc()``. + +- ``PPC_NUM_THREADS``: Specifies the number of threads to use. + Default: ``1`` + +- ``PPC_ASAN_RUN``: Specifies that application is compiler with sanitizers. Used by ``scripts/run_tests.py`` to skip ``valgrind`` runs. + Default: ``0`` + +- ``PPC_IGNORE_TEST_TIME_LIMIT``: Specifies that test time limits are ignored. Used by ``scripts/run_tests.py`` to disable time limit enforcement. + Default: ``0`` +- ``PPC_TASK_MAX_TIME``: Maximum allowed execution time in seconds for functional tests. 
+ Default: ``1.0`` +- ``PPC_PERF_MAX_TIME``: Maximum allowed execution time in seconds for performance tests. + Default: ``10.0`` diff --git a/docs/user_guide/submit_work.rst b/docs/user_guide/submit_work.rst index 51362a28e3..c16d205531 100644 --- a/docs/user_guide/submit_work.rst +++ b/docs/user_guide/submit_work.rst @@ -1,84 +1,84 @@ -How to submit your work -======================== - -- There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in the ``tasks`` directory. Move to a folder of your task. Create a directory named ``__``. - - Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the same** name directory. If the ``seq`` task is named ``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named ``omp/nesterov_a_vector_sum``. - -- Navigate into the newly created folder and begin your work on the task. The folder must contain only 4 directories with files: - - - ``data`` - Directory with own data files for functional testing of the task. - - ``func_tests`` - Directory with Google tests for functional testing of the task. - - ``include`` - Directory for header files with function prototypes. - - ``perf_tests`` - Directory with Google tests for performance testing. The number of tests must be 2: ``run_task`` and ``run_pipeline``. - - ``src`` - Directory with source files containing the function implementations. - -- There must be 10 executable files for running: - - - ``__tests``. For example, ``omp_perf_tests`` - an executable file for performance tests of OpenMP practice tasks. - -- All prototypes and classes in the ``include`` directory must be namespace-escaped. Name your namespace as follows: - - .. code-block:: cpp - - namespace ___ { - ... - } - e.g. - namespace nesterov_a_test_task_seq { - - class TestTaskSequential : public ppc::core::Task { - ... 
- }; - - } // namespace nesterov_a_test_task_seq - -- Name your group of tests and individual test cases as follows: - - - For functional tests (for maximum coverage): - - .. code-block:: cpp - - TEST(___, ) { - ... - } - e.g. - TEST(nesterov_a_vector_sum_omp, test_sum) { - ... - } - - - For performance tests (only 2 tests - ``pipeline`` and ``task`` - no more no less): - - .. code-block:: cpp - - TEST(___, ) { - ... - } - e.g. - TEST(nesterov_a_vector_sum_stl, test_pipeline_run) { - ... - } - TEST(nesterov_a_vector_sum_stl, test_task_run) { - ... - } - -- Name your pull request as follows: - - - For tasks: - - .. code-block:: - - . Task . Variant . Technology . . - Нестеров Александр. Задача 1. Вариант 123. Технология MPI. Сумма элементов вектора. - -- Provide the full task definition in the pull request's description. - - Example pull request can be found in the repository's pull requests. - -- Work on your forked repository. Keep your work on a separate branch (not on ``master``)!!! Name your branch the same as your task's folder. To create a branch, run: - - .. code-block:: bash - - git checkout -b nesterov_a_vector_sum_mpi - -**Failing to follow the rules will result in a red project build.** +How to submit your work +======================== + +- There are ``all``, ``mpi``, ``omp``, ``seq``, ``stl``, ``tbb`` folders in the ``tasks`` directory. Move to a folder of your task. Create a directory named ``__``. + + Example: ``seq/nesterov_a_vector_sum``. Please name all tasks **with the same** name directory. If the ``seq`` task is named ``seq/nesterov_a_vector_sum``, then the ``omp`` task must be named ``omp/nesterov_a_vector_sum``. + +- Navigate into the newly created folder and begin your work on the task. The folder must contain only 4 directories with files: + + - ``data`` - Directory with own data files for functional testing of the task. + - ``func_tests`` - Directory with Google tests for functional testing of the task. 
+ - ``include`` - Directory for header files with function prototypes. + - ``perf_tests`` - Directory with Google tests for performance testing. The number of tests must be 2: ``run_task`` and ``run_pipeline``. + - ``src`` - Directory with source files containing the function implementations. + +- There must be 10 executable files for running: + + - ``__tests``. For example, ``omp_perf_tests`` - an executable file for performance tests of OpenMP practice tasks. + +- All prototypes and classes in the ``include`` directory must be namespace-escaped. Name your namespace as follows: + + .. code-block:: cpp + + namespace ___ { + ... + } + e.g. + namespace nesterov_a_test_task_seq { + + class TestTaskSequential : public ppc::core::Task { + ... + }; + + } // namespace nesterov_a_test_task_seq + +- Name your group of tests and individual test cases as follows: + + - For functional tests (for maximum coverage): + + .. code-block:: cpp + + TEST(___, ) { + ... + } + e.g. + TEST(nesterov_a_vector_sum_omp, test_sum) { + ... + } + + - For performance tests (only 2 tests - ``pipeline`` and ``task`` - no more no less): + + .. code-block:: cpp + + TEST(___, ) { + ... + } + e.g. + TEST(nesterov_a_vector_sum_stl, test_pipeline_run) { + ... + } + TEST(nesterov_a_vector_sum_stl, test_task_run) { + ... + } + +- Name your pull request as follows: + + - For tasks: + + .. code-block:: + + . Task . Variant . Technology . . + Нестеров Александр. Задача 1. Вариант 123. Технология MPI. Сумма элементов вектора. + +- Provide the full task definition in the pull request's description. + + Example pull request can be found in the repository's pull requests. + +- Work on your forked repository. Keep your work on a separate branch (not on ``master``)!!! Name your branch the same as your task's folder. To create a branch, run: + + .. 
code-block:: bash + + git checkout -b nesterov_a_vector_sum_mpi + +**Failing to follow the rules will result in a red project build.** diff --git a/modules/CMakeLists.txt b/modules/CMakeLists.txt index d9b5057e35..c8f3ab8a7f 100644 --- a/modules/CMakeLists.txt +++ b/modules/CMakeLists.txt @@ -1,57 +1,57 @@ -message(STATUS "Core components") -set(exec_func_tests "core_func_tests") -set(exec_func_lib "core_module_lib") - -subdirlist(subdirs ${CMAKE_CURRENT_SOURCE_DIR}) - -foreach(subd ${subdirs}) - get_filename_component(PROJECT_ID ${subd} NAME) - set(PATH_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/${subd}") - set(PROJECT_ID "${PROJECT_ID}") - message(STATUS "-- " ${PROJECT_ID}) - - file(GLOB_RECURSE TMP_LIB_SOURCE_FILES ${PATH_PREFIX}/include/* - ${PATH_PREFIX}/src/*) - list(APPEND LIB_SOURCE_FILES ${TMP_LIB_SOURCE_FILES}) - - file(GLOB_RECURSE TMP_FUNC_TESTS_SOURCE_FILES ${PATH_PREFIX}/tests/*) - list(APPEND FUNC_TESTS_SOURCE_FILES ${TMP_FUNC_TESTS_SOURCE_FILES}) -endforeach() - -project(${exec_func_lib}) -add_library(${exec_func_lib} STATIC ${LIB_SOURCE_FILES}) -set_target_properties(${exec_func_lib} PROPERTIES LINKER_LANGUAGE CXX) - -# Add include directories to target -target_include_directories( - ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty - ${CMAKE_SOURCE_DIR}/modules ${CMAKE_SOURCE_DIR}/tasks) - -foreach( - link - envpp - json - gtest - threads - openmp - tbb - mpi - stb) - cmake_language(CALL "ppc_link_${link}" ${exec_func_lib}) -endforeach() - -add_executable(${exec_func_tests} ${FUNC_TESTS_SOURCE_FILES}) - -target_link_libraries(${exec_func_tests} PUBLIC ${exec_func_lib}) - -enable_testing() -add_test(NAME ${exec_func_tests} COMMAND ${exec_func_tests}) - -# Installation rules -install( - TARGETS ${exec_func_lib} - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - RUNTIME DESTINATION bin) - -install(TARGETS ${exec_func_tests} RUNTIME DESTINATION bin) +message(STATUS "Core components") +set(exec_func_tests "core_func_tests") +set(exec_func_lib 
"core_module_lib") + +subdirlist(subdirs ${CMAKE_CURRENT_SOURCE_DIR}) + +foreach(subd ${subdirs}) + get_filename_component(PROJECT_ID ${subd} NAME) + set(PATH_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/${subd}") + set(PROJECT_ID "${PROJECT_ID}") + message(STATUS "-- " ${PROJECT_ID}) + + file(GLOB_RECURSE TMP_LIB_SOURCE_FILES ${PATH_PREFIX}/include/* + ${PATH_PREFIX}/src/*) + list(APPEND LIB_SOURCE_FILES ${TMP_LIB_SOURCE_FILES}) + + file(GLOB_RECURSE TMP_FUNC_TESTS_SOURCE_FILES ${PATH_PREFIX}/tests/*) + list(APPEND FUNC_TESTS_SOURCE_FILES ${TMP_FUNC_TESTS_SOURCE_FILES}) +endforeach() + +project(${exec_func_lib}) +add_library(${exec_func_lib} STATIC ${LIB_SOURCE_FILES}) +set_target_properties(${exec_func_lib} PROPERTIES LINKER_LANGUAGE CXX) + +# Add include directories to target +target_include_directories( + ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty + ${CMAKE_SOURCE_DIR}/modules ${CMAKE_SOURCE_DIR}/tasks) + +foreach( + link + envpp + json + gtest + threads + openmp + tbb + mpi + stb) + cmake_language(CALL "ppc_link_${link}" ${exec_func_lib}) +endforeach() + +add_executable(${exec_func_tests} ${FUNC_TESTS_SOURCE_FILES}) + +target_link_libraries(${exec_func_tests} PUBLIC ${exec_func_lib}) + +enable_testing() +add_test(NAME ${exec_func_tests} COMMAND ${exec_func_tests}) + +# Installation rules +install( + TARGETS ${exec_func_lib} + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION bin) + +install(TARGETS ${exec_func_tests} RUNTIME DESTINATION bin) diff --git a/modules/performance/include/performance.hpp b/modules/performance/include/performance.hpp index 6b74462b05..865a515299 100644 --- a/modules/performance/include/performance.hpp +++ b/modules/performance/include/performance.hpp @@ -1,129 +1,129 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "task/include/task.hpp" -#include "util/include/util.hpp" - -namespace ppc::performance { - -inline double DefaultTimer() { - return -1.0; 
-} - -struct PerfAttr { - /// @brief Number of times the task is run for performance evaluation. - uint64_t num_running = 5; - /// @brief Timer function returning current time in seconds. - /// @cond - std::function current_timer = DefaultTimer; - /// @endcond -}; - -struct PerfResults { - /// @brief Measured execution time in seconds. - double time_sec = 0.0; - enum class TypeOfRunning : uint8_t { kPipeline, kTaskRun, kNone }; - TypeOfRunning type_of_running = TypeOfRunning::kNone; - constexpr static double kMaxTime = 10.0; -}; - -template -class Perf { - public: - // Init performance analysis with an initialized task and initialized data - explicit Perf(const ppc::task::TaskPtr &task_ptr) : task_(task_ptr) { - task_ptr->GetStateOfTesting() = ppc::task::StateOfTesting::kPerf; - } - // Check performance of full task's pipeline: PreProcessing() -> - // Validation() -> Run() -> PostProcessing() - void PipelineRun(const PerfAttr &perf_attr) { - perf_results_.type_of_running = PerfResults::TypeOfRunning::kPipeline; - - CommonRun(perf_attr, [&] { - task_->Validation(); - task_->PreProcessing(); - task_->Run(); - task_->PostProcessing(); - }, perf_results_); - } - // Check performance of task's Run() function - void TaskRun(const PerfAttr &perf_attr) { - perf_results_.type_of_running = PerfResults::TypeOfRunning::kTaskRun; - - task_->Validation(); - task_->PreProcessing(); - CommonRun(perf_attr, [&] { task_->Run(); }, perf_results_); - task_->PostProcessing(); - - task_->Validation(); - task_->PreProcessing(); - task_->Run(); - task_->PostProcessing(); - } - // Print results for automation checkers - void PrintPerfStatistic(const std::string &test_id) const { - std::string type_test_name; - if (perf_results_.type_of_running == PerfResults::TypeOfRunning::kTaskRun) { - type_test_name = "task_run"; - } else if (perf_results_.type_of_running == PerfResults::TypeOfRunning::kPipeline) { - type_test_name = "pipeline"; - } else { - std::stringstream err_msg; - err_msg << '\n' 
<< "The type of performance check for the task was not selected.\n"; - throw std::runtime_error(err_msg.str().c_str()); - } - - auto time_secs = perf_results_.time_sec; - const auto max_time = ppc::util::GetPerfMaxTime(); - std::stringstream perf_res_str; - if (time_secs < max_time) { - perf_res_str << std::fixed << std::setprecision(10) << time_secs; - std::cout << test_id << ":" << type_test_name << ":" << perf_res_str.str() << '\n'; - } else { - std::stringstream err_msg; - err_msg << '\n' << "Task execute time need to be: "; - err_msg << "time < " << max_time << " secs." << '\n'; - err_msg << "Original time in secs: " << time_secs << '\n'; - perf_res_str << std::fixed << std::setprecision(10) << -1.0; - std::cout << test_id << ":" << type_test_name << ":" << perf_res_str.str() << '\n'; - throw std::runtime_error(err_msg.str().c_str()); - } - } - /// @brief Retrieves the performance test results. - /// @return The latest PerfResults structure. - [[nodiscard]] PerfResults GetPerfResults() const { - return perf_results_; - } - - private: - PerfResults perf_results_; - std::shared_ptr> task_; - static void CommonRun(const PerfAttr &perf_attr, const std::function &pipeline, PerfResults &perf_results) { - auto begin = perf_attr.current_timer(); - for (uint64_t i = 0; i < perf_attr.num_running; i++) { - pipeline(); - } - auto end = perf_attr.current_timer(); - perf_results.time_sec = (end - begin) / static_cast(perf_attr.num_running); - } -}; - -inline std::string GetStringParamName(PerfResults::TypeOfRunning type_of_running) { - if (type_of_running == PerfResults::TypeOfRunning::kTaskRun) { - return "task_run"; - } - if (type_of_running == PerfResults::TypeOfRunning::kPipeline) { - return "pipeline"; - } - return "none"; -} - -} // namespace ppc::performance +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "task/include/task.hpp" +#include "util/include/util.hpp" + +namespace ppc::performance { + +inline 
double DefaultTimer() { + return -1.0; +} + +struct PerfAttr { + /// @brief Number of times the task is run for performance evaluation. + uint64_t num_running = 5; + /// @brief Timer function returning current time in seconds. + /// @cond + std::function current_timer = DefaultTimer; + /// @endcond +}; + +struct PerfResults { + /// @brief Measured execution time in seconds. + double time_sec = 0.0; + enum class TypeOfRunning : uint8_t { kPipeline, kTaskRun, kNone }; + TypeOfRunning type_of_running = TypeOfRunning::kNone; + constexpr static double kMaxTime = 10.0; +}; + +template +class Perf { + public: + // Init performance analysis with an initialized task and initialized data + explicit Perf(const ppc::task::TaskPtr &task_ptr) : task_(task_ptr) { + task_ptr->GetStateOfTesting() = ppc::task::StateOfTesting::kPerf; + } + // Check performance of full task's pipeline: PreProcessing() -> + // Validation() -> Run() -> PostProcessing() + void PipelineRun(const PerfAttr &perf_attr) { + perf_results_.type_of_running = PerfResults::TypeOfRunning::kPipeline; + + CommonRun(perf_attr, [&] { + task_->Validation(); + task_->PreProcessing(); + task_->Run(); + task_->PostProcessing(); + }, perf_results_); + } + // Check performance of task's Run() function + void TaskRun(const PerfAttr &perf_attr) { + perf_results_.type_of_running = PerfResults::TypeOfRunning::kTaskRun; + + task_->Validation(); + task_->PreProcessing(); + CommonRun(perf_attr, [&] { task_->Run(); }, perf_results_); + task_->PostProcessing(); + + task_->Validation(); + task_->PreProcessing(); + task_->Run(); + task_->PostProcessing(); + } + // Print results for automation checkers + void PrintPerfStatistic(const std::string &test_id) const { + std::string type_test_name; + if (perf_results_.type_of_running == PerfResults::TypeOfRunning::kTaskRun) { + type_test_name = "task_run"; + } else if (perf_results_.type_of_running == PerfResults::TypeOfRunning::kPipeline) { + type_test_name = "pipeline"; + } else { + 
std::stringstream err_msg; + err_msg << '\n' << "The type of performance check for the task was not selected.\n"; + throw std::runtime_error(err_msg.str().c_str()); + } + + auto time_secs = perf_results_.time_sec; + const auto max_time = ppc::util::GetPerfMaxTime(); + std::stringstream perf_res_str; + if (time_secs < max_time) { + perf_res_str << std::fixed << std::setprecision(10) << time_secs; + std::cout << test_id << ":" << type_test_name << ":" << perf_res_str.str() << '\n'; + } else { + std::stringstream err_msg; + err_msg << '\n' << "Task execute time need to be: "; + err_msg << "time < " << max_time << " secs." << '\n'; + err_msg << "Original time in secs: " << time_secs << '\n'; + perf_res_str << std::fixed << std::setprecision(10) << -1.0; + std::cout << test_id << ":" << type_test_name << ":" << perf_res_str.str() << '\n'; + throw std::runtime_error(err_msg.str().c_str()); + } + } + /// @brief Retrieves the performance test results. + /// @return The latest PerfResults structure. 
+ [[nodiscard]] PerfResults GetPerfResults() const { + return perf_results_; + } + + private: + PerfResults perf_results_; + std::shared_ptr> task_; + static void CommonRun(const PerfAttr &perf_attr, const std::function &pipeline, PerfResults &perf_results) { + auto begin = perf_attr.current_timer(); + for (uint64_t i = 0; i < perf_attr.num_running; i++) { + pipeline(); + } + auto end = perf_attr.current_timer(); + perf_results.time_sec = (end - begin) / static_cast(perf_attr.num_running); + } +}; + +inline std::string GetStringParamName(PerfResults::TypeOfRunning type_of_running) { + if (type_of_running == PerfResults::TypeOfRunning::kTaskRun) { + return "task_run"; + } + if (type_of_running == PerfResults::TypeOfRunning::kPipeline) { + return "pipeline"; + } + return "none"; +} + +} // namespace ppc::performance diff --git a/modules/performance/tests/.clang-tidy b/modules/performance/tests/.clang-tidy index ef43b7aa8a..d68523c24e 100644 --- a/modules/performance/tests/.clang-tidy +++ b/modules/performance/tests/.clang-tidy @@ -1,13 +1,13 @@ -InheritParentConfig: true - -Checks: > - -modernize-loop-convert, - -cppcoreguidelines-avoid-goto, - -cppcoreguidelines-avoid-non-const-global-variables, - -misc-use-anonymous-namespace, - -modernize-use-std-print, - -modernize-type-traits - -CheckOptions: - - key: readability-function-cognitive-complexity.Threshold - value: 50 # Relaxed for tests +InheritParentConfig: true + +Checks: > + -modernize-loop-convert, + -cppcoreguidelines-avoid-goto, + -cppcoreguidelines-avoid-non-const-global-variables, + -misc-use-anonymous-namespace, + -modernize-use-std-print, + -modernize-type-traits + +CheckOptions: + - key: readability-function-cognitive-complexity.Threshold + value: 50 # Relaxed for tests diff --git a/modules/performance/tests/perf_tests.cpp b/modules/performance/tests/perf_tests.cpp index 9a4ce9e366..7332b7b066 100644 --- a/modules/performance/tests/perf_tests.cpp +++ b/modules/performance/tests/perf_tests.cpp @@ -1,412 
+1,414 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "performance/include/performance.hpp" -#include "task/include/task.hpp" -#include "util/include/util.hpp" - -using ppc::task::StatusOfTask; -using ppc::task::Task; -using ppc::task::TypeOfTask; - -namespace ppc::test { - -template -class TestPerfTask : public ppc::task::Task { - public: - explicit TestPerfTask(const InType &in) { - this->GetInput() = in; - } - - bool ValidationImpl() override { - return !this->GetInput().empty(); - } - - bool PreProcessingImpl() override { - this->GetOutput() = 0; - return true; - } - - bool RunImpl() override { - for (unsigned i = 0; i < this->GetInput().size(); i++) { - this->GetOutput() += this->GetInput()[i]; - } - return true; - } - - bool PostProcessingImpl() override { - return true; - } -}; - -template -class FakePerfTask : public TestPerfTask { - public: - explicit FakePerfTask(const InType &in) : TestPerfTask(in) {} - - bool RunImpl() override { - std::this_thread::sleep_for(std::chrono::seconds(11)); - return TestPerfTask::RunImpl(); - } -}; - -} // namespace ppc::test - -namespace ppc::performance { - -TEST(PerfTests, CheckPerfPipeline) { - std::vector in(2000, 1); - - auto test_task = std::make_shared, uint32_t>>(in); - - Perf, uint32_t> perf_analyzer(test_task); - - PerfAttr perf_attr; - perf_analyzer.PipelineRun(perf_attr); - - perf_analyzer.PrintPerfStatistic("check_perf_pipeline"); - ASSERT_LE(perf_analyzer.GetPerfResults().time_sec, PerfResults::kMaxTime); - EXPECT_EQ(test_task->GetOutput(), in.size()); -} - -TEST(PerfTests, CheckPerfPipelineFloat) { - std::vector in(2000, 1); - - auto test_task = std::make_shared, float>>(in); - - Perf, float> perf_analyzer(test_task); - - PerfAttr perf_attr; - perf_analyzer.PipelineRun(perf_attr); - - perf_analyzer.PrintPerfStatistic("check_perf_pipeline_float"); - ASSERT_LE(perf_analyzer.GetPerfResults().time_sec, 
PerfResults::kMaxTime); - EXPECT_EQ(test_task->GetOutput(), in.size()); -} - -TEST(PerfTests, CheckPerfPipelineUint8tSlowTest) { - std::vector in(128, 1); - - auto test_task = std::make_shared, uint8_t>>(in); - - Perf, uint8_t> perf_analyzer(test_task); - - PerfAttr perf_attr; - perf_attr.num_running = 1; - - const auto t0 = std::chrono::high_resolution_clock::now(); - perf_attr.current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - perf_analyzer.PipelineRun(perf_attr); - - ASSERT_ANY_THROW(perf_analyzer.PrintPerfStatistic("check_perf_pipeline_uint8_t_slow_test")); -} - -TEST(PerfTests, SlowPerfRespectsEnvOverride) { - env::detail::set_scoped_environment_variable scoped("PPC_PERF_MAX_TIME", "12"); - std::vector in(128, 1); - auto test_task = std::make_shared, uint8_t>>(in); - Perf, uint8_t> perf_analyzer(test_task); - PerfAttr perf_attr; - perf_attr.num_running = 1; - const auto t0 = std::chrono::high_resolution_clock::now(); - perf_attr.current_timer = [&] { - auto current_time_point = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(current_time_point - t0).count(); - return static_cast(duration) * 1e-9; - }; - perf_analyzer.PipelineRun(perf_attr); - EXPECT_NO_THROW(perf_analyzer.PrintPerfStatistic("slow_perf_respects_env_override")); -} - -TEST(PerfTests, CheckPerfTaskException) { - std::vector in(2000, 1); - - auto test_task = std::make_shared, uint32_t>>(in); - - Perf, uint32_t> perf_analyzer(test_task); - - ASSERT_ANY_THROW(perf_analyzer.PrintPerfStatistic("check_perf_task_exception")); - - PerfAttr perf_attr; - perf_analyzer.TaskRun(perf_attr); -} - -TEST(PerfTests, CheckPerfTaskFloat) { - std::vector in(2000, 1); - - auto test_task = std::make_shared, float>>(in); - - Perf, float> perf_analyzer(test_task); - - PerfAttr perf_attr; - 
perf_analyzer.TaskRun(perf_attr); - - perf_analyzer.PrintPerfStatistic("check_perf_task_float"); - ASSERT_LE(perf_analyzer.GetPerfResults().time_sec, PerfResults::kMaxTime); - EXPECT_EQ(test_task->GetOutput(), in.size()); -} - -struct ParamTestCase { - PerfResults::TypeOfRunning input; - std::string expected_output; - friend void PrintTo(const ParamTestCase ¶m, std::ostream *os) { - *os << "{ input = " << static_cast(param.input) << ", expected = " << param.expected_output << " }"; - } -}; - -class GetStringParamNameParamTest : public ::testing::TestWithParam {}; - -TEST_P(GetStringParamNameParamTest, ReturnsExpectedString) { - const auto ¶m = GetParam(); - EXPECT_EQ(GetStringParamName(param.input), param.expected_output); -} - -INSTANTIATE_TEST_SUITE_P(ParamTests, GetStringParamNameParamTest, - ::testing::Values(ParamTestCase{PerfResults::TypeOfRunning::kTaskRun, "task_run"}, - ParamTestCase{PerfResults::TypeOfRunning::kPipeline, "pipeline"}, - ParamTestCase{PerfResults::TypeOfRunning::kNone, "none"}), - [](const ::testing::TestParamInfo &info) { - return info.param.expected_output; - }); - -struct TaskTypeTestCase { - TypeOfTask type; - std::string expected; - std::string label; - friend void PrintTo(const TaskTypeTestCase ¶m, std::ostream *os) { - *os << "{ type = " << static_cast(param.type) << ", expected = " << param.expected - << ", label = " << param.label << " }"; - } -}; - -class GetStringTaskTypeTest : public ::testing::TestWithParam { - protected: - std::string temp_path; - - void SetUp() override { - temp_path = (std::filesystem::temp_directory_path() / "test_settings.json").string(); - auto j = ppc::util::InitJSONPtr(); - (*j)["tasks"]["all"] = "ALL"; - (*j)["tasks"]["stl"] = "STL"; - (*j)["tasks"]["omp"] = "OMP"; - (*j)["tasks"]["mpi"] = "MPI"; - (*j)["tasks"]["tbb"] = "TBB"; - (*j)["tasks"]["seq"] = "SEQ"; - - std::ofstream(temp_path) << j->dump(); - } - - void TearDown() override { - std::filesystem::remove(temp_path); - } -}; - 
-TEST_P(GetStringTaskTypeTest, ReturnsExpectedString) { - const auto ¶m = GetParam(); - EXPECT_EQ(GetStringTaskType(param.type, temp_path), param.expected) << "Failed on: " << param.label; -} - -INSTANTIATE_TEST_SUITE_P(AllTypeCases, GetStringTaskTypeTest, - ::testing::Values(TaskTypeTestCase{TypeOfTask::kALL, "all_ALL", "kALL"}, - TaskTypeTestCase{TypeOfTask::kSTL, "stl_STL", "kSTL"}, - TaskTypeTestCase{TypeOfTask::kOMP, "omp_OMP", "kOMP"}, - TaskTypeTestCase{TypeOfTask::kMPI, "mpi_MPI", "kMPI"}, - TaskTypeTestCase{TypeOfTask::kTBB, "tbb_TBB", "kTBB"}, - TaskTypeTestCase{TypeOfTask::kSEQ, "seq_SEQ", "kSEQ"})); - -TEST(GetStringTaskTypeStandaloneTest, ThrowsIfFileMissing) { - std::string missing_path = "non_existent_settings.json"; - EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, missing_path), std::runtime_error); -} - -TEST(GetStringTaskTypeStandaloneTest, ExceptionMessageContainsPath) { - const std::string missing_path = "non_existent_settings.json"; - EXPECT_THROW(try { GetStringTaskType(TypeOfTask::kSEQ, missing_path); } catch (const std::runtime_error &e) { - EXPECT_NE(std::string(e.what()).find(missing_path), std::string::npos); - throw; - }, - std::runtime_error); -} - -TEST(GetStringTaskTypeStandaloneTest, ReturnsUnknownForInvalidEnum) { - std::string path = (std::filesystem::temp_directory_path() / "tmp_settings.json").string(); - std::ofstream(path) << R"({"tasks":{"seq":"SEQ"}})"; - - auto result = GetStringTaskType(TypeOfTask::kUnknown, path); - EXPECT_EQ(result, "unknown"); - - std::filesystem::remove(path); -} - -TEST(GetStringTaskTypeEdgeCases, ThrowsIfFileCannotBeOpened) { - EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, "definitely_missing_file.json"), std::runtime_error); -} - -TEST(GetStringTaskTypeEdgeCases, ThrowsIfJsonIsMalformed) { - std::string path = (std::filesystem::temp_directory_path() / "bad_json.json").string(); - std::ofstream(path) << "{ this is not valid json "; - EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, path), 
NlohmannJsonParseError); - std::filesystem::remove(path); -} - -TEST(GetStringTaskTypeEdgeCases, ThrowsIfJsonValueIsNull) { - std::string path = (std::filesystem::temp_directory_path() / "null_value.json").string(); - std::ofstream(path) << R"({"tasks": { "seq": null }})"; - - EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, path), NlohmannJsonTypeError); - - std::filesystem::remove(path); -} - -TEST(GetStringTaskTypeEdgeCases, ReturnsUnknownIfEnumOutOfRange) { - std::string path = (std::filesystem::temp_directory_path() / "ok.json").string(); - std::ofstream(path) << R"({"tasks":{"seq":"SEQ"}})"; - auto result = GetStringTaskType(TypeOfTask::kUnknown, path); - EXPECT_EQ(result, "unknown"); - std::filesystem::remove(path); -} - -TEST(GetStringTaskStatusTest, HandlesEnabledAndDisabled) { - EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kEnabled), "enabled"); - EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kDisabled), "disabled"); -} - -class DummyTask : public Task { - public: - using Task::Task; - bool ValidationImpl() override { - return true; - } - bool PreProcessingImpl() override { - return true; - } - bool RunImpl() override { - return true; - } - bool PostProcessingImpl() override { - return true; - } -}; - -TEST(TaskTest, GetDynamicTypeReturnsCorrectEnum) { - DummyTask task; - task.SetTypeOfTask(TypeOfTask::kOMP); - task.Validation(); - task.PreProcessing(); - task.Run(); - task.PostProcessing(); - EXPECT_EQ(task.GetDynamicTypeOfTask(), TypeOfTask::kOMP); -} - -TEST(TaskTest, DestructorTerminatesIfWrongOrder) { - DummyTask task; - EXPECT_THROW(task.Run(), std::runtime_error); -} - -namespace my { -namespace nested { -struct Type {}; -} // namespace nested - -class Another {}; -} // namespace my - -template -class GetNamespaceTest : public ::testing::Test {}; - -using TestTypes = ::testing::Types; - -TYPED_TEST_SUITE(GetNamespaceTest, TestTypes); - -TYPED_TEST(GetNamespaceTest, ExtractsNamespaceCorrectly) { - std::string k_ns = ppc::util::GetNamespace(); - - if 
constexpr (std::is_same_v) { - EXPECT_EQ(k_ns, "ppc::performance::my::nested"); - } else if constexpr (std::is_same_v) { - EXPECT_EQ(k_ns, "ppc::performance::my"); - } else if constexpr (std::is_same_v) { - EXPECT_EQ(k_ns, ""); - } else { - FAIL() << "Unhandled type in test"; - } -} - -TEST(PerfTest, PipelineRunAndTaskRun) { - auto task_ptr = std::make_shared(); - Perf perf(task_ptr); - - PerfAttr attr; - double time = 0.0; - attr.num_running = 2; - attr.current_timer = [&time]() { - double t = time; - time += 1.0; - return t; - }; - - EXPECT_NO_THROW(perf.PipelineRun(attr)); - auto res_pipeline = perf.GetPerfResults(); - EXPECT_EQ(res_pipeline.type_of_running, PerfResults::TypeOfRunning::kPipeline); - EXPECT_GT(res_pipeline.time_sec, 0.0); - - EXPECT_NO_THROW(perf.TaskRun(attr)); - auto res_taskrun = perf.GetPerfResults(); - EXPECT_EQ(res_taskrun.type_of_running, PerfResults::TypeOfRunning::kTaskRun); - EXPECT_GT(res_taskrun.time_sec, 0.0); -} - -TEST(PerfTest, PrintPerfStatisticThrowsOnNone) { - { - auto task_ptr = std::make_shared(); - Perf perf(task_ptr); - EXPECT_THROW(perf.PrintPerfStatistic("test"), std::runtime_error); - } - EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); - ppc::util::DestructorFailureFlag::Unset(); -} - -TEST(PerfTest, GetStringParamNameTest) { - EXPECT_EQ(GetStringParamName(PerfResults::TypeOfRunning::kTaskRun), "task_run"); - EXPECT_EQ(GetStringParamName(PerfResults::TypeOfRunning::kPipeline), "pipeline"); - EXPECT_EQ(GetStringParamName(PerfResults::TypeOfRunning::kNone), "none"); -} - -TEST(TaskTest, DestructorInvalidPipelineOrderTerminatesPartialPipeline) { - { - struct BadTask : Task { - bool ValidationImpl() override { - return true; - } - bool PreProcessingImpl() override { - return true; - } - bool RunImpl() override { - return true; - } - bool PostProcessingImpl() override { - return true; - } - } task; - task.Validation(); - } - EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); - 
ppc::util::DestructorFailureFlag::Unset(); -} - -} // namespace ppc::performance +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "performance/include/performance.hpp" +#include "task/include/task.hpp" +#include "util/include/util.hpp" + +using ppc::task::StatusOfTask; +using ppc::task::Task; +using ppc::task::TypeOfTask; + +namespace ppc::test { + +template +class TestPerfTask : public ppc::task::Task { + public: + explicit TestPerfTask(const InType &in) { + this->GetInput() = in; + } + + bool ValidationImpl() override { + return !this->GetInput().empty(); + } + + bool PreProcessingImpl() override { + this->GetOutput() = 0; + return true; + } + + bool RunImpl() override { + std::cout << __FILE__ << ":" << __LINE__ << ": " << "" << std::endl; + + for (unsigned i = 0; i < this->GetInput().size(); i++) { + this->GetOutput() += this->GetInput()[i]; + } + return true; + } + + bool PostProcessingImpl() override { + return true; + } +}; + +template +class FakePerfTask : public TestPerfTask { + public: + explicit FakePerfTask(const InType &in) : TestPerfTask(in) {} + + bool RunImpl() override { + std::this_thread::sleep_for(std::chrono::seconds(11)); + return TestPerfTask::RunImpl(); + } +}; + +} // namespace ppc::test + +namespace ppc::performance { + +TEST(PerfTests, CheckPerfPipeline) { + std::vector in(2000, 1); + + auto test_task = std::make_shared, uint32_t>>(in); + + Perf, uint32_t> perf_analyzer(test_task); + + PerfAttr perf_attr; + perf_analyzer.PipelineRun(perf_attr); + + perf_analyzer.PrintPerfStatistic("check_perf_pipeline"); + ASSERT_LE(perf_analyzer.GetPerfResults().time_sec, PerfResults::kMaxTime); + EXPECT_EQ(test_task->GetOutput(), in.size()); +} + +TEST(PerfTests, CheckPerfPipelineFloat) { + std::vector in(2000, 1); + + auto test_task = std::make_shared, float>>(in); + + Perf, float> perf_analyzer(test_task); + + PerfAttr perf_attr; + 
perf_analyzer.PipelineRun(perf_attr); + + perf_analyzer.PrintPerfStatistic("check_perf_pipeline_float"); + ASSERT_LE(perf_analyzer.GetPerfResults().time_sec, PerfResults::kMaxTime); + EXPECT_EQ(test_task->GetOutput(), in.size()); +} + +TEST(PerfTests, CheckPerfPipelineUint8tSlowTest) { + std::vector in(128, 1); + + auto test_task = std::make_shared, uint8_t>>(in); + + Perf, uint8_t> perf_analyzer(test_task); + + PerfAttr perf_attr; + perf_attr.num_running = 1; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perf_attr.current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + perf_analyzer.PipelineRun(perf_attr); + + ASSERT_ANY_THROW(perf_analyzer.PrintPerfStatistic("check_perf_pipeline_uint8_t_slow_test")); +} + +TEST(PerfTests, SlowPerfRespectsEnvOverride) { + env::detail::set_scoped_environment_variable scoped("PPC_PERF_MAX_TIME", "12"); + std::vector in(128, 1); + auto test_task = std::make_shared, uint8_t>>(in); + Perf, uint8_t> perf_analyzer(test_task); + PerfAttr perf_attr; + perf_attr.num_running = 1; + const auto t0 = std::chrono::high_resolution_clock::now(); + perf_attr.current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + perf_analyzer.PipelineRun(perf_attr); + EXPECT_NO_THROW(perf_analyzer.PrintPerfStatistic("slow_perf_respects_env_override")); +} + +TEST(PerfTests, CheckPerfTaskException) { + std::vector in(2000, 1); + + auto test_task = std::make_shared, uint32_t>>(in); + + Perf, uint32_t> perf_analyzer(test_task); + + ASSERT_ANY_THROW(perf_analyzer.PrintPerfStatistic("check_perf_task_exception")); + + PerfAttr perf_attr; + perf_analyzer.TaskRun(perf_attr); +} + +TEST(PerfTests, CheckPerfTaskFloat) { + std::vector 
in(2000, 1); + + auto test_task = std::make_shared, float>>(in); + + Perf, float> perf_analyzer(test_task); + + PerfAttr perf_attr; + perf_analyzer.TaskRun(perf_attr); + + perf_analyzer.PrintPerfStatistic("check_perf_task_float"); + ASSERT_LE(perf_analyzer.GetPerfResults().time_sec, PerfResults::kMaxTime); + EXPECT_EQ(test_task->GetOutput(), in.size()); +} + +struct ParamTestCase { + PerfResults::TypeOfRunning input; + std::string expected_output; + friend void PrintTo(const ParamTestCase ¶m, std::ostream *os) { + *os << "{ input = " << static_cast(param.input) << ", expected = " << param.expected_output << " }"; + } +}; + +class GetStringParamNameParamTest : public ::testing::TestWithParam {}; + +TEST_P(GetStringParamNameParamTest, ReturnsExpectedString) { + const auto ¶m = GetParam(); + EXPECT_EQ(GetStringParamName(param.input), param.expected_output); +} + +INSTANTIATE_TEST_SUITE_P(ParamTests, GetStringParamNameParamTest, + ::testing::Values(ParamTestCase{PerfResults::TypeOfRunning::kTaskRun, "task_run"}, + ParamTestCase{PerfResults::TypeOfRunning::kPipeline, "pipeline"}, + ParamTestCase{PerfResults::TypeOfRunning::kNone, "none"}), + [](const ::testing::TestParamInfo &info) { + return info.param.expected_output; + }); + +struct TaskTypeTestCase { + TypeOfTask type; + std::string expected; + std::string label; + friend void PrintTo(const TaskTypeTestCase ¶m, std::ostream *os) { + *os << "{ type = " << static_cast(param.type) << ", expected = " << param.expected + << ", label = " << param.label << " }"; + } +}; + +class GetStringTaskTypeTest : public ::testing::TestWithParam { + protected: + std::string temp_path; + + void SetUp() override { + temp_path = (std::filesystem::temp_directory_path() / "test_settings.json").string(); + auto j = ppc::util::InitJSONPtr(); + (*j)["tasks"]["all"] = "ALL"; + (*j)["tasks"]["stl"] = "STL"; + (*j)["tasks"]["omp"] = "OMP"; + (*j)["tasks"]["mpi"] = "MPI"; + (*j)["tasks"]["tbb"] = "TBB"; + (*j)["tasks"]["seq"] = "SEQ"; + + 
std::ofstream(temp_path) << j->dump(); + } + + void TearDown() override { + std::filesystem::remove(temp_path); + } +}; + +TEST_P(GetStringTaskTypeTest, ReturnsExpectedString) { + const auto ¶m = GetParam(); + EXPECT_EQ(GetStringTaskType(param.type, temp_path), param.expected) << "Failed on: " << param.label; +} + +INSTANTIATE_TEST_SUITE_P(AllTypeCases, GetStringTaskTypeTest, + ::testing::Values(TaskTypeTestCase{TypeOfTask::kALL, "all_ALL", "kALL"}, + TaskTypeTestCase{TypeOfTask::kSTL, "stl_STL", "kSTL"}, + TaskTypeTestCase{TypeOfTask::kOMP, "omp_OMP", "kOMP"}, + TaskTypeTestCase{TypeOfTask::kMPI, "mpi_MPI", "kMPI"}, + TaskTypeTestCase{TypeOfTask::kTBB, "tbb_TBB", "kTBB"}, + TaskTypeTestCase{TypeOfTask::kSEQ, "seq_SEQ", "kSEQ"})); + +TEST(GetStringTaskTypeStandaloneTest, ThrowsIfFileMissing) { + std::string missing_path = "non_existent_settings.json"; + EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, missing_path), std::runtime_error); +} + +TEST(GetStringTaskTypeStandaloneTest, ExceptionMessageContainsPath) { + const std::string missing_path = "non_existent_settings.json"; + EXPECT_THROW(try { GetStringTaskType(TypeOfTask::kSEQ, missing_path); } catch (const std::runtime_error &e) { + EXPECT_NE(std::string(e.what()).find(missing_path), std::string::npos); + throw; + }, + std::runtime_error); +} + +TEST(GetStringTaskTypeStandaloneTest, ReturnsUnknownForInvalidEnum) { + std::string path = (std::filesystem::temp_directory_path() / "tmp_settings.json").string(); + std::ofstream(path) << R"({"tasks":{"seq":"SEQ"}})"; + + auto result = GetStringTaskType(TypeOfTask::kUnknown, path); + EXPECT_EQ(result, "unknown"); + + std::filesystem::remove(path); +} + +TEST(GetStringTaskTypeEdgeCases, ThrowsIfFileCannotBeOpened) { + EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, "definitely_missing_file.json"), std::runtime_error); +} + +TEST(GetStringTaskTypeEdgeCases, ThrowsIfJsonIsMalformed) { + std::string path = (std::filesystem::temp_directory_path() / 
"bad_json.json").string(); + std::ofstream(path) << "{ this is not valid json "; + EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, path), NlohmannJsonParseError); + std::filesystem::remove(path); +} + +TEST(GetStringTaskTypeEdgeCases, ThrowsIfJsonValueIsNull) { + std::string path = (std::filesystem::temp_directory_path() / "null_value.json").string(); + std::ofstream(path) << R"({"tasks": { "seq": null }})"; + + EXPECT_THROW(GetStringTaskType(TypeOfTask::kSEQ, path), NlohmannJsonTypeError); + + std::filesystem::remove(path); +} + +TEST(GetStringTaskTypeEdgeCases, ReturnsUnknownIfEnumOutOfRange) { + std::string path = (std::filesystem::temp_directory_path() / "ok.json").string(); + std::ofstream(path) << R"({"tasks":{"seq":"SEQ"}})"; + auto result = GetStringTaskType(TypeOfTask::kUnknown, path); + EXPECT_EQ(result, "unknown"); + std::filesystem::remove(path); +} + +TEST(GetStringTaskStatusTest, HandlesEnabledAndDisabled) { + EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kEnabled), "enabled"); + EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kDisabled), "disabled"); +} + +class DummyTask : public Task { + public: + using Task::Task; + bool ValidationImpl() override { + return true; + } + bool PreProcessingImpl() override { + return true; + } + bool RunImpl() override { + return true; + } + bool PostProcessingImpl() override { + return true; + } +}; + +TEST(TaskTest, GetDynamicTypeReturnsCorrectEnum) { + DummyTask task; + task.SetTypeOfTask(TypeOfTask::kOMP); + task.Validation(); + task.PreProcessing(); + task.Run(); + task.PostProcessing(); + EXPECT_EQ(task.GetDynamicTypeOfTask(), TypeOfTask::kOMP); +} + +TEST(TaskTest, DestructorTerminatesIfWrongOrder) { + DummyTask task; + EXPECT_THROW(task.Run(), std::runtime_error); +} + +namespace my { +namespace nested { +struct Type {}; +} // namespace nested + +class Another {}; +} // namespace my + +template +class GetNamespaceTest : public ::testing::Test {}; + +using TestTypes = ::testing::Types; + 
+TYPED_TEST_SUITE(GetNamespaceTest, TestTypes); + +TYPED_TEST(GetNamespaceTest, ExtractsNamespaceCorrectly) { + std::string k_ns = ppc::util::GetNamespace(); + + if constexpr (std::is_same_v) { + EXPECT_EQ(k_ns, "ppc::performance::my::nested"); + } else if constexpr (std::is_same_v) { + EXPECT_EQ(k_ns, "ppc::performance::my"); + } else if constexpr (std::is_same_v) { + EXPECT_EQ(k_ns, ""); + } else { + FAIL() << "Unhandled type in test"; + } +} + +TEST(PerfTest, PipelineRunAndTaskRun) { + auto task_ptr = std::make_shared(); + Perf perf(task_ptr); + + PerfAttr attr; + double time = 0.0; + attr.num_running = 2; + attr.current_timer = [&time]() { + double t = time; + time += 1.0; + return t; + }; + + EXPECT_NO_THROW(perf.PipelineRun(attr)); + auto res_pipeline = perf.GetPerfResults(); + EXPECT_EQ(res_pipeline.type_of_running, PerfResults::TypeOfRunning::kPipeline); + EXPECT_GT(res_pipeline.time_sec, 0.0); + + EXPECT_NO_THROW(perf.TaskRun(attr)); + auto res_taskrun = perf.GetPerfResults(); + EXPECT_EQ(res_taskrun.type_of_running, PerfResults::TypeOfRunning::kTaskRun); + EXPECT_GT(res_taskrun.time_sec, 0.0); +} + +TEST(PerfTest, PrintPerfStatisticThrowsOnNone) { + { + auto task_ptr = std::make_shared(); + Perf perf(task_ptr); + EXPECT_THROW(perf.PrintPerfStatistic("test"), std::runtime_error); + } + EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); + ppc::util::DestructorFailureFlag::Unset(); +} + +TEST(PerfTest, GetStringParamNameTest) { + EXPECT_EQ(GetStringParamName(PerfResults::TypeOfRunning::kTaskRun), "task_run"); + EXPECT_EQ(GetStringParamName(PerfResults::TypeOfRunning::kPipeline), "pipeline"); + EXPECT_EQ(GetStringParamName(PerfResults::TypeOfRunning::kNone), "none"); +} + +TEST(TaskTest, DestructorInvalidPipelineOrderTerminatesPartialPipeline) { + { + struct BadTask : Task { + bool ValidationImpl() override { + return true; + } + bool PreProcessingImpl() override { + return true; + } + bool RunImpl() override { + return true; + } + bool 
PostProcessingImpl() override { + return true; + } + } task; + task.Validation(); + } + EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); + ppc::util::DestructorFailureFlag::Unset(); +} + +} // namespace ppc::performance diff --git a/modules/runners/include/runners.hpp b/modules/runners/include/runners.hpp index b957e53236..3287a53ae0 100644 --- a/modules/runners/include/runners.hpp +++ b/modules/runners/include/runners.hpp @@ -1,52 +1,52 @@ -#pragma once - -#include - -#include -#include - -namespace ppc::runners { - -/// @brief GTest event listener that checks for unread MPI messages after each test. -/// @note Used to detect unexpected inter-process communication leftovers. -class UnreadMessagesDetector : public ::testing::EmptyTestEventListener { - public: - UnreadMessagesDetector() = default; - /// @brief Called by GTest after a test ends. Checks for unread messages. - void OnTestEnd(const ::testing::TestInfo & /*test_info*/) override; - - private: -}; - -/// @brief GTest event listener that prints additional information on test failures in worker processes. -/// @details Includes MPI rank info in failure output for debugging. -class WorkerTestFailurePrinter : public ::testing::EmptyTestEventListener { - public: - /// @brief Constructs the listener with a base listener for delegation. - /// @param base A shared pointer to another GTest event listener. - explicit WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener> base) : base_(std::move(base)) {} - /// @brief Called after a test ends. Passes call base listener and print failures with rank. - void OnTestEnd(const ::testing::TestInfo &test_info) override; - /// @brief Called when a test part fails. Prints MPI rank info along with the failure. - void OnTestPartResult(const ::testing::TestPartResult &test_part_result) override; - - private: - /// @brief Prints the MPI rank of the current process to stderr. 
- static void PrintProcessRank(); - std::shared_ptr<::testing::TestEventListener> base_; -}; - -/// @brief Initializes the testing environment (e.g., MPI, logging). -/// @param argc Argument count. -/// @param argv Argument vector. -/// @return Exit code from RUN_ALL_TESTS or MPI error code if initialization/ -/// finalization fails. -int Init(int argc, char **argv); - -/// @brief Initializes the testing environment only for gtest. -/// @param argc Argument count. -/// @param argv Argument vector. -/// @return Exit code from RUN_ALL_TESTS. -int SimpleInit(int argc, char **argv); - -} // namespace ppc::runners +#pragma once + +#include + +#include +#include + +namespace ppc::runners { + +/// @brief GTest event listener that checks for unread MPI messages after each test. +/// @note Used to detect unexpected inter-process communication leftovers. +class UnreadMessagesDetector : public ::testing::EmptyTestEventListener { + public: + UnreadMessagesDetector() = default; + /// @brief Called by GTest after a test ends. Checks for unread messages. + void OnTestEnd(const ::testing::TestInfo & /*test_info*/) override; + + private: +}; + +/// @brief GTest event listener that prints additional information on test failures in worker processes. +/// @details Includes MPI rank info in failure output for debugging. +class WorkerTestFailurePrinter : public ::testing::EmptyTestEventListener { + public: + /// @brief Constructs the listener with a base listener for delegation. + /// @param base A shared pointer to another GTest event listener. + explicit WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener> base) : base_(std::move(base)) {} + /// @brief Called after a test ends. Passes call base listener and print failures with rank. + void OnTestEnd(const ::testing::TestInfo &test_info) override; + /// @brief Called when a test part fails. Prints MPI rank info along with the failure. 
+ void OnTestPartResult(const ::testing::TestPartResult &test_part_result) override; + + private: + /// @brief Prints the MPI rank of the current process to stderr. + static void PrintProcessRank(); + std::shared_ptr<::testing::TestEventListener> base_; +}; + +/// @brief Initializes the testing environment (e.g., MPI, logging). +/// @param argc Argument count. +/// @param argv Argument vector. +/// @return Exit code from RUN_ALL_TESTS or MPI error code if initialization/ +/// finalization fails. +int Init(int argc, char **argv); + +/// @brief Initializes the testing environment only for gtest. +/// @param argc Argument count. +/// @param argv Argument vector. +/// @return Exit code from RUN_ALL_TESTS. +int SimpleInit(int argc, char **argv); + +} // namespace ppc::runners diff --git a/modules/runners/src/runners.cpp b/modules/runners/src/runners.cpp index 5ab4e8e6b5..dc1db63bee 100644 --- a/modules/runners/src/runners.cpp +++ b/modules/runners/src/runners.cpp @@ -1,188 +1,188 @@ -#include "runners/include/runners.hpp" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "oneapi/tbb/global_control.h" -#include "util/include/util.hpp" - -namespace ppc::runners { - -void UnreadMessagesDetector::OnTestEnd(const ::testing::TestInfo & /*test_info*/) { - int rank = -1; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - - MPI_Barrier(MPI_COMM_WORLD); - - int flag = -1; - MPI_Status status; - - const int iprobe_res = MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status); - if (iprobe_res != MPI_SUCCESS) { - std::cerr << std::format("[ PROCESS {} ] [ ERROR ] MPI_Iprobe failed with code {}", rank, iprobe_res) << '\n'; - MPI_Abort(MPI_COMM_WORLD, iprobe_res); - } - - if (flag != 0) { - std::cerr - << std::format( - "[ PROCESS {} ] [ FAILED ] MPI message queue has an unread message from process {} with tag {}", - rank, status.MPI_SOURCE, status.MPI_TAG) - << '\n'; - 
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - } - - MPI_Barrier(MPI_COMM_WORLD); -} - -void WorkerTestFailurePrinter::OnTestEnd(const ::testing::TestInfo &test_info) { - if (test_info.result()->Passed()) { - return; - } - PrintProcessRank(); - base_->OnTestEnd(test_info); - // Abort the whole MPI job on any test failure to avoid other ranks hanging on barriers. - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); -} - -void WorkerTestFailurePrinter::OnTestPartResult(const ::testing::TestPartResult &test_part_result) { - if (test_part_result.passed() || test_part_result.skipped()) { - return; - } - PrintProcessRank(); - base_->OnTestPartResult(test_part_result); -} - -void WorkerTestFailurePrinter::PrintProcessRank() { - int rank = -1; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - std::cerr << std::format(" [ PROCESS {} ] ", rank); -} - -namespace { -int RunAllTests() { - auto status = RUN_ALL_TESTS(); - if (ppc::util::DestructorFailureFlag::Get()) { - throw std::runtime_error( - std::format("[ ERROR ] Destructor failed with code {}", ppc::util::DestructorFailureFlag::Get())); - } - return status; -} - -void SyncGTestSeed() { - unsigned int seed = 0; - int rank = -1; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - if (rank == 0) { - try { - seed = std::random_device{}(); - } catch (...) { - seed = 0; - } - if (seed == 0) { - const auto now = static_cast(std::chrono::steady_clock::now().time_since_epoch().count()); - seed = static_cast(((now & 0x7fffffffULL) | 1ULL)); - } - } - MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); - ::testing::GTEST_FLAG(random_seed) = static_cast(seed); -} - -void SyncGTestFilter() { - int rank = -1; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - std::string filter = (rank == 0) ? 
::testing::GTEST_FLAG(filter) : std::string{}; - int len = static_cast(filter.size()); - MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD); - if (rank != 0) { - filter.resize(static_cast(len)); - } - if (len > 0) { - MPI_Bcast(filter.data(), len, MPI_CHAR, 0, MPI_COMM_WORLD); - } - ::testing::GTEST_FLAG(filter) = filter; -} - -bool HasFlag(int argc, char **argv, std::string_view flag) { - for (int i = 1; i < argc; ++i) { - if (argv[i] != nullptr && std::string_view(argv[i]) == flag) { - return true; - } - } - return false; -} - -int RunAllTestsSafely() { - try { - return RunAllTests(); - } catch (const std::exception &e) { - std::cerr << std::format("[ ERROR ] Exception after tests: {}", e.what()) << '\n'; - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - return EXIT_FAILURE; - } catch (...) { - std::cerr << "[ ERROR ] Unknown exception after tests" << '\n'; - MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); - return EXIT_FAILURE; - } -} -} // namespace - -int Init(int argc, char **argv) { - const int init_res = MPI_Init(&argc, &argv); - if (init_res != MPI_SUCCESS) { - std::cerr << std::format("[ ERROR ] MPI_Init failed with code {}", init_res) << '\n'; - MPI_Abort(MPI_COMM_WORLD, init_res); - return init_res; - } - - // Limit the number of threads in TBB - tbb::global_control control(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads()); - - ::testing::InitGoogleTest(&argc, argv); - - // Synchronize GoogleTest internals across ranks to avoid divergence - SyncGTestSeed(); - SyncGTestFilter(); - - auto &listeners = ::testing::UnitTest::GetInstance()->listeners(); - int rank = -1; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - const bool print_workers = HasFlag(argc, argv, "--print-workers"); - if (rank != 0 && !print_workers) { - auto *listener = listeners.Release(listeners.default_result_printer()); - listeners.Append(new WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener>(listener))); - } - listeners.Append(new UnreadMessagesDetector()); - - 
const int status = RunAllTestsSafely(); - - const int finalize_res = MPI_Finalize(); - if (finalize_res != MPI_SUCCESS) { - std::cerr << std::format("[ ERROR ] MPI_Finalize failed with code {}", finalize_res) << '\n'; - MPI_Abort(MPI_COMM_WORLD, finalize_res); - return finalize_res; - } - return status; -} - -int SimpleInit(int argc, char **argv) { - // Limit the number of threads in TBB - tbb::global_control control(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads()); - - testing::InitGoogleTest(&argc, argv); - return RunAllTests(); -} - -} // namespace ppc::runners +#include "runners/include/runners.hpp" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "oneapi/tbb/global_control.h" +#include "util/include/util.hpp" + +namespace ppc::runners { + +void UnreadMessagesDetector::OnTestEnd(const ::testing::TestInfo & /*test_info*/) { + int rank = -1; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + MPI_Barrier(MPI_COMM_WORLD); + + int flag = -1; + MPI_Status status; + + const int iprobe_res = MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status); + if (iprobe_res != MPI_SUCCESS) { + std::cerr << std::format("[ PROCESS {} ] [ ERROR ] MPI_Iprobe failed with code {}", rank, iprobe_res) << '\n'; + MPI_Abort(MPI_COMM_WORLD, iprobe_res); + } + + if (flag != 0) { + std::cerr + << std::format( + "[ PROCESS {} ] [ FAILED ] MPI message queue has an unread message from process {} with tag {}", + rank, status.MPI_SOURCE, status.MPI_TAG) + << '\n'; + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); + } + + MPI_Barrier(MPI_COMM_WORLD); +} + +void WorkerTestFailurePrinter::OnTestEnd(const ::testing::TestInfo &test_info) { + if (test_info.result()->Passed()) { + return; + } + PrintProcessRank(); + base_->OnTestEnd(test_info); + // Abort the whole MPI job on any test failure to avoid other ranks hanging on barriers. 
+ MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); +} + +void WorkerTestFailurePrinter::OnTestPartResult(const ::testing::TestPartResult &test_part_result) { + if (test_part_result.passed() || test_part_result.skipped()) { + return; + } + PrintProcessRank(); + base_->OnTestPartResult(test_part_result); +} + +void WorkerTestFailurePrinter::PrintProcessRank() { + int rank = -1; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + std::cerr << std::format(" [ PROCESS {} ] ", rank); +} + +namespace { +int RunAllTests() { + auto status = RUN_ALL_TESTS(); + if (ppc::util::DestructorFailureFlag::Get()) { + throw std::runtime_error( + std::format("[ ERROR ] Destructor failed with code {}", ppc::util::DestructorFailureFlag::Get())); + } + return status; +} + +void SyncGTestSeed() { + unsigned int seed = 0; + int rank = -1; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (rank == 0) { + try { + seed = std::random_device{}(); + } catch (...) { + seed = 0; + } + if (seed == 0) { + const auto now = static_cast(std::chrono::steady_clock::now().time_since_epoch().count()); + seed = static_cast(((now & 0x7fffffffULL) | 1ULL)); + } + } + MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD); + ::testing::GTEST_FLAG(random_seed) = static_cast(seed); +} + +void SyncGTestFilter() { + int rank = -1; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + std::string filter = (rank == 0) ? 
::testing::GTEST_FLAG(filter) : std::string{}; + int len = static_cast(filter.size()); + MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD); + if (rank != 0) { + filter.resize(static_cast(len)); + } + if (len > 0) { + MPI_Bcast(filter.data(), len, MPI_CHAR, 0, MPI_COMM_WORLD); + } + ::testing::GTEST_FLAG(filter) = filter; +} + +bool HasFlag(int argc, char **argv, std::string_view flag) { + for (int i = 1; i < argc; ++i) { + if (argv[i] != nullptr && std::string_view(argv[i]) == flag) { + return true; + } + } + return false; +} + +int RunAllTestsSafely() { + try { + return RunAllTests(); + } catch (const std::exception &e) { + std::cerr << std::format("[ ERROR ] Exception after tests: {}", e.what()) << '\n'; + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); + return EXIT_FAILURE; + } catch (...) { + std::cerr << "[ ERROR ] Unknown exception after tests" << '\n'; + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); + return EXIT_FAILURE; + } +} +} // namespace + +int Init(int argc, char **argv) { + const int init_res = MPI_Init(&argc, &argv); + if (init_res != MPI_SUCCESS) { + std::cerr << std::format("[ ERROR ] MPI_Init failed with code {}", init_res) << '\n'; + MPI_Abort(MPI_COMM_WORLD, init_res); + return init_res; + } + + // Limit the number of threads in TBB + tbb::global_control control(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads()); + + ::testing::InitGoogleTest(&argc, argv); + + // Synchronize GoogleTest internals across ranks to avoid divergence + SyncGTestSeed(); + SyncGTestFilter(); + + auto &listeners = ::testing::UnitTest::GetInstance()->listeners(); + int rank = -1; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + const bool print_workers = HasFlag(argc, argv, "--print-workers"); + if (rank != 0 && !print_workers) { + auto *listener = listeners.Release(listeners.default_result_printer()); + listeners.Append(new WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener>(listener))); + } + listeners.Append(new UnreadMessagesDetector()); + + 
const int status = RunAllTestsSafely(); + + const int finalize_res = MPI_Finalize(); + if (finalize_res != MPI_SUCCESS) { + std::cerr << std::format("[ ERROR ] MPI_Finalize failed with code {}", finalize_res) << '\n'; + MPI_Abort(MPI_COMM_WORLD, finalize_res); + return finalize_res; + } + return status; +} + +int SimpleInit(int argc, char **argv) { + // Limit the number of threads in TBB + tbb::global_control control(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads()); + + testing::InitGoogleTest(&argc, argv); + return RunAllTests(); +} + +} // namespace ppc::runners diff --git a/modules/task/include/task.hpp b/modules/task/include/task.hpp index 5cf19331b1..234dec6b5f 100644 --- a/modules/task/include/task.hpp +++ b/modules/task/include/task.hpp @@ -1,290 +1,290 @@ -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ppc::task { - -/// @brief Represents the type of task (parallelization technology). -/// @details Used to select the implementation type in tests and execution logic. 
-enum class TypeOfTask : uint8_t { - /// Use all available implementations - kALL, - /// MPI (Message Passing Interface) - kMPI, - /// OpenMP (Open Multi-Processing) - kOMP, - /// Sequential implementation - kSEQ, - /// Standard Thread Library (STL threads) - kSTL, - /// Intel Threading Building Blocks (TBB) - kTBB, - /// Unknown task type - kUnknown -}; - -using TaskMapping = std::pair; -using TaskMappingArray = std::array; - -const TaskMappingArray kTaskTypeMappings = {{{TypeOfTask::kALL, "all"}, - {TypeOfTask::kMPI, "mpi"}, - {TypeOfTask::kOMP, "omp"}, - {TypeOfTask::kSEQ, "seq"}, - {TypeOfTask::kSTL, "stl"}, - {TypeOfTask::kTBB, "tbb"}}}; - -inline std::string TypeOfTaskToString(TypeOfTask type) { - for (const auto &[key, value] : kTaskTypeMappings) { - if (key == type) { - return value; - } - } - return "unknown"; -} - -/// @brief Indicates whether a task is enabled or disabled. -enum class StatusOfTask : uint8_t { - /// Task is enabled and should be executed - kEnabled, - /// Task is disabled and will be skipped - kDisabled -}; - -/// @brief Returns a string representation of the task status. -/// @param status_of_task Task status (enabled or disabled). -/// @return "enabled" if the task is enabled, otherwise "disabled". -inline std::string GetStringTaskStatus(StatusOfTask status_of_task) { - if (status_of_task == StatusOfTask::kDisabled) { - return "disabled"; - } - return "enabled"; -} - -/// @brief Returns a string representation of the task type based on the JSON settings file. -/// @param type_of_task Type of the task. -/// @param settings_file_path Path to the JSON file containing task type strings. -/// @return Formatted string combining the task type and its corresponding value from the file. -/// @throws std::runtime_error If the file cannot be opened. 
-inline std::string GetStringTaskType(TypeOfTask type_of_task, const std::string &settings_file_path) { - std::ifstream file(settings_file_path); - if (!file.is_open()) { - throw std::runtime_error("Failed to open " + settings_file_path); - } - - auto list_settings = ppc::util::InitJSONPtr(); - file >> *list_settings; - - std::string type_str = TypeOfTaskToString(type_of_task); - if (type_str == "unknown") { - return type_str; - } - - return type_str + "_" + std::string((*list_settings)["tasks"][type_str]); -} - -enum class StateOfTesting : uint8_t { kFunc, kPerf }; - -template -/// @brief Base abstract class representing a generic task with a defined pipeline. -/// @tparam InType Input data type. -/// @tparam OutType Output data type. -class Task { - public: - /// @brief Validates input data and task attributes before execution. - /// @return True if validation is successful. - virtual bool Validation() final { - if (stage_ == PipelineStage::kNone || stage_ == PipelineStage::kDone) { - stage_ = PipelineStage::kValidation; - } else { - stage_ = PipelineStage::kException; - throw std::runtime_error("Validation should be called before preprocessing"); - } - return ValidationImpl(); - } - - /// @brief Performs preprocessing on the input data. - /// @return True if preprocessing is successful. - virtual bool PreProcessing() final { - if (stage_ == PipelineStage::kValidation) { - stage_ = PipelineStage::kPreProcessing; - } else { - stage_ = PipelineStage::kException; - throw std::runtime_error("Preprocessing should be called after validation"); - } - if (state_of_testing_ == StateOfTesting::kFunc) { - InternalTimeTest(); - } - return PreProcessingImpl(); - } - - /// @brief Executes the main logic of the task. - /// @return True if execution is successful. 
- virtual bool Run() final { - if (stage_ == PipelineStage::kPreProcessing || stage_ == PipelineStage::kRun) { - stage_ = PipelineStage::kRun; - } else { - stage_ = PipelineStage::kException; - throw std::runtime_error("Run should be called after preprocessing"); - } - return RunImpl(); - } - - /// @brief Performs postprocessing on the output data. - /// @return True if postprocessing is successful. - virtual bool PostProcessing() final { - if (stage_ == PipelineStage::kRun) { - stage_ = PipelineStage::kDone; - } else { - stage_ = PipelineStage::kException; - throw std::runtime_error("Postprocessing should be called after run"); - } - if (state_of_testing_ == StateOfTesting::kFunc) { - InternalTimeTest(); - } - return PostProcessingImpl(); - } - - /// @brief Returns the current testing mode. - /// @return Reference to the current StateOfTesting. - StateOfTesting &GetStateOfTesting() { - return state_of_testing_; - } - - /// @brief Sets the dynamic task type. - /// @param type_of_task Task type to set. - void SetTypeOfTask(TypeOfTask type_of_task) { - type_of_task_ = type_of_task; - } - - /// @brief Returns the dynamic task type. - /// @return Current dynamic task type. - [[nodiscard]] TypeOfTask GetDynamicTypeOfTask() const { - return type_of_task_; - } - - /// @brief Returns the current task status. - /// @return Task status (enabled or disabled). - [[nodiscard]] StatusOfTask GetStatusOfTask() const { - return status_of_task_; - } - - /// @brief Returns the static task type. - /// @return Static task type (default: kUnknown). - static constexpr TypeOfTask GetStaticTypeOfTask() { - return TypeOfTask::kUnknown; - } - - /// @brief Returns a reference to the input data. - /// @return Reference to the task's input data. - InType &GetInput() { - return input_; - } - - /// @brief Returns a reference to the output data. - /// @return Reference to the task's output data. - OutType &GetOutput() { - return output_; - } - - /// @brief Destructor. 
Verifies that the pipeline was executed in the correct order. - /// @note Terminates the program if the pipeline order is incorrect or incomplete. - virtual ~Task() { - if (stage_ != PipelineStage::kDone && stage_ != PipelineStage::kException) { - ppc::util::DestructorFailureFlag::Set(); - } -#if _OPENMP >= 201811 - omp_pause_resource_all(omp_pause_soft); -#endif - } - - protected: - /// @brief Measures execution time between preprocessing and postprocessing steps. - /// @throws std::runtime_error If execution exceeds the allowed time limit. - virtual void InternalTimeTest() final { - if (stage_ == PipelineStage::kPreProcessing) { - tmp_time_point_ = std::chrono::high_resolution_clock::now(); - } - - if (stage_ == PipelineStage::kDone) { - auto duration = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - - tmp_time_point_) - .count(); - auto diff = static_cast(duration) * 1e-9; - - const auto max_time = ppc::util::GetTaskMaxTime(); - std::stringstream err_msg; - if (diff < max_time) { - err_msg << "Test time:" << std::fixed << std::setprecision(10) << diff << '\n'; - } else { - err_msg << "\nTask execute time need to be: "; - err_msg << "time < " << max_time << " secs.\n"; - err_msg << "Original time in secs: " << diff << '\n'; - throw std::runtime_error(err_msg.str().c_str()); - } - } - } - - /// @brief User-defined validation logic. - /// @return True if validation is successful. - virtual bool ValidationImpl() = 0; - - /// @brief User-defined preprocessing logic. - /// @return True if preprocessing is successful. - virtual bool PreProcessingImpl() = 0; - - /// @brief User-defined task execution logic. - /// @return True if a run is successful. - virtual bool RunImpl() = 0; - - /// @brief User-defined postprocessing logic. - /// @return True if postprocessing is successful. 
- virtual bool PostProcessingImpl() = 0; - - private: - InType input_{}; - OutType output_{}; - StateOfTesting state_of_testing_ = StateOfTesting::kFunc; - TypeOfTask type_of_task_ = TypeOfTask::kUnknown; - StatusOfTask status_of_task_ = StatusOfTask::kEnabled; - std::chrono::high_resolution_clock::time_point tmp_time_point_; - enum class PipelineStage : uint8_t { - kNone, - kValidation, - kPreProcessing, - kRun, - kDone, - kException - } stage_ = PipelineStage::kNone; -}; - -/// @brief Smart pointer alias for Task. -/// @tparam InType Input data type. -/// @tparam OutType Output data type. -template -using TaskPtr = std::shared_ptr>; - -/// @brief Constructs and returns a shared pointer to a task with the given input. -/// @tparam TaskType Type of the task to create. -/// @tparam InType Type of the input. -/// @param in Input to pass to the task constructor. -/// @return Shared a pointer to the newly created task. -template -std::shared_ptr TaskGetter(const InType &in) { - return std::make_shared(in); -} - -} // namespace ppc::task +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ppc::task { + +/// @brief Represents the type of task (parallelization technology). +/// @details Used to select the implementation type in tests and execution logic. 
+enum class TypeOfTask : uint8_t { + /// Use all available implementations + kALL, + /// MPI (Message Passing Interface) + kMPI, + /// OpenMP (Open Multi-Processing) + kOMP, + /// Sequential implementation + kSEQ, + /// Standard Thread Library (STL threads) + kSTL, + /// Intel Threading Building Blocks (TBB) + kTBB, + /// Unknown task type + kUnknown +}; + +using TaskMapping = std::pair; +using TaskMappingArray = std::array; + +const TaskMappingArray kTaskTypeMappings = {{{TypeOfTask::kALL, "all"}, + {TypeOfTask::kMPI, "mpi"}, + {TypeOfTask::kOMP, "omp"}, + {TypeOfTask::kSEQ, "seq"}, + {TypeOfTask::kSTL, "stl"}, + {TypeOfTask::kTBB, "tbb"}}}; + +inline std::string TypeOfTaskToString(TypeOfTask type) { + for (const auto &[key, value] : kTaskTypeMappings) { + if (key == type) { + return value; + } + } + return "unknown"; +} + +/// @brief Indicates whether a task is enabled or disabled. +enum class StatusOfTask : uint8_t { + /// Task is enabled and should be executed + kEnabled, + /// Task is disabled and will be skipped + kDisabled +}; + +/// @brief Returns a string representation of the task status. +/// @param status_of_task Task status (enabled or disabled). +/// @return "enabled" if the task is enabled, otherwise "disabled". +inline std::string GetStringTaskStatus(StatusOfTask status_of_task) { + if (status_of_task == StatusOfTask::kDisabled) { + return "disabled"; + } + return "enabled"; +} + +/// @brief Returns a string representation of the task type based on the JSON settings file. +/// @param type_of_task Type of the task. +/// @param settings_file_path Path to the JSON file containing task type strings. +/// @return Formatted string combining the task type and its corresponding value from the file. +/// @throws std::runtime_error If the file cannot be opened. 
+inline std::string GetStringTaskType(TypeOfTask type_of_task, const std::string &settings_file_path) { + std::ifstream file(settings_file_path); + if (!file.is_open()) { + throw std::runtime_error("Failed to open " + settings_file_path); + } + + auto list_settings = ppc::util::InitJSONPtr(); + file >> *list_settings; + + std::string type_str = TypeOfTaskToString(type_of_task); + if (type_str == "unknown") { + return type_str; + } + + return type_str + "_" + std::string((*list_settings)["tasks"][type_str]); +} + +enum class StateOfTesting : uint8_t { kFunc, kPerf }; + +template +/// @brief Base abstract class representing a generic task with a defined pipeline. +/// @tparam InType Input data type. +/// @tparam OutType Output data type. +class Task { + public: + /// @brief Validates input data and task attributes before execution. + /// @return True if validation is successful. + virtual bool Validation() final { + if (stage_ == PipelineStage::kNone || stage_ == PipelineStage::kDone) { + stage_ = PipelineStage::kValidation; + } else { + stage_ = PipelineStage::kException; + throw std::runtime_error("Validation should be called before preprocessing"); + } + return ValidationImpl(); + } + + /// @brief Performs preprocessing on the input data. + /// @return True if preprocessing is successful. + virtual bool PreProcessing() final { + if (stage_ == PipelineStage::kValidation) { + stage_ = PipelineStage::kPreProcessing; + } else { + stage_ = PipelineStage::kException; + throw std::runtime_error("Preprocessing should be called after validation"); + } + if (state_of_testing_ == StateOfTesting::kFunc) { + InternalTimeTest(); + } + return PreProcessingImpl(); + } + + /// @brief Executes the main logic of the task. + /// @return True if execution is successful. 
+ virtual bool Run() final { + if (stage_ == PipelineStage::kPreProcessing || stage_ == PipelineStage::kRun) { + stage_ = PipelineStage::kRun; + } else { + stage_ = PipelineStage::kException; + throw std::runtime_error("Run should be called after preprocessing"); + } + return RunImpl(); + } + + /// @brief Performs postprocessing on the output data. + /// @return True if postprocessing is successful. + virtual bool PostProcessing() final { + if (stage_ == PipelineStage::kRun) { + stage_ = PipelineStage::kDone; + } else { + stage_ = PipelineStage::kException; + throw std::runtime_error("Postprocessing should be called after run"); + } + if (state_of_testing_ == StateOfTesting::kFunc) { + InternalTimeTest(); + } + return PostProcessingImpl(); + } + + /// @brief Returns the current testing mode. + /// @return Reference to the current StateOfTesting. + StateOfTesting &GetStateOfTesting() { + return state_of_testing_; + } + + /// @brief Sets the dynamic task type. + /// @param type_of_task Task type to set. + void SetTypeOfTask(TypeOfTask type_of_task) { + type_of_task_ = type_of_task; + } + + /// @brief Returns the dynamic task type. + /// @return Current dynamic task type. + [[nodiscard]] TypeOfTask GetDynamicTypeOfTask() const { + return type_of_task_; + } + + /// @brief Returns the current task status. + /// @return Task status (enabled or disabled). + [[nodiscard]] StatusOfTask GetStatusOfTask() const { + return status_of_task_; + } + + /// @brief Returns the static task type. + /// @return Static task type (default: kUnknown). + static constexpr TypeOfTask GetStaticTypeOfTask() { + return TypeOfTask::kUnknown; + } + + /// @brief Returns a reference to the input data. + /// @return Reference to the task's input data. + InType &GetInput() { + return input_; + } + + /// @brief Returns a reference to the output data. + /// @return Reference to the task's output data. + OutType &GetOutput() { + return output_; + } + + /// @brief Destructor. 
Verifies that the pipeline was executed in the correct order. + /// @note Terminates the program if the pipeline order is incorrect or incomplete. + virtual ~Task() { + if (stage_ != PipelineStage::kDone && stage_ != PipelineStage::kException) { + ppc::util::DestructorFailureFlag::Set(); + } +#if _OPENMP >= 201811 + omp_pause_resource_all(omp_pause_soft); +#endif + } + + protected: + /// @brief Measures execution time between preprocessing and postprocessing steps. + /// @throws std::runtime_error If execution exceeds the allowed time limit. + virtual void InternalTimeTest() final { + if (stage_ == PipelineStage::kPreProcessing) { + tmp_time_point_ = std::chrono::high_resolution_clock::now(); + } + + if (stage_ == PipelineStage::kDone) { + auto duration = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - + tmp_time_point_) + .count(); + auto diff = static_cast(duration) * 1e-9; + + const auto max_time = ppc::util::GetTaskMaxTime(); + std::stringstream err_msg; + if (diff < max_time) { + err_msg << "Test time:" << std::fixed << std::setprecision(10) << diff << '\n'; + } else { + err_msg << "\nTask execute time need to be: "; + err_msg << "time < " << max_time << " secs.\n"; + err_msg << "Original time in secs: " << diff << '\n'; + throw std::runtime_error(err_msg.str().c_str()); + } + } + } + + /// @brief User-defined validation logic. + /// @return True if validation is successful. + virtual bool ValidationImpl() = 0; + + /// @brief User-defined preprocessing logic. + /// @return True if preprocessing is successful. + virtual bool PreProcessingImpl() = 0; + + /// @brief User-defined task execution logic. + /// @return True if a run is successful. + virtual bool RunImpl() = 0; + + /// @brief User-defined postprocessing logic. + /// @return True if postprocessing is successful. 
+ virtual bool PostProcessingImpl() = 0; + + private: + InType input_{}; + OutType output_{}; + StateOfTesting state_of_testing_ = StateOfTesting::kFunc; + TypeOfTask type_of_task_ = TypeOfTask::kUnknown; + StatusOfTask status_of_task_ = StatusOfTask::kEnabled; + std::chrono::high_resolution_clock::time_point tmp_time_point_; + enum class PipelineStage : uint8_t { + kNone, + kValidation, + kPreProcessing, + kRun, + kDone, + kException + } stage_ = PipelineStage::kNone; +}; + +/// @brief Smart pointer alias for Task. +/// @tparam InType Input data type. +/// @tparam OutType Output data type. +template +using TaskPtr = std::shared_ptr>; + +/// @brief Constructs and returns a shared pointer to a task with the given input. +/// @tparam TaskType Type of the task to create. +/// @tparam InType Type of the input. +/// @param in Input to pass to the task constructor. +/// @return Shared a pointer to the newly created task. +template +std::shared_ptr TaskGetter(const InType &in) { + return std::make_shared(in); +} + +} // namespace ppc::task diff --git a/modules/task/tests/.clang-tidy b/modules/task/tests/.clang-tidy index 9e502745e8..6dadf02bdb 100644 --- a/modules/task/tests/.clang-tidy +++ b/modules/task/tests/.clang-tidy @@ -1,13 +1,13 @@ -InheritParentConfig: true - -Checks: > - -modernize-loop-convert, - -cppcoreguidelines-avoid-goto, - -cppcoreguidelines-avoid-non-const-global-variables, - -misc-use-anonymous-namespace, - -modernize-use-std-print, - -modernize-type-traits - -CheckOptions: - - key: readability-function-cognitive-complexity.Threshold - value: 100 # Relaxed for tests +InheritParentConfig: true + +Checks: > + -modernize-loop-convert, + -cppcoreguidelines-avoid-goto, + -cppcoreguidelines-avoid-non-const-global-variables, + -misc-use-anonymous-namespace, + -modernize-use-std-print, + -modernize-type-traits + +CheckOptions: + - key: readability-function-cognitive-complexity.Threshold + value: 100 # Relaxed for tests diff --git 
a/modules/task/tests/task_tests.cpp b/modules/task/tests/task_tests.cpp index f7170e745d..76f136fc1d 100644 --- a/modules/task/tests/task_tests.cpp +++ b/modules/task/tests/task_tests.cpp @@ -1,351 +1,351 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "runners/include/runners.hpp" -#include "task/include/task.hpp" -#include "util/include/util.hpp" - -using ppc::task::StateOfTesting; -using ppc::task::StatusOfTask; -using ppc::task::Task; -using ppc::task::TypeOfTask; - -class ScopedFile { - public: - explicit ScopedFile(std::string path) : path_(std::move(path)) {} - ~ScopedFile() { - std::error_code ec; - std::filesystem::remove(path_, ec); - } - - private: - std::string path_; -}; - -namespace ppc::test { - -template -class TestTask : public ppc::task::Task { - public: - explicit TestTask(const InType &in) { - this->GetInput() = in; - } - - bool ValidationImpl() override { - return !this->GetInput().empty(); - } - - bool PreProcessingImpl() override { - this->GetOutput() = 0; - return true; - } - - bool RunImpl() override { - for (unsigned i = 0; i < this->GetInput().size(); i++) { - this->GetOutput() += this->GetInput()[i]; - } - return true; - } - - bool PostProcessingImpl() override { - return true; - } -}; - -template -class FakeSlowTask : public TestTask { - public: - explicit FakeSlowTask(const InType &in) : TestTask(in) {} - - bool RunImpl() override { - std::this_thread::sleep_for(std::chrono::seconds(2)); - return TestTask::RunImpl(); - } -}; - -} // namespace ppc::test - -TEST(TaskTests, CheckInt32t) { - std::vector in(20, 1); - ppc::test::TestTask, int32_t> test_task(in); - ASSERT_EQ(test_task.Validation(), true); - test_task.PreProcessing(); - test_task.Run(); - test_task.PostProcessing(); - ASSERT_EQ(static_cast(test_task.GetOutput()), in.size()); -} - -TEST(TaskTests, CheckInt32tSlow) { - std::vector in(20, 1); - 
ppc::test::FakeSlowTask, int32_t> test_task(in); - ASSERT_EQ(test_task.Validation(), true); - test_task.PreProcessing(); - test_task.Run(); - ASSERT_ANY_THROW(test_task.PostProcessing()); -} - -TEST(TaskTests, SlowTaskRespectsEnvOverride) { - env::detail::set_scoped_environment_variable scoped("PPC_TASK_MAX_TIME", "3"); - std::vector in(20, 1); - ppc::test::FakeSlowTask, int32_t> test_task(in); - ASSERT_EQ(test_task.Validation(), true); - test_task.PreProcessing(); - test_task.Run(); - EXPECT_NO_THROW(test_task.PostProcessing()); -} - -TEST(TaskTests, CheckValidateFunc) { - std::vector in; - ppc::test::TestTask, int32_t> test_task(in); - ASSERT_EQ(test_task.Validation(), false); - test_task.PreProcessing(); - test_task.Run(); - test_task.PostProcessing(); -} - -TEST(TaskTests, CheckDouble) { - std::vector in(20, 1); - ppc::test::TestTask, double> test_task(in); - ASSERT_EQ(test_task.Validation(), true); - test_task.PreProcessing(); - test_task.Run(); - test_task.PostProcessing(); - EXPECT_NEAR(test_task.GetOutput(), static_cast(in.size()), 1e-6); -} - -TEST(TaskTests, CheckFloat) { - std::vector in(20, 1); - ppc::test::TestTask, float> test_task(in); - ASSERT_EQ(test_task.Validation(), true); - test_task.PreProcessing(); - test_task.Run(); - test_task.PostProcessing(); - EXPECT_NEAR(test_task.GetOutput(), in.size(), 1e-3); -} - -TEST(TaskTests, CheckWrongOrderDisabledValgrind) { - std::vector in(20, 1); - ppc::test::TestTask, float> test_task(in); - ASSERT_EQ(test_task.Validation(), true); - test_task.PreProcessing(); - EXPECT_THROW(test_task.PostProcessing(), std::runtime_error); -} - -TEST(TaskTests, PrematurePostprocessingNoSteps) { - std::vector in(20, 1); - ppc::test::TestTask, float> test_task(in); - EXPECT_THROW(test_task.PostProcessing(), std::runtime_error); -} - -TEST(TaskTests, PrematurePostprocessingAfterPreprocessing) { - std::vector in(20, 1); - ppc::test::TestTask, float> test_task(in); - EXPECT_THROW(test_task.PreProcessing(), std::runtime_error); - 
EXPECT_THROW(test_task.PostProcessing(), std::runtime_error); -} - -TEST(TaskTest, GetStringTaskStatusDisabled) { - EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kDisabled), "disabled"); -} - -TEST(TaskTest, GetStringTaskStatusEnabled) { - EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kEnabled), "enabled"); -} - -TEST(TaskTest, GetStringTaskTypeInvalidFileThrows) { - EXPECT_THROW({ GetStringTaskType(TypeOfTask::kALL, "non_existing_file.json"); }, std::runtime_error); -} - -TEST(TaskTest, GetStringTaskTypeUnknownTypeWithValidFile) { - std::string path = "settings_valid.json"; - ScopedFile cleaner(path); - std::ofstream file(path); - file - << R"({"tasks": {"all": "enabled", "stl": "enabled", "omp": "enabled", "mpi": "enabled", "tbb": "enabled", "seq": "enabled"}})"; - file.close(); - EXPECT_NO_THROW({ GetStringTaskType(TypeOfTask::kUnknown, path); }); -} - -TEST(TaskTest, GetStringTaskTypeThrowsOnBadJSON) { - std::string path = "bad_settings.json"; - ScopedFile cleaner(path); - std::ofstream file(path); - file << "{"; - file.close(); - EXPECT_THROW({ GetStringTaskType(TypeOfTask::kALL, path); }, std::exception); -} - -TEST(TaskTest, GetStringTaskTypeEachTypeWithValidFile) { - std::string path = "settings_valid_all.json"; - ScopedFile cleaner(path); - std::ofstream file(path); - file - << R"({"tasks": {"all": "enabled", "stl": "enabled", "omp": "enabled", "mpi": "enabled", "tbb": "enabled", "seq": "enabled"}})"; - file.close(); - - EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kALL, path)); - EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kSTL, path)); - EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kOMP, path)); - EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kMPI, path)); - EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kTBB, path)); - EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kSEQ, path)); -} - -TEST(TaskTest, GetStringTaskTypeReturnsUnknownOnDefault) { - std::string path = "settings_valid_unknown.json"; - ScopedFile cleaner(path); - std::ofstream file(path); - file 
<< R"({"tasks": {"all": "enabled"}})"; - file.close(); - - auto result = GetStringTaskType(TypeOfTask::kUnknown, path); - EXPECT_EQ(result, "unknown"); -} - -TEST(TaskTest, GetStringTaskTypeThrowsIfKeyMissing) { - std::string path = "settings_partial.json"; - ScopedFile cleaner(path); - std::ofstream file(path); - file << R"({"tasks": {"all": "enabled"}})"; - file.close(); - - EXPECT_ANY_THROW(GetStringTaskType(TypeOfTask::kSTL, path)); -} - -TEST(TaskTest, TaskDestructorThrowsIfStageIncomplete) { - { - std::vector in(20, 1); - struct LocalTask : Task, int32_t> { - explicit LocalTask(const std::vector &in) { - this->GetInput() = in; - } - bool ValidationImpl() override { - return true; - } - bool PreProcessingImpl() override { - return true; - } - bool RunImpl() override { - return true; - } - bool PostProcessingImpl() override { - return true; - } - } task(in); - task.Validation(); - } - EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); - ppc::util::DestructorFailureFlag::Unset(); -} - -TEST(TaskTest, TaskDestructorThrowsIfEmpty) { - { - std::vector in(20, 1); - struct LocalTask : Task, int32_t> { - explicit LocalTask(const std::vector &in) { - this->GetInput() = in; - } - bool ValidationImpl() override { - return true; - } - bool PreProcessingImpl() override { - return true; - } - bool RunImpl() override { - return true; - } - bool PostProcessingImpl() override { - return true; - } - } task(in); - } - EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); - ppc::util::DestructorFailureFlag::Unset(); -} - -TEST(TaskTest, InternalTimeTestThrowsIfTimeoutExceeded) { - struct SlowTask : Task, int32_t> { - explicit SlowTask(const std::vector &in) { - this->GetInput() = in; - } - bool ValidationImpl() override { - return true; - } - bool PreProcessingImpl() override { - std::this_thread::sleep_for(std::chrono::seconds(2)); - return true; - } - bool RunImpl() override { - return true; - } - bool PostProcessingImpl() override { - return true; - } - }; - - std::vector 
in(20, 1); - SlowTask task(in); - task.GetStateOfTesting() = StateOfTesting::kFunc; - task.Validation(); - EXPECT_NO_THROW(task.PreProcessing()); - task.Run(); - EXPECT_THROW(task.PostProcessing(), std::runtime_error); -} - -class DummyTask : public Task { - public: - using Task::Task; - bool ValidationImpl() override { - return true; - } - bool PreProcessingImpl() override { - return true; - } - bool RunImpl() override { - return true; - } - bool PostProcessingImpl() override { - return true; - } -}; - -TEST(TaskTest, ValidationThrowsIfCalledTwice) { - auto task = std::make_shared(); - task->Validation(); - EXPECT_THROW(task->Validation(), std::runtime_error); -} - -TEST(TaskTest, PreProcessingThrowsIfCalledBeforeValidation) { - auto task = std::make_shared(); - EXPECT_THROW(task->PreProcessing(), std::runtime_error); -} - -TEST(TaskTest, RunThrowsIfCalledBeforePreProcessing) { - auto task = std::make_shared(); - EXPECT_THROW(task->Run(), std::runtime_error); -} - -TEST(TaskTest, PostProcessingThrowsIfCalledBeforeRun) { - auto task = std::make_shared(); - task->Validation(); - task->PreProcessing(); - EXPECT_THROW(task->PostProcessing(), std::runtime_error); -} - -int main(int argc, char **argv) { - return ppc::runners::SimpleInit(argc, argv); -} +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "runners/include/runners.hpp" +#include "task/include/task.hpp" +#include "util/include/util.hpp" + +using ppc::task::StateOfTesting; +using ppc::task::StatusOfTask; +using ppc::task::Task; +using ppc::task::TypeOfTask; + +class ScopedFile { + public: + explicit ScopedFile(std::string path) : path_(std::move(path)) {} + ~ScopedFile() { + std::error_code ec; + std::filesystem::remove(path_, ec); + } + + private: + std::string path_; +}; + +namespace ppc::test { + +template +class TestTask : public ppc::task::Task { + public: + explicit TestTask(const InType &in) 
{ + this->GetInput() = in; + } + + bool ValidationImpl() override { + return !this->GetInput().empty(); + } + + bool PreProcessingImpl() override { + this->GetOutput() = 0; + return true; + } + + bool RunImpl() override { + for (unsigned i = 0; i < this->GetInput().size(); i++) { + this->GetOutput() += this->GetInput()[i]; + } + return true; + } + + bool PostProcessingImpl() override { + return true; + } +}; + +template +class FakeSlowTask : public TestTask { + public: + explicit FakeSlowTask(const InType &in) : TestTask(in) {} + + bool RunImpl() override { + std::this_thread::sleep_for(std::chrono::seconds(2)); + return TestTask::RunImpl(); + } +}; + +} // namespace ppc::test + +TEST(TaskTests, CheckInt32t) { + std::vector in(20, 1); + ppc::test::TestTask, int32_t> test_task(in); + ASSERT_EQ(test_task.Validation(), true); + test_task.PreProcessing(); + test_task.Run(); + test_task.PostProcessing(); + ASSERT_EQ(static_cast(test_task.GetOutput()), in.size()); +} + +TEST(TaskTests, CheckInt32tSlow) { + std::vector in(20, 1); + ppc::test::FakeSlowTask, int32_t> test_task(in); + ASSERT_EQ(test_task.Validation(), true); + test_task.PreProcessing(); + test_task.Run(); + ASSERT_ANY_THROW(test_task.PostProcessing()); +} + +TEST(TaskTests, SlowTaskRespectsEnvOverride) { + env::detail::set_scoped_environment_variable scoped("PPC_TASK_MAX_TIME", "3"); + std::vector in(20, 1); + ppc::test::FakeSlowTask, int32_t> test_task(in); + ASSERT_EQ(test_task.Validation(), true); + test_task.PreProcessing(); + test_task.Run(); + EXPECT_NO_THROW(test_task.PostProcessing()); +} + +TEST(TaskTests, CheckValidateFunc) { + std::vector in; + ppc::test::TestTask, int32_t> test_task(in); + ASSERT_EQ(test_task.Validation(), false); + test_task.PreProcessing(); + test_task.Run(); + test_task.PostProcessing(); +} + +TEST(TaskTests, CheckDouble) { + std::vector in(20, 1); + ppc::test::TestTask, double> test_task(in); + ASSERT_EQ(test_task.Validation(), true); + test_task.PreProcessing(); + 
test_task.Run(); + test_task.PostProcessing(); + EXPECT_NEAR(test_task.GetOutput(), static_cast(in.size()), 1e-6); +} + +TEST(TaskTests, CheckFloat) { + std::vector in(20, 1); + ppc::test::TestTask, float> test_task(in); + ASSERT_EQ(test_task.Validation(), true); + test_task.PreProcessing(); + test_task.Run(); + test_task.PostProcessing(); + EXPECT_NEAR(test_task.GetOutput(), in.size(), 1e-3); +} + +TEST(TaskTests, CheckWrongOrderDisabledValgrind) { + std::vector in(20, 1); + ppc::test::TestTask, float> test_task(in); + ASSERT_EQ(test_task.Validation(), true); + test_task.PreProcessing(); + EXPECT_THROW(test_task.PostProcessing(), std::runtime_error); +} + +TEST(TaskTests, PrematurePostprocessingNoSteps) { + std::vector in(20, 1); + ppc::test::TestTask, float> test_task(in); + EXPECT_THROW(test_task.PostProcessing(), std::runtime_error); +} + +TEST(TaskTests, PrematurePostprocessingAfterPreprocessing) { + std::vector in(20, 1); + ppc::test::TestTask, float> test_task(in); + EXPECT_THROW(test_task.PreProcessing(), std::runtime_error); + EXPECT_THROW(test_task.PostProcessing(), std::runtime_error); +} + +TEST(TaskTest, GetStringTaskStatusDisabled) { + EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kDisabled), "disabled"); +} + +TEST(TaskTest, GetStringTaskStatusEnabled) { + EXPECT_EQ(GetStringTaskStatus(StatusOfTask::kEnabled), "enabled"); +} + +TEST(TaskTest, GetStringTaskTypeInvalidFileThrows) { + EXPECT_THROW({ GetStringTaskType(TypeOfTask::kALL, "non_existing_file.json"); }, std::runtime_error); +} + +TEST(TaskTest, GetStringTaskTypeUnknownTypeWithValidFile) { + std::string path = "settings_valid.json"; + ScopedFile cleaner(path); + std::ofstream file(path); + file + << R"({"tasks": {"all": "enabled", "stl": "enabled", "omp": "enabled", "mpi": "enabled", "tbb": "enabled", "seq": "enabled"}})"; + file.close(); + EXPECT_NO_THROW({ GetStringTaskType(TypeOfTask::kUnknown, path); }); +} + +TEST(TaskTest, GetStringTaskTypeThrowsOnBadJSON) { + std::string path = 
"bad_settings.json"; + ScopedFile cleaner(path); + std::ofstream file(path); + file << "{"; + file.close(); + EXPECT_THROW({ GetStringTaskType(TypeOfTask::kALL, path); }, std::exception); +} + +TEST(TaskTest, GetStringTaskTypeEachTypeWithValidFile) { + std::string path = "settings_valid_all.json"; + ScopedFile cleaner(path); + std::ofstream file(path); + file + << R"({"tasks": {"all": "enabled", "stl": "enabled", "omp": "enabled", "mpi": "enabled", "tbb": "enabled", "seq": "enabled"}})"; + file.close(); + + EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kALL, path)); + EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kSTL, path)); + EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kOMP, path)); + EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kMPI, path)); + EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kTBB, path)); + EXPECT_NO_THROW(GetStringTaskType(TypeOfTask::kSEQ, path)); +} + +TEST(TaskTest, GetStringTaskTypeReturnsUnknownOnDefault) { + std::string path = "settings_valid_unknown.json"; + ScopedFile cleaner(path); + std::ofstream file(path); + file << R"({"tasks": {"all": "enabled"}})"; + file.close(); + + auto result = GetStringTaskType(TypeOfTask::kUnknown, path); + EXPECT_EQ(result, "unknown"); +} + +TEST(TaskTest, GetStringTaskTypeThrowsIfKeyMissing) { + std::string path = "settings_partial.json"; + ScopedFile cleaner(path); + std::ofstream file(path); + file << R"({"tasks": {"all": "enabled"}})"; + file.close(); + + EXPECT_ANY_THROW(GetStringTaskType(TypeOfTask::kSTL, path)); +} + +TEST(TaskTest, TaskDestructorThrowsIfStageIncomplete) { + { + std::vector in(20, 1); + struct LocalTask : Task, int32_t> { + explicit LocalTask(const std::vector &in) { + this->GetInput() = in; + } + bool ValidationImpl() override { + return true; + } + bool PreProcessingImpl() override { + return true; + } + bool RunImpl() override { + return true; + } + bool PostProcessingImpl() override { + return true; + } + } task(in); + task.Validation(); + } + 
EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); + ppc::util::DestructorFailureFlag::Unset(); +} + +TEST(TaskTest, TaskDestructorThrowsIfEmpty) { + { + std::vector in(20, 1); + struct LocalTask : Task, int32_t> { + explicit LocalTask(const std::vector &in) { + this->GetInput() = in; + } + bool ValidationImpl() override { + return true; + } + bool PreProcessingImpl() override { + return true; + } + bool RunImpl() override { + return true; + } + bool PostProcessingImpl() override { + return true; + } + } task(in); + } + EXPECT_TRUE(ppc::util::DestructorFailureFlag::Get()); + ppc::util::DestructorFailureFlag::Unset(); +} + +TEST(TaskTest, InternalTimeTestThrowsIfTimeoutExceeded) { + struct SlowTask : Task, int32_t> { + explicit SlowTask(const std::vector &in) { + this->GetInput() = in; + } + bool ValidationImpl() override { + return true; + } + bool PreProcessingImpl() override { + std::this_thread::sleep_for(std::chrono::seconds(2)); + return true; + } + bool RunImpl() override { + return true; + } + bool PostProcessingImpl() override { + return true; + } + }; + + std::vector in(20, 1); + SlowTask task(in); + task.GetStateOfTesting() = StateOfTesting::kFunc; + task.Validation(); + EXPECT_NO_THROW(task.PreProcessing()); + task.Run(); + EXPECT_THROW(task.PostProcessing(), std::runtime_error); +} + +class DummyTask : public Task { + public: + using Task::Task; + bool ValidationImpl() override { + return true; + } + bool PreProcessingImpl() override { + return true; + } + bool RunImpl() override { + return true; + } + bool PostProcessingImpl() override { + return true; + } +}; + +TEST(TaskTest, ValidationThrowsIfCalledTwice) { + auto task = std::make_shared(); + task->Validation(); + EXPECT_THROW(task->Validation(), std::runtime_error); +} + +TEST(TaskTest, PreProcessingThrowsIfCalledBeforeValidation) { + auto task = std::make_shared(); + EXPECT_THROW(task->PreProcessing(), std::runtime_error); +} + +TEST(TaskTest, RunThrowsIfCalledBeforePreProcessing) { + auto task 
= std::make_shared(); + EXPECT_THROW(task->Run(), std::runtime_error); +} + +TEST(TaskTest, PostProcessingThrowsIfCalledBeforeRun) { + auto task = std::make_shared(); + task->Validation(); + task->PreProcessing(); + EXPECT_THROW(task->PostProcessing(), std::runtime_error); +} + +int main(int argc, char **argv) { + return ppc::runners::SimpleInit(argc, argv); +} diff --git a/modules/util/include/func_test_util.hpp b/modules/util/include/func_test_util.hpp index 6cae302886..adf1f301ac 100644 --- a/modules/util/include/func_test_util.hpp +++ b/modules/util/include/func_test_util.hpp @@ -1,145 +1,145 @@ -#pragma once - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "task/include/task.hpp" -#include "util/include/util.hpp" - -namespace ppc::util { - -template -using FuncTestParam = std::tuple(InType)>, std::string, TestType>; - -template -using GTestFuncParam = ::testing::TestParamInfo>; - -template -concept HasPrintTestParam = requires(TestType value) { - { T::PrintTestParam(value) } -> std::same_as; -}; - -template -/// @brief Base class for running functional tests on parallel tasks. -/// @tparam InType Type of input data. -/// @tparam OutType Type of output data. -/// @tparam TestType Type of the test case or parameter. -class BaseRunFuncTests : public ::testing::TestWithParam> { - public: - virtual bool CheckTestOutputData(OutType &output_data) = 0; - /// @brief Provides input data for the task. - /// @return Initialized input data. 
- virtual InType GetTestInputData() = 0; - - template - static void RequireStaticInterface() { - static_assert(HasPrintTestParam, - "Derived class must implement: static std::string PrintTestParam(TestType)"); - } - - template - static std::string PrintFuncTestName(const GTestFuncParam &info) { - RequireStaticInterface(); - TestType test_param = std::get(ppc::util::GTestParamIndex::kTestParams)>(info.param); - return std::get(GTestParamIndex::kNameTest)>(info.param) + "_" + - Derived::PrintTestParam(test_param); - } - - protected: - void ExecuteTest(FuncTestParam test_param) { - const std::string &test_name = std::get(GTestParamIndex::kNameTest)>(test_param); - - ValidateTestName(test_name); - - const auto test_env_scope = ppc::util::test::MakePerTestEnvForCurrentGTest(test_name); - - if (IsTestDisabled(test_name)) { - GTEST_SKIP(); - } - - if (ShouldSkipNonMpiTask(test_name)) { - std::cerr << "kALL and kMPI tasks are not under mpirun\n"; - GTEST_SKIP(); - } - - InitializeAndRunTask(test_param); - } - - void ValidateTestName(const std::string &test_name) { - EXPECT_FALSE(test_name.find("unknown") != std::string::npos); - } - - bool IsTestDisabled(const std::string &test_name) { - return test_name.find("disabled") != std::string::npos; - } - - bool ShouldSkipNonMpiTask(const std::string &test_name) { - auto contains_substring = [&](const std::string &substring) { - return test_name.find(substring) != std::string::npos; - }; - - return !ppc::util::IsUnderMpirun() && (contains_substring("_all") || contains_substring("_mpi")); - } - - /// @brief Initializes task instance and runs it through the full pipeline. - void InitializeAndRunTask(const FuncTestParam &test_param) { - task_ = std::get(GTestParamIndex::kTaskGetter)>(test_param)(GetTestInputData()); - ExecuteTaskPipeline(); - } - - /// @brief Executes the full task pipeline with validation. 
- // NOLINTNEXTLINE(readability-function-cognitive-complexity) - void ExecuteTaskPipeline() { - EXPECT_TRUE(task_->Validation()); - EXPECT_TRUE(task_->PreProcessing()); - EXPECT_TRUE(task_->Run()); - EXPECT_TRUE(task_->PostProcessing()); - EXPECT_TRUE(CheckTestOutputData(task_->GetOutput())); - } - - private: - ppc::task::TaskPtr task_; -}; - -template -auto ExpandToValuesImpl(const Tuple &t, std::index_sequence /*unused*/) { - return ::testing::Values(std::get(t)...); -} - -template -auto ExpandToValues(const Tuple &t) { - constexpr std::size_t kN = std::tuple_size_v; - return ExpandToValuesImpl(t, std::make_index_sequence{}); -} - -template -auto GenTaskTuplesImpl(const SizesContainer &sizes, const std::string &settings_path, - std::index_sequence /*unused*/) { - return std::make_tuple(std::make_tuple(ppc::task::TaskGetter, - std::string(GetNamespace()) + "_" + - ppc::task::GetStringTaskType(Task::GetStaticTypeOfTask(), settings_path), - sizes[Is])...); -} - -template -auto TaskListGenerator(const SizesContainer &sizes, const std::string &settings_path) { - return GenTaskTuplesImpl(sizes, settings_path, - std::make_index_sequence>>{}); -} - -template -constexpr auto AddFuncTask(const SizesContainer &sizes, const std::string &settings_path) { - return TaskListGenerator(sizes, settings_path); -} - -} // namespace ppc::util +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "task/include/task.hpp" +#include "util/include/util.hpp" + +namespace ppc::util { + +template +using FuncTestParam = std::tuple(InType)>, std::string, TestType>; + +template +using GTestFuncParam = ::testing::TestParamInfo>; + +template +concept HasPrintTestParam = requires(TestType value) { + { T::PrintTestParam(value) } -> std::same_as; +}; + +template +/// @brief Base class for running functional tests on parallel tasks. +/// @tparam InType Type of input data. +/// @tparam OutType Type of output data. 
+/// @tparam TestType Type of the test case or parameter. +class BaseRunFuncTests : public ::testing::TestWithParam> { + public: + virtual bool CheckTestOutputData(OutType &output_data) = 0; + /// @brief Provides input data for the task. + /// @return Initialized input data. + virtual InType GetTestInputData() = 0; + + template + static void RequireStaticInterface() { + static_assert(HasPrintTestParam, + "Derived class must implement: static std::string PrintTestParam(TestType)"); + } + + template + static std::string PrintFuncTestName(const GTestFuncParam &info) { + RequireStaticInterface(); + TestType test_param = std::get(ppc::util::GTestParamIndex::kTestParams)>(info.param); + return std::get(GTestParamIndex::kNameTest)>(info.param) + "_" + + Derived::PrintTestParam(test_param); + } + + protected: + void ExecuteTest(FuncTestParam test_param) { + const std::string &test_name = std::get(GTestParamIndex::kNameTest)>(test_param); + + ValidateTestName(test_name); + + const auto test_env_scope = ppc::util::test::MakePerTestEnvForCurrentGTest(test_name); + + if (IsTestDisabled(test_name)) { + GTEST_SKIP(); + } + + if (ShouldSkipNonMpiTask(test_name)) { + std::cerr << "kALL and kMPI tasks are not under mpirun\n"; + GTEST_SKIP(); + } + + InitializeAndRunTask(test_param); + } + + void ValidateTestName(const std::string &test_name) { + EXPECT_FALSE(test_name.find("unknown") != std::string::npos); + } + + bool IsTestDisabled(const std::string &test_name) { + return test_name.find("disabled") != std::string::npos; + } + + bool ShouldSkipNonMpiTask(const std::string &test_name) { + auto contains_substring = [&](const std::string &substring) { + return test_name.find(substring) != std::string::npos; + }; + + return !ppc::util::IsUnderMpirun() && (contains_substring("_all") || contains_substring("_mpi")); + } + + /// @brief Initializes task instance and runs it through the full pipeline. 
+ void InitializeAndRunTask(const FuncTestParam &test_param) { + task_ = std::get(GTestParamIndex::kTaskGetter)>(test_param)(GetTestInputData()); + ExecuteTaskPipeline(); + } + + /// @brief Executes the full task pipeline with validation. + // NOLINTNEXTLINE(readability-function-cognitive-complexity) + void ExecuteTaskPipeline() { + EXPECT_TRUE(task_->Validation()); + EXPECT_TRUE(task_->PreProcessing()); + EXPECT_TRUE(task_->Run()); + EXPECT_TRUE(task_->PostProcessing()); + EXPECT_TRUE(CheckTestOutputData(task_->GetOutput())); + } + + private: + ppc::task::TaskPtr task_; +}; + +template +auto ExpandToValuesImpl(const Tuple &t, std::index_sequence /*unused*/) { + return ::testing::Values(std::get(t)...); +} + +template +auto ExpandToValues(const Tuple &t) { + constexpr std::size_t kN = std::tuple_size_v; + return ExpandToValuesImpl(t, std::make_index_sequence{}); +} + +template +auto GenTaskTuplesImpl(const SizesContainer &sizes, const std::string &settings_path, + std::index_sequence /*unused*/) { + return std::make_tuple(std::make_tuple(ppc::task::TaskGetter, + std::string(GetNamespace()) + "_" + + ppc::task::GetStringTaskType(Task::GetStaticTypeOfTask(), settings_path), + sizes[Is])...); +} + +template +auto TaskListGenerator(const SizesContainer &sizes, const std::string &settings_path) { + return GenTaskTuplesImpl(sizes, settings_path, + std::make_index_sequence>>{}); +} + +template +constexpr auto AddFuncTask(const SizesContainer &sizes, const std::string &settings_path) { + return TaskListGenerator(sizes, settings_path); +} + +} // namespace ppc::util diff --git a/modules/util/include/perf_test_util.hpp b/modules/util/include/perf_test_util.hpp index 738ea95679..ede95be90a 100644 --- a/modules/util/include/perf_test_util.hpp +++ b/modules/util/include/perf_test_util.hpp @@ -1,136 +1,136 @@ -#pragma once - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include 
"performance/include/performance.hpp" -#include "task/include/task.hpp" -#include "util/include/util.hpp" - -namespace ppc::util { - -double GetTimeMPI(); -int GetMPIRank(); - -template -using PerfTestParam = std::tuple(InType)>, std::string, - ppc::performance::PerfResults::TypeOfRunning>; - -template -/// @brief Base class for performance testing of parallel tasks. -/// @tparam InType Input data type. -/// @tparam OutType Output data type. -class BaseRunPerfTests : public ::testing::TestWithParam> { - public: - /// @brief Generates a readable name for the performance test case. - static std::string CustomPerfTestName(const ::testing::TestParamInfo> &info) { - return ppc::performance::GetStringParamName( - std::get(GTestParamIndex::kTestParams)>(info.param)) + - "_" + std::get(GTestParamIndex::kNameTest)>(info.param); - } - - protected: - virtual bool CheckTestOutputData(OutType &output_data) = 0; - /// @brief Supplies input data for performance testing. - virtual InType GetTestInputData() = 0; - - virtual void SetPerfAttributes(ppc::performance::PerfAttr &perf_attrs) { - if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kMPI || - task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kALL) { - const double t0 = GetTimeMPI(); - perf_attrs.current_timer = [t0] { return GetTimeMPI() - t0; }; - } else if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kOMP) { - const double t0 = omp_get_wtime(); - perf_attrs.current_timer = [t0] { return omp_get_wtime() - t0; }; - } else if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kSEQ || - task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kSTL || - task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kTBB) { - const auto t0 = std::chrono::high_resolution_clock::now(); - perf_attrs.current_timer = [&] { - auto now = std::chrono::high_resolution_clock::now(); - auto ns = std::chrono::duration_cast(now - t0).count(); - return static_cast(ns) * 1e-9; - }; - } else { - throw 
std::runtime_error("The task type is not supported for performance testing."); - } - } - - void ExecuteTest(const PerfTestParam &perf_test_param) { - auto task_getter = std::get(GTestParamIndex::kTaskGetter)>(perf_test_param); - auto test_name = std::get(GTestParamIndex::kNameTest)>(perf_test_param); - auto mode = std::get(GTestParamIndex::kTestParams)>(perf_test_param); - - ASSERT_FALSE(test_name.find("unknown") != std::string::npos); - if (test_name.find("disabled") != std::string::npos) { - GTEST_SKIP(); - } - - const auto test_env_scope = ppc::util::test::MakePerTestEnvForCurrentGTest(test_name); - - task_ = task_getter(GetTestInputData()); - ppc::performance::Perf perf(task_); - ppc::performance::PerfAttr perf_attr; - SetPerfAttributes(perf_attr); - - if (mode == ppc::performance::PerfResults::TypeOfRunning::kPipeline) { - perf.PipelineRun(perf_attr); - } else if (mode == ppc::performance::PerfResults::TypeOfRunning::kTaskRun) { - perf.TaskRun(perf_attr); - } else { - std::stringstream err_msg; - err_msg << '\n' << "The type of performance check for the task was not selected.\n"; - throw std::runtime_error(err_msg.str().c_str()); - } - - if (GetMPIRank() == 0) { - perf.PrintPerfStatistic(test_name); - } - - OutType output_data = task_->GetOutput(); - ASSERT_TRUE(CheckTestOutputData(output_data)); - } - - private: - ppc::task::TaskPtr task_; -}; - -template -auto MakePerfTaskTuples(const std::string &settings_path) { - const auto name = std::string(GetNamespace()) + "_" + - ppc::task::GetStringTaskType(TaskType::GetStaticTypeOfTask(), settings_path); - - return std::make_tuple(std::make_tuple(ppc::task::TaskGetter, name, - ppc::performance::PerfResults::TypeOfRunning::kPipeline), - std::make_tuple(ppc::task::TaskGetter, name, - ppc::performance::PerfResults::TypeOfRunning::kTaskRun)); -} - -template -auto TupleToGTestValuesImpl(const Tuple &tup, std::index_sequence /*unused*/) { - return ::testing::Values(std::get(tup)...); -} - -template -auto 
TupleToGTestValues(Tuple &&tup) { - constexpr size_t kSize = std::tuple_size_v>; - return TupleToGTestValuesImpl(std::forward(tup), std::make_index_sequence{}); -} - -template -auto MakeAllPerfTasks(const std::string &settings_path) { - return std::tuple_cat(MakePerfTaskTuples(settings_path)...); -} - -} // namespace ppc::util +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "performance/include/performance.hpp" +#include "task/include/task.hpp" +#include "util/include/util.hpp" + +namespace ppc::util { + +double GetTimeMPI(); +int GetMPIRank(); + +template +using PerfTestParam = std::tuple(InType)>, std::string, + ppc::performance::PerfResults::TypeOfRunning>; + +template +/// @brief Base class for performance testing of parallel tasks. +/// @tparam InType Input data type. +/// @tparam OutType Output data type. +class BaseRunPerfTests : public ::testing::TestWithParam> { + public: + /// @brief Generates a readable name for the performance test case. + static std::string CustomPerfTestName(const ::testing::TestParamInfo> &info) { + return ppc::performance::GetStringParamName( + std::get(GTestParamIndex::kTestParams)>(info.param)) + + "_" + std::get(GTestParamIndex::kNameTest)>(info.param); + } + + protected: + virtual bool CheckTestOutputData(OutType &output_data) = 0; + /// @brief Supplies input data for performance testing. 
+ virtual InType GetTestInputData() = 0; + + virtual void SetPerfAttributes(ppc::performance::PerfAttr &perf_attrs) { + if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kMPI || + task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kALL) { + const double t0 = GetTimeMPI(); + perf_attrs.current_timer = [t0] { return GetTimeMPI() - t0; }; + } else if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kOMP) { + const double t0 = omp_get_wtime(); + perf_attrs.current_timer = [t0] { return omp_get_wtime() - t0; }; + } else if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kSEQ || + task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kSTL || + task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kTBB) { + const auto t0 = std::chrono::high_resolution_clock::now(); + perf_attrs.current_timer = [&] { + auto now = std::chrono::high_resolution_clock::now(); + auto ns = std::chrono::duration_cast(now - t0).count(); + return static_cast(ns) * 1e-9; + }; + } else { + throw std::runtime_error("The task type is not supported for performance testing."); + } + } + + void ExecuteTest(const PerfTestParam &perf_test_param) { + auto task_getter = std::get(GTestParamIndex::kTaskGetter)>(perf_test_param); + auto test_name = std::get(GTestParamIndex::kNameTest)>(perf_test_param); + auto mode = std::get(GTestParamIndex::kTestParams)>(perf_test_param); + + ASSERT_FALSE(test_name.find("unknown") != std::string::npos); + if (test_name.find("disabled") != std::string::npos) { + GTEST_SKIP(); + } + + const auto test_env_scope = ppc::util::test::MakePerTestEnvForCurrentGTest(test_name); + + task_ = task_getter(GetTestInputData()); + ppc::performance::Perf perf(task_); + ppc::performance::PerfAttr perf_attr; + SetPerfAttributes(perf_attr); + + if (mode == ppc::performance::PerfResults::TypeOfRunning::kPipeline) { + perf.PipelineRun(perf_attr); + } else if (mode == ppc::performance::PerfResults::TypeOfRunning::kTaskRun) { + perf.TaskRun(perf_attr); + } else { + 
std::stringstream err_msg; + err_msg << '\n' << "The type of performance check for the task was not selected.\n"; + throw std::runtime_error(err_msg.str().c_str()); + } + + if (GetMPIRank() == 0) { + perf.PrintPerfStatistic(test_name); + } + + OutType output_data = task_->GetOutput(); + ASSERT_TRUE(CheckTestOutputData(output_data)); + } + + private: + ppc::task::TaskPtr task_; +}; + +template +auto MakePerfTaskTuples(const std::string &settings_path) { + const auto name = std::string(GetNamespace()) + "_" + + ppc::task::GetStringTaskType(TaskType::GetStaticTypeOfTask(), settings_path); + + return std::make_tuple(std::make_tuple(ppc::task::TaskGetter, name, + ppc::performance::PerfResults::TypeOfRunning::kPipeline), + std::make_tuple(ppc::task::TaskGetter, name, + ppc::performance::PerfResults::TypeOfRunning::kTaskRun)); +} + +template +auto TupleToGTestValuesImpl(const Tuple &tup, std::index_sequence /*unused*/) { + return ::testing::Values(std::get(tup)...); +} + +template +auto TupleToGTestValues(Tuple &&tup) { + constexpr size_t kSize = std::tuple_size_v>; + return TupleToGTestValuesImpl(std::forward(tup), std::make_index_sequence{}); +} + +template +auto MakeAllPerfTasks(const std::string &settings_path) { + return std::tuple_cat(MakePerfTaskTuples(settings_path)...); +} + +} // namespace ppc::util diff --git a/modules/util/include/util.hpp b/modules/util/include/util.hpp index ca93fd9b44..b623e8162b 100644 --- a/modules/util/include/util.hpp +++ b/modules/util/include/util.hpp @@ -1,169 +1,169 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef __GNUG__ -# include -#endif - -#include "nlohmann/json_fwd.hpp" - -#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable : 4459) -#endif - -#include - -#include -#include -#include - -/// @brief JSON namespace used for settings and config parsing. 
-using NlohmannJsonParseError = nlohmann::json::parse_error; -/// @brief JSON namespace used for settings and config typing. -using NlohmannJsonTypeError = nlohmann::json::type_error; -#ifdef _MSC_VER -# pragma warning(pop) -#endif - -namespace ppc::util { - -/// @brief Utility class for tracking destructor failure across tests. -/// @details Provides thread-safe methods to set, unset, and check the failure flag. -class DestructorFailureFlag { - public: - /// @brief Marks that a destructor failure has occurred. - static void Set() { - failure_flag.store(true); - } - - /// @brief Clears the destructor failure flag. - static void Unset() { - failure_flag.store(false); - } - - /// @brief Checks if a destructor failure was recorded. - /// @return True if failure occurred, false otherwise. - static bool Get() { - return failure_flag.load(); - } - - private: - inline static std::atomic failure_flag{false}; -}; - -enum class GTestParamIndex : uint8_t { kTaskGetter, kNameTest, kTestParams }; - -std::string GetAbsoluteTaskPath(const std::string &id_path, const std::string &relative_path); -int GetNumThreads(); -int GetNumProc(); -double GetTaskMaxTime(); -double GetPerfMaxTime(); - -template -std::string GetNamespace() { - std::string name = typeid(T).name(); -#ifdef __GNUC__ - int status = 0; - std::unique_ptr demangled{abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), - std::free}; - name = (status == 0) ? demangled.get() : name; -#endif -#ifdef _MSC_VER - const std::string prefixes[] = {"class ", "struct ", "enum ", "union "}; - for (const auto &prefix : prefixes) { - if (name.starts_with(prefix)) { - name = name.substr(prefix.size()); - break; - } - } - name.erase(0, name.find_first_not_of(' ')); -#endif - auto pos = name.rfind("::"); - return (pos != std::string::npos) ? 
name.substr(0, pos) : std::string{}; -} - -inline std::shared_ptr InitJSONPtr() { - return std::make_shared(); -} - -bool IsUnderMpirun(); - -namespace test { - -[[nodiscard]] inline std::string SanitizeToken(std::string_view token_sv) { - std::string token{token_sv}; - auto is_allowed = [](char c) { - return std::isalnum(static_cast(c)) || c == '_' || c == '-' || c == '.'; - }; - std::ranges::replace(token, ' ', '_'); - for (char &ch : token) { - if (!is_allowed(ch)) { - ch = '_'; - } - } - return token; -} - -class ScopedPerTestEnv { - public: - explicit ScopedPerTestEnv(const std::string &token) - : set_uid_("PPC_TEST_UID", token), set_tmp_("PPC_TEST_TMPDIR", CreateTmpDir(token)) {} - - private: - static std::string CreateTmpDir(const std::string &token) { - namespace fs = std::filesystem; - auto make_rank_suffix = []() -> std::string { - // Derive rank from common MPI env vars without including MPI headers - constexpr std::array kRankVars = {"OMPI_COMM_WORLD_RANK", "PMI_RANK", "PMIX_RANK", - "SLURM_PROCID", "MSMPI_RANK"}; - for (auto name : kRankVars) { - if (auto r = env::get(name); r.has_value() && r.value() >= 0) { - return std::string("_rank_") + std::to_string(r.value()); - } - } - return std::string{}; - }; - const std::string rank_suffix = IsUnderMpirun() ? make_rank_suffix() : std::string{}; - const fs::path tmp = fs::temp_directory_path() / (std::string("ppc_test_") + token + rank_suffix); - std::error_code ec; - fs::create_directories(tmp, ec); - (void)ec; - return tmp.string(); - } - - env::detail::set_scoped_environment_variable set_uid_; - env::detail::set_scoped_environment_variable set_tmp_; -}; - -[[nodiscard]] inline std::string MakeCurrentGTestToken(std::string_view fallback_name) { - const auto *unit = ::testing::UnitTest::GetInstance(); - const auto *info = (unit != nullptr) ? unit->current_test_info() : nullptr; - std::ostringstream os; - if (info != nullptr) { - os << info->test_suite_name() << "." 
<< info->name(); - } else { - os << fallback_name; - } - return SanitizeToken(os.str()); -} - -inline ScopedPerTestEnv MakePerTestEnvForCurrentGTest(std::string_view fallback_name) { - return ScopedPerTestEnv(MakeCurrentGTestToken(fallback_name)); -} - -} // namespace test - -} // namespace ppc::util +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef __GNUG__ +# include +#endif + +#include "nlohmann/json_fwd.hpp" + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4459) +#endif + +#include + +#include +#include +#include + +/// @brief JSON namespace used for settings and config parsing. +using NlohmannJsonParseError = nlohmann::json::parse_error; +/// @brief JSON namespace used for settings and config typing. +using NlohmannJsonTypeError = nlohmann::json::type_error; +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +namespace ppc::util { + +/// @brief Utility class for tracking destructor failure across tests. +/// @details Provides thread-safe methods to set, unset, and check the failure flag. +class DestructorFailureFlag { + public: + /// @brief Marks that a destructor failure has occurred. + static void Set() { + failure_flag.store(true); + } + + /// @brief Clears the destructor failure flag. + static void Unset() { + failure_flag.store(false); + } + + /// @brief Checks if a destructor failure was recorded. + /// @return True if failure occurred, false otherwise. 
+ static bool Get() { + return failure_flag.load(); + } + + private: + inline static std::atomic failure_flag{false}; +}; + +enum class GTestParamIndex : uint8_t { kTaskGetter, kNameTest, kTestParams }; + +std::string GetAbsoluteTaskPath(const std::string &id_path, const std::string &relative_path); +int GetNumThreads(); +int GetNumProc(); +double GetTaskMaxTime(); +double GetPerfMaxTime(); + +template +std::string GetNamespace() { + std::string name = typeid(T).name(); +#ifdef __GNUC__ + int status = 0; + std::unique_ptr demangled{abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), + std::free}; + name = (status == 0) ? demangled.get() : name; +#endif +#ifdef _MSC_VER + const std::string prefixes[] = {"class ", "struct ", "enum ", "union "}; + for (const auto &prefix : prefixes) { + if (name.starts_with(prefix)) { + name = name.substr(prefix.size()); + break; + } + } + name.erase(0, name.find_first_not_of(' ')); +#endif + auto pos = name.rfind("::"); + return (pos != std::string::npos) ? 
name.substr(0, pos) : std::string{}; +} + +inline std::shared_ptr InitJSONPtr() { + return std::make_shared(); +} + +bool IsUnderMpirun(); + +namespace test { + +[[nodiscard]] inline std::string SanitizeToken(std::string_view token_sv) { + std::string token{token_sv}; + auto is_allowed = [](char c) { + return std::isalnum(static_cast(c)) || c == '_' || c == '-' || c == '.'; + }; + std::ranges::replace(token, ' ', '_'); + for (char &ch : token) { + if (!is_allowed(ch)) { + ch = '_'; + } + } + return token; +} + +class ScopedPerTestEnv { + public: + explicit ScopedPerTestEnv(const std::string &token) + : set_uid_("PPC_TEST_UID", token), set_tmp_("PPC_TEST_TMPDIR", CreateTmpDir(token)) {} + + private: + static std::string CreateTmpDir(const std::string &token) { + namespace fs = std::filesystem; + auto make_rank_suffix = []() -> std::string { + // Derive rank from common MPI env vars without including MPI headers + constexpr std::array kRankVars = {"OMPI_COMM_WORLD_RANK", "PMI_RANK", "PMIX_RANK", + "SLURM_PROCID", "MSMPI_RANK"}; + for (auto name : kRankVars) { + if (auto r = env::get(name); r.has_value() && r.value() >= 0) { + return std::string("_rank_") + std::to_string(r.value()); + } + } + return std::string{}; + }; + const std::string rank_suffix = IsUnderMpirun() ? make_rank_suffix() : std::string{}; + const fs::path tmp = fs::temp_directory_path() / (std::string("ppc_test_") + token + rank_suffix); + std::error_code ec; + fs::create_directories(tmp, ec); + (void)ec; + return tmp.string(); + } + + env::detail::set_scoped_environment_variable set_uid_; + env::detail::set_scoped_environment_variable set_tmp_; +}; + +[[nodiscard]] inline std::string MakeCurrentGTestToken(std::string_view fallback_name) { + const auto *unit = ::testing::UnitTest::GetInstance(); + const auto *info = (unit != nullptr) ? unit->current_test_info() : nullptr; + std::ostringstream os; + if (info != nullptr) { + os << info->test_suite_name() << "." 
<< info->name(); + } else { + os << fallback_name; + } + return SanitizeToken(os.str()); +} + +inline ScopedPerTestEnv MakePerTestEnvForCurrentGTest(std::string_view fallback_name) { + return ScopedPerTestEnv(MakeCurrentGTestToken(fallback_name)); +} + +} // namespace test + +} // namespace ppc::util diff --git a/modules/util/src/func_test_util.cpp b/modules/util/src/func_test_util.cpp index a5dfe0811d..91d6ccacef 100644 --- a/modules/util/src/func_test_util.cpp +++ b/modules/util/src/func_test_util.cpp @@ -1,13 +1,13 @@ -#include - -#include "util/include/perf_test_util.hpp" - -double ppc::util::GetTimeMPI() { - return MPI_Wtime(); -} - -int ppc::util::GetMPIRank() { - int rank = -1; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - return rank; -} +#include + +#include "util/include/perf_test_util.hpp" + +double ppc::util::GetTimeMPI() { + return MPI_Wtime(); +} + +int ppc::util::GetMPIRank() { + int rank = -1; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + return rank; +} diff --git a/modules/util/src/util.cpp b/modules/util/src/util.cpp index 34c065388b..6847f41fe5 100644 --- a/modules/util/src/util.cpp +++ b/modules/util/src/util.cpp @@ -1,67 +1,67 @@ -#include "util/include/util.hpp" - -#include -#include -#include -#include -#include - -namespace { - -std::string GetAbsolutePath(const std::string &relative_path) { - std::filesystem::path path = std::filesystem::path(PPC_PATH_TO_PROJECT) / "tasks" / relative_path; - return path.string(); -} - -} // namespace - -std::string ppc::util::GetAbsoluteTaskPath(const std::string &id_path, const std::string &relative_path) { - std::filesystem::path task_relative = std::filesystem::path(id_path) / "data" / relative_path; - return GetAbsolutePath(task_relative.string()); -} - -int ppc::util::GetNumThreads() { - const auto num_threads = env::get("PPC_NUM_THREADS"); - if (num_threads.has_value()) { - return num_threads.value(); - } - return 1; -} - -int ppc::util::GetNumProc() { - const auto num_proc = env::get("PPC_NUM_PROC"); - if 
(num_proc.has_value()) { - return num_proc.value(); - } - return 1; -} - -double ppc::util::GetTaskMaxTime() { - const auto val = env::get("PPC_TASK_MAX_TIME"); - if (val.has_value()) { - return val.value(); - } - return 1.0; -} - -double ppc::util::GetPerfMaxTime() { - const auto val = env::get("PPC_PERF_MAX_TIME"); - if (val.has_value()) { - return val.value(); - } - return 10.0; -} - -// List of environment variables that signal the application is running under -// an MPI launcher. The array size must match the number of entries to avoid -// looking up empty environment variable names. -constexpr std::array kMpiEnvVars = { - "OMPI_COMM_WORLD_SIZE", "OMPI_UNIVERSE_SIZE", "PMI_SIZE", "PMI_RANK", "PMI_FD", - "HYDRA_CONTROL_FD", "PMIX_RANK", "SLURM_PROCID", "MSMPI_RANK", "MSMPI_LOCALRANK"}; - -bool ppc::util::IsUnderMpirun() { - return std::ranges::any_of(kMpiEnvVars, [&](const auto &env_var) { - const auto mpi_env = env::get(env_var); - return static_cast(mpi_env.has_value()); - }); -} +#include "util/include/util.hpp" + +#include +#include +#include +#include +#include + +namespace { + +std::string GetAbsolutePath(const std::string &relative_path) { + std::filesystem::path path = std::filesystem::path(PPC_PATH_TO_PROJECT) / "tasks" / relative_path; + return path.string(); +} + +} // namespace + +std::string ppc::util::GetAbsoluteTaskPath(const std::string &id_path, const std::string &relative_path) { + std::filesystem::path task_relative = std::filesystem::path(id_path) / "data" / relative_path; + return GetAbsolutePath(task_relative.string()); +} + +int ppc::util::GetNumThreads() { + const auto num_threads = env::get("PPC_NUM_THREADS"); + if (num_threads.has_value()) { + return num_threads.value(); + } + return 1; +} + +int ppc::util::GetNumProc() { + const auto num_proc = env::get("PPC_NUM_PROC"); + if (num_proc.has_value()) { + return num_proc.value(); + } + return 1; +} + +double ppc::util::GetTaskMaxTime() { + const auto val = env::get("PPC_TASK_MAX_TIME"); + 
if (val.has_value()) { + return val.value(); + } + return 1.0; +} + +double ppc::util::GetPerfMaxTime() { + const auto val = env::get("PPC_PERF_MAX_TIME"); + if (val.has_value()) { + return val.value(); + } + return 10.0; +} + +// List of environment variables that signal the application is running under +// an MPI launcher. The array size must match the number of entries to avoid +// looking up empty environment variable names. +constexpr std::array kMpiEnvVars = { + "OMPI_COMM_WORLD_SIZE", "OMPI_UNIVERSE_SIZE", "PMI_SIZE", "PMI_RANK", "PMI_FD", + "HYDRA_CONTROL_FD", "PMIX_RANK", "SLURM_PROCID", "MSMPI_RANK", "MSMPI_LOCALRANK"}; + +bool ppc::util::IsUnderMpirun() { + return std::ranges::any_of(kMpiEnvVars, [&](const auto &env_var) { + const auto mpi_env = env::get(env_var); + return static_cast(mpi_env.has_value()); + }); +} diff --git a/modules/util/tests/util.cpp b/modules/util/tests/util.cpp index 6da563a9fa..94d31c5903 100644 --- a/modules/util/tests/util.cpp +++ b/modules/util/tests/util.cpp @@ -1,126 +1,126 @@ -#include "util/include/util.hpp" - -#include - -#include -#include -#include - -#include "omp.h" - -namespace my::nested { -struct Type {}; -} // namespace my::nested - -TEST(UtilTests, ExtractsCorrectNamespace) { - std::string k_ns = ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, "my::nested"); -} - -TEST(UtilTests, ThreadsControlCheckOpenmpDisabledValgrind) { - const auto num_threads_env_var = env::get("PPC_NUM_THREADS"); - - EXPECT_EQ(ppc::util::GetNumThreads(), omp_get_max_threads()); -} - -namespace test_ns { -struct TypeInNamespace {}; -} // namespace test_ns - -struct PlainType {}; - -TEST(GetNamespaceTest, ReturnsExpectedNamespace) { - std::string k_ns = ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, "test_ns"); -} - -TEST(GetNamespaceTest, ReturnsEmptyIfNoNamespacePrimitiveType) { - std::string k_ns = ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, ""); -} - -TEST(GetNamespaceTest, ReturnsEmptyIfNoNamespacePlainStruct) { - std::string k_ns = 
ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, ""); -} - -namespace test_ns { -struct Nested {}; -} // namespace test_ns - -TEST(GetNamespaceTest, ReturnsNamespaceCorrectly) { - std::string k_ns = ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, "test_ns"); -} - -struct NoNamespaceType {}; - -TEST(GetNamespaceTest, NoNamespaceInType) { - std::string k_ns = ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, ""); -} - -template -struct NotATemplate {}; - -TEST(GetNamespaceTest, NoKeyInPrettyFunction) { - std::string k_ns = ppc::util::GetNamespace>(); - EXPECT_EQ(k_ns, ""); -} - -namespace crazy { -struct VeryLongTypeNameWithOnlyLettersAndUnderscores {}; -} // namespace crazy - -TEST(GetNamespaceTest, NoTerminatorCharactersInPrettyFunction) { - std::string k_ns = ppc::util::GetNamespace(); - EXPECT_EQ(k_ns, "crazy"); -} - -TEST(GetTaskMaxTime, ReturnsDefaultWhenUnset) { - const auto old = env::get("PPC_TASK_MAX_TIME"); - if (old.has_value()) { - env::detail::delete_environment_variable("PPC_TASK_MAX_TIME"); - } - EXPECT_DOUBLE_EQ(ppc::util::GetTaskMaxTime(), 1.0); - if (old.has_value()) { - env::detail::set_environment_variable("PPC_TASK_MAX_TIME", std::to_string(*old)); - } -} - -TEST(GetTaskMaxTime, ReadsFromEnvironment) { - env::detail::set_scoped_environment_variable scoped("PPC_TASK_MAX_TIME", "2.5"); - EXPECT_DOUBLE_EQ(ppc::util::GetTaskMaxTime(), 2.5); -} - -TEST(GetPerfMaxTime, ReturnsDefaultWhenUnset) { - const auto old = env::get("PPC_PERF_MAX_TIME"); - if (old.has_value()) { - env::detail::delete_environment_variable("PPC_PERF_MAX_TIME"); - } - EXPECT_DOUBLE_EQ(ppc::util::GetPerfMaxTime(), 10.0); - if (old.has_value()) { - env::detail::set_environment_variable("PPC_PERF_MAX_TIME", std::to_string(*old)); - } -} - -TEST(GetPerfMaxTime, ReadsFromEnvironment) { - env::detail::set_scoped_environment_variable scoped("PPC_PERF_MAX_TIME", "12.5"); - EXPECT_DOUBLE_EQ(ppc::util::GetPerfMaxTime(), 12.5); -} - -TEST(GetNumProc, ReturnsDefaultWhenUnset) { - const auto old = 
env::get("PPC_NUM_PROC"); - if (old.has_value()) { - env::detail::delete_environment_variable("PPC_NUM_PROC"); - } - EXPECT_EQ(ppc::util::GetNumProc(), 1); - if (old.has_value()) { - env::detail::set_environment_variable("PPC_NUM_PROC", std::to_string(*old)); - } -} - -TEST(GetNumProc, ReadsFromEnvironment) { - env::detail::set_scoped_environment_variable scoped("PPC_NUM_PROC", "4"); - EXPECT_EQ(ppc::util::GetNumProc(), 4); -} +#include "util/include/util.hpp" + +#include + +#include +#include +#include + +#include "omp.h" + +namespace my::nested { +struct Type {}; +} // namespace my::nested + +TEST(UtilTests, ExtractsCorrectNamespace) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, "my::nested"); +} + +TEST(UtilTests, ThreadsControlCheckOpenmpDisabledValgrind) { + const auto num_threads_env_var = env::get("PPC_NUM_THREADS"); + + EXPECT_EQ(ppc::util::GetNumThreads(), omp_get_max_threads()); +} + +namespace test_ns { +struct TypeInNamespace {}; +} // namespace test_ns + +struct PlainType {}; + +TEST(GetNamespaceTest, ReturnsExpectedNamespace) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, "test_ns"); +} + +TEST(GetNamespaceTest, ReturnsEmptyIfNoNamespacePrimitiveType) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, ""); +} + +TEST(GetNamespaceTest, ReturnsEmptyIfNoNamespacePlainStruct) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, ""); +} + +namespace test_ns { +struct Nested {}; +} // namespace test_ns + +TEST(GetNamespaceTest, ReturnsNamespaceCorrectly) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, "test_ns"); +} + +struct NoNamespaceType {}; + +TEST(GetNamespaceTest, NoNamespaceInType) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, ""); +} + +template +struct NotATemplate {}; + +TEST(GetNamespaceTest, NoKeyInPrettyFunction) { + std::string k_ns = ppc::util::GetNamespace>(); + EXPECT_EQ(k_ns, ""); +} + +namespace crazy { +struct 
VeryLongTypeNameWithOnlyLettersAndUnderscores {}; +} // namespace crazy + +TEST(GetNamespaceTest, NoTerminatorCharactersInPrettyFunction) { + std::string k_ns = ppc::util::GetNamespace(); + EXPECT_EQ(k_ns, "crazy"); +} + +TEST(GetTaskMaxTime, ReturnsDefaultWhenUnset) { + const auto old = env::get("PPC_TASK_MAX_TIME"); + if (old.has_value()) { + env::detail::delete_environment_variable("PPC_TASK_MAX_TIME"); + } + EXPECT_DOUBLE_EQ(ppc::util::GetTaskMaxTime(), 1.0); + if (old.has_value()) { + env::detail::set_environment_variable("PPC_TASK_MAX_TIME", std::to_string(*old)); + } +} + +TEST(GetTaskMaxTime, ReadsFromEnvironment) { + env::detail::set_scoped_environment_variable scoped("PPC_TASK_MAX_TIME", "2.5"); + EXPECT_DOUBLE_EQ(ppc::util::GetTaskMaxTime(), 2.5); +} + +TEST(GetPerfMaxTime, ReturnsDefaultWhenUnset) { + const auto old = env::get("PPC_PERF_MAX_TIME"); + if (old.has_value()) { + env::detail::delete_environment_variable("PPC_PERF_MAX_TIME"); + } + EXPECT_DOUBLE_EQ(ppc::util::GetPerfMaxTime(), 10.0); + if (old.has_value()) { + env::detail::set_environment_variable("PPC_PERF_MAX_TIME", std::to_string(*old)); + } +} + +TEST(GetPerfMaxTime, ReadsFromEnvironment) { + env::detail::set_scoped_environment_variable scoped("PPC_PERF_MAX_TIME", "12.5"); + EXPECT_DOUBLE_EQ(ppc::util::GetPerfMaxTime(), 12.5); +} + +TEST(GetNumProc, ReturnsDefaultWhenUnset) { + const auto old = env::get("PPC_NUM_PROC"); + if (old.has_value()) { + env::detail::delete_environment_variable("PPC_NUM_PROC"); + } + EXPECT_EQ(ppc::util::GetNumProc(), 1); + if (old.has_value()) { + env::detail::set_environment_variable("PPC_NUM_PROC", std::to_string(*old)); + } +} + +TEST(GetNumProc, ReadsFromEnvironment) { + env::detail::set_scoped_environment_variable scoped("PPC_NUM_PROC", "4"); + EXPECT_EQ(ppc::util::GetNumProc(), 4); +} diff --git a/requirements.txt b/requirements.txt index 2eca2d5402..aa7ce60a3f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -numpy==2.3.3 
-XlsxWriter==3.2.9 -PyYAML==6.0.3 -pre-commit==4.3.0 +numpy==2.3.3 +XlsxWriter==3.2.9 +PyYAML==6.0.3 +pre-commit==4.3.0 diff --git a/scoreboard/CMakeLists.txt b/scoreboard/CMakeLists.txt index b53310abef..f051eba2d1 100644 --- a/scoreboard/CMakeLists.txt +++ b/scoreboard/CMakeLists.txt @@ -1,13 +1,13 @@ -if(NOT USE_SCOREBOARD) - return() -endif() - -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html) -set(OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/html) - -add_custom_target( - generate_scoreboard ALL - COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/main.py -o - ${OUTPUT_DIR} - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - COMMENT "Running main.py") +if(NOT USE_SCOREBOARD) + return() +endif() + +file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html) +set(OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/html) + +add_custom_target( + generate_scoreboard ALL + COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/main.py -o + ${OUTPUT_DIR} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + COMMENT "Running main.py") diff --git a/scoreboard/README.md b/scoreboard/README.md index b43c87571c..8e13a1d92d 100644 --- a/scoreboard/README.md +++ b/scoreboard/README.md @@ -1,57 +1,57 @@ -# Scoreboard - -HTML scoreboard generator for parallel programming tasks. - -## Usage - -```bash -# Install dependencies -pip install -r requirements.txt - -# Generate scoreboard -python main.py -o output_directory -``` - -Generates `output_directory/index.html` with the scoreboard. - -## Configuration - -- `data/points-info.yml` - Task points, deadlines, penalties -- `data/plagiarism.yml` - Flagged submissions -- `data/deadlines.yml` - Optional display deadlines and day offsets - -## Testing - -```bash -# Install test dependencies -pip install -r tests/requirements.txt - -# Run tests -python -m pytest tests/ -v -``` - -## Output - -HTML table with columns: S (solution), A (acceleration), E (efficiency), D (deadline), C (copying), Total. 
- -### Deadlines display - -- Threads deadlines are auto-distributed across the Spring window: 1 Feb → 15 May. -- Processes deadlines are auto-distributed across the Autumn window: 15 Oct → 14 Dec. -- Due time is 23:59 MSK on the shown date. -- File `data/deadlines.yml` can shift dates per item by integer day offsets (default 0). Example: - -``` -threads: - seq: 0 # no shift - omp: -2 # 2 days earlier - tbb: 3 # 3 days later - stl: 0 - all: 0 -processes: - task_1: 0 - task_2: 5 - task_3: -1 -``` - -- If you put a non-integer string instead of a number, it is used as-is as the label (e.g., `"10 Nov"`). +# Scoreboard + +HTML scoreboard generator for parallel programming tasks. + +## Usage + +```bash +# Install dependencies +pip install -r requirements.txt + +# Generate scoreboard +python main.py -o output_directory +``` + +Generates `output_directory/index.html` with the scoreboard. + +## Configuration + +- `data/points-info.yml` - Task points, deadlines, penalties +- `data/plagiarism.yml` - Flagged submissions +- `data/deadlines.yml` - Optional display deadlines and day offsets + +## Testing + +```bash +# Install test dependencies +pip install -r tests/requirements.txt + +# Run tests +python -m pytest tests/ -v +``` + +## Output + +HTML table with columns: S (solution), A (acceleration), E (efficiency), D (deadline), C (copying), Total. + +### Deadlines display + +- Threads deadlines are auto-distributed across the Spring window: 1 Feb → 15 May. +- Processes deadlines are auto-distributed across the Autumn window: 15 Oct → 14 Dec. +- Due time is 23:59 MSK on the shown date. +- File `data/deadlines.yml` can shift dates per item by integer day offsets (default 0). Example: + +``` +threads: + seq: 0 # no shift + omp: -2 # 2 days earlier + tbb: 3 # 3 days later + stl: 0 + all: 0 +processes: + task_1: 0 + task_2: 5 + task_3: -1 +``` + +- If you put a non-integer string instead of a number, it is used as-is as the label (e.g., `"10 Nov"`). 
diff --git a/scoreboard/assign_variant.py b/scoreboard/assign_variant.py index feb021fa3e..a47bba8792 100644 --- a/scoreboard/assign_variant.py +++ b/scoreboard/assign_variant.py @@ -1,142 +1,142 @@ -# file: assign_variant.py -""" -Deterministic variant assignment from Full Name + Group -with the repository name as the ONLY salt. - -Algorithm: - 1) Normalize strings (NFKC, trim, lowercase, map 'ё'->'е', collapse spaces). - 2) Build a key: "surname|name|patronymic|group|repo". - 3) SHA-256(key) -> big integer -> modulo `num_variants`. - -Properties: -- Stable: same inputs → same output. -- Uniform: modulo of a cryptographic hash distributes evenly. -- Note: Without the full group roster, zero collisions cannot be *guaranteed* - (birthday paradox). This is intended for “approximately unique” per-group use. - -Usage: - from assign_variant import assign_variant - v = assign_variant( - surname="Petrov", - name="Pyotr", - patronymic="Petrovich", - group="MEN-201", - repo="learning-process/parallel_programming_course", - num_variants=31, # produces values in 0..30 - ) - print(v) -""" - -from __future__ import annotations - -import hashlib -import re -import unicodedata -from typing import Optional - -__all__ = ["assign_variant", "normalize"] - - -def normalize(s: Optional[str]) -> str: - """ - Normalize a string: - - Unicode NFKC, - - trim, - - lowercase, - - map Cyrillic 'ё' -> 'е' (common normalization in Russian names), - - collapse multiple spaces to a single space. - - None -> '' (empty string). 
- """ - if not s: - return "" - s = unicodedata.normalize("NFKC", s).strip().lower() - s = s.replace("ё", "е") - s = re.sub(r"\s+", " ", s) - return s - - -def _hash_int(key: str) -> int: - """Return SHA-256(key) as a big integer.""" - return int.from_bytes(hashlib.sha256(key.encode("utf-8")).digest(), "big") - - -def assign_variant( - surname: str, - name: str, - group: str, - repo: str, - patronymic: Optional[str] = "", - num_variants: int = 31, -) -> int: - """ - Deterministically returns a variant index in [0 .. num_variants-1] - based on (surname, name, patronymic, group) and the repository name (repo) - as the sole salt. - - :param surname: Last name - :param name: First name - :param group: Group identifier (e.g., "MEN-201") - :param repo: Repository name used as salt (e.g., "org/repo" or just "repo") - :param patronymic: Middle name / patronymic (optional) - :param num_variants: Total number of variants (> 0). Output range: 0..num_variants-1 - :return: int — the variant index - """ - if not isinstance(num_variants, int) or num_variants < 1: - raise ValueError("num_variants must be a positive integer (> 0)") - if not repo or not isinstance(repo, str): - raise ValueError("repo must be a non-empty string") - - key = "|".join( - ( - normalize(surname), - normalize(name), - normalize(patronymic), - normalize(group), - normalize(repo), - ) - ) - h = _hash_int(key) - return h % num_variants - - -# Minimal self-check when executed directly (no CLI arguments). 
-if __name__ == "__main__": - - def demo(): - print("Demo: deterministic assignment\n") - - v1 = assign_variant( - surname="Петров", - name="Пётр", - patronymic="Петрович", - group="МЕН-201", - repo="learning-process/parallel_programming_course", - num_variants=31, - ) - # Different casing/spacing/ё→е should not change the result: - v2 = assign_variant( - surname="ПЕТРОВ", - name="петр ", - patronymic="пЕТРОВИЧ", - group=" мен-201 ", - repo="learning-process/parallel_programming_course", - num_variants=31, - ) - assert v1 == v2, "Normalization should make results identical" - - v_other_repo = assign_variant( - surname="Petrov", - name="Pyotr", - patronymic="Petrovich", - group="MEN-201", - repo="learning-process/ppc_2025_fall", # different salt → likely different value - num_variants=31, - ) - - print(f"Variant (repo=A): {v1}") - print(f"Variant (same inputs, normalized): {v2}") - print(f"Variant (repo=B): {v_other_repo}") - print("\nOK: deterministic & normalized.") - - demo() +# file: assign_variant.py +""" +Deterministic variant assignment from Full Name + Group +with the repository name as the ONLY salt. + +Algorithm: + 1) Normalize strings (NFKC, trim, lowercase, map 'ё'->'е', collapse spaces). + 2) Build a key: "surname|name|patronymic|group|repo". + 3) SHA-256(key) -> big integer -> modulo `num_variants`. + +Properties: +- Stable: same inputs → same output. +- Uniform: modulo of a cryptographic hash distributes evenly. +- Note: Without the full group roster, zero collisions cannot be *guaranteed* + (birthday paradox). This is intended for “approximately unique” per-group use. 
+ +Usage: + from assign_variant import assign_variant + v = assign_variant( + surname="Petrov", + name="Pyotr", + patronymic="Petrovich", + group="MEN-201", + repo="learning-process/parallel_programming_course", + num_variants=31, # produces values in 0..30 + ) + print(v) +""" + +from __future__ import annotations + +import hashlib +import re +import unicodedata +from typing import Optional + +__all__ = ["assign_variant", "normalize"] + + +def normalize(s: Optional[str]) -> str: + """ + Normalize a string: + - Unicode NFKC, + - trim, + - lowercase, + - map Cyrillic 'ё' -> 'е' (common normalization in Russian names), + - collapse multiple spaces to a single space. + + None -> '' (empty string). + """ + if not s: + return "" + s = unicodedata.normalize("NFKC", s).strip().lower() + s = s.replace("ё", "е") + s = re.sub(r"\s+", " ", s) + return s + + +def _hash_int(key: str) -> int: + """Return SHA-256(key) as a big integer.""" + return int.from_bytes(hashlib.sha256(key.encode("utf-8")).digest(), "big") + + +def assign_variant( + surname: str, + name: str, + group: str, + repo: str, + patronymic: Optional[str] = "", + num_variants: int = 31, +) -> int: + """ + Deterministically returns a variant index in [0 .. num_variants-1] + based on (surname, name, patronymic, group) and the repository name (repo) + as the sole salt. + + :param surname: Last name + :param name: First name + :param group: Group identifier (e.g., "MEN-201") + :param repo: Repository name used as salt (e.g., "org/repo" or just "repo") + :param patronymic: Middle name / patronymic (optional) + :param num_variants: Total number of variants (> 0). 
Output range: 0..num_variants-1 + :return: int — the variant index + """ + if not isinstance(num_variants, int) or num_variants < 1: + raise ValueError("num_variants must be a positive integer (> 0)") + if not repo or not isinstance(repo, str): + raise ValueError("repo must be a non-empty string") + + key = "|".join( + ( + normalize(surname), + normalize(name), + normalize(patronymic), + normalize(group), + normalize(repo), + ) + ) + h = _hash_int(key) + return h % num_variants + + +# Minimal self-check when executed directly (no CLI arguments). +if __name__ == "__main__": + + def demo(): + print("Demo: deterministic assignment\n") + + v1 = assign_variant( + surname="Петров", + name="Пётр", + patronymic="Петрович", + group="МЕН-201", + repo="learning-process/parallel_programming_course", + num_variants=31, + ) + # Different casing/spacing/ё→е should not change the result: + v2 = assign_variant( + surname="ПЕТРОВ", + name="петр ", + patronymic="пЕТРОВИЧ", + group=" мен-201 ", + repo="learning-process/parallel_programming_course", + num_variants=31, + ) + assert v1 == v2, "Normalization should make results identical" + + v_other_repo = assign_variant( + surname="Petrov", + name="Pyotr", + patronymic="Petrovich", + group="MEN-201", + repo="learning-process/ppc_2025_fall", # different salt → likely different value + num_variants=31, + ) + + print(f"Variant (repo=A): {v1}") + print(f"Variant (same inputs, normalized): {v2}") + print(f"Variant (repo=B): {v_other_repo}") + print("\nOK: deterministic & normalized.") + + demo() diff --git a/scoreboard/data/copying.yml b/scoreboard/data/copying.yml index f38f6fbadb..7d8af699d2 100644 --- a/scoreboard/data/copying.yml +++ b/scoreboard/data/copying.yml @@ -1,14 +1,14 @@ -threads: - copying: - seq: - - example_threads - omp: [] - tbb: [] - stl: [] - all: [] -processes: - copying: - mpi: - - example_processes - seq: - - example_processes +threads: + copying: + seq: + - example_threads + omp: [] + tbb: [] + stl: [] + all: [] 
+processes: + copying: + mpi: + - example_processes + seq: + - example_processes diff --git a/scoreboard/data/deadlines.yml b/scoreboard/data/deadlines.yml index 338ac383f7..c2a9bee262 100644 --- a/scoreboard/data/deadlines.yml +++ b/scoreboard/data/deadlines.yml @@ -1,13 +1,13 @@ -threads: - # Put integer to shift auto date by N days (negative allowed). Default 0. - seq: 0 - omp: 0 - tbb: 0 - stl: 0 - all: 0 - -processes: - # Use integer offsets for tasks; default 0. - task_1: 17 - task_2: 17 - task_3: 17 +threads: + # Put integer to shift auto date by N days (negative allowed). Default 0. + seq: 0 + omp: 0 + tbb: 0 + stl: 0 + all: 0 + +processes: + # Use integer offsets for tasks; default 0. + task_1: 17 + task_2: 17 + task_3: 17 diff --git a/scoreboard/data/points-info.yml b/scoreboard/data/points-info.yml index 82c5b774fd..afc6fc9e99 100644 --- a/scoreboard/data/points-info.yml +++ b/scoreboard/data/points-info.yml @@ -1,63 +1,63 @@ -processes: - semester_total: 70 - tasks: - - name: mpi_task_1 - mpi: - - S: 8 - - A: 0 - seq: - - S: 2 - R: 2 - variants_max: 27 - Total: 12 - - name: mpi_task_2 - mpi: - - S: 12 - - A: 5 - seq: - - S: 3 - R: 3 - variants_max: 23 - Total: 23 - - name: mpi_task_3 - mpi: - - S: 16 - - A: 10 - seq: - - S: 4 - R: 5 - variants_max: 32 - Total: 35 -threads: - semester_total: 64 - variants_max: 30 - tasks: - - name: seq - S: 4 - R: 1 - Total: 5 - - name: omp - S: 6 - A: 3 - R: 2 - Total: 11 - - name: tbb - S: 6 - A: 3 - R: 2 - Total: 11 - - name: stl - S: 8 - A: 6 - R: 2 - Total: 16 - - name: all - S: 10 - A: 8 - R: 3 - Total: 21 -efficiency: - num_proc: 4 -copying: - coefficient: 0.5 - note: "Penalty C = -coefficient * S (scoreboard notation)" +processes: + semester_total: 70 + tasks: + - name: mpi_task_1 + mpi: + - S: 8 + - A: 0 + seq: + - S: 2 + R: 2 + variants_max: 27 + Total: 12 + - name: mpi_task_2 + mpi: + - S: 12 + - A: 5 + seq: + - S: 3 + R: 3 + variants_max: 23 + Total: 23 + - name: mpi_task_3 + mpi: + - S: 16 + - A: 10 + seq: + 
- S: 4 + R: 5 + variants_max: 32 + Total: 35 +threads: + semester_total: 64 + variants_max: 30 + tasks: + - name: seq + S: 4 + R: 1 + Total: 5 + - name: omp + S: 6 + A: 3 + R: 2 + Total: 11 + - name: tbb + S: 6 + A: 3 + R: 2 + Total: 11 + - name: stl + S: 8 + A: 6 + R: 2 + Total: 16 + - name: all + S: 10 + A: 8 + R: 3 + Total: 21 +efficiency: + num_proc: 4 +copying: + coefficient: 0.5 + note: "Penalty C = -coefficient * S (scoreboard notation)" diff --git a/scoreboard/main.py b/scoreboard/main.py index ec187d3a0d..ebcb51da4d 100644 --- a/scoreboard/main.py +++ b/scoreboard/main.py @@ -1,1505 +1,1505 @@ -from pathlib import Path -from collections import defaultdict, Counter -from datetime import datetime -import csv -import argparse -import subprocess -import yaml -import shutil -from jinja2 import Environment, FileSystemLoader -import logging -import sys - -# Try ZoneInfo from stdlib, then from backports, else fall back to naive time -try: - from zoneinfo import ZoneInfo # type: ignore -except Exception: # pragma: no cover - fallback for Python < 3.9 - try: - from backports.zoneinfo import ZoneInfo # type: ignore - except Exception: # Last resort: define a stub - ZoneInfo = None # type: ignore - -logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") -logger = logging.getLogger(__name__) - -task_types = ["all", "mpi", "omp", "seq", "stl", "tbb"] -# Threads table order: seq first, then omp, tbb, stl, all -task_types_threads = ["seq", "omp", "tbb", "stl", "all"] -task_types_processes = ["mpi", "seq"] - -script_dir = Path(__file__).parent -tasks_dir = script_dir.parent / "tasks" -# Salt is derived from the repository root directory name (dynamic) -REPO_ROOT = script_dir.parent.resolve() -# Salt format: "learning_process/" -REPO_SALT = f"learning_process/{REPO_ROOT.name}" - -# Ensure we can import assign_variant from scoreboard directory -if str(script_dir) not in sys.path: - sys.path.insert(0, str(script_dir)) -try: - from assign_variant import 
assign_variant -except Exception: - - def assign_variant( - surname: str, - name: str, - group: str, - repo: str, - patronymic: str = "", - num_variants: int = 1, - ) -> int: - return 0 - - -def _now_msk(): - """Return current datetime in MSK if tz support is available, else local time.""" - try: - if ZoneInfo is not None: - return datetime.now(ZoneInfo("Europe/Moscow")) - except Exception: - pass - return datetime.now() - - -def _read_tasks_type(task_dir: Path) -> str | None: - """Read tasks_type from settings.json in the task directory (if present).""" - settings_path = task_dir / "settings.json" - if settings_path.exists(): - try: - import json - - with open(settings_path, "r") as f: - data = json.load(f) - return data.get("tasks_type") # "threads" or "processes" - except Exception as e: - logger.warning("Failed to parse %s: %s", settings_path, e) - return None - - -def discover_tasks(tasks_dir, task_types): - """Discover tasks and their implementation status from the filesystem. - - Returns: - directories: dict[task_name][task_type] -> status - tasks_type_map: dict[task_name] -> "threads" | "processes" | None - """ - directories = defaultdict(dict) - tasks_type_map: dict[str, str | None] = {} - - if tasks_dir.exists() and tasks_dir.is_dir(): - for task_name_dir in tasks_dir.iterdir(): - if task_name_dir.is_dir() and task_name_dir.name not in ["common"]: - task_name = task_name_dir.name - # Save tasks_type from settings.json if present - tasks_type_map[task_name] = _read_tasks_type(task_name_dir) - for task_type in task_types: - task_type_dir = task_name_dir / task_type - if task_type_dir.exists() and task_type_dir.is_dir(): - if task_name.endswith("_disabled"): - clean_task_name = task_name[: -len("_disabled")] - directories[clean_task_name][task_type] = "disabled" - else: - directories[task_name][task_type] = "done" - - return directories, tasks_type_map - - -directories, tasks_type_map = discover_tasks(tasks_dir, task_types) - - -def 
load_performance_data_threads(perf_stat_file_path: Path) -> dict: - """Load threads performance ratios (T_x/T_seq) from CSV. - Expected header: Task, SEQ, OMP, TBB, STL, ALL - """ - perf_stats: dict[str, dict] = {} - if perf_stat_file_path.exists(): - with open(perf_stat_file_path, "r", newline="") as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - task_name = row.get("Task") - if not task_name: - continue - perf_stats[task_name] = { - "seq": row.get("SEQ", "?"), - "omp": row.get("OMP", "?"), - "tbb": row.get("TBB", "?"), - "stl": row.get("STL", "?"), - "all": row.get("ALL", "?"), - } - else: - logger.warning("Threads perf stats CSV not found at %s", perf_stat_file_path) - return perf_stats - - -def load_performance_data_processes(perf_stat_file_path: Path) -> dict: - """Load processes performance ratios (T_x/T_seq) from CSV. - Expected header: Task, SEQ, MPI - """ - perf_stats: dict[str, dict] = {} - if perf_stat_file_path.exists(): - with open(perf_stat_file_path, "r", newline="") as csvfile: - reader = csv.DictReader(csvfile) - for row in reader: - task_name = row.get("Task") - if not task_name: - continue - perf_stats[task_name] = { - "seq": row.get("SEQ", "?"), - "mpi": row.get("MPI", "?"), - } - else: - logger.warning("Processes perf stats CSV not found at %s", perf_stat_file_path) - return perf_stats - - -def calculate_performance_metrics(perf_val, eff_num_proc, task_type): - """Calculate acceleration and efficiency from performance value.""" - acceleration = "?" - efficiency = "?" 
- try: - perf_float = float(perf_val) - if perf_float > 0 and not ( - perf_float == float("inf") or perf_float != perf_float - ): - speedup = 1.0 / perf_float - # For sequential code, acceleration and efficiency don't make sense - # as it should be the baseline (speedup = 1.0 by definition) - if task_type == "seq": - acceleration = "1.00" # Sequential is the baseline - efficiency = "N/A" - else: - acceleration = f"{speedup:.2f}" - efficiency = f"{speedup / eff_num_proc * 100:.2f}%" - except (ValueError, TypeError): - pass - return acceleration, efficiency - - -def _find_max_solution(points_info, task_type: str) -> int: - """Resolve max S for a given task type from points-info (threads list).""" - threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) - for t in threads_tasks: - if str(t.get("name")) == task_type: - try: - return int(t.get("S", 0)) - except Exception: - return 0 - if task_type == "mpi": - return 0 - return 0 - - -def _find_report_max(points_info, task_type: str) -> int: - """Resolve max Report (R) points for a given task type from points-info (threads). - Returns 0 if not found. - """ - threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) - for t in threads_tasks: - if str(t.get("name")) == task_type: - try: - return int(t.get("R", 0)) - except Exception: - return 0 - return 0 - - -def _find_performance_max(points_info, task_type: str) -> int: - """Resolve max Performance (A) points for a given task type (threads).""" - threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) - for t in threads_tasks: - if str(t.get("name")) == task_type: - try: - return int(t.get("A", 0)) - except Exception: - return 0 - return 0 - - -def _calc_perf_points_from_efficiency(efficiency_str: str, max_points: int) -> float: - """Calculate Performance points as a real number (x.yy). 
- - Mapping (eff -> percent of max): - >=50 -> 100; [45,50) -> 90; [42,45) -> 80; [40,42) -> 70; [37,40) -> 60; - [35,37) -> 50; [32,35) -> 40; [30,32) -> 30; [27,30) -> 20; [25,27) -> 10; <25 -> 0 - Returns a float rounded to 2 decimals (no ceil). - """ - if not isinstance(efficiency_str, str) or not efficiency_str.endswith("%"): - return 0.0 - try: - val = float(efficiency_str.rstrip("%")) - except Exception: - return 0.0 - perc = 0.0 - if val >= 50: - perc = 1.0 - elif 45 <= val < 50: - perc = 0.9 - elif 42 <= val < 45: - perc = 0.8 - elif 40 <= val < 42: - perc = 0.7 - elif 37 <= val < 40: - perc = 0.6 - elif 35 <= val < 37: - perc = 0.5 - elif 32 <= val < 35: - perc = 0.4 - elif 30 <= val < 32: - perc = 0.3 - elif 27 <= val < 30: - perc = 0.2 - elif 25 <= val < 27: - perc = 0.1 - else: - perc = 0.0 - pts = max_points * perc if max_points > 0 else 0.0 - # round to 2 decimals (banker's rounding acceptable here) - return round(pts, 2) - - -def _find_process_report_max(points_info, task_number: int) -> int: - """Get max report (R) points for process task by ordinal (1..3). - Looks up processes.tasks with names like 'mpi_task_1'. - """ - proc = (points_info.get("processes", {}) or {}).get("tasks", []) - key = f"mpi_task_{task_number}" - for t in proc: - if str(t.get("name")) == key: - try: - return int(t.get("R", 0)) - except Exception: - return 0 - return 0 - - -def _find_process_points(points_info, task_number: int) -> tuple[int, int, int, int]: - """Return (S_mpi, S_seq, A_mpi, R) maxima for a given process task ordinal (1..3). - Supports both mapping and list-of-maps (per user's YAML example). 
- """ - proc_tasks = (points_info.get("processes", {}) or {}).get("tasks", []) - key = f"mpi_task_{task_number}" - for t in proc_tasks: - if str(t.get("name")) == key: - - def _extract(obj, k): - if isinstance(obj, dict): - return int(obj.get(k, 0)) - if isinstance(obj, list): - for it in obj: - if isinstance(it, dict) and k in it: - try: - return int(it.get(k, 0)) - except Exception: - return 0 - return 0 - - mpi_blk = t.get("mpi", {}) - seq_blk = t.get("seq", {}) - s_mpi = _extract(mpi_blk, "S") - a_mpi = _extract(mpi_blk, "A") - s_seq = _extract(seq_blk, "S") - try: - r = int(t.get("R", 0)) - except Exception: - r = 0 - return s_mpi, s_seq, a_mpi, r - return 0, 0, 0, 0 - - -def _find_process_variants_max(points_info, task_number: int) -> int: - proc_tasks = (points_info.get("processes", {}) or {}).get("tasks", []) - key = f"mpi_task_{task_number}" - for t in proc_tasks: - if str(t.get("name")) == key: - try: - return int(t.get("variants_max", 1)) - except Exception: - return 1 - return 1 - - -def get_solution_points_and_style(task_type, status, cfg): - """Get solution points and CSS style based on task type and status.""" - max_sol_points = _find_max_solution(cfg, task_type) - sol_points = max_sol_points if status in ("done", "disabled") else 0 - solution_style = "" - if status == "done": - solution_style = "background-color: lightgreen;" - elif status == "disabled": - solution_style = "background-color: #6495ED;" - return sol_points, solution_style - - -def check_plagiarism_and_calculate_penalty( - dir, task_type, sol_points, plagiarism_cfg, cfg, semester: str | None -): - """Check if task is plagiarized and calculate penalty points. - - Supports two config layouts: - - legacy: { plagiarism: { seq: [...], omp: [...], ... 
} } - - semesters: { threads: {plagiarism: {...}}, processes: {plagiarism: {...}} } - """ - clean_dir = dir[: -len("_disabled")] if dir.endswith("_disabled") else dir - - # Resolve copying/plagiarism mapping based on layout - plag_map = {} - if isinstance(plagiarism_cfg, dict) and ( - "copying" in plagiarism_cfg or "plagiarism" in plagiarism_cfg - ): - plag_map = ( - plagiarism_cfg.get("copying") - if "copying" in plagiarism_cfg - else plagiarism_cfg.get("plagiarism", {}) - ) or {} - elif ( - isinstance(plagiarism_cfg, dict) - and semester - and semester in plagiarism_cfg - and isinstance(plagiarism_cfg[semester], dict) - ): - inner = plagiarism_cfg[semester] - plag_map = ( - inner.get("copying") if "copying" in inner else inner.get("plagiarism", {}) - ) or {} - - flagged_list = set(plag_map.get(task_type, []) or []) - is_cheated = dir in flagged_list or clean_dir in flagged_list - plagiarism_points = 0 - if is_cheated: - # Prefer new key 'copying', fallback to legacy 'plagiarism' - try: - plag_coeff = float( - (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( - "coefficient", 0.0 - ) - ) - except Exception: - plag_coeff = 0.0 - plagiarism_points = -plag_coeff * sol_points - return is_cheated, plagiarism_points - - -def calculate_deadline_penalty(dir, task_type, status, deadlines_cfg, tasks_dir): - """Calculate deadline penalty points based on git commit timestamp.""" - deadline_points = 0 - deadline_str = deadlines_cfg.get(task_type) - if status == "done" and deadline_str: - try: - deadline_dt = datetime.fromisoformat(deadline_str) - git_cmd = [ - "git", - "log", - "-1", - "--format=%ct", - str( - tasks_dir - / (dir + ("_disabled" if status == "disabled" else "")) - / task_type - ), - ] - result = subprocess.run(git_cmd, capture_output=True, text=True) - if result.stdout.strip().isdigit(): - commit_dt = datetime.fromtimestamp(int(result.stdout.strip())) - days_late = (commit_dt - deadline_dt).days - if days_late > 0: - deadline_points = -days_late - 
except Exception: - pass - return deadline_points - - -def load_configurations(): - """Load points-info (max points, deadlines, efficiency) and plagiarism lists.""" - points_info_path = Path(__file__).parent / "data" / "points-info.yml" - assert points_info_path.exists(), f"Points info file not found: {points_info_path}" - with open(points_info_path, "r") as f: - points_info = yaml.safe_load(f) - assert points_info, "Points info is empty" - - eff_num_proc = int(points_info.get("efficiency", {}).get("num_proc", 1)) - deadlines_cfg = points_info.get("deadlines", {}) - - plagiarism_config_path = Path(__file__).parent / "data" / "copying.yml" - with open(plagiarism_config_path, "r") as file: - plagiarism_cfg = yaml.safe_load(file) - assert plagiarism_cfg, "Plagiarism configuration is empty" - - return points_info, eff_num_proc, deadlines_cfg, plagiarism_cfg - - -def _build_rows_for_task_types( - selected_task_types: list[str], - dir_names: list[str], - perf_stats: dict, - cfg, - eff_num_proc, - deadlines_cfg, -): - """Build rows for the given list of task directories and selected task types.""" - rows = [] - - def _load_student_info_label(dir_name: str): - import json - - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return None - try: - with open(info_path, "r") as f: - data = json.load(f) - s = data.get("student", {}) - last = s.get("last_name", "") - first = s.get("first_name", "") - middle = s.get("middle_name", "") - parts = [p for p in [last, first, middle] if p] - label = "
".join(parts) - return label if label else None - except Exception: - return None - - def _load_student_fields(dir_name: str): - import json - - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return None - try: - with open(info_path, "r") as f: - data = json.load(f) - s = data.get("student", {}) - return ( - str(s.get("last_name", "")), - str(s.get("first_name", "")), - str(s.get("middle_name", "")), - str(s.get("group_number", "")), - ) - except Exception: - return None - - for dir in sorted(dir_names): - row_types = [] - total_count = 0 - for task_type in selected_task_types: - status = directories[dir].get(task_type) - sol_points, solution_style = get_solution_points_and_style( - task_type, status, cfg - ) - - task_points = sol_points - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - dir, task_type, sol_points, plagiarism_cfg, cfg, semester="threads" - ) - task_points += plagiarism_points - - perf_val = perf_stats.get(dir, {}).get(task_type, "?") - - # Calculate acceleration and efficiency if performance data is available - acceleration, efficiency = calculate_performance_metrics( - perf_val, eff_num_proc, task_type - ) - - # Calculate deadline penalty points - deadline_points = calculate_deadline_penalty( - dir, task_type, status, deadlines_cfg, tasks_dir - ) - - # Report presence: award R only if report.md exists inside the task directory - report_present = (tasks_dir / dir / "report.md").exists() - report_points = _find_report_max(cfg, task_type) if report_present else 0 - - # Performance points P for non-seq types, based on efficiency - perf_max = _find_performance_max(cfg, task_type) - if task_type != "seq": - perf_points = _calc_perf_points_from_efficiency(efficiency, perf_max) - perf_points_display = ( - f"{perf_points:.2f}" - if isinstance(efficiency, str) and efficiency.endswith("%") - else "—" - ) - else: - perf_points = 0.0 - perf_points_display = "—" - - row_types.append( - { - 
"solution_points": sol_points, - "solution_style": solution_style, - "perf": perf_val, - "acceleration": acceleration, - "efficiency": efficiency, - "perf_points": perf_points, - "perf_points_display": perf_points_display, - "deadline_points": deadline_points, - "plagiarised": is_cheated, - "plagiarism_points": plagiarism_points, - "report": report_points, - } - ) - # Total: include Solution + Performance + Report + Copying penalty (exclude Deadline) - total_count += task_points + perf_points + report_points - - label_name = _load_student_info_label(dir) or dir - # Generate variant for threads based on student info and variants_max - threads_vmax = int((cfg.get("threads", {}) or {}).get("variants_max", 1)) - fields = _load_student_fields(dir) - if fields: - last, first, middle, group = fields - try: - v_idx = assign_variant( - last, - first, - group, - REPO_SALT, - patronymic=middle, - num_variants=threads_vmax, - ) - variant = str(v_idx + 1) - except Exception: - variant = "?" - else: - variant = "?" - rows.append( - { - "task": label_name, - "variant": variant, - "types": row_types, - "total": total_count, - } - ) - return rows - - -def main(): - """Main function to generate the scoreboard. 
- - Now generates three pages in the output dir: - - index.html: simple menu linking to threads.html and processes.html - - threads.html: scoreboard for thread-based tasks - - processes.html: scoreboard for process-based tasks - """ - cfg, eff_num_proc, deadlines_cfg, plagiarism_cfg_local = load_configurations() - - # Make plagiarism config available to rows builder - global plagiarism_cfg - plagiarism_cfg = plagiarism_cfg_local - - env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates")) - - # Load optional display deadlines from deadlines.yml and/or auto-compute evenly - deadlines_display_threads: dict[str, str] | None = None - deadlines_display_processes: dict[str, str] | None = None - try: - dl_file = script_dir / "data" / "deadlines.yml" - if dl_file.exists(): - with open(dl_file, "r") as f: - dl_cfg = yaml.safe_load(f) or {} - deadlines_display_threads = dl_cfg.get("threads") or {} - deadlines_display_processes = dl_cfg.get("processes") or {} - except Exception: - pass - - # Helper: compute evenly spaced dates for current semester (MSK) - from datetime import date, timedelta - import calendar - - def _abbr(day: date) -> str: - return f"{day.day} {calendar.month_abbr[day.month]}" - - def _spring_bounds(today: date) -> tuple[date, date]: - """Return [1 Feb .. 15 May] window for the appropriate year. - If today is past 15 May, use next year's spring; otherwise this year's. - """ - y = today.year - start = date(y, 2, 1) - end = date(y, 5, 15) - if today > end: - y += 1 - start = date(y, 2, 1) - end = date(y, 5, 15) - return start, end - - def _autumn_bounds(today: date) -> tuple[date, date]: - """Return [15 Oct .. 14 Dec] window for the appropriate year. - If today is past 14 Dec, use next year's autumn; otherwise this year's. 
- """ - y = today.year - start = date(y, 10, 15) - end = date(y, 12, 14) - if today > end: - y += 1 - start = date(y, 10, 15) - end = date(y, 12, 14) - return start, end - - def _evenly_spaced_dates(n: int, start: date, end: date) -> list[date]: - """ - Return n deadlines evenly spaced across the window (start..end], - i.e., strictly after the start date, with the last at end. - Positions are at fractions (i+1)/n of the total span. - """ - if n <= 1: - return [end] - total = (end - start).days - if total < 0: - start, end = end, start - total = -total - res = [] - for i in range(n): - off = int(round((i + 1) * total / n)) - if off <= 0: - off = 1 - if off > total: - off = total - res.append(start + timedelta(days=off)) - return res - - def _compute_display_deadlines_threads(order: list[str]) -> dict[str, date]: - # Threads = Spring semester (prefer MSK; fallback to local time) - try: - today = _now_msk().date() - except Exception: - today = datetime.now().date() - s, e = _spring_bounds(today) - ds = _evenly_spaced_dates(len(order), s, e) - return {t: d for t, d in zip(order, ds)} - - def _compute_display_deadlines_processes(n_items: int) -> list[date]: - # Processes = Autumn semester (prefer MSK; fallback to local time) - try: - today = _now_msk().date() - except Exception: - today = datetime.now().date() - s, e = _autumn_bounds(today) - ds = _evenly_spaced_dates(n_items, s, e) - return ds - - # Locate perf CSVs from CI or local runs (threads and processes) - candidates_threads = [ - script_dir.parent - / "build" - / "perf_stat_dir" - / "threads_task_run_perf_table.csv", - script_dir.parent / "perf_stat_dir" / "threads_task_run_perf_table.csv", - # Fallback to old single-file name - script_dir.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv", - script_dir.parent / "perf_stat_dir" / "task_run_perf_table.csv", - ] - threads_csv = next( - (p for p in candidates_threads if p.exists()), candidates_threads[0] - ) - - candidates_processes = [ - 
script_dir.parent - / "build" - / "perf_stat_dir" - / "processes_task_run_perf_table.csv", - script_dir.parent / "perf_stat_dir" / "processes_task_run_perf_table.csv", - ] - processes_csv = next( - (p for p in candidates_processes if p.exists()), candidates_processes[0] - ) - - # Read and merge performance statistics CSVs (keys = CSV Task column) - perf_stats_threads = load_performance_data_threads(threads_csv) - perf_stats_processes = load_performance_data_processes(processes_csv) - perf_stats_raw: dict[str, dict] = {} - perf_stats_raw.update(perf_stats_threads) - for k, v in perf_stats_processes.items(): - perf_stats_raw[k] = {**perf_stats_raw.get(k, {}), **v} - - # Partition tasks by tasks_type from settings.json - threads_task_dirs = [ - name for name, ttype in tasks_type_map.items() if ttype == "threads" - ] - processes_task_dirs = [ - name for name, ttype in tasks_type_map.items() if ttype == "processes" - ] - - # Fallback: if settings.json is missing, guess by directory name heuristic - for name in directories.keys(): - if name not in tasks_type_map or tasks_type_map[name] is None: - if "threads" in name: - threads_task_dirs.append(name) - elif "processes" in name: - processes_task_dirs.append(name) - - # Resolve performance stats keys (from CSV Task names) to actual task directories - import re as _re - - def _family_from_name(name: str) -> tuple[str, int]: - # Infer family from CSV Task value, using only structural markers - # threads -> ("threads", 0); processes[_N] -> ("processes", N|1) - if "threads" in name: - return "threads", 0 - if "processes" in name: - m = _re.search(r"processes(?:_(\d+))?", name) - if m: - try: - idx = int(m.group(1)) if m.group(1) else 1 - except Exception: - idx = 1 - else: - idx = 1 - return "processes", idx - # Fallback: treat as threads family - return "threads", 0 - - def _family_from_dir(dir_name: str) -> tuple[str, int]: - # Prefer explicit tasks_type from settings.json and task_number from info.json - kind_guess = 
tasks_type_map.get(dir_name) or ( - "threads" if "threads" in dir_name else "processes" - ) - idx = 0 - if kind_guess == "processes": - # Lightweight reader to avoid dependency on later-scoped helpers - try: - import json as _json - - info_path = tasks_dir / dir_name / "info.json" - if info_path.exists(): - with open(info_path, "r") as _f: - data = _json.load(_f) - s = data.get("student", {}) if isinstance(data, dict) else {} - try: - idx = int(str(s.get("task_number", "0"))) - except Exception: - idx = 0 - except Exception: - idx = 0 - return kind_guess, idx - - # Build map family -> list of dir names in this repo - family_to_dirs: dict[tuple[str, int], list[str]] = {} - for d in sorted(directories.keys()): - fam = _family_from_dir(d) - family_to_dirs.setdefault(fam, []).append(d) - - # Aggregate perf by family (CSV keys may not match dir names) - perf_by_family: dict[tuple[str, int], dict] = {} - for key, vals in perf_stats_raw.items(): - fam = _family_from_name(key) - perf_by_family[fam] = {**perf_by_family.get(fam, {}), **vals} - - # Project family perf onto actual directories (prefer exact one per family) - perf_stats: dict[str, dict] = {} - for fam, vals in perf_by_family.items(): - dirs_for_family = family_to_dirs.get(fam, []) - if not dirs_for_family: - continue - # Assign same perf to all dirs in the family (usually one) - for d in dirs_for_family: - perf_stats[d] = vals.copy() - - # Build rows for each page - threads_rows = _build_rows_for_task_types( - task_types_threads, - threads_task_dirs, - perf_stats, - cfg, - eff_num_proc, - deadlines_cfg, - ) - # Processes page: build 3 tasks as columns for a single student - import json - - def _load_student_info(dir_name: str): - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return None - try: - with open(info_path, "r") as f: - data = json.load(f) - return data.get("student", {}) - except Exception as e: - logger.warning("Failed to parse %s: %s", info_path, e) - return None - - 
def _identity_key(student: dict) -> str: - return "|".join( - [ - str(student.get("first_name", "")), - str(student.get("last_name", "")), - str(student.get("middle_name", "")), - str(student.get("group_number", "")), - ] - ) - - def _build_cell(dir_name: str, ttype: str, perf_map: dict[str, dict]): - status = directories[dir_name].get(ttype) - sol_points, solution_style = get_solution_points_and_style(ttype, status, cfg) - task_points = sol_points - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - dir_name, ttype, sol_points, plagiarism_cfg, cfg, semester="processes" - ) - task_points += plagiarism_points - perf_val = perf_map.get(dir_name, {}).get(ttype, "?") - acceleration, efficiency = calculate_performance_metrics( - perf_val, eff_num_proc, ttype - ) - deadline_points = calculate_deadline_penalty( - dir_name, ttype, status, deadlines_cfg, tasks_dir - ) - return ( - { - "solution_points": sol_points, - "solution_style": solution_style, - "perf": perf_val, - "acceleration": acceleration, - "efficiency": efficiency, - "deadline_points": deadline_points, - "plagiarised": is_cheated, - "plagiarism_points": plagiarism_points, - }, - task_points, - ) - - proc_infos = [] - for d in processes_task_dirs: - s = _load_student_info(d) - if s: - proc_infos.append((d, s)) - - # Choose target identity: prefer example_processes; otherwise most common - target_identity = None - if "example_processes" in processes_task_dirs: - s0 = _load_student_info("example_processes") - if s0: - target_identity = _identity_key(s0) - if not target_identity and proc_infos: - cnt = Counter(_identity_key(s) for _, s in proc_infos) - target_identity = cnt.most_common(1)[0][0] - - # Map task_number -> (dir_name, display_label) - num_to_dir: dict[int, tuple[str, str]] = {} - if target_identity: - for d, s in proc_infos: - if _identity_key(s) == target_identity: - try: - tn = int(str(s.get("task_number", "0"))) - except Exception: - continue - display = d - num_to_dir[tn] = 
(d, display) - - expected_numbers = [1, 2, 3] - proc_group_headers = [] - proc_top_headers = [] - proc_groups = [] - proc_r_values = [] - total_points_sum = 0 - for n in expected_numbers: - entry = num_to_dir.get(n) - if entry: - d, display_label = entry - # Top header shows task name (directory) - proc_top_headers.append(f"task-{n}") - # Second header row shows only mpi/seq - proc_group_headers.append({"type": "mpi"}) - proc_group_headers.append({"type": "seq"}) - group_cells = [] - for ttype in ["mpi", "seq"]: - cell, _ = _build_cell(d, ttype, perf_stats) - group_cells.append(cell) - # Override displayed points for processes: S under MPI/SEQ from points-info; A points under MPI only - s_mpi, s_seq, a_mpi, r_max = _find_process_points(cfg, n) - has_mpi = bool(directories[d].get("mpi")) - has_seq = bool(directories[d].get("seq")) - report_present = (tasks_dir / d / "report.md").exists() - group_cells[0]["solution_points"] = s_mpi if has_mpi else 0 - group_cells[1]["solution_points"] = s_seq if has_seq else 0 - # Calculate Performance P for MPI based on efficiency and max a_mpi - mpi_eff = group_cells[0].get("efficiency", "N/A") - perf_points_mpi = ( - _calc_perf_points_from_efficiency(mpi_eff, a_mpi) - if (has_mpi and has_seq) - else 0 - ) - # Display '—' instead of 0 when metrics are absent (efficiency not a percent) - if isinstance(mpi_eff, str) and mpi_eff.endswith("%"): - perf_points_mpi_display = perf_points_mpi - else: - perf_points_mpi_display = "—" - group_cells[0]["perf_points"] = perf_points_mpi - group_cells[0]["perf_points_display"] = perf_points_mpi_display - group_cells[1]["perf_points"] = 0 - # Recompute plagiarism penalty based on processes S maxima - try: - plag_coeff = float( - (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( - "coefficient", 0.0 - ) - ) - except Exception: - plag_coeff = 0.0 - p_mpi = ( - -plag_coeff * s_mpi - if (has_mpi and group_cells[0].get("plagiarised")) - else 0 - ) - p_seq = ( - -plag_coeff * s_seq - if (has_seq 
and group_cells[1].get("plagiarised")) - else 0 - ) - group_cells[0]["plagiarism_points"] = p_mpi - group_cells[1]["plagiarism_points"] = p_seq - proc_groups.extend(group_cells) - # Sum points S + P + R + C (penalty negative) with gating - s_inc = (s_mpi if has_mpi else 0) + (s_seq if has_seq else 0) - p_inc = perf_points_mpi - r_inc = r_max if report_present else 0 - total_points_sum += s_inc + p_inc + r_inc + p_mpi + p_seq - proc_r_values.append(r_inc) - else: - proc_group_headers.append({"type": "mpi", "task_label": f"task_{n}"}) - proc_group_headers.append({"type": "seq", "task_label": f"task_{n}"}) - proc_top_headers.append(f"task-{n}") - for _ in ["mpi", "seq"]: - proc_groups.append( - { - "solution_points": "?", - "solution_style": "", - "perf": "?", - "acceleration": "?", - "efficiency": "?", - "deadline_points": "?", - "plagiarised": False, - "plagiarism_points": "?", - } - ) - # Do not affect total; sum only existing tasks; report points 0 - proc_r_values.append(0) - - # Label for processes row: show Last, First, Middle on separate lines; no group number - row_label = "processes" - row_variant = "?" - if target_identity: - parts = target_identity.split("|") - if len(parts) >= 3: - first, last, middle = parts[0], parts[1], parts[2] - name_parts = [p for p in [last, first, middle] if p] - name = "
".join(name_parts) - row_label = name or row_label - - # Build three variants (one per task) based on student identity - row_variant = "?" - if target_identity: - parts = target_identity.split("|") - if len(parts) >= 4: - first, last, middle, group = parts[0], parts[1], parts[2], parts[3] - variants_render = [] - for n in expected_numbers: - vmax = _find_process_variants_max(cfg, n) - try: - v_idx = assign_variant( - surname=last, - name=first, - patronymic=middle, - group=group, - repo=f"{REPO_SALT}/processes/task-{n}", - num_variants=vmax, - ) - variants_render.append(str(v_idx + 1)) - except Exception: - variants_render.append("?") - row_variant = "
".join(variants_render) - processes_rows = [ - { - "task": row_label, - "variant": row_variant, - "groups": proc_groups, - "r_values": proc_r_values, - "r_total": sum(proc_r_values), - "total": total_points_sum, - } - ] - - # Rebuild threads rows with resolved perf stats - threads_rows = _build_rows_for_task_types( - task_types_threads, - threads_task_dirs, - perf_stats, - cfg, - eff_num_proc, - deadlines_cfg, - ) - - parser = argparse.ArgumentParser(description="Generate HTML scoreboard.") - parser.add_argument( - "-o", "--output", type=str, required=True, help="Output directory path" - ) - args = parser.parse_args() - - output_path = Path(args.output) - output_path.mkdir(parents=True, exist_ok=True) - - # Render tables - generated_msk = _now_msk().strftime("%Y-%m-%d %H:%M:%S") - table_template = env.get_template("index.html.j2") - threads_vmax = int((cfg.get("threads", {}) or {}).get("variants_max", 1)) - # Build display deadlines (use file values if present, fill missing with auto) - threads_order = task_types_threads - auto_threads_dl = _compute_display_deadlines_threads(threads_order) - dl_threads_out = {} - for t in threads_order: - base_date = auto_threads_dl.get(t) - # Default = 0 shift - shift_days = 0 - label = None - if deadlines_display_threads and t in deadlines_display_threads: - val = deadlines_display_threads.get(t) - if isinstance(val, int): - shift_days = val - else: - # try int-like string, else treat as explicit label - try: - shift_days = int(str(val).strip()) - except Exception: - label = str(val) - if label is None and isinstance(base_date, date): - vdate = base_date + timedelta(days=shift_days) - dl_threads_out[t] = _abbr(vdate) - else: - dl_threads_out[t] = label or "" - - threads_html = table_template.render( - task_types=task_types_threads, - rows=threads_rows, - generated_msk=generated_msk, - repo_salt=REPO_SALT, - threads_variants_max=threads_vmax, - deadlines_threads=dl_threads_out, - ) - # Use dedicated template for processes table 
layout - processes_template = env.get_template("processes.html.j2") - proc_vmaxes = [_find_process_variants_max(cfg, n) for n in expected_numbers] - # Build display deadlines for processes in task order (1..3) - auto_proc_dl = _compute_display_deadlines_processes(len(expected_numbers)) - proc_deadlines_list: list[str] = [] - for i, n in enumerate(expected_numbers): - base_date = auto_proc_dl[i] - shift_days = 0 - label = None - if deadlines_display_processes: - key = f"task_{n}" - val = deadlines_display_processes.get( - key - ) or deadlines_display_processes.get(f"mpi_task_{n}") - if val is not None: - if isinstance(val, int): - shift_days = val - else: - try: - shift_days = int(str(val).strip()) - except Exception: - label = str(val) - if label is None and isinstance(base_date, date): - vdate = base_date + timedelta(days=shift_days) - proc_deadlines_list.append(_abbr(vdate)) - else: - proc_deadlines_list.append(label or "") - - processes_html = processes_template.render( - top_task_names=proc_top_headers, - group_headers=proc_group_headers, - rows=processes_rows, - generated_msk=generated_msk, - repo_salt=REPO_SALT, - processes_variants_max=proc_vmaxes, - deadlines_processes=proc_deadlines_list, - ) - - with open(output_path / "threads.html", "w") as f: - f.write(threads_html) - with open(output_path / "processes.html", "w") as f: - f.write(processes_html) - - # ——— Build per-group pages and group menus ———————————————————————— - def _load_group_number(dir_name: str): - import json - - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return None - try: - with open(info_path, "r") as f: - data = json.load(f) - return data.get("student", {}).get("group_number") - except Exception: - return None - - def _slugify(text: str) -> str: - return "".join( - ch if ch.isalnum() or ch in ("-", "_") else "_" for ch in str(text) - ) - - # Collect groups - threads_groups = sorted( - set(filter(None, (_load_group_number(d) for d in 
threads_task_dirs))) - ) - processes_groups = sorted( - set(filter(None, (_load_group_number(d) for d in processes_task_dirs))) - ) - - # Threads: per-group pages - threads_groups_menu = [] - for g in threads_groups: - slug = _slugify(g) - out_file = output_path / f"threads_{slug}.html" - filtered_dirs = [d for d in threads_task_dirs if _load_group_number(d) == g] - rows_g = _build_rows_for_task_types( - task_types_threads, - filtered_dirs, - perf_stats, - cfg, - eff_num_proc, - deadlines_cfg, - ) - # Rebuild deadline labels for this page - auto_threads_dl_g = _compute_display_deadlines_threads(threads_order) - dl_threads_out_g = {} - for t in threads_order: - base_date = auto_threads_dl_g.get(t) - shift_days = 0 - label = None - if deadlines_display_threads and t in deadlines_display_threads: - val = deadlines_display_threads.get(t) - if isinstance(val, int): - shift_days = val - else: - try: - shift_days = int(str(val).strip()) - except Exception: - label = str(val) - if label is None and isinstance(base_date, date): - vdate = base_date + timedelta(days=shift_days) - dl_threads_out_g[t] = _abbr(vdate) - else: - dl_threads_out_g[t] = label or "" - - html_g = table_template.render( - task_types=task_types_threads, - rows=rows_g, - generated_msk=generated_msk, - repo_salt=REPO_SALT, - threads_variants_max=threads_vmax, - deadlines_threads=dl_threads_out_g, - ) - with open(out_file, "w") as f: - f.write(html_g) - threads_groups_menu.append({"href": out_file.name, "title": g}) - - # Processes: per-group pages - processes_groups_menu = [] - for g in processes_groups: - slug = _slugify(g) - out_file = output_path / f"processes_{slug}.html" - filtered_dirs = [d for d in processes_task_dirs if _load_group_number(d) == g] - - # Reuse earlier logic but limited to filtered_dirs - import json as _json - - def _load_student_info_group(dir_name: str): - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return None - try: - with open(info_path, "r") 
as f: - data = _json.load(f) - return data.get("student", {}) - except Exception: - return None - - def _id_key(stud: dict) -> str: - return "|".join( - [ - str(stud.get("first_name", "")), - str(stud.get("last_name", "")), - str(stud.get("middle_name", "")), - str(stud.get("group_number", "")), - ] - ) - - proc_infos_g = [] - for d in filtered_dirs: - s = _load_student_info_group(d) - if s: - proc_infos_g.append((d, s)) - - target_identity_g = None - if "example_processes" in filtered_dirs: - s0 = _load_student_info_group("example_processes") - if s0 and s0.get("group_number") == g: - target_identity_g = _id_key(s0) - if not target_identity_g and proc_infos_g: - cnt = Counter(_id_key(s) for _, s in proc_infos_g) - target_identity_g = cnt.most_common(1)[0][0] - - num_to_dir_g: dict[int, tuple[str, str]] = {} - if target_identity_g: - for d, s in proc_infos_g: - if _id_key(s) == target_identity_g: - try: - tn = int(str(s.get("task_number", "0"))) - except Exception: - continue - num_to_dir_g[tn] = (d, d) - - proc_top_headers_g = [] - proc_group_headers_g = [] - proc_groups_g = [] - proc_r_values_g = [] - total_points_sum_g = 0 - for n in [1, 2, 3]: - entry = num_to_dir_g.get(n) - if entry: - d, display_label = entry - proc_top_headers_g.append(f"task-{n}") - for ttype in ["mpi", "seq"]: - proc_group_headers_g.append({"type": ttype}) - # build cell - status = directories[d].get(ttype) - sol_points, solution_style = get_solution_points_and_style( - ttype, status, cfg - ) - task_points = sol_points - is_cheated, plagiarism_points = ( - check_plagiarism_and_calculate_penalty( - d, - ttype, - sol_points, - plagiarism_cfg, - cfg, - semester="processes", - ) - ) - task_points += plagiarism_points - perf_val = perf_stats.get(d, {}).get(ttype, "?") - acceleration, efficiency = calculate_performance_metrics( - perf_val, eff_num_proc, ttype - ) - deadline_points = calculate_deadline_penalty( - d, ttype, status, deadlines_cfg, tasks_dir - ) - proc_groups_g.append( - { - 
"solution_points": sol_points, - "solution_style": solution_style, - "perf": perf_val, - "acceleration": acceleration, - "efficiency": efficiency, - "deadline_points": deadline_points, - "plagiarised": is_cheated, - "plagiarism_points": plagiarism_points, - } - ) - # Override displayed points to processes maxima and recompute P - s_mpi_g, s_seq_g, a_max_g, r_max_g = _find_process_points(cfg, n) - has_mpi_g = bool(directories[d].get("mpi")) - has_seq_g = bool(directories[d].get("seq")) - report_present_g = (tasks_dir / d / "report.md").exists() - base_idx = len(proc_groups_g) - 2 - if base_idx >= 0: - proc_groups_g[base_idx]["solution_points"] = ( - s_mpi_g if has_mpi_g else 0 - ) - proc_groups_g[base_idx + 1]["solution_points"] = ( - s_seq_g if has_seq_g else 0 - ) - # Performance for MPI cell - mpi_eff_g = proc_groups_g[base_idx].get("efficiency", "N/A") - perf_points_mpi_g = ( - _calc_perf_points_from_efficiency(mpi_eff_g, a_max_g) - if (has_mpi_g and has_seq_g) - else 0 - ) - if isinstance(mpi_eff_g, str) and mpi_eff_g.endswith("%"): - perf_points_mpi_display_g = perf_points_mpi_g - else: - perf_points_mpi_display_g = "—" - proc_groups_g[base_idx]["perf_points"] = perf_points_mpi_g - proc_groups_g[base_idx]["perf_points_display"] = ( - perf_points_mpi_display_g - ) - proc_groups_g[base_idx + 1]["perf_points"] = 0 - try: - plag_coeff_g = float( - (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( - "coefficient", 0.0 - ) - ) - except Exception: - plag_coeff_g = 0.0 - p_mpi_g = ( - -plag_coeff_g * s_mpi_g - if (has_mpi_g and proc_groups_g[base_idx].get("plagiarised")) - else 0 - ) - p_seq_g = ( - -plag_coeff_g * s_seq_g - if ( - has_seq_g and proc_groups_g[base_idx + 1].get("plagiarised") - ) - else 0 - ) - proc_groups_g[base_idx]["plagiarism_points"] = p_mpi_g - proc_groups_g[base_idx + 1]["plagiarism_points"] = p_seq_g - - # Sum points by processes S + P + R (and C penalties) - s_inc_g = (s_mpi_g if has_mpi_g else 0) + (s_seq_g if has_seq_g else 0) - 
r_inc_g = r_max_g if report_present_g else 0 - total_points_sum_g += ( - s_inc_g + perf_points_mpi_g + r_inc_g + p_mpi_g + p_seq_g - ) - proc_r_values_g.append(r_inc_g) - else: - proc_top_headers_g.append(f"task-{n}") - for ttype in ["mpi", "seq"]: - proc_group_headers_g.append({"type": ttype}) - proc_groups_g.append( - { - "solution_points": "?", - "solution_style": "", - "perf": "?", - "acceleration": "?", - "efficiency": "?", - "deadline_points": "?", - "plagiarised": False, - "plagiarism_points": "?", - } - ) - # Missing task: do not affect total; sum only existing; report=0 - proc_r_values_g.append(0) - - # Row label for group page: name without group (three lines max) - row_label_g = f"group {g}" - if target_identity_g: - parts = target_identity_g.split("|") - if len(parts) >= 3: - first, last, middle = parts[0], parts[1], parts[2] - nm_parts = [p for p in [last, first, middle] if p] - nm = "
".join(nm_parts) - row_label_g = nm or row_label_g - - # Build three variants (one per task) based on student identity - row_variant_g = "?" - if target_identity_g: - parts = target_identity_g.split("|") - if len(parts) >= 4: - first, last, middle, group = parts[0], parts[1], parts[2], parts[3] - vrender = [] - for n in [1, 2, 3]: - vmax = _find_process_variants_max(cfg, n) - try: - v_idx = assign_variant( - surname=last, - name=first, - patronymic=middle, - group=group, - repo=f"{REPO_SALT}/processes/task-{n}", - num_variants=vmax, - ) - vrender.append(str(v_idx + 1)) - except Exception: - vrender.append("?") - row_variant_g = "
".join(vrender) - - rows_g = [ - { - "task": row_label_g, - "variant": row_variant_g, - "groups": proc_groups_g, - "r_values": proc_r_values_g, - "r_total": sum(proc_r_values_g), - "total": total_points_sum_g, - } - ] - - proc_vmaxes_g = [_find_process_variants_max(cfg, n) for n in [1, 2, 3]] - # Build display deadlines for processes group page - auto_proc_dl_g = _compute_display_deadlines_processes(3) - proc_deadlines_list_g: list[str] = [] - for i, n in enumerate([1, 2, 3]): - base_date = auto_proc_dl_g[i] - shift_days = 0 - label = None - if deadlines_display_processes: - key = f"task_{n}" - val = deadlines_display_processes.get( - key - ) or deadlines_display_processes.get(f"mpi_task_{n}") - if val is not None: - if isinstance(val, int): - shift_days = val - else: - try: - shift_days = int(str(val).strip()) - except Exception: - label = str(val) - if label is None and isinstance(base_date, date): - vdate = base_date + timedelta(days=shift_days) - proc_deadlines_list_g.append(_abbr(vdate)) - else: - proc_deadlines_list_g.append(label or "") - - html_g = processes_template.render( - top_task_names=proc_top_headers_g, - group_headers=proc_group_headers_g, - rows=rows_g, - generated_msk=generated_msk, - repo_salt=REPO_SALT, - processes_variants_max=proc_vmaxes_g, - deadlines_processes=proc_deadlines_list_g, - ) - with open(out_file, "w") as f: - f.write(html_g) - processes_groups_menu.append({"href": out_file.name, "title": g}) - - # Render index menu page - try: - menu_template = env.get_template("menu_index.html.j2") - except Exception: - # Simple fallback menu if template missing - menu_html_content = ( - 'Scoreboard' - "

Scoreboard

" - "" - ) - else: - menu_html_content = menu_template.render( - pages=[ - {"href": "threads.html", "title": "Threads Scoreboard"}, - {"href": "processes.html", "title": "Processes Scoreboard"}, - ], - groups_threads=threads_groups_menu, - groups_processes=processes_groups_menu, - generated_msk=generated_msk, - ) - - with open(output_path / "index.html", "w") as f: - f.write(menu_html_content) - - # Copy static assets - static_src = script_dir / "static" - static_dst = output_path / "static" - if static_src.exists(): - if static_dst.exists(): - shutil.rmtree(static_dst) - shutil.copytree(static_src, static_dst) - logger.info("Static directory copied to %s", static_dst) - else: - logger.warning("Static directory not found at %s", static_src) - - logger.info( - "HTML pages generated at %s (index.html, threads.html, processes.html)", - output_path, - ) - - -if __name__ == "__main__": - main() +from pathlib import Path +from collections import defaultdict, Counter +from datetime import datetime +import csv +import argparse +import subprocess +import yaml +import shutil +from jinja2 import Environment, FileSystemLoader +import logging +import sys + +# Try ZoneInfo from stdlib, then from backports, else fall back to naive time +try: + from zoneinfo import ZoneInfo # type: ignore +except Exception: # pragma: no cover - fallback for Python < 3.9 + try: + from backports.zoneinfo import ZoneInfo # type: ignore + except Exception: # Last resort: define a stub + ZoneInfo = None # type: ignore + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +task_types = ["all", "mpi", "omp", "seq", "stl", "tbb"] +# Threads table order: seq first, then omp, tbb, stl, all +task_types_threads = ["seq", "omp", "tbb", "stl", "all"] +task_types_processes = ["mpi", "seq"] + +script_dir = Path(__file__).parent +tasks_dir = script_dir.parent / "tasks" +# Salt is derived from the repository root directory name (dynamic) +REPO_ROOT = 
script_dir.parent.resolve() +# Salt format: "learning_process/" +REPO_SALT = f"learning_process/{REPO_ROOT.name}" + +# Ensure we can import assign_variant from scoreboard directory +if str(script_dir) not in sys.path: + sys.path.insert(0, str(script_dir)) +try: + from assign_variant import assign_variant +except Exception: + + def assign_variant( + surname: str, + name: str, + group: str, + repo: str, + patronymic: str = "", + num_variants: int = 1, + ) -> int: + return 0 + + +def _now_msk(): + """Return current datetime in MSK if tz support is available, else local time.""" + try: + if ZoneInfo is not None: + return datetime.now(ZoneInfo("Europe/Moscow")) + except Exception: + pass + return datetime.now() + + +def _read_tasks_type(task_dir: Path) -> str | None: + """Read tasks_type from settings.json in the task directory (if present).""" + settings_path = task_dir / "settings.json" + if settings_path.exists(): + try: + import json + + with open(settings_path, "r") as f: + data = json.load(f) + return data.get("tasks_type") # "threads" or "processes" + except Exception as e: + logger.warning("Failed to parse %s: %s", settings_path, e) + return None + + +def discover_tasks(tasks_dir, task_types): + """Discover tasks and their implementation status from the filesystem. 
+ + Returns: + directories: dict[task_name][task_type] -> status + tasks_type_map: dict[task_name] -> "threads" | "processes" | None + """ + directories = defaultdict(dict) + tasks_type_map: dict[str, str | None] = {} + + if tasks_dir.exists() and tasks_dir.is_dir(): + for task_name_dir in tasks_dir.iterdir(): + if task_name_dir.is_dir() and task_name_dir.name not in ["common"]: + task_name = task_name_dir.name + # Save tasks_type from settings.json if present + tasks_type_map[task_name] = _read_tasks_type(task_name_dir) + for task_type in task_types: + task_type_dir = task_name_dir / task_type + if task_type_dir.exists() and task_type_dir.is_dir(): + if task_name.endswith("_disabled"): + clean_task_name = task_name[: -len("_disabled")] + directories[clean_task_name][task_type] = "disabled" + else: + directories[task_name][task_type] = "done" + + return directories, tasks_type_map + + +directories, tasks_type_map = discover_tasks(tasks_dir, task_types) + + +def load_performance_data_threads(perf_stat_file_path: Path) -> dict: + """Load threads performance ratios (T_x/T_seq) from CSV. + Expected header: Task, SEQ, OMP, TBB, STL, ALL + """ + perf_stats: dict[str, dict] = {} + if perf_stat_file_path.exists(): + with open(perf_stat_file_path, "r", newline="") as csvfile: + reader = csv.DictReader(csvfile) + for row in reader: + task_name = row.get("Task") + if not task_name: + continue + perf_stats[task_name] = { + "seq": row.get("SEQ", "?"), + "omp": row.get("OMP", "?"), + "tbb": row.get("TBB", "?"), + "stl": row.get("STL", "?"), + "all": row.get("ALL", "?"), + } + else: + logger.warning("Threads perf stats CSV not found at %s", perf_stat_file_path) + return perf_stats + + +def load_performance_data_processes(perf_stat_file_path: Path) -> dict: + """Load processes performance ratios (T_x/T_seq) from CSV. 
+ Expected header: Task, SEQ, MPI + """ + perf_stats: dict[str, dict] = {} + if perf_stat_file_path.exists(): + with open(perf_stat_file_path, "r", newline="") as csvfile: + reader = csv.DictReader(csvfile) + for row in reader: + task_name = row.get("Task") + if not task_name: + continue + perf_stats[task_name] = { + "seq": row.get("SEQ", "?"), + "mpi": row.get("MPI", "?"), + } + else: + logger.warning("Processes perf stats CSV not found at %s", perf_stat_file_path) + return perf_stats + + +def calculate_performance_metrics(perf_val, eff_num_proc, task_type): + """Calculate acceleration and efficiency from performance value.""" + acceleration = "?" + efficiency = "?" + try: + perf_float = float(perf_val) + if perf_float > 0 and not ( + perf_float == float("inf") or perf_float != perf_float + ): + speedup = 1.0 / perf_float + # For sequential code, acceleration and efficiency don't make sense + # as it should be the baseline (speedup = 1.0 by definition) + if task_type == "seq": + acceleration = "1.00" # Sequential is the baseline + efficiency = "N/A" + else: + acceleration = f"{speedup:.2f}" + efficiency = f"{speedup / eff_num_proc * 100:.2f}%" + except (ValueError, TypeError): + pass + return acceleration, efficiency + + +def _find_max_solution(points_info, task_type: str) -> int: + """Resolve max S for a given task type from points-info (threads list).""" + threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) + for t in threads_tasks: + if str(t.get("name")) == task_type: + try: + return int(t.get("S", 0)) + except Exception: + return 0 + if task_type == "mpi": + return 0 + return 0 + + +def _find_report_max(points_info, task_type: str) -> int: + """Resolve max Report (R) points for a given task type from points-info (threads). + Returns 0 if not found. 
+ """ + threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) + for t in threads_tasks: + if str(t.get("name")) == task_type: + try: + return int(t.get("R", 0)) + except Exception: + return 0 + return 0 + + +def _find_performance_max(points_info, task_type: str) -> int: + """Resolve max Performance (A) points for a given task type (threads).""" + threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) + for t in threads_tasks: + if str(t.get("name")) == task_type: + try: + return int(t.get("A", 0)) + except Exception: + return 0 + return 0 + + +def _calc_perf_points_from_efficiency(efficiency_str: str, max_points: int) -> float: + """Calculate Performance points as a real number (x.yy). + + Mapping (eff -> percent of max): + >=50 -> 100; [45,50) -> 90; [42,45) -> 80; [40,42) -> 70; [37,40) -> 60; + [35,37) -> 50; [32,35) -> 40; [30,32) -> 30; [27,30) -> 20; [25,27) -> 10; <25 -> 0 + Returns a float rounded to 2 decimals (no ceil). + """ + if not isinstance(efficiency_str, str) or not efficiency_str.endswith("%"): + return 0.0 + try: + val = float(efficiency_str.rstrip("%")) + except Exception: + return 0.0 + perc = 0.0 + if val >= 50: + perc = 1.0 + elif 45 <= val < 50: + perc = 0.9 + elif 42 <= val < 45: + perc = 0.8 + elif 40 <= val < 42: + perc = 0.7 + elif 37 <= val < 40: + perc = 0.6 + elif 35 <= val < 37: + perc = 0.5 + elif 32 <= val < 35: + perc = 0.4 + elif 30 <= val < 32: + perc = 0.3 + elif 27 <= val < 30: + perc = 0.2 + elif 25 <= val < 27: + perc = 0.1 + else: + perc = 0.0 + pts = max_points * perc if max_points > 0 else 0.0 + # round to 2 decimals (banker's rounding acceptable here) + return round(pts, 2) + + +def _find_process_report_max(points_info, task_number: int) -> int: + """Get max report (R) points for process task by ordinal (1..3). + Looks up processes.tasks with names like 'mpi_task_1'. 
+ """ + proc = (points_info.get("processes", {}) or {}).get("tasks", []) + key = f"mpi_task_{task_number}" + for t in proc: + if str(t.get("name")) == key: + try: + return int(t.get("R", 0)) + except Exception: + return 0 + return 0 + + +def _find_process_points(points_info, task_number: int) -> tuple[int, int, int, int]: + """Return (S_mpi, S_seq, A_mpi, R) maxima for a given process task ordinal (1..3). + Supports both mapping and list-of-maps (per user's YAML example). + """ + proc_tasks = (points_info.get("processes", {}) or {}).get("tasks", []) + key = f"mpi_task_{task_number}" + for t in proc_tasks: + if str(t.get("name")) == key: + + def _extract(obj, k): + if isinstance(obj, dict): + return int(obj.get(k, 0)) + if isinstance(obj, list): + for it in obj: + if isinstance(it, dict) and k in it: + try: + return int(it.get(k, 0)) + except Exception: + return 0 + return 0 + + mpi_blk = t.get("mpi", {}) + seq_blk = t.get("seq", {}) + s_mpi = _extract(mpi_blk, "S") + a_mpi = _extract(mpi_blk, "A") + s_seq = _extract(seq_blk, "S") + try: + r = int(t.get("R", 0)) + except Exception: + r = 0 + return s_mpi, s_seq, a_mpi, r + return 0, 0, 0, 0 + + +def _find_process_variants_max(points_info, task_number: int) -> int: + proc_tasks = (points_info.get("processes", {}) or {}).get("tasks", []) + key = f"mpi_task_{task_number}" + for t in proc_tasks: + if str(t.get("name")) == key: + try: + return int(t.get("variants_max", 1)) + except Exception: + return 1 + return 1 + + +def get_solution_points_and_style(task_type, status, cfg): + """Get solution points and CSS style based on task type and status.""" + max_sol_points = _find_max_solution(cfg, task_type) + sol_points = max_sol_points if status in ("done", "disabled") else 0 + solution_style = "" + if status == "done": + solution_style = "background-color: lightgreen;" + elif status == "disabled": + solution_style = "background-color: #6495ED;" + return sol_points, solution_style + + +def 
check_plagiarism_and_calculate_penalty( + dir, task_type, sol_points, plagiarism_cfg, cfg, semester: str | None +): + """Check if task is plagiarized and calculate penalty points. + + Supports two config layouts: + - legacy: { plagiarism: { seq: [...], omp: [...], ... } } + - semesters: { threads: {plagiarism: {...}}, processes: {plagiarism: {...}} } + """ + clean_dir = dir[: -len("_disabled")] if dir.endswith("_disabled") else dir + + # Resolve copying/plagiarism mapping based on layout + plag_map = {} + if isinstance(plagiarism_cfg, dict) and ( + "copying" in plagiarism_cfg or "plagiarism" in plagiarism_cfg + ): + plag_map = ( + plagiarism_cfg.get("copying") + if "copying" in plagiarism_cfg + else plagiarism_cfg.get("plagiarism", {}) + ) or {} + elif ( + isinstance(plagiarism_cfg, dict) + and semester + and semester in plagiarism_cfg + and isinstance(plagiarism_cfg[semester], dict) + ): + inner = plagiarism_cfg[semester] + plag_map = ( + inner.get("copying") if "copying" in inner else inner.get("plagiarism", {}) + ) or {} + + flagged_list = set(plag_map.get(task_type, []) or []) + is_cheated = dir in flagged_list or clean_dir in flagged_list + plagiarism_points = 0 + if is_cheated: + # Prefer new key 'copying', fallback to legacy 'plagiarism' + try: + plag_coeff = float( + (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( + "coefficient", 0.0 + ) + ) + except Exception: + plag_coeff = 0.0 + plagiarism_points = -plag_coeff * sol_points + return is_cheated, plagiarism_points + + +def calculate_deadline_penalty(dir, task_type, status, deadlines_cfg, tasks_dir): + """Calculate deadline penalty points based on git commit timestamp.""" + deadline_points = 0 + deadline_str = deadlines_cfg.get(task_type) + if status == "done" and deadline_str: + try: + deadline_dt = datetime.fromisoformat(deadline_str) + git_cmd = [ + "git", + "log", + "-1", + "--format=%ct", + str( + tasks_dir + / (dir + ("_disabled" if status == "disabled" else "")) + / task_type + ), + ] + 
result = subprocess.run(git_cmd, capture_output=True, text=True) + if result.stdout.strip().isdigit(): + commit_dt = datetime.fromtimestamp(int(result.stdout.strip())) + days_late = (commit_dt - deadline_dt).days + if days_late > 0: + deadline_points = -days_late + except Exception: + pass + return deadline_points + + +def load_configurations(): + """Load points-info (max points, deadlines, efficiency) and plagiarism lists.""" + points_info_path = Path(__file__).parent / "data" / "points-info.yml" + assert points_info_path.exists(), f"Points info file not found: {points_info_path}" + with open(points_info_path, "r") as f: + points_info = yaml.safe_load(f) + assert points_info, "Points info is empty" + + eff_num_proc = int(points_info.get("efficiency", {}).get("num_proc", 1)) + deadlines_cfg = points_info.get("deadlines", {}) + + plagiarism_config_path = Path(__file__).parent / "data" / "copying.yml" + with open(plagiarism_config_path, "r") as file: + plagiarism_cfg = yaml.safe_load(file) + assert plagiarism_cfg, "Plagiarism configuration is empty" + + return points_info, eff_num_proc, deadlines_cfg, plagiarism_cfg + + +def _build_rows_for_task_types( + selected_task_types: list[str], + dir_names: list[str], + perf_stats: dict, + cfg, + eff_num_proc, + deadlines_cfg, +): + """Build rows for the given list of task directories and selected task types.""" + rows = [] + + def _load_student_info_label(dir_name: str): + import json + + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + s = data.get("student", {}) + last = s.get("last_name", "") + first = s.get("first_name", "") + middle = s.get("middle_name", "") + parts = [p for p in [last, first, middle] if p] + label = "
".join(parts) + return label if label else None + except Exception: + return None + + def _load_student_fields(dir_name: str): + import json + + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + s = data.get("student", {}) + return ( + str(s.get("last_name", "")), + str(s.get("first_name", "")), + str(s.get("middle_name", "")), + str(s.get("group_number", "")), + ) + except Exception: + return None + + for dir in sorted(dir_names): + row_types = [] + total_count = 0 + for task_type in selected_task_types: + status = directories[dir].get(task_type) + sol_points, solution_style = get_solution_points_and_style( + task_type, status, cfg + ) + + task_points = sol_points + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + dir, task_type, sol_points, plagiarism_cfg, cfg, semester="threads" + ) + task_points += plagiarism_points + + perf_val = perf_stats.get(dir, {}).get(task_type, "?") + + # Calculate acceleration and efficiency if performance data is available + acceleration, efficiency = calculate_performance_metrics( + perf_val, eff_num_proc, task_type + ) + + # Calculate deadline penalty points + deadline_points = calculate_deadline_penalty( + dir, task_type, status, deadlines_cfg, tasks_dir + ) + + # Report presence: award R only if report.md exists inside the task directory + report_present = (tasks_dir / dir / "report.md").exists() + report_points = _find_report_max(cfg, task_type) if report_present else 0 + + # Performance points P for non-seq types, based on efficiency + perf_max = _find_performance_max(cfg, task_type) + if task_type != "seq": + perf_points = _calc_perf_points_from_efficiency(efficiency, perf_max) + perf_points_display = ( + f"{perf_points:.2f}" + if isinstance(efficiency, str) and efficiency.endswith("%") + else "—" + ) + else: + perf_points = 0.0 + perf_points_display = "—" + + row_types.append( + { + 
"solution_points": sol_points, + "solution_style": solution_style, + "perf": perf_val, + "acceleration": acceleration, + "efficiency": efficiency, + "perf_points": perf_points, + "perf_points_display": perf_points_display, + "deadline_points": deadline_points, + "plagiarised": is_cheated, + "plagiarism_points": plagiarism_points, + "report": report_points, + } + ) + # Total: include Solution + Performance + Report + Copying penalty (exclude Deadline) + total_count += task_points + perf_points + report_points + + label_name = _load_student_info_label(dir) or dir + # Generate variant for threads based on student info and variants_max + threads_vmax = int((cfg.get("threads", {}) or {}).get("variants_max", 1)) + fields = _load_student_fields(dir) + if fields: + last, first, middle, group = fields + try: + v_idx = assign_variant( + last, + first, + group, + REPO_SALT, + patronymic=middle, + num_variants=threads_vmax, + ) + variant = str(v_idx + 1) + except Exception: + variant = "?" + else: + variant = "?" + rows.append( + { + "task": label_name, + "variant": variant, + "types": row_types, + "total": total_count, + } + ) + return rows + + +def main(): + """Main function to generate the scoreboard. 
+ + Now generates three pages in the output dir: + - index.html: simple menu linking to threads.html and processes.html + - threads.html: scoreboard for thread-based tasks + - processes.html: scoreboard for process-based tasks + """ + cfg, eff_num_proc, deadlines_cfg, plagiarism_cfg_local = load_configurations() + + # Make plagiarism config available to rows builder + global plagiarism_cfg + plagiarism_cfg = plagiarism_cfg_local + + env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates")) + + # Load optional display deadlines from deadlines.yml and/or auto-compute evenly + deadlines_display_threads: dict[str, str] | None = None + deadlines_display_processes: dict[str, str] | None = None + try: + dl_file = script_dir / "data" / "deadlines.yml" + if dl_file.exists(): + with open(dl_file, "r") as f: + dl_cfg = yaml.safe_load(f) or {} + deadlines_display_threads = dl_cfg.get("threads") or {} + deadlines_display_processes = dl_cfg.get("processes") or {} + except Exception: + pass + + # Helper: compute evenly spaced dates for current semester (MSK) + from datetime import date, timedelta + import calendar + + def _abbr(day: date) -> str: + return f"{day.day} {calendar.month_abbr[day.month]}" + + def _spring_bounds(today: date) -> tuple[date, date]: + """Return [1 Feb .. 15 May] window for the appropriate year. + If today is past 15 May, use next year's spring; otherwise this year's. + """ + y = today.year + start = date(y, 2, 1) + end = date(y, 5, 15) + if today > end: + y += 1 + start = date(y, 2, 1) + end = date(y, 5, 15) + return start, end + + def _autumn_bounds(today: date) -> tuple[date, date]: + """Return [15 Oct .. 14 Dec] window for the appropriate year. + If today is past 14 Dec, use next year's autumn; otherwise this year's. 
+ """ + y = today.year + start = date(y, 10, 15) + end = date(y, 12, 14) + if today > end: + y += 1 + start = date(y, 10, 15) + end = date(y, 12, 14) + return start, end + + def _evenly_spaced_dates(n: int, start: date, end: date) -> list[date]: + """ + Return n deadlines evenly spaced across the window (start..end], + i.e., strictly after the start date, with the last at end. + Positions are at fractions (i+1)/n of the total span. + """ + if n <= 1: + return [end] + total = (end - start).days + if total < 0: + start, end = end, start + total = -total + res = [] + for i in range(n): + off = int(round((i + 1) * total / n)) + if off <= 0: + off = 1 + if off > total: + off = total + res.append(start + timedelta(days=off)) + return res + + def _compute_display_deadlines_threads(order: list[str]) -> dict[str, date]: + # Threads = Spring semester (prefer MSK; fallback to local time) + try: + today = _now_msk().date() + except Exception: + today = datetime.now().date() + s, e = _spring_bounds(today) + ds = _evenly_spaced_dates(len(order), s, e) + return {t: d for t, d in zip(order, ds)} + + def _compute_display_deadlines_processes(n_items: int) -> list[date]: + # Processes = Autumn semester (prefer MSK; fallback to local time) + try: + today = _now_msk().date() + except Exception: + today = datetime.now().date() + s, e = _autumn_bounds(today) + ds = _evenly_spaced_dates(n_items, s, e) + return ds + + # Locate perf CSVs from CI or local runs (threads and processes) + candidates_threads = [ + script_dir.parent + / "build" + / "perf_stat_dir" + / "threads_task_run_perf_table.csv", + script_dir.parent / "perf_stat_dir" / "threads_task_run_perf_table.csv", + # Fallback to old single-file name + script_dir.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv", + script_dir.parent / "perf_stat_dir" / "task_run_perf_table.csv", + ] + threads_csv = next( + (p for p in candidates_threads if p.exists()), candidates_threads[0] + ) + + candidates_processes = [ + 
script_dir.parent + / "build" + / "perf_stat_dir" + / "processes_task_run_perf_table.csv", + script_dir.parent / "perf_stat_dir" / "processes_task_run_perf_table.csv", + ] + processes_csv = next( + (p for p in candidates_processes if p.exists()), candidates_processes[0] + ) + + # Read and merge performance statistics CSVs (keys = CSV Task column) + perf_stats_threads = load_performance_data_threads(threads_csv) + perf_stats_processes = load_performance_data_processes(processes_csv) + perf_stats_raw: dict[str, dict] = {} + perf_stats_raw.update(perf_stats_threads) + for k, v in perf_stats_processes.items(): + perf_stats_raw[k] = {**perf_stats_raw.get(k, {}), **v} + + # Partition tasks by tasks_type from settings.json + threads_task_dirs = [ + name for name, ttype in tasks_type_map.items() if ttype == "threads" + ] + processes_task_dirs = [ + name for name, ttype in tasks_type_map.items() if ttype == "processes" + ] + + # Fallback: if settings.json is missing, guess by directory name heuristic + for name in directories.keys(): + if name not in tasks_type_map or tasks_type_map[name] is None: + if "threads" in name: + threads_task_dirs.append(name) + elif "processes" in name: + processes_task_dirs.append(name) + + # Resolve performance stats keys (from CSV Task names) to actual task directories + import re as _re + + def _family_from_name(name: str) -> tuple[str, int]: + # Infer family from CSV Task value, using only structural markers + # threads -> ("threads", 0); processes[_N] -> ("processes", N|1) + if "threads" in name: + return "threads", 0 + if "processes" in name: + m = _re.search(r"processes(?:_(\d+))?", name) + if m: + try: + idx = int(m.group(1)) if m.group(1) else 1 + except Exception: + idx = 1 + else: + idx = 1 + return "processes", idx + # Fallback: treat as threads family + return "threads", 0 + + def _family_from_dir(dir_name: str) -> tuple[str, int]: + # Prefer explicit tasks_type from settings.json and task_number from info.json + kind_guess = 
tasks_type_map.get(dir_name) or ( + "threads" if "threads" in dir_name else "processes" + ) + idx = 0 + if kind_guess == "processes": + # Lightweight reader to avoid dependency on later-scoped helpers + try: + import json as _json + + info_path = tasks_dir / dir_name / "info.json" + if info_path.exists(): + with open(info_path, "r") as _f: + data = _json.load(_f) + s = data.get("student", {}) if isinstance(data, dict) else {} + try: + idx = int(str(s.get("task_number", "0"))) + except Exception: + idx = 0 + except Exception: + idx = 0 + return kind_guess, idx + + # Build map family -> list of dir names in this repo + family_to_dirs: dict[tuple[str, int], list[str]] = {} + for d in sorted(directories.keys()): + fam = _family_from_dir(d) + family_to_dirs.setdefault(fam, []).append(d) + + # Aggregate perf by family (CSV keys may not match dir names) + perf_by_family: dict[tuple[str, int], dict] = {} + for key, vals in perf_stats_raw.items(): + fam = _family_from_name(key) + perf_by_family[fam] = {**perf_by_family.get(fam, {}), **vals} + + # Project family perf onto actual directories (prefer exact one per family) + perf_stats: dict[str, dict] = {} + for fam, vals in perf_by_family.items(): + dirs_for_family = family_to_dirs.get(fam, []) + if not dirs_for_family: + continue + # Assign same perf to all dirs in the family (usually one) + for d in dirs_for_family: + perf_stats[d] = vals.copy() + + # Build rows for each page + threads_rows = _build_rows_for_task_types( + task_types_threads, + threads_task_dirs, + perf_stats, + cfg, + eff_num_proc, + deadlines_cfg, + ) + # Processes page: build 3 tasks as columns for a single student + import json + + def _load_student_info(dir_name: str): + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + return data.get("student", {}) + except Exception as e: + logger.warning("Failed to parse %s: %s", info_path, e) + return None + + 
def _identity_key(student: dict) -> str: + return "|".join( + [ + str(student.get("first_name", "")), + str(student.get("last_name", "")), + str(student.get("middle_name", "")), + str(student.get("group_number", "")), + ] + ) + + def _build_cell(dir_name: str, ttype: str, perf_map: dict[str, dict]): + status = directories[dir_name].get(ttype) + sol_points, solution_style = get_solution_points_and_style(ttype, status, cfg) + task_points = sol_points + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + dir_name, ttype, sol_points, plagiarism_cfg, cfg, semester="processes" + ) + task_points += plagiarism_points + perf_val = perf_map.get(dir_name, {}).get(ttype, "?") + acceleration, efficiency = calculate_performance_metrics( + perf_val, eff_num_proc, ttype + ) + deadline_points = calculate_deadline_penalty( + dir_name, ttype, status, deadlines_cfg, tasks_dir + ) + return ( + { + "solution_points": sol_points, + "solution_style": solution_style, + "perf": perf_val, + "acceleration": acceleration, + "efficiency": efficiency, + "deadline_points": deadline_points, + "plagiarised": is_cheated, + "plagiarism_points": plagiarism_points, + }, + task_points, + ) + + proc_infos = [] + for d in processes_task_dirs: + s = _load_student_info(d) + if s: + proc_infos.append((d, s)) + + # Choose target identity: prefer example_processes; otherwise most common + target_identity = None + if "example_processes" in processes_task_dirs: + s0 = _load_student_info("example_processes") + if s0: + target_identity = _identity_key(s0) + if not target_identity and proc_infos: + cnt = Counter(_identity_key(s) for _, s in proc_infos) + target_identity = cnt.most_common(1)[0][0] + + # Map task_number -> (dir_name, display_label) + num_to_dir: dict[int, tuple[str, str]] = {} + if target_identity: + for d, s in proc_infos: + if _identity_key(s) == target_identity: + try: + tn = int(str(s.get("task_number", "0"))) + except Exception: + continue + display = d + num_to_dir[tn] = 
(d, display) + + expected_numbers = [1, 2, 3] + proc_group_headers = [] + proc_top_headers = [] + proc_groups = [] + proc_r_values = [] + total_points_sum = 0 + for n in expected_numbers: + entry = num_to_dir.get(n) + if entry: + d, display_label = entry + # Top header shows task name (directory) + proc_top_headers.append(f"task-{n}") + # Second header row shows only mpi/seq + proc_group_headers.append({"type": "mpi"}) + proc_group_headers.append({"type": "seq"}) + group_cells = [] + for ttype in ["mpi", "seq"]: + cell, _ = _build_cell(d, ttype, perf_stats) + group_cells.append(cell) + # Override displayed points for processes: S under MPI/SEQ from points-info; A points under MPI only + s_mpi, s_seq, a_mpi, r_max = _find_process_points(cfg, n) + has_mpi = bool(directories[d].get("mpi")) + has_seq = bool(directories[d].get("seq")) + report_present = (tasks_dir / d / "report.md").exists() + group_cells[0]["solution_points"] = s_mpi if has_mpi else 0 + group_cells[1]["solution_points"] = s_seq if has_seq else 0 + # Calculate Performance P for MPI based on efficiency and max a_mpi + mpi_eff = group_cells[0].get("efficiency", "N/A") + perf_points_mpi = ( + _calc_perf_points_from_efficiency(mpi_eff, a_mpi) + if (has_mpi and has_seq) + else 0 + ) + # Display '—' instead of 0 when metrics are absent (efficiency not a percent) + if isinstance(mpi_eff, str) and mpi_eff.endswith("%"): + perf_points_mpi_display = perf_points_mpi + else: + perf_points_mpi_display = "—" + group_cells[0]["perf_points"] = perf_points_mpi + group_cells[0]["perf_points_display"] = perf_points_mpi_display + group_cells[1]["perf_points"] = 0 + # Recompute plagiarism penalty based on processes S maxima + try: + plag_coeff = float( + (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( + "coefficient", 0.0 + ) + ) + except Exception: + plag_coeff = 0.0 + p_mpi = ( + -plag_coeff * s_mpi + if (has_mpi and group_cells[0].get("plagiarised")) + else 0 + ) + p_seq = ( + -plag_coeff * s_seq + if (has_seq 
and group_cells[1].get("plagiarised")) + else 0 + ) + group_cells[0]["plagiarism_points"] = p_mpi + group_cells[1]["plagiarism_points"] = p_seq + proc_groups.extend(group_cells) + # Sum points S + P + R + C (penalty negative) with gating + s_inc = (s_mpi if has_mpi else 0) + (s_seq if has_seq else 0) + p_inc = perf_points_mpi + r_inc = r_max if report_present else 0 + total_points_sum += s_inc + p_inc + r_inc + p_mpi + p_seq + proc_r_values.append(r_inc) + else: + proc_group_headers.append({"type": "mpi", "task_label": f"task_{n}"}) + proc_group_headers.append({"type": "seq", "task_label": f"task_{n}"}) + proc_top_headers.append(f"task-{n}") + for _ in ["mpi", "seq"]: + proc_groups.append( + { + "solution_points": "?", + "solution_style": "", + "perf": "?", + "acceleration": "?", + "efficiency": "?", + "deadline_points": "?", + "plagiarised": False, + "plagiarism_points": "?", + } + ) + # Do not affect total; sum only existing tasks; report points 0 + proc_r_values.append(0) + + # Label for processes row: show Last, First, Middle on separate lines; no group number + row_label = "processes" + row_variant = "?" + if target_identity: + parts = target_identity.split("|") + if len(parts) >= 3: + first, last, middle = parts[0], parts[1], parts[2] + name_parts = [p for p in [last, first, middle] if p] + name = "
".join(name_parts) + row_label = name or row_label + + # Build three variants (one per task) based on student identity + row_variant = "?" + if target_identity: + parts = target_identity.split("|") + if len(parts) >= 4: + first, last, middle, group = parts[0], parts[1], parts[2], parts[3] + variants_render = [] + for n in expected_numbers: + vmax = _find_process_variants_max(cfg, n) + try: + v_idx = assign_variant( + surname=last, + name=first, + patronymic=middle, + group=group, + repo=f"{REPO_SALT}/processes/task-{n}", + num_variants=vmax, + ) + variants_render.append(str(v_idx + 1)) + except Exception: + variants_render.append("?") + row_variant = "
".join(variants_render) + processes_rows = [ + { + "task": row_label, + "variant": row_variant, + "groups": proc_groups, + "r_values": proc_r_values, + "r_total": sum(proc_r_values), + "total": total_points_sum, + } + ] + + # Rebuild threads rows with resolved perf stats + threads_rows = _build_rows_for_task_types( + task_types_threads, + threads_task_dirs, + perf_stats, + cfg, + eff_num_proc, + deadlines_cfg, + ) + + parser = argparse.ArgumentParser(description="Generate HTML scoreboard.") + parser.add_argument( + "-o", "--output", type=str, required=True, help="Output directory path" + ) + args = parser.parse_args() + + output_path = Path(args.output) + output_path.mkdir(parents=True, exist_ok=True) + + # Render tables + generated_msk = _now_msk().strftime("%Y-%m-%d %H:%M:%S") + table_template = env.get_template("index.html.j2") + threads_vmax = int((cfg.get("threads", {}) or {}).get("variants_max", 1)) + # Build display deadlines (use file values if present, fill missing with auto) + threads_order = task_types_threads + auto_threads_dl = _compute_display_deadlines_threads(threads_order) + dl_threads_out = {} + for t in threads_order: + base_date = auto_threads_dl.get(t) + # Default = 0 shift + shift_days = 0 + label = None + if deadlines_display_threads and t in deadlines_display_threads: + val = deadlines_display_threads.get(t) + if isinstance(val, int): + shift_days = val + else: + # try int-like string, else treat as explicit label + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + dl_threads_out[t] = _abbr(vdate) + else: + dl_threads_out[t] = label or "" + + threads_html = table_template.render( + task_types=task_types_threads, + rows=threads_rows, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + threads_variants_max=threads_vmax, + deadlines_threads=dl_threads_out, + ) + # Use dedicated template for processes table 
layout + processes_template = env.get_template("processes.html.j2") + proc_vmaxes = [_find_process_variants_max(cfg, n) for n in expected_numbers] + # Build display deadlines for processes in task order (1..3) + auto_proc_dl = _compute_display_deadlines_processes(len(expected_numbers)) + proc_deadlines_list: list[str] = [] + for i, n in enumerate(expected_numbers): + base_date = auto_proc_dl[i] + shift_days = 0 + label = None + if deadlines_display_processes: + key = f"task_{n}" + val = deadlines_display_processes.get( + key + ) or deadlines_display_processes.get(f"mpi_task_{n}") + if val is not None: + if isinstance(val, int): + shift_days = val + else: + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + proc_deadlines_list.append(_abbr(vdate)) + else: + proc_deadlines_list.append(label or "") + + processes_html = processes_template.render( + top_task_names=proc_top_headers, + group_headers=proc_group_headers, + rows=processes_rows, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + processes_variants_max=proc_vmaxes, + deadlines_processes=proc_deadlines_list, + ) + + with open(output_path / "threads.html", "w") as f: + f.write(threads_html) + with open(output_path / "processes.html", "w") as f: + f.write(processes_html) + + # ——— Build per-group pages and group menus ———————————————————————— + def _load_group_number(dir_name: str): + import json + + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + return data.get("student", {}).get("group_number") + except Exception: + return None + + def _slugify(text: str) -> str: + return "".join( + ch if ch.isalnum() or ch in ("-", "_") else "_" for ch in str(text) + ) + + # Collect groups + threads_groups = sorted( + set(filter(None, (_load_group_number(d) for d in 
threads_task_dirs))) + ) + processes_groups = sorted( + set(filter(None, (_load_group_number(d) for d in processes_task_dirs))) + ) + + # Threads: per-group pages + threads_groups_menu = [] + for g in threads_groups: + slug = _slugify(g) + out_file = output_path / f"threads_{slug}.html" + filtered_dirs = [d for d in threads_task_dirs if _load_group_number(d) == g] + rows_g = _build_rows_for_task_types( + task_types_threads, + filtered_dirs, + perf_stats, + cfg, + eff_num_proc, + deadlines_cfg, + ) + # Rebuild deadline labels for this page + auto_threads_dl_g = _compute_display_deadlines_threads(threads_order) + dl_threads_out_g = {} + for t in threads_order: + base_date = auto_threads_dl_g.get(t) + shift_days = 0 + label = None + if deadlines_display_threads and t in deadlines_display_threads: + val = deadlines_display_threads.get(t) + if isinstance(val, int): + shift_days = val + else: + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + dl_threads_out_g[t] = _abbr(vdate) + else: + dl_threads_out_g[t] = label or "" + + html_g = table_template.render( + task_types=task_types_threads, + rows=rows_g, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + threads_variants_max=threads_vmax, + deadlines_threads=dl_threads_out_g, + ) + with open(out_file, "w") as f: + f.write(html_g) + threads_groups_menu.append({"href": out_file.name, "title": g}) + + # Processes: per-group pages + processes_groups_menu = [] + for g in processes_groups: + slug = _slugify(g) + out_file = output_path / f"processes_{slug}.html" + filtered_dirs = [d for d in processes_task_dirs if _load_group_number(d) == g] + + # Reuse earlier logic but limited to filtered_dirs + import json as _json + + def _load_student_info_group(dir_name: str): + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") 
as f: + data = _json.load(f) + return data.get("student", {}) + except Exception: + return None + + def _id_key(stud: dict) -> str: + return "|".join( + [ + str(stud.get("first_name", "")), + str(stud.get("last_name", "")), + str(stud.get("middle_name", "")), + str(stud.get("group_number", "")), + ] + ) + + proc_infos_g = [] + for d in filtered_dirs: + s = _load_student_info_group(d) + if s: + proc_infos_g.append((d, s)) + + target_identity_g = None + if "example_processes" in filtered_dirs: + s0 = _load_student_info_group("example_processes") + if s0 and s0.get("group_number") == g: + target_identity_g = _id_key(s0) + if not target_identity_g and proc_infos_g: + cnt = Counter(_id_key(s) for _, s in proc_infos_g) + target_identity_g = cnt.most_common(1)[0][0] + + num_to_dir_g: dict[int, tuple[str, str]] = {} + if target_identity_g: + for d, s in proc_infos_g: + if _id_key(s) == target_identity_g: + try: + tn = int(str(s.get("task_number", "0"))) + except Exception: + continue + num_to_dir_g[tn] = (d, d) + + proc_top_headers_g = [] + proc_group_headers_g = [] + proc_groups_g = [] + proc_r_values_g = [] + total_points_sum_g = 0 + for n in [1, 2, 3]: + entry = num_to_dir_g.get(n) + if entry: + d, display_label = entry + proc_top_headers_g.append(f"task-{n}") + for ttype in ["mpi", "seq"]: + proc_group_headers_g.append({"type": ttype}) + # build cell + status = directories[d].get(ttype) + sol_points, solution_style = get_solution_points_and_style( + ttype, status, cfg + ) + task_points = sol_points + is_cheated, plagiarism_points = ( + check_plagiarism_and_calculate_penalty( + d, + ttype, + sol_points, + plagiarism_cfg, + cfg, + semester="processes", + ) + ) + task_points += plagiarism_points + perf_val = perf_stats.get(d, {}).get(ttype, "?") + acceleration, efficiency = calculate_performance_metrics( + perf_val, eff_num_proc, ttype + ) + deadline_points = calculate_deadline_penalty( + d, ttype, status, deadlines_cfg, tasks_dir + ) + proc_groups_g.append( + { + 
"solution_points": sol_points, + "solution_style": solution_style, + "perf": perf_val, + "acceleration": acceleration, + "efficiency": efficiency, + "deadline_points": deadline_points, + "plagiarised": is_cheated, + "plagiarism_points": plagiarism_points, + } + ) + # Override displayed points to processes maxima and recompute P + s_mpi_g, s_seq_g, a_max_g, r_max_g = _find_process_points(cfg, n) + has_mpi_g = bool(directories[d].get("mpi")) + has_seq_g = bool(directories[d].get("seq")) + report_present_g = (tasks_dir / d / "report.md").exists() + base_idx = len(proc_groups_g) - 2 + if base_idx >= 0: + proc_groups_g[base_idx]["solution_points"] = ( + s_mpi_g if has_mpi_g else 0 + ) + proc_groups_g[base_idx + 1]["solution_points"] = ( + s_seq_g if has_seq_g else 0 + ) + # Performance for MPI cell + mpi_eff_g = proc_groups_g[base_idx].get("efficiency", "N/A") + perf_points_mpi_g = ( + _calc_perf_points_from_efficiency(mpi_eff_g, a_max_g) + if (has_mpi_g and has_seq_g) + else 0 + ) + if isinstance(mpi_eff_g, str) and mpi_eff_g.endswith("%"): + perf_points_mpi_display_g = perf_points_mpi_g + else: + perf_points_mpi_display_g = "—" + proc_groups_g[base_idx]["perf_points"] = perf_points_mpi_g + proc_groups_g[base_idx]["perf_points_display"] = ( + perf_points_mpi_display_g + ) + proc_groups_g[base_idx + 1]["perf_points"] = 0 + try: + plag_coeff_g = float( + (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( + "coefficient", 0.0 + ) + ) + except Exception: + plag_coeff_g = 0.0 + p_mpi_g = ( + -plag_coeff_g * s_mpi_g + if (has_mpi_g and proc_groups_g[base_idx].get("plagiarised")) + else 0 + ) + p_seq_g = ( + -plag_coeff_g * s_seq_g + if ( + has_seq_g and proc_groups_g[base_idx + 1].get("plagiarised") + ) + else 0 + ) + proc_groups_g[base_idx]["plagiarism_points"] = p_mpi_g + proc_groups_g[base_idx + 1]["plagiarism_points"] = p_seq_g + + # Sum points by processes S + P + R (and C penalties) + s_inc_g = (s_mpi_g if has_mpi_g else 0) + (s_seq_g if has_seq_g else 0) + 
r_inc_g = r_max_g if report_present_g else 0 + total_points_sum_g += ( + s_inc_g + perf_points_mpi_g + r_inc_g + p_mpi_g + p_seq_g + ) + proc_r_values_g.append(r_inc_g) + else: + proc_top_headers_g.append(f"task-{n}") + for ttype in ["mpi", "seq"]: + proc_group_headers_g.append({"type": ttype}) + proc_groups_g.append( + { + "solution_points": "?", + "solution_style": "", + "perf": "?", + "acceleration": "?", + "efficiency": "?", + "deadline_points": "?", + "plagiarised": False, + "plagiarism_points": "?", + } + ) + # Missing task: do not affect total; sum only existing; report=0 + proc_r_values_g.append(0) + + # Row label for group page: name without group (three lines max) + row_label_g = f"group {g}" + if target_identity_g: + parts = target_identity_g.split("|") + if len(parts) >= 3: + first, last, middle = parts[0], parts[1], parts[2] + nm_parts = [p for p in [last, first, middle] if p] + nm = "
".join(nm_parts) + row_label_g = nm or row_label_g + + # Build three variants (one per task) based on student identity + row_variant_g = "?" + if target_identity_g: + parts = target_identity_g.split("|") + if len(parts) >= 4: + first, last, middle, group = parts[0], parts[1], parts[2], parts[3] + vrender = [] + for n in [1, 2, 3]: + vmax = _find_process_variants_max(cfg, n) + try: + v_idx = assign_variant( + surname=last, + name=first, + patronymic=middle, + group=group, + repo=f"{REPO_SALT}/processes/task-{n}", + num_variants=vmax, + ) + vrender.append(str(v_idx + 1)) + except Exception: + vrender.append("?") + row_variant_g = "
".join(vrender) + + rows_g = [ + { + "task": row_label_g, + "variant": row_variant_g, + "groups": proc_groups_g, + "r_values": proc_r_values_g, + "r_total": sum(proc_r_values_g), + "total": total_points_sum_g, + } + ] + + proc_vmaxes_g = [_find_process_variants_max(cfg, n) for n in [1, 2, 3]] + # Build display deadlines for processes group page + auto_proc_dl_g = _compute_display_deadlines_processes(3) + proc_deadlines_list_g: list[str] = [] + for i, n in enumerate([1, 2, 3]): + base_date = auto_proc_dl_g[i] + shift_days = 0 + label = None + if deadlines_display_processes: + key = f"task_{n}" + val = deadlines_display_processes.get( + key + ) or deadlines_display_processes.get(f"mpi_task_{n}") + if val is not None: + if isinstance(val, int): + shift_days = val + else: + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + proc_deadlines_list_g.append(_abbr(vdate)) + else: + proc_deadlines_list_g.append(label or "") + + html_g = processes_template.render( + top_task_names=proc_top_headers_g, + group_headers=proc_group_headers_g, + rows=rows_g, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + processes_variants_max=proc_vmaxes_g, + deadlines_processes=proc_deadlines_list_g, + ) + with open(out_file, "w") as f: + f.write(html_g) + processes_groups_menu.append({"href": out_file.name, "title": g}) + + # Render index menu page + try: + menu_template = env.get_template("menu_index.html.j2") + except Exception: + # Simple fallback menu if template missing + menu_html_content = ( + 'Scoreboard' + "

Scoreboard

" + "" + ) + else: + menu_html_content = menu_template.render( + pages=[ + {"href": "threads.html", "title": "Threads Scoreboard"}, + {"href": "processes.html", "title": "Processes Scoreboard"}, + ], + groups_threads=threads_groups_menu, + groups_processes=processes_groups_menu, + generated_msk=generated_msk, + ) + + with open(output_path / "index.html", "w") as f: + f.write(menu_html_content) + + # Copy static assets + static_src = script_dir / "static" + static_dst = output_path / "static" + if static_src.exists(): + if static_dst.exists(): + shutil.rmtree(static_dst) + shutil.copytree(static_src, static_dst) + logger.info("Static directory copied to %s", static_dst) + else: + logger.warning("Static directory not found at %s", static_src) + + logger.info( + "HTML pages generated at %s (index.html, threads.html, processes.html)", + output_path, + ) + + +if __name__ == "__main__": + main() diff --git a/scoreboard/requirements.txt b/scoreboard/requirements.txt index d86cf76506..b0649269b6 100644 --- a/scoreboard/requirements.txt +++ b/scoreboard/requirements.txt @@ -1,3 +1,3 @@ -Jinja2>=3.0 -PyYAML>=6.0 -backports.zoneinfo; python_version < "3.9" +Jinja2>=3.0 +PyYAML>=6.0 +backports.zoneinfo; python_version < "3.9" diff --git a/scoreboard/static/main.css b/scoreboard/static/main.css index dcb8f2e805..c46d941abf 100644 --- a/scoreboard/static/main.css +++ b/scoreboard/static/main.css @@ -1,12 +1,12 @@ -table { - width: 100%; - border-collapse: collapse; -} -th, td { - border: 1px solid black; - padding: 8px; - text-align: left; -} -th { - background-color: #f2f2f2; -} +table { + width: 100%; + border-collapse: collapse; +} +th, td { + border: 1px solid black; + padding: 8px; + text-align: left; +} +th { + background-color: #f2f2f2; +} diff --git a/scoreboard/templates/index.html.j2 b/scoreboard/templates/index.html.j2 index 37151c30c1..1174938b25 100644 --- a/scoreboard/templates/index.html.j2 +++ b/scoreboard/templates/index.html.j2 @@ -1,117 +1,117 @@ - - - - Task 
Directories - - - -
- Generated (MSK): {{ generated_msk }} -
-
-
Variant Calculator (Threads)
-
- - - - - - -
- -
- - - - - {% for type in task_types %} - {% set span = 4 if type == 'seq' else 7 %} - - {% endfor %} - - - - {% for type in task_types %} - {% if type == 'seq' %} - {% for letter in ('S', 'D', 'C', 'R') %} - - {% endfor %} - {% else %} - {% for letter in ('S', 'P', 'A', 'E', 'D', 'C', 'R') %} - - {% endfor %} - {% endif %} - {% endfor %} - - {% for row in rows %} - - - - {% for type in task_types %} - {% set cell = row.types[loop.index0] %} - {% if type == 'seq' %} - - - - - {% else %} - - - - - - - - {% endif %} - {% endfor %} - - - {% endfor %} -
NameV -
- {{ type }} - {% if deadlines_threads %} - {{ deadlines_threads.get(type, '') }} - {% endif %} -
-
Total
{{ letter }}{{ letter }}
{{ row.task }}{{ row.variant }}{{ cell.solution_points }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.report }}{{ cell.solution_points }}{{ cell.perf_points_display }}{{ cell.acceleration }}{{ cell.efficiency }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.report }}{{ row.total }}
- - + + + + Task Directories + + + +
+ Generated (MSK): {{ generated_msk }} +
+
+
Variant Calculator (Threads)
+
+ + + + + + +
+ +
+ + + + + {% for type in task_types %} + {% set span = 4 if type == 'seq' else 7 %} + + {% endfor %} + + + + {% for type in task_types %} + {% if type == 'seq' %} + {% for letter in ('S', 'D', 'C', 'R') %} + + {% endfor %} + {% else %} + {% for letter in ('S', 'P', 'A', 'E', 'D', 'C', 'R') %} + + {% endfor %} + {% endif %} + {% endfor %} + + {% for row in rows %} + + + + {% for type in task_types %} + {% set cell = row.types[loop.index0] %} + {% if type == 'seq' %} + + + + + {% else %} + + + + + + + + {% endif %} + {% endfor %} + + + {% endfor %} +
NameV +
+ {{ type }} + {% if deadlines_threads %} + {{ deadlines_threads.get(type, '') }} + {% endif %} +
+
Total
{{ letter }}{{ letter }}
{{ row.task }}{{ row.variant }}{{ cell.solution_points }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.report }}{{ cell.solution_points }}{{ cell.perf_points_display }}{{ cell.acceleration }}{{ cell.efficiency }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.report }}{{ row.total }}
+ + diff --git a/scoreboard/templates/menu_index.html.j2 b/scoreboard/templates/menu_index.html.j2 index 73d125e983..fea16df8b3 100644 --- a/scoreboard/templates/menu_index.html.j2 +++ b/scoreboard/templates/menu_index.html.j2 @@ -1,56 +1,56 @@ - - - - - Scoreboard Menu - - - - - -

Scoreboard

- - -

- (V)ariant - Task variant number assigned to the student.
- (R)eport - Task report in Markdown (.md), required.
- (S)olution - The correctness and completeness of the implemented solution.
- (A)cceleration - The process of speeding up software to improve performance. Speedup = T(seq) / T(parallel)
- (E)fficiency - Optimizing software speed-up by improving CPU utilization and resource management. Efficiency = Speedup / NumProcs * 100%
- (P)erformance - Points awarded based on efficiency thresholds (see docs).
- (D)eadline - The timeliness of the submission in relation to the given deadline (due at 23:59 MSK on the shown date).
- (C)opying - Penalty for detected copying cases. -

- - - + + + + + Scoreboard Menu + + + + + +

Scoreboard

+ + +

+ (V)ariant - Task variant number assigned to the student.
+ (R)eport - Task report in Markdown (.md), required.
+ (S)olution - The correctness and completeness of the implemented solution.
+ (A)cceleration - The process of speeding up software to improve performance. Speedup = T(seq) / T(parallel)
+ (E)fficiency - Optimizing software speed-up by improving CPU utilization and resource management. Efficiency = Speedup / NumProcs * 100%
+ (P)erformance - Points awarded based on efficiency thresholds (see docs).
+ (D)eadline - The timeliness of the submission in relation to the given deadline (due at 23:59 MSK on the shown date).
+ (C)opying - Penalty for detected copying cases. +

+ + + diff --git a/scoreboard/templates/processes.html.j2 b/scoreboard/templates/processes.html.j2 index c39cf32e76..f46196e910 100644 --- a/scoreboard/templates/processes.html.j2 +++ b/scoreboard/templates/processes.html.j2 @@ -1,133 +1,133 @@ - - - - Processes Scoreboard - - - -
- Generated (MSK): {{ generated_msk }} -
-
-
Variant Calculator (Processes)
-
- - - - - - -
- -
- - - - - {% for name in top_task_names %} - {# For each task: seq (3) + mpi (6) + R (1) = 10 #} - - {% endfor %} - - - - {% for _ in top_task_names %} - - - - {% endfor %} - - - {% for _ in top_task_names %} - {# seq sub-columns (no A/E) #} - {% for letter in ('S', 'D', 'C') %} - - {% endfor %} - {# mpi sub-columns include P (points) + A/E metrics #} - {% for letter in ('S', 'P', 'A', 'E', 'D', 'C') %} - - {% endfor %} - {% endfor %} - - {% for row in rows %} - - - - {% set ns = namespace(idx=0, gi=0) %} - {% for _ in top_task_names %} - {# cells are stored as [mpi, seq] per task; render seq first #} - {% set cell_mpi = row.groups[ns.idx] %} - {% set cell_seq = row.groups[ns.idx + 1] %} - {# seq: S, D, P #} - - - - {# mpi: S, P, A, E, D, C #} - - - - - - - {% set ns.idx = ns.idx + 2 %} - {# R value for this task group #} - - {% set ns.gi = ns.gi + 1 %} - {% endfor %} - - - {% endfor %} -
NameV -
- {{ name }} - {% if deadlines_processes %} - {{ deadlines_processes[loop.index0] }} - {% endif %} -
-
Total
seqmpiR
{{ letter }}{{ letter }}
{{ row.task }}{{ row.variant }}{{ cell_seq.solution_points }}{{ cell_seq.deadline_points }}{{ cell_seq.plagiarism_points }}{{ cell_mpi.solution_points }}{{ (cell_mpi.perf_points_display if cell_mpi.perf_points_display is defined else cell_mpi.perf_points) }}{{ cell_mpi.acceleration }}{{ cell_mpi.efficiency }}{{ cell_mpi.deadline_points }}{{ cell_mpi.plagiarism_points }}{{ row.r_values[ns.gi] if row.r_values is defined else 0 }}{{ row.total }}
- - + + + + Processes Scoreboard + + + +
+ Generated (MSK): {{ generated_msk }} +
+
+
Variant Calculator (Processes)
+
+ + + + + + +
+ +
+ + + + + {% for name in top_task_names %} + {# For each task: seq (3) + mpi (6) + R (1) = 10 #} + + {% endfor %} + + + + {% for _ in top_task_names %} + + + + {% endfor %} + + + {% for _ in top_task_names %} + {# seq sub-columns (no A/E) #} + {% for letter in ('S', 'D', 'C') %} + + {% endfor %} + {# mpi sub-columns include P (points) + A/E metrics #} + {% for letter in ('S', 'P', 'A', 'E', 'D', 'C') %} + + {% endfor %} + {% endfor %} + + {% for row in rows %} + + + + {% set ns = namespace(idx=0, gi=0) %} + {% for _ in top_task_names %} + {# cells are stored as [mpi, seq] per task; render seq first #} + {% set cell_mpi = row.groups[ns.idx] %} + {% set cell_seq = row.groups[ns.idx + 1] %} + {# seq: S, D, P #} + + + + {# mpi: S, P, A, E, D, C #} + + + + + + + {% set ns.idx = ns.idx + 2 %} + {# R value for this task group #} + + {% set ns.gi = ns.gi + 1 %} + {% endfor %} + + + {% endfor %} +
NameV +
+ {{ name }} + {% if deadlines_processes %} + {{ deadlines_processes[loop.index0] }} + {% endif %} +
+
Total
seqmpiR
{{ letter }}{{ letter }}
{{ row.task }}{{ row.variant }}{{ cell_seq.solution_points }}{{ cell_seq.deadline_points }}{{ cell_seq.plagiarism_points }}{{ cell_mpi.solution_points }}{{ (cell_mpi.perf_points_display if cell_mpi.perf_points_display is defined else cell_mpi.perf_points) }}{{ cell_mpi.acceleration }}{{ cell_mpi.efficiency }}{{ cell_mpi.deadline_points }}{{ cell_mpi.plagiarism_points }}{{ row.r_values[ns.gi] if row.r_values is defined else 0 }}{{ row.total }}
+ + diff --git a/scoreboard/tests/conftest.py b/scoreboard/tests/conftest.py index 0b257fde31..d9d20d5107 100644 --- a/scoreboard/tests/conftest.py +++ b/scoreboard/tests/conftest.py @@ -1,144 +1,144 @@ -""" -Pytest configuration and shared fixtures for scoreboard tests. -""" - -import pytest -import tempfile -import shutil -from pathlib import Path -import yaml -import csv - - -@pytest.fixture -def temp_dir(): - """Create a temporary directory for tests.""" - temp_path = Path(tempfile.mkdtemp()) - yield temp_path - shutil.rmtree(temp_path) - - -@pytest.fixture -def sample_config(): - """Sample configuration dictionary.""" - return { - "scoreboard": { - "task": { - "seq": {"solution": {"max": 4}}, - "omp": {"solution": {"max": 6}}, - "stl": {"solution": {"max": 8}}, - "tbb": {"solution": {"max": 6}}, - "all": {"solution": {"max": 10}}, - "mpi": {"solution": {"max": 0}}, - }, - "plagiarism": {"coefficient": 0.5}, - "efficiency": {"num_proc": 4}, - "deadlines": { - "seq": "2025-01-15", - "omp": "2025-02-15", - "stl": "2025-03-15", - "tbb": "2025-04-15", - "all": "2025-05-15", - "mpi": "2025-06-15", - }, - } - } - - -@pytest.fixture -def sample_plagiarism_config(): - """Sample plagiarism configuration dictionary.""" - return { - "plagiarism": { - "seq": ["broken_example"], - "omp": [], - "stl": ["cheater_task"], - "tbb": [], - "all": [], - "mpi": [], - } - } - - -@pytest.fixture -def sample_task_structure(temp_dir): - """Create a sample task directory structure.""" - tasks_dir = temp_dir / "tasks" - - # Create task directories - task_dirs = [ - "example_task/seq", - "example_task/omp", - "example_task/stl", - "disabled_task_disabled/seq", - "disabled_task_disabled/omp", - "partial_task/seq", - ] - - for task_dir in task_dirs: - (tasks_dir / task_dir).mkdir(parents=True) - # Create a dummy source file - (tasks_dir / task_dir / "main.cpp").touch() - - return tasks_dir - - -@pytest.fixture -def sample_performance_csv(temp_dir): - """Create a sample performance CSV 
file.""" - csv_file = temp_dir / "performance.csv" - - data = [ - { - "Task": "example_task", - "SEQ": "1.0", - "OMP": "0.5", - "STL": "0.3", - "TBB": "0.4", - "ALL": "0.2", - }, - { - "Task": "disabled_task", - "SEQ": "2.0", - "OMP": "1.0", - "STL": "0.8", - "TBB": "0.9", - "ALL": "0.7", - }, - { - "Task": "partial_task", - "SEQ": "1.5", - "OMP": "N/A", - "STL": "N/A", - "TBB": "N/A", - "ALL": "N/A", - }, - ] - - with open(csv_file, "w", newline="") as f: - writer = csv.DictWriter( - f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] - ) - writer.writeheader() - writer.writerows(data) - - return csv_file - - -@pytest.fixture -def sample_config_files(temp_dir, sample_config, sample_plagiarism_config): - """Create sample configuration files.""" - data_dir = temp_dir / "data" - data_dir.mkdir() - - # Create threads-config.yml - config_file = data_dir / "threads-config.yml" - with open(config_file, "w") as f: - yaml.dump(sample_config, f) - - # Create plagiarism.yml - plagiarism_file = data_dir / "plagiarism.yml" - with open(plagiarism_file, "w") as f: - yaml.dump(sample_plagiarism_config, f) - - return data_dir +""" +Pytest configuration and shared fixtures for scoreboard tests. 
+""" + +import pytest +import tempfile +import shutil +from pathlib import Path +import yaml +import csv + + +@pytest.fixture +def temp_dir(): + """Create a temporary directory for tests.""" + temp_path = Path(tempfile.mkdtemp()) + yield temp_path + shutil.rmtree(temp_path) + + +@pytest.fixture +def sample_config(): + """Sample configuration dictionary.""" + return { + "scoreboard": { + "task": { + "seq": {"solution": {"max": 4}}, + "omp": {"solution": {"max": 6}}, + "stl": {"solution": {"max": 8}}, + "tbb": {"solution": {"max": 6}}, + "all": {"solution": {"max": 10}}, + "mpi": {"solution": {"max": 0}}, + }, + "plagiarism": {"coefficient": 0.5}, + "efficiency": {"num_proc": 4}, + "deadlines": { + "seq": "2025-01-15", + "omp": "2025-02-15", + "stl": "2025-03-15", + "tbb": "2025-04-15", + "all": "2025-05-15", + "mpi": "2025-06-15", + }, + } + } + + +@pytest.fixture +def sample_plagiarism_config(): + """Sample plagiarism configuration dictionary.""" + return { + "plagiarism": { + "seq": ["broken_example"], + "omp": [], + "stl": ["cheater_task"], + "tbb": [], + "all": [], + "mpi": [], + } + } + + +@pytest.fixture +def sample_task_structure(temp_dir): + """Create a sample task directory structure.""" + tasks_dir = temp_dir / "tasks" + + # Create task directories + task_dirs = [ + "example_task/seq", + "example_task/omp", + "example_task/stl", + "disabled_task_disabled/seq", + "disabled_task_disabled/omp", + "partial_task/seq", + ] + + for task_dir in task_dirs: + (tasks_dir / task_dir).mkdir(parents=True) + # Create a dummy source file + (tasks_dir / task_dir / "main.cpp").touch() + + return tasks_dir + + +@pytest.fixture +def sample_performance_csv(temp_dir): + """Create a sample performance CSV file.""" + csv_file = temp_dir / "performance.csv" + + data = [ + { + "Task": "example_task", + "SEQ": "1.0", + "OMP": "0.5", + "STL": "0.3", + "TBB": "0.4", + "ALL": "0.2", + }, + { + "Task": "disabled_task", + "SEQ": "2.0", + "OMP": "1.0", + "STL": "0.8", + "TBB": "0.9", + 
"ALL": "0.7", + }, + { + "Task": "partial_task", + "SEQ": "1.5", + "OMP": "N/A", + "STL": "N/A", + "TBB": "N/A", + "ALL": "N/A", + }, + ] + + with open(csv_file, "w", newline="") as f: + writer = csv.DictWriter( + f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] + ) + writer.writeheader() + writer.writerows(data) + + return csv_file + + +@pytest.fixture +def sample_config_files(temp_dir, sample_config, sample_plagiarism_config): + """Create sample configuration files.""" + data_dir = temp_dir / "data" + data_dir.mkdir() + + # Create threads-config.yml + config_file = data_dir / "threads-config.yml" + with open(config_file, "w") as f: + yaml.dump(sample_config, f) + + # Create plagiarism.yml + plagiarism_file = data_dir / "plagiarism.yml" + with open(plagiarism_file, "w") as f: + yaml.dump(sample_plagiarism_config, f) + + return data_dir diff --git a/scoreboard/tests/requirements.txt b/scoreboard/tests/requirements.txt index b197d322ce..f20c1fb8ed 100644 --- a/scoreboard/tests/requirements.txt +++ b/scoreboard/tests/requirements.txt @@ -1 +1 @@ -pytest>=7.0 +pytest>=7.0 diff --git a/scoreboard/tests/test_calculate_performance_metrics.py b/scoreboard/tests/test_calculate_performance_metrics.py index 4ed144b4e3..21cbdbbac6 100644 --- a/scoreboard/tests/test_calculate_performance_metrics.py +++ b/scoreboard/tests/test_calculate_performance_metrics.py @@ -1,95 +1,95 @@ -from main import calculate_performance_metrics - - -class TestCalculatePerformanceMetrics: - def test_calculate_performance_metrics_valid_values(self): - acceleration, efficiency = calculate_performance_metrics("0.5", 4) - assert acceleration == "2.00" - assert efficiency == "50.00%" - - acceleration, efficiency = calculate_performance_metrics("0.25", 4) - assert acceleration == "4.00" - assert efficiency == "100.00%" - - acceleration, efficiency = calculate_performance_metrics("0.5", 2) - assert acceleration == "2.00" - assert efficiency == "100.00%" - - def 
test_calculate_performance_metrics_edge_cases(self): - acceleration, efficiency = calculate_performance_metrics("0.1", 4) - assert acceleration == "10.00" - assert efficiency == "250.00%" - - acceleration, efficiency = calculate_performance_metrics("1.0", 4) - assert acceleration == "1.00" - assert efficiency == "25.00%" - - acceleration, efficiency = calculate_performance_metrics("2.0", 4) - assert acceleration == "0.50" - assert efficiency == "12.50%" - - def test_calculate_performance_metrics_invalid_values(self): - acceleration, efficiency = calculate_performance_metrics("0.0", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics("-1.0", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics("invalid", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics("", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics("inf", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics("nan", 4) - assert acceleration == "?" - assert efficiency == "?" - - def test_calculate_performance_metrics_special_strings(self): - acceleration, efficiency = calculate_performance_metrics("?", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics("N/A", 4) - assert acceleration == "?" - assert efficiency == "?" - - acceleration, efficiency = calculate_performance_metrics(None, 4) - assert acceleration == "?" - assert efficiency == "?" 
- - def test_calculate_performance_metrics_different_proc_counts(self): - perf_val = "0.25" - - acceleration, efficiency = calculate_performance_metrics(perf_val, 1) - assert acceleration == "4.00" - assert efficiency == "400.00%" - - acceleration, efficiency = calculate_performance_metrics(perf_val, 2) - assert acceleration == "4.00" - assert efficiency == "200.00%" - - acceleration, efficiency = calculate_performance_metrics(perf_val, 8) - assert acceleration == "4.00" - assert efficiency == "50.00%" - - acceleration, efficiency = calculate_performance_metrics(perf_val, 16) - assert acceleration == "4.00" - assert efficiency == "25.00%" - - def test_calculate_performance_metrics_precision(self): - acceleration, efficiency = calculate_performance_metrics("0.3", 3) - assert acceleration == "3.33" - assert efficiency == "111.11%" - - acceleration, efficiency = calculate_performance_metrics("0.7", 6) - assert acceleration == "1.43" - assert efficiency == "23.81%" +from main import calculate_performance_metrics + + +class TestCalculatePerformanceMetrics: + def test_calculate_performance_metrics_valid_values(self): + acceleration, efficiency = calculate_performance_metrics("0.5", 4) + assert acceleration == "2.00" + assert efficiency == "50.00%" + + acceleration, efficiency = calculate_performance_metrics("0.25", 4) + assert acceleration == "4.00" + assert efficiency == "100.00%" + + acceleration, efficiency = calculate_performance_metrics("0.5", 2) + assert acceleration == "2.00" + assert efficiency == "100.00%" + + def test_calculate_performance_metrics_edge_cases(self): + acceleration, efficiency = calculate_performance_metrics("0.1", 4) + assert acceleration == "10.00" + assert efficiency == "250.00%" + + acceleration, efficiency = calculate_performance_metrics("1.0", 4) + assert acceleration == "1.00" + assert efficiency == "25.00%" + + acceleration, efficiency = calculate_performance_metrics("2.0", 4) + assert acceleration == "0.50" + assert efficiency == 
"12.50%" + + def test_calculate_performance_metrics_invalid_values(self): + acceleration, efficiency = calculate_performance_metrics("0.0", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics("-1.0", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics("invalid", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics("", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics("inf", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics("nan", 4) + assert acceleration == "?" + assert efficiency == "?" + + def test_calculate_performance_metrics_special_strings(self): + acceleration, efficiency = calculate_performance_metrics("?", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics("N/A", 4) + assert acceleration == "?" + assert efficiency == "?" + + acceleration, efficiency = calculate_performance_metrics(None, 4) + assert acceleration == "?" + assert efficiency == "?" 
+ + def test_calculate_performance_metrics_different_proc_counts(self): + perf_val = "0.25" + + acceleration, efficiency = calculate_performance_metrics(perf_val, 1) + assert acceleration == "4.00" + assert efficiency == "400.00%" + + acceleration, efficiency = calculate_performance_metrics(perf_val, 2) + assert acceleration == "4.00" + assert efficiency == "200.00%" + + acceleration, efficiency = calculate_performance_metrics(perf_val, 8) + assert acceleration == "4.00" + assert efficiency == "50.00%" + + acceleration, efficiency = calculate_performance_metrics(perf_val, 16) + assert acceleration == "4.00" + assert efficiency == "25.00%" + + def test_calculate_performance_metrics_precision(self): + acceleration, efficiency = calculate_performance_metrics("0.3", 3) + assert acceleration == "3.33" + assert efficiency == "111.11%" + + acceleration, efficiency = calculate_performance_metrics("0.7", 6) + assert acceleration == "1.43" + assert efficiency == "23.81%" diff --git a/scoreboard/tests/test_check_plagiarism.py b/scoreboard/tests/test_check_plagiarism.py index 763656aadf..c318f58244 100644 --- a/scoreboard/tests/test_check_plagiarism.py +++ b/scoreboard/tests/test_check_plagiarism.py @@ -1,81 +1,81 @@ -from main import check_plagiarism_and_calculate_penalty - - -class TestCheckPlagiarismAndCalculatePenalty: - def test_check_plagiarism_flagged_task( - self, sample_config, sample_plagiarism_config - ): - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "broken_example", "seq", 4, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == -2 - - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "cheater_task", "stl", 8, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == -4 - - def test_check_plagiarism_clean_task(self, sample_config, sample_plagiarism_config): - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - 
"clean_task", "seq", 4, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == 0 - - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "another_task", "omp", 6, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == 0 - - def test_check_plagiarism_disabled_task_suffix( - self, sample_config, sample_plagiarism_config - ): - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "broken_example_disabled", "seq", 4, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == -2 - - def test_check_plagiarism_different_task_types( - self, sample_config, sample_plagiarism_config - ): - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "broken_example", "omp", 6, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == 0 - - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "cheater_task", "seq", 4, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == 0 - - def test_check_plagiarism_zero_points( - self, sample_config, sample_plagiarism_config - ): - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "broken_example", "seq", 0, sample_plagiarism_config, sample_config - ) - assert is_cheated - assert plagiarism_points == 0 - - def test_check_plagiarism_different_coefficients(self, sample_plagiarism_config): - config_75_percent = {"scoreboard": {"plagiarism": {"coefficient": 0.75}}} - - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "broken_example", "seq", 4, sample_plagiarism_config, config_75_percent - ) - assert is_cheated - assert plagiarism_points == -3 - - config_25_percent = {"scoreboard": {"plagiarism": {"coefficient": 0.25}}} - - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - "broken_example", "seq", 8, 
sample_plagiarism_config, config_25_percent - ) - assert is_cheated - assert plagiarism_points == -2 +from main import check_plagiarism_and_calculate_penalty + + +class TestCheckPlagiarismAndCalculatePenalty: + def test_check_plagiarism_flagged_task( + self, sample_config, sample_plagiarism_config + ): + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "seq", 4, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == -2 + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "cheater_task", "stl", 8, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == -4 + + def test_check_plagiarism_clean_task(self, sample_config, sample_plagiarism_config): + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "clean_task", "seq", 4, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == 0 + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "another_task", "omp", 6, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == 0 + + def test_check_plagiarism_disabled_task_suffix( + self, sample_config, sample_plagiarism_config + ): + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example_disabled", "seq", 4, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == -2 + + def test_check_plagiarism_different_task_types( + self, sample_config, sample_plagiarism_config + ): + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "omp", 6, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == 0 + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "cheater_task", "seq", 4, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert 
plagiarism_points == 0 + + def test_check_plagiarism_zero_points( + self, sample_config, sample_plagiarism_config + ): + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "seq", 0, sample_plagiarism_config, sample_config + ) + assert is_cheated + assert plagiarism_points == 0 + + def test_check_plagiarism_different_coefficients(self, sample_plagiarism_config): + config_75_percent = {"scoreboard": {"plagiarism": {"coefficient": 0.75}}} + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "seq", 4, sample_plagiarism_config, config_75_percent + ) + assert is_cheated + assert plagiarism_points == -3 + + config_25_percent = {"scoreboard": {"plagiarism": {"coefficient": 0.25}}} + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "seq", 8, sample_plagiarism_config, config_25_percent + ) + assert is_cheated + assert plagiarism_points == -2 diff --git a/scoreboard/tests/test_discover_tasks.py b/scoreboard/tests/test_discover_tasks.py index d938257d74..fe5e376898 100644 --- a/scoreboard/tests/test_discover_tasks.py +++ b/scoreboard/tests/test_discover_tasks.py @@ -1,110 +1,110 @@ -""" -Tests for the discover_tasks function. 
-""" - -from main import discover_tasks - - -class TestDiscoverTasks: - """Test cases for discover_tasks function.""" - - def test_discover_tasks_with_valid_structure(self, sample_task_structure): - """Test discovering tasks with a valid directory structure.""" - task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] - - result = discover_tasks(sample_task_structure, task_types) - - # Check that tasks are discovered correctly - assert "example_task" in result - assert "disabled_task" in result - assert "partial_task" in result - - # Check task statuses - assert result["example_task"]["seq"] == "done" - assert result["example_task"]["omp"] == "done" - assert result["example_task"]["stl"] == "done" - - assert result["disabled_task"]["seq"] == "disabled" - assert result["disabled_task"]["omp"] == "disabled" - - assert result["partial_task"]["seq"] == "done" - assert "omp" not in result["partial_task"] # No omp implementation - - def test_discover_tasks_empty_directory(self, temp_dir): - """Test discovering tasks in an empty directory.""" - task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] - - result = discover_tasks(temp_dir / "nonexistent", task_types) - - assert result == {} - - def test_discover_tasks_no_task_directories(self, temp_dir): - """Test discovering tasks when no valid task directories exist.""" - tasks_dir = temp_dir / "tasks" - tasks_dir.mkdir() - - # Create common directory (should be ignored) - (tasks_dir / "common").mkdir() - (tasks_dir / "common" / "utils.hpp").touch() - - task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] - - result = discover_tasks(tasks_dir, task_types) - - assert result == {} - - def test_discover_tasks_with_mixed_implementations(self, temp_dir): - """Test discovering tasks with mixed implementation availability.""" - tasks_dir = temp_dir / "tasks" - - # Create task with only some implementations - task_dir = tasks_dir / "mixed_task" - (task_dir / "seq").mkdir(parents=True) - (task_dir / "omp").mkdir(parents=True) - 
# No stl, tbb, all, mpi implementations - - task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] - - result = discover_tasks(tasks_dir, task_types) - - assert "mixed_task" in result - assert result["mixed_task"]["seq"] == "done" - assert result["mixed_task"]["omp"] == "done" - assert "stl" not in result["mixed_task"] - assert "tbb" not in result["mixed_task"] - assert "all" not in result["mixed_task"] - assert "mpi" not in result["mixed_task"] - - def test_discover_tasks_disabled_suffix_handling(self, temp_dir): - """Test correct handling of _disabled suffix in task names.""" - tasks_dir = temp_dir / "tasks" - - # Create disabled task - disabled_dir = tasks_dir / "test_task_disabled" - (disabled_dir / "seq").mkdir(parents=True) - (disabled_dir / "omp").mkdir(parents=True) - - task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] - - result = discover_tasks(tasks_dir, task_types) - - # Should be indexed under clean name without _disabled - assert "test_task" in result - assert "test_task_disabled" not in result - - # Should be marked as disabled - assert result["test_task"]["seq"] == "disabled" - assert result["test_task"]["omp"] == "disabled" - - def test_discover_tasks_custom_task_types(self, sample_task_structure): - """Test discovering tasks with custom task types list.""" - # Only look for seq and omp - task_types = ["seq", "omp"] - - result = discover_tasks(sample_task_structure, task_types) - - assert "example_task" in result - assert result["example_task"]["seq"] == "done" - assert result["example_task"]["omp"] == "done" - # stl should not be included even though directory exists - assert "stl" not in result["example_task"] +""" +Tests for the discover_tasks function. 
+""" + +from main import discover_tasks + + +class TestDiscoverTasks: + """Test cases for discover_tasks function.""" + + def test_discover_tasks_with_valid_structure(self, sample_task_structure): + """Test discovering tasks with a valid directory structure.""" + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(sample_task_structure, task_types) + + # Check that tasks are discovered correctly + assert "example_task" in result + assert "disabled_task" in result + assert "partial_task" in result + + # Check task statuses + assert result["example_task"]["seq"] == "done" + assert result["example_task"]["omp"] == "done" + assert result["example_task"]["stl"] == "done" + + assert result["disabled_task"]["seq"] == "disabled" + assert result["disabled_task"]["omp"] == "disabled" + + assert result["partial_task"]["seq"] == "done" + assert "omp" not in result["partial_task"] # No omp implementation + + def test_discover_tasks_empty_directory(self, temp_dir): + """Test discovering tasks in an empty directory.""" + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(temp_dir / "nonexistent", task_types) + + assert result == {} + + def test_discover_tasks_no_task_directories(self, temp_dir): + """Test discovering tasks when no valid task directories exist.""" + tasks_dir = temp_dir / "tasks" + tasks_dir.mkdir() + + # Create common directory (should be ignored) + (tasks_dir / "common").mkdir() + (tasks_dir / "common" / "utils.hpp").touch() + + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(tasks_dir, task_types) + + assert result == {} + + def test_discover_tasks_with_mixed_implementations(self, temp_dir): + """Test discovering tasks with mixed implementation availability.""" + tasks_dir = temp_dir / "tasks" + + # Create task with only some implementations + task_dir = tasks_dir / "mixed_task" + (task_dir / "seq").mkdir(parents=True) + (task_dir / "omp").mkdir(parents=True) + 
# No stl, tbb, all, mpi implementations + + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(tasks_dir, task_types) + + assert "mixed_task" in result + assert result["mixed_task"]["seq"] == "done" + assert result["mixed_task"]["omp"] == "done" + assert "stl" not in result["mixed_task"] + assert "tbb" not in result["mixed_task"] + assert "all" not in result["mixed_task"] + assert "mpi" not in result["mixed_task"] + + def test_discover_tasks_disabled_suffix_handling(self, temp_dir): + """Test correct handling of _disabled suffix in task names.""" + tasks_dir = temp_dir / "tasks" + + # Create disabled task + disabled_dir = tasks_dir / "test_task_disabled" + (disabled_dir / "seq").mkdir(parents=True) + (disabled_dir / "omp").mkdir(parents=True) + + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(tasks_dir, task_types) + + # Should be indexed under clean name without _disabled + assert "test_task" in result + assert "test_task_disabled" not in result + + # Should be marked as disabled + assert result["test_task"]["seq"] == "disabled" + assert result["test_task"]["omp"] == "disabled" + + def test_discover_tasks_custom_task_types(self, sample_task_structure): + """Test discovering tasks with custom task types list.""" + # Only look for seq and omp + task_types = ["seq", "omp"] + + result = discover_tasks(sample_task_structure, task_types) + + assert "example_task" in result + assert result["example_task"]["seq"] == "done" + assert result["example_task"]["omp"] == "done" + # stl should not be included even though directory exists + assert "stl" not in result["example_task"] diff --git a/scoreboard/tests/test_get_solution_points_and_style.py b/scoreboard/tests/test_get_solution_points_and_style.py index e92ac9cf69..f72bee6be3 100644 --- a/scoreboard/tests/test_get_solution_points_and_style.py +++ b/scoreboard/tests/test_get_solution_points_and_style.py @@ -1,90 +1,90 @@ -import pytest -from main import 
get_solution_points_and_style - - -class TestGetSolutionPointsAndStyle: - def test_get_solution_points_done_status(self, sample_config): - sol_points, solution_style = get_solution_points_and_style( - "seq", "done", sample_config - ) - assert sol_points == 4 - assert solution_style == "background-color: lightgreen;" - - sol_points, solution_style = get_solution_points_and_style( - "omp", "done", sample_config - ) - assert sol_points == 6 - assert solution_style == "background-color: lightgreen;" - - sol_points, solution_style = get_solution_points_and_style( - "all", "done", sample_config - ) - assert sol_points == 10 - assert solution_style == "background-color: lightgreen;" - - def test_get_solution_points_disabled_status(self, sample_config): - sol_points, solution_style = get_solution_points_and_style( - "seq", "disabled", sample_config - ) - assert sol_points == 4 - assert solution_style == "background-color: #6495ED;" - - sol_points, solution_style = get_solution_points_and_style( - "omp", "disabled", sample_config - ) - assert sol_points == 6 - assert solution_style == "background-color: #6495ED;" - - sol_points, solution_style = get_solution_points_and_style( - "all", "disabled", sample_config - ) - assert sol_points == 10 - assert solution_style == "background-color: #6495ED;" - - def test_get_solution_points_missing_status(self, sample_config): - sol_points, solution_style = get_solution_points_and_style( - "seq", None, sample_config - ) - assert sol_points == 0 - assert solution_style == "" - - sol_points, solution_style = get_solution_points_and_style( - "omp", "missing", sample_config - ) - assert sol_points == 0 - assert solution_style == "" - - sol_points, solution_style = get_solution_points_and_style( - "all", "", sample_config - ) - assert sol_points == 0 - assert solution_style == "" - - def test_get_solution_points_all_task_types(self, sample_config): - task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] - expected_points = [4, 6, 8, 6, 10, 
0] - - for task_type, expected in zip(task_types, expected_points): - sol_points, solution_style = get_solution_points_and_style( - task_type, "done", sample_config - ) - assert sol_points == expected - assert solution_style == "background-color: lightgreen;" - - def test_get_solution_points_invalid_task_type(self, sample_config): - with pytest.raises((KeyError, ValueError, TypeError)): - get_solution_points_and_style("invalid_type", "done", sample_config) - - def test_get_solution_points_malformed_config(self): - malformed_config = { - "scoreboard": {"task": {"seq": {"solution": {"max": "invalid"}}}} - } - - with pytest.raises((ValueError, TypeError)): - get_solution_points_and_style("seq", "done", malformed_config) - - def test_get_solution_points_missing_config_keys(self): - incomplete_config = {"scoreboard": {}} - - with pytest.raises(KeyError): - get_solution_points_and_style("seq", "done", incomplete_config) +import pytest +from main import get_solution_points_and_style + + +class TestGetSolutionPointsAndStyle: + def test_get_solution_points_done_status(self, sample_config): + sol_points, solution_style = get_solution_points_and_style( + "seq", "done", sample_config + ) + assert sol_points == 4 + assert solution_style == "background-color: lightgreen;" + + sol_points, solution_style = get_solution_points_and_style( + "omp", "done", sample_config + ) + assert sol_points == 6 + assert solution_style == "background-color: lightgreen;" + + sol_points, solution_style = get_solution_points_and_style( + "all", "done", sample_config + ) + assert sol_points == 10 + assert solution_style == "background-color: lightgreen;" + + def test_get_solution_points_disabled_status(self, sample_config): + sol_points, solution_style = get_solution_points_and_style( + "seq", "disabled", sample_config + ) + assert sol_points == 4 + assert solution_style == "background-color: #6495ED;" + + sol_points, solution_style = get_solution_points_and_style( + "omp", "disabled", sample_config + 
) + assert sol_points == 6 + assert solution_style == "background-color: #6495ED;" + + sol_points, solution_style = get_solution_points_and_style( + "all", "disabled", sample_config + ) + assert sol_points == 10 + assert solution_style == "background-color: #6495ED;" + + def test_get_solution_points_missing_status(self, sample_config): + sol_points, solution_style = get_solution_points_and_style( + "seq", None, sample_config + ) + assert sol_points == 0 + assert solution_style == "" + + sol_points, solution_style = get_solution_points_and_style( + "omp", "missing", sample_config + ) + assert sol_points == 0 + assert solution_style == "" + + sol_points, solution_style = get_solution_points_and_style( + "all", "", sample_config + ) + assert sol_points == 0 + assert solution_style == "" + + def test_get_solution_points_all_task_types(self, sample_config): + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + expected_points = [4, 6, 8, 6, 10, 0] + + for task_type, expected in zip(task_types, expected_points): + sol_points, solution_style = get_solution_points_and_style( + task_type, "done", sample_config + ) + assert sol_points == expected + assert solution_style == "background-color: lightgreen;" + + def test_get_solution_points_invalid_task_type(self, sample_config): + with pytest.raises((KeyError, ValueError, TypeError)): + get_solution_points_and_style("invalid_type", "done", sample_config) + + def test_get_solution_points_malformed_config(self): + malformed_config = { + "scoreboard": {"task": {"seq": {"solution": {"max": "invalid"}}}} + } + + with pytest.raises((ValueError, TypeError)): + get_solution_points_and_style("seq", "done", malformed_config) + + def test_get_solution_points_missing_config_keys(self): + incomplete_config = {"scoreboard": {}} + + with pytest.raises(KeyError): + get_solution_points_and_style("seq", "done", incomplete_config) diff --git a/scoreboard/tests/test_load_performance_data.py b/scoreboard/tests/test_load_performance_data.py 
index 45c657a36b..caea3010d6 100644 --- a/scoreboard/tests/test_load_performance_data.py +++ b/scoreboard/tests/test_load_performance_data.py @@ -1,144 +1,144 @@ -""" -Tests for the load_performance_data function. -""" - -import csv -from main import load_performance_data - - -class TestLoadPerformanceData: - """Test cases for load_performance_data function.""" - - def test_load_performance_data_valid_csv(self, sample_performance_csv): - """Test loading performance data from a valid CSV file.""" - result = load_performance_data(sample_performance_csv) - - # Check structure - assert isinstance(result, dict) - assert len(result) == 3 - - # Check example_task data - assert "example_task" in result - example_data = result["example_task"] - assert example_data["seq"] == "1.0" - assert example_data["omp"] == "0.5" - assert example_data["stl"] == "0.3" - assert example_data["tbb"] == "0.4" - assert example_data["all"] == "0.2" - assert example_data["mpi"] == "N/A" - - # Check disabled_task data - assert "disabled_task" in result - disabled_data = result["disabled_task"] - assert disabled_data["seq"] == "2.0" - assert disabled_data["omp"] == "1.0" - - # Check partial_task data - assert "partial_task" in result - partial_data = result["partial_task"] - assert partial_data["seq"] == "1.5" - assert partial_data["omp"] == "N/A" - assert partial_data["mpi"] == "N/A" - - def test_load_performance_data_nonexistent_file(self, temp_dir): - """Test loading performance data when file doesn't exist.""" - nonexistent_file = temp_dir / "nonexistent.csv" - - result = load_performance_data(nonexistent_file) - - assert result == {} - - def test_load_performance_data_empty_csv(self, temp_dir): - """Test loading performance data from an empty CSV file.""" - empty_csv = temp_dir / "empty.csv" - empty_csv.touch() - - result = load_performance_data(empty_csv) - - assert result == {} - - def test_load_performance_data_header_only_csv(self, temp_dir): - """Test loading performance data from CSV 
with only headers.""" - header_only_csv = temp_dir / "header_only.csv" - - with open(header_only_csv, "w", newline="") as f: - writer = csv.DictWriter( - f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] - ) - writer.writeheader() - - result = load_performance_data(header_only_csv) - - assert result == {} - - def test_load_performance_data_malformed_csv(self, temp_dir): - """Test loading performance data from malformed CSV.""" - malformed_csv = temp_dir / "malformed.csv" - - with open(malformed_csv, "w") as f: - f.write("Task,SEQ,OMP\n") - f.write("test_task,1.0\n") # Missing OMP value - f.write("another_task,invalid,0.5\n") # Invalid SEQ value - - # Should not crash, but may have incomplete data - result = load_performance_data(malformed_csv) - - # Function should handle this gracefully - assert isinstance(result, dict) - - def test_load_performance_data_missing_columns(self, temp_dir): - """Test loading performance data when some columns are missing.""" - partial_csv = temp_dir / "partial.csv" - - data = [ - {"Task": "test_task", "SEQ": "1.0", "OMP": "0.5"} - # Missing STL, TBB, ALL columns - ] - - with open(partial_csv, "w", newline="") as f: - writer = csv.DictWriter(f, fieldnames=["Task", "SEQ", "OMP"]) - writer.writeheader() - writer.writerows(data) - - # Should handle missing columns gracefully - result = load_performance_data(partial_csv) - - assert "test_task" in result - # Missing columns should be handled (likely as empty strings or errors) - task_data = result["test_task"] - assert task_data["seq"] == "1.0" - assert task_data["omp"] == "0.5" - assert task_data["mpi"] == "N/A" # This should always be set - - def test_load_performance_data_special_values(self, temp_dir): - """Test loading performance data with special values.""" - special_csv = temp_dir / "special.csv" - - data = [ - { - "Task": "special_task", - "SEQ": "0.0", - "OMP": "inf", - "STL": "-1", - "TBB": "", - "ALL": "N/A", - } - ] - - with open(special_csv, "w", newline="") as f: - 
writer = csv.DictWriter( - f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] - ) - writer.writeheader() - writer.writerows(data) - - result = load_performance_data(special_csv) - - assert "special_task" in result - task_data = result["special_task"] - assert task_data["seq"] == "0.0" - assert task_data["omp"] == "inf" - assert task_data["stl"] == "-1" - assert task_data["tbb"] == "" - assert task_data["all"] == "N/A" - assert task_data["mpi"] == "N/A" +""" +Tests for the load_performance_data function. +""" + +import csv +from main import load_performance_data + + +class TestLoadPerformanceData: + """Test cases for load_performance_data function.""" + + def test_load_performance_data_valid_csv(self, sample_performance_csv): + """Test loading performance data from a valid CSV file.""" + result = load_performance_data(sample_performance_csv) + + # Check structure + assert isinstance(result, dict) + assert len(result) == 3 + + # Check example_task data + assert "example_task" in result + example_data = result["example_task"] + assert example_data["seq"] == "1.0" + assert example_data["omp"] == "0.5" + assert example_data["stl"] == "0.3" + assert example_data["tbb"] == "0.4" + assert example_data["all"] == "0.2" + assert example_data["mpi"] == "N/A" + + # Check disabled_task data + assert "disabled_task" in result + disabled_data = result["disabled_task"] + assert disabled_data["seq"] == "2.0" + assert disabled_data["omp"] == "1.0" + + # Check partial_task data + assert "partial_task" in result + partial_data = result["partial_task"] + assert partial_data["seq"] == "1.5" + assert partial_data["omp"] == "N/A" + assert partial_data["mpi"] == "N/A" + + def test_load_performance_data_nonexistent_file(self, temp_dir): + """Test loading performance data when file doesn't exist.""" + nonexistent_file = temp_dir / "nonexistent.csv" + + result = load_performance_data(nonexistent_file) + + assert result == {} + + def test_load_performance_data_empty_csv(self, temp_dir): 
+ """Test loading performance data from an empty CSV file.""" + empty_csv = temp_dir / "empty.csv" + empty_csv.touch() + + result = load_performance_data(empty_csv) + + assert result == {} + + def test_load_performance_data_header_only_csv(self, temp_dir): + """Test loading performance data from CSV with only headers.""" + header_only_csv = temp_dir / "header_only.csv" + + with open(header_only_csv, "w", newline="") as f: + writer = csv.DictWriter( + f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] + ) + writer.writeheader() + + result = load_performance_data(header_only_csv) + + assert result == {} + + def test_load_performance_data_malformed_csv(self, temp_dir): + """Test loading performance data from malformed CSV.""" + malformed_csv = temp_dir / "malformed.csv" + + with open(malformed_csv, "w") as f: + f.write("Task,SEQ,OMP\n") + f.write("test_task,1.0\n") # Missing OMP value + f.write("another_task,invalid,0.5\n") # Invalid SEQ value + + # Should not crash, but may have incomplete data + result = load_performance_data(malformed_csv) + + # Function should handle this gracefully + assert isinstance(result, dict) + + def test_load_performance_data_missing_columns(self, temp_dir): + """Test loading performance data when some columns are missing.""" + partial_csv = temp_dir / "partial.csv" + + data = [ + {"Task": "test_task", "SEQ": "1.0", "OMP": "0.5"} + # Missing STL, TBB, ALL columns + ] + + with open(partial_csv, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["Task", "SEQ", "OMP"]) + writer.writeheader() + writer.writerows(data) + + # Should handle missing columns gracefully + result = load_performance_data(partial_csv) + + assert "test_task" in result + # Missing columns should be handled (likely as empty strings or errors) + task_data = result["test_task"] + assert task_data["seq"] == "1.0" + assert task_data["omp"] == "0.5" + assert task_data["mpi"] == "N/A" # This should always be set + + def 
test_load_performance_data_special_values(self, temp_dir): + """Test loading performance data with special values.""" + special_csv = temp_dir / "special.csv" + + data = [ + { + "Task": "special_task", + "SEQ": "0.0", + "OMP": "inf", + "STL": "-1", + "TBB": "", + "ALL": "N/A", + } + ] + + with open(special_csv, "w", newline="") as f: + writer = csv.DictWriter( + f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] + ) + writer.writeheader() + writer.writerows(data) + + result = load_performance_data(special_csv) + + assert "special_task" in result + task_data = result["special_task"] + assert task_data["seq"] == "0.0" + assert task_data["omp"] == "inf" + assert task_data["stl"] == "-1" + assert task_data["tbb"] == "" + assert task_data["all"] == "N/A" + assert task_data["mpi"] == "N/A" diff --git a/scripts/create_perf_table.py b/scripts/create_perf_table.py index 5e096358df..632d6cb78e 100644 --- a/scripts/create_perf_table.py +++ b/scripts/create_perf_table.py @@ -1,285 +1,285 @@ -import argparse -import os -import re -import xlsxwriter -import csv - -# ------------------------------- -# Helpers and configuration -# ------------------------------- - -# Known task types (used to pre-initialize tables) -list_of_type_of_tasks = ["all", "mpi", "omp", "seq", "stl", "tbb"] - -# Compile patterns once -OLD_PATTERN = re.compile(r"tasks[\/|\\](\w*)[\/|\\](\w*):(\w*):(-*\d*\.\d*)") -NEW_PATTERN = re.compile( - r"(\w+_test_task_(threads|processes))_(\w+)_enabled:(\w*):(-*\d*\.\d*)" -) -# Example formats: -# example_threads_omp_enabled:task_run:0.4749 -# example_processes_2_mpi_enabled:pipeline:0.0507 -SIMPLE_PATTERN = re.compile( - r"(.+?)_(omp|seq|tbb|stl|all|mpi)_enabled:(task_run|pipeline):(-*\d*\.\d*)" -) - - -def _ensure_task_tables(result_tables: dict, perf_type: str, task_name: str) -> None: - if perf_type not in result_tables: - result_tables[perf_type] = {} - if task_name not in result_tables[perf_type]: - result_tables[perf_type][task_name] = {t: -1.0 for t in 
list_of_type_of_tasks} - - -def _infer_category(task_name: str) -> str: - return "threads" if "threads" in task_name else "processes" - - -def _columns_for_category(category: str) -> list[str]: - return ( - ["seq", "omp", "tbb", "stl", "all"] if category == "threads" else ["seq", "mpi"] - ) - - -def _write_excel_sheet( - workbook, - worksheet, - cpu_num: int, - tasks_list: list[str], - cols: list[str], - table: dict, -): - worksheet.set_column("A:Z", 23) - right_bold_border = workbook.add_format({"bold": True, "right": 2, "bottom": 2}) - bottom_bold_border = workbook.add_format({"bold": True, "bottom": 2}) - right_border = workbook.add_format({"right": 2}) - - worksheet.write(0, 0, "cpu_num = " + str(cpu_num), right_bold_border) - - # Header (T_x, S, Eff) per column - col = 1 - for ttype in cols: - worksheet.write(0, col, f"T_{ttype}({cpu_num})", bottom_bold_border) - col += 1 - worksheet.write( - 0, - col, - f"S({cpu_num}) = T_seq({cpu_num}) / T_{ttype}({cpu_num})", - bottom_bold_border, - ) - col += 1 - worksheet.write( - 0, col, f"Eff({cpu_num}) = S({cpu_num}) / {cpu_num}", right_bold_border - ) - col += 1 - - # Task rows - row = 1 - for task_name in tasks_list: - worksheet.write( - row, 0, task_name, workbook.add_format({"bold": True, "right": 2}) - ) - row += 1 - - # Values - row = 1 - for task_name in tasks_list: - col = 1 - for ttype in cols: - if task_name not in table: - # no data for task at all - worksheet.write(row, col, "—") - col += 1 - worksheet.write(row, col, "—") - col += 1 - worksheet.write(row, col, "—", right_border) - col += 1 - continue - par_time = table[task_name].get(ttype, -1.0) - seq_time = table[task_name].get("seq", -1.0) - if par_time in (0.0, -1.0) or seq_time in (0.0, -1.0): - speed_up = "—" - efficiency = "—" - else: - speed_up = seq_time / par_time - efficiency = speed_up / cpu_num - worksheet.write(row, col, par_time if par_time != -1.0 else "?") - col += 1 - worksheet.write(row, col, speed_up) - col += 1 - worksheet.write(row, 
col, efficiency, right_border) - col += 1 - row += 1 - - -def _write_csv(path: str, header: list[str], tasks_list: list[str], table: dict): - with open(path, "w", newline="") as csvfile: - writer = csv.writer(csvfile) - writer.writerow(header) - for task_name in tasks_list: - seq_time = table.get(task_name, {}).get("seq", -1.0) - if seq_time in (0.0, -1.0): - writer.writerow([task_name] + ["?" for _ in header[1:]]) - continue - row = [task_name, 1.0] - # Remaining headers correspond to columns starting from 2 - for col_name in header[2:]: - val = table[task_name].get(col_name.lower(), -1.0) - row.append(val / seq_time if val != -1.0 else "?") - writer.writerow(row) - - -parser = argparse.ArgumentParser() -parser.add_argument( - "-i", "--input", help="Input file path (logs of perf tests, .txt)", required=True -) -parser.add_argument( - "-o", "--output", help="Output file path (path to .xlsx table)", required=True -) -args = parser.parse_args() -logs_path = os.path.abspath(args.input) -xlsx_path = os.path.abspath(args.output) - -# For each perf_type (pipeline/task_run) store times per task -result_tables = {"pipeline": {}, "task_run": {}} -# Map task name -> category (threads|processes) -task_categories = {} -# Track tasks per category to split output -tasks_by_category = {"threads": set(), "processes": set()} - -with open(logs_path, "r") as logs_file: - logs_lines = logs_file.readlines() -for line in logs_lines: - # Handle both old format: tasks/task_type/task_name:perf_type:time - # and new format: namespace_task_type_enabled:perf_type:time - old_result = OLD_PATTERN.findall(line) - new_result = NEW_PATTERN.findall(line) - simple_result = SIMPLE_PATTERN.findall(line) - - if len(old_result): - task_name = old_result[0][1] - perf_type = old_result[0][2] - # legacy: track task in threads category by default - _ensure_task_tables(result_tables, perf_type, task_name) - # Unknown category in legacy format; default to threads - task_categories[task_name] = "threads" - 
tasks_by_category["threads"].add(task_name) - elif len(new_result): - # Extract task name from namespace (e.g., "example_threads" from "nesterov_a_test_task_threads") - full_task_name = new_result[0][0] - task_category = new_result[0][1] # "threads" or "processes" - task_name = f"example_{task_category}" - perf_type = new_result[0][3] - - # no set tracking needed; category mapping below - - _ensure_task_tables(result_tables, perf_type, task_name) - task_categories[task_name] = task_category - tasks_by_category[task_category].add(task_name) - elif len(simple_result): - # Extract task name in the current format (prefix already includes category suffix) - task_name = simple_result[0][0] - # Infer category by substring - task_category = "threads" if "threads" in task_name else "processes" - perf_type = simple_result[0][2] - - # no set tracking needed; category mapping below - - _ensure_task_tables(result_tables, perf_type, task_name) - task_categories[task_name] = task_category - tasks_by_category[task_category].add(task_name) - -for line in logs_lines: - # Handle both old format: tasks/task_type/task_name:perf_type:time - # and new format: namespace_task_type_enabled:perf_type:time - old_result = OLD_PATTERN.findall(line) - new_result = NEW_PATTERN.findall(line) - simple_result = SIMPLE_PATTERN.findall(line) - - if len(old_result): - task_type = old_result[0][0] - task_name = old_result[0][1] - perf_type = old_result[0][2] - perf_time = float(old_result[0][3]) - if perf_time < 0.001: - msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" - raise Exception(msg) - result_tables[perf_type][task_name][task_type] = perf_time - elif len(new_result): - # Extract task details from namespace format - task_category = new_result[0][1] # "threads" or "processes" - task_type = new_result[0][2] # "all", "omp", "seq", etc. 
- perf_type = new_result[0][3] - perf_time = float(new_result[0][4]) - task_name = f"example_{task_category}" - - if perf_time < 0.001: - msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" - raise Exception(msg) - - if task_name in result_tables[perf_type]: - result_tables[perf_type][task_name][task_type] = perf_time - task_categories[task_name] = task_category - tasks_by_category[task_category].add(task_name) - elif len(simple_result): - # Extract details from the simplified pattern (current logs) - task_name = simple_result[0][0] - # Infer category by substring present in task_name - task_category = "threads" if "threads" in task_name else "processes" - task_type = simple_result[0][1] - perf_type = simple_result[0][2] - perf_time = float(simple_result[0][3]) - - if perf_time < 0.001: - msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" - raise Exception(msg) - - if perf_type not in result_tables: - result_tables[perf_type] = {} - if task_name not in result_tables[perf_type]: - result_tables[perf_type][task_name] = {} - for ttype in list_of_type_of_tasks: - result_tables[perf_type][task_name][ttype] = -1.0 - result_tables[perf_type][task_name][task_type] = perf_time - task_categories[task_name] = task_category - tasks_by_category[task_category].add(task_name) - - -for table_name, table_data in result_tables.items(): - # Prepare two workbooks/CSVs: threads and processes - for category in ["threads", "processes"]: - tasks_list = sorted(tasks_by_category[category]) - if not tasks_list: - continue - - # Use appropriate env var per category - if category == "threads": - cpu_num_env = os.environ.get("PPC_NUM_THREADS") - if cpu_num_env is None: - raise EnvironmentError( - "Required environment variable 'PPC_NUM_THREADS' is not set." 
- ) - else: - cpu_num_env = os.environ.get("PPC_NUM_PROC") - if cpu_num_env is None: - raise EnvironmentError( - "Required environment variable 'PPC_NUM_PROC' is not set." - ) - cpu_num = int(cpu_num_env) - cols = _columns_for_category(category) - - # Excel - wb_path = os.path.join( - xlsx_path, f"{category}_" + table_name + "_perf_table.xlsx" - ) - workbook = xlsxwriter.Workbook(wb_path) - worksheet = workbook.add_worksheet() - _write_excel_sheet(workbook, worksheet, cpu_num, tasks_list, cols, table_data) - workbook.close() - - # CSV - header = ["Task", "SEQ"] + [c.upper() for c in cols[1:]] - csv_path = os.path.join( - xlsx_path, f"{category}_" + table_name + "_perf_table.csv" - ) - _write_csv(csv_path, header, tasks_list, table_data) +import argparse +import os +import re +import xlsxwriter +import csv + +# ------------------------------- +# Helpers and configuration +# ------------------------------- + +# Known task types (used to pre-initialize tables) +list_of_type_of_tasks = ["all", "mpi", "omp", "seq", "stl", "tbb"] + +# Compile patterns once +OLD_PATTERN = re.compile(r"tasks[\/|\\](\w*)[\/|\\](\w*):(\w*):(-*\d*\.\d*)") +NEW_PATTERN = re.compile( + r"(\w+_test_task_(threads|processes))_(\w+)_enabled:(\w*):(-*\d*\.\d*)" +) +# Example formats: +# example_threads_omp_enabled:task_run:0.4749 +# example_processes_2_mpi_enabled:pipeline:0.0507 +SIMPLE_PATTERN = re.compile( + r"(.+?)_(omp|seq|tbb|stl|all|mpi)_enabled:(task_run|pipeline):(-*\d*\.\d*)" +) + + +def _ensure_task_tables(result_tables: dict, perf_type: str, task_name: str) -> None: + if perf_type not in result_tables: + result_tables[perf_type] = {} + if task_name not in result_tables[perf_type]: + result_tables[perf_type][task_name] = {t: -1.0 for t in list_of_type_of_tasks} + + +def _infer_category(task_name: str) -> str: + return "threads" if "threads" in task_name else "processes" + + +def _columns_for_category(category: str) -> list[str]: + return ( + ["seq", "omp", "tbb", "stl", "all"] if category 
== "threads" else ["seq", "mpi"] + ) + + +def _write_excel_sheet( + workbook, + worksheet, + cpu_num: int, + tasks_list: list[str], + cols: list[str], + table: dict, +): + worksheet.set_column("A:Z", 23) + right_bold_border = workbook.add_format({"bold": True, "right": 2, "bottom": 2}) + bottom_bold_border = workbook.add_format({"bold": True, "bottom": 2}) + right_border = workbook.add_format({"right": 2}) + + worksheet.write(0, 0, "cpu_num = " + str(cpu_num), right_bold_border) + + # Header (T_x, S, Eff) per column + col = 1 + for ttype in cols: + worksheet.write(0, col, f"T_{ttype}({cpu_num})", bottom_bold_border) + col += 1 + worksheet.write( + 0, + col, + f"S({cpu_num}) = T_seq({cpu_num}) / T_{ttype}({cpu_num})", + bottom_bold_border, + ) + col += 1 + worksheet.write( + 0, col, f"Eff({cpu_num}) = S({cpu_num}) / {cpu_num}", right_bold_border + ) + col += 1 + + # Task rows + row = 1 + for task_name in tasks_list: + worksheet.write( + row, 0, task_name, workbook.add_format({"bold": True, "right": 2}) + ) + row += 1 + + # Values + row = 1 + for task_name in tasks_list: + col = 1 + for ttype in cols: + if task_name not in table: + # no data for task at all + worksheet.write(row, col, "—") + col += 1 + worksheet.write(row, col, "—") + col += 1 + worksheet.write(row, col, "—", right_border) + col += 1 + continue + par_time = table[task_name].get(ttype, -1.0) + seq_time = table[task_name].get("seq", -1.0) + if par_time in (0.0, -1.0) or seq_time in (0.0, -1.0): + speed_up = "—" + efficiency = "—" + else: + speed_up = seq_time / par_time + efficiency = speed_up / cpu_num + worksheet.write(row, col, par_time if par_time != -1.0 else "?") + col += 1 + worksheet.write(row, col, speed_up) + col += 1 + worksheet.write(row, col, efficiency, right_border) + col += 1 + row += 1 + + +def _write_csv(path: str, header: list[str], tasks_list: list[str], table: dict): + with open(path, "w", newline="") as csvfile: + writer = csv.writer(csvfile) + writer.writerow(header) + for 
task_name in tasks_list: + seq_time = table.get(task_name, {}).get("seq", -1.0) + if seq_time in (0.0, -1.0): + writer.writerow([task_name] + ["?" for _ in header[1:]]) + continue + row = [task_name, 1.0] + # Remaining headers correspond to columns starting from 2 + for col_name in header[2:]: + val = table[task_name].get(col_name.lower(), -1.0) + row.append(val / seq_time if val != -1.0 else "?") + writer.writerow(row) + + +parser = argparse.ArgumentParser() +parser.add_argument( + "-i", "--input", help="Input file path (logs of perf tests, .txt)", required=True +) +parser.add_argument( + "-o", "--output", help="Output file path (path to .xlsx table)", required=True +) +args = parser.parse_args() +logs_path = os.path.abspath(args.input) +xlsx_path = os.path.abspath(args.output) + +# For each perf_type (pipeline/task_run) store times per task +result_tables = {"pipeline": {}, "task_run": {}} +# Map task name -> category (threads|processes) +task_categories = {} +# Track tasks per category to split output +tasks_by_category = {"threads": set(), "processes": set()} + +with open(logs_path, "r") as logs_file: + logs_lines = logs_file.readlines() +for line in logs_lines: + # Handle both old format: tasks/task_type/task_name:perf_type:time + # and new format: namespace_task_type_enabled:perf_type:time + old_result = OLD_PATTERN.findall(line) + new_result = NEW_PATTERN.findall(line) + simple_result = SIMPLE_PATTERN.findall(line) + + if len(old_result): + task_name = old_result[0][1] + perf_type = old_result[0][2] + # legacy: track task in threads category by default + _ensure_task_tables(result_tables, perf_type, task_name) + # Unknown category in legacy format; default to threads + task_categories[task_name] = "threads" + tasks_by_category["threads"].add(task_name) + elif len(new_result): + # Extract task name from namespace (e.g., "example_threads" from "nesterov_a_test_task_threads") + full_task_name = new_result[0][0] + task_category = new_result[0][1] # "threads" or 
"processes" + task_name = f"example_{task_category}" + perf_type = new_result[0][3] + + # no set tracking needed; category mapping below + + _ensure_task_tables(result_tables, perf_type, task_name) + task_categories[task_name] = task_category + tasks_by_category[task_category].add(task_name) + elif len(simple_result): + # Extract task name in the current format (prefix already includes category suffix) + task_name = simple_result[0][0] + # Infer category by substring + task_category = "threads" if "threads" in task_name else "processes" + perf_type = simple_result[0][2] + + # no set tracking needed; category mapping below + + _ensure_task_tables(result_tables, perf_type, task_name) + task_categories[task_name] = task_category + tasks_by_category[task_category].add(task_name) + +for line in logs_lines: + # Handle both old format: tasks/task_type/task_name:perf_type:time + # and new format: namespace_task_type_enabled:perf_type:time + old_result = OLD_PATTERN.findall(line) + new_result = NEW_PATTERN.findall(line) + simple_result = SIMPLE_PATTERN.findall(line) + + if len(old_result): + task_type = old_result[0][0] + task_name = old_result[0][1] + perf_type = old_result[0][2] + perf_time = float(old_result[0][3]) + if perf_time < 0.001: + msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" + raise Exception(msg) + result_tables[perf_type][task_name][task_type] = perf_time + elif len(new_result): + # Extract task details from namespace format + task_category = new_result[0][1] # "threads" or "processes" + task_type = new_result[0][2] # "all", "omp", "seq", etc. 
+ perf_type = new_result[0][3] + perf_time = float(new_result[0][4]) + task_name = f"example_{task_category}" + + if perf_time < 0.001: + msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" + raise Exception(msg) + + if task_name in result_tables[perf_type]: + result_tables[perf_type][task_name][task_type] = perf_time + task_categories[task_name] = task_category + tasks_by_category[task_category].add(task_name) + elif len(simple_result): + # Extract details from the simplified pattern (current logs) + task_name = simple_result[0][0] + # Infer category by substring present in task_name + task_category = "threads" if "threads" in task_name else "processes" + task_type = simple_result[0][1] + perf_type = simple_result[0][2] + perf_time = float(simple_result[0][3]) + + if perf_time < 0.001: + msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" + raise Exception(msg) + + if perf_type not in result_tables: + result_tables[perf_type] = {} + if task_name not in result_tables[perf_type]: + result_tables[perf_type][task_name] = {} + for ttype in list_of_type_of_tasks: + result_tables[perf_type][task_name][ttype] = -1.0 + result_tables[perf_type][task_name][task_type] = perf_time + task_categories[task_name] = task_category + tasks_by_category[task_category].add(task_name) + + +for table_name, table_data in result_tables.items(): + # Prepare two workbooks/CSVs: threads and processes + for category in ["threads", "processes"]: + tasks_list = sorted(tasks_by_category[category]) + if not tasks_list: + continue + + # Use appropriate env var per category + if category == "threads": + cpu_num_env = os.environ.get("PPC_NUM_THREADS") + if cpu_num_env is None: + raise EnvironmentError( + "Required environment variable 'PPC_NUM_THREADS' is not set." 
+ ) + else: + cpu_num_env = os.environ.get("PPC_NUM_PROC") + if cpu_num_env is None: + raise EnvironmentError( + "Required environment variable 'PPC_NUM_PROC' is not set." + ) + cpu_num = int(cpu_num_env) + cols = _columns_for_category(category) + + # Excel + wb_path = os.path.join( + xlsx_path, f"{category}_" + table_name + "_perf_table.xlsx" + ) + workbook = xlsxwriter.Workbook(wb_path) + worksheet = workbook.add_worksheet() + _write_excel_sheet(workbook, worksheet, cpu_num, tasks_list, cols, table_data) + workbook.close() + + # CSV + header = ["Task", "SEQ"] + [c.upper() for c in cols[1:]] + csv_path = os.path.join( + xlsx_path, f"{category}_" + table_name + "_perf_table.csv" + ) + _write_csv(csv_path, header, tasks_list, table_data) diff --git a/scripts/generate_perf_results.bat b/scripts/generate_perf_results.bat index a7d72690ab..f787683a2c 100644 --- a/scripts/generate_perf_results.bat +++ b/scripts/generate_perf_results.bat @@ -1,4 +1,4 @@ -@echo off -mkdir build\perf_stat_dir -scripts/run_tests.py --running-type="performance" > build\perf_stat_dir\perf_log.txt -python scripts\create_perf_table.py --input build\perf_stat_dir\perf_log.txt --output build\perf_stat_dir +@echo off +mkdir build\perf_stat_dir +scripts/run_tests.py --running-type="performance" > build\perf_stat_dir\perf_log.txt +python scripts\create_perf_table.py --input build\perf_stat_dir\perf_log.txt --output build\perf_stat_dir diff --git a/scripts/generate_perf_results.sh b/scripts/generate_perf_results.sh index da317f4278..b080d9e0f4 100644 --- a/scripts/generate_perf_results.sh +++ b/scripts/generate_perf_results.sh @@ -1,6 +1,6 @@ -#!/usr/bin/env bash -set -euo pipefail - -mkdir -p build/perf_stat_dir -scripts/run_tests.py --running-type="performance" | tee build/perf_stat_dir/perf_log.txt -python3 scripts/create_perf_table.py --input build/perf_stat_dir/perf_log.txt --output build/perf_stat_dir +#!/usr/bin/env bash +set -euo pipefail + +mkdir -p build/perf_stat_dir +scripts/run_tests.py 
--running-type="performance" | tee build/perf_stat_dir/perf_log.txt +python3 scripts/create_perf_table.py --input build/perf_stat_dir/perf_log.txt --output build/perf_stat_dir diff --git a/scripts/jobs_graph.py b/scripts/jobs_graph.py index 52824d22c8..0d22708aa5 100644 --- a/scripts/jobs_graph.py +++ b/scripts/jobs_graph.py @@ -1,46 +1,46 @@ -import os - -try: - import yaml -except ImportError: - print("Please install pyyaml: pip install pyyaml") - exit(1) - -try: - import graphviz -except ImportError: - print("Please install graphviz: pip install graphviz") - exit(1) - - -def parse_gha_yml(file_path): - with open(file_path, "r") as file: - gha_data = yaml.safe_load(file) - return gha_data - - -def build_jobs_graph(gha_data): - jobs = gha_data.get("jobs", {}) - dot = graphviz.Digraph() - - for job_name, job_data in jobs.items(): - dot.node(job_name) - needs = job_data.get("needs", []) - if isinstance(needs, str): - needs = [needs] - for dependency in needs: - dot.edge(dependency, job_name) - - return dot - - -def save_graph(dot, filename, file_format): - dot.render(filename, format=file_format, cleanup=True) - - -if __name__ == "__main__": - gha_file_path = os.path.join(".github", "workflows", "main.yml") - svg_path = os.path.join("docs", "_static", "ci_graph") - gha_data = parse_gha_yml(gha_file_path) - jobs_graph = build_jobs_graph(gha_data) - save_graph(jobs_graph, svg_path, "svg") +import os + +try: + import yaml +except ImportError: + print("Please install pyyaml: pip install pyyaml") + exit(1) + +try: + import graphviz +except ImportError: + print("Please install graphviz: pip install graphviz") + exit(1) + + +def parse_gha_yml(file_path): + with open(file_path, "r") as file: + gha_data = yaml.safe_load(file) + return gha_data + + +def build_jobs_graph(gha_data): + jobs = gha_data.get("jobs", {}) + dot = graphviz.Digraph() + + for job_name, job_data in jobs.items(): + dot.node(job_name) + needs = job_data.get("needs", []) + if isinstance(needs, str): + needs 
= [needs] + for dependency in needs: + dot.edge(dependency, job_name) + + return dot + + +def save_graph(dot, filename, file_format): + dot.render(filename, format=file_format, cleanup=True) + + +if __name__ == "__main__": + gha_file_path = os.path.join(".github", "workflows", "main.yml") + svg_path = os.path.join("docs", "_static", "ci_graph") + gha_data = parse_gha_yml(gha_file_path) + jobs_graph = build_jobs_graph(gha_data) + save_graph(jobs_graph, svg_path, "svg") diff --git a/scripts/run_tests.py b/scripts/run_tests.py index f8477c56b6..65ec0eec5e 100755 --- a/scripts/run_tests.py +++ b/scripts/run_tests.py @@ -1,285 +1,285 @@ -#!/usr/bin/env python3 - -import os -import shlex -import subprocess -import platform -from pathlib import Path - - -def init_cmd_args(): - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument( - "--running-type", - required=True, - choices=["threads", "processes", "performance"], - help="Specify the execution mode. Choose 'threads' for multithreading or 'processes' for multiprocessing.", - ) - parser.add_argument( - "--additional-mpi-args", - required=False, - default="", - help="Additional MPI arguments to pass to the mpirun command (optional).", - ) - parser.add_argument( - "--counts", - nargs="+", - type=int, - help="List of process/thread counts to run sequentially", - ) - parser.add_argument( - "--verbose", action="store_true", help="Print commands executed by the script" - ) - args = parser.parse_args() - _args_dict = vars(args) - return _args_dict - - -class PPCRunner: - def __init__(self, verbose=False): - self.__ppc_num_threads = None - self.__ppc_num_proc = None - self.__ppc_env = None - self.work_dir = None - self.verbose = verbose - - self.valgrind_cmd = ( - "valgrind --error-exitcode=1 --leak-check=full --show-leak-kinds=all" - ) - - if platform.system() == "Windows": - self.mpi_exec = "mpiexec" - else: - self.mpi_exec = "mpirun" - self.platform = platform.system() - - # Detect MPI implementation to 
choose compatible flags - self.mpi_env_mode = "unknown" # one of: openmpi, mpich, unknown - self.mpi_np_flag = "-np" - if self.platform == "Windows": - # MSMPI uses -env and -n - self.mpi_env_mode = "mpich" - self.mpi_np_flag = "-n" - else: - self.mpi_env_mode, self.mpi_np_flag = self.__detect_mpi_impl() - - @staticmethod - def __get_project_path(): - script_path = Path(__file__).resolve() # Absolute path of the script - script_dir = script_path.parent # Directory containing the script - return script_dir.parent - - def setup_env(self, ppc_env): - self.__ppc_env = ppc_env - - self.__ppc_num_threads = self.__ppc_env.get("PPC_NUM_THREADS") - if self.__ppc_num_threads is None: - raise EnvironmentError( - "Required environment variable 'PPC_NUM_THREADS' is not set." - ) - self.__ppc_env["OMP_NUM_THREADS"] = self.__ppc_num_threads - - self.__ppc_num_proc = self.__ppc_env.get("PPC_NUM_PROC") - if self.__ppc_num_proc is None: - raise EnvironmentError( - "Required environment variable 'PPC_NUM_PROC' is not set." - ) - - if (Path(self.__get_project_path()) / "install").exists(): - self.work_dir = Path(self.__get_project_path()) / "install" / "bin" - else: - self.work_dir = Path(self.__get_project_path()) / "build" / "bin" - - def __run_exec(self, command): - if self.verbose: - print("Executing:", " ".join(shlex.quote(part) for part in command)) - result = subprocess.run(command, shell=False, env=self.__ppc_env) - if result.returncode != 0: - raise Exception(f"Subprocess return {result.returncode}.") - - def __detect_mpi_impl(self): - """Detect MPI implementation and return (env_mode, np_flag). - env_mode: 'openmpi' -> use '-x VAR', 'mpich' -> use '-genvlist VAR1,VAR2', 'unknown' -> pass no env flags. - np_flag: '-np' for OpenMPI/unknown, '-n' for MPICH-family. 
- """ - probes = (["--version"], ["-V"], ["-v"], ["--help"], ["-help"]) - out = "" - for args in probes: - try: - proc = subprocess.run( - [self.mpi_exec] + list(args), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - ) - out = (proc.stdout or "").lower() - if out: - break - except Exception: - continue - - if "open mpi" in out or "ompi" in out: - return "openmpi", "-np" - if ( - "hydra" in out - or "mpich" in out - or "intel(r) mpi" in out - or "intel mpi" in out - ): - return "mpich", "-n" - return "unknown", "-np" - - def __build_mpi_cmd(self, ppc_num_proc, additional_mpi_args): - base = [self.mpi_exec] + shlex.split(additional_mpi_args) - - if self.platform == "Windows": - # MS-MPI style - env_args = [ - "-env", - "PPC_NUM_THREADS", - self.__ppc_env["PPC_NUM_THREADS"], - "-env", - "OMP_NUM_THREADS", - self.__ppc_env["OMP_NUM_THREADS"], - ] - np_args = ["-n", ppc_num_proc] - return base + env_args + np_args - - # Non-Windows - if self.mpi_env_mode == "openmpi": - env_args = [ - "-x", - "PPC_NUM_THREADS", - "-x", - "OMP_NUM_THREADS", - ] - np_flag = "-np" - elif self.mpi_env_mode == "mpich": - # Explicitly set env variables for all ranks - env_args = [ - "-env", - "PPC_NUM_THREADS", - self.__ppc_env["PPC_NUM_THREADS"], - "-env", - "OMP_NUM_THREADS", - self.__ppc_env["OMP_NUM_THREADS"], - ] - np_flag = "-n" - else: - # Unknown MPI flavor: rely on environment inheritance and default to -np - env_args = [] - np_flag = "-np" - - return base + env_args + [np_flag, ppc_num_proc] - - @staticmethod - def __get_gtest_settings(repeats_count, type_task): - command = [ - f"--gtest_repeat={repeats_count}", - "--gtest_recreate_environments_when_repeating", - "--gtest_color=0", - "--gtest_shuffle", - f"--gtest_filter=*{type_task}*", - ] - return command - - def run_threads(self): - if platform.system() == "Linux" and not self.__ppc_env.get("PPC_ASAN_RUN"): - for task_type in ["seq", "stl"]: - self.__run_exec( - shlex.split(self.valgrind_cmd) - + 
[str(self.work_dir / "ppc_func_tests")] - + self.__get_gtest_settings(1, "_" + task_type + "_") - ) - - for task_type in ["omp", "seq", "stl", "tbb"]: - self.__run_exec( - [str(self.work_dir / "ppc_func_tests")] - + self.__get_gtest_settings(1, "_" + task_type + "_") - ) - - def run_core(self): - if platform.system() == "Linux" and not self.__ppc_env.get("PPC_ASAN_RUN"): - self.__run_exec( - shlex.split(self.valgrind_cmd) - + [str(self.work_dir / "core_func_tests")] - + self.__get_gtest_settings(1, "*") - + ["--gtest_filter=*:-*DisabledValgrind"] - ) - - self.__run_exec( - [str(self.work_dir / "core_func_tests")] + self.__get_gtest_settings(1, "*") - ) - - def run_processes(self, additional_mpi_args): - ppc_num_proc = self.__ppc_env.get("PPC_NUM_PROC") - if ppc_num_proc is None: - raise EnvironmentError( - "Required environment variable 'PPC_NUM_PROC' is not set." - ) - mpi_running = self.__build_mpi_cmd(ppc_num_proc, additional_mpi_args) - if not self.__ppc_env.get("PPC_ASAN_RUN"): - for task_type in ["all", "mpi"]: - self.__run_exec( - mpi_running - + [str(self.work_dir / "ppc_func_tests")] - + self.__get_gtest_settings(1, "_" + task_type + "_") - ) - - def run_performance(self): - if not self.__ppc_env.get("PPC_ASAN_RUN"): - mpi_running = self.__build_mpi_cmd(self.__ppc_num_proc, "") - for task_type in ["all", "mpi"]: - self.__run_exec( - mpi_running - + [str(self.work_dir / "ppc_perf_tests")] - + self.__get_gtest_settings(1, "_" + task_type + "_") - ) - - for task_type in ["omp", "seq", "stl", "tbb"]: - self.__run_exec( - [str(self.work_dir / "ppc_perf_tests")] - + self.__get_gtest_settings(1, "_" + task_type + "_") - ) - - -def _execute(args_dict, env): - runner = PPCRunner(verbose=args_dict.get("verbose", False)) - runner.setup_env(env) - - if args_dict["running_type"] in ["threads", "processes"]: - runner.run_core() - - if args_dict["running_type"] == "threads": - runner.run_threads() - elif args_dict["running_type"] == "processes": - 
runner.run_processes(args_dict["additional_mpi_args"]) - elif args_dict["running_type"] == "performance": - runner.run_performance() - else: - raise Exception("running-type is wrong!") - - -if __name__ == "__main__": - args_dict = init_cmd_args() - counts = args_dict.get("counts") - - if counts: - for count in counts: - env_copy = os.environ.copy() - - if args_dict["running_type"] == "threads": - env_copy["PPC_NUM_THREADS"] = str(count) - env_copy.setdefault("PPC_NUM_PROC", "1") - elif args_dict["running_type"] == "processes": - env_copy["PPC_NUM_PROC"] = str(count) - env_copy.setdefault("PPC_NUM_THREADS", "1") - - print( - f"Executing with {args_dict['running_type']} count: {count}", flush=True - ) - _execute(args_dict, env_copy) - else: - _execute(args_dict, os.environ.copy()) +#!/usr/bin/env python3 + +import os +import shlex +import subprocess +import platform +from pathlib import Path + + +def init_cmd_args(): + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--running-type", + required=True, + choices=["threads", "processes", "performance"], + help="Specify the execution mode. 
Choose 'threads' for multithreading or 'processes' for multiprocessing.", + ) + parser.add_argument( + "--additional-mpi-args", + required=False, + default="", + help="Additional MPI arguments to pass to the mpirun command (optional).", + ) + parser.add_argument( + "--counts", + nargs="+", + type=int, + help="List of process/thread counts to run sequentially", + ) + parser.add_argument( + "--verbose", action="store_true", help="Print commands executed by the script" + ) + args = parser.parse_args() + _args_dict = vars(args) + return _args_dict + + +class PPCRunner: + def __init__(self, verbose=False): + self.__ppc_num_threads = None + self.__ppc_num_proc = None + self.__ppc_env = None + self.work_dir = None + self.verbose = verbose + + self.valgrind_cmd = ( + "valgrind --error-exitcode=1 --leak-check=full --show-leak-kinds=all" + ) + + if platform.system() == "Windows": + self.mpi_exec = "mpiexec" + else: + self.mpi_exec = "mpirun" + self.platform = platform.system() + + # Detect MPI implementation to choose compatible flags + self.mpi_env_mode = "unknown" # one of: openmpi, mpich, unknown + self.mpi_np_flag = "-np" + if self.platform == "Windows": + # MSMPI uses -env and -n + self.mpi_env_mode = "mpich" + self.mpi_np_flag = "-n" + else: + self.mpi_env_mode, self.mpi_np_flag = self.__detect_mpi_impl() + + @staticmethod + def __get_project_path(): + script_path = Path(__file__).resolve() # Absolute path of the script + script_dir = script_path.parent # Directory containing the script + return script_dir.parent + + def setup_env(self, ppc_env): + self.__ppc_env = ppc_env + + self.__ppc_num_threads = self.__ppc_env.get("PPC_NUM_THREADS") + if self.__ppc_num_threads is None: + raise EnvironmentError( + "Required environment variable 'PPC_NUM_THREADS' is not set." 
+ ) + self.__ppc_env["OMP_NUM_THREADS"] = self.__ppc_num_threads + + self.__ppc_num_proc = self.__ppc_env.get("PPC_NUM_PROC") + if self.__ppc_num_proc is None: + raise EnvironmentError( + "Required environment variable 'PPC_NUM_PROC' is not set." + ) + + if (Path(self.__get_project_path()) / "install").exists(): + self.work_dir = Path(self.__get_project_path()) / "install" / "bin" + else: + self.work_dir = Path(self.__get_project_path()) / "build" / "bin" + + def __run_exec(self, command): + if self.verbose: + print("Executing:", " ".join(shlex.quote(part) for part in command)) + result = subprocess.run(command, shell=False, env=self.__ppc_env) + if result.returncode != 0: + raise Exception(f"Subprocess return {result.returncode}.") + + def __detect_mpi_impl(self): + """Detect MPI implementation and return (env_mode, np_flag). + env_mode: 'openmpi' -> use '-x VAR', 'mpich' -> use '-genvlist VAR1,VAR2', 'unknown' -> pass no env flags. + np_flag: '-np' for OpenMPI/unknown, '-n' for MPICH-family. 
+ """ + probes = (["--version"], ["-V"], ["-v"], ["--help"], ["-help"]) + out = "" + for args in probes: + try: + proc = subprocess.run( + [self.mpi_exec] + list(args), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + out = (proc.stdout or "").lower() + if out: + break + except Exception: + continue + + if "open mpi" in out or "ompi" in out: + return "openmpi", "-np" + if ( + "hydra" in out + or "mpich" in out + or "intel(r) mpi" in out + or "intel mpi" in out + ): + return "mpich", "-n" + return "unknown", "-np" + + def __build_mpi_cmd(self, ppc_num_proc, additional_mpi_args): + base = [self.mpi_exec] + shlex.split(additional_mpi_args) + + if self.platform == "Windows": + # MS-MPI style + env_args = [ + "-env", + "PPC_NUM_THREADS", + self.__ppc_env["PPC_NUM_THREADS"], + "-env", + "OMP_NUM_THREADS", + self.__ppc_env["OMP_NUM_THREADS"], + ] + np_args = ["-n", ppc_num_proc] + return base + env_args + np_args + + # Non-Windows + if self.mpi_env_mode == "openmpi": + env_args = [ + "-x", + "PPC_NUM_THREADS", + "-x", + "OMP_NUM_THREADS", + ] + np_flag = "-np" + elif self.mpi_env_mode == "mpich": + # Explicitly set env variables for all ranks + env_args = [ + "-env", + "PPC_NUM_THREADS", + self.__ppc_env["PPC_NUM_THREADS"], + "-env", + "OMP_NUM_THREADS", + self.__ppc_env["OMP_NUM_THREADS"], + ] + np_flag = "-n" + else: + # Unknown MPI flavor: rely on environment inheritance and default to -np + env_args = [] + np_flag = "-np" + + return base + env_args + [np_flag, ppc_num_proc] + + @staticmethod + def __get_gtest_settings(repeats_count, type_task): + command = [ + f"--gtest_repeat={repeats_count}", + "--gtest_recreate_environments_when_repeating", + "--gtest_color=0", + "--gtest_shuffle", + f"--gtest_filter=*{type_task}*", + ] + return command + + def run_threads(self): + if platform.system() == "Linux" and not self.__ppc_env.get("PPC_ASAN_RUN"): + for task_type in ["seq", "stl"]: + self.__run_exec( + shlex.split(self.valgrind_cmd) + + 
[str(self.work_dir / "ppc_func_tests")] + + self.__get_gtest_settings(1, "_" + task_type + "_") + ) + + for task_type in ["omp", "seq", "stl", "tbb"]: + self.__run_exec( + [str(self.work_dir / "ppc_func_tests")] + + self.__get_gtest_settings(1, "_" + task_type + "_") + ) + + def run_core(self): + if platform.system() == "Linux" and not self.__ppc_env.get("PPC_ASAN_RUN"): + self.__run_exec( + shlex.split(self.valgrind_cmd) + + [str(self.work_dir / "core_func_tests")] + + self.__get_gtest_settings(1, "*") + + ["--gtest_filter=*:-*DisabledValgrind"] + ) + + self.__run_exec( + [str(self.work_dir / "core_func_tests")] + self.__get_gtest_settings(1, "*") + ) + + def run_processes(self, additional_mpi_args): + ppc_num_proc = self.__ppc_env.get("PPC_NUM_PROC") + if ppc_num_proc is None: + raise EnvironmentError( + "Required environment variable 'PPC_NUM_PROC' is not set." + ) + mpi_running = self.__build_mpi_cmd(ppc_num_proc, additional_mpi_args) + if not self.__ppc_env.get("PPC_ASAN_RUN"): + for task_type in ["all", "mpi"]: + self.__run_exec( + mpi_running + + [str(self.work_dir / "ppc_func_tests")] + + self.__get_gtest_settings(1, "_" + task_type + "_") + ) + + def run_performance(self): + if not self.__ppc_env.get("PPC_ASAN_RUN"): + mpi_running = self.__build_mpi_cmd(self.__ppc_num_proc, "") + for task_type in ["all", "mpi"]: + self.__run_exec( + mpi_running + + [str(self.work_dir / "ppc_perf_tests")] + + self.__get_gtest_settings(1, "_" + task_type + "_") + ) + + for task_type in ["omp", "seq", "stl", "tbb"]: + self.__run_exec( + [str(self.work_dir / "ppc_perf_tests")] + + self.__get_gtest_settings(1, "_" + task_type + "_") + ) + + +def _execute(args_dict, env): + runner = PPCRunner(verbose=args_dict.get("verbose", False)) + runner.setup_env(env) + + if args_dict["running_type"] in ["threads", "processes"]: + runner.run_core() + + if args_dict["running_type"] == "threads": + runner.run_threads() + elif args_dict["running_type"] == "processes": + 
runner.run_processes(args_dict["additional_mpi_args"]) + elif args_dict["running_type"] == "performance": + runner.run_performance() + else: + raise Exception("running-type is wrong!") + + +if __name__ == "__main__": + args_dict = init_cmd_args() + counts = args_dict.get("counts") + + if counts: + for count in counts: + env_copy = os.environ.copy() + + if args_dict["running_type"] == "threads": + env_copy["PPC_NUM_THREADS"] = str(count) + env_copy.setdefault("PPC_NUM_PROC", "1") + elif args_dict["running_type"] == "processes": + env_copy["PPC_NUM_PROC"] = str(count) + env_copy.setdefault("PPC_NUM_THREADS", "1") + + print( + f"Executing with {args_dict['running_type']} count: {count}", flush=True + ) + _execute(args_dict, env_copy) + else: + _execute(args_dict, os.environ.copy()) diff --git a/scripts/variants_generation.py b/scripts/variants_generation.py index 7e1284b15a..8ea3db0c65 100644 --- a/scripts/variants_generation.py +++ b/scripts/variants_generation.py @@ -1,67 +1,67 @@ -import csv -import numpy as np -from xlsxwriter.workbook import Workbook -from pathlib import Path - - -def get_project_path(): - script_path = Path(__file__).resolve() # Absolute path of the script - script_dir = script_path.parent # Directory containing the script - return script_dir.parent - - -def generate_group_table(_num_tasks, _num_students, _num_variants, _csv_file): - if _num_tasks != len(_num_variants): - raise Exception( - f"Count of students: {_num_tasks} != count of list of variants: {len(_num_variants)}" - ) - - list_of_tasks = [] - str_of_print = "" - str_of_headers = "" - for i, num_v in zip(range(_num_tasks), _num_variants): - list_of_variants = [] - shuffled_list_of_variants = [] - for j in range(int(_num_students / num_v) + 1): - list_of_variants.append(np.arange(num_v) + 1) - for variant in list_of_variants: - np.random.shuffle(variant) - shuffled_list_of_variants.append(variant) - result_variants = np.concatenate(shuffled_list_of_variants) - 
list_of_tasks.append(result_variants[:_num_students]) - str_of_print += "%d," - str_of_headers += "Task " + str(i + 1) + "," - str_of_print = str_of_print[:-1] - str_of_headers = str_of_headers[:-1] - - np.savetxt( - _csv_file, np.dstack(list_of_tasks)[0], str_of_print, header=str_of_headers - ) - - workbook = Workbook(_csv_file[:-4] + ".xlsx") - worksheet = workbook.add_worksheet() - with open(_csv_file, "rt") as f: - reader = csv.reader(f) - for r, row in enumerate(reader): - for c, col in enumerate(row): - worksheet.write(r, c, col) - workbook.close() - - -if __name__ == "__main__": - # Define the number of tasks - num_tasks = 3 - - # List containing the number of students for each task - list_students = [29, 10, 40] - - # List containing the number of variants (versions) for each task - num_variants = [27, 2, 9] - - # Overall, `path_to_results` represents the file path leading to a csv's and xlsx's directory - path_to_results = Path(get_project_path()) / "build" / "variants_results" - path_to_results.mkdir(parents=True, exist_ok=True) - - for num_students, index in zip(list_students, range(len(list_students))): - csv_path = path_to_results / f"variants_group_{index + 1}.csv" - generate_group_table(num_tasks, num_students, num_variants, csv_path.as_posix()) +import csv +import numpy as np +from xlsxwriter.workbook import Workbook +from pathlib import Path + + +def get_project_path(): + script_path = Path(__file__).resolve() # Absolute path of the script + script_dir = script_path.parent # Directory containing the script + return script_dir.parent + + +def generate_group_table(_num_tasks, _num_students, _num_variants, _csv_file): + if _num_tasks != len(_num_variants): + raise Exception( + f"Count of students: {_num_tasks} != count of list of variants: {len(_num_variants)}" + ) + + list_of_tasks = [] + str_of_print = "" + str_of_headers = "" + for i, num_v in zip(range(_num_tasks), _num_variants): + list_of_variants = [] + shuffled_list_of_variants = [] + for j in 
range(int(_num_students / num_v) + 1): + list_of_variants.append(np.arange(num_v) + 1) + for variant in list_of_variants: + np.random.shuffle(variant) + shuffled_list_of_variants.append(variant) + result_variants = np.concatenate(shuffled_list_of_variants) + list_of_tasks.append(result_variants[:_num_students]) + str_of_print += "%d," + str_of_headers += "Task " + str(i + 1) + "," + str_of_print = str_of_print[:-1] + str_of_headers = str_of_headers[:-1] + + np.savetxt( + _csv_file, np.dstack(list_of_tasks)[0], str_of_print, header=str_of_headers + ) + + workbook = Workbook(_csv_file[:-4] + ".xlsx") + worksheet = workbook.add_worksheet() + with open(_csv_file, "rt") as f: + reader = csv.reader(f) + for r, row in enumerate(reader): + for c, col in enumerate(row): + worksheet.write(r, c, col) + workbook.close() + + +if __name__ == "__main__": + # Define the number of tasks + num_tasks = 3 + + # List containing the number of students for each task + list_students = [29, 10, 40] + + # List containing the number of variants (versions) for each task + num_variants = [27, 2, 9] + + # Overall, `path_to_results` represents the file path leading to a csv's and xlsx's directory + path_to_results = Path(get_project_path()) / "build" / "variants_results" + path_to_results.mkdir(parents=True, exist_ok=True) + + for num_students, index in zip(list_students, range(len(list_students))): + csv_path = path_to_results / f"variants_group_{index + 1}.csv" + generate_group_table(num_tasks, num_students, num_variants, csv_path.as_posix()) diff --git a/setup.cfg b/setup.cfg index cf53ebb5f8..e069d3074e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ -[flake8] -max-line-length = 120 -exclude = - 3rdparty - venv - .git +[flake8] +max-line-length = 120 +exclude = + 3rdparty + venv + .git diff --git a/tasks/CMakeLists.txt b/tasks/CMakeLists.txt index d063333945..2138330c79 100644 --- a/tasks/CMakeLists.txt +++ b/tasks/CMakeLists.txt @@ -1,29 +1,29 @@ -project(parallel_programming_course 
LANGUAGES C CXX) - -message(STATUS "Student's tasks") - -# Test runner executables -set(FUNC_TEST_EXEC ppc_func_tests) -set(PERF_TEST_EXEC ppc_perf_tests) - -# ——— Include helper scripts —————————————————————————————————————— -include(${CMAKE_SOURCE_DIR}/cmake/functions.cmake) - -# ——— Initialize test executables ————————————————————————————————————— -ppc_add_test(${FUNC_TEST_EXEC} common/runners/functional.cpp USE_FUNC_TESTS) -ppc_add_test(${PERF_TEST_EXEC} common/runners/performance.cpp USE_PERF_TESTS) - -# ——— List of implementations ———————————————————————————————————————— -set(PPC_IMPLEMENTATIONS "all;mpi;omp;seq;stl;tbb" CACHE STRING "Implementations to build (semicolon-separated)") - -# ——— Configure each subproject ————————————————————————————————————— -file( - GLOB subdirs - RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" - "${CMAKE_CURRENT_SOURCE_DIR}/*") -foreach(sub IN LISTS subdirs) - if(IS_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/${sub}" AND NOT sub STREQUAL - "common") - ppc_configure_subproject(${sub}) - endif() -endforeach() +project(parallel_programming_course LANGUAGES C CXX) + +message(STATUS "Student's tasks") + +# Test runner executables +set(FUNC_TEST_EXEC ppc_func_tests) +set(PERF_TEST_EXEC ppc_perf_tests) + +# ——— Include helper scripts —————————————————————————————————————— +include(${CMAKE_SOURCE_DIR}/cmake/functions.cmake) + +# ——— Initialize test executables ————————————————————————————————————— +ppc_add_test(${FUNC_TEST_EXEC} common/runners/functional.cpp USE_FUNC_TESTS) +ppc_add_test(${PERF_TEST_EXEC} common/runners/performance.cpp USE_PERF_TESTS) + +# ——— List of implementations ———————————————————————————————————————— +set(PPC_IMPLEMENTATIONS "all;mpi;omp;seq;stl;tbb" CACHE STRING "Implementations to build (semicolon-separated)") + +# ——— Configure each subproject ————————————————————————————————————— +file( + GLOB subdirs + RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" + "${CMAKE_CURRENT_SOURCE_DIR}/*") +foreach(sub IN LISTS subdirs) + 
if(IS_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/${sub}" AND NOT sub STREQUAL + "common") + ppc_configure_subproject(${sub}) + endif() +endforeach() diff --git a/tasks/common/runners/functional.cpp b/tasks/common/runners/functional.cpp index c32e6e9d1a..d6de7d01e3 100644 --- a/tasks/common/runners/functional.cpp +++ b/tasks/common/runners/functional.cpp @@ -1,12 +1,12 @@ -#include - -#include "oneapi/tbb/global_control.h" -#include "runners/include/runners.hpp" -#include "util/include/util.hpp" - -int main(int argc, char **argv) { - if (ppc::util::IsUnderMpirun()) { - return ppc::runners::Init(argc, argv); - } - return ppc::runners::SimpleInit(argc, argv); -} +#include + +#include "oneapi/tbb/global_control.h" +#include "runners/include/runners.hpp" +#include "util/include/util.hpp" + +int main(int argc, char **argv) { + if (ppc::util::IsUnderMpirun()) { + return ppc::runners::Init(argc, argv); + } + return ppc::runners::SimpleInit(argc, argv); +} diff --git a/tasks/common/runners/performance.cpp b/tasks/common/runners/performance.cpp index a4b6c0e2fc..d29457c379 100644 --- a/tasks/common/runners/performance.cpp +++ b/tasks/common/runners/performance.cpp @@ -1,5 +1,5 @@ -#include "runners/include/runners.hpp" - -int main(int argc, char **argv) { - return ppc::runners::Init(argc, argv); -} +#include "runners/include/runners.hpp" + +int main(int argc, char **argv) { + return ppc::runners::Init(argc, argv); +} diff --git a/tasks/konovalov_s_symbol_count/common/include/common.hpp b/tasks/konovalov_s_symbol_count/common/include/common.hpp new file mode 100644 index 0000000000..680c599274 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/common/include/common.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +#include "task/include/task.hpp" + +namespace konovalov_s_symbol_count { + +using InType = std::string; +using OutType = int; +using TestType = std::tuple; +using BaseTask = ppc::task::Task; + +} // namespace konovalov_s_symbol_count diff --git 
a/tasks/konovalov_s_symbol_count/data/pic.jpg b/tasks/konovalov_s_symbol_count/data/pic.jpg new file mode 100644 index 0000000000..637624238c Binary files /dev/null and b/tasks/konovalov_s_symbol_count/data/pic.jpg differ diff --git a/tasks/konovalov_s_symbol_count/data/text_line_15.txt b/tasks/konovalov_s_symbol_count/data/text_line_15.txt new file mode 100644 index 0000000000..1ed1bd7478 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/data/text_line_15.txt @@ -0,0 +1 @@ +bCKOkeyas5Pjad620ON1 \ No newline at end of file diff --git a/tasks/konovalov_s_symbol_count/data/text_line_337.txt b/tasks/konovalov_s_symbol_count/data/text_line_337.txt new file mode 100644 index 0000000000..0ca4c71a31 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/data/text_line_337.txt @@ -0,0 +1 @@ +hZxGwLKAhueSgMubrzF8AhPS9r2Y2FPFgiR7vTknOMqh8K2yqX09M9VZ5yDNJiCuHxNVMs3IYxo7DQj6Z3UPloXWCvyvUb4s1o2S1SNcuUHgmyYz7127AFSQFI9oGXBfyzwsdqBrGil3W3rlNSbppH0lXfwxkgwHfV0kCosoBJq3bscjvgwPPZYL9pZrG5b61qAHchrClJMZMFxKhOZRy0kI3HuGLmxKSv1aNgUf2xTVuT4eYc9yL5IDWunlB5FqsJLfUhyhZb3z8oftD6HKSSpBOoH945qkH2ovNVHodwUoXyiDtmdNiosvWjrTJlniI3t0ZbQpUscDxrKuyN3T7lNaXlVzANMeqz0jdHeIDOxZUWroSvRXseW8n3R7XlxY4M7L79p2rEbc0P3w \ No newline at end of file diff --git a/tasks/konovalov_s_symbol_count/data/text_line_6129.txt b/tasks/konovalov_s_symbol_count/data/text_line_6129.txt new file mode 100644 index 0000000000..63289c058f --- /dev/null +++ b/tasks/konovalov_s_symbol_count/data/text_line_6129.txt @@ -0,0 +1 @@ 
+ZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1
hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg
1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmw
awKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8lZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8l \ No newline at end of file diff --git a/tasks/konovalov_s_symbol_count/data/text_line_681.txt b/tasks/konovalov_s_symbol_count/data/text_line_681.txt new file mode 100644 index 0000000000..1995979943 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/data/text_line_681.txt @@ -0,0 +1 @@ 
+ZucYZrHd9l40qX0e1sK6yCW5lmXDWkjvZBBEIFwmkD9YRimgPT7WD5I5og6SOlS5ZvYlGr6fRO3cvgvBJB5EEcKNEmrjgP9iNnVHwOSRPFNGdITHWXwgqzo4ZLKBAX7NuEgCE8T3KojJPJ3cc2WR5Vs6gobOUzGEbQbQvXKoopIh27Wh66dPqN3rUJPF7YM9TYtuXzIpiWpihBK5WZ6Wialzcfao9WQVDgICDVtINioyQ5SExW45YmJNNIJyUAXeOgp9nYsFcQRctcwDpZvRUh0hJSlWV2gxr5ftqxkLYZubIupVmGxmqvaEX25F7AccJiRQl1Pc6wD45zqxgIjYbmwawKn1PKd33YDkxUBNg4WMxttY9UJAEu9QoRMj2G1hHEb7M34Yz8jrFMcTZXK4wHQU7UI3LL34UpOCayrkXrpVajRRtyFO0SZdWIbe9y0ANAZrU0WEesRdRi6sYTWTVEedfI3ykb3QivMGK4EszIcZA949ahg0OVSAxOt4IanDhaUYUyCNwEuM34zy9ZXafYAAcPQjc2q5YaxiwzZy8XqngbbYDsMqoQei2sbi5Ce8NpqrjzIkCH2684iy4bv7LwDjZg3tIs9oxN4fyY8vHBBqBvoCrvByxBey8aKuPP6segYAKtGu3GHoRWTSDO0qaKbvt5w9grasYx5IOk1b3XtF0Fu2qCOh4RJlqGDcrReZPm4rpsm98TB8VBEtujHUNBgJ6fVSrd7L6Wd51nOviVV8TOhxGRFaNnm6j2uTQfg1Hq4sEGNLhLKnEsyb6xXqiUV1eotFQsxPyPCBxD8l \ No newline at end of file diff --git a/tasks/konovalov_s_symbol_count/info.json b/tasks/konovalov_s_symbol_count/info.json new file mode 100644 index 0000000000..bbbbcf1c1d --- /dev/null +++ b/tasks/konovalov_s_symbol_count/info.json @@ -0,0 +1,9 @@ +{ + "student": { + "first_name": "Sergey", + "last_name": "Konovalov", + "middle_name": "Alexandrovich", + "group_number": "3823Б1Пр3", + "task_number": "1" + } +} diff --git a/tasks/konovalov_s_symbol_count/mpi/include/ops_mpi.hpp b/tasks/konovalov_s_symbol_count/mpi/include/ops_mpi.hpp new file mode 100644 index 0000000000..e517c021be --- /dev/null +++ b/tasks/konovalov_s_symbol_count/mpi/include/ops_mpi.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "konovalov_s_symbol_count/common/include/common.hpp" +#include "task/include/task.hpp" + +namespace konovalov_s_symbol_count { + +class KonovalovSSymbolCountMPI : public BaseTask { + public: + static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { + return ppc::task::TypeOfTask::kMPI; + } + explicit KonovalovSSymbolCountMPI(const InType &in); + + private: + bool ValidationImpl() override; + bool PreProcessingImpl() override; + bool RunImpl() override; + bool 
PostProcessingImpl() override; +}; + +} // namespace konovalov_s_symbol_count diff --git a/tasks/konovalov_s_symbol_count/mpi/src/ops_mpi.cpp b/tasks/konovalov_s_symbol_count/mpi/src/ops_mpi.cpp new file mode 100644 index 0000000000..72179837c5 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/mpi/src/ops_mpi.cpp @@ -0,0 +1,67 @@ +#include "konovalov_s_symbol_count/mpi/include/ops_mpi.hpp" + +#include + +#include +#include + +#include "konovalov_s_symbol_count/common/include/common.hpp" +#include "util/include/util.hpp" + +namespace konovalov_s_symbol_count { + +KonovalovSSymbolCountMPI::KonovalovSSymbolCountMPI(const InType &in) { + SetTypeOfTask(GetStaticTypeOfTask()); + GetInput() = in; + GetOutput() = 0; +} + +bool KonovalovSSymbolCountMPI::ValidationImpl() { + return GetInput().length() > 0; +} + +bool KonovalovSSymbolCountMPI::PreProcessingImpl() { + return true; +} + +bool KonovalovSSymbolCountMPI::RunImpl() { + + int rank, size, str_len, rad; + int count = 0; + int gl_non_digit_count = 0; + std::string line; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (rank == 0) { + line = GetInput(); + str_len = line.length(); + rad = round(str_len / size); + } + MPI_Bcast(&str_len, 1, MPI_INT, 0, MPI_COMM_WORLD); + if(rank != 0) line.resize(str_len); + MPI_Bcast(line.data(), str_len, MPI_CHAR, 0, MPI_COMM_WORLD); + MPI_Bcast(&rad, 1, MPI_INT, 0, MPI_COMM_WORLD); + + if(rank != size - 1){ + for(int i = rad*rank; i < rad*(rank + 1); i++){ + if(isalpha(line[i])) count++; + } + } + else{ + for(int i = rad*rank; i < str_len; i++){ + if(isalpha(line[i])) count++; + } + } + + + MPI_Allreduce(&count, &gl_non_digit_count, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); + + GetOutput() = static_cast(gl_non_digit_count); + return true; +} + +bool KonovalovSSymbolCountMPI::PostProcessingImpl() { + return true; +} + +} // namespace konovalov_s_symbol_count diff --git a/tasks/konovalov_s_symbol_count/report.md b/tasks/konovalov_s_symbol_count/report.md 
new file mode 100644 index 0000000000..ebc41bc9e9 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/report.md @@ -0,0 +1,86 @@ +# <Подсчет буквенных символов в строке> + +- Student: <Коновалов Сергей Александрович>, group <3823Б1Пр3> +- Technology: +- Variant: <22> + +## 1. Introduction +Вводная задача на реализацию простого алгоритма чтения строки. Строка константна, загружается из внешнего источника. Алгоритм должен вернуть количество буквенных символов в строке. + +## 2. Problem Statement +Главной задачей является посчет буквенных символов в строке. + +'''InType = std::string''' +В качестве входных данных в программу передается строка, в которой содержится заранее известное число буквенных символов. Для работы алгоритма не требуются какие-либо дополнительные входные данные. + +'''OutType = std::tuple''' +В качестве выходных данных программа возвращает посчитанное число буквенных символов в строке, а также номер процесса-отправителя для валидации данных (последовательная версия по умолчанию передает ранг процесса 0). + +## 3. Baseline Algorithm (Sequential) +Последовательный алгоритм выполняет следующие действия: +- Считывает строку из входных данных. +- Вычисляет количество искомых символов с помощью функции count_if(), определяя тип символов функцией isdigit(). +- Формирует возвращаемое значение GetOutput() с помощью функции make_tuple(). + +count_if() - возвращает количество элементов в массиве, удовлетвояющие заданному устовию. +isdigit() - возвращает одно из булевых значений: 1 - элмент является числом, 0 - элмент не является числом. +make_tuple() - возвращает данные типа std::tuple<>. + +## 4. Parallelization Scheme +Идея распараллеливания вычислений заключается в следующем: +Разделение обязанностей: +### 1. Процесс-дистрибьютор +- Один из процессов (ранг 0) выступает в роли дистрибьютора данных, разделяя исходную строку на равные части. +- Процесс передаёт полученные подстроки процессам-получателям. 
+- - Так как строка может не поделиться поровну, последнему процессу-получателю будут переданы остаточные символы исходной строки. +- После того, как строки были отправлены, процесс встает в состояние ожидания ответа от процессов - число буквенных символов в их подстроках. +- - При получении результата, процесс прибавляет полученное значение к переменной mpi_non_digit_count, хранящей общее число буквенных символов в исходной строке, т.е. ответ. +- Получив все ответы, процесс формирует возвращаемое значение GetOutput() и заканчивает выполнение метода. +### 2. Процесс-получатель +- Каждый процесс находится в состоянии ожидания входящих сообщений и инициализирует массив размером по длине сообщения. +- - После получения сообщения процесс вычисляет количество искомых символов с помощью функции count_if(), определяя тип символов функцией isdigit(). +- После подсчета, процесс посылает результат процессу-дистрибьютору. +### 3. Общая часть +- Все процессы подсчитывают промежуточные значения count методом Allreduce и записывают итоговый резутьтат в gl_non_digit_count. +- Процессы выходят из алгоритма и завершают работу. + +## 5. Implementation Details +Тестовые данные хранятся в папке /data с ключевым словом "text_line_[correct_ans]". Файл открывается и обрабатывается в классе теста. +Ввиду своей простоты, весь алгоритм реализован в методе RunImpl(). + +## 6. Experimental Setup +Проект запускался в WSL с помощью докера. +- CPU: Intel Xeon CPU E5-2678 v3 2.50GHz +- Cores: 12 +- RAM: 7.5 Gb +- OS: Ubuntu 24.04.3 LTS +- Compiler: GCC +- Build type: Release +- Environment: PPC_NUM_PROC +- Data: tasks/konovalov_s_symbol_count/data + +## 7. Results and Discussion + +### 7.1 Correctness +Функциональный тест сверяет заранее известный верный ответ для данной строки символов с результатом, полученным в результате выполнения алгоритма. Для параллельной версии, верный ответ сверяется с результатами каждого процесса. 
+Алгоритм справился с фунциональными тестами, как на последовательной, так и на параллельной версии. + +### 7.2 Performance +Present time, speedup and efficiency. Example table: + +| Mode | Count | Time, s | Speedup | Efficiency | +|-------------|-------|---------|---------|------------| +| seq | 1 | 1.234 | 1.00 | N/A | +| mpi | 2 | 0.700 | 1.76 | 88.0% | +| omp | 4 | 0.390 | 3.16 | 79.0% | + + + +## 9. References +1.
+2. + +## Appendix (Optional) +```cpp +// Short, readable code excerpts if needed +``` diff --git a/tasks/konovalov_s_symbol_count/seq/include/ops_seq.hpp b/tasks/konovalov_s_symbol_count/seq/include/ops_seq.hpp new file mode 100644 index 0000000000..45df52763f --- /dev/null +++ b/tasks/konovalov_s_symbol_count/seq/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "konovalov_s_symbol_count/common/include/common.hpp" +#include "task/include/task.hpp" + +namespace konovalov_s_symbol_count { + +class KonovalovSSymbolCountSEQ : public BaseTask { + public: + static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { + return ppc::task::TypeOfTask::kSEQ; + } + explicit KonovalovSSymbolCountSEQ(const InType &in); + + private: + bool ValidationImpl() override; + bool PreProcessingImpl() override; + bool RunImpl() override; + bool PostProcessingImpl() override; +}; + +} // namespace konovalov_s_symbol_count diff --git a/tasks/konovalov_s_symbol_count/seq/src/ops_seq.cpp b/tasks/konovalov_s_symbol_count/seq/src/ops_seq.cpp new file mode 100644 index 0000000000..872fe882ff --- /dev/null +++ b/tasks/konovalov_s_symbol_count/seq/src/ops_seq.cpp @@ -0,0 +1,39 @@ +#include "konovalov_s_symbol_count/seq/include/ops_seq.hpp" + +#include +#include + +#include "konovalov_s_symbol_count/common/include/common.hpp" +#include "util/include/util.hpp" + +namespace konovalov_s_symbol_count { + +KonovalovSSymbolCountSEQ::KonovalovSSymbolCountSEQ(const InType &in) { + SetTypeOfTask(GetStaticTypeOfTask()); + GetInput() = in; + GetOutput() = 0; +} + +bool KonovalovSSymbolCountSEQ::ValidationImpl() { + return true; +} + +bool KonovalovSSymbolCountSEQ::PreProcessingImpl() { + return true; +} + +bool KonovalovSSymbolCountSEQ::RunImpl() { + + InType &line = GetInput(); + + int count = count_if(line.begin(), line.end(), [](unsigned char c) { return isdigit(c) == 0; }); + GetOutput() = static_cast(count); + + return true; +} + +bool KonovalovSSymbolCountSEQ::PostProcessingImpl() { + 
return true; +} + +} // namespace konovalov_s_symbol_count diff --git a/tasks/konovalov_s_symbol_count/settings.json b/tasks/konovalov_s_symbol_count/settings.json new file mode 100644 index 0000000000..7d2c35b298 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/settings.json @@ -0,0 +1,7 @@ +{ + "tasks_type": "processes", + "tasks": { + "mpi": "enabled", + "seq": "enabled" + } +} diff --git a/tasks/konovalov_s_symbol_count/tests/.clang-tidy b/tasks/konovalov_s_symbol_count/tests/.clang-tidy new file mode 100644 index 0000000000..d68523c24e --- /dev/null +++ b/tasks/konovalov_s_symbol_count/tests/.clang-tidy @@ -0,0 +1,13 @@ +InheritParentConfig: true + +Checks: > + -modernize-loop-convert, + -cppcoreguidelines-avoid-goto, + -cppcoreguidelines-avoid-non-const-global-variables, + -misc-use-anonymous-namespace, + -modernize-use-std-print, + -modernize-type-traits + +CheckOptions: + - key: readability-function-cognitive-complexity.Threshold + value: 50 # Relaxed for tests diff --git a/tasks/konovalov_s_symbol_count/tests/functional/main.cpp b/tasks/konovalov_s_symbol_count/tests/functional/main.cpp new file mode 100644 index 0000000000..3cb7a81ba3 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/tests/functional/main.cpp @@ -0,0 +1,77 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "konovalov_s_symbol_count/common/include/common.hpp" +#include "konovalov_s_symbol_count/mpi/include/ops_mpi.hpp" +#include "konovalov_s_symbol_count/seq/include/ops_seq.hpp" +#include "util/include/func_test_util.hpp" +#include "util/include/util.hpp" + +namespace konovalov_s_symbol_count { + +class KonovalovSSymbolCountFuncTest : public ppc::util::BaseRunFuncTests { + public: + static std::string PrintTestParam(const TestType &test_param) { + int fndot = std::get<1>(test_param).find("."); + return std::get<1>(test_param).substr(0, fndot); + } + + protected: + void SetUp() override { + TestType 
params = std::get(ppc::util::GTestParamIndex::kTestParams)>(GetParam()); + std::string data4test = ppc::util::GetAbsoluteTaskPath(PPC_ID_konovalov_s_symbol_count, std::get<1>(params)); + + std::ifstream in(data4test); + while (std::getline(in, input_data_)){} + + correct_output = std::get<0>(params); + + + in.close(); + } + + bool CheckTestOutputData(OutType &output_data) { + return output_data == correct_output; + } + + InType GetTestInputData() final { + return input_data_; + } + + private: + InType input_data_; + int correct_output; +}; + +namespace { + +TEST_P(KonovalovSSymbolCountFuncTest, SymbolsInLineCountFT) { + ExecuteTest(GetParam()); +} + +const std::array kTestParam = {std::make_tuple(15, "text_line_15.txt"), std::make_tuple(337, "text_line_337.txt"),std::make_tuple(681, "text_line_681.txt"), std::make_tuple(6129, "text_line_6129.txt")}; + +const auto kTestTasksList = + std::tuple_cat(ppc::util::AddFuncTask(kTestParam, PPC_SETTINGS_konovalov_s_symbol_count), + ppc::util::AddFuncTask(kTestParam, PPC_SETTINGS_konovalov_s_symbol_count)); + +const auto kGtestValues = ppc::util::ExpandToValues(kTestTasksList); + +const auto kPerfTestName = KonovalovSSymbolCountFuncTest::PrintFuncTestName; + +INSTANTIATE_TEST_SUITE_P(FTestData, KonovalovSSymbolCountFuncTest, kGtestValues, kPerfTestName); + +} // namespace + +} // namespace konovalov_s_symbol_count diff --git a/tasks/konovalov_s_symbol_count/tests/performance/main.cpp b/tasks/konovalov_s_symbol_count/tests/performance/main.cpp new file mode 100644 index 0000000000..f1bfc3ce95 --- /dev/null +++ b/tasks/konovalov_s_symbol_count/tests/performance/main.cpp @@ -0,0 +1,48 @@ +#include + +#include "konovalov_s_symbol_count/common/include/common.hpp" +#include "konovalov_s_symbol_count/mpi/include/ops_mpi.hpp" +#include "konovalov_s_symbol_count/seq/include/ops_seq.hpp" +#include "util/include/perf_test_util.hpp" + +namespace konovalov_s_symbol_count { + +class KonovalovSSymbolCountPerfTest : public 
ppc::util::BaseRunPerfTests { + InType input_data_; + OutType correct_output = 0; + + void SetUp() override { + std::string str; + for(int i = 0; i < 100000000; i++){ + str += "0"; + } + str += "t"; + input_data_ = str; + correct_output = 1; + } + + bool CheckTestOutputData(OutType &output_data) final { + return output_data == correct_output; + } + + InType GetTestInputData() final { + return input_data_; + } +}; + +TEST_P(KonovalovSSymbolCountPerfTest, SymbolsInLineCounPT) { + ExecuteTest(GetParam()); +} + + + +const auto kAllPerfTasks = + ppc::util::MakeAllPerfTasks(PPC_SETTINGS_konovalov_s_symbol_count); + +const auto kGtestValues = ppc::util::TupleToGTestValues(kAllPerfTasks); + +const auto kPerfTestName = KonovalovSSymbolCountPerfTest::CustomPerfTestName; + +INSTANTIATE_TEST_SUITE_P(PTestData, KonovalovSSymbolCountPerfTest, kGtestValues, kPerfTestName); + +} // namespace konovalov_s_symbol_count