From 4732de5485fa966de4f9fe18026fba373ca78df5 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Tue, 7 Jan 2025 15:08:06 +0530 Subject: [PATCH 1/7] Support docker for mixtral --- script/get-ml-model-mixtral/_cm.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/script/get-ml-model-mixtral/_cm.yaml b/script/get-ml-model-mixtral/_cm.yaml index 5b61ef9ca..358d56318 100644 --- a/script/get-ml-model-mixtral/_cm.yaml +++ b/script/get-ml-model-mixtral/_cm.yaml @@ -6,6 +6,8 @@ category: AI/ML models env: CM_ML_MODEL_DATASET: '' CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +docker: + real_run: False input_mapping: checkpoint: MIXTRAL_CHECKPOINT_PATH new_env_keys: From f56c75d37d948b35f8772bfd794e9d3ecdbea6ad Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Tue, 7 Jan 2025 18:08:16 +0530 Subject: [PATCH 2/7] Added option to automatically submit mlperf inference results while running the submission checker --- script/run-mlperf-inference-submission-checker/_cm.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/script/run-mlperf-inference-submission-checker/_cm.yaml b/script/run-mlperf-inference-submission-checker/_cm.yaml index 84e712a40..1317140c1 100644 --- a/script/run-mlperf-inference-submission-checker/_cm.yaml +++ b/script/run-mlperf-inference-submission-checker/_cm.yaml @@ -50,6 +50,7 @@ input_mapping: src_version: CM_MLPERF_SUBMISSION_CHECKER_VERSION submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR submitter: CM_MLPERF_SUBMITTER + submitter_id: CM_MLPERF_SUBMITTER_ID tar: CM_TAR_SUBMISSION_DIR post_deps: - enable_if_env: @@ -66,6 +67,12 @@ post_deps: CM_TAR_SUBMISSION_DIR: - 'yes' tags: run,tar +- enable_if_env: + CM_SUBMITTER_ID: + - 'yes' + tags: submit,mlperf,results,_inference + env: + CM_MLPERF_SUBMISSION_FILE: <<>> tags: - run - mlc From 1bc3a0fe25df54176e76ad62ab36ad0fdbca0d1b Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Tue, 7 Jan 2025 20:38:58 +0530 Subject: [PATCH 3/7] Cleanups for get-mlperf-inference-src --- 
.../generate-mlperf-inference-submission/_cm.yaml | 1 + script/get-mlperf-inference-src/_cm.yaml | 14 +++++++------- script/get-mlperf-inference-src/customize.py | 2 +- .../_cm.yaml | 1 + 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/script/generate-mlperf-inference-submission/_cm.yaml b/script/generate-mlperf-inference-submission/_cm.yaml index 8dad87f25..013997df9 100644 --- a/script/generate-mlperf-inference-submission/_cm.yaml +++ b/script/generate-mlperf-inference-submission/_cm.yaml @@ -69,6 +69,7 @@ input_mapping: device: CM_MLPERF_DEVICE division: CM_MLPERF_SUBMISSION_DIVISION duplicate: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS + extra_checker_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARG hw_name: CM_HW_NAME hw_notes_extra: CM_MLPERF_SUT_HW_NOTES_EXTRA infer_scenario_results: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS diff --git a/script/get-mlperf-inference-src/_cm.yaml b/script/get-mlperf-inference-src/_cm.yaml index 47dc7073d..a9f7410a5 100644 --- a/script/get-mlperf-inference-src/_cm.yaml +++ b/script/get-mlperf-inference-src/_cm.yaml @@ -50,8 +50,6 @@ prehook_deps: _submodules.: - CM_GIT_SUBMODULES print_env_at_the_end_disabled: - CM_MLPERF_INFERENCE_CONF_PATH: Path to the MLPerf inference benchmark configuration - file CM_MLPERF_INFERENCE_SOURCE: Path to MLPerf inference benchmark sources tags: - get @@ -154,31 +152,33 @@ versions: CM_MLPERF_LAST_RELEASE: v2.1 CM_TMP_GIT_CHECKOUT: v2.1 r3.0: - adr: + ad: inference-git-repo: tags: _tag.v3.0 env: CM_MLPERF_LAST_RELEASE: v3.0 CM_TMP_GIT_CHECKOUT: '' r3.1: - adr: + ad: inference-git-repo: tags: _tag.v3.1 env: CM_MLPERF_LAST_RELEASE: v3.1 - CM_TMP_GIT_CHECKOUT: '' + CM_GIT_CHECKOUT_TAG: 'v3.1' r4.0: - adr: + ad: inference-git-repo: tags: _tag.v4.0 env: CM_MLPERF_LAST_RELEASE: v4.0 + CM_GIT_CHECKOUT_TAG: 'v4.0' r4.1: - adr: + ad: inference-git-repo: tags: _tag.v4.1 env: CM_MLPERF_LAST_RELEASE: v4.1 + CM_GIT_CHECKOUT_TAG: 'v4.1' r5.0: env: CM_MLPERF_LAST_RELEASE: v5.0 diff --git 
a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py index 1a62da3b6..7ed4fdf89 100644 --- a/script/get-mlperf-inference-src/customize.py +++ b/script/get-mlperf-inference-src/customize.py @@ -41,7 +41,7 @@ def preprocess(i): # if not try to assign the values specified in version parameters, # if version parameters does not have the value to a parameter, set the # default one - if env.get('CM_GIT_CHECKOUT', '') == '': + if env.get('CM_GIT_CHECKOUT', '') == '' and env.get('CM_GIT_CHECKOUT_TAG', '') == '': if env.get('CM_TMP_GIT_CHECKOUT', '') != '': env["CM_GIT_CHECKOUT"] = env["CM_TMP_GIT_CHECKOUT"] else: diff --git a/script/run-mlperf-inference-submission-checker/_cm.yaml b/script/run-mlperf-inference-submission-checker/_cm.yaml index 1317140c1..a302e5d12 100644 --- a/script/run-mlperf-inference-submission-checker/_cm.yaml +++ b/script/run-mlperf-inference-submission-checker/_cm.yaml @@ -36,6 +36,7 @@ deps: tags: preprocess,mlperf,inference,submission input_mapping: extra_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS + extra_checker_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS extra_model_benchmark_map: CM_MLPERF_EXTRA_MODEL_MAPPING input: CM_MLPERF_INFERENCE_SUBMISSION_DIR power: CM_MLPERF_POWER From ee7be856e5def9e46bc4535b6128ec342f6931db Mon Sep 17 00:00:00 2001 From: arjunsuresh Date: Tue, 7 Jan 2025 15:09:43 +0000 Subject: [PATCH 4/7] [Automated Commit] Format Codebase --- script/get-mlperf-inference-src/customize.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/get-mlperf-inference-src/customize.py b/script/get-mlperf-inference-src/customize.py index 7ed4fdf89..8c545d678 100644 --- a/script/get-mlperf-inference-src/customize.py +++ b/script/get-mlperf-inference-src/customize.py @@ -41,7 +41,8 @@ def preprocess(i): # if not try to assign the values specified in version parameters, # if version parameters does not have the value to a parameter, set the # default one - if 
env.get('CM_GIT_CHECKOUT', '') == '' and env.get('CM_GIT_CHECKOUT_TAG', '') == '': + if env.get('CM_GIT_CHECKOUT', '') == '' and env.get( + 'CM_GIT_CHECKOUT_TAG', '') == '': if env.get('CM_TMP_GIT_CHECKOUT', '') != '': env["CM_GIT_CHECKOUT"] = env["CM_TMP_GIT_CHECKOUT"] else: From aaa30b9a948e0649459055a529e5df42263b8e4d Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 8 Jan 2025 10:38:45 +0000 Subject: [PATCH 5/7] Create CONTRIBUTORS.md --- CONTRIBUTORS.md | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 CONTRIBUTORS.md diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 000000000..9f08749f8 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,43 @@ +# Contributors + +Thank you for your interest in contributing to **MLPerf Automations**! We welcome contributions that help improve the project and expand its functionality. + +--- + +## How to Become a Contributor + +We value all contributions, whether they are code, documentation, bug reports, or feature suggestions. If you contribute **more than 50 lines of code** (including tests and documentation), you will be officially recognized as a project contributor. + +**Note:** Trivial contributions, such as minor typo fixes or small formatting changes, will not count toward the 50-line threshold. + +To contribute: +1. **Fork** the repository. +2. **Create** a new branch for your feature or bug fix. +3. **Submit** a pull request (PR) describing your changes. +Please see [here](CONTRIBUTING.md) for further guidelines for official contribution to any MLCommons repository. + +--- + +## Contributor Recognition + +Once your contribution exceeds 50 lines of code (in total), we will: +- Add your name to this `CONTRIBUTORS.md` file. +- Highlight your contribution in the next release notes. +- Grant you access to suggest and vote on new features. 
+ +--- + +## Current Contributors + +- **Grigori Fursin** - *Initial Development, CLI workflow support via CMind, Added core automation features* +- **Arjun Suresh** - *Initial Development, Added core automation features* +- **Anandhu Sooraj** - *Added multiple CM scripts for MLPerf Inference* +- **Thomaz Zhu** - *Added CPP implementation for MLPerf Inference Onnxruntime* +- **Sahil Avaran** - *Adding logging support in MLPerf script automation* +- **[Your Name Here]** - This could be you! 🎉 + +--- + +We believe in collaborative growth, and every contribution makes a difference. Feel free to reach out by opening an issue if you have any questions or ideas. + +Happy Coding! 🚀 From 129a70b4f0554f8f63d928229d4e54d9ff61b11c Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 8 Jan 2025 13:30:12 +0000 Subject: [PATCH 6/7] Update README.md --- README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/README.md b/README.md index ea0454c2e..450260a45 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,10 @@ # MLPerf Automations and Scripts +[![License](https://img.shields.io/badge/License-Apache%202.0-green)](LICENSE.md) +[![Downloads](https://static.pepy.tech/badge/cm4mlops)](https://pepy.tech/project/cm4mlops) + +[![CM script automation features test](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-cm-script-features.yml/badge.svg)](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-cm-script-features.yml) +[![MLPerf inference ABTF POC Test](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-mlperf-inference-abtf-poc.yml/badge.svg)](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-mlperf-inference-abtf-poc.yml) + This repository contains the automations and scripts used to run MLPerf benchmarks, primarily focusing on MLPerf inference benchmarks. 
The automations used here are largely based on and extended from the [Collective Mind script automations](https://github.com/mlcommons/cm4mlops/tree/main/automation/script). @@ -7,7 +13,19 @@ This repository contains the automations and scripts used to run MLPerf benchmar **CM (Collective Mind)** is a Python package with a CLI and API designed to create and manage automations. Two key automations developed using CM are **Script** and **Cache**, which streamline ML workflows, including managing Docker runs. +## Contributions +Please submit any pull requests (PRs) to dev branch. Please see [CONTRIBUTORS.md](here). For more information about using MLPerf Automations for MLPerf Inference, refer to the [MLPerf Inference Documentation](https://docs.mlcommons.org/inference/). + +## News + +* Waiting... ## License [Apache 2.0](LICENSE.md) + + +## Funding + +We thank OctoML(https://octoml.ai), [cKnowledge.org](https://cKnowledge.org), [cTuning foundation](https://cTuning.org) and [MLCommons](https://mlcommons.org) for sponsoring this project! 
+ From 683378ba9423e790cbc6e550ea97232435a1c0c3 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 8 Jan 2025 13:32:44 +0000 Subject: [PATCH 7/7] Update README.md --- README.md | 60 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 450260a45..800db041b 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,65 @@ # MLPerf Automations and Scripts + [![License](https://img.shields.io/badge/License-Apache%202.0-green)](LICENSE.md) [![Downloads](https://static.pepy.tech/badge/cm4mlops)](https://pepy.tech/project/cm4mlops) +[![CM Script Automation Test](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-cm-script-features.yml/badge.svg)](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-cm-script-features.yml) +[![MLPerf Inference ABTF POC Test](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-mlperf-inference-abtf-poc.yml/badge.svg)](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-mlperf-inference-abtf-poc.yml) + +Welcome to the **MLPerf Automations and Scripts** repository! This repository provides tools, automations, and scripts to facilitate running MLPerf benchmarks, with a primary focus on **MLPerf Inference benchmarks**. + +The automations build upon and extend the powerful [Collective Mind (CM) script automations](https://github.com/mlcommons/cm4mlops/tree/main/automation/script) to streamline benchmarking and workflow processes. + +--- + +## 🚀 Key Features +- **Automated Benchmarking** – Simplifies running MLPerf Inference benchmarks with minimal manual intervention. +- **Modular and Extensible** – Easily extend the scripts to support additional benchmarks and configurations. +- **Seamless Integration** – Compatible with Docker, cloud environments, and local machines. +- **Collective Mind (CM) Integration** – Utilizes the CM framework to enhance reproducibility and automation. 
+ +--- + +## 🧰 Collective Mind (CM) Automations -[![CM script automation features test](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-cm-script-features.yml/badge.svg)](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-cm-script-features.yml) -[![MLPerf inference ABTF POC Test](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-mlperf-inference-abtf-poc.yml/badge.svg)](https://github.com/mlcommons/mlperf-automations/actions/workflows/test-mlperf-inference-abtf-poc.yml) +The **Collective Mind (CM)** framework is a Python-based package offering both CLI and API support for creating and managing automations. CM automations enhance ML workflows by simplifying complex tasks such as Docker container management and caching. ### Core Automations - **Script Automation** – Automates script execution across different environments. - **Cache Management** – Manages reusable cached results to accelerate workflow processes. -This repository contains the automations and scripts used to run MLPerf benchmarks, primarily focusing on MLPerf inference benchmarks. The automations used here are largely based on and extended from the [Collective Mind script automations](https://github.com/mlcommons/cm4mlops/tree/main/automation/script). +Learn more about CM in the [CM4MLOps documentation](https://github.com/mlcommons/cm4mlops). +--- -## Collective Mind (CM) Automations +## 🤝 Contributing +We welcome contributions from the community! To contribute: +1. Submit pull requests (PRs) to the **`dev`** branch. +2. Review our [CONTRIBUTORS.md](CONTRIBUTORS.md) for guidelines and best practices. +3. Explore more about MLPerf Inference automation in the official [MLPerf Inference Documentation](https://docs.mlcommons.org/inference/). -**CM (Collective Mind)** is a Python package with a CLI and API designed to create and manage automations.
Two key automations developed using CM are **Script** and **Cache**, which streamline ML workflows, including managing Docker runs. +Your contributions help drive the project forward! -## Contributions -Please submit any pull requests (PRs) to dev branch. Please see [CONTRIBUTORS.md](here). For more information about using MLPerf Automations for MLPerf Inference, refer to the [MLPerf Inference Documentation](https://docs.mlcommons.org/inference/). +--- -## News +## 📰 News +Stay tuned for upcoming updates and announcements. -* Waiting... +--- -## License +## 📄 License +This project is licensed under the [Apache 2.0 License](LICENSE.md). -[Apache 2.0](LICENSE.md) +--- +## 💡 Acknowledgments and Funding +This project is made possible through the generous support of: +- [OctoML](https://octoml.ai) +- [cKnowledge.org](https://cKnowledge.org) +- [cTuning Foundation](https://cTuning.org) +- [MLCommons](https://mlcommons.org) -## Funding +We appreciate their contributions and sponsorship! -We thank OctoML(https://octoml.ai), [cKnowledge.org](https://cKnowledge.org), [cTuning foundation](https://cTuning.org) and [MLCommons](https://mlcommons.org) for sponsoring this project! +--- +Thank you for your interest and support in MLPerf Automations and Scripts!