diff --git a/Dockerfile b/Dockerfile
index dcaf1bf3..ee3e07c8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.08-py3
-ARG TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.09-py3
+ARG TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 
-ARG MODEL_ANALYZER_VERSION=1.44.0dev
-ARG MODEL_ANALYZER_CONTAINER_VERSION=24.09dev
+ARG MODEL_ANALYZER_VERSION=1.44.0
+ARG MODEL_ANALYZER_CONTAINER_VERSION=24.09
 
 FROM ${TRITONSDK_BASE_IMAGE} as sdk
 FROM $BASE_IMAGE
diff --git a/README.md b/README.md
index 944bc3a6..4363d530 100644
--- a/README.md
+++ b/README.md
@@ -18,14 +18,6 @@ limitations under the License.
 
 # Triton Model Analyzer
 
-> [!Warning]
->
-> ##### LATEST RELEASE
->
-> You are currently on the `main` branch which tracks under-development progress towards the next release.
-> The latest release of the Triton Model Analyzer is 1.43.0 and is available on branch
-> [r24.08](https://github.com/triton-inference-server/model_analyzer/tree/r24.08).
-
 Triton Model Analyzer is a CLI tool which can help you find a more optimal configuration, on a given piece of hardware, for single, multiple, ensemble, or BLS models running on a [Triton Inference Server](https://github.com/triton-inference-server/server/). Model Analyzer will also generate reports to help you better understand the trade-offs of the different configurations along with their compute and memory requirements.
@@ -35,14 +27,14 @@ Triton Model Analyzer is a CLI tool which can help you find a more optimal confi
 
 - [Optuna Search](docs/config_search.md#optuna-search-mode) **_-ALPHA RELEASE-_** allows you to search for every parameter that can be specified in the model configuration, using a hyperparameter optimization framework. Please see the [Optuna](https://optuna.org/) website if you are interested in specific details on how the algorithm functions.
 
-- [Quick Search](docs/config_search.md#quick-search-mode) will **sparsely** search the [Max Batch Size](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#maximum-batch-size),
-  [Dynamic Batching](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#dynamic-batcher), and
-  [Instance Group](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#instance-groups) spaces by utilizing a heuristic hill-climbing algorithm to help you quickly find a more optimal configuration
+- [Quick Search](docs/config_search.md#quick-search-mode) will **sparsely** search the [Max Batch Size](https://github.com/triton-inference-server/server/blob/r24.09/docs/user_guide/model_configuration.md#maximum-batch-size),
+  [Dynamic Batching](https://github.com/triton-inference-server/server/blob/r24.09/docs/user_guide/model_configuration.md#dynamic-batcher), and
+  [Instance Group](https://github.com/triton-inference-server/server/blob/r24.09/docs/user_guide/model_configuration.md#instance-groups) spaces by utilizing a heuristic hill-climbing algorithm to help you quickly find a more optimal configuration
 
 - [Automatic Brute Search](docs/config_search.md#automatic-brute-search) will **exhaustively** search the
-  [Max Batch Size](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#maximum-batch-size),
-  [Dynamic Batching](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#dynamic-batcher), and
-  [Instance Group](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#instance-groups)
+  [Max Batch Size](https://github.com/triton-inference-server/server/blob/r24.09/docs/user_guide/model_configuration.md#maximum-batch-size),
+  [Dynamic Batching](https://github.com/triton-inference-server/server/blob/r24.09/docs/user_guide/model_configuration.md#dynamic-batcher), and
+  [Instance Group](https://github.com/triton-inference-server/server/blob/r24.09/docs/user_guide/model_configuration.md#instance-groups)
   parameters of your model configuration
 
 - [Manual Brute Search](docs/config_search.md#manual-brute-search) allows you to create manual sweeps for every parameter that can be specified in the model configuration
diff --git a/VERSION b/VERSION
index 28eae321..372cf402 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.44.0dev
\ No newline at end of file
+1.44.0
diff --git a/docs/bls_quick_start.md b/docs/bls_quick_start.md
index 51219f15..7a7ba5de 100644
--- a/docs/bls_quick_start.md
+++ b/docs/bls_quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
 **1. Pull the SDK container:**
 
 ```
-docker pull nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 **2. Run the SDK container**
@@ -59,7 +59,7 @@ docker run -it --gpus 1 \
       --shm-size 2G \
       -v /var/run/docker.sock:/var/run/docker.sock \
       -v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
-      --net=host nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+      --net=host nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 **Important:** The example above uses a single GPU. If you are running on multiple GPUs, you may need to increase the shared memory size accordingly
diff --git a/docs/config.md b/docs/config.md
index d5d69366..866a55b5 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -153,7 +153,7 @@ cpu_only_composing_models: <comma-delimited-string-list>
 [ reload_model_disable: <bool> | default: false]
 
 # Triton Docker image tag used when launching using Docker mode
-[ triton_docker_image: <string> | default: nvcr.io/nvidia/tritonserver:24.08-py3 ]
+[ triton_docker_image: <string> | default: nvcr.io/nvidia/tritonserver:24.09-py3 ]
 
 # Triton Server HTTP endpoint url used by Model Analyzer client"
 [ triton_http_endpoint: <string> | default: localhost:8000 ]
diff --git a/docs/ensemble_quick_start.md b/docs/ensemble_quick_start.md
index 631eb454..46601a40 100644
--- a/docs/ensemble_quick_start.md
+++ b/docs/ensemble_quick_start.md
@@ -55,7 +55,7 @@ mkdir examples/quick-start/ensemble_add_sub/1
 **1. Pull the SDK container:**
 
 ```
-docker pull nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 **2. Run the SDK container**
@@ -65,7 +65,7 @@ docker run -it --gpus 1 \
       --shm-size 1G \
       -v /var/run/docker.sock:/var/run/docker.sock \
       -v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
-      --net=host nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+      --net=host nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 **Important:** The example above uses a single GPU. If you are running on multiple GPUs, you may need to increase the shared memory size accordingly
diff --git a/docs/kubernetes_deploy.md b/docs/kubernetes_deploy.md
index 44a79f55..95e9ec94 100644
--- a/docs/kubernetes_deploy.md
+++ b/docs/kubernetes_deploy.md
@@ -79,7 +79,7 @@ images:
 
   triton:
     image: nvcr.io/nvidia/tritonserver
-    tag: 24.08-py3
+    tag: 24.09-py3
 ```
 
 The model analyzer executable uses the config file defined in `helm-chart/templates/config-map.yaml`. This config can be modified to supply arguments to model analyzer. Only the content under the `config.yaml` section of the file should be modified.
diff --git a/docs/mm_quick_start.md b/docs/mm_quick_start.md
index 6d250754..e917745b 100644
--- a/docs/mm_quick_start.md
+++ b/docs/mm_quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
 **1. Pull the SDK container:**
 
 ```
-docker pull nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 **2. Run the SDK container**
@@ -58,7 +58,7 @@ docker pull nvcr.io/nvidia/tritonserver:24.08-py3-sdk
 docker run -it --gpus all \
       -v /var/run/docker.sock:/var/run/docker.sock \
       -v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
-      --net=host nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+      --net=host nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 ## `Step 3:` Profile both models concurrently
diff --git a/docs/quick_start.md b/docs/quick_start.md
index 555fa1bb..efd8b58e 100644
--- a/docs/quick_start.md
+++ b/docs/quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
 **1. Pull the SDK container:**
 
 ```
-docker pull nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 **2. Run the SDK container**
@@ -58,7 +58,7 @@ docker pull nvcr.io/nvidia/tritonserver:24.08-py3-sdk
 docker run -it --gpus all \
       -v /var/run/docker.sock:/var/run/docker.sock \
       -v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
-      --net=host nvcr.io/nvidia/tritonserver:24.08-py3-sdk
+      --net=host nvcr.io/nvidia/tritonserver:24.09-py3-sdk
 ```
 
 ## `Step 3:` Profile the `add_sub` model
diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml
index 5861ba93..6ad4e346 100644
--- a/helm-chart/values.yaml
+++ b/helm-chart/values.yaml
@@ -41,4 +41,4 @@ images:
 
   triton:
     image: nvcr.io/nvidia/tritonserver
-    tag: 24.08-py3
+    tag: 24.09-py3
diff --git a/model_analyzer/config/input/config_defaults.py b/model_analyzer/config/input/config_defaults.py
index 0999437d..a7c709ab 100755
--- a/model_analyzer/config/input/config_defaults.py
+++ b/model_analyzer/config/input/config_defaults.py
@@ -63,7 +63,7 @@ DEFAULT_REQUEST_RATE_SEARCH_ENABLE = False
 DEFAULT_CONCURRENCY_SWEEP_DISABLE = False
 DEFAULT_TRITON_LAUNCH_MODE = "local"
-DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:24.08-py3"
+DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:24.09-py3"
 DEFAULT_TRITON_HTTP_ENDPOINT = "localhost:8000"
 DEFAULT_TRITON_GRPC_ENDPOINT = "localhost:8001"
 DEFAULT_TRITON_METRICS_URL = "http://localhost:8002/metrics"