
Commit e672c28

mc-nvyinggehpskiran1 authored
Update default branch post 25.07 (#8319)

Co-authored-by: Yingge He <[email protected]>
Co-authored-by: Sai Kiran Polisetty <[email protected]>

1 parent 99f4820 commit e672c28

File tree

21 files changed: +59 -54 lines changed


Dockerfile.sdk
Lines changed: 26 additions & 22 deletions

@@ -29,7 +29,7 @@
 #
 
 # Base image on the minimum Triton container
-ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:25.06-py3-min
+ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:25.07-py3-min
 
 ARG TRITON_CLIENT_REPO_SUBDIR=clientrepo
 ARG TRITON_PA_REPO_SUBDIR=perfanalyzerrepo
@@ -60,34 +60,36 @@ ENV PIP_BREAK_SYSTEM_PACKAGES=1 CMAKE_POLICY_VERSION_MINIMUM=3.5
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-        ca-certificates \
-        software-properties-common \
         autoconf \
         automake \
         build-essential \
+        ca-certificates \
         curl \
         git \
         gperf \
         libb64-dev \
         libgoogle-perftools-dev \
-        libopencv-dev \
         libopencv-core-dev \
+        libopencv-dev \
         libssl-dev \
         libtool \
+        maven \
+        openjdk-11-jdk \
         pkg-config \
         python3 \
-        python3-pip \
         python3-dev \
-        python3-wheel \
+        python3-pdfkit \
+        python3-pip \
         python3-setuptools \
+        python3-wheel \
         rapidjson-dev \
+        software-properties-common \
         vim \
-        wget \
-        python3-pdfkit \
-        openjdk-11-jdk \
-        maven && \
+        wget && \
     pip3 install --upgrade "grpcio-tools<1.68" cmake==4.0.3
 
+ENV CMAKE_POLICY_MINIMUM_REQUIRED=3.5
+
 # Build expects "python" executable (not python3).
 RUN rm -f /usr/bin/python && \
     ln -s /usr/bin/python3 /usr/bin/python
@@ -126,8 +128,7 @@ RUN cmake -DCMAKE_INSTALL_PREFIX=/workspace/install \
     -DTRITON_ENABLE_JAVA_HTTP=ON \
     -DTRITON_ENABLE_EXAMPLES=ON -DTRITON_ENABLE_TESTS=ON \
     -DTRITON_ENABLE_GPU=${TRITON_ENABLE_GPU} /workspace/client
-RUN make -j16 cc-clients java-clients && \
-    rm -fr ~/.m2
+RUN cmake --build . -v --parallel --target cc-clients java-clients
 
 # TODO: PA will rebuild the CC clients since it depends on it.
 # This should be optimized so that we do not have to build
@@ -145,6 +146,7 @@ RUN if [ "$TRITON_PERF_ANALYZER_BUILD" = "1" ]; then \
     -DTRITON_COMMON_REPO_TAG=${TRITON_COMMON_REPO_TAG} \
     -DTRITON_CORE_REPO_TAG=${TRITON_CORE_REPO_TAG} \
     -DTRITON_CLIENT_REPO_TAG=${TRITON_CLIENT_REPO_TAG} \
+    -DTRITON_THIRD_PARTY_REPO_TAG=${TRITON_THIRD_PARTY_REPO_TAG} \
    -DTRITON_ENABLE_PERF_ANALYZER_C_API=ON \
     -DTRITON_ENABLE_PERF_ANALYZER_TFS=ON \
     -DTRITON_ENABLE_PERF_ANALYZER_TS=ON \
@@ -156,7 +158,7 @@ RUN if [ "$TRITON_PERF_ANALYZER_BUILD" = "1" ]; then \
     -DTRITON_PACKAGE_PERF_ANALYZER=ON \
     -DTRITON_ENABLE_GPU=${TRITON_ENABLE_GPU} \
     /workspace/perf_analyzer && \
-    make -j16 perf-analyzer python-clients && \
+    cmake --build . -v --parallel --target perf-analyzer python-clients && \
     pip3 install build && \
     cd /workspace/perf_analyzer/genai-perf && \
     python3 -m build --wheel --outdir /workspace/install/python; \
@@ -169,12 +171,13 @@ RUN if [ "$TRITON_PERF_ANALYZER_BUILD" = "1" ]; then \
     -DTRITON_REPO_ORGANIZATION=${TRITON_REPO_ORGANIZATION} \
     -DTRITON_COMMON_REPO_TAG=${TRITON_COMMON_REPO_TAG} \
     -DTRITON_CLIENT_REPO_TAG=${TRITON_CLIENT_REPO_TAG} \
+    -DTRITON_THIRD_PARTY_REPO_TAG=${TRITON_THIRD_PARTY_REPO_TAG} \
     -DTRITON_ENABLE_PYTHON_HTTP=ON \
     -DTRITON_ENABLE_PYTHON_GRPC=ON \
     -DTRITON_PACKAGE_PERF_ANALYZER=ON \
     -DTRITON_ENABLE_GPU=${TRITON_ENABLE_GPU} \
     /workspace/perf_analyzer && \
-    make -j16 python-clients && \
+    cmake --build . -v --parallel --target python-clients && \
     mkdir -p /workspace/install/python && \
     cp /workspace/perf_analyzer/genai_perf-*.whl /workspace/install/python/; \
     fi
@@ -205,26 +208,27 @@ ARG TRITON_ENABLE_GPU
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-        software-properties-common \
         curl \
+        default-jdk \
         git \
         gperf \
         libb64-dev \
         libgoogle-perftools-dev \
-        libopencv-dev \
         libopencv-core-dev \
+        libopencv-dev \
         libssl-dev \
         libtool \
+        maven \
+        perl \
         python3 \
-        python3-pip \
         python3-dev \
-        python3-wheel \
+        python3-pdfkit \
+        python3-pip \
         python3-setuptools \
+        python3-wheel \
+        software-properties-common \
         vim \
-        wget \
-        python3-pdfkit \
-        maven \
-        default-jdk && \
+        wget && \
     pip3 install "grpcio<1.68" "grpcio-tools<1.68"
 
 WORKDIR /workspace
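The client build steps above move from a hard-coded `make -j16` to generator-agnostic `cmake --build` invocations. A minimal sketch of the difference, assuming an already-configured CMake build directory (targets as in the diff):

```bash
# Old: calls make directly, assuming the Makefile generator and exactly 16 jobs.
make -j16 cc-clients java-clients

# New: lets CMake drive whichever generator configured the tree; --parallel
# without a value picks a default job count, and -v echoes the build commands.
cmake --build . -v --parallel --target cc-clients java-clients
```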

README.md
Lines changed: 5 additions & 5 deletions

@@ -29,8 +29,8 @@
 
 >[!WARNING]
 >You are currently on the `main` branch which tracks under-development progress
->towards the next release. The current release is version [2.59.0](https://github.com/triton-inference-server/server/releases/latest)
->and corresponds to the 25.06 container release on NVIDIA GPU Cloud (NGC).
+>towards the next release. The current release is version [2.59.1](https://github.com/triton-inference-server/server/releases/latest)
+>and corresponds to the 25.07 container release on NVIDIA GPU Cloud (NGC).
 
 # Triton Inference Server
 
@@ -90,16 +90,16 @@ Inference Server with the
 
 ```bash
 # Step 1: Create the example model repository
-git clone -b r25.06 https://github.com/triton-inference-server/server.git
+git clone -b r25.07 https://github.com/triton-inference-server/server.git
 cd server/docs/examples
 ./fetch_models.sh
 
 # Step 2: Launch triton from the NGC Triton container
-docker run --gpus=1 --rm --net=host -v ${PWD}/model_repository:/models nvcr.io/nvidia/tritonserver:25.06-py3 tritonserver --model-repository=/models --model-control-mode explicit --load-model densenet_onnx
+docker run --gpus=1 --rm --net=host -v ${PWD}/model_repository:/models nvcr.io/nvidia/tritonserver:25.07-py3 tritonserver --model-repository=/models --model-control-mode explicit --load-model densenet_onnx
 
 # Step 3: Sending an Inference Request
 # In a separate console, launch the image_client example from the NGC Triton SDK container
-docker run -it --rm --net=host nvcr.io/nvidia/tritonserver:25.06-py3-sdk /workspace/install/bin/image_client -m densenet_onnx -c 3 -s INCEPTION /workspace/images/mug.jpg
+docker run -it --rm --net=host nvcr.io/nvidia/tritonserver:25.07-py3-sdk /workspace/install/bin/image_client -m densenet_onnx -c 3 -s INCEPTION /workspace/images/mug.jpg
 
 # Inference should return the following
 Image '/workspace/images/mug.jpg':
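For anyone following the updated quickstart, the retagged images can be pulled ahead of time; a small sketch using the tags from the diff:

```bash
# Pre-pull the 25.07 server and SDK images used in Steps 2 and 3 of the quickstart.
docker pull nvcr.io/nvidia/tritonserver:25.07-py3
docker pull nvcr.io/nvidia/tritonserver:25.07-py3-sdk
```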

build.py
Lines changed: 2 additions & 2 deletions

@@ -72,8 +72,8 @@
 
 DEFAULT_TRITON_VERSION_MAP = {
     "release_version": "2.60.0dev",
-    "triton_container_version": "25.07dev",
-    "upstream_container_version": "25.06",
+    "triton_container_version": "25.08dev",
+    "upstream_container_version": "25.07",
     "ort_version": "1.22.0",
     "ort_openvino_version": "2025.2.0",
     "standalone_openvino_version": "2025.2.0",

deploy/aws/values.yaml
Lines changed: 1 addition & 1 deletion

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:25.06-py3
+  imageName: nvcr.io/nvidia/tritonserver:25.07-py3
   pullPolicy: IfNotPresent
   modelRepositoryPath: s3://triton-inference-server-repository/model_repository
   numGpus: 1
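The new tag can also be supplied at deploy time without editing the file; a hypothetical override (the release name `triton` is illustrative, the value path matches the chart above):

```bash
# Hypothetical: install the AWS chart while pinning the 25.07 image explicitly.
helm install triton ./deploy/aws \
  --set image.imageName=nvcr.io/nvidia/tritonserver:25.07-py3
```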

deploy/fleetcommand/Chart.yaml
Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@
 
 apiVersion: v1
 # appVersion is the Triton version; update when changing release
-appVersion: "2.59.0"
+appVersion: "2.59.1"
 description: Triton Inference Server (Fleet Command)
 name: triton-inference-server
 # version is the Chart version; update when changing anything in the chart

deploy/fleetcommand/values.yaml
Lines changed: 3 additions & 3 deletions

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:25.06-py3
+  imageName: nvcr.io/nvidia/tritonserver:25.07-py3
   pullPolicy: IfNotPresent
   numGpus: 1
   serverCommand: tritonserver
@@ -47,13 +47,13 @@ image:
 #
 # To set model control mode, uncomment and configure below
 # TODO: Fix the following url, it is invalid
-# See https://github.com/triton-inference-server/server/blob/r25.06/docs/user_guide/model_management.md
+# See https://github.com/triton-inference-server/server/blob/r25.07/docs/user_guide/model_management.md
 # for more details
 #- --model-control-mode=explicit|poll|none
 #
 # Additional server args
 #
-# see https://github.com/triton-inference-server/server/blob/r25.06/README.md
+# see https://github.com/triton-inference-server/server/blob/r25.07/README.md
 # for more details
 
 service:
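A hedged sanity check after the bump, assuming the chart renders standalone:

```bash
# Render the Fleet Command chart locally and confirm the 25.07 tag appears.
helm template ./deploy/fleetcommand | grep "25.07-py3"
```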

deploy/gcp/values.yaml
Lines changed: 1 addition & 1 deletion

@@ -27,7 +27,7 @@
 replicaCount: 1
 
 image:
-  imageName: nvcr.io/nvidia/tritonserver:25.06-py3
+  imageName: nvcr.io/nvidia/tritonserver:25.07-py3
   pullPolicy: IfNotPresent
   modelRepositoryPath: gs://triton-inference-server-repository/model_repository
   numGpus: 1

deploy/gke-marketplace-app/benchmark/perf-analyzer-script/triton_client.yaml
Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@ metadata:
   namespace: default
 spec:
   containers:
-  - image: nvcr.io/nvidia/tritonserver:25.06-py3-sdk
+  - image: nvcr.io/nvidia/tritonserver:25.07-py3-sdk
     imagePullPolicy: Always
     name: nv-triton-client
     securityContext:
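Re-deploying the client pod picks up the new SDK image; a sketch using the manifest's path in this repo:

```bash
# Apply the updated client pod manifest (default namespace, per the spec above).
kubectl apply -f deploy/gke-marketplace-app/benchmark/perf-analyzer-script/triton_client.yaml
```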

deploy/gke-marketplace-app/server-deployer/build_and_push.sh
Lines changed: 2 additions & 2 deletions

@@ -28,8 +28,8 @@
 export REGISTRY=gcr.io/$(gcloud config get-value project | tr ':' '/')
 export APP_NAME=tritonserver
 export MAJOR_VERSION=2.59
-export MINOR_VERSION=2.59.0
-export NGC_VERSION=25.06-py3
+export MINOR_VERSION=2.59.1
+export NGC_VERSION=25.07-py3
 
 docker pull nvcr.io/nvidia/$APP_NAME:$NGC_VERSION
 
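These exports feed the `docker pull` shown above; the retag-and-push steps that typically follow are sketched here as an assumption, not part of this diff:

```bash
# Assumed continuation: retag the pulled NGC image for GCR and push it.
docker tag nvcr.io/nvidia/$APP_NAME:$NGC_VERSION $REGISTRY/$APP_NAME:$MINOR_VERSION
docker push $REGISTRY/$APP_NAME:$MINOR_VERSION
```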

deploy/gke-marketplace-app/server-deployer/chart/triton/Chart.yaml
Lines changed: 1 addition & 1 deletion

@@ -28,4 +28,4 @@ apiVersion: v1
 appVersion: "2.59"
 description: Triton Inference Server
 name: triton-inference-server
-version: 2.59.0
+version: 2.59.1
