@@ -99,7 +99,8 @@ Procedure
 
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
-      nvidia/gpu-operator
+      nvidia/gpu-operator \
+      --version=${version}
 
 - Install the Operator and specify configuration options:
 
@@ -108,6 +109,7 @@ Procedure
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --version=${version} \
       --set <option-name>=<option-value>
 
   Refer to the :ref:`gpu-operator-helm-chart-options`
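The ``--version=${version}`` flag added throughout this change pins the Helm chart version; when copying the commands verbatim, ``${version}`` must hold a real chart version. A minimal sketch of how it might be set, assuming the ``nvidia`` Helm repository is already configured and using a placeholder release number, not a pinned recommendation:

   # List the chart versions published in the nvidia repository.
   $ helm search repo nvidia/gpu-operator --versions

   # Export the chosen version for the install commands in this section.
   $ export version=v25.3.0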
@@ -291,7 +293,8 @@ For example, to install the GPU Operator in the ``nvidia-gpu-operator`` namespac
 
    $ helm install --wait --generate-name \
       -n nvidia-gpu-operator --create-namespace \
-      nvidia/gpu-operator
+      nvidia/gpu-operator \
+      --version=${version}
 
 If you do not specify a namespace during installation, all GPU Operator components are installed in the ``default`` namespace.
 
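After an install like the one above, the namespace choice can be verified with a standard query; a minimal sketch, assuming ``kubectl`` access to the cluster:

   # Components installed with -n nvidia-gpu-operator land here.
   $ kubectl get pods -n nvidia-gpu-operator

   # If no namespace was specified at install time, check the default namespace instead.
   $ kubectl get pods -n default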
@@ -329,6 +332,7 @@ In this scenario, use the NVIDIA Container Toolkit image that is built on UBI 8:
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --version=${version} \
       --set toolkit.version=v1.16.1-ubi8
 
 Replace the ``v1.16.1`` value in the preceding command with the version that is supported
@@ -349,6 +353,7 @@ In this scenario, the NVIDIA GPU driver is already installed on the worker nodes
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --version=${version} \
       --set driver.enabled=false
 
 The preceding command prevents the Operator from installing the GPU driver on any nodes in the cluster.
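Since ``driver.enabled=false`` relies on a driver that is already present, it is worth confirming the host installation before running the chart; a minimal sketch, assuming shell access to a GPU worker node:

   # Print the driver version reported by the installed kernel modules.
   $ nvidia-smi --query-gpu=driver_version --format=csv,noheader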
@@ -377,9 +382,10 @@ Install the Operator with the following options:
 
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
-      nvidia/gpu-operator \
-      --set driver.enabled=false \
-      --set toolkit.enabled=false
+      nvidia/gpu-operator \
+      --version=${version} \
+      --set driver.enabled=false \
+      --set toolkit.enabled=false
 
 
 Pre-Installed NVIDIA Container Toolkit (but no drivers)
@@ -400,6 +406,7 @@ In this scenario, the NVIDIA Container Toolkit is already installed on the worke
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --version=${version} \
       --set toolkit.enabled=false
 
 Running a Custom Driver Image
@@ -428,6 +435,7 @@ you can build a custom driver container image. Follow these steps:
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --version=${version} \
       --set driver.repository=docker.io/nvidia \
       --set driver.version="465.27"
 
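Once the chart is installed with a custom repository and driver version, the image actually pulled by the driver pods can be checked; a minimal sketch, assuming the driver pods keep their default ``nvidia-driver-daemonset`` naming in the ``gpu-operator`` namespace:

   # Confirm the driver pods are running and inspect the image they use.
   $ kubectl get pods -n gpu-operator | grep nvidia-driver
   $ kubectl describe pods -n gpu-operator | grep 'Image:.*driver'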
@@ -465,6 +473,7 @@ If you need to specify custom values, refer to the following sample command for
 
    helm install gpu-operator -n gpu-operator --create-namespace \
       nvidia/gpu-operator $HELM_OPTIONS \
+      --version=${version} \
       --set toolkit.env[0].name=CONTAINERD_CONFIG \
       --set toolkit.env[0].value=/etc/containerd/config.toml \
       --set toolkit.env[1].name=CONTAINERD_SOCKET \
@@ -539,6 +548,7 @@ These options can be passed to GPU Operator during install time as below.
 
    helm install gpu-operator -n gpu-operator --create-namespace \
       nvidia/gpu-operator $HELM_OPTIONS \
+      --version=${version} \
       --set toolkit.env[0].name=CONTAINERD_CONFIG \
       --set toolkit.env[0].value=/var/snap/microk8s/current/args/containerd-template.toml \
       --set toolkit.env[1].name=CONTAINERD_SOCKET \