@@ -99,7 +99,8 @@ Procedure

    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
-      nvidia/gpu-operator
+      nvidia/gpu-operator \
+      --set version=${version}

 - Install the Operator and specify configuration options:

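The ``${version}`` shell variable used in these commands is not defined in this excerpt; it is assumed to hold the GPU Operator version you want to install. A minimal sketch of setting it beforehand (the value shown is only an example):

   # Optionally list the published chart versions to pick from
   $ helm search repo nvidia/gpu-operator --versions

   # Hypothetical value; substitute the version you actually want
   $ version=v24.6.0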
@@ -108,6 +109,7 @@ Procedure
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --set version=${version} \
       --set <option-name>=<option-value>

 Refer to the :ref:`gpu-operator-helm-chart-options`
@@ -292,7 +294,8 @@ For example, to install the GPU Operator in the ``nvidia-gpu-operator`` namespac

    $ helm install --wait --generate-name \
       -n nvidia-gpu-operator --create-namespace \
-      nvidia/gpu-operator
+      nvidia/gpu-operator \
+      --set version=${version}

 If you do not specify a namespace during installation, all GPU Operator components are installed in the ``default`` namespace.

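After installing into a custom namespace, a quick way to confirm where the components landed (assuming the ``nvidia-gpu-operator`` namespace from the example above):

   $ helm list -n nvidia-gpu-operator
   $ kubectl get pods -n nvidia-gpu-operator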
@@ -330,6 +333,7 @@ In this scenario, use the NVIDIA Container Toolkit image that is built on UBI 8:
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --set version=${version} \
       --set toolkit.version=v1.16.1-ubi8

 Replace the ``v1.16.1`` value in the preceding command with the version that is supported
@@ -350,6 +354,7 @@ In this scenario, the NVIDIA GPU driver is already installed on the worker nodes
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --set version=${version} \
       --set driver.enabled=false

 The preceding command prevents the Operator from installing the GPU driver on any nodes in the cluster.
@@ -378,9 +383,10 @@ Install the Operator with the following options:

    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
-      nvidia/gpu-operator \
-      --set driver.enabled=false \
-      --set toolkit.enabled=false
+      nvidia/gpu-operator \
+      --set version=${version} \
+      --set driver.enabled=false \
+      --set toolkit.enabled=false


 Pre-Installed NVIDIA Container Toolkit (but no drivers)
@@ -401,6 +407,7 @@ In this scenario, the NVIDIA Container Toolkit is already installed on the worke
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --set version=${version} \
       --set toolkit.enabled=false

 Running a Custom Driver Image
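For the pre-installed toolkit scenario, it can help to confirm the toolkit is actually present on each worker node before disabling its deployment; a minimal check, assuming the ``nvidia-ctk`` CLI was installed along with the toolkit:

   $ nvidia-ctk --version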
@@ -429,6 +436,7 @@ you can build a custom driver container image. Follow these steps:
    $ helm install --wait --generate-name \
       -n gpu-operator --create-namespace \
       nvidia/gpu-operator \
+      --set version=${version} \
       --set driver.repository=docker.io/nvidia \
       --set driver.version="465.27"

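With ``driver.repository`` and ``driver.version`` set as above, the driver image is pulled from the custom repository rather than the default one. A way to verify the image is reachable from a node, assuming it was pushed with a tag that combines the driver version and an OS suffix (the tag below is only an illustrative guess):

   $ docker pull docker.io/nvidia/driver:465.27-ubuntu20.04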
@@ -466,6 +474,7 @@ If you need to specify custom values, refer to the following sample command for

    helm install gpu-operator -n gpu-operator --create-namespace \
       nvidia/gpu-operator $HELM_OPTIONS \
+      --set version=${version} \
       --set toolkit.env[0].name=CONTAINERD_CONFIG \
       --set toolkit.env[0].value=/etc/containerd/config.toml \
       --set toolkit.env[1].name=CONTAINERD_SOCKET \
@@ -538,6 +547,7 @@ These options can be passed to GPU Operator during install time as below.

    helm install gpu-operator -n gpu-operator --create-namespace \
       nvidia/gpu-operator $HELM_OPTIONS \
+      --set version=${version} \
       --set toolkit.env[0].name=CONTAINERD_CONFIG \
       --set toolkit.env[0].value=/var/snap/microk8s/current/args/containerd-template.toml \
       --set toolkit.env[1].name=CONTAINERD_SOCKET \