@@ -100,7 +100,7 @@ Procedure
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version}
+   --version=${version}

 - Install the Operator and specify configuration options:

@@ -109,7 +109,7 @@ Procedure
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version} \
    --set <option-name>=<option-value>

 Refer to the :ref:`gpu-operator-helm-chart-options`
@@ -295,7 +295,7 @@ For example, to install the GPU Operator in the ``nvidia-gpu-operator`` namespac
 $ helm install --wait --generate-name \
    -n nvidia-gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version}

 If you do not specify a namespace during installation, all GPU Operator components are installed in the ``default`` namespace.

@@ -333,7 +333,7 @@ In this scenario, use the NVIDIA Container Toolkit image that is built on UBI 8:
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version} \
    --set toolkit.version=v1.16.1-ubi8

 Replace the ``v1.16.1`` value in the preceding command with the version that is supported
@@ -354,7 +354,7 @@ In this scenario, the NVIDIA GPU driver is already installed on the worker nodes
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version} \
    --set driver.enabled=false

 The preceding command prevents the Operator from installing the GPU driver on any nodes in the cluster.
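A quick way to confirm that no driver pods are rolled out after such an install, sketched on the assumption that the driver daemonset pods carry the ``app=nvidia-driver-daemonset`` label used by recent GPU Operator releases:

   # Expect no resources to be found when driver.enabled=false.
   $ kubectl get pods -n gpu-operator -l app=nvidia-driver-daemonset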
@@ -384,7 +384,7 @@ Install the Operator with the following options:
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version} \
    --set driver.enabled=false \
    --set toolkit.enabled=false

@@ -407,7 +407,7 @@ In this scenario, the NVIDIA Container Toolkit is already installed on the worke
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version} \
    --set toolkit.enabled=false

 Running a Custom Driver Image
@@ -436,7 +436,7 @@ you can build a custom driver container image. Follow these steps:
 $ helm install --wait --generate-name \
    -n gpu-operator --create-namespace \
    nvidia/gpu-operator \
-   --set version=${version} \
+   --version=${version} \
    --set driver.repository=docker.io/nvidia \
    --set driver.version="465.27"

@@ -474,7 +474,7 @@ If you need to specify custom values, refer to the following sample command for

 helm install gpu-operator -n gpu-operator --create-namespace \
    nvidia/gpu-operator $HELM_OPTIONS \
-   --set version=${version} \
+   --version=${version} \
    --set toolkit.env[0].name=CONTAINERD_CONFIG \
    --set toolkit.env[0].value=/etc/containerd/config.toml \
    --set toolkit.env[1].name=CONTAINERD_SOCKET \
@@ -547,7 +547,7 @@ These options can be passed to GPU Operator during install time as below.

 helm install gpu-operator -n gpu-operator --create-namespace \
    nvidia/gpu-operator $HELM_OPTIONS \
-   --set version=${version} \
+   --version=${version} \
    --set toolkit.env[0].name=CONTAINERD_CONFIG \
    --set toolkit.env[0].value=/var/snap/microk8s/current/args/containerd-template.toml \
    --set toolkit.env[1].name=CONTAINERD_SOCKET \
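Every command in this diff assumes that the ``${version}`` shell variable already holds the GPU Operator chart version to install. The change itself swaps ``--set version=${version}``, which only passes a chart value named ``version``, for Helm's own ``--version`` flag, which pins the chart version pulled from the repository. A minimal sketch of the surrounding setup, with the version value chosen purely as an example:

   # Add the NVIDIA Helm repository and pick a chart version (v24.9.0 is illustrative only).
   $ helm repo add nvidia https://helm.ngc.nvidia.com/nvidia \
       && helm repo update
   $ version=v24.9.0

   $ helm install --wait --generate-name \
        -n gpu-operator --create-namespace \
        nvidia/gpu-operator \
        --version=${version}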