@@ -94,7 +94,7 @@ Tooling:
 helm install vllm-llama3-8b-instruct \
 --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
 --set provider.name=$GATEWAY_PROVIDER \
- --version v1.0.0 \
+ --version v1.0.1-rc.1 \
 oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
 ```
@@ -105,7 +105,7 @@ Tooling:
 helm install vllm-llama3-8b-instruct \
 --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
 --set provider.name=$GATEWAY_PROVIDER \
- --version v1.0.0 \
+ --version v1.0.1-rc.1 \
 oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
 ```
@@ -116,7 +116,7 @@ Tooling:
 helm install vllm-llama3-8b-instruct \
 --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
 --set provider.name=$GATEWAY_PROVIDER \
- --version v1.0.0 \
+ --version v1.0.1-rc.1 \
 oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
 ```
@@ -127,7 +127,7 @@ Tooling:
 helm install vllm-llama3-8b-instruct \
 --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
 --set provider.name=$GATEWAY_PROVIDER \
- --version v1.0.0 \
+ --version v1.0.1-rc.1 \
 oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
 ```
0 commit comments