@@ -89,47 +89,47 @@ Tooling:
8989
9090=== "GKE"
9191
92-    ``` bash 
93-    export  GATEWAY_PROVIDER=gke
94-    helm install vllm-llama3-8b-instruct \
95-    --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
96-    --set provider.name=$GATEWAY_PROVIDER  \
97-    --version v0.5.1 \
98-    oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
99-    ``` 
92+         ```bash
93+         export GATEWAY_PROVIDER=gke
94+         helm install vllm-llama3-8b-instruct \
95+         --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
96+         --set provider.name=$GATEWAY_PROVIDER \
97+         --version v0.5.1 \
98+         oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
99+         ```
100100
101101=== "Istio"
102102
103-    ``` bash 
104-    export  GATEWAY_PROVIDER=none
105-    helm install vllm-llama3-8b-instruct \
106-    --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
107-    --set provider.name=$GATEWAY_PROVIDER  \
108-    --version v0.5.1 \
109-    oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
110-    ``` 
103+         ```bash
104+         export GATEWAY_PROVIDER=none
105+         helm install vllm-llama3-8b-instruct \
106+         --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
107+         --set provider.name=$GATEWAY_PROVIDER \
108+         --version v0.5.1 \
109+         oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
110+         ```
111111
112112=== "Kgateway"
113113
114-    ``` bash 
115-    export  GATEWAY_PROVIDER=none
116-    helm install vllm-llama3-8b-instruct \
117-    --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
118-    --set provider.name=$GATEWAY_PROVIDER  \
119-    --version v0.5.1 \
120-    oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
121-    ``` 
114+         ```bash
115+         export GATEWAY_PROVIDER=none
116+         helm install vllm-llama3-8b-instruct \
117+         --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
118+         --set provider.name=$GATEWAY_PROVIDER \
119+         --version v0.5.1 \
120+         oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
121+         ```
122122
123123=== "Agentgateway"
124124
125-    ``` bash 
126-    export  GATEWAY_PROVIDER=none
127-    helm install vllm-llama3-8b-instruct \
128-    --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
129-    --set provider.name=$GATEWAY_PROVIDER  \
130-    --version v0.5.1 \
131-    oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
132-    ``` 
125+         ```bash
126+         export GATEWAY_PROVIDER=none
127+         helm install vllm-llama3-8b-instruct \
128+         --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
129+         --set provider.name=$GATEWAY_PROVIDER \
130+         --version v0.5.1 \
131+         oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
132+         ```
133133
134134### Deploy an Inference Gateway
135135
0 commit comments