diff --git a/hack/release-quickstart.sh b/hack/release-quickstart.sh
index 04b79a3ef..813146dee 100755
--- a/hack/release-quickstart.sh
+++ b/hack/release-quickstart.sh
@@ -132,11 +132,15 @@ sed -i.bak '/us-central1-docker.pkg.dev\/k8s-staging-images\/gateway-api-inferen
 # Update the container registry for lora-syncer in vLLM CPU and GPU deployment manifests.
 sed -i.bak -E "s|us-central1-docker\.pkg\.dev/k8s-staging-images|registry.k8s.io|g" "$VLLM_GPU_DEPLOY" "$VLLM_CPU_DEPLOY"
 
+# Update IGW_CHART_VERSION in quickstart guide to match the current release tag
+GUIDES_INDEX="site-src/guides/index.md"
+sed -i.bak -E "s/export IGW_CHART_VERSION=v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?/export IGW_CHART_VERSION=${RELEASE_TAG}/g" "$GUIDES_INDEX"
+
 # -----------------------------------------------------------------------------
 # Stage the changes
 # -----------------------------------------------------------------------------
-echo "Staging $VERSION_FILE $UPDATED_CRD $README $EPP_HELM $BBR_HELM $CONFORMANCE_MANIFESTS $VLLM_GPU_DEPLOY $VLLM_CPU_DEPLOY $VLLM_SIM_DEPLOY files..."
-git add $VERSION_FILE $UPDATED_CRD $README $EPP_HELM $BBR_HELM $CONFORMANCE_MANIFESTS $VLLM_GPU_DEPLOY $VLLM_CPU_DEPLOY $VLLM_SIM_DEPLOY
+echo "Staging $VERSION_FILE $UPDATED_CRD $README $EPP_HELM $BBR_HELM $CONFORMANCE_MANIFESTS $VLLM_GPU_DEPLOY $VLLM_CPU_DEPLOY $VLLM_SIM_DEPLOY $GUIDES_INDEX files..."
+git add $VERSION_FILE $UPDATED_CRD $README $EPP_HELM $BBR_HELM $CONFORMANCE_MANIFESTS $VLLM_GPU_DEPLOY $VLLM_CPU_DEPLOY $VLLM_SIM_DEPLOY $GUIDES_INDEX
 
 # -----------------------------------------------------------------------------
 # Cleanup backup files and finish
diff --git a/site-src/guides/index.md b/site-src/guides/index.md
index 15339aa95..6c7b2c528 100644
--- a/site-src/guides/index.md
+++ b/site-src/guides/index.md
@@ -91,10 +91,11 @@ Tooling:
 
   ```bash
   export GATEWAY_PROVIDER=gke
+  export IGW_CHART_VERSION=v1.0.1-rc.1
   helm install vllm-llama3-8b-instruct \
   --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
   --set provider.name=$GATEWAY_PROVIDER \
-  --version v1.0.1-rc.1 \
+  --version $IGW_CHART_VERSION \
   oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
   ```
 
@@ -102,10 +103,11 @@ Tooling:
 
   ```bash
   export GATEWAY_PROVIDER=istio
+  export IGW_CHART_VERSION=v1.0.1-rc.1
   helm install vllm-llama3-8b-instruct \
   --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
   --set provider.name=$GATEWAY_PROVIDER \
-  --version v1.0.1-rc.1 \
+  --version $IGW_CHART_VERSION \
   oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
   ```
 
@@ -113,10 +115,11 @@ Tooling:
 
   ```bash
   export GATEWAY_PROVIDER=none
+  export IGW_CHART_VERSION=v1.0.1-rc.1
   helm install vllm-llama3-8b-instruct \
   --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
   --set provider.name=$GATEWAY_PROVIDER \
-  --version v1.0.1-rc.1 \
+  --version $IGW_CHART_VERSION \
   oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
   ```
 
@@ -124,10 +127,11 @@ Tooling:
 
   ```bash
   export GATEWAY_PROVIDER=none
+  export IGW_CHART_VERSION=v1.0.1-rc.1
   helm install vllm-llama3-8b-instruct \
   --set inferencePool.modelServers.matchLabels.app=vllm-llama3-8b-instruct \
   --set provider.name=$GATEWAY_PROVIDER \
-  --version v1.0.1-rc.1 \
+  --version $IGW_CHART_VERSION \
   oci://registry.k8s.io/gateway-api-inference-extension/charts/inferencepool
   ```
 
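For reference, a minimal standalone sketch of what the new sed substitution in `release-quickstart.sh` does, outside the script; the release tag value `v1.0.1` here is only an assumed example, not taken from the diff:

```bash
# Demonstrate the regex used to rewrite the pinned chart version in the guide.
# RELEASE_TAG=v1.0.1 is a hypothetical example value for illustration.
RELEASE_TAG="v1.0.1"
echo 'export IGW_CHART_VERSION=v1.0.1-rc.1' \
  | sed -E "s/export IGW_CHART_VERSION=v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?/export IGW_CHART_VERSION=${RELEASE_TAG}/g"
# prints: export IGW_CHART_VERSION=v1.0.1
```

The regex matches both plain semver tags (v1.0.1) and release-candidate tags (v1.0.1-rc.1), so every `export IGW_CHART_VERSION=...` line in site-src/guides/index.md is rewritten to the tag being released.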