diff --git a/deploy/helm/semantic-router/templates/deployment.yaml b/deploy/helm/semantic-router/templates/deployment.yaml index 0fb0d1aee..477cdb216 100644 --- a/deploy/helm/semantic-router/templates/deployment.yaml +++ b/deploy/helm/semantic-router/templates/deployment.yaml @@ -70,6 +70,9 @@ spec: env: - name: HF_HUB_CACHE value: /tmp/hf_cache + {{- with .Values.initContainer.env }} + {{- toYaml . | nindent 10 }} + {{- end }} resources: {{- toYaml .Values.initContainer.resources | nindent 10 }} volumeMounts: diff --git a/deploy/helm/semantic-router/values.yaml b/deploy/helm/semantic-router/values.yaml index 8cd43938d..926676823 100644 --- a/deploy/helm/semantic-router/values.yaml +++ b/deploy/helm/semantic-router/values.yaml @@ -135,6 +135,18 @@ initContainer: requests: memory: "1Gi" cpu: "500m" + # -- Additional environment variables for the init container. + # For example, to use a private Hugging Face model, you can pass a token + # from a pre-existing Kubernetes secret and optionally override the endpoint. + # env: + # - name: HF_TOKEN + # valueFrom: + # secretKeyRef: + # name: my-hf-secret + # key: token + # - name: HF_ENDPOINT + # value: "https://huggingface.co" + env: [] # -- Models to download models: - name: all-MiniLM-L12-v2