 ---
+# chatbot-rag-app deploys "create-index" to install ELSER and load values.
+# Then, it starts "api-frontend" to serve the application.
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -18,29 +20,30 @@ spec:
         - name: gcloud-credentials
           secret:
             secretName: gcloud-credentials
+            optional: true  # only read when `LLM_TYPE=vertex`
       initContainers:
-      - name: create-index
-        image: &image ghcr.io/elastic/elasticsearch-labs/chatbot-rag-app:latest
-        command: &command [ "bash", "-eu", "./entrypoint.sh" ] # match image
-        args: ["flask", "create-index"]
-        # This recreates your configmap based on your .env file:
-        # kubectl create configmap chatbot-rag-app-env --from-env-file=.env
-        envFrom: &envFrom
-        - configMapRef:
-            name: chatbot-rag-app-env
+        - name: create-index
+          image: &image ghcr.io/elastic/elasticsearch-labs/chatbot-rag-app:latest
+          command: &command [ "bash", "-eu", "./entrypoint.sh" ] # match image
+          args: [ "flask", "create-index" ]
+          # This recreates your configmap based on your .env file:
+          # kubectl create configmap chatbot-rag-app-env --from-env-file=.env
+          envFrom: &envFrom
+            - configMapRef:
+                name: chatbot-rag-app-env
       containers:
-      - name: api-frontend
-        image: *image
-        command: *command
-        args: [ "python", "api/app.py" ]
-        ports:
-        - containerPort: 4000
-        envFrom: *envFrom
-        # For `LLM_TYPE=vertex`: mount credentials to the path read by the google-cloud-sdk
-        volumeMounts:
-        - name: gcloud-credentials
-          mountPath: /root/.config/gcloud
-          readOnly: true
+        - name: api-frontend
+          image: *image
+          command: *command
+          args: [ "python", "api/app.py" ]
+          ports:
+            - containerPort: 4000
+          envFrom: *envFrom
+          # For `LLM_TYPE=vertex`: mount credentials to the path read by the google-cloud-sdk
+          volumeMounts:
+            - name: gcloud-credentials
+              mountPath: /root/.config/gcloud
+              readOnly: true
 ---
 apiVersion: v1
 kind: Service
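
Note: for `LLM_TYPE=vertex`, the Deployment above mounts a `gcloud-credentials` secret at the path the google-cloud-sdk reads inside the container. A minimal sketch of creating that secret from local application default credentials (the credentials filename and its default location are assumptions, not something this manifest pins down):

    kubectl create secret generic gcloud-credentials \
      --from-file="$HOME/.config/gcloud/application_default_credentials.json"

Because the volume is marked `optional: true`, the pod can still start without this secret when another `LLM_TYPE` is configured.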