diff --git a/docker/README.md b/docker/README.md index 75062ab56..41824c621 100644 --- a/docker/README.md +++ b/docker/README.md @@ -9,8 +9,11 @@ Note: If you haven't checked out this repository, all you need is one file: wget https://raw.githubusercontent.com/elastic/elasticsearch-labs/refs/heads/main/docker/docker-compose-elastic.yml ``` -Use docker compose to run Elastic stack in the background: +Before you begin, ensure you have free CPU and memory on your Docker host. If +you plan to use ELSER, assume a minimum of 8 cpus and 6GB memory for the +containers in this compose file. +First, start this Elastic Stack in the background: ```bash docker compose -f docker-compose-elastic.yml up --force-recreate --wait -d ``` @@ -20,7 +23,6 @@ Then, you can view Kibana at http://localhost:5601/app/home#/ If asked for a username and password, use username: elastic and password: elastic. Clean up when finished, like this: - ```bash docker compose -f docker-compose-elastic.yml down ``` diff --git a/docker/docker-compose-elastic.yml b/docker/docker-compose-elastic.yml index 6deeea829..9dbacf8ab 100644 --- a/docker/docker-compose-elastic.yml +++ b/docker/docker-compose-elastic.yml @@ -27,7 +27,7 @@ services: test: # readiness probe taken from kbn-health-gateway-server script [ "CMD-SHELL", - "curl -s http://localhost:9200 | grep -q 'missing authentication credentials'", + "curl --max-time 1 -s http://localhost:9200 | grep -q 'missing authentication credentials'", ] start_period: 10s interval: 1s @@ -41,12 +41,15 @@ services: image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 container_name: elasticsearch_settings restart: 'no' + # gen-ai assistants in kibana save state in a way that requires system + # access, so set kibana_system's password to a known value. command: > - bash -c ' - # gen-ai assistants in kibana save state in a way that requires security to be enabled, so we need to create - # a kibana system user before starting it. 
+ bash -c ' echo "Setup the kibana_system password"; - until curl -s -u "elastic:elastic" -X POST http://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"elastic\"}" -H "Content-Type: application/json" | grep -q "^{}"; do sleep 5; done; + until curl --max-time 1 -s -u "elastic:elastic" \ + -X POST http://elasticsearch:9200/_security/user/kibana_system/_password \ + -d "{\"password\":\"elastic\"}" \ + -H "Content-Type: application/json" | grep -q "^{}"; do sleep 5; done; ' kibana: @@ -69,7 +72,7 @@ services: - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=fhjskloppd678ehkdfdlliverpoolfcr - SERVER_PUBLICBASEURL=http://127.0.0.1:5601 healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status | grep -q 'available'"] + test: ["CMD-SHELL", "curl --max-time 1 -s http://localhost:5601/api/status | grep -q 'available'"] retries: 300 interval: 1s diff --git a/example-apps/chatbot-rag-app/Dockerfile b/example-apps/chatbot-rag-app/Dockerfile index 210144346..4cfd50091 100644 --- a/example-apps/chatbot-rag-app/Dockerfile +++ b/example-apps/chatbot-rag-app/Dockerfile @@ -5,9 +5,7 @@ COPY frontend ./frontend RUN cd frontend && yarn install RUN cd frontend && REACT_APP_API_HOST=/api yarn build -# langchain and vertexai depend on a large number of system packages including -# linux-headers, g++, geos, geos-dev, rust and cargo. These are already present -# on -slim and adding them to -alpine results in a larger image than -slim. +# Use glibc-based image to get pre-compiled wheels for grpcio and tiktoken FROM python:3.12-slim WORKDIR /app @@ -27,10 +25,7 @@ EXPOSE 4000 # docker invocations to reenable. ENV OTEL_SDK_DISABLED=true -# https://github.com/elastic/genai-instrumentation/issues/255 -# Currently Python SDK has a bug that spams logs when opentelemetry-instrument is used -# with SDK being disabled. Until it is fixed, we handle it in our own entrypoint by -# avoiding opentelemetry-instrument when SDK is disabled. 
+# TODO remove custom entrypoint when EDOT Python >0.7.0 is released. RUN echo 'if [ "${OTEL_SDK_DISABLED:-true}" == "false" ]; \ then \ opentelemetry-instrument $@; \ @@ -38,4 +33,4 @@ RUN echo 'if [ "${OTEL_SDK_DISABLED:-true}" == "false" ]; \ exec $@; \ fi' > entrypoint.sh ENTRYPOINT [ "bash", "-eu", "./entrypoint.sh" ] -CMD [ "python", "api/app.py"] +CMD [ "python", "api/app.py" ] diff --git a/example-apps/chatbot-rag-app/README.md b/example-apps/chatbot-rag-app/README.md index 869a94c7e..3579ee5a9 100644 --- a/example-apps/chatbot-rag-app/README.md +++ b/example-apps/chatbot-rag-app/README.md @@ -22,8 +22,8 @@ Copy [env.example](env.example) to `.env` and fill in values noted inside. ## Installing and connecting to Elasticsearch There are a number of ways to install Elasticsearch. Cloud is best for most -use-cases. We also have [docker-compose-elastic.yml](../../docker), that starts -Elasticsearch, Kibana, and APM Server on your laptop with one command. +use-cases. We also have [docker-compose-elastic.yml][docker-compose-elastic], +that starts Elasticsearch, Kibana, and APM Server on your laptop in one step. Once you decided your approach, edit your `.env` file accordingly. @@ -71,6 +71,68 @@ Clean up when finished, like this: docker compose down ``` +### Run with Kubernetes + +Kubernetes is more complicated than Docker, but closer to the production +experience for many users. [k8s-manifest.yml](k8s-manifest.yml) creates the +same services, but needs additional configuration first. + +First step is to setup your environment. [env.example](env.example) must be +copied to a file name `.env` and updated with `ELASTICSEARCH_URL` and +`OTEL_EXPORTER_OTLP_ENDPOINT` values visible to you Kubernetes deployment. 
+ +For example, if you started your Elastic Stack with [k8s-manifest-elastic.yml][k8s-manifest-elastic], +you would update these values: +``` +ELASTICSEARCH_URL=http://elasticsearch:9200 +OTEL_EXPORTER_OTLP_ENDPOINT=http://apm-server:8200 +``` + +Then, import your `.env` file as a configmap like this: +```bash +kubectl create configmap chatbot-rag-app-env --from-env-file=.env +``` + +
+To use Vertex AI, set `LLM_TYPE=vertex` in your `.env` and follow these steps.
+The `api-frontend` container needs access to your Google Cloud credentials.
+Share your `application_default_credentials.json` as a Kubernetes secret:
+```bash
+# Logs you into Google Cloud and creates application_default_credentials.json
+gcloud auth application-default login
+# Adds your credentials to a Kubernetes secret named gcloud-credentials
+kubectl create secret generic gcloud-credentials \
+  --from-file=application_default_credentials.json=$HOME/.config/gcloud/application_default_credentials.json
+```
+
+
+Now that your configuration is applied, create the `chatbot-rag-app` deployment
+and service by applying this manifest:
+```bash
+kubectl apply -f k8s-manifest.yml
+```
+
+Next, block until `chatbot-rag-app` is available:
+```bash
+kubectl wait --for=condition=available --timeout=20m deployment/chatbot-rag-app
+```
+
+*Note*: The first run may take several minutes to become available. Here's how
+to follow logs during this stage:
+```bash
+kubectl logs deployment.apps/chatbot-rag-app -c create-index -f
+```
+
+Next, forward the web UI port:
+```bash
+kubectl port-forward deployment.apps/chatbot-rag-app 4000:4000 &
+```
+
+Clean up when finished, like this:
+```bash
+kubectl delete -f k8s-manifest.yml
+```
+
 ### Run with Python
 
 If you want to run this example with Python, you need to do a few things listed
@@ -196,3 +258,5 @@ docker compose up --build --force-recreate
 ---
 [loader-docs]: https://python.langchain.com/docs/how_to/#document-loaders
 [install-es]: https://www.elastic.co/search-labs/tutorials/install-elasticsearch
+[docker-compose-elastic]: ../../docker/docker-compose-elastic.yml
+[k8s-manifest-elastic]: ../../k8s/k8s-manifest-elastic.yml
diff --git a/example-apps/chatbot-rag-app/env.example b/example-apps/chatbot-rag-app/env.example
index 0cd2fb75f..1902064e6 100644
--- a/example-apps/chatbot-rag-app/env.example
+++ b/example-apps/chatbot-rag-app/env.example
@@ -6,6 +6,10 @@ FLASK_APP=api/app.py
 PYTHONUNBUFFERED=1
 
 # How you connect to Elasticsearch: change details to your instance
+# This defaults to an Elastic Stack accessible via localhost.
+#
+# When running inside Kubernetes, set to http://elasticsearch.default.svc:9200
+# or similar.
ELASTICSEARCH_URL=http://localhost:9200 ELASTICSEARCH_USER=elastic ELASTICSEARCH_PASSWORD=elastic @@ -68,7 +72,11 @@ OTEL_SDK_DISABLED=true # Assign the service name that shows up in Kibana OTEL_SERVICE_NAME=chatbot-rag-app -# Default to send traces to the Elastic APM server +# Default to send logs, traces and metrics to an Elastic APM server accessible +# via localhost. +# +# When running inside Kubernetes, set to http://elasticsearch.default.svc:9200 +# or similar. OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8200 OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf diff --git a/example-apps/chatbot-rag-app/k8s-manifest.yml b/example-apps/chatbot-rag-app/k8s-manifest.yml new file mode 100644 index 000000000..34c46a883 --- /dev/null +++ b/example-apps/chatbot-rag-app/k8s-manifest.yml @@ -0,0 +1,58 @@ +--- +# chatbot-rag-app deploys "create-index" to install ELSER and load values. +# Then, it starts "api-frontend" to serve the application. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: chatbot-rag-app +spec: + replicas: 1 + selector: + matchLabels: + app: chatbot-rag-app + template: + metadata: + labels: + app: chatbot-rag-app + spec: + # For `LLM_TYPE=vertex`: create a volume for application_default_credentials.json + volumes: + - name: gcloud-credentials + secret: + secretName: gcloud-credentials + optional: true # only read when `LLM_TYPE=vertex` + initContainers: + - name: create-index + image: &image ghcr.io/elastic/elasticsearch-labs/chatbot-rag-app:latest + command: &command [ "bash", "-eu", "./entrypoint.sh" ] # match image + args: [ "flask", "create-index" ] + # This recreates your configmap based on your .env file: + # kubectl create configmap chatbot-rag-app-env --from-env-file=.env + envFrom: &envFrom + - configMapRef: + name: chatbot-rag-app-env + containers: + - name: api-frontend + image: *image + command: *command + args: [ "python", "api/app.py" ] + ports: + - containerPort: 4000 + envFrom: *envFrom + # For `LLM_TYPE=vertex`: mount credentials to 
the path read by the google-cloud-sdk + volumeMounts: + - name: gcloud-credentials + mountPath: /root/.config/gcloud + readOnly: true +--- +apiVersion: v1 +kind: Service +metadata: + name: api +spec: + selector: + app: chatbot-rag-app + ports: + - protocol: TCP + port: 4000 + targetPort: 4000 diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 000000000..93957a8e5 --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,47 @@ +# Running your own Elastic Stack with Kubernetes + +If you'd like to start Elastic with Kubernetes, you can use the provided +[manifest-elastic.yml](manifest-elastic.yml) file. This starts +Elasticsearch, Kibana, and APM Server in an existing Kubernetes cluster. + +Note: If you haven't checked out this repository, all you need is one file: +```bash +wget https://raw.githubusercontent.com/elastic/elasticsearch-labs/refs/heads/main/k8s/k8s-manifest-elastic.yml +``` + +Before you begin, ensure you have free CPU and memory in your cluster. If you +plan to use ELSER, assume a minimum of 8 cpus and 6GB memory for the containers +in this manifest. + +First, start this Elastic Stack in the background: +```bash +kubectl apply -f k8s-manifest-elastic.yml +``` + +**Note**: For simplicity, this adds an Elastic Stack to the default namespace. +Commands after here are simpler due to this. If you want to choose a different +one, use `kubectl`'s `--namespace` flag! + +Next, block until the whole stack is available. First install or changing the +Elastic Stack version can take a long time due to image pulling. +```bash +kubectl wait --for=condition=available --timeout=10m \ + deployment/elasticsearch \ + deployment/kibana \ + deployment/apm-server +``` + +Next, forward the kibana port: +```bash +kubectl port-forward service/kibana 5601:5601 & +``` + +Finally, you can view Kibana at http://localhost:5601/app/home#/ + +If asked for a username and password, use username: elastic and password: elastic. 
+ +Clean up when finished, like this: + +```bash +kubectl delete -f k8s-manifest-elastic.yml +``` diff --git a/k8s/k8s-manifest-elastic.yml b/k8s/k8s-manifest-elastic.yml new file mode 100644 index 000000000..41fb16879 --- /dev/null +++ b/k8s/k8s-manifest-elastic.yml @@ -0,0 +1,212 @@ +# This is a simple k8s manifest to start Elasticsearch, Kibana and APM server +# with the same configuration as ../docker/docker-compose-elastic.yml +# +# For this reason, if trying to understand why a setting exists, look at the +# docker variant first. Similarly, updates to the docker variant should happen +# here as well. + +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch +spec: + ports: + - port: 9200 + targetPort: 9200 + selector: + app: elasticsearch + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: elasticsearch +spec: + replicas: 1 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 + ports: + - containerPort: 9200 + env: + - name: node.name + value: elasticsearch + - name: cluster.name + value: k8s-cluster + - name: discovery.type + value: single-node + - name: ELASTIC_PASSWORD + value: elastic + - name: bootstrap.memory_lock + value: "true" + - name: xpack.security.enabled + value: "true" + - name: xpack.security.http.ssl.enabled + value: "false" + - name: xpack.security.transport.ssl.enabled + value: "false" + - name: xpack.license.self_generated.type + value: trial + # Note that ELSER is recommended to have 2GB, but it is JNI (PyTorch). + # ELSER's memory is in addition to the heap and other overhead. 
+ - name: ES_JAVA_OPTS + value: "-Xms2g -Xmx2g" + securityContext: + capabilities: + add: ["CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"] + drop: ["ALL"] + readinessProbe: + exec: + command: ["sh", "-c", "curl --max-time 1 -s http://localhost:9200 | grep -q 'missing authentication credentials'"] + initialDelaySeconds: 5 + periodSeconds: 1 + timeoutSeconds: 10 + failureThreshold: 120 + +--- +apiVersion: v1 +kind: Service +metadata: + name: kibana +spec: + ports: + - port: 5601 + targetPort: 5601 + selector: + app: kibana + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + initContainers: + # gen-ai assistants in kibana save state in a way that requires system + # access, so set kibana_system's password to a known value. + - name: setup-kibana-system-user + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 + command: + - bash + - -c + - | + echo "Setup the kibana_system password"; + until curl --max-time 1 -s -u "elastic:elastic" \ + -X POST http://elasticsearch.default.svc:9200/_security/user/kibana_system/_password \ + -d "{\"password\":\"elastic\"}" \ + -H "Content-Type: application/json" | grep -q "^{}"; do sleep 5; done; + containers: + - name: kibana + image: docker.elastic.co/kibana/kibana:8.17.2 + ports: + - containerPort: 5601 + env: + - name: SERVERNAME + value: kibana + - name: ELASTICSEARCH_HOSTS + value: http://elasticsearch.default.svc:9200 + - name: ELASTICSEARCH_USERNAME + value: kibana_system + - name: ELASTICSEARCH_PASSWORD + value: elastic + - name: MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED + value: "true" + - name: XPACK_SECURITY_ENCRYPTIONKEY + value: fhjskloppd678ehkdfdlliverpoolfcr + - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY + value: fhjskloppd678ehkdfdlliverpoolfcr + - name: SERVER_HOST + value: 0.0.0.0 + - name: SERVER_PUBLICBASEURL + value: http://127.0.0.1:5601 + readinessProbe: + 
exec: + command: ["sh", "-c", "curl --max-time 1 -s http://localhost:5601/api/status | grep -q 'available'"] + initialDelaySeconds: 1 + periodSeconds: 1 + failureThreshold: 300 + +--- +apiVersion: v1 +kind: Service +metadata: + name: apm-server +spec: + ports: + - port: 8200 + targetPort: 8200 + selector: + app: apm-server + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: apm-server +spec: + replicas: 1 + selector: + matchLabels: + app: apm-server + template: + metadata: + labels: + app: apm-server + spec: + initContainers: + - name: await-kibana + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 + command: + - bash + - -xc + - | + echo "Waiting for kibana to be available"; + until curl --max-time 1 -s http://kibana.default.svc:5601/api/status | grep -q 'available'; do sleep 1; done; + containers: + - name: apm-server + image: docker.elastic.co/apm/apm-server:8.17.2 + command: + - apm-server + - -E + - apm-server.kibana.enabled=true + - -E + - apm-server.kibana.host=http://kibana.default.svc:5601 + - -E + - apm-server.kibana.username=elastic + - -E + - apm-server.kibana.password=elastic + - -E + - output.elasticsearch.hosts=["http://elasticsearch.default.svc:9200"] + - -E + - output.elasticsearch.username=elastic + - -E + - output.elasticsearch.password=elastic + ports: + - containerPort: 8200 + readinessProbe: + tcpSocket: + port: 8200 + initialDelaySeconds: 1 + periodSeconds: 1 + failureThreshold: 300 + securityContext: + capabilities: + add: ["CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"] + drop: ["ALL"]