@@ -10,9 +10,9 @@ ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER ?=
10
10
LLAMA_STACK_PORT ?= 8321
11
11
LOCAL_DB_PATH ?= .
12
12
CONTAINER_DB_PATH ?= /.llama/data/distributions/ansible-chatbot
13
- # quay.io/ansible/aap-rag-content:latest does not work with lightspeed-stack:latest
14
- # aap-rag-content uses llama-stack:0.2.14 whereas lightspeed-stack:latest uses 0.2.13.
15
- RAG_CONTENT_IMAGE ?= quay.io/ansible/aap-rag-content:1.0.1751985495
13
+ RAG_CONTENT_IMAGE ?= quay.io/ansible/aap-rag-content:latest
14
+ LIGHTSPEED_STACK_CONFIG ?= lightspeed-stack.yaml
15
+ LLAMA_STACK_RUN_CONFIG ?= ansible-chatbot-run.yaml
16
16
# Colors for terminal output
17
17
RED := \033[0;31m
18
18
NC := \033[0m # No Color
@@ -23,6 +23,8 @@ NC := \033[0m # No Color
23
23
24
24
UV_HTTP_TIMEOUT =120
25
25
26
+ PLATFORM ?= "linux/amd64"
27
+
26
28
help:
27
29
@echo " Makefile for Ansible Chatbot Stack"
28
30
@echo " Available targets:"
@@ -61,7 +63,7 @@ setup-vector-db:
61
63
@echo " Setting up vector db and embedding image..."
62
64
rm -rf ./vector_db ./embeddings_model
63
65
mkdir -p ./vector_db
64
- docker run -d --rm --name rag-content $(RAG_CONTENT_IMAGE) sleep infinity
66
+ docker run --platform $(PLATFORM) -d --rm --name rag-content $(RAG_CONTENT_IMAGE) sleep infinity
65
67
docker cp rag-content:/rag/llama_stack_vector_db/faiss_store.db.gz ./vector_db/aap_faiss_store.db.gz
66
68
docker cp rag-content:/rag/embeddings_model .
67
69
docker kill rag-content
@@ -76,8 +78,9 @@ check-env-build:
76
78
77
79
build: check-env-build
78
80
@echo " Building customized Ansible Chatbot Stack image from lightspeed-core/lightspeed-stack..."
79
- docker build -f ./Containerfile \
81
+ docker build --platform $(PLATFORM) -f ./Containerfile \
80
82
--build-arg ANSIBLE_CHATBOT_VERSION=$(ANSIBLE_CHATBOT_VERSION) \
83
+ --build-arg LLAMA_STACK_RUN_CONFIG=$(LLAMA_STACK_RUN_CONFIG) \
81
84
-t ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION) .
82
85
@printf "Custom image $(RED)ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)$(NC) built successfully.\n"
83
86
@@ -104,10 +107,11 @@ run: check-env-run
104
107
@echo " Running Ansible Chatbot Stack container..."
105
108
@echo "Using vLLM URL: $(ANSIBLE_CHATBOT_VLLM_URL)"
106
109
@echo "Using inference model: $(ANSIBLE_CHATBOT_INFERENCE_MODEL)"
107
- docker run --security-opt label=disable -it -p $(LLAMA_STACK_PORT):8080 \
110
+ docker run --platform $(PLATFORM) --security-opt label=disable -it -p $(LLAMA_STACK_PORT):8080 \
108
111
-v ./embeddings_model:/.llama/data/embeddings_model \
109
112
-v ./vector_db/aap_faiss_store.db:$(CONTAINER_DB_PATH)/aap_faiss_store.db \
110
- -v ./lightspeed-stack.yaml:/.llama/data/lightspeed-stack.yaml \
113
+ -v ./$(LIGHTSPEED_STACK_CONFIG):/.llama/distributions/ansible-chatbot/config/lightspeed-stack.yaml \
114
+ -v ./$(LLAMA_STACK_RUN_CONFIG):/.llama/distributions/llama-stack/config/ansible-chatbot-run.yaml \
111
115
-v ./ansible-chatbot-system-prompt.txt:/.llama/distributions/ansible-chatbot/system-prompts/default.txt \
112
116
--env VLLM_URL=$(ANSIBLE_CHATBOT_VLLM_URL) \
113
117
--env VLLM_API_TOKEN=$(ANSIBLE_CHATBOT_VLLM_API_TOKEN) \
@@ -140,11 +144,13 @@ run-local-db: check-env-run-local-db
140
144
@echo "Using inference model: $(ANSIBLE_CHATBOT_INFERENCE_MODEL)"
141
145
@echo "Using inference model for tools filtering: $(ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER)"
142
146
@echo "Mapping local DB from $(LOCAL_DB_PATH) to $(CONTAINER_DB_PATH)"
143
- docker run --security-opt label=disable -it -p $(LLAMA_STACK_PORT):8080 \
147
+ docker run --platform $(PLATFORM) --security-opt label=disable -it -p $(LLAMA_STACK_PORT):8080 \
144
148
-v $(LOCAL_DB_PATH):$(CONTAINER_DB_PATH) \
145
149
-v ./embeddings_model:/app/embeddings_model \
146
150
-v ./vector_db/aap_faiss_store.db:$(CONTAINER_DB_PATH)/aap_faiss_store.db \
147
- -v ./lightspeed-stack.yaml:/.llama/data/lightspeed-stack.yaml \
151
+ -v ./$(LIGHTSPEED_STACK_CONFIG):/.llama/distributions/ansible-chatbot/config/lightspeed-stack.yaml \
152
+ -v ./$(LLAMA_STACK_RUN_CONFIG):/.llama/distributions/llama-stack/config/ansible-chatbot-run.yaml \
153
+ -v ./ansible-chatbot-system-prompt.txt:/.llama/distributions/ansible-chatbot/system-prompts/default.txt \
148
154
--env VLLM_URL=$(ANSIBLE_CHATBOT_VLLM_URL) \
149
155
--env VLLM_API_TOKEN=$(ANSIBLE_CHATBOT_VLLM_API_TOKEN) \
150
156
--env INFERENCE_MODEL=$(ANSIBLE_CHATBOT_INFERENCE_MODEL) \
0 commit comments