-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path: docker-compose.template.j2
More file actions
102 lines (101 loc) · 2.78 KB
/
docker-compose.template.j2
File metadata and controls
102 lines (101 loc) · 2.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
services:
  # FastAPI service — main application container.
  app:
    build:
      context: .
      dockerfile: Dockerfile.final
      args:
        USER_UID: ${USER_UID}
        USER_GID: ${USER_GID}
    labels:
      # Compose label values must be strings; an unquoted `true` is parsed as a
      # YAML boolean and is rejected by the Compose schema validator.
      autoheal-label: "true"
    # Provision Milvus credentials first, then start the API server on port 80.
    command: /bin/sh -c "python scripts/milvus_credentials_setup.py && uvicorn app.main:app --host 0.0.0.0 --port 80"
    env_file:
      - .env.app
    environment:
      - USE_GPU={{ USE_GPU }}
      - MILVUS__URI=http://milvus-standalone:19530
      - MILVUS__USER=${MILVUS_AIOD_USER}
      - MILVUS__PASS=${MILVUS_AIOD_PASS}
      - METADATA_FILTERING__ENABLED={{ USE_LLM }}
      - MILVUS_NEW_ROOT_PASS=${MILVUS_NEW_ROOT_PASS}
      - MONGO__HOST=mongo
      - MONGO__PORT=27017
      - MONGO__USER=${MONGO_USER}
      - MONGO__PASSWORD=${MONGO_PASSWORD}
      - MONGO__DBNAME=aiod
      - CHATBOT__USE_CHATBOT={{ USE_CHATBOT }}
      - AIOD__JSON_SAVEPATH=/cold_data
      {% if USE_LLM == "true" %}
      - OLLAMA__URI=http://ollama:11434
      {% endif %}
    ports:
      # Port mapping kept quoted to avoid YAML sexagesimal/number parsing.
      - "${APP_HOST_PORT:-8000}:80"
    depends_on:
      milvus-standalone:
        condition: service_healthy
        restart: false
      mongo:
        condition: service_healthy
        restart: false
      {% if USE_LLM == "true" %}
      ollama:
        condition: service_healthy
        restart: false
      {% endif %}
    volumes:
      - ${DATA_DIRPATH}/model:/model
      - ${DATA_DIRPATH}/cold_data:/cold_data
    healthcheck:
      # TODO GPU specific healthcheck shouldn't be necessary once we resolve issue:
      # https://github.com/aiondemand/aiod-enhanced-interaction/issues/76
      {% if USE_GPU == "true" %}
      test: ["CMD-SHELL", "curl -f http://localhost:80/health && nvidia-smi || exit 1"]
      {% else %}
      test: ["CMD-SHELL", "curl -f http://localhost:80/health"]
      {% endif %}
      interval: 30s
      timeout: 20s
      retries: 3
      # Generous grace period: credential setup runs before the server starts.
      start_period: 120s
    restart: always
    {% if USE_GPU == "true" %}
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    {% endif %}
{% if USE_LLM == "true" %}
  # Ollama service — local LLM backend, rendered only when USE_LLM is enabled.
  ollama:
    image: ollama/ollama:0.11.0
    labels:
      # Compose label values must be strings; quote to avoid the YAML boolean.
      autoheal-label: "true"
    restart: always
    healthcheck:
      # TODO Same as above
      # NOTE(review): bare `ollama` prints usage; confirm its exit code is 0
      # on a healthy container, otherwise prefer `ollama list`.
      {% if USE_GPU == "true" %}
      test: ["CMD-SHELL", "ollama && nvidia-smi || exit 1"]
      {% else %}
      test: ["CMD", "ollama"]
      {% endif %}
      interval: 30s
      timeout: 20s
      retries: 3
    volumes:
      - ${DATA_DIRPATH}/ollama:/root/.ollama/
    ports:
      # Quoted port mapping to avoid YAML number parsing.
      - "${OLLAMA_HOST_PORT:-11434}:11434"
    {% if USE_GPU == "true" %}
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    {% endif %}
{% endif %}