# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Basic Airflow cluster configuration for CeleryExecutor with Redis and PostgreSQL.
#
# This setup is configured via environment variables, typically provided through an .env file
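#
# A minimal .env sketch (illustrative only; the variable names are the ones referenced below via
# ${VAR:?error} / ${VAR:-default}, all values are placeholders to adapt to your deployment):
#
#   ENV_NAME=sandbox
#   AIRFLOW_UID=50000
#   AIRFLOW_USER=admin
#   AIRFLOW_PASSWORD=<password without '/' or '#'>
#   POSTGRES_USER=airflow
#   POSTGRES_PASSWORD=<password without '/' or '#'>
#   POSTGRES_HOST=postgres-service
#   POSTGRES_PORT=5432
#   POSTGRES_DB=airflow
#   REDIS_HOST=redis-service
#   REDIS_PORT=6379
#   MONGO_HOST=mongodb-service
#   MONGO_PORT=27017
#   MONGO_USER_READ=...           # plus MONGO_PASSWORD_READ
#   MONGO_USER_READWRITE=...      # plus MONGO_PASSWORD_READWRITE
#   MONGO_USER_WEBAPP=...         # plus MONGO_PASSWORD_WEBAPP
#   MOUNTS_PATH=/path/to/pool/mounts
#   WEBSERVER_PORT=8080
#   WEBAPP_PORT=8501
#   MCP_PORT=8089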
---
name: alphakraken-${ENV_NAME:?error}
x-airflow-common: &airflow-common
build:
context: .
dockerfile: airflow_src/Dockerfile
# User ID in Airflow containers
user: "${AIRFLOW_UID:?error}:0"
environment: &airflow-common-env
AIRFLOW__CORE__EXECUTOR: CeleryExecutor
# the next three lines define the connections to Postgres & Redis
# make sure the passwords don't contain characters that need URL-escaping, such as '/' or '#'
AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://${POSTGRES_USER:?error}:${POSTGRES_PASSWORD:?error}@${POSTGRES_HOST:?error}:${POSTGRES_PORT:?error}/${POSTGRES_DB:?error}
AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://${POSTGRES_USER:?error}:${POSTGRES_PASSWORD:?error}@${POSTGRES_HOST:?error}:${POSTGRES_PORT:?error}/${POSTGRES_DB:?error}
AIRFLOW__CELERY__BROKER_URL: redis://:@${REDIS_HOST:?error}:${REDIS_PORT:?error}/0
AIRFLOW__CORE__FERNET_KEY: ""
AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: "true"
AIRFLOW__CORE__LOAD_EXAMPLES: "false"
AIRFLOW__API__AUTH_BACKENDS: "airflow.api.auth.backend.basic_auth,airflow.api.auth.backend.session"
AIRFLOW__SCHEDULER__MIN_FILE_PROCESS_INTERVAL: 300 # reduce this number locally to get DAG changes faster
AIRFLOW__CORE__TEST_CONNECTION: "Enabled" # to allow testing connections via UI
# yamllint disable rule:line-length
# Use simple http server on scheduler for health checks
# See https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/check-health.html#scheduler-health-check-server
# yamllint enable rule:line-length
AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: "true"
# The following line can be used to set a custom config file, stored in the local config folder
# If you want to use it, uncomment it and replace airflow.cfg with the name of your config file
# AIRFLOW_CONFIG: '/opt/airflow/config/airflow.cfg'
#
# environment variables taken from the *.env file and passed to the containers at run time:
ENV_NAME: ${ENV_NAME:?error}
MONGO_HOST: ${MONGO_HOST:?error}
MONGO_PORT: ${MONGO_PORT:?error}
volumes:
# all mounts (backup pool folders, results pool folders, instruments, logs) need to be available in $MOUNTS_PATH
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
# # Uncomment for local development so that changes to DAGs and Tasks are picked up immediately
# # Note that these mounts are also defined separately for airflow-worker-test1
#- ./airflow_src/dags:/opt/airflow/dags
#- ./airflow_src/config:/opt/airflow/config
#- ./airflow_src/plugins:/opt/airflow/plugins
#- ./shared:/opt/airflow/shared
x-airflow-worker:
# This is the configuration for the production workers. For the test worker, the volume mapping is overridden.
&airflow-worker
<<: *airflow-common
profiles: ["workers"]
healthcheck:
# yamllint disable rule:line-length
test:
- "CMD-SHELL"
- 'celery --app airflow.providers.celery.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}" || celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"'
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
environment:
<<: *airflow-common-env
# Required to handle warm shutdown of the celery workers properly
# See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation
DUMB_INIT_SETSID: "0"
AIRFLOW__CELERY__WORKER_CONCURRENCY: 32
MONGO_USER: ${MONGO_USER_READWRITE:?error}
MONGO_PASSWORD: ${MONGO_PASSWORD_READWRITE:?error}
restart: always
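# The services below are grouped via compose profiles ("local", "dbs", "infrastructure", "workers",
# "s3", "nginx", "airflow-init", "debug"). A sketch of typical invocations, assuming the plain
# docker compose CLI is used directly (this repo also provides a compose.sh wrapper):
#
#   docker compose --profile airflow-init up                      # one-off DB migration & user creation
#   docker compose --profile local up -d                          # everything needed for local development
#   docker compose --profile dbs --profile infrastructure up -d   # databases plus webserver/scheduler/webapp
#   docker compose --profile workers up -d                        # production workers only
#
# See: https://docs.docker.com/compose/profiles/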
services:
postgres-service:
profiles: ["local", "dbs"]
image: postgres:13
# the default of 100 connections yielded 'psycopg2.OperationalError: FATAL: sorry, too many clients already'
# and left tasks stuck in the 'scheduled' state for a long time
# cf. https://stackoverflow.com/questions/51487740/airflow-psycopg2-operationalerror-fatal-sorry-too-many-clients-already
command: -c 'max_connections=500'
ports:
- 5432:5432
environment:
POSTGRES_USER: ${POSTGRES_USER:?error}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?error}
POSTGRES_DB: ${POSTGRES_DB:?error}
volumes:
- ./airflowdb_data_${ENV_NAME:?error}:/var/lib/postgresql/data
healthcheck:
test: ["CMD", "pg_isready", "-U", "airflow"]
interval: 10s
retries: 5
start_period: 5s
restart: always
redis-service:
profiles: ["local", "dbs"]
# Redis is limited to 7.2-bookworm due to the licensing change
# https://redis.io/blog/redis-adopts-dual-source-available-licensing/
image: redis:7.2-bookworm
ports:
- 6379:6379
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 30s
retries: 50
start_period: 30s
restart: always
airflow-webserver:
profiles: ["local", "infrastructure"]
<<: *airflow-common
command: webserver
ports:
- ${WEBSERVER_PORT:?error}:8080
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
airflow-scheduler:
profiles: ["local", "infrastructure"]
<<: *airflow-common
command: scheduler
# More replicas mean quicker scheduling and more concurrently running tasks. Keep in mind the max_connections setting of the Airflow DB.
deploy:
replicas: 2
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:8974/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
# only required if one wants to use 'deferrable operators'
# cf. https://airflow.apache.org/docs/apache-airflow/stable/authoring-and-scheduling/deferring.html#using-deferrable-operators
# airflow-triggerer:
# <<: *airflow-common
# command: triggerer
# healthcheck:
# test: ["CMD-SHELL", 'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"']
# interval: 30s
# timeout: 10s
# retries: 5
# start_period: 30s
# restart: always
airflow-init:
profiles: ["airflow-init"]
<<: *airflow-common
entrypoint: /bin/bash
# yamllint disable rule:line-length
command:
- -c
- |
if [[ -z "${AIRFLOW_UID}" ]]; then
echo
echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m"
echo "If you are on Linux, you SHOULD follow the instructions below to set "
echo "AIRFLOW_UID environment variable, otherwise files will be owned by root."
echo "For other operating systems you can get rid of the warning with manually created .env file:"
echo " See: https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#setting-the-right-airflow-user"
echo
fi
one_meg=1048576
mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg))
cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat)
disk_available=$$(df / | tail -1 | awk '{print $$4}')
warning_resources="false"
if (( mem_available < 4000 )) ; then
echo
echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m"
echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))"
echo
warning_resources="true"
fi
if (( cpus_available < 2 )); then
echo
echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m"
echo "At least 2 CPUs recommended. You have $${cpus_available}"
echo
warning_resources="true"
fi
if (( disk_available < one_meg * 10 )); then
echo
echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m"
echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))"
echo
warning_resources="true"
fi
if [[ $${warning_resources} == "true" ]]; then
echo
echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m"
echo "Please follow the instructions to increase amount of resources available:"
echo " https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#before-you-begin"
echo
fi
mkdir -p /sources/logs /sources/dags /sources/plugins
chown -R "${AIRFLOW_UID}:0" /sources/{logs,dags,plugins}
exec /entrypoint airflow version
# yamllint enable rule:line-length
environment:
<<: *airflow-common-env
_AIRFLOW_DB_MIGRATE: "true"
_AIRFLOW_WWW_USER_CREATE: "true"
_AIRFLOW_WWW_USER_USERNAME: ${AIRFLOW_USER:?error}
_AIRFLOW_WWW_USER_PASSWORD: ${AIRFLOW_PASSWORD:?error}
user: "0:0"
volumes:
- ./airflow_src:/sources
depends_on:
postgres-service:
condition: service_healthy
airflow-cli:
# allows running a CLI using `./compose.sh run airflow-cli bash`
<<: *airflow-common
profiles: ["debug"]
environment:
<<: *airflow-common-env
CONNECTION_CHECK_MAX_COUNT: "0"
# Workaround for entrypoint issue. See: https://github.com/apache/airflow/issues/16252
command:
- bash
- -c
- airflow
# Flower enables monitoring the environment (e.g. the workers). Currently not used.
# You can enable Flower by adding the "--profile flower" option, e.g. docker compose --profile flower up,
# or by targeting it explicitly on the command line, e.g. docker compose up flower.
# See: https://docs.docker.com/compose/profiles/
# flower:
# <<: *airflow-common
# command: celery flower
# profiles: ["flower"]
# ports:
# - "5555:5555"
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:5555/"]
# interval: 30s
# timeout: 10s
# retries: 5
# start_period: 30s
# restart: always
mongodb-service:
profiles: ["local", "dbs"]
# cf. https://www.mongodb.com/resources/products/compatibilities/docker
image: mongo:6-jammy
ports:
- ${MONGO_PORT:?error}:27017
volumes:
- ./mongodb_data_${ENV_NAME:?error}:/data/db
# init the DB: https://stackoverflow.com/a/53522699
# Note that this could also be done via python, cf. https://github.com/MannLabs/alphakraken/pull/6#discussion_r1624957906
- ./misc/init-mongo.sh:/docker-entrypoint-initdb.d/init-mongo.sh
environment:
<<: *airflow-common-env
# these env vars are required for the init script to work
MONGO_USER_READ: ${MONGO_USER_READ:?error}
MONGO_PASSWORD_READ: ${MONGO_PASSWORD_READ:?error}
MONGO_USER_READWRITE: ${MONGO_USER_READWRITE:?error}
MONGO_PASSWORD_READWRITE: ${MONGO_PASSWORD_READWRITE:?error}
MONGO_USER_WEBAPP: ${MONGO_USER_WEBAPP:?error}
MONGO_PASSWORD_WEBAPP: ${MONGO_PASSWORD_WEBAPP:?error}
env_file:
- ./envs/.env-mongo
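# (assumption) .env-mongo is expected to supply the root credentials that the official mongo image
# reads on first initialization, roughly:
#   MONGO_INITDB_ROOT_USERNAME=<root user>
#   MONGO_INITDB_ROOT_PASSWORD=<root password>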
healthcheck:
test:
["CMD", "mongosh", "--eval", '''db.runCommand("ping").ok''', "--quiet"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
webapp:
profiles: ["local", "infrastructure"]
build:
context: .
dockerfile: webapp/Dockerfile
deploy:
replicas: 1 # if more than 1, set WEBAPP_PORT to a range, e.g. WEBAPP_PORT=8501-8503 (and make sure nginx is configured to handle this)
resources:
limits:
memory: 4G # assuming 8 GB total on the VM that is shared with the other non-worker services
ports:
- ${WEBAPP_PORT:?error}:8501
environment:
<<: *airflow-common-env
MONGO_USER: ${MONGO_USER_WEBAPP:?error}
MONGO_PASSWORD: ${MONGO_PASSWORD_WEBAPP:?error}
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:8501/healthz"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
volumes:
- ./webapp_logs:/app/logs
# Uncomment for local development so that changes to webapp are immediately active
# - ./webapp:/app/webapp
# - ./shared:/app/webapp/shared
nginx:
image: nginx:latest
profiles: ["nginx"]
ports:
- "80:80"
- "8501:8501" # for serving webapp on streamlit port
- "443:443"
- "8080:8080" # for serving Airflow webserver
- "8089:8089" # for serving MCP server
volumes:
- ./misc/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx_logs:/var/log/nginx/
# - ./certs:/etc/nginx/certs:ro # Optional: for SSL certificates
restart: always
monitoring:
build:
context: .
dockerfile: monitoring/Dockerfile
profiles: ["infrastructure", "workers", "dbs"] # have monitoring running on all machines (downsides: potential alerts duplication)
environment:
<<: *airflow-common-env
MONGO_USER: ${MONGO_USER_READ:?error}
MONGO_PASSWORD: ${MONGO_PASSWORD_READ:?error}
restart: always
mcp-server:
profiles: ["local", "infrastructure"]
build:
context: .
dockerfile: mcp_server/Dockerfile
ports:
- ${MCP_PORT:-8089}:8089
environment:
<<: *airflow-common-env
MONGO_USER: ${MONGO_USER_READ:?error}
MONGO_PASSWORD: ${MONGO_PASSWORD_READ:?error}
MCP_TRANSPORT: streamable-http
MCP_PORT: 8089
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:8089/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
# healthcheck:
# test: ["CMD", "python", "-c", "..."] # TODO add appropriate health check
# interval: 30s
# timeout: 10s
# retries: 3
# test worker for local testing
airflow-worker-test1:
profiles: ["local"]
<<: *airflow-worker
# "-q <XXX>" makes this worker handle only DAGs that reference queue_name=<XXX> during DAG instantiation
command: celery worker -q kraken_queue_test1
volumes:
# all mounts (backup pool folders, results pool folders, instruments) need to be available in $MOUNTS_PATH.
# the following four mounts are required for a worker:
# to write the airflow logs:
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
# to read the output for metrics calculation:
- ${MOUNTS_PATH:?error}/output:/opt/airflow/mounts/output:ro
# to read the raw file data from the instrument (for copying):
- ${MOUNTS_PATH:?error}/instruments/test1:/opt/airflow/mounts/instruments/test1:ro
# to write the raw file data to the backup pool:
- ${MOUNTS_PATH:?error}/backup/test1:/opt/airflow/mounts/backup/test1:rw
# for the test1 worker, changes to DAGs and Tasks are immediately active
- ./airflow_src/dags:/opt/airflow/dags
- ./airflow_src/config:/opt/airflow/config
- ./airflow_src/plugins:/opt/airflow/plugins
- ./shared:/opt/airflow/shared
########################################## FILE MOVER/REMOVER WORKERS ##########################################
# file mover: moves files from instrument to instrument Backup folder
airflow-worker-file-mover:
profiles: ["local", "workers"]
<<: *airflow-worker
command: celery worker -q kraken_queue_file_mover
volumes:
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
# to write (move) from instrument to instrument Backup folder:
- ${MOUNTS_PATH:?error}/instruments:/opt/airflow/mounts/instruments:rw
# file remover: removes files from the instrument Backup folder after comparing them to the pool Backup folder
airflow-worker-file-remover:
profiles: ["local", "workers"]
<<: *airflow-worker
command: celery worker -q kraken_queue_file_remover
volumes:
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
# to read from pool backup (for file comparison):
- ${MOUNTS_PATH:?error}/backup:/opt/airflow/mounts/backup:ro
# to write (remove) from instrument Backup folder: (this could be limited to the 'Backup' folders but then requires one entry per instrument)
- ${MOUNTS_PATH:?error}/instruments:/opt/airflow/mounts/instruments:rw
# s3 uploader: uploads files to s3
airflow-worker-s3-uploader:
profiles: ["s3"]
<<: *airflow-worker
command: celery worker -q kraken_queue_s3_uploader
volumes:
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
# to read files to upload
- ${MOUNTS_PATH:?error}/backup:/opt/airflow/mounts/backup:ro
########################################## INSTRUMENT WORKERS ##########################################
# cf. comments in airflow-worker-test1
# INSTRUMENT: Test2
airflow-worker-test2:
profiles: ["local"]
<<: *airflow-worker
command: celery worker -q kraken_queue_test2
volumes:
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
- ${MOUNTS_PATH:?error}/output:/opt/airflow/mounts/output:ro
- ${MOUNTS_PATH:?error}/instruments/test2:/opt/airflow/mounts/instruments/test2:ro
- ${MOUNTS_PATH:?error}/backup/test2:/opt/airflow/mounts/backup/test2:rw
# INSTRUMENT: Test3
airflow-worker-test3:
profiles: ["local"]
<<: *airflow-worker
command: celery worker -q kraken_queue_test3
volumes:
- ${MOUNTS_PATH:?error}/airflow_logs:/opt/airflow/logs:rw
- ${MOUNTS_PATH:?error}/output:/opt/airflow/mounts/output:ro
- ${MOUNTS_PATH:?error}/instruments/test3:/opt/airflow/mounts/instruments/test3:ro
- ${MOUNTS_PATH:?error}/backup/test3:/opt/airflow/mounts/backup/test3:rw