forked from hotosm/drone-tm
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcompose.sub.yaml
More file actions
193 lines (185 loc) · 5.27 KB
/
compose.sub.yaml
File metadata and controls
193 lines (185 loc) · 5.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
# Single internal bridge network shared by all services; the fixed subnet is
# referenced by the proxy's API_WHITELIST_IP / WHITELIST_IP settings below.
networks:
  dtm-network:
    name: dtm-network
    ipam:
      driver: default
      config:
        - subnet: 10.20.30.0/24
# Named volumes, suffixed with ${GIT_BRANCH} so parallel branch deployments
# on one host do not share state.
volumes:
  # PostgreSQL data directory (mounted by the db service).
  db-data:
    name: drone-tm-db-data-${GIT_BRANCH}
  # Built frontend bundle, written by the frontend build container and
  # mounted read-only into the proxy (and into the backend).
  frontend-html:
    name: drone-tm-frontend-html-${GIT_BRANCH}
  # TLS certificate storage for the BunkerWeb proxy (/data).
  certs:
    name: drone-tm-certs-${GIT_BRANCH}
services:
  # BunkerWeb all-in-one reverse proxy: terminates TLS (Let's Encrypt),
  # serves the static frontend from the shared volume, and proxies /api
  # to the backend replicas.
  proxy:
    image: "bunkerity/bunkerweb-all-in-one:1.6.2"
    depends_on:
      backend:
        condition: service_healthy
      # Frontends must be built and available first
      frontend:
        condition: service_completed_successfully
    volumes:
      # /data persists certificates; the site root is read-only.
      - certs:/data
      - frontend-html:/var/www/html/${DOMAIN}:ro
    environment:
      # General
      # BunkerWeb settings are env-var strings: yes/no and numeric values are
      # quoted so no YAML parser can retype them as booleans/ints.
      BUNKERWEB_INSTANCES: "proxy:5000"
      LOG_LEVEL: notice
      UI_WIZARD: "no"
      USE_BUNKERNET: "no"
      DISABLE_DEFAULT_SERVER: "yes"
      USE_REDIS: "no"
      # Avoid running ModSec rules on internal service calls
      API_WHITELIST_IP: 127.0.0.0/8 10.20.30.0/24
      WHITELIST_IP: 10.20.30.0/24
      WHITELIST_URI: https://${DOMAIN}
      MULTISITE: "yes"
      USE_REVERSE_PROXY: "yes"
      REVERSE_PROXY_INTERCEPT_ERRORS: "no"
      ALLOWED_METHODS: OPTIONS|HEAD|GET|POST|PATCH|PUT|DELETE
      USE_REAL_IP: "yes"
      SERVE_FILES: "yes"
      USE_BACKUP: "no"
      USE_METRICS: "no"
      # USE_ANTIBOT: "yes"
      USE_LIMIT_CONN: "yes"
      # S3 supports HTTP/1.1, increase from default 10 for multipart requests
      # Multipart uploads + API polling can exceed defaults; increase headroom
      LIMIT_CONN_MAX_HTTP1: "50"
      # BAD_BEHAVIOUR disabled as it's difficult to work with...
      USE_BAD_BEHAVIOR: "no"
      USE_LIMIT_REQ: "yes"
      # Default 2r/s --> 20r/s for simultaneous S3 multipart uploads
      # Global default (all URLs). Keep moderate and override the hot upload endpoints below.
      LIMIT_REQ_RATE: 5r/s
      # Uppy AwsS3 multipart signing is bursty: one request per part (and parts can be pipelined),
      # so these endpoints need a much higher per-IP budget to avoid 429 during legitimate uploads
      LIMIT_REQ_URL_1: /api/projects/sign-part-upload/
      LIMIT_REQ_RATE_1: 50r/s
      LIMIT_REQ_URL_2: /api/projects/initiate-multipart-upload/
      LIMIT_REQ_RATE_2: 20r/s
      LIMIT_REQ_URL_3: /api/projects/complete-multipart-upload/
      LIMIT_REQ_RATE_3: 15r/s
      LIMIT_REQ_URL_4: /api/projects/list-parts/
      LIMIT_REQ_RATE_4: 50r/s
      LIMIT_REQ_URL_5: /api/projects/abort-multipart-upload/
      LIMIT_REQ_RATE_5: 50r/s
      USE_MODSECURITY: "no"
      USE_GZIP: "yes"
      # On client, brotli is preferred over gzip if both are enabled
      USE_BROTLI: "yes"
      AUTO_LETS_ENCRYPT: "yes"
      EMAIL_LETS_ENCRYPT: ${CERT_EMAIL}
      # USE_LETS_ENCRYPT_STAGING: "yes"
      # Reverse proxy configs
      SERVER_NAME: ${DOMAIN}
      ${DOMAIN}_REVERSE_PROXY_HOST: http://backend:8000
      ${DOMAIN}_REVERSE_PROXY_URL: /api
      # FIX: was `${DOMAIN}_ERRORS=404: /index.html`, which put `=404` inside the
      # variable NAME. BunkerWeb's ERRORS setting takes "<code>=<page>" pairs as
      # its VALUE; this serves the SPA entry point on 404 (client-side routing).
      ${DOMAIN}_ERRORS: "404=/index.html"
    ports:
      # Quoted so "host:container" pairs are never YAML-retyped (sexagesimal trap).
      - "80:8080"
      - "443:8443"
    networks:
      - dtm-network
    restart: "unless-stopped"
  # API backend: uvicorn serving `app.main:api` on port 8000, one worker per
  # replica (scaled horizontally via `deploy.replicas`).
  backend:
    image: ghcr.io/hotosm/drone-tm/backend:${GIT_BRANCH:-main}
    # NOTE(review): other services here use `unless-stopped`; `always` also
    # restarts containers that were manually stopped once the daemon restarts —
    # confirm the inconsistency is intentional.
    restart: always
    depends_on:
      db:
        condition: service_healthy
      # Schema must be migrated before the API starts.
      migrations:
        condition: service_completed_successfully
    volumes:
      # Shared volume holding the built frontend bundle (also mounted by the
      # frontend builder and the proxy).
      - frontend-html:/project/src/backend/frontend_html
    env_file: .env
    command:
      [
        "uvicorn",
        "app.main:api",
        "--host",
        "0.0.0.0",
        "--port",
        "8000",
        "--workers",
        "1",
        "--log-level",
        "critical",
        "--no-access-log",
      ]
    networks:
      - dtm-network
    deploy:
      # Default 4 replicas; the proxy load-balances across them via the
      # `backend` service name.
      replicas: ${BACKEND_REPLICAS:-4}
      resources:
        limits:
          cpus: "0.9"
          memory: 1500M
        reservations:
          cpus: "0.1"
          memory: 100M
    healthcheck:
      # Proxy's service_healthy gate depends on this endpoint responding.
      test: ["CMD", "curl", "-f", "http://localhost:8000/__lbheartbeat__"]
      start_period: 60s
      interval: 10s
      timeout: 5s
      retries: 10
  # Build-only container: runs to completion, leaving the built static bundle
  # in the shared `frontend-html` volume (the proxy waits on
  # service_completed_successfully). No network access is needed or granted.
  frontend:
    image: ghcr.io/hotosm/drone-tm/frontend:${GIT_BRANCH:-main}
    build:
      context: src/frontend
      dockerfile: Dockerfile
      target: prod
    volumes:
      # Output path inside the container; presumably the image's entrypoint
      # copies the build artifacts here — verify against the Dockerfile.
      - frontend-html:/frontend_html
    network_mode: none
    # Retry the build container at most twice on failure, then give up.
    restart: "on-failure:2"
  # PostgreSQL 16 with PostGIS 3.4 (alpine); data persisted in the
  # branch-scoped `db-data` volume. Credentials come from .env.
  db:
    image: postgis/postgis:16-3.4-alpine
    volumes:
      - db-data:/var/lib/postgresql/data
    env_file: .env
    networks:
      - dtm-network
    restart: unless-stopped
    healthcheck:
      # Gates backend and migrations via service_healthy; defaults must match
      # the POSTGRES_* values in .env.
      test: pg_isready -U ${POSTGRES_USER:-dtm} -d ${POSTGRES_DB:-dtm_db}
      start_period: 5s
      interval: 10s
      timeout: 5s
      retries: 3
  # One-shot Alembic migration runner (same image as the backend). Runs once
  # after the db is healthy and exits; backend waits on
  # service_completed_successfully.
  migrations:
    image: ghcr.io/hotosm/drone-tm/backend:${GIT_BRANCH:-main}
    depends_on:
      db:
        condition: service_healthy
    env_file:
      - .env
    networks:
      - dtm-network
    command: ["alembic", "upgrade", "head"]
    # Never auto-restart: a failed migration needs manual intervention.
    restart: "no"
  # Async task worker (arq) using the backend image; consumes jobs from the
  # dragonfly (Redis-compatible) queue.
  # NOTE(review): no depends_on for db/migrations — presumably tasks tolerate
  # the API stack starting later; confirm.
  arq-worker:
    image: ghcr.io/hotosm/drone-tm/backend:${GIT_BRANCH:-main}
    command: arq app.arq.tasks.WorkerSettings
    depends_on:
      dragonfly:
        condition: service_healthy
    env_file: .env
    networks:
      - dtm-network
    restart: unless-stopped
    healthcheck:
      # `arq --check` verifies the worker is responsive.
      test: ["CMD", "arq", "app.arq.tasks.WorkerSettings", "--check"]
      interval: 30s
      timeout: 5s
      retries: 2
      start_period: 20s
  # Dragonfly: Redis-compatible in-memory store backing the arq task queue.
  # NOTE(review): arq-worker gates on `service_healthy`, but no healthcheck is
  # defined here — this relies on the image shipping a built-in HEALTHCHECK;
  # confirm, or add an explicit one.
  dragonfly:
    image: ghcr.io/dragonflydb/dragonfly:v1.36.0
    networks:
      - dtm-network
    restart: unless-stopped