@@ -26,10 +26,10 @@ services:
2626 condition : service_healthy
2727 environment :
2828 - JAVA_TOOL_OPTIONS=-javaagent:/opentelemetry-javaagent.jar -XX:-UseContainerSupport
29- - TUMBLEBUG_URL=${TUMBLEBUG_URL:-http://cb-tumblebug :1323}
29+ - TUMBLEBUG_URL=${TUMBLEBUG_URL:-http://mc-infra-manager :1323}
3030 - TUMBLEBUG_ID=${TUMBLEBUG_ID:-default}
3131 - TUMBLEBUG_PW=${TUMBLEBUG_PW:-default}
32- - SPIDER_URL=${SPIDER_URL:-http://cb-spider :1024}
32+ - SPIDER_URL=${SPIDER_URL:-http://mc-infra-connector :1024}
3333 - SPIDER_ID=${SPIDER_ID:-}
3434 - SPIDER_PW=${SPIDER_PW:-}
3535 - INSIGHT_URL=http://mc-observability-insight:9001
@@ -45,7 +45,7 @@ services:
4545 - OTEL_SERVICE_NAME=${OTEL_SERVICE_NAME:-mc-observability-manager}
4646 - OTEL_METRICS_EXPORTER=none
4747 - OTEL_LOGS_EXPORTER=none
48- - OTEL_TRACES_EXPORTER=none
48+ - OTEL_TRACES_EXPORTER=otlp
4949 - OTEL_EXPORTER_OTLP_ENDPOINT=${OTEL_EXPORTER_OTLP_ENDPOINT:-http://mc-observability-tempo:4318}
5050 # - OTEL_EXPORTER_OTLP_ENDPOINT=${OTEL_EXPORTER_OTLP_ENDPOINT}
5151 # - OTEL_EXPORTER_OTLP_PROTOCOL=${OTEL_EXPORTER_OTLP_PROTOCOL}
@@ -60,7 +60,8 @@ services:
6060 # - OTEL_INSTRUMENTATION_SPRING_SCHEDULING_ENABLED=false
6161 # - OTEL_INSTRUMENTATION_EXECUTOR_ENABLED=false
6262 - OTEL_INSTRUMENTATION_MICROMETER_ENABLED=false
63- # - OTEL_INSTRUMENTATION_AWS_SDK_ENABLED=false
63+ - MAX_FILE_SIZE=200MB
64+ # - OTEL_INSTRUMENTATION_AWS_SDK_ENABLED=false
6465 # --- RabbitMQ connection ---
6566 - SPRING_RABBITMQ_HOST=mc-observability-rabbitmq
6667 - SPRING_RABBITMQ_PORT=5672
@@ -153,43 +154,43 @@ services:
153154 retries : 30
154155
155156 # CB-Tumblebug
156- cb-tumblebug :
157+ mc-infra-manager :
157158 image : cloudbaristaorg/cb-tumblebug:0.11.13
158- container_name : cb-tumblebug
159+ container_name : mc-infra-manager
159160 networks :
160161 - internal_network
161162 - external_network
162163 ports :
163164 - 1323:1323
164165 depends_on :
165- cb-tumblebug -etcd :
166+ mc-infra-manager -etcd :
166167 condition : service_started
167- cb-spider :
168+ mc-infra-connector :
168169 condition : service_started
169- cb-tumblebug -postgres :
170+ mc-infra-manager -postgres :
170171 condition : service_healthy
171172 volumes :
172173 - ./conf/setup.env:/app/conf/setup.env
173174 - ./conf/cloud_conf.yaml:/app/conf/cloud_conf.yaml
174175 - ~/.cloud-barista/credentials.yaml.enc:/app/conf/credentials.yaml.enc
175- - /docker/cb-tumblebug -container/meta_db/:/app/meta_db/
176- - /docker/cb-tumblebug -container/log/:/app/log/
176+ - /docker/mc-infra-manager -container/meta_db/:/app/meta_db/
177+ - /docker/mc-infra-manager -container/log/:/app/log/
177178 environment :
178179 # - TB_ROOT_PATH=/app
179180 # # Enable TB_SELF_ENDPOINT to specify an endpoint for CB-TB API (default: localhost:1323)
180181 # # Use public IP if you want to access the API Dashboard from outside of localhost
181182 # - TB_SELF_ENDPOINT=xxx.xxx.xxx.xxx:1323
182- - TB_SPIDER_REST_URL=http://cb-spider :1024/spider
183- - TB_ETCD_ENDPOINTS=http://cb-tumblebug -etcd:2379
183+ - TB_SPIDER_REST_URL=http://mc-infra-connector :1024/spider
184+ - TB_ETCD_ENDPOINTS=http://mc-infra-manager -etcd:2379
184185 - TB_TERRARIUM_REST_URL=http://mc-terrarium:8055/terrarium
185186 - TB_IAM_MANAGER_REST_URL=http://mc-iam-manager:5000
186187 # - TB_ETCD_AUTH_ENABLED=false
187188 # - TB_ETCD_USERNAME=default
188189 # - TB_ETCD_PASSWORD=default
189- - TB_POSTGRES_ENDPOINT=cb-tumblebug -postgres:5432
190- - TB_POSTGRES_DATABASE=cb_tumblebug
191- - TB_POSTGRES_USER=cb_tumblebug
192- - TB_POSTGRES_PASSWORD=cb_tumblebug
190+ - TB_POSTGRES_ENDPOINT=mc-infra-manager -postgres:5432
191+ - TB_POSTGRES_DATABASE=tumblebug
192+ - TB_POSTGRES_USER=tumblebug
193+ - TB_POSTGRES_PASSWORD=tumblebug
193194 # - TB_TERRARIUM_API_USERNAME=default
194195 # - TB_TERRARIUM_API_PASSWORD=default
195196 # - TB_ALLOW_ORIGINS=*
@@ -210,24 +211,24 @@ services:
210211 # - TB_LOGWRITER=both
211212 # - TB_NODE_ENV=development
212213 healthcheck : # for CB-Tumblebug
213- test : ["CMD", "curl", "-f", "http://localhost:1323/tumblebug/readyz"]
214+ test : [ "CMD", "curl", "-f", "http://localhost:1323/tumblebug/readyz" ]
214215 interval : 1m
215216 timeout : 5s
216217 retries : 3
217218 start_period : 10s
218219
219220 # CB-Tumblebug ETCD
220221 # This is used for storing CB-Tumblebug metadata.
221- cb-tumblebug -etcd :
222+ mc-infra-manager -etcd :
222223 image : gcr.io/etcd-development/etcd:v3.5.21
223- container_name : cb-tumblebug -etcd
224+ container_name : mc-infra-manager -etcd
224225 networks :
225226 - internal_network
226227 ports :
227228 - 2379:2379
228229 - 2380:2380
229230 volumes :
230- - /docker/etcd/data:/etcd-data
231+ - /docker/mc-infra-manager/etcd/data:/etcd-data
231232 entrypoint : /usr/local/bin/etcd
232233 command :
233234 - --name
@@ -257,17 +258,17 @@ services:
257258 - --auth-token
258259 - simple
259260 healthcheck : # for etcd
260- test : ["CMD", "/usr/local/bin/etcd", "--version"]
261+ test : [ "CMD", "/usr/local/bin/etcd", "--version" ]
261262 interval : 1m
262263 timeout : 5s
263264 retries : 3
264265 start_period : 10s
265266
266267 # CB-Tumblebug PostgreSQL
267268 # This is used for storing CB-Tumblebug Spec and Image.
268- cb-tumblebug -postgres :
269+ mc-infra-manager -postgres :
269270 image : postgres:16-alpine
270- container_name : cb-tumblebug -postgres
271+ container_name : mc-infra-manager -postgres
271272 restart : always
272273 networks :
273274 - internal_network
@@ -277,22 +278,22 @@ services:
277278 ports :
278279 - 5432:5432
279280 volumes :
280- - /docker/cb-tumblebug -container/meta_db/postgres/:/var/lib/postgresql/data/
281+ - /docker/mc-infra-manager -container/meta_db/postgres/:/var/lib/postgresql/data/
281282 environment :
282- - POSTGRES_USER=cb_tumblebug
283- - POSTGRES_PASSWORD=cb_tumblebug
284- - POSTGRES_DB=cb_tumblebug
283+ - POSTGRES_USER=tumblebug
284+ - POSTGRES_PASSWORD=tumblebug
285+ - POSTGRES_DB=tumblebug
285286 healthcheck :
286- test : ["CMD-SHELL", "pg_isready -U cb_tumblebug"]
287+ test : [ "CMD-SHELL", "pg_isready -U tumblebug" ]
287288 interval : 10s
288289 timeout : 5s
289290 retries : 5
290291 start_period : 10s
291292
292293 # CB-Spider
293- cb-spider :
294+ mc-infra-connector :
294295 image : cloudbaristaorg/cb-spider_azure_monitoring:edge
295- container_name : cb-spider
296+ container_name : mc-infra-connector
296297 networks :
297298 - internal_network
298299 - external_network # for outbound access (not ideal for security)
@@ -311,7 +312,7 @@ services:
311312 - SPIDER_HISCALL_LOG_LEVEL=error
312313 - ID_TRANSFORM_MODE=OFF
313314 healthcheck : # for CB-Spider
314- test : ["CMD", "curl", "-f", "http://localhost:1024/spider/readyz"]
315+ test : [ "CMD", "curl", "-f", "http://localhost:1024/spider/readyz" ]
315316 interval : 1m
316317 timeout : 5s
317318 retries : 3
@@ -348,7 +349,7 @@ services:
348349 image : busybox:stable
349350 container_name : mc-observability-influx-init-volumes
350351 restart : no
351- command : ["sh", "-c", "chown -R 1500:1500 /var/lib/influxdb"]
352+ command : [ "sh", "-c", "chown -R 1500:1500 /var/lib/influxdb" ]
352353 volumes :
353354 - /docker/influxdb_data:/var/lib/influxdb
354355 user : root
@@ -386,7 +387,7 @@ services:
386387 image : busybox:stable
387388 container_name : mc-observability-influx-2-init-volumes
388389 restart : no
389- command : ["sh", "-c", "chown -R 1500:1500 /var/lib/influxdb"]
390+ command : [ "sh", "-c", "chown -R 1500:1500 /var/lib/influxdb" ]
390391 volumes :
391392 - /docker/influxdb2_data:/var/lib/influxdb
392393 user : root
@@ -426,7 +427,7 @@ services:
426427 image : busybox:stable
427428 container_name : mc-observability-loki-init-volumes
428429 restart : no
429- command : ["sh", "-c", "chown -R 10001:10001 /loki"]
430+ command : [ "sh", "-c", "chown -R 10001:10001 /loki" ]
430431 volumes :
431432 - /docker/loki_data:/loki
432433 user : root
@@ -506,7 +507,7 @@ services:
506507 image : busybox:stable
507508 container_name : mc-observability-grafana-init-volumes
508509 restart : no
509- command : ["sh", "-c", "chown -R 472:472 /var/lib/grafana && chown -R 472:472 /var/log/grafana && chown -R 472:472 /grafana_config"]
510+ command : [ "sh", "-c", "chown -R 472:472 /var/lib/grafana && chown -R 472:472 /var/log/grafana && chown -R 472:472 /grafana_config" ]
510511 volumes :
511512 - /docker/grafana_data/data:/var/lib/grafana
512513 - /docker/grafana_data/log:/var/log/grafana
@@ -682,32 +683,32 @@ services:
682683 mc-observability-rabbitmq :
683684 image : rabbitmq:4-management-alpine
684685 container_name : mc-observability-rabbitmq
685- volumes :
686- - ./rabbitmq/etc:/etc/rabbitmq
687- - /docker/rabbitmq_data:/var/lib/rabbitmq
688- - ./rabbitmq/etc/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro
689- # - /docker/rabbitmq_log:/var/log/rabbitmq
690- depends_on :
691- mc-observability-rabbitmq-init-volumes :
692- condition : service_completed_successfully
686+ restart : always
693687 ports :
694688 - " 5672:5672" # AMQP
695689 - " 1883:1883" # MQTT
696690 - " 15672:15672" # Dashboard
697691 # - "8883:8883" # MQTT/TLS (optional)
698692 # - "15675:15675" # MQTT WebSocket (optional)
693+ depends_on :
694+ mc-observability-rabbitmq-init-volumes :
695+ condition : service_completed_successfully
699696 environment :
700- RABBITMQ_ENABLED_PLUGINS_FILE : " /etc/rabbitmq/enabled_plugins"
701- RABBITMQ_LOAD_DEFINITIONS : " /etc/rabbitmq/definitions.json"
702- restart : unless-stopped
697+ - RABBITMQ_ENABLED_PLUGINS_FILE=/etc/rabbitmq/enabled_plugins
698+ - RABBITMQ_LOAD_DEFINITIONS=/etc/rabbitmq/definitions.json
699+ volumes :
700+ - ./rabbitmq/etc:/etc/rabbitmq
701+ - /docker/rabbitmq_data:/var/lib/rabbitmq
702+ - ./rabbitmq/etc/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro
703+ # - /docker/rabbitmq_log:/var/log/rabbitmq
703704 networks :
704705 - internal_network
705706 - external_network
706707
707708 mc-observability-rabbitmq-init-volumes :
708709 image : busybox:stable
709710 container_name : mc-observability-rabbitmq-init-volumes
710- restart : " no "
711+ restart : "no"
711712 command :
712713 - sh
713714 - -c
@@ -738,7 +739,7 @@ services:
738739 - " 4318:4318"
739740 environment :
740741 - TZ=Asia/Seoul
741- command : ["-config.file=/etc/tempo-config.yaml"]
742+ command : [ "-config.file=/etc/tempo-config.yaml" ]
742743 volumes :
743744 - /etc/localtime:/etc/localtime:ro
744745 - /docker/tempo_data:/tmp/tempo
0 commit comments