-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathdocker-compose-env.yml
More file actions
276 lines (262 loc) · 8.67 KB
/
docker-compose-env.yml
File metadata and controls
276 lines (262 loc) · 8.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
# NOTE: after some config changes, requests sent via Postman may hit a 502 Bad Gateway
# from nginx for a while — this is because the Go services need to re-download some
# packages before they come up.
######## 项目依赖的环境,启动项目之前要先启动此环境 #######
######## The environment that the project depends on, starting this environment before starting the project #######
services:
# jaeger链路追踪 — Jaeger for tracing
# jaeger:
# image: jaegertracing/all-in-one:latest # <-- 从 latest 修改为具体的版本号
# container_name: jaeger
# restart: always
# ports:
# - "5775:5775/udp"
# - "6831:6831/udp"
# - "6832:6832/udp"
# - "5778:5778"
# - "16686:16686"
# - "14268:14268"
# - "9411:9411"
# environment:
# - SPAN_STORAGE_TYPE=elasticsearch
# - ES_SERVER_URLS=http://elasticsearch:9200
# - LOG_LEVEL=debug
# networks:
# - heart_trip_net
# depends_on: # <-- 增加 depends_on,确保 ES 先启动
# - elasticsearch
  # Prometheus for metrics collection / monitoring
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    environment:
      # Time zone Shanghai (change if needed)
      TZ: Asia/Shanghai
    volumes:
      # Scrape configuration mounted from the repo
      - ./deploy/prometheus/server/prometheus.yml:/etc/prometheus/prometheus.yml
      # TSDB data persisted on the host
      - ./data/prometheus/data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
    restart: always
    # Runs as root so the container can write to the host-mounted data dir
    user: root
    ports:
      - "9090:9090"
    networks:
      - heart_trip_net
  # Grafana to visualize the Prometheus monitoring data
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    hostname: grafana
    # Runs as root so the container can write to the host-mounted data dir
    user: root
    environment:
      # Time zone Shanghai (change if needed)
      TZ: Asia/Shanghai
    restart: always
    volumes:
      # Dashboards, users and plugin state persisted on the host
      - ./data/grafana/data:/var/lib/grafana
    ports:
      # Grafana UI reachable on host port 3001
      - "3001:3000"
    networks:
      - heart_trip_net
#
# #搜集kafka业务日志、存储prometheus监控数据 - Kafka for collecting business logs and storing Prometheus monitoring data
# elasticsearch:
# image: elasticsearch:9.1.3
# container_name: elasticsearch
# user: elasticsearch
# environment:
# - discovery.type=single-node
# - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
# - TZ=Asia/Shanghai
# volumes:
# - ./data/elasticsearch/data:/usr/share/elasticsearch/data
# restart: always
# ports:
# - "9200:9200"
# - "9300:9300"
# networks:
# - heart_trip_net
#
# #查看elasticsearch数据 - Kibana to view Elasticsearch data
# kibana:
# image: kibana:9.1.3
# container_name: kibana
# environment:
# - elasticsearch.hosts=http://elasticsearch:9200
# - TZ=Asia/Shanghai
# restart: always
# networks:
# - heart_trip_net
# ports:
# - "5601:5601"
# depends_on:
# - elasticsearch
#
# #消费kafka中filebeat收集的数据输出到es - The data output collected by FileBeat in Kafka is output to ES
# go-stash:
# image: kevinwan/go-stash:latest # if you "macOS intel" or "linux amd"
## image: kevinwan/go-stash:1.0-arm64 # if you "macOS m1" or "linux arm"
# container_name: go-stash
# environment:
# # 时区上海 - Time zone Shanghai (Change if needed)
# TZ: Asia/Shanghai
# user: root
# restart: always
# volumes:
# - ./deploy/go-stash/etc:/app/etc
# networks:
# - heart_trip_net
# depends_on:
# - elasticsearch
# - kafka
#
# #收集业务数据 - Collect business data
# filebeat:
# image: elastic/filebeat:9.1.3
# container_name: filebeat
# environment:
# # 时区上海 - Time zone Shanghai (Change if needed)
# TZ: Asia/Shanghai
# user: root
# restart: always
# entrypoint: "filebeat -e -strict.perms=false" #解决配置文件权限问题 - Solving the configuration file permissions
# volumes:
# - ./deploy/filebeat/conf/filebeat.yml:/usr/share/filebeat/filebeat.yml
# # 此处需指定docker的containers目录,取决于你docker的配置 - The containers directory of docker needs to be specified here, depending on your docker configuration
# # 如snap安装的docker,则为/var/snap/docker/common/var-lib-docker/containers - Example if docker is installed by Snap /var/snap/docker/common/var-lib-docker/containers
# # - /var/snap/docker/common/var-lib-docker/containers:/var/lib/docker/containers
# - /var/lib/docker/containers:/var/lib/docker/containers
# networks:
# - heart_trip_net
# depends_on:
# - kafka
#zookeeper是kafka的依赖 - Zookeeper is the dependencies of Kafka
# zookeeper:
# image: zookeeper:latest
# container_name: zookeeper
# environment:
# # 时区上海 - Time zone Shanghai (Change if needed)
# TZ: Asia/Shanghai
# restart: always
# ports:
# - "2181:2181"
# networks:
# - heart_trip_net
#消息队列 - Message queue
# kafka:
# image: wurstmeister/kafka:latest
# container_name: kafka
# ports:
# - "9092:9092"
# environment:
# - KAFKA_ADVERTISED_HOST_NAME=kafka
# - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
# - KAFKA_AUTO_CREATE_TOPICS_ENABLE=false
# - TZ=Asia/Shanghai
# restart: always
# volumes:
# - /var/run/docker.sock:/var/run/docker.sock
# networks:
# - heart_trip_net
# depends_on:
# - zookeeper
# 消息队列 - 使用 Bitnami Kafka 并启用 KRaft 模式
kafka:
# 使用 Bitnami 提供的官方 Kafka 镜像
image: bitnami/kafka:latest
container_name: kafka
ports:
# 9092 是客户端连接的端口
- "9092:9092"
environment:
# --- KRaft 模式配置 ---
# 1. 设置节点 ID
- KAFKA_CFG_NODE_ID=0
# 2. 设置节点角色为控制器(controller)和代理(broker)
- KAFKA_CFG_PROCESS_ROLES=controller,broker
# 3. 设置控制器选举投票者(单节点指向自己)
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093
# --- 监听器配置 ---
# 4. 配置监听器,PLAINTEXT 用于客户端数据,CONTROLLER 用于内部集群管理
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
# 5. 配置广播给客户端的监听地址
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
# 6. 指定控制器使用的监听器名称
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
# 7. 指定 Broker 之间通信使用的监听器名称
- KAFKA_CFG_INTER_BROKER_LISTENER_NAME=PLAINTEXT
# --- 其他配置 ---
- KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
- TZ=Asia/Shanghai
# 不再需要挂载 docker.sock,安全性大大提高
# volumes:
# - /var/run/docker.sock:/var/run/docker.sock
networks:
- heart_trip_net
# 由于没有 Zookeeper,depends_on 也可以移除了
# depends_on:
# - zookeeper
  # Asynqmon — web UI for the asynq delayed/scheduled task queues (backed by Redis)
  asynqmon:
    image: hibiken/asynqmon:latest
    container_name: asynqmon
    ports:
      # UI reachable on host port 8980
      - "8980:8080"
    command:
      - '--redis-addr=redis:6379'
      # NOTE(review): password is hard-coded here and duplicated in the redis
      # service below — keep the two in sync (consider an env var / secret store)
      - '--redis-password=G62m50oigInC30sf'
    restart: always
    networks:
      - heart_trip_net
    depends_on:
      - redis
  # MySQL database
  mysql:
    image: mysql:8.0
    container_name: mysql
    environment:
      # Time zone Shanghai (change if needed)
      TZ: Asia/Shanghai
      # root password — NOTE(review): hard-coded credential committed to VCS;
      # consider moving to an env file / secret store
      MYSQL_ROOT_PASSWORD: PXDN93VRKUm8TeE7
    ports:
      # Non-default host port to avoid clashing with a locally installed MySQL
      - "33069:3306"
    volumes:
      # Data mounting
      - ./data/mysql/data:/var/lib/mysql
    # The following flags are one multi-line plain scalar — Compose joins them
    # into a single command string
    command:
      # Switch MySQL 8.0 back to the legacy password plugin; the 8.0 default
      # (caching_sha2_password) breaks older clients that can't match passwords
      --default-authentication-plugin=mysql_native_password
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_general_ci
      --explicit_defaults_for_timestamp=true
      --lower_case_table_names=1
    privileged: true
    restart: always
    networks:
      - heart_trip_net
  # Redis container
  redis:
    image: redis:latest
    container_name: redis
    ports:
      # Non-default host port to avoid clashing with a locally installed Redis
      - "36379:6379"
    environment:
      # Time zone Shanghai (change if needed)
      TZ: Asia/Shanghai
    volumes:
      # Data files (AOF persistence lives here)
      - ./data/redis/data:/data:rw
    # Password must match the one asynqmon passes via --redis-password;
    # NOTE(review): hard-coded credential committed to VCS
    command: "redis-server --requirepass G62m50oigInC30sf --appendonly yes"
    privileged: true
    restart: always
    networks:
      - heart_trip_net
# Shared network for all services above.
networks:
  heart_trip_net:
    # The network is managed outside this compose file; create it once with:
    #   docker network create --driver bridge --subnet 172.16.0.0/16 heart_trip_net
    # NOTE: the previous `driver`/`ipam` keys were removed — the Compose spec
    # forbids extra attributes on an `external` network (Compose v2 ignores or
    # rejects them), so they were dead configuration.
    external: true