Skip to content

Commit bcaee0d

Browse files
fix(supabase): add logs config
1 parent c9d49c7 commit bcaee0d

File tree

2 files changed

+233
-0
lines changed

2 files changed

+233
-0
lines changed

supabase/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
volumes/db/data
volumes/storage
# keep the logs pipeline config (volumes/logs/vector.yml) under version control
!volumes/logs
.env
test.http
docker-compose.override.yml

supabase/volumes/logs/vector.yml

Lines changed: 232 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,232 @@
# Vector's built-in API; listens on all interfaces, port 9001.
api:
  enabled: true
  address: 0.0.0.0:9001
sources:
  # Tail logs from every docker container on the host, excluding the
  # supabase-vector container itself (so vector does not ingest its own output).
  docker_host:
    type: docker_logs
    exclude_containers:
      - supabase-vector
transforms:
12+
project_logs:
13+
type: remap
14+
inputs:
15+
- docker_host
16+
source: |-
17+
.project = "default"
18+
.event_message = del(.message)
19+
.appname = del(.container_name)
20+
del(.container_created_at)
21+
del(.container_id)
22+
del(.source_type)
23+
del(.stream)
24+
del(.label)
25+
del(.image)
26+
del(.host)
27+
del(.stream)
28+
router:
29+
type: route
30+
inputs:
31+
- project_logs
32+
route:
33+
kong: '.appname == "supabase-kong"'
34+
auth: '.appname == "supabase-auth"'
35+
rest: '.appname == "supabase-rest"'
36+
realtime: '.appname == "supabase-realtime"'
37+
storage: '.appname == "supabase-storage"'
38+
functions: '.appname == "supabase-functions"'
39+
db: '.appname == "supabase-db"'
40+
# Ignores non nginx errors since they are related with kong booting up
41+
kong_logs:
42+
type: remap
43+
inputs:
44+
- router.kong
45+
source: |-
46+
req, err = parse_nginx_log(.event_message, "combined")
47+
if err == null {
48+
.timestamp = req.timestamp
49+
.metadata.request.headers.referer = req.referer
50+
.metadata.request.headers.user_agent = req.agent
51+
.metadata.request.headers.cf_connecting_ip = req.client
52+
.metadata.request.method = req.method
53+
.metadata.request.path = req.path
54+
.metadata.request.protocol = req.protocol
55+
.metadata.response.status_code = req.status
56+
}
57+
if err != null {
58+
abort
59+
}
60+
# Ignores non nginx errors since they are related with kong booting up
61+
kong_err:
62+
type: remap
63+
inputs:
64+
- router.kong
65+
source: |-
66+
.metadata.request.method = "GET"
67+
.metadata.response.status_code = 200
68+
parsed, err = parse_nginx_log(.event_message, "error")
69+
if err == null {
70+
.timestamp = parsed.timestamp
71+
.severity = parsed.severity
72+
.metadata.request.host = parsed.host
73+
.metadata.request.headers.cf_connecting_ip = parsed.client
74+
url, err = split(parsed.request, " ")
75+
if err == null {
76+
.metadata.request.method = url[0]
77+
.metadata.request.path = url[1]
78+
.metadata.request.protocol = url[2]
79+
}
80+
}
81+
if err != null {
82+
abort
83+
}
84+
# Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
85+
auth_logs:
86+
type: remap
87+
inputs:
88+
- router.auth
89+
source: |-
90+
parsed, err = parse_json(.event_message)
91+
if err == null {
92+
.metadata.timestamp = parsed.time
93+
.metadata = merge!(.metadata, parsed)
94+
}
95+
# PostgREST logs are structured so we separate timestamp from message using regex
96+
rest_logs:
97+
type: remap
98+
inputs:
99+
- router.rest
100+
source: |-
101+
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
102+
if err == null {
103+
.event_message = parsed.msg
104+
.timestamp = to_timestamp!(parsed.time)
105+
.metadata.host = .project
106+
}
107+
# Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
108+
realtime_logs:
109+
type: remap
110+
inputs:
111+
- router.realtime
112+
source: |-
113+
.metadata.project = del(.project)
114+
.metadata.external_id = .metadata.project
115+
parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
116+
if err == null {
117+
.event_message = parsed.msg
118+
.metadata.level = parsed.level
119+
}
120+
# Storage logs may contain json objects so we parse them for completeness
121+
storage_logs:
122+
type: remap
123+
inputs:
124+
- router.storage
125+
source: |-
126+
.metadata.project = del(.project)
127+
.metadata.tenantId = .metadata.project
128+
parsed, err = parse_json(.event_message)
129+
if err == null {
130+
.event_message = parsed.msg
131+
.metadata.level = parsed.level
132+
.metadata.timestamp = parsed.time
133+
.metadata.context[0].host = parsed.hostname
134+
.metadata.context[0].pid = parsed.pid
135+
}
136+
# Postgres logs some messages to stderr which we map to warning severity level
137+
db_logs:
138+
type: remap
139+
inputs:
140+
- router.db
141+
source: |-
142+
.metadata.host = "db-default"
143+
.metadata.parsed.timestamp = .timestamp
144+
145+
parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)
146+
147+
if err != null || parsed == null {
148+
.metadata.parsed.error_severity = "info"
149+
}
150+
if parsed != null {
151+
.metadata.parsed.error_severity = parsed.level
152+
}
153+
if .metadata.parsed.error_severity == "info" {
154+
.metadata.parsed.error_severity = "log"
155+
}
156+
.metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
157+
158+
sinks:
159+
logflare_auth:
160+
type: 'http'
161+
inputs:
162+
- auth_logs
163+
encoding:
164+
codec: 'json'
165+
method: 'post'
166+
request:
167+
retry_max_duration_secs: 10
168+
uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
169+
logflare_realtime:
170+
type: 'http'
171+
inputs:
172+
- realtime_logs
173+
encoding:
174+
codec: 'json'
175+
method: 'post'
176+
request:
177+
retry_max_duration_secs: 10
178+
uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
179+
logflare_rest:
180+
type: 'http'
181+
inputs:
182+
- rest_logs
183+
encoding:
184+
codec: 'json'
185+
method: 'post'
186+
request:
187+
retry_max_duration_secs: 10
188+
uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
189+
logflare_db:
190+
type: 'http'
191+
inputs:
192+
- db_logs
193+
encoding:
194+
codec: 'json'
195+
method: 'post'
196+
request:
197+
retry_max_duration_secs: 10
198+
# We must route the sink through kong because ingesting logs before logflare is fully initialised will
199+
# lead to broken queries from studio. This works by the assumption that containers are started in the
200+
# following order: vector > db > logflare > kong
201+
uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
202+
logflare_functions:
203+
type: 'http'
204+
inputs:
205+
- router.functions
206+
encoding:
207+
codec: 'json'
208+
method: 'post'
209+
request:
210+
retry_max_duration_secs: 10
211+
uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
212+
logflare_storage:
213+
type: 'http'
214+
inputs:
215+
- storage_logs
216+
encoding:
217+
codec: 'json'
218+
method: 'post'
219+
request:
220+
retry_max_duration_secs: 10
221+
uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
222+
logflare_kong:
223+
type: 'http'
224+
inputs:
225+
- kong_logs
226+
- kong_err
227+
encoding:
228+
codec: 'json'
229+
method: 'post'
230+
request:
231+
retry_max_duration_secs: 10
232+
uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

0 commit comments

Comments
 (0)