-
Notifications
You must be signed in to change notification settings - Fork 246
Expand file tree
/
Copy pathvalues.yaml
More file actions
130 lines (118 loc) · 5.42 KB
/
values.yaml
File metadata and controls
130 lines (118 loc) · 5.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
# BetterStack Logs Helm Chart — values.yaml
#
# Deploys Vector as a DaemonSet to collect container stdout/stderr logs
# and ship them to BetterStack with Pino JSON flattened to top-level fields.
#
# This replaces the in-process @logtail/pino transport that causes
# unbounded heap growth (see cloud/issues/067-heap-growth-investigation).
#
# The transforms below:
#   1. Filter to only our cloud app containers (no kube-system noise)
#   2. Parse the Pino JSON from the message field
#   3. Flatten it to the top level so region=, feature=, service= work in Live Tail
#   4. Add a log_source="vector" marker to distinguish from @logtail/pino during migration
#
# Usage per cluster:
#   porter helm --cluster <CLUSTER_ID> -- install betterstack-logs \
#     betterstack-logs/betterstack-logs \
#     --namespace betterstack \
#     --create-namespace \
#     --values cloud/infra/betterstack-logs/values.yaml \
#     --set "vector.customConfig.sinks.better_stack_http_sink.auth.token=$SOURCE_TOKEN" \
#     --set "vector.customConfig.sinks.better_stack_http_metrics_sink.auth.token=$SOURCE_TOKEN" \
#     --set "vector.customConfig.sinks.better_stack_http_sink.uri=https://$INGESTING_HOST/" \
#     --set "vector.customConfig.sinks.better_stack_http_metrics_sink.uri=https://$INGESTING_HOST/metrics"
#
# Source tokens per region (stored in Doppler as BETTERSTACK_SOURCE_TOKEN):
#   doppler secrets get BETTERSTACK_SOURCE_TOKEN --project mentraos-cloud --config <CONFIG> --plain
#
# The ingesting host for MentraCloud-Prod source:
#   s2324289.eu-nbg-2.betterstackdata.com
vector:
  customConfig:
    transforms:
      # Step 1: Filter to only our cloud app containers.
      # Drops all kube-system, cert-manager, ingress-nginx, porter-agent, etc.
      cloud_only_filter:
        type: "filter"
        inputs: ["better_stack_kubernetes_parser"]
        # ">-" folds the lines into one VRL expression and strips the
        # trailing newline (">" would keep one; harmless, but noisier).
        condition: >-
          contains(to_string!(.kubernetes.container_name), "cloud-prod-cloud") ||
          contains(to_string!(.kubernetes.container_name), "cloud-staging-cloud") ||
          contains(to_string!(.kubernetes.container_name), "cloud-debug-cloud") ||
          contains(to_string!(.kubernetes.container_name), "cloud-dev-cloud")
      # Step 2: Flatten the Pino JSON so all fields are top-level.
      # Input from Vector: { kubernetes: {...}, message: { level: 30, region: "us-west", ... } }
      # Output: { level: 30, region: "us-west", ..., kubernetes_pod: "...", log_source: "vector" }
      flatten_pino:
        type: "remap"
        inputs: ["cloud_only_filter"]
        source: |
          # Grab kubernetes context before we restructure; "??" falls back to
          # "unknown" when the field is missing or not coercible to a string.
          kube_pod = to_string(.kubernetes.pod_name) ?? "unknown"
          kube_container = to_string(.kubernetes.container_name) ?? "unknown"

          # The Pino JSON lives in .message.
          # It may already be parsed as an object, or it may be a raw JSON string.
          pino_data = .message
          if is_string(pino_data) {
            parsed, err = parse_json(string!(pino_data))
            if err == null {
              pino_data = parsed
            }
          }

          # If we successfully got a Pino object, flatten it to the top level.
          # This makes region=, feature=, service=, heapUsedMB= all queryable in Live Tail.
          if is_object(pino_data) {
            . = object!(pino_data)
          }

          # Normalize fields to match what @logtail/pino sends to BetterStack.
          # Without this, existing queries and Live Tail display break.

          # msg → message (Pino uses "msg", @logtail/pino renames to "message")
          if exists(.msg) {
            .message = del(.msg)
          }

          # time → dt (@logtail/pino sends "dt", Pino natively uses "time")
          if exists(.time) {
            .dt = del(.time)
          }

          # level: numeric → string (Pino outputs 10/20/30/40/50/60, BetterStack expects strings)
          if is_integer(.level) {
            numeric_level = to_int!(.level)
            if numeric_level >= 60 {
              .level = "fatal"
            } else if numeric_level >= 50 {
              .level = "error"
            } else if numeric_level >= 40 {
              .level = "warn"
            } else if numeric_level >= 30 {
              .level = "info"
            } else if numeric_level >= 20 {
              .level = "debug"
            } else {
              .level = "trace"
            }
          }

          # Nest Vector metadata into _meta so it's out of the way of app fields.
          # Everything from Pino stays flat at the root. The extra stuff Vector adds
          # goes into _meta — there if you need it, not cluttering the log.
          ._meta.kubernetes_pod = kube_pod
          ._meta.kubernetes_container = kube_container
          ._meta.log_source = "vector"
    sinks:
      # Override the default sink to use our flattened output instead of raw.
      better_stack_http_sink:
        inputs: ["flatten_pino"]
        # Override uri and auth.token via --set flags (do NOT commit tokens).
        uri: "https://PLACEHOLDER_INGESTING_HOST/"
        auth:
          strategy: "bearer"
          token: "PLACEHOLDER_SOURCE_TOKEN"
      better_stack_http_metrics_sink:
        uri: "https://PLACEHOLDER_INGESTING_HOST/metrics"
        auth:
          strategy: "bearer"
          token: "PLACEHOLDER_SOURCE_TOKEN"

# The existing better-stack-collector DaemonSet already installs a metrics
# server on each cluster. Disable it here to avoid conflicts.
metrics-server:
  enabled: false