# chart-test.yml — 183 lines (151 loc) · 5.14 KB
# (Header reconstructed from scraped GitHub UI metadata; the original scrape
# included page chrome and a line-number gutter that were not file content.)
---
# Helm Chart Integration Test
#
# Spins up a local kind cluster, runs helm-unittest, deploys the
# hdx-oss-v2 chart with CI-sized values, bootstraps a test team in
# MongoDB, and smoke-tests the running stack. Logs are dumped on
# failure and the cluster is always torn down.
#
# NOTE(review): this file was recovered from a scrape that stripped all
# indentation; nesting below was reconstructed from the GitHub Actions
# workflow schema and should be diffed against the original if available.
name: Helm Chart Integration Test

on:
  # Run on merge to main
  push:
    branches: [ main ]
  # Run on pull requests
  pull_request:
    branches: [ main ]
  # Run nightly at 2 AM UTC
  schedule:
    - cron: '0 2 * * *'
  # Allow manual trigger
  workflow_dispatch:

jobs:
  test-helm-chart:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Helm
        uses: azure/setup-helm@v3
        with:
          version: '3.12.0'

      # Nightly runs test against the rolling "2-nightly" image tag
      # instead of the pinned appVersion in Chart.yaml.
      - name: Update appVersion for nightly builds
        if: github.event_name == 'schedule'
        run: |
          echo "Updating appVersion to 2-nightly for scheduled builds"
          sed -i 's/^appVersion:.*/appVersion: 2-nightly/' charts/hdx-oss-v2/Chart.yaml
          echo "Updated Chart.yaml:"
          cat charts/hdx-oss-v2/Chart.yaml

      # Map NodePorts 30000/30001 to host ports 3000 (frontend) and
      # 4318 (OTLP/HTTP) so the smoke tests can reach the services.
      - name: Create kind cluster config
        run: |
          cat > kind-config.yaml << EOF
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          nodes:
            - role: control-plane
              extraPortMappings:
                - containerPort: 30000
                  hostPort: 3000
                  protocol: TCP
                - containerPort: 30001
                  hostPort: 4318
                  protocol: TCP
          EOF

      - name: Create kind cluster
        uses: helm/kind-action@v1
        with:
          cluster_name: hyperdx-test
          config: kind-config.yaml

      # Provides a default StorageClass so the chart's PVCs can bind.
      - name: Install local-path-provisioner
        run: |
          kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.24/deploy/local-path-storage.yaml
          kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

      - name: Run Helm unit tests
        run: |
          helm plugin install https://github.com/helm-unittest/helm-unittest.git || true
          helm unittest charts/hdx-oss-v2

      # NOTE(review): nesting inside test-values.yaml was reconstructed;
      # in particular, whether `service:` belongs under `hyperdx:` or at
      # top level depends on the chart's values schema — verify against
      # charts/hdx-oss-v2/values.yaml.
      - name: Deploy HyperDX chart
        run: |
          # Create test values for faster deployment
          cat > test-values.yaml << EOF
          hyperdx:
            apiKey: "test-api-key-for-ci"
            frontendUrl: "http://localhost:3000"
            replicas: 1
            service:
              type: NodePort
              nodePort: 30000
          clickhouse:
            persistence:
              enabled: true
              dataSize: 2Gi
              logSize: 1Gi
          mongodb:
            persistence:
              enabled: true
              dataSize: 2Gi
          otel:
            resources:
              requests:
                memory: "128Mi"
                cpu: "100m"
              limits:
                memory: "256Mi"
                cpu: "200m"
          EOF
          # Install the chart
          helm install hyperdx-test ./charts/hdx-oss-v2 -f test-values.yaml --timeout=5m
          # Give services time to initialize after pods are running
          echo "Waiting for services to initialize..."
          sleep 20

      # Inserts a team matching the configured apiKey so the app and
      # collectors can authenticate, then verifies the insert succeeded.
      - name: Bootstrap team in MongoDB
        run: |
          # Wait for MongoDB to be ready
          kubectl wait --for=condition=Ready pods -l app=mongodb --timeout=300s
          echo "Creating test team in MongoDB..."
          kubectl exec -n default deployment/hyperdx-test-hdx-oss-v2-mongodb -- mongosh hyperdx --eval "
            db.teams.insertOne({
              name: 'CI Test Team',
              apiKey: 'test-api-key-for-ci',
              collectorAuthenticationEnforced: false,
              createdAt: new Date(),
              updatedAt: new Date()
            })
          "
          echo "Verifying team creation..."
          kubectl exec -n default deployment/hyperdx-test-hdx-oss-v2-mongodb -- mongosh hyperdx --eval "
            const team = db.teams.findOne({ apiKey: 'test-api-key-for-ci' });
            if (team) {
              print('Team created successfully:', team.name);
            } else {
              print('Team creation failed');
              exit(1);
            }
          "
          echo "Waiting for OpAMP server to reconfigure collectors..."
          sleep 30

      - name: Verify deployment
        run: |
          echo "Initial pod status:"
          kubectl get pods -o wide
          echo "Waiting for all pods to be ready..."
          kubectl wait --for=condition=Ready pods --all --timeout=600s
          echo "Final pod status:"
          kubectl get pods -o wide
          kubectl get services

      - name: Run comprehensive smoke tests
        run: |
          chmod +x ./scripts/smoke-test.sh
          RELEASE_NAME=hyperdx-test NAMESPACE=default ./scripts/smoke-test.sh

      # Best-effort diagnostics; each command tolerates missing pods.
      - name: Collect logs on failure
        if: failure()
        run: |
          echo "=== Pod Status ==="
          kubectl get pods -o wide
          echo "=== Events ==="
          kubectl get events --sort-by=.metadata.creationTimestamp
          echo "=== HyperDX App Logs ==="
          kubectl logs -l app=app --tail=100 || true
          echo "=== ClickHouse Logs ==="
          kubectl logs -l app=clickhouse --tail=100 || true
          echo "=== MongoDB Logs ==="
          kubectl logs -l app=mongodb --tail=100 || true
          echo "=== OTEL Collector Logs ==="
          kubectl logs -l app=otel-collector --tail=100 || true

      - name: Cleanup
        if: always()
        run: |
          helm uninstall hyperdx-test || true
          kind delete cluster --name hyperdx-test || true