       - main
 
 jobs:
-  benchmark:
+  run-benchmark:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -33,14 +33,215 @@ jobs:
           mkdir -p /tmp/artifacts/
           ARTIFACT_PATH=/tmp/artifacts make test-benchmark
 
-      - name: Compare with baseline
+      - name: Convert Benchmark Output to Prometheus Metrics
         run: |
-          go install golang.org/x/perf/cmd/benchstat@latest
-          benchstat benchmarks/baseline.txt /tmp/artifacts/new.txt | tee /tmp/artifacts/output
+          mkdir -p /tmp/artifacts/prometheus/
+          cat << 'EOF' > benchmark_to_prometheus.py
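+          # Converts `go test -bench -benchmem` output lines into Prometheus exposition-format gauges.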
+          import sys
+          import re
 
-      - name: Upload benchmark results
+          def parse_benchmark_output(benchmark_output):
+              metrics = []
+              for line in benchmark_output.split("\n"):
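+                  # Expected -benchmem line shape: Benchmark<Name>-<procs>  <iters>  <ns> ns/op  <B> B/op  <allocs> allocs/op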
+                  match = re.match(r"Benchmark([\w\d]+)-\d+\s+\d+\s+([\d]+)\s+ns/op\s+([\d]+)\s+B/op\s+([\d]+)\s+allocs/op", line)
+                  if match:
+                      # snake_case the Go benchmark name so the series names match the queries used later
+                      benchmark_name = re.sub(r"(?<!^)(?=[A-Z])", "_", match.group(1)).lower()
+                      time_ns = match.group(2)
+                      memory_bytes = match.group(3)
+                      allocs = match.group(4)
+
+                      metrics.append(f"benchmark_{benchmark_name}_ns {time_ns}")
+                      metrics.append(f"benchmark_{benchmark_name}_allocs {allocs}")
+                      metrics.append(f"benchmark_{benchmark_name}_mem_bytes {memory_bytes}")
+
+              return "\n".join(metrics)
+
+          if __name__ == "__main__":
+              benchmark_output = sys.stdin.read()
+              metrics = parse_benchmark_output(benchmark_output)
+              print(metrics)
+          EOF
+
+          cat /tmp/artifacts/new.txt | python3 benchmark_to_prometheus.py > /tmp/artifacts/prometheus/metrics.txt
+
+      # - name: Compare with baseline
+      #   run: |
+      #     go install golang.org/x/perf/cmd/benchstat@latest
+      #     benchstat benchmarks/baseline.txt /tmp/artifacts/new.txt | tee /tmp/artifacts/output
+
+      - name: Upload Benchmark Metrics
         uses: actions/upload-artifact@v4
         with:
-          name: benchmark-artifacts
-          path: /tmp/artifacts/
+          name: benchmark-metrics
+          path: /tmp/artifacts/prometheus/
+
+  run-prometheus:
+    needs: run-benchmark
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Download Prometheus Snapshot
+        run: |
+          echo "Available Artifacts in this run:"
+          gh run list --repo operator-framework/operator-controller --limit 5
+          gh run download --repo operator-framework/operator-controller --name prometheus-snapshot --dir .
+          ls -lh ./
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      # # this step is invalid if the artifacts are downloaded in a different job
+      # - name: Download Prometheus Snapshot2
+      #   uses: actions/download-artifact@v4
+      #   with:
+      #     name: prometheus-snapshot
+      #     path: ./
+      #   env:
+      #     GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Download Benchmark Metrics
+        uses: actions/download-artifact@v4
+        with:
+          name: benchmark-metrics
+          path: ./
+
+      - name: Set Up Prometheus Config
+        run: |
+          cat << 'EOF' > prometheus.yml
+          global:
+            scrape_interval: 5s
+          scrape_configs:
+            - job_name: 'benchmark_metrics'
+              static_configs:
+                - targets: ['localhost:9000']
+          EOF
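+          # the prom/prometheus image runs as UID 65534 (nobody), so hand the data dir over to it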
+          mkdir -p ${{ github.workspace }}/prometheus-data
+          sudo chown -R 65534:65534 ${{ github.workspace }}/prometheus-data
+          sudo chmod -R 777 ${{ github.workspace }}/prometheus-data
+
+      - name: Extract and Restore Prometheus Snapshot
+        run: |
+          SNAPSHOT_ZIP="${{ github.workspace }}/prometheus-snapshot.zip"
+          SNAPSHOT_TAR="${{ github.workspace }}/prometheus_snapshot.tar.gz"
+          SNAPSHOT_DIR="${{ github.workspace }}/prometheus-data/snapshots"
+
+          mkdir -p "$SNAPSHOT_DIR"
+
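+          # handle both a .zip and a .tar.gz snapshot archive, whichever the download produced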
+          if [[ -f "$SNAPSHOT_ZIP" ]]; then
+            echo "📦 Detected ZIP archive: $SNAPSHOT_ZIP"
+            unzip -o "$SNAPSHOT_ZIP" -d "$SNAPSHOT_DIR"
+            echo "✅ Successfully extracted ZIP snapshot."
+          elif [[ -f "$SNAPSHOT_TAR" ]]; then
+            echo "📦 Detected TAR archive: $SNAPSHOT_TAR"
+            tar -xzf "$SNAPSHOT_TAR" -C "$SNAPSHOT_DIR"
+            echo "✅ Successfully extracted TAR snapshot."
+          else
+            echo "⚠️ WARNING: No snapshot file found. Skipping extraction."
+          fi
+
+      - name: Run Prometheus
+        run: |
+          # host networking so the container can scrape the metrics server on localhost:9000
+          docker run -d --name prometheus --network host \
+            -v ${{ github.workspace }}/prometheus.yml:/etc/prometheus/prometheus.yml \
+            -v ${{ github.workspace }}/prometheus-data:/prometheus \
+            prom/prometheus --config.file=/etc/prometheus/prometheus.yml \
+            --storage.tsdb.path=/prometheus \
+            --storage.tsdb.retention.time=1h \
+            --web.enable-admin-api
+
+      - name: Wait for Prometheus to start
+        run: sleep 10
+
+      - name: Check Prometheus is running
+        run: curl -s http://localhost:9090/-/ready || (docker logs prometheus && exit 1)
 
+      - name: Start HTTP Server to Expose Metrics
+        run: |
+          cat << 'EOF' > server.py
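+          # Tiny HTTP server that serves the downloaded metrics.txt at /metrics for Prometheus to scrape.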
+          from http.server import SimpleHTTPRequestHandler, HTTPServer
+
+          class MetricsHandler(SimpleHTTPRequestHandler):
+              def do_GET(self):
+                  if self.path == "/metrics":
+                      self.send_response(200)
+                      self.send_header("Content-type", "text/plain")
+                      self.end_headers()
+                      with open("metrics.txt", "r") as f:
+                          self.wfile.write(f.read().encode())
+                  else:
+                      self.send_response(404)
+                      self.end_headers()
+
+          if __name__ == "__main__":
+              server = HTTPServer(('0.0.0.0', 9000), MetricsHandler)
+              print("Serving on port 9000...")
+              server.serve_forever()
+          EOF
+
+          nohup python3 server.py &
+
+      - name: Wait for Prometheus to Collect Data
+        run: sleep 30
+
+      - name: Check Benchmark Metrics Against Threshold
+        run: |
+          MAX_TIME_NS=1200000000 # 1.2s
+          MAX_ALLOCS=4000
+          MAX_MEM_BYTES=450000
+
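+          # metric names below must match what benchmark_to_prometheus.py emits for the benchmark being checked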
+          # Query Prometheus Metrics
+          time_ns=$(curl -s "http://localhost:9090/api/v1/query?query=benchmark_create_cluster_catalog_ns" | jq -r '.data.result[0].value[1]')
+          allocs=$(curl -s "http://localhost:9090/api/v1/query?query=benchmark_create_cluster_catalog_allocs" | jq -r '.data.result[0].value[1]')
+          mem_bytes=$(curl -s "http://localhost:9090/api/v1/query?query=benchmark_create_cluster_catalog_mem_bytes" | jq -r '.data.result[0].value[1]')
+
+          echo "⏳ Benchmark Execution Time: $time_ns ns"
+          echo "🛠️ Memory Allocations: $allocs"
+          echo "💾 Memory Usage: $mem_bytes bytes"
+
+          # threshold checking
+          if (( $(echo "$time_ns > $MAX_TIME_NS" | bc -l) )); then
+            echo "❌ ERROR: Execution time exceeds threshold!"
+            exit 1
+          fi
+
+          if (( $(echo "$allocs > $MAX_ALLOCS" | bc -l) )); then
+            echo "❌ ERROR: Too many memory allocations!"
+            exit 1
+          fi
+
+          if (( $(echo "$mem_bytes > $MAX_MEM_BYTES" | bc -l) )); then
+            echo "❌ ERROR: Memory usage exceeds threshold!"
+            exit 1
+          fi
+
+          echo "✅ All benchmarks passed within threshold!"
+
+      - name: Trigger Prometheus Snapshot
+        run: |
+          curl -X POST http://localhost:9090/api/v1/admin/tsdb/snapshot || (docker logs prometheus && exit 1)
+
+      - name: Find and Upload Prometheus Snapshot
+        run: |
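+          # the admin snapshot API writes under <storage.tsdb.path>/snapshots/<timestamp>, i.e. the mounted prometheus-data dir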
+          SNAPSHOT_PATH=$(ls -td ${{ github.workspace }}/prometheus-data/snapshots/* 2>/dev/null | head -1 || echo "")
+          if [[ -z "$SNAPSHOT_PATH" ]]; then
+            echo "❌ No Prometheus snapshot found!"
+            docker logs prometheus
+            exit 1
+          fi
+
+          echo "✅ Prometheus snapshot stored in: $SNAPSHOT_PATH"
+          tar -czf $GITHUB_WORKSPACE/prometheus_snapshot.tar.gz -C "$SNAPSHOT_PATH" .
+
+
+      - name: Stop Prometheus
+        run: docker stop prometheus
+
+      - name: Upload Prometheus Snapshot
+        uses: actions/upload-artifact@v4
+        with:
+          name: prometheus-snapshot
+          path: prometheus_snapshot.tar.gz
+