Skip to content

Commit bd0deeb

Browse files
committed
best practices
Signed-off-by: Sertac Ozercan <[email protected]>
1 parent 63673b5 commit bd0deeb

File tree

4 files changed

+392
-5
lines changed

4 files changed

+392
-5
lines changed

cmd/gator/bench/bench.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ func init() {
101101
"a URL to an OCI image containing policies. Can be specified multiple times.")
102102
Cmd.Flags().StringVarP(&flagTempDir, flagNameTempDir, "d", "",
103103
"temporary directory to download and unpack images to.")
104-
Cmd.Flags().StringVarP(&flagEngine, flagNameEngine, "e", "rego",
104+
Cmd.Flags().StringVarP(&flagEngine, flagNameEngine, "e", string(bench.EngineRego),
105105
fmt.Sprintf("policy engine to benchmark. One of: %s|%s|%s", bench.EngineRego, bench.EngineCEL, bench.EngineAll))
106106
Cmd.Flags().IntVarP(&flagIterations, flagNameIterations, "n", 1000,
107107
"number of benchmark iterations to run. Use at least 1000 for meaningful P99 metrics.")
@@ -228,13 +228,13 @@ func run(_ *cobra.Command, _ []string) {
228228

229229
func parseEngine(s string) (bench.Engine, error) {
230230
switch strings.ToLower(s) {
231-
case "rego":
231+
case string(bench.EngineRego):
232232
return bench.EngineRego, nil
233-
case "cel":
233+
case string(bench.EngineCEL):
234234
return bench.EngineCEL, nil
235-
case "all":
235+
case string(bench.EngineAll):
236236
return bench.EngineAll, nil
237237
default:
238-
return "", fmt.Errorf("invalid engine %q (valid: rego, cel, all)", s)
238+
return "", fmt.Errorf("invalid engine %q (valid: %s, %s, %s)", s, bench.EngineRego, bench.EngineCEL, bench.EngineAll)
239239
}
240240
}
Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
#!/bin/bash
# Analysis script for gator bench data.
#
# Reads the JSON result files produced by gather-data.sh out of
# $OUTPUT_DIR and prints human-readable summary tables for each of the
# six experiments. Requires `jq` (JSON field extraction) and `bc`
# (fixed-point arithmetic). Each section is skipped silently if its
# input files are missing, so partial data sets still analyze cleanly.
# NOTE(review): assumes every JSON file is a non-empty array whose first
# element carries the metrics — verify against gator's --output json shape.

OUTPUT_DIR="/tmp/gator-bench-data"

if [ ! -d "$OUTPUT_DIR" ]; then
    echo "Error: No data found. Run gather-data.sh first."
    exit 1
fi

echo "=== Gator Bench Data Analysis ==="
echo ""

###############################################################################
# Test 1: CEL vs Rego Comparison
###############################################################################
echo "=== Test 1: CEL vs Rego Comparison ==="
echo ""

if [ -f "$OUTPUT_DIR/test1_rego.json" ] && [ -f "$OUTPUT_DIR/test1_cel.json" ]; then
    REGO_THROUGHPUT=$(jq -r '.[0].reviewsPerSecond' "$OUTPUT_DIR/test1_rego.json")
    CEL_THROUGHPUT=$(jq -r '.[0].reviewsPerSecond' "$OUTPUT_DIR/test1_cel.json")

    REGO_MEAN=$(jq -r '.[0].latencies.mean' "$OUTPUT_DIR/test1_rego.json")
    CEL_MEAN=$(jq -r '.[0].latencies.mean' "$OUTPUT_DIR/test1_cel.json")

    REGO_P99=$(jq -r '.[0].latencies.p99' "$OUTPUT_DIR/test1_rego.json")
    CEL_P99=$(jq -r '.[0].latencies.p99' "$OUTPUT_DIR/test1_cel.json")

    REGO_SETUP=$(jq -r '.[0].setupDuration' "$OUTPUT_DIR/test1_rego.json")
    CEL_SETUP=$(jq -r '.[0].setupDuration' "$OUTPUT_DIR/test1_cel.json")

    echo "Metric Rego CEL Ratio (CEL/Rego)"
    echo "------ ---- --- ----------------"
    # Ratios are intentionally oriented so that > 1.0 always reads as
    # "CEL is better": throughput uses CEL/Rego, latencies use Rego/CEL.
    printf "Throughput %-17.2f %-17.2f %.2fx\n" "$REGO_THROUGHPUT" "$CEL_THROUGHPUT" "$(echo "scale=2; $CEL_THROUGHPUT / $REGO_THROUGHPUT" | bc)"
    printf "Mean Latency (ns) %-17.0f %-17.0f %.2fx\n" "$REGO_MEAN" "$CEL_MEAN" "$(echo "scale=2; $REGO_MEAN / $CEL_MEAN" | bc)"
    printf "P99 Latency (ns) %-17.0f %-17.0f %.2fx\n" "$REGO_P99" "$CEL_P99" "$(echo "scale=2; $REGO_P99 / $CEL_P99" | bc)"
    printf "Setup Time (ns) %-17.0f %-17.0f %.2fx\n" "$REGO_SETUP" "$CEL_SETUP" "$(echo "scale=2; $REGO_SETUP / $CEL_SETUP" | bc)"
    echo ""
fi

###############################################################################
# Test 2: Concurrency Scaling
###############################################################################
echo "=== Test 2: Concurrency Scaling ==="
echo ""

echo "Concurrency Throughput P99 Latency Efficiency"
echo "----------- ---------- ----------- ----------"

BASELINE_THROUGHPUT=""
for CONC in 1 2 4 8 16; do
    FILE="$OUTPUT_DIR/test2_conc_${CONC}.json"
    if [ -f "$FILE" ]; then
        THROUGHPUT=$(jq -r '.[0].reviewsPerSecond' "$FILE")
        P99=$(jq -r '.[0].latencies.p99' "$FILE")

        # The first (lowest) concurrency level observed becomes the
        # 100%-efficiency baseline for the linear-scaling comparison.
        if [ -z "$BASELINE_THROUGHPUT" ]; then
            BASELINE_THROUGHPUT=$THROUGHPUT
            EFFICIENCY="100%"
        else
            # Expected linear scaling
            EXPECTED=$(echo "scale=2; $BASELINE_THROUGHPUT * $CONC" | bc)
            # Multiply BEFORE dividing: bc applies scale=0 truncation to
            # each division, so ($THROUGHPUT / $EXPECTED) * 100 would
            # truncate the sub-1 ratio to 0 and always report 0%.
            EFF=$(echo "scale=0; ($THROUGHPUT * 100) / $EXPECTED" | bc)
            EFFICIENCY="${EFF}%"
        fi

        # Latencies are in nanoseconds in the JSON; show milliseconds here.
        P99_MS=$(echo "scale=3; $P99 / 1000000" | bc)
        printf "%-12d %-14.2f %-14.3fms %s\n" "$CONC" "$THROUGHPUT" "$P99_MS" "$EFFICIENCY"
    fi
done
echo ""

###############################################################################
# Test 3: P99 Stability
###############################################################################
echo "=== Test 3: P99 Stability vs Iteration Count ==="
echo ""

echo "Iterations P50 (µs) P95 (µs) P99 (µs) Mean (µs)"
echo "---------- -------- -------- -------- ---------"

for ITER in 50 100 500 1000 5000; do
    FILE="$OUTPUT_DIR/test3_iter_${ITER}.json"
    if [ -f "$FILE" ]; then
        P50=$(jq -r '.[0].latencies.p50' "$FILE")
        P95=$(jq -r '.[0].latencies.p95' "$FILE")
        P99=$(jq -r '.[0].latencies.p99' "$FILE")
        MEAN=$(jq -r '.[0].latencies.mean' "$FILE")

        # Convert nanoseconds to microseconds for readability.
        P50_US=$(echo "scale=2; $P50 / 1000" | bc)
        P95_US=$(echo "scale=2; $P95 / 1000" | bc)
        P99_US=$(echo "scale=2; $P99 / 1000" | bc)
        MEAN_US=$(echo "scale=2; $MEAN / 1000" | bc)

        printf "%-12d %-11.2f %-11.2f %-11.2f %.2f\n" "$ITER" "$P50_US" "$P95_US" "$P99_US" "$MEAN_US"
    fi
done
echo ""

###############################################################################
# Test 4: Memory Comparison
###############################################################################
echo "=== Test 4: Memory Profiling ==="
echo ""

if [ -f "$OUTPUT_DIR/test4_rego_memory.json" ] && [ -f "$OUTPUT_DIR/test4_cel_memory.json" ]; then
    # jq's // operator substitutes "N/A" when the field is null/absent
    # (e.g. a run recorded without --memory).
    REGO_ALLOCS=$(jq -r '.[0].memoryStats.allocsPerReview // "N/A"' "$OUTPUT_DIR/test4_rego_memory.json")
    CEL_ALLOCS=$(jq -r '.[0].memoryStats.allocsPerReview // "N/A"' "$OUTPUT_DIR/test4_cel_memory.json")

    REGO_BYTES=$(jq -r '.[0].memoryStats.bytesPerReview // "N/A"' "$OUTPUT_DIR/test4_rego_memory.json")
    CEL_BYTES=$(jq -r '.[0].memoryStats.bytesPerReview // "N/A"' "$OUTPUT_DIR/test4_cel_memory.json")

    echo "Metric Rego CEL"
    echo "------ ---- ---"
    printf "Allocs/Review %-17s %s\n" "$REGO_ALLOCS" "$CEL_ALLOCS"
    printf "Bytes/Review %-17s %s\n" "$REGO_BYTES" "$CEL_BYTES"
    echo ""
fi

###############################################################################
# Test 5: Warmup Impact
###############################################################################
echo "=== Test 5: Warmup Impact ==="
echo ""

echo "Warmup Mean (µs) P99 (µs)"
echo "------ --------- --------"

for WARMUP in 0 5 10 50 100; do
    FILE="$OUTPUT_DIR/test5_warmup_${WARMUP}.json"
    if [ -f "$FILE" ]; then
        MEAN=$(jq -r '.[0].latencies.mean' "$FILE")
        P99=$(jq -r '.[0].latencies.p99' "$FILE")

        MEAN_US=$(echo "scale=2; $MEAN / 1000" | bc)
        P99_US=$(echo "scale=2; $P99 / 1000" | bc)

        printf "%-12d %-11.2f %.2f\n" "$WARMUP" "$MEAN_US" "$P99_US"
    fi
done
echo ""

###############################################################################
# Test 6: Variance Analysis
###############################################################################
echo "=== Test 6: Variance Analysis ==="
echo ""

echo "Run Throughput Mean (µs) P99 (µs)"
echo "--- ---------- --------- --------"

SUM_THROUGHPUT=0
SUM_MEAN=0
SUM_P99=0
COUNT=0

for RUN in 1 2 3 4 5; do
    FILE="$OUTPUT_DIR/test6_run_${RUN}.json"
    if [ -f "$FILE" ]; then
        THROUGHPUT=$(jq -r '.[0].reviewsPerSecond' "$FILE")
        MEAN=$(jq -r '.[0].latencies.mean' "$FILE")
        P99=$(jq -r '.[0].latencies.p99' "$FILE")

        MEAN_US=$(echo "scale=2; $MEAN / 1000" | bc)
        P99_US=$(echo "scale=2; $P99 / 1000" | bc)

        printf "%-5d %-14.2f %-12.2f %.2f\n" "$RUN" "$THROUGHPUT" "$MEAN_US" "$P99_US"

        # Accumulate with bc: the values are decimals, so shell integer
        # arithmetic would not work here.
        SUM_THROUGHPUT=$(echo "$SUM_THROUGHPUT + $THROUGHPUT" | bc)
        SUM_MEAN=$(echo "$SUM_MEAN + $MEAN_US" | bc)
        SUM_P99=$(echo "$SUM_P99 + $P99_US" | bc)
        COUNT=$((COUNT + 1))
    fi
done

if [ "$COUNT" -gt 0 ]; then
    AVG_THROUGHPUT=$(echo "scale=2; $SUM_THROUGHPUT / $COUNT" | bc)
    AVG_MEAN=$(echo "scale=2; $SUM_MEAN / $COUNT" | bc)
    AVG_P99=$(echo "scale=2; $SUM_P99 / $COUNT" | bc)

    echo "--- ---------- --------- --------"
    printf "AVG %-14.2f %-12.2f %.2f\n" "$AVG_THROUGHPUT" "$AVG_MEAN" "$AVG_P99"
fi
echo ""

echo "=== Analysis Complete ==="
Lines changed: 143 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,143 @@
1+
#!/bin/bash
# Performance data gathering script for gator bench
# This script collects data to understand performance characteristics.
#
# It builds the gator binary, then runs six benchmark experiments and
# writes one JSON result file per run into $OUTPUT_DIR, for later
# consumption by analyze-data.sh. Run it from the repository root
# (paths to the binary and test fixtures are repo-relative).

# -e: abort on any command failure; -u: treat unset variables as errors;
# -o pipefail: a pipeline fails if any stage fails.
set -euo pipefail

GATOR="./bin/gator"
OUTPUT_DIR="/tmp/gator-bench-data"
ITERATIONS=1000

mkdir -p "$OUTPUT_DIR"

echo "=== Gator Bench Data Collection ==="
echo "Output directory: $OUTPUT_DIR"
echo "Iterations per test: $ITERATIONS"
echo ""

# Build gator first
echo "Building gator..."
# Capture the build log instead of discarding it: with a plain
# `> /dev/null 2>&1` redirect, a failed build under `set -e` would
# exit the script with no diagnostics at all.
BUILD_LOG=$(mktemp)
if ! make gator > "$BUILD_LOG" 2>&1; then
    echo "Error: 'make gator' failed. Build output:" >&2
    cat "$BUILD_LOG" >&2
    rm -f "$BUILD_LOG"
    exit 1
fi
rm -f "$BUILD_LOG"

# Fail fast with a clear message if the build did not produce the binary
# where we expect it, rather than erroring on the first benchmark run.
if [ ! -x "$GATOR" ]; then
    echo "Error: $GATOR not found or not executable after build." >&2
    exit 1
fi
echo "Done."
echo ""

###############################################################################
# Test 1: CEL vs Rego - Same Policy (K8sAllowedRepos supports both)
###############################################################################
echo "=== Test 1: CEL vs Rego Comparison ==="

echo "Running Rego engine..."
"$GATOR" bench \
    --filename test/gator/bench/both/ \
    --engine rego \
    --iterations "$ITERATIONS" \
    --output json > "$OUTPUT_DIR/test1_rego.json"

echo "Running CEL engine..."
"$GATOR" bench \
    --filename test/gator/bench/both/ \
    --engine cel \
    --iterations "$ITERATIONS" \
    --output json > "$OUTPUT_DIR/test1_cel.json"

echo "Results saved to test1_rego.json and test1_cel.json"
echo ""

###############################################################################
# Test 2: Concurrency Scaling
###############################################################################
echo "=== Test 2: Concurrency Scaling ==="

for CONC in 1 2 4 8 16; do
    echo "Running with concurrency=$CONC..."
    "$GATOR" bench \
        --filename test/gator/bench/basic/ \
        --iterations "$ITERATIONS" \
        --concurrency "$CONC" \
        --output json > "$OUTPUT_DIR/test2_conc_${CONC}.json"
done

echo "Results saved to test2_conc_*.json"
echo ""

###############################################################################
# Test 3: Iteration Count Impact on P99 Stability
###############################################################################
echo "=== Test 3: P99 Stability vs Iteration Count ==="

for ITER in 50 100 500 1000 5000; do
    echo "Running with iterations=$ITER..."
    "$GATOR" bench \
        --filename test/gator/bench/basic/ \
        --iterations "$ITER" \
        --output json > "$OUTPUT_DIR/test3_iter_${ITER}.json"
done

echo "Results saved to test3_iter_*.json"
echo ""

###############################################################################
# Test 4: Memory Profiling Comparison
###############################################################################
echo "=== Test 4: Memory Profiling ==="

echo "Running Rego with memory profiling..."
"$GATOR" bench \
    --filename test/gator/bench/both/ \
    --engine rego \
    --iterations "$ITERATIONS" \
    --memory \
    --output json > "$OUTPUT_DIR/test4_rego_memory.json"

echo "Running CEL with memory profiling..."
"$GATOR" bench \
    --filename test/gator/bench/both/ \
    --engine cel \
    --iterations "$ITERATIONS" \
    --memory \
    --output json > "$OUTPUT_DIR/test4_cel_memory.json"

echo "Results saved to test4_*_memory.json"
echo ""

###############################################################################
# Test 5: Warmup Impact
###############################################################################
echo "=== Test 5: Warmup Impact ==="

for WARMUP in 0 5 10 50 100; do
    echo "Running with warmup=$WARMUP..."
    "$GATOR" bench \
        --filename test/gator/bench/basic/ \
        --iterations 500 \
        --warmup "$WARMUP" \
        --output json > "$OUTPUT_DIR/test5_warmup_${WARMUP}.json"
done

echo "Results saved to test5_warmup_*.json"
echo ""

###############################################################################
# Test 6: Multiple Runs for Variance Analysis
###############################################################################
echo "=== Test 6: Variance Analysis (5 runs) ==="

for RUN in 1 2 3 4 5; do
    echo "Run $RUN/5..."
    "$GATOR" bench \
        --filename test/gator/bench/basic/ \
        --iterations "$ITERATIONS" \
        --output json > "$OUTPUT_DIR/test6_run_${RUN}.json"
done

echo "Results saved to test6_run_*.json"
echo ""

###############################################################################
# Summary
###############################################################################
echo "=== Data Collection Complete ==="
echo ""
echo "All data saved to: $OUTPUT_DIR"
echo ""
echo "To analyze, run: ./test/gator/bench/analyze-data.sh"

0 commit comments

Comments
 (0)