# performance.yml — 189 lines (159 loc), 6.41 KB
# (GitHub web-UI chrome and the scraped line-number gutter removed; the
# workflow definition follows.)
---
# CI workflow: runs Criterion benchmarks on pushes to main and on
# release-plz PRs, compares against the base branch, posts the results
# as a PR comment, and fails the job on regressions above threshold.
name: Performance Tests

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    # Only run on release-plz PRs to document performance
    types: [opened, synchronize]
  workflow_dispatch:

permissions:
  contents: read
  issues: write
  pull-requests: write

env:
  CARGO_TERM_COLOR: always
  RUST_BACKTRACE: "1"
  # Performance thresholds — quoted so YAML keeps them as strings, which is
  # what the shell (awk float compare) and the JS comment builder consume.
  MAX_LATENCY_MS: "5"
  MAX_REGRESSION_PERCENT: "10"

jobs:
  performance:
    name: Performance Benchmarks
    runs-on: ubuntu-latest
    # Run on pushes to main or on release-plz PRs only
    if: |
      github.event_name == 'push' ||
      (github.event_name == 'pull_request' &&
       (startsWith(github.head_ref, 'release-plz-') ||
        github.event.pull_request.user.login == 'github-actions[bot]'))
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Need history for baseline comparison

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable

      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2
        with:
          key: ${{ runner.os }}-cargo-bench-${{ hashFiles('**/Cargo.lock') }}

      - name: Install cargo-criterion
        # "|| true" tolerates "already installed" errors on cache-warmed
        # runners; a genuinely missing critcmp would surface in the
        # comparison step below.
        run: |
          cargo install cargo-criterion --locked || true
          cargo install critcmp --locked || true

      - name: Run benchmark validation tests
        run: cargo test --test benchmark_validation

      - name: Get baseline benchmark results
        if: github.event_name == 'pull_request'
        run: |
          # Checkout base branch for comparison
          git fetch origin ${{ github.base_ref }}
          git checkout origin/${{ github.base_ref }}
          # Run benchmarks on base branch
          cargo bench --bench proxy_performance -- --save-baseline base --sample-size 50
          # Return to PR branch
          git checkout -

      - name: Run performance benchmarks
        run: |
          cargo bench --bench proxy_performance -- --save-baseline current --sample-size 50

      - name: Compare benchmark results
        if: github.event_name == 'pull_request'
        id: benchmark_comparison
        run: |
          # Compare results
          critcmp base current > comparison.txt || true
          # Check for regressions - look for increases above threshold
          # critcmp outputs format: "+12.34%" for regressions
          REGRESSION_FOUND=false
          # Extract percentage increases and check against threshold
          while IFS= read -r line; do
            if echo "$line" | grep -E '\+[0-9]+\.[0-9]+%' > /dev/null; then
              # Extract the percentage value
              PERCENT=$(echo "$line" | grep -oE '\+[0-9]+\.[0-9]+' | tr -d '+')
              # Check if it exceeds our threshold using awk for float comparison
              if awk -v p="$PERCENT" -v t="$MAX_REGRESSION_PERCENT" 'BEGIN { exit !(p >= t) }'; then
                echo "Regression found: +${PERCENT}% exceeds threshold of ${MAX_REGRESSION_PERCENT}%"
                REGRESSION_FOUND=true
              fi
            fi
          done < comparison.txt
          if [ "$REGRESSION_FOUND" = true ]; then
            echo "Performance regression detected!"
            echo "REGRESSION_DETECTED=true" >> "$GITHUB_OUTPUT"
          else
            echo "No significant regression detected"
            echo "REGRESSION_DETECTED=false" >> "$GITHUB_OUTPUT"
          fi
          # Save comparison for PR comment
          echo '```' > benchmark_report.md
          cat comparison.txt >> benchmark_report.md
          echo '```' >> benchmark_report.md

      - name: Run memory profiling
        run: |
          cargo bench --bench memory_profiling > memory_profile.txt 2>&1 || true

      - name: Post benchmark results to PR
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const fs = require('fs');
            let comment = '## 📊 Performance Benchmark Results\n\n';
            // Add comparison results if available
            try {
              const comparison = fs.readFileSync('benchmark_report.md', 'utf8');
              comment += '### Benchmark Comparison (base vs current)\n';
              comment += comparison + '\n\n';
            } catch (e) {
              comment += '### Benchmark Results\n';
              comment += 'No baseline comparison available.\n\n';
            }
            // Add memory profile summary
            try {
              const memProfile = fs.readFileSync('memory_profile.txt', 'utf8');
              const lines = memProfile.split('\n').slice(-20).join('\n');
              comment += '### Memory Profile Summary\n';
              comment += '```\n' + lines + '\n```\n\n';
            } catch (e) {
              console.log('No memory profile available');
            }
            // Check for regression
            const regressionDetected = '${{ steps.benchmark_comparison.outputs.REGRESSION_DETECTED }}' === 'true';
            if (regressionDetected) {
              comment += '⚠️ **Performance regression detected!** Please review the benchmark comparison above.\n';
              comment += 'Regressions exceeding ' + process.env.MAX_REGRESSION_PERCENT + '% require justification.\n';
            } else {
              comment += '✅ **No significant performance regressions detected.**\n';
            }
            comment += '\n<sub>All latency requirements (<' + process.env.MAX_LATENCY_MS + 'ms) are enforced by the test suite.</sub>';
            // Find existing comment or create new one
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const botComment = comments.find(comment =>
              comment.user.type === 'Bot' &&
              comment.body.includes('Performance Benchmark Results')
            );
            if (botComment) {
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: botComment.id,
                body: comment,
              });
            } else {
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: comment,
              });
            }

      - name: Fail if regression detected
        if: steps.benchmark_comparison.outputs.REGRESSION_DETECTED == 'true'
        run: |
          echo "Performance regression detected! See PR comment for details."
          exit 1