Skip to content

Commit b259832

Browse files
authored
Merge branch 'alpha' into moumouls/upgrade-appollo-server
2 parents c955ab8 + a85ba19 commit b259832

26 files changed

+1500
-91
lines changed
Lines changed: 302 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,302 @@
1+
name: ci-performance
2+
on:
3+
# NOTE(review): pull_request_target runs with write permissions and secret access,
# yet this workflow later checks out the untrusted PR head and executes its code
# (npm ci / npm run build / npm run benchmark). This is the known "pwn request"
# pattern — confirm this is acceptable, or switch to pull_request and move the
# privileged commenting into a separate workflow_run job.
pull_request_target:
4+
branches:
5+
- alpha
6+
- beta
7+
- release
8+
- 'release-[0-9]+.x.x'
9+
- next-major
10+
paths-ignore:
11+
- '**.md'
12+
- 'docs/**'
13+
14+
env:
15+
NODE_VERSION: 24.11.0
16+
MONGODB_VERSION: 8.0.4
17+
18+
permissions:
19+
contents: read
20+
pull-requests: write
21+
issues: write
22+
23+
jobs:
24+
performance-check:
25+
name: Benchmarks
26+
runs-on: ubuntu-latest
27+
timeout-minutes: 30
28+
29+
steps:
30+
- name: Checkout base branch
31+
uses: actions/checkout@v4
32+
with:
33+
ref: ${{ github.base_ref }}
34+
fetch-depth: 1
35+
36+
- name: Setup Node.js
37+
uses: actions/setup-node@v4
38+
with:
39+
node-version: ${{ env.NODE_VERSION }}
40+
cache: 'npm'
41+
42+
- name: Install dependencies (base)
43+
run: npm ci
44+
45+
- name: Build Parse Server (base)
46+
run: npm run build
47+
48+
- name: Run baseline benchmarks
49+
id: baseline
50+
run: |
51+
echo "Checking if benchmark script exists..."
52+
if [ ! -f "benchmark/performance.js" ]; then
53+
echo "⚠️ Benchmark script not found in base branch - this is expected for new features"
54+
echo "Skipping baseline benchmark"
55+
echo '[]' > baseline.json
56+
echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt
57+
exit 0
58+
fi
59+
echo "Running baseline benchmarks..."
60+
npm run benchmark > baseline-output.txt 2>&1 && rc=0 || rc=$?
61+
echo "Benchmark command completed with exit code: $rc"
62+
echo "Output file size: $(wc -c < baseline-output.txt) bytes"
63+
echo "--- Begin baseline-output.txt ---"
64+
cat baseline-output.txt
65+
echo "--- End baseline-output.txt ---"
66+
# Extract JSON from output (everything between first [ and last ])
67+
sed -n '/^\[/,/^\]/p' baseline-output.txt > baseline.json || echo '[]' > baseline.json
68+
echo "Extracted JSON size: $(wc -c < baseline.json) bytes"
69+
echo "Baseline benchmark results:"
70+
cat baseline.json
71+
continue-on-error: true
72+
73+
- name: Save baseline results to temp location
74+
run: |
75+
mkdir -p /tmp/benchmark-results
76+
cp baseline.json /tmp/benchmark-results/ || echo '[]' > /tmp/benchmark-results/baseline.json
77+
cp baseline-output.txt /tmp/benchmark-results/ || echo 'No baseline output' > /tmp/benchmark-results/baseline-output.txt
78+
79+
- name: Upload baseline results
80+
uses: actions/upload-artifact@v4
81+
with:
82+
name: baseline-benchmark
83+
path: |
84+
/tmp/benchmark-results/baseline.json
85+
/tmp/benchmark-results/baseline-output.txt
86+
retention-days: 7
87+
88+
- name: Checkout PR branch
89+
uses: actions/checkout@v4
90+
with:
91+
ref: ${{ github.event.pull_request.head.sha }}
92+
fetch-depth: 1
93+
clean: true
94+
95+
- name: Restore baseline results
96+
run: |
97+
cp /tmp/benchmark-results/baseline.json ./ || echo '[]' > baseline.json
98+
cp /tmp/benchmark-results/baseline-output.txt ./ || echo 'No baseline output' > baseline-output.txt
99+
100+
- name: Setup Node.js (PR)
101+
uses: actions/setup-node@v4
102+
with:
103+
node-version: ${{ env.NODE_VERSION }}
104+
cache: 'npm'
105+
106+
- name: Install dependencies (PR)
107+
run: npm ci
108+
109+
- name: Build Parse Server (PR)
110+
run: npm run build
111+
112+
- name: Run PR benchmarks
113+
id: pr-bench
114+
run: |
115+
echo "Running PR benchmarks..."
116+
npm run benchmark > pr-output.txt 2>&1 && rc=0 || rc=$?
117+
echo "Benchmark command completed with exit code: $rc"
118+
echo "Output file size: $(wc -c < pr-output.txt) bytes"
119+
echo "--- Begin pr-output.txt ---"
120+
cat pr-output.txt
121+
echo "--- End pr-output.txt ---"
122+
# Extract JSON from output (everything between first [ and last ])
123+
sed -n '/^\[/,/^\]/p' pr-output.txt > pr.json || echo '[]' > pr.json
124+
echo "Extracted JSON size: $(wc -c < pr.json) bytes"
125+
echo "PR benchmark results:"
126+
cat pr.json
127+
continue-on-error: true
128+
129+
- name: Upload PR results
130+
uses: actions/upload-artifact@v4
131+
with:
132+
name: pr-benchmark
133+
path: |
134+
pr.json
135+
pr-output.txt
136+
retention-days: 7
137+
138+
- name: Verify benchmark files exist
139+
run: |
140+
echo "Checking for benchmark result files..."
141+
if [ ! -f baseline.json ] || [ ! -s baseline.json ]; then
142+
echo "⚠️ baseline.json is missing or empty, creating empty array"
143+
echo '[]' > baseline.json
144+
fi
145+
if [ ! -f pr.json ] || [ ! -s pr.json ]; then
146+
echo "⚠️ pr.json is missing or empty, creating empty array"
147+
echo '[]' > pr.json
148+
fi
149+
echo "baseline.json size: $(wc -c < baseline.json) bytes"
150+
echo "pr.json size: $(wc -c < pr.json) bytes"
151+
152+
- name: Store benchmark result (PR)
153+
uses: benchmark-action/github-action-benchmark@v1
154+
if: github.event_name == 'pull_request_target' && hashFiles('pr.json') != ''
155+
continue-on-error: true
156+
with:
157+
name: Parse Server Performance
158+
tool: 'customSmallerIsBetter'
159+
output-file-path: pr.json
160+
github-token: ${{ secrets.GITHUB_TOKEN }}
161+
auto-push: false
162+
save-data-file: false
163+
alert-threshold: '110%'
164+
comment-on-alert: true
165+
fail-on-alert: false
166+
alert-comment-cc-users: '@parse-community/maintainers'
167+
summary-always: true
168+
169+
- name: Compare benchmark results
170+
id: compare
171+
run: |
172+
node -e "
173+
const fs = require('fs');
174+
175+
let baseline, pr;
176+
try {
177+
baseline = JSON.parse(fs.readFileSync('baseline.json', 'utf8'));
178+
pr = JSON.parse(fs.readFileSync('pr.json', 'utf8'));
179+
} catch (e) {
180+
console.log('⚠️ Could not parse benchmark results');
181+
process.exit(0);
182+
}
183+
184+
// Handle case where baseline doesn't exist (new feature)
185+
if (!Array.isArray(baseline) || baseline.length === 0) {
186+
if (!Array.isArray(pr) || pr.length === 0) {
187+
console.log('⚠️ Benchmark results are empty or invalid');
188+
process.exit(0);
189+
}
190+
console.log('# Performance Benchmark Results\n');
191+
console.log('> ℹ️ Baseline not available - this appears to be a new feature\n');
192+
console.log('| Benchmark | Value | Details |');
193+
console.log('|-----------|-------|---------|');
194+
pr.forEach(result => {
195+
console.log(\`| \${result.name} | \${result.value.toFixed(2)} ms | \${result.extra} |\`);
196+
});
197+
console.log('');
198+
console.log('✅ **New benchmarks established for this feature.**');
199+
process.exit(0);
200+
}
201+
202+
if (!Array.isArray(pr) || pr.length === 0) {
203+
console.log('⚠️ PR benchmark results are empty or invalid');
204+
process.exit(0);
205+
}
206+
207+
console.log('# Performance Comparison\n');
208+
console.log('| Benchmark | Baseline | PR | Change | Status |');
209+
console.log('|-----------|----------|----|---------| ------ |');
210+
211+
let hasRegression = false;
212+
let hasImprovement = false;
213+
214+
baseline.forEach(baseResult => {
215+
const prResult = pr.find(p => p.name === baseResult.name);
216+
if (!prResult) {
217+
console.log(\`| \${baseResult.name} | \${baseResult.value.toFixed(2)} ms | N/A | - | ⚠️ Missing |\`);
218+
return;
219+
}
220+
221+
const baseValue = parseFloat(baseResult.value);
222+
const prValue = parseFloat(prResult.value);
223+
const change = ((prValue - baseValue) / baseValue * 100);
224+
const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
225+
226+
let status = '✅';
227+
if (change > 20) {
228+
status = '❌ Much Slower';
229+
hasRegression = true;
230+
} else if (change > 10) {
231+
status = '⚠️ Slower';
232+
hasRegression = true;
233+
} else if (change < -10) {
234+
status = '🚀 Faster';
235+
hasImprovement = true;
236+
}
237+
238+
console.log(\`| \${baseResult.name} | \${baseValue.toFixed(2)} ms | \${prValue.toFixed(2)} ms | \${changeStr} | \${status} |\`);
239+
});
240+
241+
console.log('');
242+
if (hasRegression) {
243+
console.log('⚠️ **Performance regressions detected.** Please review the changes.');
244+
} else if (hasImprovement) {
245+
console.log('🚀 **Performance improvements detected!** Great work!');
246+
} else {
247+
console.log('✅ **No significant performance changes.**');
248+
}
249+
" | tee comparison.md
250+
251+
- name: Upload comparison
252+
uses: actions/upload-artifact@v4
253+
with:
254+
name: benchmark-comparison
255+
path: comparison.md
256+
retention-days: 30
257+
258+
- name: Prepare comment body
259+
if: github.event_name == 'pull_request_target'
260+
run: |
261+
echo "## Performance Impact Report" > comment.md
262+
echo "" >> comment.md
263+
if [ -f comparison.md ]; then
264+
cat comparison.md >> comment.md
265+
else
266+
echo "⚠️ Could not generate performance comparison." >> comment.md
267+
fi
268+
echo "" >> comment.md
269+
echo "<details>" >> comment.md
270+
echo "<summary>📊 View detailed results</summary>" >> comment.md
271+
echo "" >> comment.md
272+
echo "### Baseline Results" >> comment.md
273+
echo "\`\`\`json" >> comment.md
274+
cat baseline.json >> comment.md
275+
echo "\`\`\`" >> comment.md
276+
echo "" >> comment.md
277+
echo "### PR Results" >> comment.md
278+
echo "\`\`\`json" >> comment.md
279+
cat pr.json >> comment.md
280+
echo "\`\`\`" >> comment.md
281+
echo "" >> comment.md
282+
echo "</details>" >> comment.md
283+
echo "" >> comment.md
284+
echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md
285+
286+
- name: Comment PR with results
287+
if: github.event_name == 'pull_request_target'
288+
uses: thollander/actions-comment-pull-request@v2
289+
continue-on-error: true
290+
with:
291+
filePath: comment.md
292+
comment_tag: performance-benchmark
293+
mode: recreate
294+
295+
- name: Generate job summary
296+
if: always()
297+
run: |
298+
if [ -f comparison.md ]; then
299+
cat comparison.md >> $GITHUB_STEP_SUMMARY
300+
else
301+
echo "⚠️ Benchmark comparison not available" >> $GITHUB_STEP_SUMMARY
302+
fi

CONTRIBUTING.md

Lines changed: 57 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,13 @@
2121
- [Good to Know](#good-to-know)
2222
- [Troubleshooting](#troubleshooting)
2323
- [Please Do's](#please-dos)
24-
- [TypeScript Tests](#typescript-tests)
24+
- [TypeScript Tests](#typescript-tests)
2525
- [Test against Postgres](#test-against-postgres)
2626
- [Postgres with Docker](#postgres-with-docker)
27+
- [Performance Testing](#performance-testing)
28+
- [Adding Tests](#adding-tests)
29+
- [Adding Benchmarks](#adding-benchmarks)
30+
- [Benchmark Guidelines](#benchmark-guidelines)
2731
- [Breaking Changes](#breaking-changes)
2832
- [Deprecation Policy](#deprecation-policy)
2933
- [Feature Considerations](#feature-considerations)
@@ -298,6 +302,58 @@ RUN chmod +x /docker-entrypoint-initdb.d/setup-dbs.sh
298302

299303
Note that the script above will ONLY be executed during initialization of the container with no data in the database, see the official [Postgres image](https://hub.docker.com/_/postgres) for details. If you want to use the script to run again be sure there is no data in the /var/lib/postgresql/data of the container.
300304

305+
### Performance Testing
306+
307+
Parse Server includes an automated performance benchmarking system that runs on every pull request to detect performance regressions and track improvements over time.
308+
309+
#### Adding Tests
310+
311+
You should consider adding performance benchmarks if your contribution:
312+
313+
- **Introduces a performance-critical feature**: Features that will be frequently used in production environments, such as new query operations, authentication methods, or data processing functions.
314+
- **Modifies existing critical paths**: Changes to core functionality like object CRUD operations, query execution, user authentication, file operations, or Cloud Code execution.
315+
- **Has potential performance impact**: Any change that affects database operations, network requests, data parsing, caching mechanisms, or algorithmic complexity.
316+
- **Optimizes performance**: If your PR specifically aims to improve performance, adding benchmarks helps verify the improvement and prevents future regressions.
317+
318+
#### Adding Benchmarks
319+
320+
Performance benchmarks are located in [`benchmark/performance.js`](benchmark/performance.js). To add a new benchmark:
321+
322+
1. **Identify the operation to benchmark**: Determine the specific operation you want to measure (e.g., a new query type, a new API endpoint).
323+
324+
2. **Create a benchmark function**: Follow the existing patterns in `benchmark/performance.js`:
325+
```javascript
326+
async function benchmarkNewFeature() {
327+
return measureOperation('Feature Name', async () => {
328+
// Your operation to benchmark
329+
const result = await someOperation();
330+
}, ITERATIONS);
331+
}
332+
```
333+
334+
3. **Add to benchmark suite**: Register your benchmark in the `runBenchmarks()` function:
335+
```javascript
336+
console.error('Running New Feature benchmark...');
337+
await cleanupDatabase();
338+
results.push(await benchmarkNewFeature());
339+
```
340+
341+
4. **Test locally**: Run the benchmarks locally to verify they work:
342+
```bash
343+
npm run benchmark:quick # Quick test with 10 iterations
344+
npm run benchmark # Full test with 100 iterations
345+
```
346+
347+
For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
348+
349+
#### Benchmark Guidelines
350+
351+
- **Keep benchmarks focused**: Each benchmark should test a single, well-defined operation.
352+
- **Use realistic data**: Test with data that reflects real-world usage patterns.
353+
- **Clean up between runs**: Use `cleanupDatabase()` to ensure consistent test conditions.
354+
- **Consider iteration count**: Use fewer iterations for expensive operations (see `ITERATIONS` environment variable).
355+
- **Document what you're testing**: Add clear comments explaining what the benchmark measures and why it's important.
356+
301357
## Breaking Changes
302358

303359
Breaking changes should be avoided whenever possible. For a breaking change to be accepted, the benefits of the change have to clearly outweigh the costs of developers having to adapt their deployments. If a breaking change is only cosmetic it will likely be rejected and preferred to become obsolete organically during the course of further development, unless it is required as part of a larger change. Breaking changes should follow the [Deprecation Policy](#deprecation-policy).

0 commit comments

Comments
 (0)