# Benchmarks workflow (perfcheck)
# PR #162: Add warm with lockfile benchmark to perfcheck

name: Benchmarks

# NOTE: `on` is a YAML 1.1 truthy key; GitHub's loader handles it, so it is
# left unquoted per Actions convention (suppress yamllint `truthy` here).
on:
  pull_request:
    branches: [main]
    paths-ignore:
      - 'documentation/**'
      - 'README.md'

# Cancel superseded runs for the same ref so only the latest PR push benchmarks.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  CARGO_TERM_COLOR: always

jobs:
  benchmark:
    name: Run benchmarks
    runs-on: ubuntu-latest
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4
        with:
          # Benchmark the PR head commit itself, not the synthetic merge commit.
          ref: ${{ github.event.pull_request.head.sha }}
          # Full history is required so merge-base with the base branch resolves.
          fetch-depth: 0

      - name: Find merge-base and prepare worktrees
        id: setup
        # SECURITY: the base ref is attacker-influenced text (a branch name).
        # Pass it via the environment and quote it in the script instead of
        # interpolating `${{ ... }}` directly into the shell, which would allow
        # command injection through a crafted branch name.
        env:
          BASE_REF: ${{ github.event.pull_request.base.ref }}
        run: |
          git fetch origin "$BASE_REF"
          MERGE_BASE=$(git merge-base HEAD "origin/$BASE_REF")
          echo "merge_base=$MERGE_BASE" >> "$GITHUB_OUTPUT"
          # Create worktrees for base and head so both can be built side by side
          git worktree add ../base "$MERGE_BASE"
          git worktree add ../head HEAD

      - name: Setup Rust toolchain
        # NOTE(review): unlike rust-cache below, this action is tag-pinned, not
        # SHA-pinned — consider pinning to a commit SHA for supply-chain safety.
        uses: dtolnay/rust-toolchain@stable

      - name: Rust Cache
        uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
        with:
          shared-key: benchmark
          # Cache the target dirs of both worktrees, not the checkout itself.
          workspaces: |
            ../base -> target
            ../head -> target

      - name: Install hyperfine
        run: |
          wget https://github.com/sharkdp/hyperfine/releases/download/v1.18.0/hyperfine_1.18.0_amd64.deb
          sudo dpkg -i hyperfine_1.18.0_amd64.deb

      - name: Build Yarn (base)
        working-directory: ../base
        run: cargo build --release -p zpm --bin yarn-bin

      - name: Build Yarn (head)
        working-directory: ../head
        run: cargo build --release -p zpm --bin yarn-bin

      - name: Start mock proxy
        working-directory: ../head
        # The proxy is backgrounded with `&`; GitHub-hosted runners keep the
        # process alive across subsequent steps of the same job. Both env vars
        # point later benchmark steps at the local proxy instead of npmjs.org.
        run: |
          ./target/release/yarn-bin debug mock-proxy -p 4873 https://registry.npmjs.org &
          echo "YARN_NPM_REGISTRY_SERVER=http://localhost:4873" >> "$GITHUB_ENV"
          echo "YARN_UNSAFE_HTTP_WHITELIST=localhost" >> "$GITHUB_ENV"

      - name: Run benchmark cold (base)
        working-directory: ../base
        run: ./target/release/yarn-bin debug bench gatsby install-full-cold

      - name: Run benchmark cold (head)
        working-directory: ../head
        run: ./target/release/yarn-bin debug bench gatsby install-full-cold

      - name: Run benchmark warm with lockfile (base)
        working-directory: ../base
        run: ./target/release/yarn-bin debug bench gatsby install-cache-and-lock

      - name: Run benchmark warm with lockfile (head)
        working-directory: ../head
        run: ./target/release/yarn-bin debug bench gatsby install-cache-and-lock

      - name: Generate benchmark comment
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');

            // Load the hyperfine JSON produced by the bench steps for one mode.
            const readResults = (mode) => {
              const base = JSON.parse(fs.readFileSync(`../base/bench-gatsby-${mode}.json`, 'utf8'));
              const head = JSON.parse(fs.readFileSync(`../head/bench-gatsby-${mode}.json`, 'utf8'));
              return {base, head};
            };

            const formatTime = (t) => `${t.toFixed(3)}s`;
            // Positive delta = slower (warn); negative = faster (ok).
            const formatDiff = (pct) => {
              const num = parseFloat(pct);
              if (num > 0) return `+${pct}% ⚠️`;
              if (num < 0) return `${pct}% βœ…`;
              return `${pct}%`;
            };
            const formatPct = (base, head) => (((head - base) / base) * 100).toFixed(2);

            const benches = [
              {mode: 'install-full-cold', label: 'gatsby install-full-cold'},
              {mode: 'install-cache-and-lock', label: 'gatsby install-cache-and-lock (warm, with lockfile)'},
            ];

            // Merge all raw results into one artifact, relabelling each command
            // with its side ([base]/[head]) and mode for downstream tooling.
            const mergedResults = {
              benchmarks: benches.map(({mode, label}) => {
                const {base, head} = readResults(mode);
                base.results.forEach(r => {
                  r.command = `[base][${mode}] ${r.command.split('/').pop()}`;
                });
                head.results.forEach(r => {
                  r.command = `[head][${mode}] ${r.command.split('/').pop()}`;
                });
                return {
                  benchmark: label,
                  mode,
                  results: [...base.results, ...head.results],
                };
              }),
            };
            fs.writeFileSync('bench-merged.json', JSON.stringify(mergedResults, null, 2));

            // Render one markdown section per benchmark mode. Leading indent in
            // these template literals is harmless: the final comment is
            // re-trimmed line-by-line below.
            const renderSection = ({mode, label}) => {
              const {base, head} = readResults(mode);
              const baseStats = base.results[0];
              const headStats = head.results[0];
              const meanPct = formatPct(baseStats.mean, headStats.mean);
              const medianPct = formatPct(baseStats.median, headStats.median);
              return `### ${label}
            | Metric | Base | Head | Difference |
            |--------|------|------|------------|
            | **Mean** | ${formatTime(baseStats.mean)} | ${formatTime(headStats.mean)} | ${formatDiff(meanPct)} |
            | **Median** | ${formatTime(baseStats.median)} | ${formatTime(headStats.median)} | ${formatDiff(medianPct)} |
            | **Min** | ${formatTime(baseStats.min)} | ${formatTime(headStats.min)} | |
            | **Max** | ${formatTime(baseStats.max)} | ${formatTime(headStats.max)} | |
            | **Std Dev** | ${formatTime(baseStats.stddev)} | ${formatTime(headStats.stddev)} | |
            <details>
            <summary>πŸ“Š Raw benchmark data (${label})</summary>
            **Base times:** ${baseStats.times.map(t => formatTime(t)).join(', ')}
            **Head times:** ${headStats.times.map(t => formatTime(t)).join(', ')}
            </details>`;
            };

            const sections = benches.map(renderSection).join('\n\n---\n\n');
            const comment = `## ⏱️ Benchmark Results
            ${sections}
            `.split('\n').map(l => l.trim()).join('\n');

            // Save comment body and PR number for the perfcheck-comment workflow
            fs.mkdirSync('perfcheck-comment', { recursive: true });
            fs.writeFileSync('perfcheck-comment/comment-body.md', comment);
            fs.writeFileSync('perfcheck-comment/pr-number.txt', String(context.issue.number));

      - name: Upload comment body artifact
        uses: actions/upload-artifact@v4
        with:
          name: perfcheck-comment
          path: perfcheck-comment/

      - name: Upload merged benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: bench-merged.json