name: "Ceno Benchmark v2"
on:
workflow_dispatch:
inputs:
ceno_version:
description: "Ceno version (commit sha) to benchmark"
required: true
type: string
block_number:
description: "Block number to generate input for"
required: true
type: string
default: "23100006"
rerun_keygen:
description: "Rerun keygen"
required: false
type: boolean
recompile_reth:
description: "Recompile reth program"
required: false
type: boolean
reth_version:
description: "The git commit or branch of Reth program to compile from"
required: false
type: string
default: ""
      cleanup:
        description: "Cleanup after benchmark"
        required: false
        type: boolean
        default: true
jobs:
  benchmark:
    runs-on: [self-hosted, x64, linux, gpu]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Setup Rust
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          toolchain: nightly-2025-08-18 # keep in sync with the ceno repo
          components: rust-src
          cache: false
      - name: Give GitHub Actions access to ceno-gpu
        uses: webfactory/[email protected]
        with:
          ssh-private-key: |
            ${{ secrets.SECRET_FOR_CENO_GPU }}
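      # Loading this key lets the build below fetch what is assumed to be the
      # private ceno-gpu dependency over SSH (cargo is invoked with
      # net.git-fetch-with-cli=true further down).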
      - name: Run benchmark locally
        run: |
          block_number="${{ inputs.block_number }}"
          # Install Ceno CLI
          echo "Installing Ceno CLI..."
          cargo install --git https://github.com/scroll-tech/ceno.git \
            --rev ${{ inputs.ceno_version }} \
            --features jemalloc \
            --features nightly-features \
            --locked \
            --force \
            cargo-ceno
          RPC_1="${{ secrets.RPC_URL_1 }}"
          # Build client binary
          cd bin/ceno-client-eth
          cargo ceno build --release
          cd ../..
          # Create necessary directories
          mkdir -p output
          mkdir -p rpc-cache
          # LOG_NAME combines the block number and a timestamp (UTC+8)
          TIMESTAMP=$(date -u -d '+8 hours' +'%Y%m%d-%H%M%S')
          LOG_NAME="mainnet${block_number}-${TIMESTAMP}"
          LOG_FILE="${LOG_NAME}.log"
          echo "LOG_NAME=${LOG_NAME}" >> $GITHUB_ENV
          echo "LOG_FILE=${LOG_FILE}" >> $GITHUB_ENV
          echo "Generating e2e proof for block $block_number..."
          export JEMALLOC_SYS_WITH_MALLOC_CONF="retain:true,background_thread:true,metadata_thp:always,thp:always,dirty_decay_ms:10000,muzzy_decay_ms:10000,abort_conf:true"
          CENO_GPU_CACHE_LEVEL=0 RUSTFLAGS="-C target-feature=+avx2" \
          RUST_LOG=info,openvm_stark_*=warn,openvm_cuda_common=warn \
          cargo run --features "jemalloc,gpu" --config net.git-fetch-with-cli=true \
            --release --bin ceno-reth-benchmark-bin -- \
            --mode prove-stark \
            --block-number "$block_number" \
            --rpc-url "$RPC_1" \
            --output-dir output \
            --cache-dir rpc-cache \
            2>&1 | tee "$LOG_FILE"
          echo "e2e proof generated successfully"
      - name: Profile trace from log file
        run: |
          echo "Profiling log file: ${{ env.LOG_FILE }}"
          python3 ci/trace_profiler.py "${{ env.LOG_FILE }}"
      - name: Prepare Result Directory
        run: |
          # Create a temp directory outside the workspace to survive the
          # gh-pages checkout below
          TEMP_DIR="/tmp/benchmark_results"
          rm -rf "$TEMP_DIR"
          mkdir -p "$TEMP_DIR"
          # Copy profiler output to the temp directory
          cp "${{ env.LOG_NAME }}_breakdown_chip.md" "$TEMP_DIR/"
          cp "${{ env.LOG_NAME }}_breakdown_module.md" "$TEMP_DIR/"
          cp "${{ env.LOG_NAME }}_summary.md" "$TEMP_DIR/"
          echo "TEMP_DIR=$TEMP_DIR" >> $GITHUB_ENV
      ### Update gh-pages
      - uses: actions/checkout@v4
        with:
          ref: gh-pages
      - name: Set up git
        run: |
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
      - name: Commit to gh-pages branch
        run: |
          GH_PAGES_PATH="benchmarks-dispatch/${{ github.head_ref || github.ref }}"
          echo "GH_PAGES_PATH=${GH_PAGES_PATH}" >> $GITHUB_ENV
          mkdir -p "${GH_PAGES_PATH}"
          # Copy files from temp directory back to workspace
          cp -r ${{ env.TEMP_DIR }}/* "${GH_PAGES_PATH}/"
          git add "${GH_PAGES_PATH}/"
          git commit --allow-empty -m "Add benchmark results for ${{ env.LOG_NAME }} at ${GH_PAGES_PATH}"
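          # Fetch and merge before every push attempt so concurrent benchmark
          # runs writing to gh-pages don't cause non-fast-forward rejections.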
          # Push with retry logic
          MAX_RETRIES=10
          RETRY_DELAY=5
          ATTEMPT=0
          SUCCESS=false
          while [ $ATTEMPT -lt $MAX_RETRIES ]; do
            echo "Push attempt $((ATTEMPT + 1)) of $MAX_RETRIES..."
            git fetch origin gh-pages
            git merge origin/gh-pages --no-edit
            if git push origin gh-pages; then
              SUCCESS=true
              break
            else
              echo "Push failed. Retrying in $RETRY_DELAY seconds..."
              sleep $RETRY_DELAY
              ATTEMPT=$((ATTEMPT + 1))
            fi
          done
          if [ "$SUCCESS" = false ]; then
            echo "Push failed after $MAX_RETRIES attempts"
            exit 1
          fi
      - name: Update summary with results
        run: |
          RESULT_URL="https://github.com/${{ github.repository }}/blob/gh-pages/${{ env.GH_PAGES_PATH }}/${{ env.LOG_NAME }}_summary.md"
          echo "### Results" >> $GITHUB_STEP_SUMMARY
          echo "[${{ env.LOG_NAME }}](${RESULT_URL})" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          cat "${{ env.GH_PAGES_PATH }}/${{ env.LOG_NAME }}_summary.md" >> $GITHUB_STEP_SUMMARY
      - name: Cleanup Temp Directory
        if: always()
        run: |
          if [ -d "/tmp/benchmark_results" ]; then
            echo "Cleaning up /tmp/benchmark_results..."
            rm -rf "/tmp/benchmark_results"
          fi
      - name: Path to result
        run: |
          echo "https://github.com/${{ github.repository }}/blob/gh-pages/${{ env.GH_PAGES_PATH }}/${{ env.LOG_NAME }}_summary.md"
# fi
#
# # Verify input file exists
# if [ ! -f "input.json" ]; then
# echo "Error: input.json file not found!"
# exit 1
# fi
#
# echo "Input file ready: $(ls -lh input.json)"
#
# - name: Prepare benchmark
# id: prepare
# run: |
# # Build JSON payload using jq for proper JSON construction
# json_payload=$(jq -n \
# --arg openvm_commit "${{ inputs.openvm_version }}" \
# '{openvm_commit: $openvm_commit}')
#
# if [ "${{ inputs.rerun_keygen }}" = "true" ]; then
# json_payload=$(echo "$json_payload" | jq '. + {rekeygen: true}')
# fi
#
# if [ "${{ inputs.recompile_reth }}" = "true" ]; then
# json_payload=$(echo "$json_payload" | jq '. + {recompile: true}')
# fi
#
# echo "Final JSON payload: $json_payload"
#
# response=$(curl -X POST \
# -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# -H "Content-Type: application/json" \
# -d "$json_payload" \
# https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs)
# echo "Response: $response"
# benchmark_id=$(echo "$response" | jq -r '.id')
# echo "benchmark_id=$benchmark_id" >> $GITHUB_OUTPUT
#
# - name: Wait for benchmark preparation
# run: |
# benchmark_id="${{ steps.prepare.outputs.benchmark_id }}"
# echo "Waiting for benchmark $benchmark_id to be ready..."
#
# max_iterations=80 # 40min
# iteration=0
#
# while [ $iteration -lt $max_iterations ]; do
# response=$(curl -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs/$benchmark_id)
# echo "Response: $response"
#
# status=$(echo "$response" | jq -r '.status')
# echo "Status: $status (iteration $((iteration + 1))/$max_iterations)"
#
# if [ "$status" = "ready" ]; then
# echo "Benchmark is ready!"
# break
# fi
#
# if [ "$status" = "failed" ]; then
# echo "Benchmark failed!"
# exit 1
# fi
#
# iteration=$((iteration + 1))
#
# if [ $iteration -lt $max_iterations ]; then
# echo "Waiting 30 seconds before next check..."
# sleep 30
# fi
# done
#
# if [ $iteration -eq $max_iterations ]; then
# echo "Timeout: Benchmark preparation did not complete within 10 minutes (20 iterations)"
# exit 1
# fi
#
# - name: prove
# id: prove
# run: |
# benchmark_id="${{ steps.prepare.outputs.benchmark_id }}"
# echo "Getting program_uuid for benchmark $benchmark_id..."
#
# sleep 180 # wait 3 min to make sure the proving service is ready
#
# response=$(curl -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs/$benchmark_id)
# echo "Response: $response"
#
# program_uuid=$(echo "$response" | jq -r '.program_uuid')
# echo "Program UUID: $program_uuid"
#
# echo "Submitting proof with JSON data..."
# response=$(curl -X POST \
# -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# -H "Content-Type: application/json" \
# -d @input.json \
# "https://api.staging.app.axiom.xyz/v1/proofs?program_id=$program_uuid")
# echo "Response: $response"
# proof_id=$(echo "$response" | jq -r '.id')
# echo "proof_id=$proof_id" >> $GITHUB_OUTPUT
#
# - name: Wait for proof
# run: |
# proof_id="${{ steps.prove.outputs.proof_id }}"
# echo "Waiting for proof $proof_id to complete..."
#
# max_iterations=20 # 10min
# iteration=0
#
# while [ $iteration -lt $max_iterations ]; do
# response=$(curl -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# https://api.staging.app.axiom.xyz/v1/proofs/$proof_id)
# echo "Response: $response"
#
# status=$(echo "$response" | jq -r '.state')
# echo "Status: $status (iteration $((iteration + 1))/$max_iterations)"
#
# if [ "$status" = "Succeeded" ] || [ "$status" = "Failed" ]; then
# echo "Proof completed with status: $status"
# break
# fi
#
# iteration=$((iteration + 1))
#
# if [ $iteration -lt $max_iterations ]; then
# echo "Waiting 30 seconds before next check..."
# sleep 30
# fi
# done
#
# if [ $iteration -eq $max_iterations ]; then
# echo "Timeout: Proof did not complete within 10 minutes (20 iterations)"
# echo "WORKFLOW_FAILED=true" >> $GITHUB_ENV
# fi
#
# - name: Cleanup
# if: ${{ inputs.cleanup == true }}
# run: |
# benchmark_id="${{ steps.prepare.outputs.benchmark_id }}"
# echo "Deleting benchmark $benchmark_id..."
# response=$(curl -X DELETE \
# -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# "https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs/$benchmark_id")
# echo "Response: $response"
#
# - name: Download and display metrics
# run: |
# if [ "$WORKFLOW_FAILED" = "true" ]; then
# echo "skipping metrics download"
# else
# proof_id="${{ steps.prove.outputs.proof_id }}"
# echo "Downloading metrics for proof $proof_id..."
#
# max_iterations=10 # 5 minutes total
# iteration=0
#
# while [ $iteration -lt $max_iterations ]; do
# echo "Attempting to download metrics (attempt $((iteration + 1))/$max_iterations)..."
#
# response_code=$(curl -w "%{http_code}" -s -H "Axiom-API-Key: ${{ secrets.PROVING_SERVICE_API_KEY }}" \
# "https://api.staging.app.axiom.xyz/v1/internal/benchmark_metrics/$proof_id" \
# -o metrics.md)
#
# echo "HTTP response code: $response_code"
#
# if [ "$response_code" = "200" ]; then
# echo "Metrics downloaded successfully!"
# break
# elif [ "$response_code" = "404" ]; then
# echo "Metrics not ready yet (404), waiting 30 seconds before retry..."
# rm -f metrics.md # Clean up partial file
# iteration=$((iteration + 1))
#
# if [ $iteration -lt $max_iterations ]; then
# sleep 30
# fi
# else
# echo "Unexpected response code: $response_code"
# rm -f metrics.md # Clean up partial file
# echo "METRICS_FAILED=true" >> $GITHUB_ENV
# break
# fi
# done
#
# if [ $iteration -eq $max_iterations ]; then
# echo "Timeout: Metrics were not available after 5 minutes"
# echo "METRICS_FAILED=true" >> $GITHUB_ENV
# elif [ "$response_code" = "200" ]; then
# echo "Metrics downloaded to metrics.md"
# echo "=== BENCHMARK METRICS ==="
# cat metrics.md
# echo "========================="
# fi
# fi
#
# - name: Upload metrics as artifact
# if: env.WORKFLOW_FAILED != 'true' && env.METRICS_FAILED != 'true'
# uses: actions/upload-artifact@v4
# with:
# name: benchmark-metrics-${{ inputs.openvm_version }}
# path: metrics.md
# retention-days: 30
#
# - name: Check workflow status
# run: |
# if [ "$WORKFLOW_FAILED" = "true" ]; then
# echo "Workflow failed due to timeout or proof failure"
# exit 1
# else
# echo "Workflow completed successfully"
# fi
#