Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 83 additions & 0 deletions .github/workflows/benchmark-diff-comment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
name: Benchmark Diff on PR

on:
  pull_request:
    # Trigger only on PRs targeting main or release branches
    # Note: This event does NOT get secrets when PRs come from forks
    types: [opened, synchronize]
    branches:
      - main
      - 'release-v*'

# Cancel a superseded run for the same PR so two `synchronize` events never
# race each other when creating/updating the benchmark comment.
concurrency:
  group: benchmark-diff-${{ github.event.pull_request.number }}
  cancel-in-progress: true

permissions:
  contents: read
  pull-requests: write

jobs:
  comment-benchmark-diff:
    # Extra guard: only run on PRs from branches within this repo (not forks)
    if: github.event.pull_request.head.repo.fork == false
    runs-on: ubuntu-latest

    steps:
      # 1. Check out the PR branch so we can build and benchmark it
      - name: Checkout repo
        uses: actions/checkout@v4

      # 2. Extract the required Scarb version from Scarb.toml and save to env
      - name: Extract scarb version
        run: |
          SCARB_VERSION=$(grep 'scarb-version = ' Scarb.toml | sed 's/scarb-version = "\(.*\)"/\1/')
          echo "SCARB_VERSION=$SCARB_VERSION" >> "$GITHUB_ENV"

      # 3. Install the correct Scarb version for this project
      - name: Setup scarb
        uses: software-mansion/setup-scarb@v1
        id: setup_scarb
        with:
          scarb-version: ${{ env.SCARB_VERSION }}

      # 4. Build mocks (needed dependencies for benchmarking)
      - name: Build mocks
        run: scarb --release build -p openzeppelin_test_common

      # 5. Run the benchmark script and capture the diff in Markdown format
      - name: Run benchmark and capture diff
        id: benchmark_diff
        run: |
          python3 scripts/benchmarking/benchmark_diff.py \
            scripts/benchmarking/benchmark.py \
            benches/contract_sizes.json \
            --dir target/release \
            --markdown > diff_output.txt

      # 6. Prepare the comment body that will be posted on the PR.
      # The HTML comment on the first line is an invisible marker used to
      # find this exact comment again on later runs.
      - name: Prepare benchmark comment
        run: |
          {
            echo "<!-- comment-id:benchmark-diff -->"
            echo "### 🧪 Cairo Contract Size Benchmark Diff"
            echo
            cat diff_output.txt
            echo
            echo "_This comment was generated automatically from benchmark diffs._"
          } > comment.md

      # 7. Look for an existing benchmark comment on the PR (from this bot).
      # Match the full hidden marker, not just the substring "benchmark-diff" —
      # a substring match could hit an unrelated human comment and overwrite it.
      - name: Find comment to update
        uses: peter-evans/find-comment@v3
        id: get_comment
        with:
          issue-number: ${{ github.event.pull_request.number }}
          comment-author: 'github-actions[bot]'
          body-includes: '<!-- comment-id:benchmark-diff -->'

      # 8. Create or update the PR comment with the new benchmark diff
      - name: Post benchmark diff comment
        uses: peter-evans/create-or-update-comment@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.pull_request.number }}
          comment-id: ${{ steps.get_comment.outputs.comment-id }}
          edit-mode: replace
          body-file: comment.md
75 changes: 75 additions & 0 deletions .github/workflows/benchmark-diff-pr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
name: Update contract sizes benchmark

on:
  pull_request:
    types:
      - closed  # Trigger when a PR is closed (merged OR just closed without merge)

permissions:
  contents: write       # Needed to push changes / create PRs
  pull-requests: write  # Needed to open a new PR

jobs:
  create-pr-if-changed:
    # Run only if:
    #   - PR is NOT from a fork
    #   - PR was merged (not just closed)
    #   - Base branch of the PR is main
    if: |
      github.event.pull_request.head.repo.fork == false &&
      github.event.pull_request.merged == true &&
      github.event.pull_request.base.ref == 'main'
    runs-on: ubuntu-latest

    steps:
      # 1. Check out the repository at the merge commit
      - name: Checkout repository
        uses: actions/checkout@v4

      # 2. Use Rust cache (speeds up builds in repeated runs)
      - uses: Swatinem/rust-cache@v2

      # 3. Extract the required Scarb version from Scarb.toml and save to env
      - name: Extract scarb version
        run: |
          SCARB_VERSION=$(grep 'scarb-version = ' Scarb.toml | sed 's/scarb-version = "\(.*\)"/\1/')
          echo "SCARB_VERSION=$SCARB_VERSION" >> "$GITHUB_ENV"

      # 4. Install the right Scarb version
      - name: Setup scarb
        uses: software-mansion/setup-scarb@v1
        id: setup_scarb
        with:
          scarb-version: ${{ env.SCARB_VERSION }}

      # 5. Build mocks (needed for contract size benchmarking)
      - name: Build mocks
        run: scarb --release build -p openzeppelin_test_common

      # 6. Run the benchmark script to regenerate benches/contract_sizes.json
      - name: Update benchmark
        run: |
          python3 ./scripts/benchmarking/benchmark.py --json --dir target/release > benches/contract_sizes.json

      # 7. Check whether the regenerated file differs from the committed one.
      # NOTE: diff against HEAD, not origin/main — actions/checkout defaults
      # to fetch-depth: 1 and never fetches the main branch ref, so
      # `git diff origin/main` would fail with "unknown revision" and abort
      # the step under the shell's `-e` flag.
      - name: Check if file changed
        id: check_diff
        run: |
          if git diff --quiet HEAD -- benches/contract_sizes.json; then
            echo "changed=false" >> "$GITHUB_OUTPUT"
          else
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

      # 8. If benchmark file changed, open a PR with the updated file
      - name: Create Pull Request with benchmark update
        if: steps.check_diff.outputs.changed == 'true'
        uses: peter-evans/create-pull-request@v6
        with:
          commit-message: Update contract sizes benchmark
          title: Update contract sizes benchmark
          body: |
            This PR updates the contract size benchmarks after a recent merge to `main`.
          # Commit only the benchmark file, never incidental build-output changes
          add-paths: benches/contract_sizes.json
          branch: update/contract-sizes-${{ github.run_id }}
          base: main
          token: ${{ secrets.GITHUB_TOKEN }}
13 changes: 13 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Default paths (can be overridden via command-line args)
BENCHMARK_SCRIPT ?= scripts/benchmarking/benchmark.py
DIFF_SCRIPT ?= scripts/benchmarking/benchmark_diff.py
PREVIOUS_JSON ?= benches/contract_sizes.json
TARGET_DIR ?= target/release

# Neither target creates a file named after itself; declare them phony so
# make always runs the recipe (a stray file or directory literally named
# `diff` would otherwise satisfy the target and silently skip it).
.PHONY: diff diff-md

# Run benchmark diff (normal output)
diff:
	python3 $(DIFF_SCRIPT) $(BENCHMARK_SCRIPT) $(PREVIOUS_JSON) --dir $(TARGET_DIR)

# Run benchmark diff with Markdown output
diff-md:
	python3 $(DIFF_SCRIPT) $(BENCHMARK_SCRIPT) $(PREVIOUS_JSON) --dir $(TARGET_DIR) --markdown
4 changes: 4 additions & 0 deletions benches/contract_sizes.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"bytecode": {},
"contract_class": {}
}
2 changes: 1 addition & 1 deletion packages/test_common/Scarb.toml
Original file line number Diff line number Diff line change
Expand Up @@ -37,4 +37,4 @@ openzeppelin_utils = { path = "../utils" }
[[target.starknet-contract]]
allowed-libfuncs-list.name = "experimental"
sierra = true
casm = false
casm = true # Required for benchmarking
104 changes: 104 additions & 0 deletions scripts/benchmarking/benchmark.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
import os
import json
import sys
import argparse

# ANSI color codes (no external dependencies)
RESET = "\033[0m"
BOLD = "\033[1m"
YELLOW = "\033[33m"
GREEN = "\033[32m"
RED = "\033[31m"
CYAN = "\033[36m"

# Set the path to your Scarb release output, e.g., "target/release".
# Used only as the default for the --dir CLI flag below.
TARGET_DIR = "target/release"

# Keys for the JSON output (top-level sections of the results dict)
BYTECODE_KEY = "bytecode"
CONTRACT_CLASS_KEY = "contract_class"

def try_get_name(filename):
    """
    Extract a contract name from an artifact filename.

    The name is the substring starting at the first uppercase letter and
    ending just before the next '.' (or at the end of the string if there
    is no dot). Falls back to the full filename when it contains no
    uppercase letter.
    """
    first_upper = next(
        (index for index, char in enumerate(filename) if char.isupper()), None
    )
    if first_upper is None:
        return filename
    dot = filename.find('.', first_upper)
    return filename[first_upper:] if dot == -1 else filename[first_upper:dot]


def get_bytecode_size(json_path):
    """
    Return the CASM bytecode length, in felts, of a compiled contract class.

    Reads the JSON artifact at ``json_path`` and counts the entries of its
    top-level bytecode array (missing key counts as 0).
    """
    with open(json_path, "r") as handle:
        contract = json.load(handle)
    return len(contract.get(BYTECODE_KEY, []))


def get_sierra_contract_class_size(json_path):
num_bytes = os.path.getsize(json_path)
return num_bytes


def benchmark_contracts(target_dir):
    """
    Measure every contract artifact in ``target_dir``.

    Returns a dict with two sections: CASM bytecode sizes (in felts) for
    ``*.compiled_contract_class.json`` files, and Sierra contract class
    sizes (in bytes) for ``*.contract_class.json`` files. Files that fail
    to parse are recorded under an ``"error"`` key instead of crashing the
    whole run (deliberate best-effort).
    """
    results = {BYTECODE_KEY: {}, CONTRACT_CLASS_KEY: {}}

    def record(section, filename, measure):
        # Measure one artifact, capturing any failure as an error entry.
        path = os.path.join(target_dir, filename)
        try:
            results[section][filename] = measure(path)
        except Exception as exc:
            results[section][filename] = {"error": str(exc)}

    for entry in os.listdir(target_dir):
        # Order matters: ".compiled_contract_class.json" also ends with
        # ".contract_class.json", so the CASM check must come first.
        if entry.endswith(".compiled_contract_class.json"):
            record(BYTECODE_KEY, entry, lambda p: {"felts": get_bytecode_size(p)})
        elif entry.endswith(".contract_class.json"):
            record(
                CONTRACT_CLASS_KEY,
                entry,
                lambda p: {"bytes": get_sierra_contract_class_size(p)},
            )
    return results


def print_benchmark_results(results):
    """
    Pretty-print the benchmark results dict with ANSI colors.

    Prints one colorized line per artifact in each of the two sections;
    entries that carry an "error" key are printed in red instead.
    """
    print(f"{BOLD}{CYAN}CASM bytecode sizes:{RESET}")
    for filename, entry in results[BYTECODE_KEY].items():
        if "felts" in entry:
            label = f"{BOLD}{YELLOW}{try_get_name(filename)}{RESET}"
            print(f"{label}: {BOLD}{GREEN}{entry['felts']} felts{RESET}")
        else:
            print(f"{RED}Error processing {filename}: {entry['error']}{RESET}")

    print(f"\n{BOLD}{CYAN}Sierra contract class sizes:{RESET}")
    for filename, entry in results[CONTRACT_CLASS_KEY].items():
        if "bytes" in entry:
            label = f"{BOLD}{YELLOW}{try_get_name(filename)}{RESET}"
            size = entry["bytes"]
            print(f"{label}: {BOLD}{GREEN}{size} bytes{RESET} ({size/1024:.2f} KB)")
        else:
            print(f"{RED}Error processing {filename}: {entry['error']}{RESET}")


if __name__ == "__main__":
    # CLI entry point: benchmark artifacts under --dir, emitting either
    # machine-readable JSON (--json) or a colorized human report.
    cli = argparse.ArgumentParser(description="Benchmark Cairo contract artifact sizes.")
    cli.add_argument("--json", action="store_true", help="Output results as JSON.")
    cli.add_argument("--dir", type=str, default=TARGET_DIR, help="Target directory (default: target/release)")
    options = cli.parse_args()

    sizes = benchmark_contracts(options.dir)
    if options.json:
        print(json.dumps(sizes, indent=2))
    else:
        print(f"{BOLD}Benchmarking CASM and Sierra contract class sizes in: {options.dir}\n{RESET}")
        print_benchmark_results(sizes)
Loading
Loading