diff --git a/.cargo/config.toml b/.cargo/config.toml
index de55dd2065a..cb6abcad984 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -6,7 +6,7 @@ TABLEGEN_190_PREFIX = "/usr/lib/llvm-19/"
# Use `lld` for linking instead of `ld`, since we run out of memory while linking with `ld` on
# 16-cores linux machines, see:
# https://nnethercote.github.io/perf-book/build-configuration.html#linking.
-# TODO: remove this once `rust` stabilizes `lld` as the default linker, currently only on nightly:
+# TODO(Gilad): remove this once `rust` stabilizes `lld` as the default linker, currently only on nightly:
# https://github.com/rust-lang/rust/issues/39915#issuecomment-618726211
[target.'cfg(all(target_os = "linux"))']
rustflags = ["-Clink-arg=-fuse-ld=lld"]
diff --git a/.github/actions/bootstrap/action.yml b/.github/actions/bootstrap/action.yml
index f5a1468c523..e30a67e129c 100644
--- a/.github/actions/bootstrap/action.yml
+++ b/.github/actions/bootstrap/action.yml
@@ -5,6 +5,9 @@ inputs:
  extra_rust_toolchains:
    description: "Extra toolchains to install, but aren't used by default"
    required: false
+  github_token:
+    description: "GitHub token to use for authentication"
+    required: false

runs:
  using: "composite"
@@ -13,5 +16,6 @@ runs:
    uses: ./.github/actions/install_rust
    with:
      extra_rust_toolchains: ${{ inputs.extra_rust_toolchains }}
+      github_token: ${{ inputs.github_token }}
  - name: Install cairo native.
    uses: ./.github/actions/setup_native_deps
diff --git a/.github/actions/install_rust/action.yml b/.github/actions/install_rust/action.yml
index 4d123f39924..2ea1ac342cc 100644
--- a/.github/actions/install_rust/action.yml
+++ b/.github/actions/install_rust/action.yml
@@ -5,11 +5,15 @@ inputs:
  extra_rust_toolchains:
    description: "Extra toolchains to install, but aren't used by default"
    required: false
+  github_token:
+    description: "GitHub token to use for authentication"
+    required: false

runs:
  using: "composite"
  steps:
    - uses: moonrepo/setup-rust@v1
+      name: Install Rust toolchain and binaries
      with:
        cache-base: main(-v[0-9].*)?
        inherit-toolchain: true
@@ -18,3 +22,13 @@ runs:
          channel: ${{ inputs.extra_rust_toolchains }}
      env:
        RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
+        GITHUB_TOKEN: ${{ inputs.github_token }}
+
+    # This installation is _not_ cached, but takes a couple of seconds: it's downloading prepackaged
+    # binaries.
+    # TODO(Gilad): once we migrate to a cached Docker image, we can remove this step and just
+    # install it during dependencies.sh (which we don't do now since dependencies.sh isn't cached).
+    - name: Install Anvil
+      uses: foundry-rs/foundry-toolchain@v1
+      with:
+        version: v0.3.0
diff --git a/.github/workflows/blockifier_ci.yml b/.github/workflows/blockifier_ci.yml
index 126ef834091..cd1617d8766 100644
--- a/.github/workflows/blockifier_ci.yml
+++ b/.github/workflows/blockifier_ci.yml
@@ -22,14 +22,16 @@ on:
    paths:
      # Other than code-related changes, all changes related to the native-blockifier build-and-push
      # process should trigger the build (e.g., changes to the Dockerfile, build scripts, etc.).
+ - '.github/actions/bootstrap/action.yml' - '.github/workflows/blockifier_ci.yml' - '.github/workflows/upload_artifacts_workflow.yml' - 'build_native_in_docker.sh' - 'Cargo.lock' - 'Cargo.toml' - 'crates/blockifier/**' + - 'crates/blockifier_test_utils/**' - 'crates/native_blockifier/**' - - 'crates/starknet_sierra_multicompile/build.rs' + - 'crates/apollo_sierra_multicompile/build.rs' - 'scripts/build_native_blockifier.sh' - 'scripts/dependencies.sh' - 'scripts/install_build_tools.sh' @@ -48,19 +50,46 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: - feature-combo-builds: + test-without-features: runs-on: starkware-ubuntu-24.04-medium steps: - uses: actions/checkout@v4 - with: - fetch-depth: 0 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # No features - build blockifier without features activated by dependencies in the workspace. - - run: cargo build -p blockifier - run: cargo test -p blockifier + - run: cargo build -p blockifier + + test-with-transaction-serde-feature: + runs-on: starkware-ubuntu-24.04-medium + steps: + - uses: actions/checkout@v4 + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # transaction_serde is not activated by any workspace crate; test the build. - - run: cargo build -p blockifier --features transaction_serde - run: cargo test -p blockifier --features transaction_serde + - run: cargo build -p blockifier --features transaction_serde + + test-with-cairo-native-feature: + runs-on: starkware-ubuntu-24.04-medium + steps: + - uses: actions/checkout@v4 + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # cairo_native is not activated by any workspace crate; test the build. - run: cargo build -p blockifier --features cairo_native - run: cargo test -p blockifier --features cairo_native + + test-with-tracing-feature: + runs-on: starkware-ubuntu-24.04-medium + steps: + - uses: actions/checkout@v4 + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + # tracing is not activated by any workspace crate; test the build. + - run: cargo build -p blockifier --features tracing + - run: cargo test -p blockifier --features tracing diff --git a/.github/workflows/blockifier_compiled_cairo.yml b/.github/workflows/blockifier_compiled_cairo.yml index fd778b0d1f5..f8f98d7fa9b 100644 --- a/.github/workflows/blockifier_compiled_cairo.yml +++ b/.github/workflows/blockifier_compiled_cairo.yml @@ -7,12 +7,10 @@ on: - reopened - synchronize paths: - - 'Cargo.toml' - '.github/workflows/blockifier_compiled_cairo.yml' - - 'crates/blockifier/feature_contracts/**' - - 'crates/blockifier/src/test_utils/cairo_compile.rs' - - 'crates/blockifier/tests/feature_contracts_compatibility_test.rs' - - 'crates/blockifier/tests/requirements.txt' + - 'crates/apollo_compile_to_casm/src/constants.rs' # Contains the Cairo1 compiler version. + - 'crates/apollo_infra_utils/src/cairo0_compiler.rs' # Contains the Cairo0 compiler version. + - 'crates/blockifier_test_utils/**' - 'scripts/dependencies.sh' env: @@ -33,44 +31,21 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # Setup pypy and link to the location expected by .cargo/config.toml. 
- uses: actions/setup-python@v5 id: setup-pypy with: python-version: 'pypy3.9' + cache: 'pip' - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 - env: LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV - - # Checkout sequencer into a dedicated directory - technical requirement in order to be able to checkout `cairo` in a sibling directory. - - name: checkout sequencer into `sequencer` directory. - uses: actions/checkout@v4 - with: - repository: 'starkware-libs/sequencer' - path: 'sequencer' - - - name: checkout cairo1 repo in order to compile cairo1 contracts. - uses: actions/checkout@v4 - with: - repository: 'starkware-libs/cairo' - fetch-depth: 0 - fetch-tags: true - path: 'cairo' - - - name: install toolchain for legacy contract compilation (old compiler tag) - uses: actions-rs/toolchain@master - with: - toolchain: nightly-2023-07-05 - - - name: install toolchain for cairo steps test contract compilation (old compiler tag) - uses: actions-rs/toolchain@master - with: - toolchain: nightly-2024-04-29 + - run: pip install -r crates/blockifier_test_utils/resources/blockifier-test-utils-requirements.txt - name: Verify cairo contract recompilation (both cairo versions). run: - cd sequencer && - pip install -r crates/blockifier/tests/requirements.txt && - cargo test -p blockifier --test feature_contracts_compatibility_test --features testing -- --include-ignored + cargo test -p blockifier_test_utils --test feature_contracts_compatibility_test -- --include-ignored --nocapture diff --git a/.github/workflows/blockifier_post-merge.yml b/.github/workflows/blockifier_post-merge.yml deleted file mode 100644 index 5ef6a115b02..00000000000 --- a/.github/workflows/blockifier_post-merge.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: Blockifier-Post-Merge - -on: - pull_request: - types: - - closed - paths: - - '.github/workflows/blockifier_post-merge.yml' - - 'crates/blockifier/**' - - 'crates/native_blockifier/**' - - 'scripts/dependencies.sh' - -jobs: - if_merged: - if: github.event.pull_request.merged == true - runs-on: starkware-ubuntu-24.04-medium - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/bootstrap - - # Setup pypy and link to the location expected by .cargo/config.toml. - - uses: actions/setup-python@v5 - id: setup-pypy - with: - python-version: 'pypy3.9' - - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 - - env: - LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin - run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV - - - run: | - pip install -r crates/blockifier/tests/requirements.txt - cargo test -p blockifier -p native_blockifier -- --include-ignored diff --git a/.github/workflows/blockifier_reexecution_ci.yml b/.github/workflows/blockifier_reexecution_ci.yml index 0700a5537a0..b650379071f 100644 --- a/.github/workflows/blockifier_reexecution_ci.yml +++ b/.github/workflows/blockifier_reexecution_ci.yml @@ -37,6 +37,8 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # Download the blockifier re-execution test data. 
- id: auth
  uses: "google-github-actions/auth@v2"
diff --git a/.github/workflows/committer_and_os_cli_push.yml b/.github/workflows/committer_and_os_cli_push.yml
new file mode 100644
index 00000000000..5a6624bcdfd
--- /dev/null
+++ b/.github/workflows/committer_and_os_cli_push.yml
@@ -0,0 +1,94 @@
+name: Committer-And-OS-CLI-push
+
+on:
+  push:
+    branches:
+      - main
+      - main-v[0-9].**
+    tags:
+      - v[0-9].**
+
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - synchronize
+      - auto_merge_enabled
+      - edited
+    paths:
+      - '.github/workflows/committer_and_os_cli_push.yml'
+      - 'build_native_in_docker.sh'
+      - 'docker-ci/images/sequencer-ci.Dockerfile'
+      - 'Cargo.toml'
+      - 'Cargo.lock'
+      - 'crates/apollo_starknet_os_program/**'
+      - 'crates/starknet_committer_and_os_cli/**'
+      - 'crates/starknet_api/**'
+      - 'crates/starknet_committer/**'
+      - 'crates/starknet_os/**'
+      - 'crates/starknet_patricia/**'
+      - 'rust-toolchain.toml'
+      - 'scripts/dependencies.sh'
+
+env:
+  RUSTFLAGS: "-D warnings -C link-arg=-fuse-ld=lld"
+
+# On PR events, cancel existing CI runs on this same PR for this workflow.
+# Also, on push events, create a separate concurrency group for each pushed commit.
+concurrency:
+  group: >
+    ${{ github.workflow }}-
+    ${{ github.ref }}-
+    ${{ github.event_name == 'pull_request' && 'PR' || github.sha }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+jobs:
+  gcs-push:
+    runs-on: starkware-ubuntu-24.04-medium
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/bootstrap
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+
+      # On pull request events, the commit hash is the head commit of the PR branch.
+      - name: Get commit hash prefix for PR update
+        if: ${{ github.event_name == 'pull_request' }}
+        env:
+          COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
+        run: echo "SHORT_HASH=${COMMIT_SHA:0:7}" >> $GITHUB_ENV
+
+      # On push events (to main, for example), take the commit post-push.
+      - name: Get commit hash prefix for merge
+        if: ${{ github.event_name != 'pull_request' }}
+        env:
+          COMMIT_SHA: ${{ github.event.after }}
+        run: echo "SHORT_HASH=${COMMIT_SHA:0:7}" >> $GITHUB_ENV
+
+      # Setup pypy and link to the location expected by .cargo/config.toml.
+      # Python + requirements are needed to compile the OS.
+ - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + + - name: Build CLI binary + run: ./build_native_in_docker.sh cargo build -p starknet_committer_and_os_cli -r --bin starknet_committer_and_os_cli --target-dir CLI_TARGET + + - id: auth + uses: "google-github-actions/auth@v2" + with: + credentials_json: ${{ secrets.COMMITER_PRODUCTS_EXT_WRITER_JSON }} + + - name: Upload binary to GCP + id: upload_file + uses: "google-github-actions/upload-cloud-storage@v2" + with: + path: "CLI_TARGET/release/starknet_committer_and_os_cli" + destination: "committer-products-external/${{ env.SHORT_HASH }}/release/" diff --git a/.github/workflows/committer_ci.yml b/.github/workflows/committer_ci.yml index d6e1a046e0b..29b2db42bc3 100644 --- a/.github/workflows/committer_ci.yml +++ b/.github/workflows/committer_ci.yml @@ -12,7 +12,7 @@ on: - '.github/workflows/committer_ci.yml' - 'Cargo.toml' - 'Cargo.lock' - - 'crates/committer_cli/**' + - 'crates/starknet_committer_and_os_cli/**' - 'crates/starknet_api/**' - 'crates/starknet_committer/**' - 'crates/starknet_patricia/**' @@ -36,16 +36,32 @@ jobs: if: ${{ github.event_name == 'pull_request' }} steps: - uses: actions/checkout@v4 + + # Setup pypy and link to the location expected by .cargo/config.toml. + # Python + requirements are needed to compile the OS. + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - id: auth uses: "google-github-actions/auth@v2" with: credentials_json: ${{ secrets.COMMITER_PRODUCTS_EXT_WRITER_JSON }} - uses: 'google-github-actions/setup-gcloud@v2' - - run: echo "BENCH_INPUT_FILES_PREFIX=$(cat ./crates/committer_cli/src/tests/flow_test_files_prefix)" >> $GITHUB_ENV - - run: gcloud storage cp -r gs://committer-testing-artifacts/$BENCH_INPUT_FILES_PREFIX/* ./crates/committer_cli/test_inputs - - run: cargo test -p committer_cli --release -- --include-ignored test_regression + - run: echo "BENCH_INPUT_FILES_PREFIX=$(cat ./crates/starknet_committer_and_os_cli/src/committer_cli/tests/flow_test_files_prefix)" >> $GITHUB_ENV + - run: gcloud storage cp -r gs://committer-testing-artifacts/$BENCH_INPUT_FILES_PREFIX/* ./crates/starknet_committer_and_os_cli/test_inputs + - run: cargo test -p starknet_committer_and_os_cli --release -- --include-ignored test_regression benchmarking: runs-on: starkware-ubuntu-24.04-medium @@ -55,7 +71,23 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.base_ref }} + + # Setup pypy and link to the location expected by .cargo/config.toml. + # Python + requirements are needed to compile the OS. 
+ - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # Download the old benchmark inputs. - id: auth @@ -63,53 +95,70 @@ jobs: with: credentials_json: ${{ secrets.COMMITER_PRODUCTS_EXT_WRITER_JSON }} - uses: 'google-github-actions/setup-gcloud@v2' - - run: echo "OLD_BENCH_INPUT_FILES_PREFIX=$(cat ./crates/committer_cli/src/tests/flow_test_files_prefix)" >> $GITHUB_ENV - - run: gcloud storage cp -r gs://committer-testing-artifacts/$OLD_BENCH_INPUT_FILES_PREFIX/* ./crates/committer_cli/test_inputs + - run: echo "OLD_BENCH_INPUT_FILES_PREFIX=$(cat ./crates/starknet_committer_and_os_cli/src/committer_cli/tests/flow_test_files_prefix)" >> $GITHUB_ENV + - run: gcloud storage cp -r gs://committer-testing-artifacts/$OLD_BENCH_INPUT_FILES_PREFIX/* ./crates/starknet_committer_and_os_cli/test_inputs # List the existing benchmarks. - run: | - cargo bench -p committer_cli -- --list | grep ': benchmark$' | sed -e "s/: benchmark$//" > benchmarks_list.txt + cargo bench -p starknet_committer_and_os_cli -- --list | grep ': benchmark$' | sed -e "s/: benchmark$//" > benchmarks_list.txt # Benchmark the old code. - - run: cargo bench -p committer_cli + - run: cargo bench -p starknet_committer_and_os_cli # Backup the downloaded files to avoid re-downloading them if they didn't change (overwritten by checkout). - - run: mv ./crates/committer_cli/test_inputs/tree_flow_inputs.json ./crates/committer_cli/test_inputs/tree_flow_inputs.json_bu - - run: mv ./crates/committer_cli/test_inputs/committer_flow_inputs.json ./crates/committer_cli/test_inputs/committer_flow_inputs.json_bu + - run: mv ./crates/starknet_committer_and_os_cli/test_inputs/tree_flow_inputs.json ./crates/starknet_committer_and_os_cli/test_inputs/tree_flow_inputs.json_bu + - run: mv ./crates/starknet_committer_and_os_cli/test_inputs/committer_flow_inputs.json ./crates/starknet_committer_and_os_cli/test_inputs/committer_flow_inputs.json_bu # Checkout the new code. - uses: actions/checkout@v4 with: clean: false - - run: echo "NEW_BENCH_INPUT_FILES_PREFIX=$(cat ./crates/committer_cli/src/tests/flow_test_files_prefix)" >> $GITHUB_ENV + - run: echo "NEW_BENCH_INPUT_FILES_PREFIX=$(cat ./crates/starknet_committer_and_os_cli/src/committer_cli/tests/flow_test_files_prefix)" >> $GITHUB_ENV # Input files didn't change. - if: env.OLD_BENCH_INPUT_FILES_PREFIX == env.NEW_BENCH_INPUT_FILES_PREFIX run: | - mv ./crates/committer_cli/test_inputs/tree_flow_inputs.json_bu ./crates/committer_cli/test_inputs/tree_flow_inputs.json - mv ./crates/committer_cli/test_inputs/committer_flow_inputs.json_bu ./crates/committer_cli/test_inputs/committer_flow_inputs.json + mv ./crates/starknet_committer_and_os_cli/test_inputs/tree_flow_inputs.json_bu ./crates/starknet_committer_and_os_cli/test_inputs/tree_flow_inputs.json + mv ./crates/starknet_committer_and_os_cli/test_inputs/committer_flow_inputs.json_bu ./crates/starknet_committer_and_os_cli/test_inputs/committer_flow_inputs.json # Input files did change, download new inputs. 
- if: env.OLD_BENCH_INPUT_FILES_PREFIX != env.NEW_BENCH_INPUT_FILES_PREFIX run: | - gcloud storage cp -r gs://committer-testing-artifacts/$NEW_BENCH_INPUT_FILES_PREFIX/* ./crates/committer_cli/test_inputs + gcloud storage cp -r gs://committer-testing-artifacts/$NEW_BENCH_INPUT_FILES_PREFIX/* ./crates/starknet_committer_and_os_cli/test_inputs # Benchmark the new code, splitting the benchmarks, and prepare the results for posting a comment. - - run: bash ./crates/committer_cli/benches/bench_split_and_prepare_post.sh benchmarks_list.txt bench_new.txt + - run: bash ./crates/starknet_committer_and_os_cli/benches/bench_split_and_prepare_post.sh benchmarks_list.txt bench_new.txt - run: echo BENCHES_RESULT=$(cat bench_new.txt) >> $GITHUB_ENV - # Post comment in case of performance regression or improvement. - - run: npm install fs - - if: contains(env.BENCHES_RESULT, 'regressed') || contains(env.BENCHES_RESULT, 'improved') - uses: actions/github-script@v6 + # Comment with a link to the workflow (or update existing comment on rerun). + - name: Find Comment + if: github.event_name == 'pull_request' + uses: starkware-libs/find-comment@v3 + id: find-benchmark-comment + with: + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ github.event.pull_request.number }} + comment-author: 'github-actions[bot]' + body-includes: Benchmark movements + + - name: Create comment + # If the PR number is found and the comment is not found, create a new comment. + if: github.event_name == 'pull_request' + && steps.find-benchmark-comment.outputs.comment-id == '' + uses: starkware-libs/create-or-update-comment@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ github.event.pull_request.number }} + body: ${{ env.BENCHES_RESULT }} + + - name: Update comment + # If the PR number is found and the comment exists, update it. + if: github.event_name == 'pull_request' + && steps.find-benchmark-comment.outputs.comment-id != '' + uses: starkware-libs/create-or-update-comment@v4 with: - script: | - const fs = require('fs') - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: fs.readFileSync('bench_new.txt', 'utf8'), - path: 'Commits' - }) + token: ${{ secrets.GITHUB_TOKEN }} + comment-id: ${{ steps.find-benchmark-comment.outputs.comment-id }} + edit-mode: replace + body: ${{ env.BENCHES_RESULT }} diff --git a/.github/workflows/committer_cli_push.yml b/.github/workflows/committer_cli_push.yml deleted file mode 100644 index 553a210c126..00000000000 --- a/.github/workflows/committer_cli_push.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: Committer-CLI-push - -on: - push: - branches: - - main - - main-v[0-9].** - tags: - - v[0-9].** - paths: - - '.github/workflows/committer_cli_push.yml' - - 'Cargo.toml' - - 'Cargo.lock' - - 'crates/committer_cli/**' - - 'crates/starknet_api/**' - - 'crates/starknet_committer/**' - - 'crates/starknet_patricia/**' - - 'rust-toolchain.toml' - - 'scripts/dependencies.sh' - - pull_request: - types: - - opened - - reopened - - synchronize - - auto_merge_enabled - - edited - paths: - - '.github/workflows/committer_cli_push.yml' - - 'Cargo.toml' - - 'Cargo.lock' - - 'crates/committer_cli/**' - - 'crates/starknet_api/**' - - 'crates/starknet_committer/**' - - 'crates/starknet_patricia/**' - - 'rust-toolchain.toml' - - 'scripts/dependencies.sh' - -env: - RUSTFLAGS: "-D warnings -C link-arg=-fuse-ld=lld" - -# On PR events, cancel existing CI runs on this same PR for this workflow. 
-# Also, create different concurrency groups for different pushed commits, on push events. -concurrency: - group: > - ${{ github.workflow }}- - ${{ github.ref }}- - ${{ github.event_name == 'pull_request' && 'PR' || github.sha }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - -jobs: - gcs-push: - runs-on: starkware-ubuntu-24.04-medium - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/bootstrap - - # Commit hash on pull request event would be the head commit of the branch. - - name: Get commit hash prefix for PR update - if: ${{ github.event_name == 'pull_request' }} - env: - COMMIT_SHA: ${{ github.event.pull_request.head.sha }} - run: echo "SHORT_HASH=${COMMIT_SHA:0:7}" >> $GITHUB_ENV - - # On push event (to main, for example) we should take the commit post-push. - - name: Get commit hash prefix for merge - if: ${{ github.event_name != 'pull_request' }} - env: - COMMIT_SHA: ${{ github.event.after }} - run: echo "SHORT_HASH=${COMMIT_SHA:0:7}" >> $GITHUB_ENV - - - name: Build CLI binary - run: ./build_native_in_docker.sh rustup toolchain install && cargo build -p committer_cli -r --bin committer_cli --target-dir CLI_TARGET - - - id: auth - uses: "google-github-actions/auth@v2" - with: - credentials_json: ${{ secrets.COMMITER_PRODUCTS_EXT_WRITER_JSON }} - - - name: Upload binary to GCP - id: upload_file - uses: "google-github-actions/upload-cloud-storage@v2" - with: - path: "CLI_TARGET/release/committer_cli" - destination: "committer-products-external/${{ env.SHORT_HASH }}/release/" diff --git a/.github/workflows/consolidated_system_test.yaml b/.github/workflows/consolidated_system_test.yaml new file mode 100644 index 00000000000..0ff4f7048b1 --- /dev/null +++ b/.github/workflows/consolidated_system_test.yaml @@ -0,0 +1,357 @@ +name: Sequencer - Consolidated Node System Test + +on: + workflow_dispatch: + inputs: + liveness_test_duration_sec: + description: Time in seconds to keep the liveness test running. + required: false + default: 10 + type: number + + pull_request: + +env: + job_link: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + deployment_config_path: ${{ github.workspace }}/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json + namespace: sequencer-consolidated-system-test-run-${{ github.run_number }}-attempt-${{ github.run_attempt }} + cluster_name: consolidated-system-test + crate_triggers: "apollo_node,apollo_deployments,apollo_integration_tests" + path_triggers: ".github/workflows/consolidated_system_test.yaml,scripts/*.py,scripts/system_tests/**/*.py" + pvc_storage_class_name: "premium-rwo" + anvil_port: "8545" + +permissions: + contents: read + +# On PR events, cancel existing CI runs on this same PR for this workflow. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.job }}-${{ github.event_name == 'workflow_dispatch' && github.run_id || 'pr' }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + check-system-test-trigger: + runs-on: starkware-ubuntu-24.04-small + outputs: + should_run: ${{ steps.system_check.outputs.should_run }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: "pip" + + - run: pip install -r scripts/requirements.txt + + - name: Check for system-test-triggering changes + id: system_check + run: | + echo "Checking if any system-test-triggering crates were modified..." 
+          OUTPUT_FILE=$(mktemp)
+
+          python ./scripts/check_test_trigger.py --output_file $OUTPUT_FILE \
+            --commit_id ${{ github.event.pull_request.base.sha }} \
+            --crate_triggers ${{ env.crate_triggers }} \
+            --path_triggers ${{ env.path_triggers }}
+
+          should_run=$(cat "$OUTPUT_FILE")
+          echo "Captured output: $should_run"
+          echo "should_run=$should_run" >> $GITHUB_OUTPUT
+
+  system_test:
+    needs: check-system-test-trigger
+    if: needs.check-system-test-trigger.outputs.should_run == 'true'
+    runs-on: starkware-ubuntu-24.04-large
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Create k3d cluster (Local k8s)
+        uses: AbsaOSS/k3d-action@v2
+        with:
+          # Assumption: only one PR can run per machine at a time.
+          cluster-name: ${{ env.cluster_name }}
+          args: >-
+            --verbose
+            --agents 1
+            --no-lb
+            --wait
+            --timeout 120s
+
+      # Install rust components.
+      - uses: ./.github/actions/bootstrap
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install local-path-provisioner (for PVC support)
+        run: |
+          echo "πŸ”§ Installing local-path-provisioner..."
+          kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
+
+          echo "⏳ Waiting for local-path-provisioner pod to be ready..."
+          kubectl wait --for=condition=Ready pod -l app=local-path-provisioner -n local-path-storage --timeout=60s
+
+          echo "βœ… local-path-provisioner is ready."
+
+          echo "πŸ“¦ Verifying default StorageClass is set..."
+          kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+
+          echo "πŸ“¦ Creating alias StorageClass ${pvc_storage_class_name} for compatibility with PVCs..."
+          cat <<EOF | kubectl apply -f -
+          apiVersion: storage.k8s.io/v1
+          kind: StorageClass
+          metadata:
+            name: ${pvc_storage_class_name}
+          provisioner: rancher.io/local-path
+          EOF
+
+      - name: Deploy Anvil
+        run: |
+          if kubectl get namespace "${{ env.namespace }}" &> /dev/null; then
+            echo "πŸ” Namespace '${{ env.namespace }}' already exists. Deleting it..."
+            kubectl delete namespace "${{ env.namespace }}"
+            echo "⏳ Waiting for namespace deletion..."
+            while kubectl get namespace "${{ env.namespace }}" &> /dev/null; do sleep 2; done
+            echo "βœ… Namespace '${{ env.namespace }}' deleted."
+          fi
+          pipenv install
+          cdk8s import
+          cdk8s synth --app "pipenv run python main.py --namespace ${{ env.namespace }}"
+          kubectl create namespace ${{ env.namespace }}
+          kubectl apply -R -f ./dist
+          echo "⏳ Waiting for Anvil to become ready..."
+          kubectl wait --namespace ${{ env.namespace }} --for=condition=Ready -l app=anvil pod --timeout 60s
+          echo "πŸš€ Anvil deployed successfully."
+
+          echo "πŸ” Extracting Anvil addresses from logs..."
+ ANVIL_POD=$(kubectl get pods -n ${{ env.namespace }} -l app=anvil -o jsonpath="{.items[0].metadata.name}") + ADDRESSES=$(kubectl logs -n ${{ env.namespace }} "$ANVIL_POD" | grep -oP '0x[a-fA-F0-9]{40}' | head -n 2) + + SENDER_ADDRESS=$(echo "$ADDRESSES" | head -n 1) + RECEIVER_ADDRESS=$(echo "$ADDRESSES" | tail -n 1) + + echo "πŸ’‘ SENDER_ADDRESS=$SENDER_ADDRESS" + echo "πŸ’‘ RECEIVER_ADDRESS=$RECEIVER_ADDRESS" + + echo "SENDER_ADDRESS=$SENDER_ADDRESS" >> "$GITHUB_ENV" + echo "RECEIVER_ADDRESS=$RECEIVER_ADDRESS" >> "$GITHUB_ENV" + + - name: Build binaries + run: cargo build --bin sequencer_node_setup --bin sequencer_simulator + + - name: Create storage files + run: ./target/debug/sequencer_node_setup --output-base-dir ./output --data-prefix-path /data --n-consolidated 1 --n-distributed 0 + + - name: Export application config dir + run: | + set -euo pipefail + + # Get the config directory + app_config_dir=$(jq -r '.application_config_subdir' ${{ env.deployment_config_path }}) + + # Export to environment for the next step + echo "app_config_dir=$app_config_dir" >> $GITHUB_ENV + echo "app_config_dir is: $app_config_dir" + + # TODO(Nadin): move the config definition out of the GitHub Actions secret section, since it no longer contains sensitive values. + - name: Inject Config Secrets + run: | + python ./scripts/system_tests/config_secrets_injector.py --deployment_config_path ${{ env.deployment_config_path }} + + - name: Generate k8s manifests + working-directory: deployments/sequencer + run: | + pipenv install + cdk8s import + echo "Generating Kubernetes manifests using deployment config at: ${{ env.deployment_config_path }}:" + cat "${{ env.deployment_config_path }}" + cdk8s synth --app "pipenv run python main.py --namespace ${{ env.namespace }} --deployment-config-file ${{ env.deployment_config_path }} --deployment-image sequencer:local" + + - name: Deploy Sequencer + working-directory: deployments/sequencer + run: | + echo "Deploying Sequencer..." + kubectl create namespace ${{ env.namespace }} + kubectl apply -R -f ./dist/ + + - name: Set default namespace + run: kubectl config set-context --current --namespace ${{ env.namespace }} + + - name: Run readiness check + run: pipenv run python ./scripts/system_tests/readiness_check.py --deployment_config_path ${{ env.deployment_config_path }} --namespace ${{ env.namespace }} + + - name: Get Config Dir + run: | + set -euo pipefail + + # Get the config directory + config_dir=$(jq -r '.application_config_subdir' ${{ env.deployment_config_path }}) + + echo "config_dir is: $config_dir" + # Export to environment for the next step. 
+ echo "config_dir=$config_dir" >> $GITHUB_ENV + + - name: Test sequencer is alive + env: + initial_delay_sec: 10 + check_interval_sec: 5 + check_timeout_sec: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.liveness_test_duration_sec || 10 }} + run: | + # TODO(Nadin): Calculate config_dir dynamically in liveness_check.py based on deployment_config_path + pipenv run python ./scripts/system_tests/liveness_check.py \ + --deployment-config-path ${{ env.deployment_config_path }} \ + --config-dir ${{ env.config_dir }} \ + --timeout ${{ env.check_timeout_sec }} \ + --interval ${{ env.check_interval_sec }} + + - name: Copy state and restart pod + run: pipenv run python ./scripts/system_tests/copy_state_and_restart.py --deployment_config_path ${{ env.deployment_config_path }} --data-dir "./output/data/node_0/executable_0" + + - name: Port-forward Anvil pod to localhost:${{ env.anvil_port }} + run: | + echo "πŸ”Œ Setting up port-forward to Anvil..." + + ANVIL_POD=$(kubectl get pods -n anvil -l app=anvil -o jsonpath="{.items[0].metadata.name}") + echo "🌐 Found Anvil pod: $ANVIL_POD" + + # Start port-forwarding in background and keep it running + kubectl port-forward -n anvil "$ANVIL_POD" ${{ env.anvil_port }}:${{ env.anvil_port }} & + echo "⏳ Waiting a few seconds to ensure port-forward is established..." + sleep 2 + + - name: Send transactions test + run: pipenv run python ./scripts/system_tests/sequencer_simulator.py --deployment_config_path ${{ env.deployment_config_path }} --config_dir "${{ env.config_dir }}" --node_type "consolidated" --sender_address "${{ env.SENDER_ADDRESS }}" --receiver_address "${{ env.RECEIVER_ADDRESS }}" + + - name: Get container logs + if: always() + run: | + echo "πŸ“₯ Getting pod logs and descriptions from namespace: $namespace" + + # List all pods in the namespace + kubectl get pods -n "$namespace" + + # For each pod, get logs and description + for pod in $(kubectl get pods -n "$namespace" -o jsonpath='{.items[*].metadata.name}'); do + echo "----------------------------------------------" + echo "Logs for pod: $pod" + kubectl logs -n "$namespace" "$pod" || echo "⚠️ Failed to get logs for $pod" + + echo "" + echo "Description for pod: $pod" + kubectl describe pod -n "$namespace" "$pod" || echo "⚠️ Failed to describe pod $pod" + echo "---------------------------------------------" + echo "" + done diff --git a/.github/workflows/deployment.yml b/.github/workflows/deployment.yml deleted file mode 100644 index ea42247c0db..00000000000 --- a/.github/workflows/deployment.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Sequencer Deployment Test -on: - push: - branches: - - main - - main-v[0-9].** - tags: - - v[0-9].** - # TODO(Dori, 1/9/2024): Decide when exactly native-blockifier artifacts will be built. Until - # then, keep the 'paths' key empty and build on every push to a release branch / tag. - - pull_request: - types: - - opened - - reopened - - synchronize - - auto_merge_enabled - - edited - paths: - - 'deployments/sequencer/*' - -jobs: - deployment: - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - - - uses: actions/setup-python@v5 - with: - python-version: '3.10' - - - name: black all files - run: | - python3 -m pip install black - pushd deployments/sequencer - ./black.sh --check - popd - - - run: | - # Install deps. - npm install -g cdk8s-cli - python3 -m pip install pipenv - - # Synthesize the CDK8s Sequencer app. 
- cd deployments/sequencer - pipenv install - cdk8s synth --app "pipenv run python main.py --namespace test" - diff -aur references dist diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8534af74f6e..cb64b1bdc44 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -48,20 +48,18 @@ jobs: id: setup-pypy with: python-version: "pypy3.9" + cache: 'pip' - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 - env: LD_LIBRARY_PATH: ${{ steps.setup-pypy.outputs.pythonLocation }}/bin run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt # Install rust components. - uses: ./.github/actions/bootstrap with: extra_rust_toolchains: ${{ env.EXTRA_RUST_TOOLCHAINS }} - - - name: Setup Python venv - run: | - python3 -m venv ci - ci/bin/pip install -r scripts/requirements.txt + github_token: ${{ secrets.GITHUB_TOKEN }} # Check Cargo.lock is up to date. - name: "Check Cargo.lock" @@ -69,13 +67,20 @@ jobs: cargo update -w --locked git diff --exit-code Cargo.lock + # Make sure no submodules are out of date or missing. + - name: "Check submodules" + run: git submodule status + # Run code style on PR. + - name: "Run TODO style pull request" + if: github.event_name == 'pull_request' + run: scripts/named_todos.py --commit_id ${{ github.event.pull_request.base.sha }} - name: "Run clippy pull request" if: github.event_name == 'pull_request' - run: ci/bin/python scripts/run_tests.py --command clippy --changes_only --commit_id ${{ github.event.pull_request.base.sha }} + run: scripts/run_tests.py --command clippy --changes_only --commit_id ${{ github.event.pull_request.base.sha }} - name: "Run cargo doc pull request" if: github.event_name == 'pull_request' - run: ci/bin/python scripts/run_tests.py --command doc --changes_only --commit_id ${{ github.event.pull_request.base.sha }} + run: scripts/run_tests.py --command doc --changes_only --commit_id ${{ github.event.pull_request.base.sha }} # Run code style on push. - name: "Run rustfmt" @@ -85,10 +90,10 @@ jobs: - name: "Run clippy on push" if: github.event_name == 'push' - run: ci/bin/python scripts/run_tests.py --command clippy + run: scripts/run_tests.py --command clippy - name: "Run cargo doc on push" if: github.event_name == 'push' - run: ci/bin/python scripts/run_tests.py --command doc + run: scripts/run_tests.py --command doc - name: "Run taplo" run: scripts/taplo.sh @@ -100,6 +105,8 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - run: cargo test -p workspace_tests run-tests: @@ -110,33 +117,70 @@ jobs: # Fetch the entire history. fetch-depth: 0 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} # Setup pypy and link to the location expected by .cargo/config.toml. - uses: actions/setup-python@v5 id: setup-pypy with: python-version: "pypy3.9" + cache: 'pip' - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 - env: LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + + # TODO(Gilad): only one test needs this (base_layer_test.rs), once it migrates to + # anvil, remove. 
- run: npm install -g ganache@7.4.3 + - name: "Run tests pull request" if: github.event_name == 'pull_request' run: | - python3 -m venv ci - ci/bin/pip install -r scripts/requirements.txt - ci/bin/python scripts/run_tests.py --command test --changes_only --include_dependencies --commit_id ${{ github.event.pull_request.base.sha }} + scripts/run_tests.py --command test --changes_only --include_dependencies --commit_id ${{ github.event.pull_request.base.sha }} env: SEED: 0 - name: "Run tests on push" if: github.event_name == 'push' - # TODO: Better support for running tests on push. + # TODO(AdiY/Dori): Better support for running tests on push. run: | - python3 -m venv ci - ci/bin/pip install -r scripts/requirements.txt - ci/bin/python scripts/run_tests.py --command test + scripts/run_tests.py --command test env: SEED: 0 + run-integration-tests: + runs-on: starkware-ubuntu-24.04-large + steps: + - uses: actions/checkout@v4 + with: + # Fetch the entire history. + fetch-depth: 0 + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + # Setup pypy and link to the location expected by .cargo/config.toml. + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + + # TODO(Gilad): only one test needs this (base_layer_test.rs), once it migrates to + # anvil, remove. + - run: npm install -g ganache@7.4.3 + + - name: "Run integration tests pull request" + if: github.event_name == 'pull_request' + run: | + scripts/run_tests.py --command integration --changes_only --include_dependencies --commit_id ${{ github.event.pull_request.base.sha }} + env: + SEED: 0 diff --git a/.github/workflows/main_nightly.yml b/.github/workflows/main_nightly.yml index 7629064aed7..3c72f9e067e 100644 --- a/.github/workflows/main_nightly.yml +++ b/.github/workflows/main_nightly.yml @@ -13,10 +13,25 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - run: npm install -g ganache@7.4.3 - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov + # Setup pypy and link to the location expected by .cargo/config.toml. + # Python + requirements are needed to compile the OS. + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + - name: "Run codecov" run: cargo llvm-cov --codecov --output-path codecov.json env: @@ -27,3 +42,58 @@ jobs: with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true + + feature_combos: + runs-on: starkware-ubuntu-24.04-large + steps: + - uses: actions/checkout@v4 + + # Setup pypy and link to the location expected by .cargo/config.toml. 
+ - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ steps.setup-pypy.outputs.pythonLocation }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + + # Install rust components. + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + # Run feature combo test. + - name: "Run feature combo on all crates." + run: scripts/run_feature_combos_test.py + + run-integration-tests: + runs-on: starkware-ubuntu-24.04-large + steps: + - uses: actions/checkout@v4 + + - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + # Setup pypy and link to the location expected by .cargo/config.toml. + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ env.Python3_ROOT_DIR }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + # TODO(Gilad): only one test needs this (base_layer_test.rs), once it migrates to + # anvil, remove. + - run: npm install -g ganache@7.4.3 + + - name: "Run integration tests pull request" + run: | + scripts/run_tests.py --command integration --is_nightly + env: + SEED: 0 diff --git a/.github/workflows/main_pr.yml b/.github/workflows/main_pr.yml index b462e8f0ccb..d541135b986 100644 --- a/.github/workflows/main_pr.yml +++ b/.github/workflows/main_pr.yml @@ -1,6 +1,7 @@ name: Main-CI-PR-Flow on: + merge_group: pull_request: types: - opened @@ -58,7 +59,7 @@ jobs: uses: upsidr/merge-gatekeeper@v1 with: token: ${{ secrets.GITHUB_TOKEN }} - timeout: 1500 + timeout: 3600 interval: 30 ignored: "code-review/reviewable" @@ -68,6 +69,6 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{github.ref}} - timeout: 1500 + timeout: 1800 interval: 30 ignored: "code-review/reviewable" diff --git a/.github/workflows/merge_paths_ci.yml b/.github/workflows/merge_paths_ci.yml index a5fc183da34..efb485de0f7 100644 --- a/.github/workflows/merge_paths_ci.yml +++ b/.github/workflows/merge_paths_ci.yml @@ -33,6 +33,7 @@ jobs: - uses: actions/setup-python@v5 with: python-version: '3.9' + cache: 'pip' - run: | python -m pip install --upgrade pip pip install pytest diff --git a/.github/workflows/merge_queue_ci.yml b/.github/workflows/merge_queue_ci.yml new file mode 100644 index 00000000000..abc056eea02 --- /dev/null +++ b/.github/workflows/merge_queue_ci.yml @@ -0,0 +1,50 @@ +name: Merge-queue-CI-Flow + +on: + merge_group: + types: + - checks_requested + +env: + CI: 1 + RUSTFLAGS: "-D warnings -C link-arg=-fuse-ld=lld" + EXTRA_RUST_TOOLCHAINS: nightly-2024-04-29 + +jobs: + code_style: + runs-on: starkware-ubuntu-24.04-medium + steps: + # Environment setup. + - uses: actions/checkout@v4 + + # Setup pypy and link to the location expected by .cargo/config.toml. + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: 'pip' + - run: ln -s '${{ steps.setup-pypy.outputs.python-path }}' /usr/local/bin/pypy3.9 + - env: + LD_LIBRARY_PATH: ${{ steps.setup-pypy.outputs.pythonLocation }}/bin + run: echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + - run: pip install -r scripts/requirements.txt + + # Install rust components. 
+ - uses: ./.github/actions/bootstrap + with: + extra_rust_toolchains: ${{ env.EXTRA_RUST_TOOLCHAINS }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + # Check Cargo.lock is up to date. + - name: "Check Cargo.lock" + run: | + cargo update -w --locked + git diff --exit-code Cargo.lock + + - name: "Run clippy on merge queue" + run: scripts/run_tests.py --command clippy + + - name: "Run rustfmt on merge queue" + # The nightly here is coupled with the one in install_rust/action.yml. + # If we move the install here we can use a const. + run: cargo +"$EXTRA_RUST_TOOLCHAINS" fmt --all -- --check diff --git a/.github/workflows/papyrus_benchmark.yaml b/.github/workflows/papyrus_benchmark.yaml index c07826f7724..12ca7784879 100644 --- a/.github/workflows/papyrus_benchmark.yaml +++ b/.github/workflows/papyrus_benchmark.yaml @@ -2,7 +2,7 @@ name: Papyrus-Benchmarks on: workflow_dispatch: - # TODO: Uncomment and run this automatically when the storage benchmark is fixed. + # TODO(DanB): Uncomment and run this automatically when the storage benchmark is fixed. # push: # branches: [main] diff --git a/.github/workflows/papyrus_ci.yml b/.github/workflows/papyrus_ci.yml index ee4f1d7d9a3..b63052fcdc3 100644 --- a/.github/workflows/papyrus_ci.yml +++ b/.github/workflows/papyrus_ci.yml @@ -9,18 +9,14 @@ on: - auto_merge_enabled - edited # for when the PR title is edited paths: - - '.github/workflows/papyrus_ci.yml' - - 'Dockerfile' - - 'papyrus_utilities.Dockerfile' - - 'Cargo.toml' - - 'Cargo.lock' - - 'crates/papyrus**/**' - - 'crates/sequencing/**' - - 'crates/starknet_client/**' - - 'scripts/dependencies.sh' - - merge_group: - types: [checks_requested] + - ".github/workflows/papyrus_ci.yml" + - "deployments/images/base/Dockerfile" + - "papyrus_utilities.Dockerfile" + - "Cargo.toml" + - "Cargo.lock" + - "crates/papyrus**/**" + - "crates/apollo_starknet_client/**" + - "scripts/dependencies.sh" env: RUSTFLAGS: "-D warnings -C link-arg=-fuse-ld=lld" @@ -40,14 +36,16 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - name: Build node run: | mkdir data - cargo build -r -p papyrus_node + cargo build -p papyrus_node - name: Run executable run: > - target/release/papyrus_node --base_layer.node_url ${{ secrets.CI_BASE_LAYER_NODE_URL }} + target/debug/papyrus_node --chain_id SN_SEPOLIA --base_layer.node_url ${{ secrets.CI_BASE_LAYER_NODE_URL }} & sleep 30 ; kill $! executable-run-no-rpc: @@ -55,14 +53,16 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - name: Build node run: | mkdir data - cargo build -r -p papyrus_node --no-default-features + cargo build -p papyrus_node --no-default-features - name: Run executable run: > - target/release/papyrus_node --base_layer.node_url ${{ secrets.CI_BASE_LAYER_NODE_URL }} + target/debug/papyrus_node --chain_id SN_SEPOLIA --base_layer.node_url ${{ secrets.CI_BASE_LAYER_NODE_URL }} & sleep 30 ; kill $! 
# FIXME: Job is currently running out of disk space; the error is hidden inside the `Annotations`
@@ -72,9 +72,11 @@
#    steps:
#      - uses: actions/checkout@v4
#      - uses: ./.github/actions/bootstrap
+#        with:
+#          github_token: ${{ secrets.GITHUB_TOKEN }}
#      - name: Build node
-#        run: cargo build -r -p papyrus_node
+#        run: cargo build -p papyrus_node
#      - name: Run p2p sync end-to-end test
#        run: scripts/papyrus/p2p_sync_e2e_test/main.sh ${{ secrets.CI_BASE_LAYER_NODE_URL }}
@@ -84,45 +86,48 @@
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/bootstrap
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
      - run: >
-        cargo test -r
+        cargo test
        --test latency_histogram
        --test gateway_integration_test
        --test feeder_gateway_integration_test
        -- --include-ignored --skip test_gw_integration_testnet;
-        cargo run -r -p papyrus_node --bin central_source_integration_test --features="futures-util tokio-stream"
+        cargo run -p papyrus_node --bin central_source_integration_test --features="futures-util tokio-stream"

  test-no-rpc:
    runs-on: starkware-ubuntu-24.04-medium
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/bootstrap
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
      - run: |
          cargo test -p papyrus_node --no-default-features
        env:
          SEED: 0

-  build-papyrus-utilities-image:
-    runs-on: starkware-ubuntu-24.04-medium
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Build Papyrus utilites docker image
-        uses: docker/build-push-action@v3.2.0
-        continue-on-error: true # ignore the failure of a step and avoid terminating the job.
-        with:
-          push: false
-          context: .
-          file: papyrus_utilities.Dockerfile
-          cache-from: type=gha,scope=buildkit-ci
-          cache-to: type=gha,mode=max,scope=buildkit-ci
+  # TODO(DanB): Re-enable this job when necessary.
+  # Note that currently the `papyrus_load_test` build fails.
+  # build-papyrus-utilities-image:
+  #   runs-on: starkware-ubuntu-24.04-medium
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #     - name: Set up Docker Buildx
+  #       uses: docker/setup-buildx-action@v3
+  #     - name: Build Papyrus utilities docker image
+  #       uses: docker/build-push-action@v3.2.0
+  #       with:
+  #         push: false
+  #         context: .
+  #         file: papyrus_utilities.Dockerfile

  random-table-test:
    runs-on: starkware-ubuntu-24.04-medium
    steps:
      - uses: actions/checkout@v4
-      # run this job only if the path 'crates/papyrus_storage/src/db/**' is changed, because it takes around 2 minutes.
+      # run this job only if the path 'crates/apollo_storage/src/db/**' is changed, because it takes around 2 minutes.
      - uses: dorny/paths-filter@v3
        id: changes
        with:
          ...
          ref: ${{ github.event.merge_group.head_ref }}
          filters: |
            target_directory:
-              - 'crates/papyrus_storage/src/db/**'
+              - 'crates/apollo_storage/src/db/**'
      - uses: ./.github/actions/bootstrap
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
      # repeat this job 32 times. this is a random test for part of the code that may cause a corrupted database.
- - run: for run in {1..32}; do cargo test -r -p papyrus_storage -- --include-ignored common_prefix_compare_with_simple_table_random; done + - run: for run in {1..32}; do cargo test -p apollo_storage -- --include-ignored common_prefix_compare_with_simple_table_random; done if: steps.changes.outputs.target_directory == 'true' diff --git a/.github/workflows/papyrus_docker-publish.yml b/.github/workflows/papyrus_docker-publish.yml index ce4a00dc80c..038a72110ee 100644 --- a/.github/workflows/papyrus_docker-publish.yml +++ b/.github/workflows/papyrus_docker-publish.yml @@ -85,7 +85,7 @@ jobs: # Build and push Docker image with Buildx # https://github.com/docker/build-push-action - name: Build and push Docker image - uses: docker/build-push-action@v3.2.0 + uses: docker/build-push-action@v6.13.0 with: context: . file: deployments/images/papyrus/Dockerfile diff --git a/.github/workflows/papyrus_nightly-tests-call.yml b/.github/workflows/papyrus_nightly-tests-call.yml index 6171e514e6c..5b7a1e9869d 100644 --- a/.github/workflows/papyrus_nightly-tests-call.yml +++ b/.github/workflows/papyrus_nightly-tests-call.yml @@ -25,6 +25,8 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - run: sudo apt update; sudo apt -y install libclang-dev # Install libclang-dev that is not a part of the ubuntu vm in github actions. if: runner.os == 'Linux' @@ -38,7 +40,7 @@ jobs: retVal=0; INTEGRATION_TESTNET_NODE_URL=${{ secrets.INTEGRATION_TESTNET_NODE_URL }} SENDER_PRIVATE_KEY=${{ secrets.INTEGRATION_TESTNET_SENDER_PRIVATE_KEY }} - cargo test --test gateway_integration_test -p papyrus_rpc test_gw_integration_testnet + cargo test --test gateway_integration_test -p apollo_rpc test_gw_integration_testnet -- --ignored || retVal=$?; echo "retVal=$retVal" >> $GITHUB_OUTPUT; if [ $retVal -ne 0 ]; then diff --git a/.github/workflows/papyrus_nightly-tests.yml b/.github/workflows/papyrus_nightly-tests.yml index 92db40f283d..920b31153ff 100644 --- a/.github/workflows/papyrus_nightly-tests.yml +++ b/.github/workflows/papyrus_nightly-tests.yml @@ -38,15 +38,17 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - run: mkdir data - name: Build node - run: cargo build -r -p papyrus_node + run: cargo build -p papyrus_node - name: Run executable run: > - target/release/papyrus_node --base_layer.node_url ${{ secrets.CI_BASE_LAYER_NODE_URL }} + target/debug/papyrus_node --base_layer.node_url ${{ secrets.CI_BASE_LAYER_NODE_URL }} & sleep 30 ; kill $! 
test: @@ -55,11 +57,12 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - run: npm install -g ganache@7.4.3 - - run: | - cargo test -r -p papyrus_node + cargo test -p papyrus_node env: SEED: 0 @@ -69,7 +72,9 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap - - run: cargo build -r -p papyrus_load_test + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + - run: cargo build -p papyrus_load_test integration-test: runs-on: macos-latest @@ -77,19 +82,23 @@ jobs: steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - run: > - cargo test -r + cargo test --test latency_histogram --test gateway_integration_test --test feeder_gateway_integration_test -- --include-ignored --skip test_gw_integration_testnet; - cargo run -r -p papyrus_node --bin central_source_integration_test --features="futures-util tokio-stream" + cargo run -p papyrus_node --bin central_source_integration_test --features="futures-util tokio-stream" - # TODO(dvir): make this run only if the path 'crates/papyrus_storage/src/db/**' (same path as in the CI) was changed on the + # TODO(dvir): make this run only if the path 'crates/apollo_storage/src/db/**' (same path as in the CI) was changed on the # last day and increase the number of repetitions. random-table-test: runs-on: starkware-ubuntu-24.04-medium steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap - - run: for run in {1..100}; do cargo test -r -p papyrus_storage -- --include-ignored common_prefix_compare_with_simple_table_random; done + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + - run: for run in {1..100}; do cargo test -p apollo_storage -- --include-ignored common_prefix_compare_with_simple_table_random; done diff --git a/.github/workflows/replay.yml b/.github/workflows/replay.yml new file mode 100644 index 00000000000..2bb87ffccdc --- /dev/null +++ b/.github/workflows/replay.yml @@ -0,0 +1,48 @@ +name: Replay + +on: + push: + branches: [main, replay] + pull_request: + +jobs: + clippy: + runs-on: ubuntu-latest + env: + MLIR_SYS_190_PREFIX: /usr/lib/llvm-19/ + LLVM_SYS_191_PREFIX: /usr/lib/llvm-19/ + TABLEGEN_190_PREFIX: /usr/lib/llvm-19/ + CAIRO_NATIVE_RUNTIME_LIBRARY: libcairo_native_runtime.a + steps: + - uses: actions/checkout@v4 + with: + # required to clone native as a git submodule + submodules: recursive + - uses: dtolnay/rust-toolchain@1.82.0 + with: + components: clippy + - uses: Swatinem/rust-cache@v2 + - name: Add LLVM Debian repository + uses: myci-actions/add-deb-repo@10 + with: + repo: deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-19 main + repo-name: llvm-repo + keys-asc: https://apt.llvm.org/llvm-snapshot.gpg.key + - name: Install lld + run: sudo apt install lld + - name: Install LLVM + run: sudo apt-get install llvm-19 llvm-19-dev llvm-19-runtime clang-19 clang-tools-19 lld-19 libpolly-19-dev libmlir-19-dev mlir-19-tools + - name: Run cargo clippy + run: | + cd crates/blockifier + cargo clippy --all-targets --all-features --no-deps + format: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + with: + toolchain: nightly-2023-10-19 + components: rustfmt + - name: Run cargo fmt + run: cargo +nightly-2023-10-19 fmt --all -- --check diff --git a/.github/workflows/sequencer_cdk8s-test.yml b/.github/workflows/sequencer_cdk8s-test.yml new file mode 100644 index 00000000000..ba37b673749 --- /dev/null 
+++ b/.github/workflows/sequencer_cdk8s-test.yml
@@ -0,0 +1,119 @@
+name: Sequencer-Cdk8s-Test
+on:
+  workflow_dispatch:
+
+  push:
+    branches:
+      - main
+      - main-v[0-9].**
+    tags:
+      - v[0-9].**
+    paths:
+      - ".github/workflows/sequencer_cdk8s-test.yml"
+      - "deployments/sequencer/**"
+
+  pull_request:
+    branches:
+      - main
+      - main-v[0-9].**
+    paths:
+      - ".github/workflows/sequencer_cdk8s-test.yml"
+      - "deployments/sequencer/**"
+
+jobs:
+  prepare:
+    runs-on: ubuntu-24.04
+    env:
+      cluster: test
+      namespace: test
+      deployment_config_path: ${{ github.workspace }}/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json
+      monitoring_dashboard_file: ${{ github.workspace }}/deployments/monitoring/examples/output/dashboards/sequencer_node_dashboard.json
+
+    steps:
+      - name: Checkout sequencer
+        uses: actions/checkout@v4
+
+      - name: Setup Python
+        uses: actions/setup-python@v5.4.0
+        with:
+          python-version: "3.10"
+          cache: "pip"
+
+      - name: Setup Node
+        uses: actions/setup-node@v4.2.0
+        with:
+          node-version: 22
+
+      - name: Install pip dependencies
+        run: python3 -m pip install black pipenv
+
+      - name: Install cdk8s-cli
+        run: npm install -g cdk8s-cli@2.198.334
+
+      - name: Black all files
+        uses: psf/black@stable
+        with:
+          options: "--check --verbose -l 100 -t py310 --diff --color --exclude imports"
+          src: deployments/sequencer
+
+      # Synthesize the CDK8s Sequencer app.
+      - name: Cdk8s synth
+        working-directory: deployments/sequencer
+        run: |
+          cdk8s import
+          pipenv install
+          cdk8s synth --app "pipenv run python main.py --namespace ${{ env.namespace }} --deployment-config-file ${{ env.deployment_config_path }} --monitoring-dashboard-file ${{ env.monitoring_dashboard_file }} --cluster ${{ env.cluster }}"
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: cdk8s-artifacts
+          path: |
+            deployments/sequencer/dist
+            deployments/sequencer/resources
+
+  validate:
+    runs-on: ubuntu-24.04
+    needs: prepare
+    env:
+      dist_path: ./cdk8s-artifacts/dist
+      crds_path: ./cdk8s-artifacts/resources/crds
+    steps:
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "stable"
+
+      - name: Setup kubectl-validate
+        run: go install sigs.k8s.io/kubectl-validate@latest
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: cdk8s-artifacts
+          path: cdk8s-artifacts
+          merge-multiple: true
+
+      - name: kubectl validation test for version 1.27
+        run: kubectl validate ${{ env.dist_path }} --local-crds ${{ env.crds_path }} --version 1.27
+        continue-on-error: true
+
+      - name: kubectl validation test for version 1.28
+        run: kubectl validate ${{ env.dist_path }} --local-crds ${{ env.crds_path }} --version 1.28
+        continue-on-error: true
+
+      - name: kubectl validation test for version 1.29
+        run: kubectl validate ${{ env.dist_path }} --local-crds ${{ env.crds_path }} --version 1.29
+        continue-on-error: false
+
+      - name: kubectl validation test for version 1.30
+        run: kubectl validate ${{ env.dist_path }} --local-crds ${{ env.crds_path }} --version 1.30
+        continue-on-error: false
+
+      - name: kubectl validation test for version 1.31
+        run: kubectl validate ${{ env.dist_path }} --local-crds ${{ env.crds_path }} --version 1.31
+        continue-on-error: false
+
+      - name: kubectl validation test for version 1.32
+        run: kubectl validate ${{ env.dist_path }} --local-crds ${{ env.crds_path }} --version 1.32
+        continue-on-error: false
diff --git a/.github/workflows/sequencer_docker-publish.yml b/.github/workflows/sequencer_docker-publish.yml
new file mode 100644
mode 100644 index 00000000000..f3ad066e03a --- /dev/null +++ b/.github/workflows/sequencer_docker-publish.yml @@ -0,0 +1,91 @@ +name: Sequencer-Docker-Publish + +on: + workflow_dispatch: + push: + branches: + - main + - main-v*.*.* # e.g. main-v0.14.0 + - v*.*.*-integration # e.g. v0.14.0-integration + tags: + - "v*.*.*" + - "APOLLO-*" + paths: + - ".github/workflows/sequencer_docker-publish.yml" + - "crates/**" + - "scripts/dependencies.sh" + - "scripts/install_build_tools.sh" + - "deployments/images/base/Dockerfile" + - "deployments/images/sequencer/Dockerfile" + +permissions: + contents: read + packages: write + +# On PR events, cancel existing CI runs on this same PR for this workflow. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.job }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +env: + REGISTRY: ghcr.io + REPO_NAME: ${{ github.repository }} + RUSTFLAGS: "-D warnings -C link-arg=-fuse-ld=lld" + +jobs: + docker-build-push: + runs-on: starkware-ubuntu-24.04-large + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Not required but recommended - enables building multi-platform images, exporting cache, etc. + # Also a workaround for: https://github.com/docker/build-push-action/issues/461 + # https://github.com/docker/setup-buildx-action + - name: Setup Docker buildx + uses: docker/setup-buildx-action@v2.2.1 + + # Log in to the Docker registry, except on PR events. + # https://github.com/docker/login-action + - name: Login to registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@v2.1.0 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + logout: true + + # Extract metadata (tags, labels) for Docker + # https://github.com/docker/metadata-action + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v4.1.1 + with: + images: ${{ env.REGISTRY }}/${{ env.REPO_NAME }}/sequencer + tags: | + type=raw,enable=${{ startsWith(github.ref, 'refs/tags/') && 'true' || 'false' }},value={{tag}} + type=semver,pattern={{raw}} + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=ref,event=pr + # set `dev` tag for the default branch (`main`). + type=raw,value=dev,enable={{is_default_branch}} + # set `dev-{{branch}}-{{sha}}` additional tag for the default branch (`main`). + type=raw,value=dev-{{branch}}{{tag}}-{{sha}},enable={{is_default_branch}} + type=raw,value={{branch}}{{tag}}-{{sha}},enable=${{ github.event_name == 'workflow_dispatch' }} + + # Build and push Docker image with Buildx + # https://github.com/docker/build-push-action + - name: Build and push Docker image + uses: docker/build-push-action@v6.13.0 + with: + context: . + file: deployments/images/sequencer/Dockerfile + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + load: true # Loads the build result into the local Docker images. Required for the "Run docker test" step to work.
+ no-cache: true + build-args: BUILD_MODE=release diff --git a/.github/workflows/sequencer_docker-test.yml b/.github/workflows/sequencer_docker-test.yml new file mode 100644 index 00000000000..5b923edb009 --- /dev/null +++ b/.github/workflows/sequencer_docker-test.yml @@ -0,0 +1,131 @@ +name: Sequencer-Docker-Test + +on: + workflow_dispatch: + push: + branches: + - main + - main-v[0-9].** + tags: + - v[0-9].** + + pull_request: + +env: + crate_triggers: "apollo_node,apollo_dashboard,apollo_integration_tests" + path_triggers: ".github/workflows/sequencer_docker-test.yml,scripts/*.py,scripts/system_tests/**/*.py" + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.job }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + check-docker-test-trigger: + runs-on: starkware-ubuntu-24.04-small + outputs: + should_run: ${{ steps.docker_check.outputs.should_run }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-python@v5 + id: setup-pypy + with: + python-version: "pypy3.9" + cache: "pip" + + - run: pip install -r scripts/requirements.txt + + - name: Check for docker-test-triggering changes + id: docker_check + run: | + echo "Checking if any docker-test-triggering crates were modified" + OUTPUT_FILE=$(mktemp) + + python ./scripts/check_test_trigger.py --output_file $OUTPUT_FILE \ --commit_id ${{ github.event.pull_request.base.sha }} \ --crate_triggers ${{ env.crate_triggers }} \ --path_triggers ${{ env.path_triggers }} + + should_run=$(cat "$OUTPUT_FILE") + echo "Captured output: $should_run" + echo "should_run=$should_run" >> $GITHUB_OUTPUT + + sequencer_docker_compose_test: + needs: check-docker-test-trigger + if: needs.check-docker-test-trigger.outputs.should_run == 'true' + runs-on: starkware-ubuntu-24.04-large + env: + MONITORING_ENABLED: false + SIMULATOR_RUN_FOREVER: false + FOLLOW_LOGS: false + SIMULATOR_TIMEOUT: 300 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Not required but recommended - enables building multi-platform images, exporting cache, etc. + # Also a workaround for: https://github.com/docker/build-push-action/issues/461 + # https://github.com/docker/setup-buildx-action + - name: Setup Docker buildx + uses: docker/setup-buildx-action@v2.2.1 + + - name: Run docker compose + run: ./deployments/monitoring/deploy_local_stack.sh up -d --build + + # Get the sequencer_simulator container id, then invoke `docker wait $container_id`; + # `docker wait` returns the container's exit code. + - name: Wait for simulator results + working-directory: ./deployments/monitoring/local + timeout-minutes: 5 + run: | + simulator_id=$(docker compose ps -q sequencer_simulator 2>/dev/null) + exit_code=$(docker wait $simulator_id) + if (( $exit_code == 0 )); then + echo "✅ Simulator test succeeded. exit_code=$exit_code" + else + echo "❌ Simulator test failed. exit_code=$exit_code" + exit $exit_code + fi + + # Print each service's logs separately, to make them easier to read later.
+ - name: Print sequencer_node_setup logs + if: always() + working-directory: ./deployments/monitoring/local + run: docker compose logs sequencer_node_setup + + - name: Print dummy_recorder logs + if: always() + working-directory: ./deployments/monitoring/local + run: docker compose logs dummy_recorder + + - name: Print dummy_eth_to_strk_oracle logs + if: always() + working-directory: ./deployments/monitoring/local + run: docker compose logs dummy_eth_to_strk_oracle + + - name: Print config_injector logs + if: always() + working-directory: ./deployments/monitoring/local + run: docker compose logs config_injector + + - name: Print sequencer_node logs + if: always() + working-directory: ./deployments/monitoring/local + run: docker compose logs sequencer_node + + - name: Print sequencer_simulator logs + if: always() + working-directory: ./deployments/monitoring/local + run: docker compose logs sequencer_simulator + + # Shutting down all containers and cleaning volumes. + - name: Cleanup + if: always() + run: ./deployments/monitoring/deploy_local_stack.sh down -v diff --git a/.github/workflows/sequencer_integration_tests.yml b/.github/workflows/sequencer_integration_tests.yml deleted file mode 100644 index 30beb61d4cd..00000000000 --- a/.github/workflows/sequencer_integration_tests.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Sequencer-Integration-Tests-CI-Flow - -on: - pull_request: - types: - - opened - - reopened - - synchronize - - auto_merge_enabled - - edited - -env: - RUSTFLAGS: "-D warnings -C link-arg=-fuse-ld=lld" - -# On PR events, cancel existing CI runs on this same PR for this workflow. -# Also, create different concurrency groups for different pushed commits, on push events. -concurrency: - group: > - ${{ github.workflow }}- - ${{ github.ref }}- - ${{ github.event_name == 'pull_request' && 'PR' || github.sha }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - -jobs: - build-and-run-sequencer-integration-tests: - runs-on: starkware-ubuntu-latest-large - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/bootstrap - - run: | - cargo build --bin starknet_sequencer_node - cargo build --bin sequencer_node_end_to_end_integration_test - target/debug/sequencer_node_end_to_end_integration_test - diff --git a/.github/workflows/upload_artifacts_workflow.yml b/.github/workflows/upload_artifacts_workflow.yml index 3d2368df632..939a9c84cd3 100644 --- a/.github/workflows/upload_artifacts_workflow.yml +++ b/.github/workflows/upload_artifacts_workflow.yml @@ -21,16 +21,19 @@ on: - '.github/workflows/blockifier_ci.yml' - '.github/workflows/upload_artifacts_workflow.yml' - 'build_native_in_docker.sh' + - 'docker-ci/images/sequencer-ci.Dockerfile' + - 'scripts/build_native_blockifier.sh' + - 'scripts/dependencies.sh' + - 'scripts/install_build_tools.sh' - 'Cargo.lock' - 'Cargo.toml' - 'crates/blockifier/**' - 'crates/native_blockifier/**' - - 'crates/starknet_sierra_multicompile/**' + - 'crates/apollo_state_reader/**' + - 'crates/apollo_storage/**' + - 'crates/starknet_api/**' + - 'crates/apollo_sierra_multicompile/**' - 'rust-toolchain.toml' - - 'scripts/build_native_blockifier.sh' - - 'scripts/dependencies.sh' - - 'scripts/install_build_tools.sh' - - 'scripts/sequencer-ci.Dockerfile' # On PR events, cancel existing CI runs on this same PR for this workflow. # Also, create different concurrency groups for different pushed commits, on push events. 
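The `check-docker-test-trigger` job above delegates the change detection to `scripts/check_test_trigger.py`, which is not part of this diff. As a rough, hypothetical sketch of the gating logic — assuming the script simply matches the files changed since the base commit against the crate and path triggers, whereas the real script may also resolve reverse crate dependencies — in Python:

import argparse
import fnmatch
import subprocess


def changed_files(commit_id: str) -> list[str]:
    # Files modified between the given base commit and HEAD.
    diff = subprocess.run(
        ["git", "diff", "--name-only", commit_id, "HEAD"],
        capture_output=True,
        text=True,
        check=True,
    )
    return diff.stdout.splitlines()


def should_run(files: list[str], crates: list[str], patterns: list[str]) -> bool:
    # A change triggers the test if it touches a trigger crate's directory
    # or matches one of the path-trigger glob patterns.
    crate_dirs = [f"crates/{crate}/" for crate in crates]
    return any(
        any(path.startswith(d) for d in crate_dirs)
        or any(fnmatch.fnmatch(path, p) for p in patterns)
        for path in files
    )


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_file", required=True)
    parser.add_argument("--commit_id", required=True)
    parser.add_argument("--crate_triggers", required=True)  # Comma-separated crate names.
    parser.add_argument("--path_triggers", required=True)  # Comma-separated glob patterns.
    args = parser.parse_args()

    result = should_run(
        changed_files(args.commit_id),
        args.crate_triggers.split(","),
        args.path_triggers.split(","),
    )
    # The workflow reads this file back and exposes it as the `should_run` job
    # output, which gates the `sequencer_docker_compose_test` job.
    with open(args.output_file, "w") as f:
        f.write("true" if result else "false")


if __name__ == "__main__":
    main()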
diff --git a/.github/workflows/verify-deps.yml b/.github/workflows/verify-deps.yml index b1389352385..699c24ab996 100644 --- a/.github/workflows/verify-deps.yml +++ b/.github/workflows/verify-deps.yml @@ -11,10 +11,12 @@ jobs: latest_deps: name: Latest Dependencies runs-on: starkware-ubuntu-24.04-medium - continue-on-error: true steps: - uses: actions/checkout@v4 - uses: ./.github/actions/bootstrap + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + - run: npm install -g ganache@7.4.3 - name: Update Dependencies run: cargo update --verbose - name: Build diff --git a/.gitignore b/.gitignore index 3933be59086..5484ec08468 100644 --- a/.gitignore +++ b/.gitignore @@ -12,16 +12,16 @@ CLI_TARGET/ *.pdb *.egg-info -build +/build dist target */.vscode/* *.DS_Store tmp_venv/* +sequencer_venv/* .vscode/settings.json /data /logs -/sequencer_data /target /.vscode # Git hooks @@ -29,9 +29,14 @@ tmp_venv/* /.idea __pycache__/ .idea/ +**/.venv + +# Native blockifier artifacts. +/crates/native_blockifier/build # Python artifacts. scripts/__pycache__ +monitoring_venv/ # Papyrus p2p sync test artifacts. scripts/papyrus/p2p_sync_e2e_test/data_client/ @@ -40,3 +45,5 @@ scripts/papyrus/p2p_sync_e2e_test/data_server/ # Papyrus helm chart deployments/papyrus/helm/config/* !deployments/papyrus/helm/config/example.json + +integration_test_temporary_logs diff --git a/BUILD b/BUILD index f7e67dff2a8..14d5f42ae2b 100644 --- a/BUILD +++ b/BUILD @@ -2,8 +2,8 @@ exports_files([ "target/release/libnative_blockifier.so", "target/release/shared_executables/starknet-native-compile", - "target/debug/committer_cli", - "target/release/committer_cli", - "target/x86_64-unknown-linux-musl/debug/committer_cli", - "target/x86_64-unknown-linux-musl/release/committer_cli", + "target/debug/starknet_committer_and_os_cli", + "target/release/starknet_committer_and_os_cli", + "target/x86_64-unknown-linux-musl/debug/starknet_committer_and_os_cli", + "target/x86_64-unknown-linux-musl/release/starknet_committer_and_os_cli", ]) diff --git a/Cargo.lock b/Cargo.lock index f42ffeda554..31b4e635956 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,37 +91,85 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b4ae82946772d69f868b9ef81fc66acb1b149ef9b4601849bec4bcf5da6552e" +dependencies = [ + "alloy-consensus", + "alloy-contract", + "alloy-core", + "alloy-eips", + "alloy-genesis", + "alloy-json-rpc", + "alloy-network", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-signer", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", +] + [[package]] name = "alloy-chains" -version = "0.1.52" +version = "0.1.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f15afc5993458b42739ab3b69bdb6b4c8112acd3997dbea9bc092c9517137c" +checksum = "28e2652684758b0d9b389d248b209ed9fd9989ef489a550265fe4bb8454fe7eb" dependencies = [ "alloy-primitives", "num_enum", - "strum 0.26.3", + "strum 0.27.1", ] [[package]] name = "alloy-consensus" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" +checksum = "6fbf458101ed6c389e9bb70a34ebc56039868ad10472540614816cdedc8f5265" dependencies = [ 
"alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-trie", + "auto_impl", "c-kzg", + "derive_more 2.0.1", + "either", + "k256", + "once_cell", + "rand 0.8.5", + "serde", + "serde_with", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-consensus-any" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc982af629e511292310fe85b433427fd38cb3105147632b574abc997db44c91" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", "serde", ] [[package]] name = "alloy-contract" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eefe64fd344cffa9cf9e3435ec4e93e6e9c3481bc37269af988bf497faf4a6a" +checksum = "cd0a0c1ddee20ecc14308aae21c2438c994df7b39010c26d70f86e1d8fdb8db0" dependencies = [ + "alloy-consensus", "alloy-dyn-abi", "alloy-json-abi", "alloy-network", @@ -133,14 +181,27 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 1.0.69", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-core" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d8bcce99ad10fe02640cfaec1c6bc809b837c783c1d52906aa5af66e2a196f6" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", ] [[package]] name = "alloy-dyn-abi" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44e3b98c37b3218924cd1d2a8570666b89662be54e5b182643855f783ea68b33" +checksum = "eb8e762aefd39a397ff485bc86df673465c4ad3ec8819cc60833a8a3ba5cdc87" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -150,7 +211,20 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.22", + "winnow 0.7.4", +] + +[[package]] +name = "alloy-eip2124" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "675264c957689f0fd75f5993a73123c2cc3b5c235a38f5b9037fe6c826bfb2c0" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "crc", + "serde", + "thiserror 2.0.12", ] [[package]] @@ -166,38 +240,68 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "9b15b13d38b366d01e818fe8e710d4d702ef7499eacd44926a06171dd9585d0c" dependencies = [ "alloy-primitives", "alloy-rlp", "serde", + "thiserror 2.0.12", ] [[package]] name = "alloy-eips" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" +checksum = "6e86967eb559920e4b9102e4cb825fe30f2e9467988353ce4809f0d3f2c90cd4" dependencies = [ + "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", "alloy-primitives", "alloy-rlp", "alloy-serde", + "auto_impl", "c-kzg", - "derive_more 1.0.0", + "derive_more 2.0.1", + "either", "once_cell", "serde", "sha2", ] +[[package]] +name = "alloy-genesis" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a40de6f5b53ecf5fd7756072942f41335426d9a3704cd961f77d854739933bcf" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "alloy-trie", + "serde", +] + +[[package]] +name = "alloy-hardforks" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "473ee2ab7f5262b36e8fbc1b5327d5c9d488ab247e31ac739b929dbe2444ae79" +dependencies = [ + "alloy-chains", + "alloy-eip2124", + "alloy-primitives", + "auto_impl", + "dyn-clone", +] + [[package]] name = "alloy-json-abi" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731ea743b3d843bc657e120fb1d1e9cc94f5dab8107e35a82125a63e6420a102" +checksum = "fe6beff64ad0aa6ad1019a3db26fef565aefeb011736150ab73ed3366c3cfd1b" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -207,65 +311,92 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab" +checksum = "27434beae2514d4a2aa90f53832cbdf6f23e4b5e2656d95eaf15f9276e2418b6" dependencies = [ "alloy-primitives", "alloy-sol-types", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.12", "tracing", ] [[package]] name = "alloy-network" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3705ce7d8602132bcf5ac7a1dd293a42adc2f183abf5907c30ac535ceca049" +checksum = "26a33a38c7486b1945f8d093ff027add2f3a8f83c7300dbad6165cc49150085e" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", "alloy-primitives", + "alloy-rpc-types-any", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", "alloy-sol-types", "async-trait", "auto_impl", + "derive_more 2.0.1", "futures-utils-wasm", - "thiserror 1.0.69", + "serde", + "serde_json", + "thiserror 2.0.12", ] [[package]] name = "alloy-network-primitives" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3" +checksum = "db973a7a23cbe96f2958e5687c51ce2d304b5c6d0dc5ccb3de8667ad8476f50b" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-serde", "serde", ] +[[package]] +name = "alloy-node-bindings" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "846c2248472c3a7efa8d9d6c51af5b545a88335af0ed7a851d01debfc3b03395" +dependencies = [ + "alloy-genesis", + "alloy-hardforks", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "alloy-signer-local", + "k256", + "rand 0.8.5", + "serde_json", + "tempfile", + "thiserror 2.0.12", + "tracing", + "url", +] + [[package]] name = "alloy-primitives" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788bb18e8f61d5d9340b52143f27771daf7e1dccbaf2741621d2493f9debf52e" +checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", - "derive_more 1.0.0", + "derive_more 2.0.1", "foldhash", "hashbrown 0.15.2", - "indexmap 2.7.0", + "indexmap 2.9.0", "itoa", "k256", "keccak-asm", @@ -281,9 +412,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927f708dd457ed63420400ee5f06945df9632d5d101851952056840426a10dc5" +checksum = "8b03bde77ad73feae14aa593bcabb932c8098c0f0750ead973331cfc0003a4e1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -291,9 +422,12 @@ dependencies = [ "alloy-json-rpc", "alloy-network", 
"alloy-network-primitives", + "alloy-node-bindings", "alloy-primitives", "alloy-rpc-client", + "alloy-rpc-types-anvil", "alloy-rpc-types-eth", + "alloy-sol-types", "alloy-transport", "alloy-transport-http", "async-stream", @@ -302,15 +436,17 @@ dependencies = [ "dashmap", "futures", "futures-utils-wasm", - "lru", + "lru 0.13.0", + "parking_lot", "pin-project", "reqwest 0.12.12", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.12", "tokio", "tracing", "url", + "wasmtimer", ] [[package]] @@ -332,18 +468,20 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "alloy-rpc-client" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d82952dca71173813d4e5733e2c986d8b04aea9e0f3b0a576664c232ad050a5" +checksum = "445a3298c14fae7afb5b9f2f735dead989f3dd83020c2ab8e48ed95d7b6d1acb" dependencies = [ "alloy-json-rpc", + "alloy-primitives", "alloy-transport", "alloy-transport-http", + "async-stream", "futures", "pin-project", "reqwest 0.12.12", @@ -353,35 +491,71 @@ dependencies = [ "tokio-stream", "tower 0.5.2", "tracing", + "tracing-futures", "url", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9157deaec6ba2ad7854f16146e4cd60280e76593eed79fdcb06e0fa8b6c60f77" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-anvil" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a80ee83ef97e7ffd667a81ebdb6154558dfd5e8f20d8249a10a12a1671a04b3" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-any" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "604dea1f00fd646debe8033abe8e767c732868bf8a5ae9df6321909ccbc99c56" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", ] [[package]] name = "alloy-rpc-types-eth" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83aa984386deda02482660aa31cb8ca1e63d533f1c31a52d7d181ac5ec68e9b8" +checksum = "7e13d71eac04513a71af4b3df580f52f2b4dcbff9d971cc9a52519acf55514cb" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-network-primitives", "alloy-primitives", "alloy-rlp", "alloy-serde", "alloy-sol-types", - "cfg-if", - "derive_more 1.0.0", - "hashbrown 0.14.5", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", + "thiserror 2.0.12", ] [[package]] name = "alloy-serde" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa" +checksum = "3a1cd73fc054de6353c7f22ff9b846b0f0f145cd0112da07d4119e41e9959207" dependencies = [ "alloy-primitives", "serde", @@ -390,83 +564,101 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0" +checksum = "c96fbde54bee943cd94ebacc8a62c50b38c7dfd2552dcd79ff61aea778b1bfcc" dependencies = [ "alloy-primitives", "async-trait", "auto_impl", + "either", 
"elliptic-curve", "k256", - "thiserror 1.0.69", + "thiserror 2.0.12", +] + +[[package]] +name = "alloy-signer-local" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6e72002cc1801d8b41e9892165e3a6551b7bd382bd9d0414b21e90c0c62551" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror 2.0.12", ] [[package]] name = "alloy-sol-macro" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a07b74d48661ab2e4b50bb5950d74dbff5e61dd8ed03bb822281b706d54ebacb" +checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19cc9c7f20b90f9be1a8f71a3d8e283a43745137b0837b1a1cb13159d37cad72" +checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.7.0", + "indexmap 2.9.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713b7e6dfe1cb2f55c80fb05fd22ed085a1b4e48217611365ed0ae598a74c6ac" +checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" dependencies = [ "alloy-json-abi", "const-hex", "dunce", "heck 0.5.0", + "macro-string", "proc-macro2", "quote", "serde_json", - "syn 2.0.95", + "syn 2.0.100", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eda2711ab2e1fb517fc6e2ffa9728c9a232e296d16810810e6957b781a1b8bc" +checksum = "6d162f8524adfdfb0e4bd0505c734c985f3e2474eb022af32eef0d52a4f3935c" dependencies = [ "serde", - "winnow 0.6.22", + "winnow 0.7.4", ] [[package]] name = "alloy-sol-types" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b478bc9c0c4737a04cd976accde4df7eba0bdc0d90ad6ff43d58bc93cf79c1" +checksum = "d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -477,28 +669,31 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33616b2edf7454302a1d48084db185e52c309f73f6c10be99b0fe39354b3f1e9" +checksum = "9aec325c2af8562ef355c02aeb527c755a07e9d8cf6a1e65dda8d0bf23e29b2c" dependencies = [ "alloy-json-rpc", "base64 0.22.1", - "futures-util", + "derive_more 2.0.1", + "futures", "futures-utils-wasm", + "parking_lot", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.12", "tokio", "tower 0.5.2", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.3.6" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a944f5310c690b62bbb3e7e5ce34527cbd36b2d18532a797af123271ce595a49" +checksum = 
"a082c9473c6642cce8b02405a979496126a03b096997888e86229afad05db06c" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -509,6 +704,22 @@ dependencies = [ "url", ] +[[package]] +name = "alloy-trie" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6917c79e837aa7b77b7a6dae9f89cbe15313ac161c4d3cfaf8909ef21f3d22d8" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more 1.0.0", + "nybbles", + "serde", + "smallvec", + "tracing", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -586,36 +797,1423 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] -name = "aquamarine" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2" -dependencies = [ - "include_dir", - "itertools 0.10.5", - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.95", +name = "apollo_batcher" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_batcher_types", + "apollo_class_manager_types", + "apollo_config", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_provider_types", + "apollo_mempool_types", + "apollo_metrics", + "apollo_reverts", + "apollo_starknet_client", + "apollo_state_reader", + "apollo_state_sync_types", + "apollo_storage", + "assert_matches", + "async-trait", + "blockifier", + "cairo-lang-starknet-classes", + "cairo-vm", + "chrono", + "futures", + "indexmap 2.9.0", + "itertools 0.12.1", + "mempool_test_utils", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "mockall", + "pretty_assertions", + "reqwest 0.11.27", + "rstest", + "serde", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tracing", + "url", + "validator", ] [[package]] -name = "arc-swap" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +name = "apollo_batcher_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_proc_macros", + "apollo_state_sync_types", + "async-trait", + "blockifier", + "chrono", + "derive_more 0.99.18", + "indexmap 2.9.0", + "mockall", + "serde", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", +] [[package]] -name = "ark-ec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +name = "apollo_central_sync" +version = "0.15.0-rc.2" dependencies = [ - "ark-ff 0.4.2", - "ark-poly 0.4.2", - "ark-serialize 0.4.2", - "ark-std 0.4.0", - "derivative", + "apollo_class_manager_types", + "apollo_config", + "apollo_proc_macros", + "apollo_starknet_client", + "apollo_state_sync_metrics", + "apollo_storage", + "apollo_test_utils", + "assert_matches", + "async-stream", + "async-trait", + "cairo-lang-starknet-classes", + "chrono", + "futures", + "futures-util", + "indexmap 2.9.0", + "itertools 0.12.1", + "lru 0.12.5", + "metrics 0.24.1", + "mockall", + "papyrus_base_layer", + "papyrus_common", + "pretty_assertions", + "reqwest 0.11.27", + "serde", + "simple_logger", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "url", + "validator", +] + +[[package]] +name = "apollo_class_manager" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_class_manager_types", + 
"apollo_compile_to_casm_types", + "apollo_config", + "apollo_infra", + "apollo_metrics", + "apollo_storage", + "assert_matches", + "async-trait", + "hex", + "mockall", + "serde", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = "apollo_class_manager_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_compile_to_casm_types", + "apollo_infra", + "apollo_proc_macros", + "async-trait", + "mockall", + "serde", + "serde_json", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", +] + +[[package]] +name = "apollo_compilation_utils" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra_utils", + "assert_matches", + "cairo-lang-sierra", + "cairo-lang-starknet-classes", + "cairo-lang-utils", + "cairo-native", + "rlimit", + "rstest", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "tempfile", + "thiserror 1.0.69", +] + +[[package]] +name = "apollo_compile_to_casm" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_compilation_utils", + "apollo_compile_to_casm_types", + "apollo_config", + "apollo_infra", + "apollo_infra_utils", + "apollo_metrics", + "apollo_proc_macros", + "assert_matches", + "async-trait", + "cairo-lang-starknet-classes", + "mempool_test_utils", + "pretty_assertions", + "serde", + "serde_json", + "starknet_api", + "thiserror 1.0.69", + "tracing", + "validator", +] + +[[package]] +name = "apollo_compile_to_casm_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_proc_macros", + "async-trait", + "mockall", + "serde", + "serde_json", + "starknet_api", + "thiserror 1.0.69", +] + +[[package]] +name = "apollo_compile_to_native" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_compilation_utils", + "apollo_config", + "apollo_infra_utils", + "assert_matches", + "cairo-lang-starknet-classes", + "cairo-native", + "mempool_test_utils", + "rstest", + "serde", + "tempfile", + "toml_test_utils", + "validator", +] + +[[package]] +name = "apollo_config" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra_utils", + "apollo_test_utils", + "assert_matches", + "clap", + "const_format", + "itertools 0.12.1", + "lazy_static", + "serde", + "serde_json", + "starknet_api", + "strum_macros 0.25.3", + "tempfile", + "thiserror 1.0.69", + "tracing", + "url", + "validator", +] + +[[package]] +name = "apollo_consensus" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_metrics", + "apollo_network", + "apollo_network_types", + "apollo_protobuf", + "apollo_storage", + "apollo_test_utils", + "apollo_time", + "async-trait", + "enum-as-inner", + "futures", + "lazy_static", + "lru 0.12.5", + "mockall", + "prost", + "serde", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "test-case", + "thiserror 1.0.69", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = "apollo_consensus_manager" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_batcher_types", + "apollo_class_manager_types", + "apollo_config", + "apollo_consensus", + "apollo_consensus_orchestrator", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_gas_price", + "apollo_l1_gas_price_types", + "apollo_metrics", + "apollo_network", + "apollo_protobuf", + "apollo_reverts", + "apollo_state_sync_types", + "apollo_time", + "async-trait", + "futures", + "mockall", + "rstest", + "serde", + "starknet_api", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = 
"apollo_consensus_orchestrator" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_batcher", + "apollo_batcher_types", + "apollo_class_manager_types", + "apollo_config", + "apollo_consensus", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_gas_price_types", + "apollo_metrics", + "apollo_network", + "apollo_proc_macros", + "apollo_protobuf", + "apollo_starknet_client", + "apollo_state_sync_types", + "apollo_storage", + "apollo_test_utils", + "apollo_time", + "assert_matches", + "async-trait", + "blockifier", + "cairo-lang-casm", + "cairo-lang-starknet-classes", + "cairo-lang-utils", + "cairo-vm", + "chrono", + "ethnum", + "futures", + "indexmap 2.9.0", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "mockall", + "mockito 1.6.1", + "num-bigint 0.4.6", + "num-rational 0.4.2", + "paste", + "reqwest 0.11.27", + "rstest", + "serde", + "serde_json", + "shared_execution_objects", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing", + "url", + "validator", +] + +[[package]] +name = "apollo_dashboard" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_batcher", + "apollo_class_manager", + "apollo_compile_to_casm", + "apollo_consensus", + "apollo_consensus_manager", + "apollo_consensus_orchestrator", + "apollo_gateway", + "apollo_http_server", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_gas_price", + "apollo_l1_provider", + "apollo_mempool", + "apollo_mempool_p2p", + "apollo_metrics", + "apollo_state_sync_metrics", + "blockifier", + "indexmap 2.9.0", + "serde", + "serde_json", +] + +[[package]] +name = "apollo_deployments" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_infra_utils", + "apollo_node", + "apollo_protobuf", + "indexmap 2.9.0", + "serde", + "serde_json", + "serde_with", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "tempfile", + "url", +] + +[[package]] +name = "apollo_gateway" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_class_manager_types", + "apollo_compilation_utils", + "apollo_config", + "apollo_gateway_types", + "apollo_infra", + "apollo_mempool", + "apollo_mempool_types", + "apollo_metrics", + "apollo_network_types", + "apollo_proc_macros", + "apollo_rpc", + "apollo_state_sync_types", + "apollo_test_utils", + "assert_matches", + "async-trait", + "axum", + "blockifier", + "blockifier_test_utils", + "cairo-lang-sierra-to-casm", + "cairo-lang-starknet-classes", + "clap", + "criterion", + "futures", + "lazy_static", + "mempool_test_utils", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "mockall", + "mockito 1.6.1", + "num-bigint 0.4.6", + "num-rational 0.4.2", + "pretty_assertions", + "reqwest 0.11.27", + "rstest", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-test", + "validator", +] + +[[package]] +name = "apollo_gateway_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_network_types", + "apollo_proc_macros", + "apollo_rpc", + "async-trait", + "enum-assoc", + "enum-iterator", + "mockall", + "serde", + "serde_json", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "apollo_http_server" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_gateway_types", + "apollo_infra", + "apollo_infra_utils", + "apollo_metrics", + "apollo_proc_macros", + "assert_matches", + "axum", + 
"base64 0.13.1", + "blockifier", + "blockifier_reexecution", + "blockifier_test_utils", + "flate2", + "futures", + "hyper 0.14.32", + "mempool_test_utils", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "regex", + "reqwest 0.11.27", + "rstest", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-test", + "validator", +] + +[[package]] +name = "apollo_infra" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_infra_utils", + "apollo_metrics", + "assert_matches", + "async-trait", + "hyper 0.14.32", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "once_cell", + "pretty_assertions", + "rstest", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "time", + "tokio", + "tower 0.4.13", + "tracing", + "tracing-subscriber", + "validator", +] + +[[package]] +name = "apollo_infra_utils" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_proc_macros", + "assert-json-diff", + "colored 3.0.0", + "nix 0.20.2", + "num_enum", + "pretty_assertions", + "rstest", + "serde", + "serde_json", + "socket2 0.5.8", + "tempfile", + "thiserror 1.0.69", + "tokio", + "toml", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "apollo_integration_tests" +version = "0.15.0-rc.2" +dependencies = [ + "alloy", + "anyhow", + "apollo_batcher", + "apollo_class_manager", + "apollo_config", + "apollo_consensus", + "apollo_consensus_manager", + "apollo_consensus_orchestrator", + "apollo_deployments", + "apollo_gateway", + "apollo_http_server", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_endpoint_monitor", + "apollo_l1_gas_price", + "apollo_l1_gas_price_types", + "apollo_l1_provider", + "apollo_mempool", + "apollo_mempool_p2p", + "apollo_monitoring_endpoint", + "apollo_network", + "apollo_node", + "apollo_protobuf", + "apollo_rpc", + "apollo_state_sync", + "apollo_state_sync_metrics", + "apollo_storage", + "apollo_test_utils", + "assert_matches", + "axum", + "blockifier", + "blockifier_test_utils", + "cairo-lang-starknet-classes", + "clap", + "futures", + "hex", + "indexmap 2.9.0", + "mempool_test_utils", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "papyrus_base_layer", + "pretty_assertions", + "rstest", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "tempfile", + "tokio", + "tokio-util", + "tracing", + "url", +] + +[[package]] +name = "apollo_l1_endpoint_monitor" +version = "0.15.0-rc.2" +dependencies = [ + "alloy", + "apollo_config", + "apollo_infra", + "apollo_l1_endpoint_monitor_types", + "async-trait", + "mockito 1.6.1", + "papyrus_base_layer", + "serde", + "tokio", + "tracing", + "url", + "validator", +] + +[[package]] +name = "apollo_l1_endpoint_monitor_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_proc_macros", + "async-trait", + "mockall", + "serde", + "strum_macros 0.25.3", + "thiserror 1.0.69", + "url", +] + +[[package]] +name = "apollo_l1_gas_price" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_gas_price_types", + "apollo_metrics", + "async-trait", + "futures", + "hex", + "lru 0.12.5", + "mockall", + "mockito 1.6.1", + "papyrus_base_layer", + "reqwest 0.11.27", + "rstest", + "serde", + "serde_json", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing", + "url", + "validator", +] + +[[package]] +name = "apollo_l1_gas_price_types" +version = "0.15.0-rc.2" +dependencies = 
[ + "apollo_infra", + "apollo_proc_macros", + "async-trait", + "mockall", + "papyrus_base_layer", + "reqwest 0.11.27", + "serde", + "serde_json", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "apollo_l1_provider" +version = "0.15.0-rc.2" +dependencies = [ + "alloy", + "apollo_batcher_types", + "apollo_config", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_endpoint_monitor_types", + "apollo_l1_provider_types", + "apollo_metrics", + "apollo_state_sync_types", + "apollo_time", + "assert_matches", + "async-trait", + "hex", + "indexmap 2.9.0", + "itertools 0.12.1", + "papyrus_base_layer", + "pretty_assertions", + "rstest", + "serde", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = "apollo_l1_provider_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_proc_macros", + "async-trait", + "indexmap 2.9.0", + "mockall", + "papyrus_base_layer", + "serde", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "apollo_mempool" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_infra", + "apollo_mempool_p2p_types", + "apollo_mempool_types", + "apollo_metrics", + "apollo_network", + "apollo_network_types", + "apollo_test_utils", + "apollo_time", + "assert_matches", + "async-trait", + "derive_more 0.99.18", + "indexmap 2.9.0", + "itertools 0.12.1", + "mempool_test_utils", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "mockall", + "pretty_assertions", + "rand 0.8.5", + "rstest", + "serde", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = "apollo_mempool_p2p" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_class_manager_types", + "apollo_config", + "apollo_gateway_types", + "apollo_infra", + "apollo_mempool_p2p_types", + "apollo_metrics", + "apollo_network", + "apollo_network_types", + "apollo_protobuf", + "apollo_test_utils", + "async-trait", + "futures", + "libp2p", + "mockall", + "rand_chacha 0.3.1", + "serde", + "starknet_api", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = "apollo_mempool_p2p_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_network_types", + "apollo_proc_macros", + "async-trait", + "mockall", + "serde", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", +] + +[[package]] +name = "apollo_mempool_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_network_types", + "apollo_proc_macros", + "async-trait", + "indexmap 2.9.0", + "mockall", + "serde", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", +] + +[[package]] +name = "apollo_metrics" +version = "0.15.0-rc.2" +dependencies = [ + "indexmap 2.9.0", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "num-traits", + "paste", + "regex", + "rstest", + "strum 0.25.0", + "strum_macros 0.25.3", +] + +[[package]] +name = "apollo_monitoring_endpoint" +version = "0.15.0-rc.2" +dependencies = [ + "anyhow", + "apollo_config", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_provider_types", + "apollo_mempool_types", + "apollo_metrics", + "axum", + "hyper 0.14.32", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "num-traits", + "pretty_assertions", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tower 0.4.13", 
+ "tracing", + "validator", +] + +[[package]] +name = "apollo_network" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_metrics", + "apollo_network_types", + "assert_matches", + "async-stream", + "async-trait", + "bytes", + "deadqueue", + "defaultmap", + "derive_more 0.99.18", + "futures", + "lazy_static", + "libp2p", + "libp2p-swarm-test", + "metrics 0.24.1", + "mockall", + "pretty_assertions", + "replace_with", + "rstest", + "serde", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tokio-retry", + "tokio-stream", + "tracing", + "unsigned-varint 0.8.0", + "validator", + "void", + "waker-fn", +] + +[[package]] +name = "apollo_network_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_test_utils", + "lazy_static", + "libp2p", + "rand_chacha 0.3.1", + "serde", +] + +[[package]] +name = "apollo_node" +version = "0.15.0-rc.2" +dependencies = [ + "anyhow", + "apollo_batcher", + "apollo_batcher_types", + "apollo_class_manager", + "apollo_class_manager_types", + "apollo_compile_to_casm", + "apollo_compile_to_casm_types", + "apollo_config", + "apollo_consensus_manager", + "apollo_gateway", + "apollo_gateway_types", + "apollo_http_server", + "apollo_infra", + "apollo_infra_utils", + "apollo_l1_endpoint_monitor", + "apollo_l1_endpoint_monitor_types", + "apollo_l1_gas_price", + "apollo_l1_gas_price_types", + "apollo_l1_provider", + "apollo_l1_provider_types", + "apollo_mempool", + "apollo_mempool_p2p", + "apollo_mempool_p2p_types", + "apollo_mempool_types", + "apollo_monitoring_endpoint", + "apollo_reverts", + "apollo_state_sync", + "apollo_state_sync_types", + "clap", + "const_format", + "futures", + "papyrus_base_layer", + "pretty_assertions", + "rstest", + "serde", + "serde_json", + "tikv-jemallocator", + "tokio", + "tokio-util", + "tracing", + "validator", +] + +[[package]] +name = "apollo_p2p_sync" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_class_manager_types", + "apollo_config", + "apollo_network", + "apollo_proc_macros", + "apollo_protobuf", + "apollo_state_sync_metrics", + "apollo_state_sync_types", + "apollo_storage", + "apollo_test_utils", + "assert_matches", + "async-stream", + "async-trait", + "chrono", + "enum-iterator", + "futures", + "indexmap 2.9.0", + "lazy_static", + "metrics 0.24.1", + "mockall", + "papyrus_common", + "rand 0.8.5", + "rand_chacha 0.3.1", + "serde", + "starknet-types-core", + "starknet_api", + "static_assertions", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "validator", +] + +[[package]] +name = "apollo_proc_macros" +version = "0.15.0-rc.2" +dependencies = [ + "lazy_static", + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "apollo_proc_macros_tests" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_metrics", + "apollo_proc_macros", + "apollo_test_utils", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "papyrus_common", + "prometheus-parse", + "rstest", +] + +[[package]] +name = "apollo_protobuf" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_test_utils", + "bytes", + "indexmap 2.9.0", + "lazy_static", + "papyrus_common", + "primitive-types", + "prost", + "prost-build", + "protoc-prebuilt", + "rand 0.8.5", + "rand_chacha 0.3.1", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "tempfile", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "apollo_reverts" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_storage", + "futures", + "serde", + "starknet_api", + "tracing", + "validator", +] + +[[package]] +name 
= "apollo_rpc" +version = "0.15.0-rc.2" +dependencies = [ + "anyhow", + "apollo_class_manager_types", + "apollo_config", + "apollo_proc_macros", + "apollo_rpc_execution", + "apollo_starknet_client", + "apollo_storage", + "apollo_test_utils", + "assert_matches", + "async-trait", + "base64 0.13.1", + "cairo-lang-casm", + "cairo-lang-starknet-classes", + "camelpaste", + "derive_more 0.99.18", + "enum-iterator", + "ethers", + "flate2", + "hex", + "hyper 0.14.32", + "indexmap 2.9.0", + "insta", + "itertools 0.12.1", + "jsonrpsee", + "jsonschema", + "lazy_static", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "mockall", + "papyrus_common", + "pretty_assertions", + "prometheus-parse", + "rand 0.8.5", + "rand_chacha 0.3.1", + "regex", + "reqwest 0.11.27", + "serde", + "serde_json", + "starknet-core", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "tokio", + "tower 0.4.13", + "tracing", + "validator", +] + +[[package]] +name = "apollo_rpc_execution" +version = "0.15.0-rc.2" +dependencies = [ + "anyhow", + "apollo_class_manager_types", + "apollo_config", + "apollo_storage", + "apollo_test_utils", + "assert_matches", + "blockifier", + "cairo-lang-casm", + "cairo-lang-starknet-classes", + "cairo-lang-utils", + "cairo-vm", + "indexmap 2.9.0", + "itertools 0.12.1", + "lazy_static", + "mockall", + "papyrus_common", + "pretty_assertions", + "rand 0.8.5", + "rand_chacha 0.3.1", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "apollo_starknet_client" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_test_utils", + "assert_matches", + "async-trait", + "cairo-lang-starknet-classes", + "enum-iterator", + "http 0.2.12", + "indexmap 2.9.0", + "mockall", + "mockito 0.31.1", + "os_info", + "papyrus_common", + "pretty_assertions", + "rand 0.8.5", + "rand_chacha 0.3.1", + "reqwest 0.11.27", + "serde", + "serde_json", + "serde_repr", + "simple_logger", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "thiserror 1.0.69", + "tokio", + "tokio-retry", + "tracing", + "url", +] + +[[package]] +name = "apollo_starknet_os_program" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra_utils", + "blockifier", + "cairo-vm", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "apollo_state_reader" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_class_manager_types", + "apollo_storage", + "assert_matches", + "blockifier", + "blockifier_test_utils", + "cairo-lang-starknet-classes", + "indexmap 2.9.0", + "rstest", + "starknet-types-core", + "starknet_api", + "tokio", +] + +[[package]] +name = "apollo_state_sync" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_central_sync", + "apollo_class_manager_types", + "apollo_config", + "apollo_infra", + "apollo_network", + "apollo_p2p_sync", + "apollo_reverts", + "apollo_rpc", + "apollo_starknet_client", + "apollo_state_sync_metrics", + "apollo_state_sync_types", + "apollo_storage", + "apollo_test_utils", + "async-trait", + "futures", + "indexmap 2.9.0", + "libp2p", + "papyrus_common", + "rand_chacha 0.3.1", + "serde", + "starknet-types-core", + "starknet_api", + "tokio", + "tracing", + "validator", +] + +[[package]] +name = "apollo_state_sync_metrics" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_metrics", + "apollo_storage", + "starknet_api", +] + +[[package]] +name = 
"apollo_state_sync_types" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra", + "apollo_proc_macros", + "apollo_storage", + "async-trait", + "futures", + "mockall", + "serde", + "starknet-types-core", + "starknet_api", + "strum_macros 0.25.3", + "thiserror 1.0.69", +] + +[[package]] +name = "apollo_storage" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_config", + "apollo_proc_macros", + "apollo_test_utils", + "assert_matches", + "byteorder", + "cairo-lang-casm", + "cairo-lang-starknet-classes", + "cairo-lang-utils", + "camelpaste", + "clap", + "human_bytes", + "indexmap 2.9.0", + "insta", + "integer-encoding", + "lazy_static", + "libmdbx", + "memmap2", + "metrics 0.24.1", + "metrics-exporter-prometheus", + "num-bigint 0.4.6", + "num-traits", + "page_size", + "papyrus_common", + "parity-scale-codec", + "paste", + "pretty_assertions", + "primitive-types", + "prometheus-parse", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rstest", + "schemars", + "serde", + "serde_json", + "simple_logger", + "starknet-types-core", + "starknet_api", + "statistical", + "tempfile", + "test-case", + "test-log", + "thiserror 1.0.69", + "tokio", + "tracing", + "validator", + "zstd 0.13.2", +] + +[[package]] +name = "apollo_task_executor" +version = "0.15.0-rc.2" +dependencies = [ + "futures", + "rstest", + "tokio", + "tokio-test", +] + +[[package]] +name = "apollo_test_utils" +version = "0.15.0-rc.2" +dependencies = [ + "cairo-lang-casm", + "cairo-lang-starknet-classes", + "cairo-lang-utils", + "indexmap 2.9.0", + "num-bigint 0.4.6", + "pretty_assertions", + "primitive-types", + "prometheus-parse", + "rand 0.8.5", + "rand_chacha 0.3.1", + "reqwest 0.11.27", + "serde", + "serde_json", + "starknet-types-core", + "starknet_api", +] + +[[package]] +name = "apollo_time" +version = "0.15.0-rc.2" +dependencies = [ + "chrono", + "mockall", + "tokio", +] + +[[package]] +name = "aquamarine" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2" +dependencies = [ + "include_dir", + "itertools 0.10.5", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", "hashbrown 0.13.2", "itertools 0.10.5", "num-traits", @@ -728,7 +2326,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -766,7 +2364,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -895,7 +2493,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -939,6 +2537,9 @@ name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] [[package]] name = "ascii-canvas" @@ -946,7 +2547,16 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" dependencies = [ - "term", + "term 0.7.0", +] + +[[package]] +name = "ascii-canvas" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1e3e699d84ab1b0911a1010c5c106aa34ae89aeac103be5ce0c3859db1e891" +dependencies = [ + "term 1.0.1", ] [[package]] @@ -973,7 +2583,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "synstructure", ] @@ -985,7 +2595,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1112,7 +2722,7 @@ dependencies = [ "futures-lite 2.5.0", "parking", "polling 3.7.4", - "rustix 0.38.42", + "rustix 0.38.43", "slab", "tracing", "windows-sys 0.59.0", @@ -1133,7 +2743,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] @@ -1162,9 +2772,9 @@ dependencies = [ "async-task", "blocking", "cfg-if", - "event-listener 5.3.1", + "event-listener 5.4.0", "futures-lite 2.5.0", - "rustix 0.38.42", + "rustix 0.38.43", "tracing", ] @@ -1176,7 +2786,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1191,7 +2801,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.42", + "rustix 0.38.43", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -1242,7 +2852,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1253,13 +2863,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.84" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1324,7 +2934,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1342,6 +2952,31 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "aws-lc-rs" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f409eb70b561706bf8abba8ca9c112729c481595893fd06a2dd9af8ed8441148" +dependencies = [ + "aws-lc-sys", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" 
+version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "923ded50f602b3007e5e63e3f094c479d9c8a9b42d7f4034e4afe456aa48bfd2" +dependencies = [ + "bindgen 0.69.5", + "cc", + "cmake", + "dunce", + "fs_extra", + "paste", +] + [[package]] name = "axum" version = "0.6.20" @@ -1458,14 +3093,11 @@ dependencies = [ ] [[package]] -name = "bigdecimal" -version = "0.3.1" +name = "bincode" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", "serde", ] @@ -1495,7 +3127,30 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.95", + "syn 2.0.100", +] + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags 2.6.0", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn 2.0.100", + "which", ] [[package]] @@ -1513,7 +3168,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1533,7 +3188,7 @@ dependencies = [ "regex", "rustc-hash 2.1.0", "shlex", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -1542,7 +3197,16 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" dependencies = [ - "bit-vec", + "bit-vec 0.6.3", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec 0.8.0", ] [[package]] @@ -1551,6 +3215,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.2.1" @@ -1584,6 +3254,15 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2s" +version = "0.15.0-rc.2" +dependencies = [ + "blake2", + "digest 0.10.7", + "starknet-types-core", +] + [[package]] name = "block-buffer" version = "0.9.0" @@ -1604,32 +3283,41 @@ dependencies = [ [[package]] name = "blockifier" -version = "0.14.0-rc.3" +version = "0.15.0-rc.2" dependencies = [ "anyhow", + "apollo_compilation_utils", + "apollo_compile_to_native", + "apollo_config", + "apollo_infra_utils", + "apollo_metrics", "ark-ec 0.4.2", "ark-ff 0.4.2", "ark-secp256k1 0.4.0", "ark-secp256r1 0.4.0", "assert_matches", + "blockifier_test_utils", "cached", "cairo-lang-casm", "cairo-lang-runner", + "cairo-lang-sierra", "cairo-lang-starknet-classes", "cairo-native", "cairo-vm", "criterion", + "dashmap", "derive_more 0.99.18", + "expect-test", "glob", - "indexmap 2.7.0", + "indexmap 2.9.0", "itertools 0.12.1", "keccak", "log", + "mockall", "num-bigint 0.4.6", "num-integer", "num-rational 0.4.2", "num-traits", - "papyrus_config", "paste", "phf", "pretty_assertions", @@ -1637,27 
+3325,26 @@ dependencies = [ "regex", "rstest", "rstest_reuse", - "semver 1.0.24", + "semver 1.0.26", "serde", "serde_json", "sha2", + "sierra-emu", "starknet-types-core", "starknet_api", - "starknet_infra_utils", - "starknet_sierra_multicompile", "strum 0.25.0", "strum_macros 0.25.3", - "tempfile", "test-case", "thiserror 1.0.69", "tikv-jemallocator", - "toml", ] [[package]] name = "blockifier_reexecution" -version = "0.14.0-rc.3" +version = "0.15.0-rc.2" dependencies = [ + "apollo_gateway", + "apollo_rpc_execution", "assert_matches", "blockifier", "cairo-lang-starknet-classes", @@ -1665,8 +3352,7 @@ dependencies = [ "clap", "flate2", "google-cloud-storage", - "indexmap 2.7.0", - "papyrus_execution", + "indexmap 2.9.0", "pretty_assertions", "retry", "rstest", @@ -1675,11 +3361,29 @@ dependencies = [ "starknet-core", "starknet-types-core", "starknet_api", - "starknet_gateway", "thiserror 1.0.69", "tokio", ] +[[package]] +name = "blockifier_test_utils" +version = "0.15.0-rc.2" +dependencies = [ + "apollo_infra_utils", + "cairo-lang-starknet-classes", + "pretty_assertions", + "rstest", + "serde_json", + "starknet-types-core", + "starknet_api", + "strum 0.25.0", + "strum_macros 0.25.3", + "tempfile", + "tokio", + "tracing", + "tracing-test", +] + [[package]] name = "blocking" version = "1.6.1" @@ -1705,6 +3409,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "cfg_aliases", +] + [[package]] name = "bs58" version = "0.5.1" @@ -1832,9 +3545,9 @@ checksum = "ade8366b8bd5ba243f0a58f036cc0ca8a2f069cff1a2351ef1cac6b083e16fc0" [[package]] name = "cairo-lang-casm" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff11aec4eb39d670efa69d8a6bda5803661578e9dc1be54ea948fe82fb39995" +checksum = "88bf35a939eaed69b8a71405c5d56fa52c3b1c76701b8c1056fe22b3e2569c7d" dependencies = [ "cairo-lang-utils", "indoc 2.0.5", @@ -1847,9 +3560,9 @@ dependencies = [ [[package]] name = "cairo-lang-compiler" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f704af3ba7499d63a695688d2f5b40109820f8ca38d78092a4aa4a64ec600d2" +checksum = "c13c2341fb2272999f6152b0fc2238148fe93c745183a07acba031b27eeb0b66" dependencies = [ "anyhow", "cairo-lang-defs", @@ -1866,54 +3579,57 @@ dependencies = [ "indoc 2.0.5", "rayon", "rust-analyzer-salsa", - "semver 1.0.24", + "semver 1.0.26", "smol_str", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-debug" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b22020eb5184ceab861f249ca9fb5d17dbc1278fa88216663e1711da64fbe5a" +checksum = "17cf782d64a29c4acb1eb2759c39783d5e92c397d5ae3775754edae5d2c665ee" dependencies = [ "cairo-lang-utils", ] [[package]] name = "cairo-lang-defs" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2322f996ea69a064a9cbad43b996e0352bf0218c6a27b8ff1423ad7942faaa29" +checksum = "80a146574d443e7fba848c069d7d84879c4635df5811963a18bf042df3e34e61" dependencies = [ + "bincode 1.3.3", "cairo-lang-debug", "cairo-lang-diagnostics", "cairo-lang-filesystem", "cairo-lang-parser", "cairo-lang-syntax", "cairo-lang-utils", - "itertools 0.12.1", + "itertools 0.14.0", 
"rust-analyzer-salsa", + "serde", "smol_str", + "typetag", ] [[package]] name = "cairo-lang-diagnostics" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d4751c8b3835df963f9aed56a2dba2bb000af824809b2694c0876e3e9f7dee" +checksum = "cb704187b1543aa4a2e0030d13ae6e0ad63712e5362cac3ded3237623a2b1b32" dependencies = [ "cairo-lang-debug", "cairo-lang-filesystem", "cairo-lang-utils", - "itertools 0.12.1", + "itertools 0.14.0", ] [[package]] name = "cairo-lang-eq-solver" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed621df2fcc246a81a71ace26fd1be34bbd19aeb60535c85d8e794710a2bef5e" +checksum = "a5a2e6145241a4812820948278a86e11c25adc30e9a19b2e24b2517be19eedac" dependencies = [ "cairo-lang-utils", "good_lp", @@ -1921,15 +3637,15 @@ dependencies = [ [[package]] name = "cairo-lang-filesystem" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0baa53250acf692f7214e997ec864a529d4aad7392f5a7798805659c195ac321" +checksum = "3c903cdcae48aa02c0ed41e1fd54b3231da51f1edf9538327ed155463594b8be" dependencies = [ "cairo-lang-debug", "cairo-lang-utils", "path-clean", "rust-analyzer-salsa", - "semver 1.0.24", + "semver 1.0.26", "serde", "smol_str", "toml", @@ -1937,9 +3653,9 @@ dependencies = [ [[package]] name = "cairo-lang-formatter" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb00211393a7f992bcf33a17bbe189e1a9dbe247a2de04fe22a313d6b684f746" +checksum = "a943f0818cfc091c3302acd92053ff2a642004d32757c3b4f94c5aa18e184ac0" dependencies = [ "anyhow", "cairo-lang-diagnostics", @@ -1949,18 +3665,20 @@ dependencies = [ "cairo-lang-utils", "diffy", "ignore", - "itertools 0.12.1", + "itertools 0.14.0", "rust-analyzer-salsa", "serde", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-lowering" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949be6b96044de47aaa2ecf99167a5ffa893de2ca21b1a869cb726cf90f37eec" +checksum = "7b86cd38af57c36b2a72a2483d04d8b17ea2f9a554fd45b9d83375773f948818" dependencies = [ + "assert_matches", + "bincode 1.3.3", "cairo-lang-debug", "cairo-lang-defs", "cairo-lang-diagnostics", @@ -1971,28 +3689,29 @@ dependencies = [ "cairo-lang-syntax", "cairo-lang-utils", "id-arena", - "itertools 0.12.1", + "itertools 0.14.0", "log", "num-bigint 0.4.6", "num-integer", "num-traits", "rust-analyzer-salsa", - "smol_str", + "serde", ] [[package]] name = "cairo-lang-parser" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3dcc5e85867b0f715b30d62585ac750a4c7ab1f92813599ba17cc29e1d0a1d1" +checksum = "f64f0a5ca9ac54975cdec98fe8491d9b62922b7cb048e6ded00188383b841b25" dependencies = [ "cairo-lang-diagnostics", "cairo-lang-filesystem", + "cairo-lang-primitive-token", "cairo-lang-syntax", "cairo-lang-syntax-codegen", "cairo-lang-utils", - "colored", - "itertools 0.12.1", + "colored 3.0.0", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", "rust-analyzer-salsa", @@ -2002,9 +3721,9 @@ dependencies = [ [[package]] name = "cairo-lang-plugins" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239cebcb9024d9e8eb26496055ffa325e5c2d868f798637fc69f25ac3b2ab5ca" +checksum = 
"6230c6d37e5e8361900709112e56b42e48478806b829a086727ef59b2f7f3310" dependencies = [ "cairo-lang-defs", "cairo-lang-diagnostics", @@ -2014,7 +3733,7 @@ dependencies = [ "cairo-lang-utils", "indent", "indoc 2.0.5", - "itertools 0.12.1", + "itertools 0.14.0", "rust-analyzer-salsa", "smol_str", ] @@ -2027,33 +3746,33 @@ checksum = "123ac0ecadf31bacae77436d72b88fa9caef2b8e92c89ce63a125ae911a12fae" [[package]] name = "cairo-lang-proc-macros" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a98a058656493f4ef4b7fc51ed4fa46cc9b2834262815959746bf1696f1c50f" +checksum = "b1e4872352761cf6d7f47eeb1626e3b1d84a514017fb4251173148d8c04f36d5" dependencies = [ "cairo-lang-debug", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "cairo-lang-project" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d111a3ffe3b463e79af5d6049a6e23fc5c7048e0bacea5ebd268107ded21012" +checksum = "2327b8c070f9e50ac85a410f03c6aaf6a0dffee5ba8299d6af4bf2344587ac42" dependencies = [ "cairo-lang-filesystem", "cairo-lang-utils", "serde", - "thiserror 1.0.69", + "thiserror 2.0.12", "toml", ] [[package]] name = "cairo-lang-runnable-utils" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dae123904bb7831433868377bb6e7d0a054504b6ea00535cc192abb1a364950" +checksum = "775b064b0265e8408565b1ec9d360116015acf35753f02db255cbb13ad30670e" dependencies = [ "cairo-lang-casm", "cairo-lang-sierra", @@ -2063,19 +3782,19 @@ dependencies = [ "cairo-lang-sierra-type-size", "cairo-lang-utils", "cairo-vm", - "itertools 0.12.1", - "thiserror 1.0.69", + "itertools 0.14.0", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-runner" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b00e638356ca1b6073fad36d95131ebe62ee8bc0cc2cb31880e0b17f5e38c39" +checksum = "e7166250692bda4b8b31f0480b8ba7fe75793b15bd4f0b6e5b5fe5d6dde01da8" dependencies = [ - "ark-ff 0.4.2", - "ark-secp256k1 0.4.0", - "ark-secp256r1 0.4.0", + "ark-ff 0.5.0", + "ark-secp256k1 0.5.0", + "ark-secp256r1 0.5.0", "cairo-lang-casm", "cairo-lang-lowering", "cairo-lang-runnable-utils", @@ -2085,23 +3804,23 @@ dependencies = [ "cairo-lang-starknet", "cairo-lang-utils", "cairo-vm", - "itertools 0.12.1", + "itertools 0.14.0", "keccak", "num-bigint 0.4.6", "num-integer", "num-traits", - "rand 0.8.5", + "rand 0.9.0", "sha2", "smol_str", "starknet-types-core", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-semantic" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5aedc89a6324b3dbdd6cf6827258c50035b7459cf818d88296d6d88c1a46cfd" +checksum = "f53a1ea47d1a0295179881d5578fc2b2c8cd5d2ac99bd81958c423d54960bb84" dependencies = [ "cairo-lang-debug", "cairo-lang-defs", @@ -2115,7 +3834,7 @@ dependencies = [ "cairo-lang-utils", "id-arena", "indoc 2.0.5", - "itertools 0.12.1", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", "rust-analyzer-salsa", @@ -2126,18 +3845,18 @@ dependencies = [ [[package]] name = "cairo-lang-sierra" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cfdac5d0a0be84e9247414b552905967e06feaef48633af4ca6e80240acb09" +checksum = "a48fe249ba77fb39363b1b40c81556c8d272083508b4618fb65b964559aca0ee" 
dependencies = [ "anyhow", "cairo-lang-utils", "const-fnv1a-hash", - "convert_case 0.6.0", + "convert_case 0.8.0", "derivative", - "itertools 0.12.1", - "lalrpop", - "lalrpop-util", + "itertools 0.14.0", + "lalrpop 0.22.1", + "lalrpop-util 0.22.1", "num-bigint 0.4.6", "num-integer", "num-traits", @@ -2148,46 +3867,46 @@ dependencies = [ "sha3", "smol_str", "starknet-types-core", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-sierra-ap-change" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a923105c63704b7371f4ee92a17b3037c8be88f0c7021eb764d8f974a397ff5" +checksum = "fad6cbf5cc904f4309179793fc3757c1db9615e71c1b78eff601d2e22206d1c6" dependencies = [ "cairo-lang-eq-solver", "cairo-lang-sierra", "cairo-lang-sierra-type-size", "cairo-lang-utils", - "itertools 0.12.1", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-sierra-gas" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa8dc62dfa49f57dcdb092f551bcba42d0123f4d0f0763087b3b41142543ecf" +checksum = "704ec6a8cb1b38f78571d5561519e87672ed5008a018a422842fa2a122ca3c34" dependencies = [ "cairo-lang-eq-solver", "cairo-lang-sierra", "cairo-lang-sierra-type-size", "cairo-lang-utils", - "itertools 0.12.1", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-sierra-generator" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4526593827287b39af72c0d12698007a9fe693d8a8e9fc4e481885634e9c1601" +checksum = "2e011122028a59557ff075b22f550f7d0267b493f3db3dbefc281ff6795d108c" dependencies = [ "cairo-lang-debug", "cairo-lang-defs", @@ -2199,7 +3918,7 @@ dependencies = [ "cairo-lang-sierra", "cairo-lang-syntax", "cairo-lang-utils", - "itertools 0.12.1", + "itertools 0.14.0", "num-traits", "rust-analyzer-salsa", "serde", @@ -2209,9 +3928,9 @@ dependencies = [ [[package]] name = "cairo-lang-sierra-to-casm" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4357f1cadb6a713c85560aacba92a794eac1f5c82021c0f28ce3a6810c4e334" +checksum = "6a43885fd8e806f5c50a80473798faad1a9a919f474e469d3027aece4f8b2002" dependencies = [ "assert_matches", "cairo-lang-casm", @@ -2221,18 +3940,18 @@ dependencies = [ "cairo-lang-sierra-type-size", "cairo-lang-utils", "indoc 2.0.5", - "itertools 0.12.1", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", "starknet-types-core", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-sierra-type-size" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2963c5eea0778ba7f00e41916dc347b988ca73458edc3e460954e597484bc95" +checksum = "860006ddce78cae65babf37ff279c31358336ae76717991837d7e0868561878c" dependencies = [ "cairo-lang-sierra", "cairo-lang-utils", @@ -2240,9 +3959,9 @@ dependencies = [ [[package]] name = "cairo-lang-starknet" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07a18683311c0976fbff8ac237e1f0940707695efee77438b56c1604d1db9b5e" +checksum = "4ba6eb0a421d3411f4948e002d3dd81ab134044465bda3131f2718f56afda409" dependencies = [ "anyhow", "cairo-lang-compiler", @@ -2260,26 
+3979,27 @@ dependencies = [ "const_format", "indent", "indoc 2.0.5", - "itertools 0.12.1", + "itertools 0.14.0", "serde", "serde_json", "smol_str", "starknet-types-core", - "thiserror 1.0.69", + "thiserror 2.0.12", + "typetag", ] [[package]] name = "cairo-lang-starknet-classes" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467bf061c5a43844880d5566931d6d2866b714b4451c61072301ce40b484cda4" +checksum = "2cf81db2c36c1e3fe3bbf64ebc5c237f748e9f41bdd42a6ed3e03e00086d768c" dependencies = [ "cairo-lang-casm", "cairo-lang-sierra", "cairo-lang-sierra-to-casm", "cairo-lang-utils", - "convert_case 0.6.0", - "itertools 0.12.1", + "convert_case 0.8.0", + "itertools 0.14.0", "num-bigint 0.4.6", "num-integer", "num-traits", @@ -2288,14 +4008,14 @@ dependencies = [ "sha3", "smol_str", "starknet-types-core", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] name = "cairo-lang-syntax" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a45ff877463d52565f056a6e9f4689c3a2cea59fde66f9853f2f7ce9e44dc3" +checksum = "f36620fd45292fd0276bd581e774222fbd06e13aa8a4bf820a4be8ad3bcec100" dependencies = [ "cairo-lang-debug", "cairo-lang-filesystem", @@ -2304,15 +4024,16 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "rust-analyzer-salsa", + "serde", "smol_str", "unescaper", ] [[package]] name = "cairo-lang-syntax-codegen" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983e0ab5783bcb1ed70e7401c4c845762f7e033c0e213886f913b5dc875cbf08" +checksum = "0e5e3c6be0b159dad1239fa83562087448aeb1d44b0ead059ea6ab73728909a8" dependencies = [ "genco", "xshell", @@ -2320,9 +4041,9 @@ dependencies = [ [[package]] name = "cairo-lang-test-plugin" -version = "2.10.0-rc.1" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe07acbac1f6f31e0b422833174e0a5220370542b47d9ceb6f4e4c7e33ffe61" +checksum = "eb622d636da63a5cc8138dba941d9eb1918d06e297bdb5a76dc69fffdf3581d9" dependencies = [ "anyhow", "cairo-lang-compiler", @@ -2338,7 +4059,7 @@ dependencies = [ "cairo-lang-syntax", "cairo-lang-utils", "indoc 2.0.5", - "itertools 0.12.1", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", "serde", @@ -2347,38 +4068,39 @@ dependencies = [ [[package]] name = "cairo-lang-test-utils" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a4f3e42fc818474f3767308159548ddbedc95a6fb857a04ebb95da725203ca" +checksum = "4c65df2eee1678a29b4b9dcff5c10a70b44e38d445ba2522025b1b6b7177b61f" dependencies = [ "cairo-lang-formatter", "cairo-lang-utils", - "colored", + "colored 3.0.0", "log", "pretty_assertions", ] [[package]] name = "cairo-lang-utils" -version = "2.10.0" +version = "2.12.0-dev.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d000fa1b86f07587b9dcabdaed00878464944b96c8c1f3f5006e890a5a8870" +checksum = "5f043065d60a8a2510bfacb6c91767298fed50ed9abbd69ff7698322b7cb1e65" dependencies = [ - "hashbrown 0.14.5", - "indexmap 2.7.0", - "itertools 0.12.1", + "hashbrown 0.15.2", + "indexmap 2.9.0", + "itertools 0.14.0", "num-bigint 0.4.6", "num-traits", "parity-scale-codec", "schemars", "serde", + "smol_str", ] [[package]] name = "cairo-native" -version = "0.3.1" +version = "0.6.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a72a2a1fd26cfe84c9de7dfc5f1a78bc2e15847fb10f593fba96fd2b93ccd89b" +checksum = "9bb843f93a320c7d8cb99843a02c4c613f8263022ce8b52070d82cc4341168ee" dependencies = [ "anyhow", "aquamarine", @@ -2396,13 +4118,14 @@ dependencies = [ "cairo-lang-sierra-ap-change", "cairo-lang-sierra-gas", "cairo-lang-sierra-generator", + "cairo-lang-sierra-to-casm", "cairo-lang-starknet", "cairo-lang-starknet-classes", "cairo-lang-test-plugin", "cairo-lang-utils", "cc", "clap", - "colored", + "colored 2.2.0", "educe 0.5.11", "itertools 0.14.0", "keccak", @@ -2419,11 +4142,12 @@ dependencies = [ "serde", "serde_json", "sha2", - "starknet-curve 0.5.1", + "sierra-emu", + "starknet-curve", "starknet-types-core", "stats_alloc", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", "tracing-subscriber", "utf8_iter", @@ -2431,16 +4155,17 @@ dependencies = [ [[package]] name = "cairo-vm" -version = "1.0.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fa8b4b56ee66cebcade4d85128e55b2bfdf046502187aeaa8c2768a427684dc" +checksum = "1d2a2f6d93aa279509d396d6f5c1992fa63d7d32c2b8d61ffa3398617c2cd0cd" dependencies = [ "anyhow", - "bincode", + "bincode 2.0.0-rc.3", "bitvec", "generic-array", - "hashbrown 0.14.5", + "hashbrown 0.15.2", "hex", + "indoc 2.0.5", "keccak", "lazy_static", "nom", @@ -2454,9 +4179,9 @@ dependencies = [ "serde_json", "sha2", "sha3", - "starknet-crypto 0.6.2", + "starknet-crypto", "starknet-types-core", - "thiserror-no-std", + "thiserror 2.0.12", "zip", ] @@ -2492,7 +4217,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.24", + "semver 1.0.26", "serde", "serde_json", "thiserror 1.0.69", @@ -2515,9 +4240,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.12" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -2635,9 +4360,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9" dependencies = [ "clap_builder", "clap_derive", @@ -2645,9 +4370,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d" dependencies = [ "anstream", "anstyle", @@ -2657,14 +4382,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -2682,6 +4407,15 @@ dependencies = [ "bitflags 1.2.1", ] +[[package]] +name = "cmake" +version = "0.1.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +dependencies = [ + "cc", +] + [[package]] name = "coins-bip32" version = "0.8.7" @@ -2751,32 +4485,12 @@ dependencies = [ ] [[package]] -name = "committer_cli" -version = "0.14.0-rc.3" +name = "colored" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" dependencies = [ - "clap", - "criterion", - "derive_more 0.99.18", - "ethnum", - "futures", - "indexmap 2.7.0", - "pretty_assertions", - "rand 0.8.5", - "rand_distr", - "serde", - "serde_json", - "serde_repr", - "starknet-types-core", - "starknet_api", - "starknet_committer", - "starknet_patricia", - "strum 0.25.0", - "strum_macros 0.25.3", - "tempfile", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-subscriber", + "windows-sys 0.59.0", ] [[package]] @@ -2873,18 +4587,18 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "convert_case" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ "unicode-segmentation", ] [[package]] name = "convert_case" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" +checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f" dependencies = [ "unicode-segmentation", ] @@ -2927,6 +4641,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -2951,6 +4675,21 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" version = "1.4.2" @@ -2971,6 +4710,7 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", "itertools 0.10.5", "num-traits", @@ -2983,6 +4723,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -3102,7 +4843,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3150,7 +4891,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3172,7 +4913,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3287,7 +5028,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3296,7 +5037,16 @@ version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ - "derive_more-impl", + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", ] [[package]] @@ -3307,7 +5057,18 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", "unicode-xid", ] @@ -3325,11 +5086,11 @@ checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "diffy" -version = "0.3.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e616e59155c92257e84970156f506287853355f58cd4a6eb167385722c32b790" +checksum = "b545b8c50194bdd008283985ab0b31dba153cfd5b3066a92770634fbc0d7d291" dependencies = [ - "nu-ansi-term", + "nu-ansi-term 0.50.1", ] [[package]] @@ -3403,9 +5164,15 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] +[[package]] +name = "dissimilar" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8975ffdaa0ef3661bfe02dbdcc06c9f829dfafe6a3c474de366a8d5e44276921" + [[package]] name = "downcast" version = "0.11.0" @@ -3446,6 +5213,7 @@ dependencies = [ "digest 0.10.7", "elliptic-curve", "rfc6979", + "serdect", "signature", "spki", ] @@ -3484,7 +5252,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3496,14 +5264,17 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "either" -version = "1.13.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -3517,9 +5288,11 @@ dependencies = [ "ff", "generic-array", "group", + "pem-rfc7468", "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle", "zeroize", ] @@ -3581,7 +5354,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3592,7 +5365,7 @@ checksum = "4f4b100e337b021ae69f3e7dd82e230452c54ff833958446c4a3854c66dc9326" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3612,7 +5385,7 @@ checksum = "a1ab991c1362ac86c61ab6f556cff143daa22e5a15e4e189df818b2fd19fe65b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3632,7 +5405,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3662,6 +5435,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "erased-serde" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e004d887f51fcb9fef17317a2f3525c887d8aa3f4f50fed920816a688284a5b7" +dependencies = [ + "serde", + "typeid", +] + [[package]] name = "errno" version = "0.3.10" @@ -3808,7 +5591,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "serde_json", - "syn 2.0.95", + "syn 2.0.100", "toml", "walkdir", ] @@ -3826,7 +5609,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -3852,7 +5635,7 @@ dependencies = [ "serde", "serde_json", "strum 0.26.3", - "syn 2.0.95", + "syn 2.0.100", "tempfile", "thiserror 1.0.69", "tiny-keccak", @@ -3868,7 +5651,7 @@ dependencies = [ "chrono", "ethers-core", "reqwest 0.11.27", - "semver 1.0.24", + "semver 1.0.26", "serde", "serde_json", "thiserror 1.0.69", @@ -3977,7 +5760,7 @@ dependencies = [ "path-slash", "rayon", "regex", - "semver 1.0.24", + "semver 1.0.26", "serde", "serde_json", "solang-parser", @@ -4004,9 +5787,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -4019,10 +5802,20 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] +[[package]] +name = "expect-test" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63af43ff4431e848fb47472a920f14fa71c24de13255a5692e93d4e90302acb0" +dependencies = [ + "dissimilar", + "once_cell", +] + [[package]] name = "eyre" version = "0.6.12" @@ -4039,7 +5832,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b95f7c0680e4142284cf8b22c14a476e87d61b004a3a0861872b32ef7ead40a2" dependencies = [ - "bit-set", + "bit-set 0.5.3", "regex", ] @@ -4126,6 +5919,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" version = "1.0.35" @@ -4210,6 +6009,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "fuchsia-cprng" version = "0.1.1" @@ -4327,7 +6132,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -4426,7 +6231,7 @@ checksum = "43eaff6bbc0b3a878361aced5ec6a2818ee7c541c5b33b5880dfa9a86c23e9e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -4718,7 +6523,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - 
"indexmap 2.7.0", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -4737,7 +6542,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.2.0", - "indexmap 2.7.0", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -4777,7 +6582,6 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", - "serde", ] [[package]] @@ -5064,9 +6868,27 @@ dependencies = [ "hyper 0.14.32", "log", "rustls 0.21.12", - "rustls-native-certs", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http 1.2.0", + "hyper 1.5.2", + "hyper-util", + "rustls 0.23.20", + "rustls-native-certs 0.8.1", + "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.1", + "tower-service", ] [[package]] @@ -5255,7 +7077,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -5329,7 +7151,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ "async-io 2.4.0", - "core-foundation", + "core-foundation 0.9.4", "fnv", "futures", "if-addrs", @@ -5422,7 +7244,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -5469,9 +7291,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -5527,6 +7349,15 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +[[package]] +name = "inventory" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab08d7cd2c5897f2c949e5383ea7c7db03fb19130ffcfbf7eda795137ae3cb83" +dependencies = [ + "rustversion", +] + [[package]] name = "io-lifetimes" version = "1.0.11" @@ -5682,11 +7513,11 @@ dependencies = [ "http 0.2.12", "jsonrpsee-core", "pin-project", - "rustls-native-certs", + "rustls-native-certs 0.6.3", "soketto", "thiserror 1.0.69", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "tracing", "url", @@ -5727,7 +7558,7 @@ checksum = "57c7b9f95208927653e7965a98525e7fc641781cab89f0e27c43fa2974405683" dependencies = [ "async-trait", "hyper 0.14.32", - "hyper-rustls", + "hyper-rustls 0.24.2", "jsonrpsee-core", "jsonrpsee-types", "serde", @@ -5882,6 +7713,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2", "signature", ] @@ -5920,22 +7752,43 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" dependencies = [ - "ascii-canvas", - "bit-set", + "ascii-canvas 3.0.0", + "bit-set 0.5.3", "ena", "itertools 0.11.0", - "lalrpop-util", - "petgraph", - "pico-args", + "lalrpop-util 0.20.2", + "petgraph 0.6.5", "regex", 
"regex-syntax 0.8.5", "string_cache", - "term", + "term 0.7.0", "tiny-keccak", "unicode-xid", "walkdir", ] +[[package]] +name = "lalrpop" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7047a26de42016abf8f181b46b398aef0b77ad46711df41847f6ed869a2a1d5b" +dependencies = [ + "ascii-canvas 4.0.0", + "bit-set 0.8.0", + "ena", + "itertools 0.14.0", + "lalrpop-util 0.22.1", + "petgraph 0.7.1", + "pico-args", + "regex", + "regex-syntax 0.8.5", + "sha3", + "string_cache", + "term 1.0.1", + "unicode-xid", + "walkdir", +] + [[package]] name = "lalrpop-util" version = "0.20.2" @@ -5945,6 +7798,16 @@ dependencies = [ "regex-automata 0.4.9", ] +[[package]] +name = "lalrpop-util" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d05b3fe34b8bd562c338db725dfa9beb9451a48f65f129ccb9538b48d2c93b" +dependencies = [ + "regex-automata 0.4.9", + "rustversion", +] + [[package]] name = "lambdaworks-crypto" version = "0.10.0" @@ -6170,7 +8033,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru", + "lru 0.12.5", "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", @@ -6348,7 +8211,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru", + "lru 0.12.5", "multistream-select", "once_cell", "rand 0.8.5", @@ -6367,7 +8230,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -6502,9 +8365,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -6523,7 +8386,7 @@ dependencies = [ "lazy_static", "libc", "regex-lite", - "semver 1.0.24", + "semver 1.0.26", ] [[package]] @@ -6538,9 +8401,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" dependencies = [ "value-bag", ] @@ -6554,6 +8417,15 @@ dependencies = [ "hashbrown 0.15.2", ] +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" +dependencies = [ + "hashbrown 0.15.2", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -6572,6 +8444,17 @@ dependencies = [ "libc", ] +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "match_cfg" version = "0.1.0" @@ -6651,7 +8534,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.95", + "syn 2.0.100", "tblgen", "unindent 0.2.3", ] @@ -6691,33 +8574,32 @@ dependencies = [ [[package]] name = "mempool_test_utils" -version = "0.14.0-rc.3" +version = "0.15.0-rc.2" dependencies = [ + "apollo_infra_utils", "assert_matches", - "blockifier", - "pretty_assertions", + "blockifier_test_utils", + "papyrus_base_layer", "serde_json", 
"starknet-types-core", "starknet_api", - "starknet_infra_utils", ] [[package]] name = "metrics" -version = "0.21.1" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +checksum = "2be3cbd384d4e955b231c895ce10685e3d8260c5ccffae898c96c723b0772835" dependencies = [ "ahash", - "metrics-macros", "portable-atomic", ] [[package]] name = "metrics" -version = "0.22.3" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2be3cbd384d4e955b231c895ce10685e3d8260c5ccffae898c96c723b0772835" +checksum = "7a7deb012b3b2767169ff203fadb4c6b0b82b947512e5eb9e0b78c2e186ad9e3" dependencies = [ "ahash", "portable-atomic", @@ -6725,15 +8607,18 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.12.2" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d4fa7ce7c4862db464a37b0b31d89bca874562f034bd7993895572783d02950" +checksum = "12779523996a67c13c84906a876ac6fe4d07a6e1adb54978378e13f199251a62" dependencies = [ - "base64 0.21.7", - "hyper 0.14.32", - "indexmap 1.9.3", + "base64 0.22.1", + "http-body-util", + "hyper 1.5.2", + "hyper-rustls 0.27.5", + "hyper-util", + "indexmap 2.9.0", "ipnet", - "metrics 0.21.1", + "metrics 0.24.1", "metrics-util", "quanta", "thiserror 1.0.69", @@ -6741,17 +8626,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "metrics-macros" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b4faf00617defe497754acde3024865bc143d44a86799b24e191ecff91354f" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.95", -] - [[package]] name = "metrics-process" version = "1.2.1" @@ -6769,16 +8643,17 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.15.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111cb375987443c3de8d503580b536f77dc8416d32db62d9456db5d93bd7ac47" +checksum = "dbd4884b1dd24f7d6628274a2f5ae22465c337c5ba065ec9b6edccddf8acc673" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.2", - "metrics 0.21.1", - "num_cpus", + "hashbrown 0.15.2", + "metrics 0.24.1", "quanta", + "rand 0.8.5", + "rand_xoshiro", "sketches-ddsketch", ] @@ -6867,7 +8742,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -6877,7 +8752,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80f9fece9bd97ab74339fe19f4bcaf52b76dcc18e5364c7977c1838f76b38de9" dependencies = [ "assert-json-diff", - "colored", + "colored 2.2.0", "httparse", "lazy_static", "log", @@ -6896,7 +8771,7 @@ checksum = "652cd6d169a36eaf9d1e6bce1a221130439a966d7f27858af66a33a66e9c4ee2" dependencies = [ "assert-json-diff", "bytes", - "colored", + "colored 2.2.0", "futures-util", "http 1.2.0", "http-body 1.0.1", @@ -6994,32 +8869,32 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "tempfile", ] [[package]] name = "native_blockifier" -version = "0.14.0-rc.3" +version = "0.15.0-rc.2" dependencies = [ + "apollo_compile_to_native", + "apollo_state_reader", + "apollo_storage", "blockifier", "cached", "cairo-lang-starknet-classes", "cairo-vm", - "indexmap 2.7.0", + "indexmap 2.9.0", "log", "num-bigint 0.4.6", - "papyrus_state_reader", - "papyrus_storage", "pretty_assertions", "pyo3", "pyo3-log", - "serde", 
"serde_json", + "shared_execution_objects", "starknet-types-core", "starknet_api", - "starknet_sierra_multicompile", "tempfile", "thiserror 1.0.69", ] @@ -7173,6 +9048,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "nu-ansi-term" +version = "0.50.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "num" version = "0.2.1" @@ -7304,7 +9188,7 @@ checksum = "e238432a7881ec7164503ccc516c014bf009be7984cde1ba56837862543bdec3" dependencies = [ "bitvec", "either", - "lru", + "lru 0.12.5", "num-bigint 0.4.6", "num-integer", "num-modular", @@ -7374,7 +9258,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -7386,6 +9270,19 @@ dependencies = [ "libc", ] +[[package]] +name = "nybbles" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +dependencies = [ + "alloy-rlp", + "const-hex", + "proptest", + "serde", + "smallvec", +] + [[package]] name = "object" version = "0.36.7" @@ -7406,9 +9303,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" @@ -7449,9 +9346,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "f5e534d133a060a3c19daec1eb3e98ec6f4685978834f2dbadfe2ec215bab64e" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -7470,14 +9367,14 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" @@ -7498,587 +9395,198 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] -name = "os_info" -version = "3.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" -dependencies = [ - "log", - "serde", - "windows-sys 0.52.0", -] - -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - -[[package]] -name = "page_size" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "papyrus_base_layer" -version = "0.14.0-rc.3" -dependencies = [ - "alloy-contract", - "alloy-dyn-abi", - "alloy-json-rpc", - "alloy-primitives", - "alloy-provider", - "alloy-sol-types", - 
"alloy-transport", - "alloy-transport-http", - "async-trait", - "ethers", - "ethers-core", - "papyrus_base_layer", - "papyrus_config", - "pretty_assertions", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api", - "tar", - "tempfile", - "thiserror 1.0.69", - "tokio", - "url", -] - -[[package]] -name = "papyrus_common" -version = "0.14.0-rc.3" -dependencies = [ - "assert_matches", - "base64 0.13.1", - "cairo-lang-starknet-classes", - "flate2", - "indexmap 2.7.0", - "lazy_static", - "papyrus_test_utils", - "pretty_assertions", - "rand 0.8.5", - "serde", - "serde_json", - "sha3", - "starknet-types-core", - "starknet_api", - "thiserror 1.0.69", -] - -[[package]] -name = "papyrus_config" -version = "0.14.0-rc.3" -dependencies = [ - "assert_matches", - "clap", - "itertools 0.12.1", - "lazy_static", - "papyrus_test_utils", - "serde", - "serde_json", - "starknet_api", - "starknet_infra_utils", - "strum_macros 0.25.3", - "tempfile", - "thiserror 1.0.69", - "tracing", - "validator", -] - -[[package]] -name = "papyrus_consensus" -version = "0.14.0-rc.3" -dependencies = [ - "async-trait", - "clap", - "enum-as-inner", - "fs2", - "futures", - "lazy_static", - "lru", - "metrics 0.21.1", - "mockall", - "nix 0.20.2", - "papyrus_common", - "papyrus_config", - "papyrus_network", - "papyrus_network_types", - "papyrus_protobuf", - "papyrus_storage", - "papyrus_test_utils", - "serde", - "starknet-types-core", - "starknet_api", - "test-case", - "thiserror 1.0.69", - "tokio", - "tracing", - "validator", -] - -[[package]] -name = "papyrus_consensus_orchestrator" -version = "0.14.0-rc.3" -dependencies = [ - "async-trait", - "blockifier", - "chrono", - "futures", - "indexmap 2.7.0", - "lazy_static", - "mockall", - "papyrus_consensus", - "papyrus_network", - "papyrus_protobuf", - "papyrus_storage", - "papyrus_test_utils", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api", - "starknet_batcher_types", - "starknet_infra_utils", - "test-case", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "papyrus_execution" -version = "0.14.0-rc.3" -dependencies = [ - "anyhow", - "assert_matches", - "blockifier", - "cairo-lang-casm", - "cairo-lang-starknet-classes", - "cairo-lang-utils", - "cairo-vm", - "indexmap 2.7.0", - "itertools 0.12.1", - "lazy_static", - "papyrus_common", - "papyrus_config", - "papyrus_storage", - "papyrus_test_utils", - "pretty_assertions", - "rand 0.8.5", - "rand_chacha 0.3.1", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "papyrus_load_test" -version = "0.14.0-rc.3" -dependencies = [ - "anyhow", - "assert_matches", - "goose", - "lazy_static", - "once_cell", - "pretty_assertions", - "rand 0.8.5", - "reqwest 0.11.27", - "serde", - "serde_json", - "starknet_api", - "tokio", -] - -[[package]] -name = "papyrus_monitoring_gateway" -version = "0.14.0-rc.3" -dependencies = [ - "axum", - "hyper 0.14.32", - "metrics 0.21.1", - "metrics-exporter-prometheus", - "metrics-process", - "papyrus_config", - "papyrus_storage", - "pretty_assertions", - "rand 0.8.5", - "serde", - "serde_json", - "starknet_client", - "thiserror 1.0.69", - "tokio", - "tower 0.4.13", - "tracing", - "validator", -] - -[[package]] -name = "papyrus_network" -version = "0.14.0-rc.3" -dependencies = [ - "assert_matches", - "async-stream", - "async-trait", - "bytes", - "deadqueue", - "defaultmap", - "derive_more 0.99.18", - "futures", - "lazy_static", - "libp2p", - "libp2p-swarm-test", - "metrics 0.21.1", - "mockall", 
- "papyrus_common", - "papyrus_config", - "papyrus_network_types", - "pretty_assertions", - "replace_with", - "serde", - "starknet_api", - "starknet_sequencer_infra", - "thiserror 1.0.69", - "tokio", - "tokio-retry", - "tokio-stream", - "tracing", - "unsigned-varint 0.8.0", - "validator", - "void", +name = "os_info" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" +dependencies = [ + "log", + "serde", + "windows-sys 0.52.0", ] [[package]] -name = "papyrus_network_types" -version = "0.14.0-rc.3" +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "libp2p", - "papyrus_test_utils", - "rand_chacha 0.3.1", - "serde", + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", ] [[package]] -name = "papyrus_node" -version = "0.14.0-rc.3" +name = "page_size" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" dependencies = [ - "anyhow", - "assert-json-diff", - "clap", - "colored", - "const_format", - "futures", - "futures-util", - "insta", - "itertools 0.12.1", - "lazy_static", - "metrics-exporter-prometheus", - "once_cell", - "papyrus_base_layer", - "papyrus_common", - "papyrus_config", - "papyrus_consensus", - "papyrus_consensus_orchestrator", - "papyrus_monitoring_gateway", - "papyrus_network", - "papyrus_p2p_sync", - "papyrus_protobuf", - "papyrus_rpc", - "papyrus_storage", - "papyrus_sync", - "papyrus_test_utils", - "pretty_assertions", - "reqwest 0.11.27", - "serde", - "serde_json", - "starknet_api", - "starknet_client", - "starknet_infra_utils", - "strum 0.25.0", - "tempfile", - "tokio", - "tokio-stream", - "tracing", - "tracing-subscriber", - "validator", + "libc", + "winapi", ] [[package]] -name = "papyrus_p2p_sync" -version = "0.14.0-rc.3" +name = "papyrus_base_layer" +version = "0.15.0-rc.2" dependencies = [ + "alloy", + "apollo_config", + "apollo_l1_endpoint_monitor_types", "assert_matches", - "async-stream", - "chrono", - "enum-iterator", + "async-trait", + "colored 3.0.0", + "ethers", + "ethers-core", "futures", - "indexmap 2.7.0", - "lazy_static", - "metrics 0.21.1", - "papyrus_common", - "papyrus_config", - "papyrus_network", - "papyrus_proc_macros", - "papyrus_protobuf", - "papyrus_storage", - "papyrus_test_utils", - "rand 0.8.5", - "rand_chacha 0.3.1", + "mockall", + "pretty_assertions", "serde", "starknet-types-core", "starknet_api", - "starknet_state_sync_types", - "static_assertions", + "tar", + "tempfile", "thiserror 1.0.69", "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "papyrus_proc_macros" -version = "0.14.0-rc.3" -dependencies = [ - "metrics 0.21.1", - "metrics-exporter-prometheus", - "papyrus_common", - "papyrus_test_utils", - "prometheus-parse", - "quote", - "syn 2.0.95", "tracing", + "url", + "validator", ] [[package]] -name = "papyrus_protobuf" -version = "0.14.0-rc.3" +name = "papyrus_common" +version = "0.15.0-rc.2" dependencies = [ - "indexmap 2.7.0", - "lazy_static", - "papyrus_common", - "papyrus_test_utils", - "primitive-types", - "prost", - "prost-build", - "protoc-prebuilt", + 
"apollo_test_utils", + "assert_matches", + "cairo-lang-starknet-classes", + "indexmap 2.9.0", + "pretty_assertions", "rand 0.8.5", - "rand_chacha 0.3.1", "serde", "serde_json", "starknet-types-core", "starknet_api", - "thiserror 1.0.69", ] [[package]] -name = "papyrus_rpc" -version = "0.14.0-rc.3" +name = "papyrus_load_test" +version = "0.15.0-rc.2" dependencies = [ "anyhow", "assert_matches", - "async-trait", - "base64 0.13.1", - "cairo-lang-casm", - "cairo-lang-starknet-classes", - "camelpaste", - "derive_more 0.99.18", - "enum-iterator", - "ethers", - "flate2", - "futures-util", - "hex", - "hyper 0.14.32", - "indexmap 2.7.0", - "insta", - "itertools 0.12.1", - "jsonrpsee", - "jsonschema", + "goose", "lazy_static", - "metrics 0.21.1", - "metrics-exporter-prometheus", - "mockall", - "papyrus_common", - "papyrus_config", - "papyrus_execution", - "papyrus_proc_macros", - "papyrus_storage", - "papyrus_test_utils", + "once_cell", "pretty_assertions", - "prometheus-parse", "rand 0.8.5", - "rand_chacha 0.3.1", - "regex", "reqwest 0.11.27", "serde", "serde_json", - "starknet-core", - "starknet-types-core", "starknet_api", - "starknet_client", - "strum 0.25.0", - "strum_macros 0.25.3", "tokio", - "tower 0.4.13", - "tracing", - "validator", -] - -[[package]] -name = "papyrus_state_reader" -version = "0.14.0-rc.3" -dependencies = [ - "assert_matches", - "blockifier", - "indexmap 2.7.0", - "log", - "papyrus_storage", - "rstest", - "starknet-types-core", - "starknet_api", ] [[package]] -name = "papyrus_storage" -version = "0.14.0-rc.3" +name = "papyrus_monitoring_gateway" +version = "0.15.0-rc.2" dependencies = [ - "assert_matches", - "byteorder", - "cairo-lang-casm", - "cairo-lang-starknet-classes", - "cairo-lang-utils", - "camelpaste", - "clap", - "human_bytes", - "indexmap 2.7.0", - "insta", - "integer-encoding", - "lazy_static", - "libmdbx", - "memmap2", - "metrics 0.21.1", + "apollo_config", + "apollo_starknet_client", + "apollo_storage", + "axum", + "hyper 0.14.32", + "metrics 0.24.1", "metrics-exporter-prometheus", - "num-bigint 0.4.6", - "num-traits", - "page_size", - "papyrus_common", - "papyrus_config", - "papyrus_proc_macros", - "papyrus_test_utils", - "parity-scale-codec", - "paste", + "metrics-process", "pretty_assertions", - "primitive-types", - "prometheus-parse", "rand 0.8.5", - "rand_chacha 0.3.1", - "rstest", - "schemars", "serde", "serde_json", - "simple_logger", - "starknet-types-core", - "starknet_api", - "statistical", - "tempfile", - "test-case", - "test-log", "thiserror 1.0.69", "tokio", + "tower 0.4.13", "tracing", "validator", - "zstd 0.13.2", ] [[package]] -name = "papyrus_sync" -version = "0.14.0-rc.3" +name = "papyrus_node" +version = "0.15.0-rc.2" dependencies = [ - "assert_matches", - "async-stream", - "async-trait", - "cairo-lang-starknet-classes", - "chrono", + "anyhow", + "apollo_central_sync", + "apollo_class_manager_types", + "apollo_config", + "apollo_consensus", + "apollo_consensus_orchestrator", + "apollo_infra_utils", + "apollo_network", + "apollo_p2p_sync", + "apollo_rpc", + "apollo_starknet_client", + "apollo_storage", + "apollo_test_utils", + "clap", + "colored 3.0.0", + "const_format", "futures", "futures-util", - "indexmap 2.7.0", + "insta", "itertools 0.12.1", - "lru", - "metrics 0.21.1", - "mockall", + "lazy_static", + "metrics-exporter-prometheus", + "once_cell", "papyrus_base_layer", "papyrus_common", - "papyrus_config", - "papyrus_proc_macros", - "papyrus_storage", - "papyrus_test_utils", + "papyrus_monitoring_gateway", "pretty_assertions", "reqwest 
0.11.27", "serde", - "simple_logger", - "starknet-types-core", + "serde_json", "starknet_api", - "starknet_client", - "thiserror 1.0.69", + "strum 0.25.0", + "tempfile", "tokio", "tokio-stream", "tracing", -] - -[[package]] -name = "papyrus_test_utils" -version = "0.14.0-rc.3" -dependencies = [ - "cairo-lang-casm", - "cairo-lang-starknet-classes", - "cairo-lang-utils", - "indexmap 2.7.0", - "num-bigint 0.4.6", - "pretty_assertions", - "primitive-types", - "prometheus-parse", - "rand 0.8.5", - "rand_chacha 0.3.1", - "reqwest 0.11.27", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api", + "tracing-subscriber", + "validator", ] [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" dependencies = [ "arrayvec", "bitvec", "byte-slice-cast", + "const_format", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.100", ] [[package]] @@ -8208,7 +9716,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.11", + "thiserror 2.0.12", "ucd-trie", ] @@ -8218,8 +9726,18 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ - "fixedbitset", - "indexmap 2.7.0", + "fixedbitset 0.4.2", + "indexmap 2.9.0", +] + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset 0.5.7", + "indexmap 2.9.0", ] [[package]] @@ -8234,35 +9752,35 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_macros", - "phf_shared 0.11.2", + "phf_shared 0.11.3", ] [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.2", + "phf_shared 0.11.3", "rand 0.8.5", ] [[package]] name = "phf_macros" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ "phf_generator", - "phf_shared 0.11.2", + "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.95", + "syn 
2.0.100", ] [[package]] @@ -8271,16 +9789,16 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "siphasher", + "siphasher 0.3.11", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher", + "siphasher 1.0.1", ] [[package]] @@ -8291,29 +9809,29 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -8400,7 +9918,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.42", + "rustix 0.38.43", "tracing", "windows-sys 0.59.0", ] @@ -8507,7 +10025,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705" dependencies = [ "proc-macro2", - "syn 2.0.95", + "syn 2.0.100", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", ] [[package]] @@ -8540,7 +10067,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.22", + "toml_edit 0.22.24", ] [[package]] @@ -8586,14 +10113,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -8608,7 +10135,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.42", + "rustix 0.38.43", ] [[package]] @@ -8641,7 +10168,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -8662,8 +10189,8 @@ version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ - "bit-set", - "bit-vec", + "bit-set 0.5.3", + "bit-vec 0.6.3", "bitflags 2.6.0", "lazy_static", "num-traits", @@ -8698,12 +10225,12 @@ dependencies = [ "log", "multimap", "once_cell", - "petgraph", + "petgraph 0.6.5", "prettyplease", "prost", "prost-types", "regex", - "syn 2.0.95", + "syn 2.0.100", "tempfile", ] @@ -8717,7 +10244,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -8830,13 +10357,12 @@ dependencies = [ [[package]] name = "quanta" -version = "0.11.1" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" dependencies = [ "crossbeam-utils", "libc", - "mach2", "once_cell", "raw-cpuid", "wasi 0.11.0+wasi-snapshot-preview1", @@ -8899,7 +10425,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -8918,7 +10444,7 @@ dependencies = [ "rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time", @@ -8991,8 +10517,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.0", - "zerocopy 0.8.16", + "rand_core 0.9.3", + "zerocopy 0.8.23", ] [[package]] @@ -9022,7 +10548,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -9051,12 +10577,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ "getrandom 0.3.1", - "zerocopy 0.8.16", ] [[package]] @@ -9135,18 +10660,27 @@ dependencies = [ name = "rand_xorshift" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" dependencies = [ "rand_core 0.6.4", ] [[package]] name = "raw-cpuid" -version = "10.7.0" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ - "bitflags 1.2.1", + "bitflags 2.6.0", ] [[package]] @@ -9296,7 +10830,7 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", - "hyper-rustls", + "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -9315,7 +10849,7 @@ dependencies = [ "system-configuration 0.5.1", "tokio", 
"tokio-native-tls", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "tower-service", "url", @@ -9524,7 +11058,7 @@ checksum = "b3a8fb4672e840a587a66fc577a5491375df51ddb88f2a2c2a792598c326fe14" dependencies = [ "quote", "rand 0.8.5", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -9584,7 +11118,7 @@ version = "0.17.0-pre.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719825638c59fd26a55412a24561c7c5bcf54364c88b9a7a04ba08a6eafaba8d" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.9.0", "lock_api", "oorandom", "parking_lot", @@ -9604,7 +11138,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -9656,7 +11190,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.24", + "semver 1.0.26", ] [[package]] @@ -9684,14 +11218,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys 0.4.15", "windows-sys 0.59.0", ] @@ -9713,6 +11247,7 @@ version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring 0.17.8", @@ -9731,7 +11266,19 @@ dependencies = [ "openssl-probe", "rustls-pemfile 1.0.4", "schannel", - "security-framework", + "security-framework 2.11.1", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework 3.2.0", ] [[package]] @@ -9777,6 +11324,7 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ + "aws-lc-rs", "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", @@ -9856,7 +11404,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -9890,7 +11438,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -9931,6 +11479,7 @@ dependencies = [ "der", "generic-array", "pkcs8", + "serdect", "subtle", "zeroize", ] @@ -9942,7 +11491,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.10.0", "core-foundation-sys", "libc", "security-framework-sys", @@ -9950,9 +11512,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" 
+version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -9969,9 +11531,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" dependencies = [ "serde", ] @@ -10014,7 +11576,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -10025,14 +11587,14 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -10069,7 +11631,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -10095,14 +11657,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "chrono", "hex", + "indexmap 1.9.3", + "indexmap 2.9.0", "serde", + "serde_derive", "serde_json", "serde_with_macros", "time", @@ -10110,14 +11675,24 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", +] + +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", ] [[package]] @@ -10145,976 +11720,612 @@ dependencies = [ ] [[package]] -name = "sha2" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" -version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sha3-asm" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" -dependencies = [ - "cc", - "cfg-if", -] - 
-[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "signal-hook-registry" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - -[[package]] -name = "similar" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" - -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "thiserror 1.0.69", - "time", -] - -[[package]] -name = "simple_logger" -version = "4.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e7e46c8c90251d47d08b28b8a419ffb4aede0f87c2eea95e17d1d5bacbf3ef1" -dependencies = [ - "colored", - "log", - "time", - "windows-sys 0.48.0", -] - -[[package]] -name = "simplelog" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16257adbfaef1ee58b1363bdc0664c9b8e1e30aed86049635fb5f147d065a9c0" -dependencies = [ - "log", - "termcolor", - "time", -] - -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - -[[package]] -name = "sketches-ddsketch" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg 1.4.0", -] - -[[package]] -name = "slug" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "882a80f72ee45de3cc9a5afeb2da0331d58df69e4e7d8eeb5d3c7784ae67e724" -dependencies = [ - "deunicode", - "wasm-bindgen", -] - -[[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" - -[[package]] -name = "smol" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" -dependencies = [ - "async-channel 2.3.1", - "async-executor", - "async-fs", - "async-io 2.4.0", - "async-lock 3.4.0", - "async-net", - "async-process", - "blocking", - "futures-lite 2.5.0", -] - -[[package]] -name = "smol_str" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" -dependencies = [ - "serde", -] - -[[package]] -name = "snow" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" -dependencies = [ - "aes-gcm", - "blake2", - "chacha20poly1305", - "curve25519-dalek", - "rand_core 0.6.4", - "ring 0.17.8", - "rustc_version 0.4.1", - "sha2", - "subtle", -] - -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "soketto" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" -dependencies = [ - "base64 0.13.1", - "bytes", - "futures", - "http 0.2.12", - "httparse", - "log", - "rand 0.8.5", - "sha-1", -] - -[[package]] -name = "solang-parser" -version = "0.3.3" +name = "sha2" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "itertools 0.11.0", - "lalrpop", - "lalrpop-util", - "phf", - "thiserror 1.0.69", - "unicode-xid", + "cfg-if", + "cpufeatures", + "digest 0.10.7", ] [[package]] -name = "spin" -version = "0.5.2" +name = "sha3" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] [[package]] -name = "spin" -version = "0.9.8" +name = "sha3-asm" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ - "lock_api", + "cc", + "cfg-if", ] [[package]] -name = "spki" -version = "0.7.3" +name = "sharded-slab" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ - "base64ct", - "der", + "lazy_static", ] [[package]] -name = "sprs" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704ef26d974e8a452313ed629828cd9d4e4fa34667ca1ad9d6b1fffa43c6e166" +name = "shared_execution_objects" +version = "0.15.0-rc.2" dependencies = [ - "ndarray", - "num-complex 0.4.6", - "num-traits", - "smallvec", + "blockifier", + "rstest", + "serde", + "starknet_api", ] [[package]] -name = "stable_deref_trait" -version = "1.2.0" +name = "shlex" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] -name = "starknet-core" -version = "0.6.1" +name = "sierra-emu" +version = "0.6.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14139b1c39bdc2f1e663c12090ff5108fe50ebe62c09e15e32988dfaf445a7e4" +checksum = "5e5431f771bd0919c8663b29fb288ea75cc79fd819562e1fc85ef5eb875896ba" dependencies = [ - "base64 0.21.7", - "flate2", - "hex", + "cairo-lang-compiler", + "cairo-lang-filesystem", + "cairo-lang-runner", + "cairo-lang-sierra", + "cairo-lang-sierra-ap-change", + "cairo-lang-sierra-gas", + "cairo-lang-sierra-generator", + "cairo-lang-sierra-to-casm", + "cairo-lang-starknet-classes", + "cairo-lang-test-plugin", + "cairo-lang-utils", + "clap", + "k256", + "keccak", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "p256", + "rand 0.8.5", + "rayon", + "sec1", "serde", "serde_json", - "serde_json_pythonic", - "serde_with", - "sha3", - "starknet-crypto 0.6.2", - "starknet-ff", + "sha2", + "smallvec", + "starknet-crypto", + "starknet-curve", + "starknet-types-core", + "tempfile", + "thiserror 2.0.12", + "tracing", + "tracing-subscriber", ] [[package]] -name = "starknet-crypto" -version = "0.6.2" +name = "signal-hook-registry" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e2c30c01e8eb0fc913c4ee3cf676389fffc1d1182bfe5bb9670e4e72e968064" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ - "crypto-bigint", - "hex", - "hmac", - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "rfc6979", - "sha2", - "starknet-crypto-codegen", - "starknet-curve 0.4.2", - "starknet-ff", - "zeroize", + "libc", ] [[package]] -name = "starknet-crypto" -version = "0.7.3" +name = "signature" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded22ccf4cb9e572ce3f77de6066af53560cd2520d508876c83bb1e6b29d5cbc" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "similar" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "crypto-bigint", - "hex", - "hmac", "num-bigint 0.4.6", - "num-integer", "num-traits", - "rfc6979", - "sha2", - "starknet-curve 0.5.1", - "starknet-types-core", - "zeroize", + "thiserror 1.0.69", + "time", ] [[package]] -name = "starknet-crypto-codegen" -version = "0.3.3" +name = "simple_logger" +version = "4.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbc159a1934c7be9761c237333a57febe060ace2bc9e3b337a59a37af206d19f" +checksum = "8e7e46c8c90251d47d08b28b8a419ffb4aede0f87c2eea95e17d1d5bacbf3ef1" dependencies = [ - "starknet-curve 0.4.2", - "starknet-ff", - "syn 2.0.95", + "colored 2.2.0", + "log", + "time", + "windows-sys 0.48.0", ] [[package]] -name = "starknet-curve" -version = "0.4.2" +name = "simplelog" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1c383518bb312751e4be80f53e8644034aa99a0afb29d7ac41b89a997db875b" +checksum = "16257adbfaef1ee58b1363bdc0664c9b8e1e30aed86049635fb5f147d065a9c0" dependencies = [ - 
"starknet-ff", + "log", + "termcolor", + "time", ] [[package]] -name = "starknet-curve" -version = "0.5.1" +name = "siphasher" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcde6bd74269b8161948190ace6cf069ef20ac6e79cd2ba09b320efa7500b6de" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "size-of" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4e36eca171fddeda53901b0a436573b3f2391eaa9189d439b2bd8ea8cebd7e3" dependencies = [ - "starknet-types-core", + "size-of-derive", ] [[package]] -name = "starknet-ff" -version = "0.3.7" +name = "size-of-derive" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abf1b44ec5b18d87c1ae5f54590ca9d0699ef4dd5b2ffa66fc97f24613ec585" +checksum = "eefff4890f5308d477f3da563af8bdb8fbb6fabaec4c974bd211896fa7945e68" dependencies = [ - "ark-ff 0.4.2", - "bigdecimal", - "crypto-bigint", - "getrandom 0.2.15", - "hex", - "serde", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "starknet-types-core" -version = "0.1.7" +name = "sketches-ddsketch" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1b9e01ccb217ab6d475c5cda05dbb22c30029f7bb52b192a010a00d77a3d74" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "lambdaworks-crypto", - "lambdaworks-math", - "lazy_static", - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "serde", + "autocfg 1.4.0", ] [[package]] -name = "starknet_api" -version = "0.14.0-rc.3" +name = "slug" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882a80f72ee45de3cc9a5afeb2da0331d58df69e4e7d8eeb5d3c7784ae67e724" dependencies = [ - "assert_matches", - "bitvec", - "cairo-lang-runner", - "cairo-lang-starknet-classes", - "derive_more 0.99.18", - "hex", - "indexmap 2.7.0", - "itertools 0.12.1", - "num-bigint 0.4.6", - "num-traits", - "pretty_assertions", - "primitive-types", - "rstest", - "semver 1.0.24", - "serde", - "serde_json", - "sha3", - "starknet-crypto 0.7.3", - "starknet-types-core", - "starknet_api", - "starknet_infra_utils", - "strum 0.25.0", - "strum_macros 0.25.3", - "thiserror 1.0.69", + "deunicode", + "wasm-bindgen", ] [[package]] -name = "starknet_batcher" -version = "0.14.0-rc.3" +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" dependencies = [ - "assert_matches", - "async-trait", - "blockifier", - "chrono", - "futures", - "indexmap 2.7.0", - "mempool_test_utils", - "mockall", - "papyrus_config", - "papyrus_state_reader", - "papyrus_storage", - "rstest", "serde", - "starknet-types-core", - "starknet_api", - "starknet_batcher_types", - "starknet_l1_provider_types", - "starknet_mempool_types", - "starknet_sequencer_infra", - "starknet_state_sync_types", - "thiserror 1.0.69", - "tokio", - "tracing", - "validator", ] [[package]] -name = "starknet_batcher_types" 
-version = "0.14.0-rc.3" +name = "smol" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" dependencies = [ - "async-trait", - "chrono", - "derive_more 0.99.18", - "mockall", - "papyrus_proc_macros", - "serde", - "starknet_api", - "starknet_batcher_types", - "starknet_sequencer_infra", - "starknet_state_sync_types", - "thiserror 1.0.69", + "async-channel 2.3.1", + "async-executor", + "async-fs", + "async-io 2.4.0", + "async-lock 3.4.0", + "async-net", + "async-process", + "blocking", + "futures-lite 2.5.0", ] [[package]] -name = "starknet_class_manager_types" -version = "0.14.0-rc.3" +name = "smol_str" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9676b89cd56310a87b93dec47b11af744f34d5fc9f367b829474eec0a891350d" dependencies = [ - "async-trait", - "cairo-lang-starknet-classes", - "papyrus_proc_macros", + "borsh", "serde", - "starknet_api", - "starknet_sequencer_infra", - "thiserror 1.0.69", ] [[package]] -name = "starknet_client" -version = "0.14.0-rc.3" +name = "snow" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "assert_matches", - "async-trait", - "cairo-lang-starknet-classes", - "enum-iterator", - "http 0.2.12", - "indexmap 2.7.0", - "mockall", - "mockito 0.31.1", - "os_info", - "papyrus_common", - "papyrus_config", - "papyrus_test_utils", - "pretty_assertions", - "rand 0.8.5", - "rand_chacha 0.3.1", - "reqwest 0.11.27", - "serde", - "serde_json", - "serde_repr", - "simple_logger", - "starknet-types-core", - "starknet_api", - "strum 0.25.0", - "strum_macros 0.25.3", - "thiserror 1.0.69", - "tokio", - "tokio-retry", - "tracing", - "url", + "aes-gcm", + "blake2", + "chacha20poly1305", + "curve25519-dalek", + "rand_core 0.6.4", + "ring 0.17.8", + "rustc_version 0.4.1", + "sha2", + "subtle", ] [[package]] -name = "starknet_committer" -version = "0.14.0-rc.3" +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ - "hex", - "pretty_assertions", - "rstest", - "serde_json", - "starknet-types-core", - "starknet_patricia", - "thiserror 1.0.69", - "tokio", - "tracing", + "libc", + "winapi", ] [[package]] -name = "starknet_consensus_manager" -version = "0.14.0-rc.3" +name = "socket2" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ - "async-trait", - "futures", - "papyrus_config", - "papyrus_consensus", - "papyrus_consensus_orchestrator", - "papyrus_network", - "papyrus_protobuf", - "serde", - "starknet_api", - "starknet_batcher_types", - "starknet_infra_utils", - "starknet_sequencer_infra", - "starknet_state_sync_types", - "tokio", - "tracing", - "validator", + "libc", + "windows-sys 0.52.0", ] [[package]] -name = "starknet_gateway" -version = "0.14.0-rc.3" +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "assert_matches", - "async-trait", - "axum", - "blockifier", - "cairo-lang-sierra-to-casm", - "cairo-lang-starknet-classes", + "base64 0.13.1", + "bytes", "futures", - 
"mempool_test_utils", - "mockall", - "mockito 1.6.1", - "num-bigint 0.4.6", - "papyrus_config", - "papyrus_network_types", - "papyrus_rpc", - "papyrus_test_utils", - "pretty_assertions", - "reqwest 0.11.27", - "rstest", - "serde", - "serde_json", - "starknet-types-core", - "starknet_api", - "starknet_gateway_types", - "starknet_mempool", - "starknet_mempool_types", - "starknet_sequencer_infra", - "starknet_sierra_multicompile", - "starknet_state_sync_types", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-test", - "validator", + "http 0.2.12", + "httparse", + "log", + "rand 0.8.5", + "sha-1", ] [[package]] -name = "starknet_gateway_types" -version = "0.14.0-rc.3" +name = "solang-parser" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" dependencies = [ - "async-trait", - "axum", - "enum-assoc", - "mockall", - "papyrus_network_types", - "papyrus_proc_macros", - "papyrus_rpc", - "serde", - "serde_json", - "starknet_api", - "starknet_gateway_types", - "starknet_sequencer_infra", + "itertools 0.11.0", + "lalrpop 0.20.2", + "lalrpop-util 0.20.2", + "phf", "thiserror 1.0.69", - "tracing", + "unicode-xid", ] [[package]] -name = "starknet_http_server" -version = "0.14.0-rc.3" -dependencies = [ - "axum", - "hyper 0.14.32", - "papyrus_config", - "reqwest 0.11.27", - "serde", - "serde_json", - "starknet_api", - "starknet_gateway_types", - "starknet_infra_utils", - "starknet_sequencer_infra", - "thiserror 1.0.69", - "tokio", - "tracing", - "validator", -] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "starknet_infra_utils" -version = "0.14.0-rc.3" +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ - "pretty_assertions", - "rstest", - "tokio", - "tracing", - "tracing-subscriber", + "lock_api", ] [[package]] -name = "starknet_integration_tests" -version = "0.14.0-rc.3" +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ - "assert_matches", - "blockifier", - "cairo-lang-starknet-classes", - "futures", - "indexmap 2.7.0", - "itertools 0.12.1", - "mempool_test_utils", - "papyrus_common", - "papyrus_config", - "papyrus_consensus", - "papyrus_execution", - "papyrus_network", - "papyrus_protobuf", - "papyrus_rpc", - "papyrus_storage", - "pretty_assertions", - "rstest", - "serde_json", - "starknet-types-core", - "starknet_api", - "starknet_batcher", - "starknet_client", - "starknet_consensus_manager", - "starknet_gateway", - "starknet_gateway_types", - "starknet_http_server", - "starknet_infra_utils", - "starknet_mempool_p2p", - "starknet_monitoring_endpoint", - "starknet_sequencer_infra", - "starknet_sequencer_node", - "starknet_state_sync", - "strum 0.25.0", - "tempfile", - "tokio", - "tracing", + "base64ct", + "der", ] [[package]] -name = "starknet_l1_provider" -version = "0.14.0-rc.3" +name = "sprs" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704ef26d974e8a452313ed629828cd9d4e4fa34667ca1ad9d6b1fffa43c6e166" dependencies = [ - "assert_matches", - "async-trait", - "indexmap 2.7.0", - 
"papyrus_base_layer", - "papyrus_config", - "pretty_assertions", - "serde", - "starknet_api", - "starknet_l1_provider_types", - "starknet_sequencer_infra", - "thiserror 1.0.69", - "tracing", - "validator", + "ndarray", + "num-complex 0.4.6", + "num-traits", + "smallvec", ] [[package]] -name = "starknet_l1_provider_types" -version = "0.14.0-rc.3" -dependencies = [ - "async-trait", - "mockall", - "papyrus_proc_macros", - "serde", - "starknet_api", - "starknet_sequencer_infra", - "thiserror 1.0.69", - "tracing", -] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] -name = "starknet_mempool" -version = "0.14.0-rc.3" +name = "starknet-core" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37abf0af45a3b866dd108880ace9949ae7830f6830adb8963024302ae9e82c24" dependencies = [ - "assert_matches", - "async-trait", - "derive_more 0.99.18", - "itertools 0.12.1", - "mempool_test_utils", - "mockall", - "papyrus_network", - "papyrus_network_types", - "papyrus_test_utils", - "pretty_assertions", - "rstest", + "base64 0.21.7", + "crypto-bigint", + "flate2", + "foldhash", + "hex", + "indexmap 2.9.0", + "num-traits", + "serde", + "serde_json", + "serde_json_pythonic", + "serde_with", + "sha3", + "starknet-core-derive", + "starknet-crypto", "starknet-types-core", - "starknet_api", - "starknet_mempool", - "starknet_mempool_p2p_types", - "starknet_mempool_types", - "starknet_sequencer_infra", - "tokio", - "tracing", ] [[package]] -name = "starknet_mempool_p2p" -version = "0.14.0-rc.3" +name = "starknet-core-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08520b7d80eda7bf1a223e8db4f9bb5779a12846f15ebf8f8d76667eca7f5ad" dependencies = [ - "async-trait", - "futures", - "libp2p", - "papyrus_config", - "papyrus_network", - "papyrus_network_types", - "papyrus_protobuf", - "papyrus_test_utils", - "rand_chacha 0.3.1", - "serde", - "starknet_api", - "starknet_gateway_types", - "starknet_mempool_p2p_types", - "starknet_sequencer_infra", - "tokio", - "tracing", - "validator", + "proc-macro2", + "quote", + "syn 2.0.100", ] [[package]] -name = "starknet_mempool_p2p_types" -version = "0.14.0-rc.3" +name = "starknet-crypto" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "039a3bad70806b494c9e6b21c5238a6c8a373d66a26071859deb0ccca6f93634" dependencies = [ - "async-trait", - "mockall", - "papyrus_network_types", - "papyrus_proc_macros", - "serde", - "starknet_api", - "starknet_sequencer_infra", - "thiserror 1.0.69", + "crypto-bigint", + "hex", + "hmac", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "rfc6979", + "sha2", + "starknet-curve", + "starknet-types-core", + "zeroize", ] [[package]] -name = "starknet_mempool_types" -version = "0.14.0-rc.3" +name = "starknet-curve" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcde6bd74269b8161948190ace6cf069ef20ac6e79cd2ba09b320efa7500b6de" dependencies = [ - "async-trait", - "mockall", - "papyrus_network_types", - "papyrus_proc_macros", - "serde", - "starknet_api", - "starknet_mempool_types", - "starknet_sequencer_infra", - "thiserror 1.0.69", + "starknet-types-core", ] [[package]] -name = "starknet_monitoring_endpoint" -version = "0.14.0-rc.3" +name = "starknet-types-core" +version = "0.1.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4037bcb26ce7c508448d221e570d075196fd4f6912ae6380981098937af9522a" dependencies = [ - "axum", - "hyper 0.14.32", - "metrics 0.21.1", - "metrics-exporter-prometheus", - "papyrus_config", - "pretty_assertions", + "lambdaworks-crypto", + "lambdaworks-math", + "lazy_static", + "num-bigint 0.4.6", + "num-integer", + "num-traits", "serde", - "starknet_infra_utils", - "starknet_sequencer_infra", - "tokio", - "tower 0.4.13", - "tracing", - "validator", + "size-of", + "zeroize", ] [[package]] -name = "starknet_patricia" -version = "0.14.0-rc.3" +name = "starknet_api" +version = "0.15.0-rc.2" dependencies = [ - "async-recursion", + "apollo_infra_utils", + "assert_matches", + "base64 0.13.1", + "bitvec", + "cached", + "cairo-lang-runner", + "cairo-lang-starknet-classes", + "cairo-lang-utils", "derive_more 0.99.18", - "ethnum", + "flate2", "hex", + "indexmap 2.9.0", + "itertools 0.12.1", + "num-bigint 0.4.6", + "num-traits", "pretty_assertions", + "primitive-types", "rand 0.8.5", "rstest", + "semver 1.0.26", "serde", "serde_json", + "sha3", + "size-of", + "starknet-crypto", "starknet-types-core", "strum 0.25.0", "strum_macros 0.25.3", "thiserror 1.0.69", - "tokio", - "tracing", ] [[package]] -name = "starknet_sequencer_infra" -version = "0.14.0-rc.3" +name = "starknet_committer" +version = "0.15.0-rc.2" dependencies = [ - "assert_matches", - "async-trait", - "hyper 0.14.32", - "papyrus_config", + "hex", "pretty_assertions", "rstest", - "serde", "serde_json", "starknet-types-core", "starknet_api", - "starknet_infra_utils", + "starknet_patricia", + "starknet_patricia_storage", "thiserror 1.0.69", "tokio", "tracing", - "tracing-subscriber", - "validator", ] [[package]] -name = "starknet_sequencer_node" -version = "0.14.0-rc.3" +name = "starknet_committer_and_os_cli" +version = "0.15.0-rc.2" dependencies = [ - "anyhow", - "assert-json-diff", + "apollo_starknet_os_program", "assert_matches", + "blake2s", + "blockifier", + "cairo-lang-starknet-classes", + "cairo-vm", "clap", - "colored", - "const_format", + "criterion", + "derive_more 0.99.18", + "ethnum", "futures", - "mempool_test_utils", - "papyrus_config", - "papyrus_proc_macros", - "papyrus_protobuf", + "indexmap 2.9.0", "pretty_assertions", - "rstest", + "rand 0.8.5", + "rand_distr", "serde", "serde_json", + "serde_repr", + "starknet-types-core", "starknet_api", - "starknet_batcher", - "starknet_batcher_types", - "starknet_consensus_manager", - "starknet_gateway", - "starknet_gateway_types", - "starknet_http_server", - "starknet_infra_utils", - "starknet_l1_provider", - "starknet_l1_provider_types", - "starknet_mempool", - "starknet_mempool_p2p", - "starknet_mempool_p2p_types", - "starknet_mempool_types", - "starknet_monitoring_endpoint", - "starknet_sequencer_infra", - "starknet_sequencer_node", - "starknet_sierra_multicompile", - "starknet_state_sync", - "starknet_state_sync_types", + "starknet_committer", + "starknet_os", + "starknet_patricia", + "starknet_patricia_storage", + "strum 0.25.0", + "strum_macros 0.25.3", + "tempfile", + "thiserror 1.0.69", "tokio", "tracing", - "validator", + "tracing-subscriber", ] [[package]] -name = "starknet_sierra_multicompile" -version = "0.14.0-rc.3" +name = "starknet_os" +version = "0.15.0-rc.2" dependencies = [ + "apollo_starknet_os_program", + "ark-bls12-381", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-secp256k1 0.4.0", + "ark-secp256r1 0.4.0", "assert_matches", - "cairo-lang-sierra", + "blockifier", + "blockifier_test_utils", + "c-kzg", + 
"cairo-lang-casm", + "cairo-lang-runner", "cairo-lang-starknet-classes", - "cairo-lang-utils", - "cairo-native", - "mempool_test_utils", - "papyrus_config", - "rlimit", + "cairo-vm", + "derive_more 0.99.18", + "ethnum", + "indexmap 2.9.0", + "indoc 2.0.5", + "log", + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "papyrus_common", + "paste", + "rand 0.8.5", + "regex", "rstest", "serde", "serde_json", + "sha2", + "sha3", + "shared_execution_objects", "starknet-types-core", "starknet_api", - "starknet_infra_utils", - "tempfile", + "starknet_committer", + "starknet_patricia", + "strum 0.25.0", + "strum_macros 0.25.3", "thiserror 1.0.69", - "validator", + "tracing", ] [[package]] -name = "starknet_state_sync" -version = "0.14.0-rc.3" +name = "starknet_patricia" +version = "0.15.0-rc.2" dependencies = [ - "async-trait", - "futures", - "papyrus_config", - "papyrus_network", - "papyrus_p2p_sync", - "papyrus_storage", + "async-recursion", + "derive_more 0.99.18", + "ethnum", + "num-bigint 0.4.6", + "pretty_assertions", + "rand 0.8.5", + "rstest", "serde", + "serde_json", "starknet-types-core", - "starknet_api", - "starknet_sequencer_infra", - "starknet_state_sync_types", + "starknet_patricia_storage", + "strum 0.25.0", + "strum_macros 0.25.3", + "thiserror 1.0.69", "tokio", - "validator", + "tracing", ] [[package]] -name = "starknet_state_sync_types" -version = "0.14.0-rc.3" +name = "starknet_patricia_storage" +version = "0.15.0-rc.2" dependencies = [ - "async-trait", - "futures", - "papyrus_proc_macros", - "papyrus_storage", + "hex", "serde", + "serde_json", "starknet-types-core", "starknet_api", - "starknet_sequencer_infra", "thiserror 1.0.69", ] -[[package]] -name = "starknet_task_executor" -version = "0.14.0-rc.3" -dependencies = [ - "futures", - "rstest", - "tokio", - "tokio-test", -] - [[package]] name = "static_assertions" version = "1.1.0" @@ -11167,6 +12378,9 @@ name = "strum" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros 0.25.3", +] [[package]] name = "strum" @@ -11177,6 +12391,15 @@ dependencies = [ "strum_macros 0.26.4", ] +[[package]] +name = "strum" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +dependencies = [ + "strum_macros 0.27.1", +] + [[package]] name = "strum_macros" version = "0.25.3" @@ -11187,7 +12410,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11200,7 +12423,20 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.95", + "syn 2.0.100", +] + +[[package]] +name = "strum_macros" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.100", ] [[package]] @@ -11220,7 +12456,7 @@ dependencies = [ "hex", "once_cell", "reqwest 0.11.27", - "semver 1.0.24", + "semver 1.0.26", "serde", "serde_json", "sha2", @@ -11242,9 +12478,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.95" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" 
dependencies = [ "proc-macro2", "quote", @@ -11253,14 +12489,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.18" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e89d8bf2768d277f40573c83a02a099e96d96dd3104e13ea676194e61ac4b0" +checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11286,7 +12522,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11296,7 +12532,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.2.1", - "core-foundation", + "core-foundation 0.9.4", "system-configuration-sys 0.5.0", ] @@ -11307,7 +12543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", "system-configuration-sys 0.6.0", ] @@ -11363,7 +12599,7 @@ dependencies = [ "bindgen 0.71.1", "cc", "paste", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -11376,7 +12612,7 @@ dependencies = [ "fastrand 2.3.0", "getrandom 0.2.15", "once_cell", - "rustix 0.38.42", + "rustix 0.38.43", "windows-sys 0.59.0", ] @@ -11391,6 +12627,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "term" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3bb6001afcea98122260987f8b7b5da969ecad46dbf0b5453702f776b491a41" +dependencies = [ + "home", + "windows-sys 0.52.0", +] + [[package]] name = "termcolor" version = "1.4.1" @@ -11424,7 +12670,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11435,7 +12681,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "test-case-core", ] @@ -11458,7 +12704,7 @@ checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11472,11 +12718,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -11487,38 +12733,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.95", -] - -[[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" +checksum = 
"7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", -] - -[[package]] -name = "thiserror-no-std" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" -dependencies = [ - "thiserror-impl-no-std", + "syn 2.0.100", ] [[package]] @@ -11663,7 +12889,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11697,6 +12923,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +dependencies = [ + "rustls 0.23.20", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.17" @@ -11732,7 +12968,7 @@ dependencies = [ "log", "rustls 0.21.12", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tungstenite", "webpki-roots 0.25.4", ] @@ -11747,20 +12983,22 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", + "futures-util", + "hashbrown 0.14.5", "pin-project-lite", "tokio", ] [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.22", + "toml_edit 0.22.24", ] [[package]] @@ -11778,22 +13016,30 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.9.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.22", + "winnow 0.7.4", +] + +[[package]] +name = "toml_test_utils" +version = "0.15.0-rc.2" +dependencies = [ + "serde", + "toml", ] [[package]] @@ -11864,7 +13110,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11883,6 +13129,8 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ + "futures", + "futures-task", "pin-project", "tracing", ] @@ -11915,7 +13163,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", - "nu-ansi-term", + "nu-ansi-term 0.46.0", "once_cell", "regex", "serde", @@ -11923,6 +13171,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", + "time", "tracing", "tracing-core", "tracing-log", @@ -11947,7 +13196,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" 
dependencies = [ "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -11992,12 +13241,42 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" +[[package]] +name = "typeid" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" + [[package]] name = "typenum" version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "typetag" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f22b40dd7bfe8c14230cf9702081366421890435b2d625fa92b4acc4c3de6f" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35f5380909ffc31b4de4f4bdf96b877175a016aa2ca98cee39fcfd8c4d53d952" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "ucd-trie" version = "0.1.7" @@ -12338,7 +13617,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -12373,7 +13652,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -12397,6 +13676,20 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmtimer" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +dependencies = [ + "futures", + "js-sys", + "parking_lot", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.76" @@ -12441,6 +13734,18 @@ dependencies = [ "cc", ] +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.43", +] + [[package]] name = "widestring" version = "1.1.0" @@ -12782,9 +14087,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.22" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -12810,10 +14115,9 @@ dependencies = [ [[package]] name = "workspace_tests" -version = "0.14.0-rc.3" +version = "0.15.0-rc.2" dependencies = [ - "serde", - "toml", + "toml_test_utils", ] [[package]] @@ -12892,15 +14196,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" dependencies = [ "libc", - "linux-raw-sys 0.4.14", - "rustix 0.38.42", + "linux-raw-sys 0.4.15", + "rustix 0.38.43", ] [[package]] name = "xml-rs" -version = "0.8.24" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea8b391c9a790b496184c29f7f93b9ed5b16abb306c05415b68bcc16e4d06432" +checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" [[package]] name 
= "xmltree" @@ -12998,7 +14302,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "synstructure", ] @@ -13014,11 +14318,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.16" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b8c07a70861ce02bad1607b5753ecb2501f67847b9f9ada7c160fff0ec6300c" +checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" dependencies = [ - "zerocopy-derive 0.8.16", + "zerocopy-derive 0.8.23", ] [[package]] @@ -13029,18 +14333,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] name = "zerocopy-derive" -version = "0.8.16" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5226bc9a9a9836e7428936cde76bb6b22feea1a8bfdbc0d241136e4d13417e25" +checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -13060,7 +14364,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", "synstructure", ] @@ -13081,7 +14385,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] @@ -13103,7 +14407,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.100", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4938a1b72c9..30c591d8dbf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,78 +4,148 @@ resolver = "2" members = [ + "crates/apollo_batcher", + "crates/apollo_batcher_types", + "crates/apollo_central_sync", + "crates/apollo_class_manager", + "crates/apollo_class_manager_types", + "crates/apollo_compilation_utils", + "crates/apollo_compile_to_casm", + "crates/apollo_compile_to_casm_types", + "crates/apollo_compile_to_native", + "crates/apollo_config", + "crates/apollo_consensus", + "crates/apollo_consensus_manager", + "crates/apollo_consensus_orchestrator", + "crates/apollo_dashboard", + "crates/apollo_deployments", + "crates/apollo_gateway", + "crates/apollo_gateway_types", + "crates/apollo_http_server", + "crates/apollo_infra", + "crates/apollo_infra_utils", + "crates/apollo_integration_tests", + "crates/apollo_l1_endpoint_monitor", + "crates/apollo_l1_endpoint_monitor_types", + "crates/apollo_l1_gas_price", + "crates/apollo_l1_gas_price_types", + "crates/apollo_l1_provider", + "crates/apollo_l1_provider_types", + "crates/apollo_mempool", + "crates/apollo_mempool_p2p", + "crates/apollo_mempool_p2p_types", + "crates/apollo_mempool_types", + "crates/apollo_metrics", + "crates/apollo_monitoring_endpoint", + "crates/apollo_network", + "crates/apollo_network_types", + "crates/apollo_node", + "crates/apollo_p2p_sync", + "crates/apollo_proc_macros", + "crates/apollo_proc_macros_tests", + "crates/apollo_protobuf", + "crates/apollo_reverts", + "crates/apollo_rpc", + "crates/apollo_rpc_execution", + "crates/apollo_starknet_client", + "crates/apollo_starknet_os_program", + "crates/apollo_state_reader", + "crates/apollo_state_sync", + "crates/apollo_state_sync_metrics", + "crates/apollo_state_sync_types", + 
"crates/apollo_storage", + "crates/apollo_task_executor", + "crates/apollo_test_utils", + "crates/apollo_time", + "crates/blake2s", "crates/blockifier", "crates/blockifier_reexecution", - "crates/committer_cli", + "crates/blockifier_test_utils", "crates/mempool_test_utils", "crates/native_blockifier", "crates/papyrus_base_layer", "crates/papyrus_common", - "crates/papyrus_config", - "crates/papyrus_execution", "crates/papyrus_load_test", "crates/papyrus_monitoring_gateway", - "crates/papyrus_network", - "crates/papyrus_network_types", "crates/papyrus_node", - "crates/papyrus_p2p_sync", - "crates/papyrus_proc_macros", - "crates/papyrus_protobuf", - "crates/papyrus_rpc", - "crates/papyrus_state_reader", - "crates/papyrus_storage", - "crates/papyrus_sync", - "crates/papyrus_test_utils", - "crates/sequencing/papyrus_consensus", - "crates/sequencing/papyrus_consensus_orchestrator", + "crates/shared_execution_objects", "crates/starknet_api", - "crates/starknet_batcher", - "crates/starknet_batcher_types", - "crates/starknet_class_manager_types", - "crates/starknet_client", "crates/starknet_committer", - "crates/starknet_consensus_manager", - "crates/starknet_gateway", - "crates/starknet_gateway_types", - "crates/starknet_http_server", - "crates/starknet_infra_utils", - "crates/starknet_integration_tests", - "crates/starknet_l1_provider", - "crates/starknet_l1_provider_types", - "crates/starknet_mempool", - "crates/starknet_mempool_p2p", - "crates/starknet_mempool_p2p_types", - "crates/starknet_mempool_types", - "crates/starknet_monitoring_endpoint", + "crates/starknet_committer_and_os_cli", + "crates/starknet_os", "crates/starknet_patricia", - "crates/starknet_sequencer_infra", - "crates/starknet_sequencer_node", - "crates/starknet_sierra_multicompile", - "crates/starknet_state_sync", - "crates/starknet_state_sync_types", - "crates/starknet_task_executor", + "crates/starknet_patricia_storage", + "toml_test_utils", "workspace_tests", ] [workspace.package] -version = "0.14.0-rc.3" +version = "0.15.0-rc.2" edition = "2021" repository = "https://github.com/starkware-libs/sequencer/" license = "Apache-2.0" license-file = "LICENSE" [workspace.dependencies] -alloy-contract = "0.3.5" -alloy-dyn-abi = "0.8.3" -alloy-json-rpc = "0.3.5" -alloy-primitives = "0.8.3" -alloy-provider = "0.3.5" -alloy-sol-types = "0.8.3" -alloy-transport = "0.3.5" -alloy-transport-http = "0.3.5" +alloy = "0.12" anyhow = "1.0.44" +apollo_batcher.path = "crates/apollo_batcher" +apollo_batcher_types.path = "crates/apollo_batcher_types" +apollo_central_sync.path = "crates/apollo_central_sync" +apollo_class_manager.path = "crates/apollo_class_manager" +apollo_class_manager_types.path = "crates/apollo_class_manager_types" +apollo_compilation_utils = { path = "crates/apollo_compilation_utils", version = "0.15.0-rc.2" } +apollo_compile_to_casm.path = "crates/apollo_compile_to_casm" +apollo_compile_to_casm_types.path = "crates/apollo_compile_to_casm_types" +apollo_compile_to_native = { path = "crates/apollo_compile_to_native", version = "0.15.0-rc.2" } +apollo_config = { path = "crates/apollo_config", version = "0.15.0-rc.2" } +apollo_consensus.path = "crates/apollo_consensus" +apollo_consensus_manager.path = "crates/apollo_consensus_manager" +apollo_consensus_orchestrator.path = "crates/apollo_consensus_orchestrator" +apollo_dashboard.path = "crates/apollo_dashboard" +apollo_deployments.path = "crates/apollo_deployments" +apollo_gateway.path = "crates/apollo_gateway" +apollo_gateway_types.path = "crates/apollo_gateway_types" 
+apollo_http_server.path = "crates/apollo_http_server" +apollo_infra.path = "crates/apollo_infra" +apollo_infra_utils = { path = "crates/apollo_infra_utils", version = "0.15.0-rc.2" } +apollo_integration_tests.path = "crates/apollo_integration_tests" +apollo_l1_endpoint_monitor.path = "crates/apollo_l1_endpoint_monitor" +apollo_l1_endpoint_monitor_types.path = "crates/apollo_l1_endpoint_monitor_types" +apollo_l1_gas_price.path = "crates/apollo_l1_gas_price" +apollo_l1_gas_price_types.path = "crates/apollo_l1_gas_price_types" +apollo_l1_provider.path = "crates/apollo_l1_provider" +apollo_l1_provider_types.path = "crates/apollo_l1_provider_types" +apollo_mempool.path = "crates/apollo_mempool" +apollo_mempool_p2p.path = "crates/apollo_mempool_p2p" +apollo_mempool_p2p_types.path = "crates/apollo_mempool_p2p_types" +apollo_mempool_types.path = "crates/apollo_mempool_types" +apollo_metrics = { path = "crates/apollo_metrics", version = "0.15.0-rc.2" } +apollo_monitoring_endpoint.path = "crates/apollo_monitoring_endpoint" +apollo_network.path = "crates/apollo_network" +apollo_network_types.path = "crates/apollo_network_types" +apollo_node.path = "crates/apollo_node" +apollo_p2p_sync.path = "crates/apollo_p2p_sync" +apollo_proc_macros = { path = "crates/apollo_proc_macros", version = "0.15.0-rc.2" } +apollo_proc_macros_tests.path = "crates/apollo_proc_macros_tests" +apollo_protobuf.path = "crates/apollo_protobuf" +apollo_reverts.path = "crates/apollo_reverts" +apollo_rpc.path = "crates/apollo_rpc" +apollo_rpc_execution.path = "crates/apollo_rpc_execution" +apollo_starknet_client.path = "crates/apollo_starknet_client" +apollo_starknet_os_program = { path = "crates/apollo_starknet_os_program", version = "0.15.0-rc.2" } +apollo_state_reader.path = "crates/apollo_state_reader" +apollo_state_sync.path = "crates/apollo_state_sync" +apollo_state_sync_metrics.path = "crates/apollo_state_sync_metrics" +apollo_state_sync_types.path = "crates/apollo_state_sync_types" +apollo_storage.path = "crates/apollo_storage" +apollo_task_executor.path = "crates/apollo_task_executor" +apollo_test_utils.path = "crates/apollo_test_utils" +apollo_time.path = "crates/apollo_time" +ark-bls12-381 = "0.4.0" ark-ec = "0.4.2" ark-ff = "0.4.0-alpha.7" +ark-poly = "0.4.0" ark-secp256k1 = "0.4.0" ark-secp256r1 = "0.4.0" assert-json-diff = "2.0.2" @@ -89,34 +159,43 @@ base64 = "0.13.0" bincode = "1.3.3" bisection = "0.1.0" bitvec = "1.0.1" -blockifier = { path = "crates/blockifier", version = "0.14.0-rc.3" } +blake2 = "0.10.6" +blake2s.path = "crates/blake2s" +blockifier = { path = "crates/blockifier", version = "0.15.0-rc.2" } +blockifier_reexecution.path = "crates/blockifier_reexecution" +blockifier_test_utils = { path = "crates/blockifier_test_utils", version = "0.15.0-rc.2" } byteorder = "1.4.3" bytes = "1" +c-kzg = "1.0.3" cached = "0.44.0" cairo-felt = "0.9.1" -cairo-lang-casm = "2.10.0" -cairo-lang-runner = "2.10.0" -cairo-lang-sierra = "=2.10.0" -cairo-lang-sierra-to-casm = "2.10.0" -cairo-lang-starknet-classes = "2.10.0" -cairo-lang-utils = "2.10.0" -cairo-native = "0.3.1" -cairo-vm = "=1.0.2" +cairo-lang-casm = "2.12.0-dev.1" +cairo-lang-runner = "2.12.0-dev.1" +cairo-lang-sierra = "2.12.0-dev.1" +cairo-lang-sierra-to-casm = "2.12.0-dev.1" +cairo-lang-starknet-classes = "2.12.0-dev.1" +cairo-lang-utils = "2.12.0-dev.1" +cairo-native = "0.6.0-rc.1" +sierra-emu = "0.6.0-rc.1" +cairo-vm = "2.2.0" camelpaste = "0.1.0" chrono = "0.4.26" clap = "4.5.4" -colored = "2.1.0" +colored = "3" const_format = "0.2.30" criterion = "0.5.1" 
+dashmap = "6.1.0" deadqueue = "0.2.4" defaultmap = "0.5.0" derive_more = "0.99.17" +digest = "0.10.7" enum-as-inner = "0.6.1" enum-assoc = "1.1.0" enum-iterator = "1.4.1" ethers = "2.0.3" ethers-core = "2.0.3" ethnum = "1.5.0" +expect-test = "1.5.1" flate2 = "1.0.24" fs2 = "0.4" futures = "0.3.21" @@ -131,6 +210,7 @@ http-body = "0.4.5" human_bytes = "0.4.3" hyper = "0.14" indexmap = "2.1.0" +indoc = "2.0.5" insta = "1.29.0" integer-encoding = "3.0.4" itertools = "0.12.1" @@ -144,43 +224,34 @@ libp2p-swarm-test = "0.3.0" log = "0.4" lru = "0.12.0" memmap2 = "0.8.0" -mempool_test_utils = { path = "crates/mempool_test_utils", version = "0.14.0-rc.3" } -metrics = "0.21.0" -metrics-exporter-prometheus = "0.12.1" +mempool_test_utils.path = "crates/mempool_test_utils" +metrics = "0.24.1" +metrics-exporter-prometheus = "0.16.1" metrics-process = "1.0.11" mockall = "0.12.1" mockito = "1.4.0" +native_blockifier.path = "crates/native_blockifier" nix = "0.20.0" num-bigint = "0.4" num-integer = "0.1.45" num-rational = "0.4" num-traits = "0.2.15" +num_enum = "0.7.3" once_cell = "1.19.0" os_info = "3.6.0" page_size = "0.6.0" -papyrus_base_layer = { path = "crates/papyrus_base_layer", version = "0.14.0-rc.3" } -papyrus_common = { path = "crates/papyrus_common", version = "0.14.0-rc.3" } -papyrus_config = { path = "crates/papyrus_config", version = "0.14.0-rc.3" } -papyrus_consensus = { path = "crates/sequencing/papyrus_consensus", version = "0.14.0-rc.3" } -papyrus_consensus_orchestrator = { path = "crates/sequencing/papyrus_consensus_orchestrator", version = "0.14.0-rc.3" } -papyrus_execution = { path = "crates/papyrus_execution", version = "0.14.0-rc.3" } -papyrus_monitoring_gateway = { path = "crates/papyrus_monitoring_gateway", version = "0.14.0-rc.3" } -papyrus_network = { path = "crates/papyrus_network", version = "0.14.0-rc.3" } -papyrus_network_types = { path = "crates/papyrus_network_types", version = "0.14.0-rc.3" } -papyrus_p2p_sync = { path = "crates/papyrus_p2p_sync", version = "0.14.0-rc.3" } -papyrus_proc_macros = { path = "crates/papyrus_proc_macros", version = "0.14.0-rc.3" } -papyrus_protobuf = { path = "crates/papyrus_protobuf", version = "0.14.0-rc.3" } -papyrus_rpc = { path = "crates/papyrus_rpc", version = "0.14.0-rc.3" } -papyrus_state_reader = { path = "crates/papyrus_state_reader", version = "0.14.0-rc.3" } -papyrus_storage = { path = "crates/papyrus_storage", version = "0.14.0-rc.3" } -papyrus_sync = { path = "crates/papyrus_sync", version = "0.14.0-rc.3" } -papyrus_test_utils = { path = "crates/papyrus_test_utils", version = "0.14.0-rc.3" } +papyrus_base_layer.path = "crates/papyrus_base_layer" +papyrus_common.path = "crates/papyrus_common" +papyrus_load_test.path = "crates/papyrus_load_test" +papyrus_monitoring_gateway.path = "crates/papyrus_monitoring_gateway" +papyrus_node.path = "crates/papyrus_node" parity-scale-codec = "3.6" parity-scale-codec-derive = "3.6" paste = "1.0.15" phf = "0.11" pretty_assertions = "1.4.0" primitive-types = "0.12.1" +proc-macro2 = "1.0" prometheus-parse = "0.2.4" prost = "0.12.1" prost-build = "0.12.1" @@ -205,38 +276,23 @@ semver = "1.0.23" serde = "1.0.197" serde_json = "1.0.116" serde_repr = "0.1.19" +serde_with = "3.12.0" serde_yaml = "0.9.16" sha2 = "0.10.8" sha3 = "0.10.8" +shared_execution_objects.path = "crates/shared_execution_objects" simple_logger = "4.0.0" -starknet-core = "0.6.0" +size-of = "0.1.5" +socket2 = "0.5.8" +starknet-core = "0.12.1" starknet-crypto = "0.7.1" -starknet-types-core = "0.1.6" -starknet_api = { path = 
"crates/starknet_api", version = "0.14.0-rc.3" } -starknet_batcher = { path = "crates/starknet_batcher", version = "0.14.0-rc.3" } -starknet_batcher_types = { path = "crates/starknet_batcher_types", version = "0.14.0-rc.3" } -starknet_class_manager_types = { path = "crates/starknet_class_manager_types", version = "0.14.0-rc.3" } -starknet_client = { path = "crates/starknet_client", version = "0.14.0-rc.3" } -starknet_committer = { path = "crates/starknet_committer", version = "0.14.0-rc.3" } -starknet_consensus_manager = { path = "crates/starknet_consensus_manager", version = "0.14.0-rc.3" } -starknet_gateway = { path = "crates/starknet_gateway", version = "0.14.0-rc.3" } -starknet_gateway_types = { path = "crates/starknet_gateway_types", version = "0.14.0-rc.3" } -starknet_http_server = { path = "crates/starknet_http_server", version = "0.14.0-rc.3" } -starknet_infra_utils = { path = "crates/starknet_infra_utils", version = "0.14.0-rc.3" } -starknet_l1_provider = { path = "crates/starknet_l1_provider", version = "0.14.0-rc.3" } -starknet_l1_provider_types = { path = "crates/starknet_l1_provider_types", version = "0.14.0-rc.3" } -starknet_mempool = { path = "crates/starknet_mempool", version = "0.14.0-rc.3" } -starknet_mempool_p2p = { path = "crates/starknet_mempool_p2p", version = "0.14.0-rc.3" } -starknet_mempool_p2p_types = { path = "crates/starknet_mempool_p2p_types", version = "0.14.0-rc.3" } -starknet_mempool_types = { path = "crates/starknet_mempool_types", version = "0.14.0-rc.3" } -starknet_monitoring_endpoint = { path = "crates/starknet_monitoring_endpoint", version = "0.14.0-rc.3" } -starknet_patricia = { path = "crates/starknet_patricia", version = "0.14.0-rc.3" } -starknet_sequencer_infra = { path = "crates/starknet_sequencer_infra", version = "0.14.0-rc.3" } -starknet_sequencer_node = { path = "crates/starknet_sequencer_node", version = "0.14.0-rc.3" } -starknet_sierra_multicompile = { path = "crates/starknet_sierra_multicompile", version = "0.14.0-rc.3" } -starknet_state_sync = { path = "crates/starknet_state_sync", version = "0.14.0-rc.3" } -starknet_state_sync_types = { path = "crates/starknet_state_sync_types", version = "0.14.0-rc.3" } -starknet_task_executor = { path = "crates/starknet_task_executor", version = "0.14.0-rc.3" } +starknet-types-core = "0.1.8" +starknet_api = { path = "crates/starknet_api", version = "0.15.0-rc.2" } +starknet_committer.path = "crates/starknet_committer" +starknet_committer_and_os_cli.path = "crates/starknet_committer_and_os_cli" +starknet_os.path = "crates/starknet_os" +starknet_patricia.path = "crates/starknet_patricia" +starknet_patricia_storage.path = "crates/starknet_patricia_storage" static_assertions = "1.1.0" statistical = "1.0.0" strum = "0.25.0" @@ -248,12 +304,14 @@ test-case = "3.2.1" test-log = "0.2.14" thiserror = "1.0.37" tikv-jemallocator = "0.5.4" +time = "0.3.37" tokio = "1.37.0" tokio-retry = "0.3" tokio-stream = "0.1.8" tokio-test = "0.4.4" tokio-util = "0.7.13" toml = "0.8" +toml_test_utils.path = "toml_test_utils" tower = "0.4.13" tracing = "0.1.37" tracing-subscriber = "0.3.16" @@ -262,6 +320,8 @@ unsigned-varint = "0.8.0" url = "2.5.0" validator = "0.12" void = "1.0.2" +waker-fn = "1.2.0" +workspace_tests.path = "workspace_tests" zstd = "0.13.1" # Note: both rust and clippy lints are warning by default and denied on the CI (see run_tests.py). 
diff --git a/Monitoring/papyrus/grafana.json b/Monitoring/papyrus/grafana.json index 529dee3a7de..e9d0ac65645 100644 --- a/Monitoring/papyrus/grafana.json +++ b/Monitoring/papyrus/grafana.json @@ -9278,4 +9278,4 @@ "uid": "Tv-UWle4k", "version": 60, "weekStart": "" -} +} \ No newline at end of file diff --git a/WORKSPACE b/WORKSPACE index e69de29bb2d..8b137891791 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -0,0 +1 @@ + diff --git a/build_native_in_docker.sh b/build_native_in_docker.sh index fdfbdf4f010..e603f5848ec 100755 --- a/build_native_in_docker.sh +++ b/build_native_in_docker.sh @@ -1,21 +1,21 @@ #!/bin/env bash set -e -docker_image_name=sequencer-ci +# Enables BuildKit builds on older versions of docker. +export DOCKER_BUILDKIT=1 -( - cd scripts - docker build . --build-arg USER_UID=$UID -t ${docker_image_name} --file ${docker_image_name}.Dockerfile -) +docker_image_name=sequencer-ci +dockerfile_path="docker-ci/images/${docker_image_name}.Dockerfile" +docker build . --build-arg USER_UID=$UID -t ${docker_image_name} --file ${dockerfile_path} docker run \ --rm \ --net host \ - -e CARGO_HOME=${HOME}/.cargo \ + -e CARGO_HOME="${HOME}/.cargo" \ -u $UID \ -v /tmp:/tmp \ -v "${HOME}:${HOME}" \ - --workdir ${PWD} \ + --workdir "${PWD}" \ ${docker_image_name} \ "$@" diff --git a/commitlint.config.js b/commitlint.config.js index 092aee14c51..e5337cff50a 100644 --- a/commitlint.config.js +++ b/commitlint.config.js @@ -1,14 +1,122 @@ +const AllowedScopes = ['apollo_batcher', + 'apollo_batcher_types', + 'apollo_central_sync', + 'apollo_class_manager', + 'apollo_class_manager_types', + 'apollo_compilation_utils', + 'apollo_compile_to_casm', + 'apollo_compile_to_casm_types', + 'apollo_compile_to_native', + 'apollo_config', + 'apollo_consensus', + 'apollo_consensus_manager', + 'apollo_consensus_orchestrator', + 'apollo_dashboard', + 'apollo_deployments', + 'apollo_gateway', + 'apollo_gateway_types', + 'apollo_http_server', + 'apollo_infra', + 'apollo_infra_utils', + 'apollo_integration_tests', + 'apollo_l1_endpoint_monitor', + 'apollo_l1_gas_price', + 'apollo_l1_gas_price_types', + 'apollo_l1_provider', + 'apollo_l1_provider_types', + 'apollo_mempool', + 'apollo_mempool_p2p', + 'apollo_mempool_p2p_types', + 'apollo_mempool_types', + 'apollo_metrics', + 'apollo_monitoring_endpoint', + 'apollo_network', + 'apollo_network_types', + 'apollo_node', + 'apollo_p2p_sync', + 'apollo_proc_macros', + 'apollo_protobuf', + 'apollo_reverts', + 'apollo_rpc', + 'apollo_rpc_execution', + 'apollo_starknet_client', + 'apollo_starknet_os_program', + 'apollo_state_reader', + 'apollo_state_sync', + 'apollo_state_sync_metrics', + 'apollo_state_sync_types', + 'apollo_storage', + 'apollo_task_executor', + 'apollo_test_utils', + 'apollo_time', + 'batcher', + 'blake2s', + 'blockifier', + 'blockifier_reexecution', + 'blockifier_test_utils', + 'cairo_native', + 'ci', + 'committer', + 'deployment', + 'infra', + 'l1', + 'mempool_test_utils', + 'native_blockifier', + 'papyrus_base_layer', + 'papyrus_common', + 'papyrus_load_test', + 'papyrus_monitoring_gateway', + 'papyrus_node', + 'release', + 'scripts', + 'shared_execution_objects', + 'starknet_api', + 'starknet_committer', + 'starknet_committer_and_os_cli', + 'starknet_os', + 'starknet_patricia', + 'starknet_patricia_storage', + 'time', + 'workspace_tests', +]; const Configuration = { /* * Resolve and load @commitlint/config-conventional from node_modules.
* Referenced packages must be installed */ - extends: ['@commitlint/config-conventional'], + //extends: ['@commitlint/config-conventional'], /* * Resolve and load conventional-changelog-atom from node_modules. * Referenced packages must be installed */ // parserPreset: 'conventional-changelog-atom', + parserPreset: { + parserOpts: { + // Match: "scope1[,scope2...]: subject" + headerPattern: /^([\w,]+): (.+)$/, + headerCorrespondence: ['scope', 'subject'], + }, + }, + + plugins: [ + { + rules: { + 'multi-scope-enum': ({ header }) => { + const match = header.match(/^([\w,]+):/); + if (!match) return [false, 'Cannot parse header']; + + const scopes = match[1].split(',').map((s) => s.trim()); + const invalid = scopes.filter((s) => !AllowedScopes.includes(s)); + + if (invalid.length > 0) { + return [false, `Invalid scope(s): ${invalid.join(', ')}`]; + } + return [true]; + }, + }, + }, + ], /* * Resolve and load @commitlint/format from node_modules. * Referenced package must be installed @@ -18,64 +126,12 @@ const Configuration = { * Any rules defined here will override rules from @commitlint/config-conventional */ rules: { - 'scope-enum': [2, 'always', [ - 'blockifier', - 'blockifier_reexecution', - 'cairo_native', - 'ci', - 'committer', - 'committer_cli', - 'consensus', - 'deployment', - 'helm', - 'mempool_test_utils', - 'native_blockifier', - 'papyrus_base_layer', - 'papyrus_common', - 'papyrus_config', - 'papyrus_execution', - 'papyrus_load_test', - 'papyrus_monitoring_gateway', - 'papyrus_network', - 'papyrus_network_types', - 'papyrus_node', - 'papyrus_p2p_sync', - 'papyrus_proc_macros', - 'papyrus_protobuf', - 'papyrus_rpc', - 'papyrus_state_reader', - 'papyrus_storage', - 'papyrus_sync', - 'papyrus_test_utils', - 'release', - 'sequencing', - 'starknet_api', - 'starknet_batcher', - 'starknet_batcher_types', - 'starknet_client', - 'starknet_committer', - 'starknet_consensus_manager', - 'starknet_class_manager_types', - 'starknet_gateway', - 'starknet_gateway_types', - 'starknet_http_server', - 'starknet_infra_utils', - 'starknet_integration_tests', - 'starknet_l1_provider', - 'starknet_l1_provider_types', - 'starknet_mempool', - 'starknet_mempool_p2p', - 'starknet_mempool_p2p_types', - 'starknet_mempool_types', - 'starknet_monitoring_endpoint', - 'starknet_patricia', - 'starknet_sequencer_infra', - 'starknet_sequencer_node', - 'starknet_sierra_multicompile', - 'starknet_state_sync', - 'starknet_state_sync_types', - 'starknet_task_executor', - ]], + 'scope-empty': [2, 'never'], + 'scope-enum': [0], // Disable builtin (we validate via plugin). + 'subject-empty': [2, 'never'], + 'multi-scope-enum': [2, 'always'], + 'type-empty': [0], // No type used. + 'type-enum': [0], 'header-max-length': [2, 'always', 100], }, /* diff --git a/config/papyrus/default_config.json b/config/papyrus/default_config.json index 228b5a4c832..5c3bfe70921 100644 --- a/config/papyrus/default_config.json +++ b/config/papyrus/default_config.json @@ -1,14 +1,24 @@ { "base_layer.node_url": { - "description": "A required param! Ethereum node URL. A schema to match to Infura node: https://mainnet.infura.io/v3/, but any other node can be used.", - "param_type": "String", - "privacy": "Private" + "description": "Initial ethereum node URL. A schema to match to Infura node: https://mainnet.infura.io/v3/, but any other node can be used. 
May be replaced during runtime if it becomes inoperative", + "privacy": "Private", + "value": "https://mainnet.infura.io/v3/%3Cyour_api_key%3E" + }, + "base_layer.prague_blob_gas_calc": { + "description": "If true, use the blob gas calculation from the Pectra upgrade. If false, use the EIP 4844 calculation.", + "privacy": "Public", + "value": true + }, "base_layer.starknet_contract_address": { "description": "Starknet contract address in ethereum.", "privacy": "Public", "value": "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4" }, + "base_layer.timeout_millis": { + "description": "The timeout (milliseconds) for a query of the L1 base layer.", + "privacy": "Public", + "value": 1000 + }, "central.class_cache_size": { "description": "Size of class cache, must be a positive integer.", "privacy": "Public", @@ -60,9 +70,9 @@ "privacy": "Public" }, "chain_id": { - "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "privacy": "TemporaryValue", - "value": "SN_MAIN" + "description": "A required param! The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", + "param_type": "String", + "privacy": "TemporaryValue" }, "collect_metrics": { "description": "If true, collect metrics for the node.", @@ -79,130 +89,130 @@ "privacy": "TemporaryValue", "value": true }, - "consensus.chain_id": { - "description": "The chain id of the Starknet chain.", - "pointer_target": "chain_id", - "privacy": "Public" + "consensus.future_height_limit": { + "description": "How many heights in the future should we cache.", + "privacy": "Public", + "value": 10 + }, + "consensus.future_height_round_limit": { + "description": "How many rounds should we cache for future heights.", + "privacy": "Public", + "value": 1 + }, + "consensus.future_round_limit": { + "description": "How many rounds in the future (for current height) should we cache.", + "privacy": "Public", + "value": 10 }, - "consensus.consensus_delay": { + "consensus.startup_delay": { "description": "Delay (seconds) before starting consensus to give time for network peering.", "privacy": "Public", "value": 5 }, - "consensus.network_config.advertised_multiaddr": { - "description": "The external address other peers see this node. If this is set, the node will not try to find out which addresses it has and will write this address as external instead", + "consensus.sync_retry_interval": { + "description": "The duration (seconds) between sync attempts.", "privacy": "Public", - "value": "" + "value": 1.0 }, - "consensus.network_config.advertised_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true + "consensus.timeouts.precommit_timeout": { + "description": "The timeout (seconds) for a precommit.", + "privacy": "Public", + "value": 1.0 }, - "consensus.network_config.bootstrap_peer_multiaddr": { - "description": "The multiaddress of the peer node. It should include the peer's id. 
For more info: https://docs.libp2p.io/concepts/fundamentals/peers/", + "consensus.timeouts.prevote_timeout": { + "description": "The timeout (seconds) for a prevote.", "privacy": "Public", - "value": "" + "value": 1.0 + }, + "consensus.timeouts.proposal_timeout": { + "description": "The timeout (seconds) for a proposal.", + "privacy": "Public", + "value": 3.0 + }, + "consensus.validator_id": { + "description": "The validator id of the node.", + "privacy": "Public", + "value": "0x64" }, - "consensus.network_config.bootstrap_peer_multiaddr.#is_none": { + "context.#is_none": { "description": "Flag for an optional field.", "privacy": "TemporaryValue", "value": true }, - "consensus.network_config.chain_id": { - "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "consensus.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": { - "description": "The base delay in milliseconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 2 - }, - "consensus.network_config.discovery_config.bootstrap_dial_retry_config.factor": { - "description": "The factor for the exponential backoff strategy.", + "context.block_timestamp_window_seconds": { + "description": "Maximum allowed deviation (seconds) of a proposed block's timestamp from the current time.", "privacy": "Public", - "value": 5 + "value": 1 }, - "consensus.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": { - "description": "The maximum delay in seconds for the exponential backoff strategy.", + "context.build_proposal_margin_millis": { + "description": "Safety margin (in ms) to make sure that the batcher completes building the proposal with enough time for the Fin to be checked by validators.", "privacy": "Public", - "value": 5 + "value": 1000 }, - "consensus.network_config.discovery_config.heartbeat_interval": { - "description": "The interval between each discovery (Kademlia) query in milliseconds.", + "context.builder_address": { + "description": "The address of the contract that builds the block.", "privacy": "Public", - "value": 100 + "value": "0x0" }, - "consensus.network_config.idle_connection_timeout": { - "description": "Amount of time in seconds that a connection with no active sessions will stay alive.", - "privacy": "Public", - "value": 120 + "context.chain_id": { + "description": "The chain id of the Starknet chain.", + "pointer_target": "chain_id", + "privacy": "Public" }, - "consensus.network_config.peer_manager_config.malicious_timeout_seconds": { - "description": "The duration in seconds a peer is blacklisted after being marked as malicious.", + "context.constant_l2_gas_price": { + "description": "If true, sets STRK gas price to its minimum price from the versioned constants.", "privacy": "Public", - "value": 31536000 + "value": false }, - "consensus.network_config.peer_manager_config.unstable_timeout_millis": { - "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.", + "context.l1_da_mode": { + "description": "The data availability mode, true: Blob, false: Calldata.", "privacy": "Public", - "value": 1000 + "value": true }, - "consensus.network_config.quic_port": { - "description": "The port that the node listens on for incoming quic connections.", + "context.l1_data_gas_price_multiplier_ppt": { + "description": "Part per thousand of multiplicative 
factor to apply to the data gas price, to enable fine-tuning of the price charged to end users.", "privacy": "Public", - "value": 10101 + "value": 135 }, - "consensus.network_config.secret_key": { - "description": "The secret key used for building the peer id. If it's an empty string a random one will be used.", - "privacy": "Private", - "value": "" - }, - "consensus.network_config.session_timeout": { - "description": "Maximal time in seconds that each session can take before failing on timeout.", + "context.l1_gas_tip_wei": { + "description": "This additional gas is added to the L1 gas price.", "privacy": "Public", - "value": 120 + "value": 1000000000 }, - "consensus.network_config.tcp_port": { - "description": "The port that the node listens on for incoming tcp connections.", + "context.max_l1_data_gas_price_wei": { + "description": "The maximum L1 data gas price in wei.", "privacy": "Public", - "value": 10100 + "value": 1000000000000000000 }, - "consensus.network_topic": { - "description": "The network topic of the consensus.", + "context.max_l1_gas_price_wei": { + "description": "The maximum L1 gas price in wei.", "privacy": "Public", - "value": "consensus" + "value": 200000000000 }, - "consensus.num_validators": { - "description": "The number of validators in the consensus.", + "context.min_l1_data_gas_price_wei": { + "description": "The minimum L1 data gas price in wei.", "privacy": "Public", "value": 1 }, - "consensus.start_height": { - "description": "The height to start the consensus from.", + "context.min_l1_gas_price_wei": { + "description": "The minimum L1 gas price in wei.", "privacy": "Public", - "value": 0 + "value": 1000000000 }, - "consensus.timeouts.precommit_timeout": { - "description": "The timeout (seconds) for a precommit.", + "context.num_validators": { + "description": "The number of validators.", "privacy": "Public", - "value": 1.0 - }, - "consensus.timeouts.prevote_timeout": { - "description": "The timeout (seconds) for a prevote.", - "privacy": "Public", - "value": 1.0 + "value": 1 }, - "consensus.timeouts.proposal_timeout": { - "description": "The timeout (seconds) for a proposal.", + "context.proposal_buffer_size": { + "description": "The buffer size for streaming outbound proposals.", "privacy": "Public", - "value": 3.0 + "value": 100 }, - "consensus.validator_id": { - "description": "The validator id of the node.", + "context.validate_proposal_margin_millis": { + "description": "Safety margin (in ms) to make sure that consensus determines when to timeout validating a proposal.", "privacy": "Public", - "value": "0x64" + "value": 10000 }, "monitoring_gateway.collect_metrics": { "description": "If true, collect and return metrics in the monitoring gateway.", @@ -254,6 +264,11 @@ "privacy": "TemporaryValue", "value": true }, + "network.broadcasted_message_metadata_buffer_size": { + "description": "The size of the buffer that holds the metadata of the broadcasted messages.", + "privacy": "Public", + "value": 100000 + }, "network.chain_id": { "description": "The chain to follow. 
For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", "pointer_target": "chain_id", @@ -287,17 +302,22 @@ "network.peer_manager_config.malicious_timeout_seconds": { "description": "The duration in seconds a peer is blacklisted after being marked as malicious.", "privacy": "Public", - "value": 31536000 + "value": 1 }, "network.peer_manager_config.unstable_timeout_millis": { "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.", "privacy": "Public", "value": 1000 }, - "network.quic_port": { - "description": "The port that the node listens on for incoming quic connections.", + "network.port": { + "description": "The port that the node listens on for incoming tcp connections.", "privacy": "Public", - "value": 10001 + "value": 10000 + }, + "network.reported_peer_ids_buffer_size": { + "description": "The size of the buffer that holds the reported peer ids.", + "privacy": "Public", + "value": 100000 }, "network.secret_key": { "description": "The secret key used for building the peer id. If it's an empty string a random one will be used.", @@ -309,11 +329,6 @@ "privacy": "Public", "value": 120 }, - "network.tcp_port": { - "description": "The port that the node listens on for incoming tcp connections.", - "privacy": "Public", - "value": 10000 - }, "p2p_sync.#is_none": { "description": "Flag for an optional field.", "privacy": "TemporaryValue", @@ -349,6 +364,26 @@ "privacy": "Public", "value": 50 }, + "p2p_sync.wait_period_for_other_protocol": { + "description": "Time in milliseconds to wait for a dependency protocol to advance (e.g., state diff sync depends on header sync).", + "privacy": "Public", + "value": 50 + }, + "rpc.apollo_gateway_retry_config.max_retries": { + "description": "For communicating with Starknet gateway, maximum number of retries before the node stops retrying.", + "privacy": "Public", + "value": 5 + }, + "rpc.apollo_gateway_retry_config.retry_base_millis": { + "description": "For communicating with Starknet gateway, base waiting time after a failed request. After that, the time increases exponentially.", + "privacy": "Public", + "value": 50 + }, + "rpc.apollo_gateway_retry_config.retry_max_delay_millis": { + "description": "For communicating with Starknet gateway, max waiting time after a failed request.", + "privacy": "Public", + "value": 1000 + }, "rpc.chain_id": { "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", "pointer_target": "chain_id", @@ -374,6 +409,11 @@ "privacy": "Public", "value": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" }, + "rpc.ip": { + "description": "The JSON RPC server IP.", + "privacy": "Public", + "value": "0.0.0.0" + }, "rpc.max_events_chunk_size": { "description": "Maximum chunk size supported by the node in get_events requests.", "privacy": "Public", @@ -384,25 +424,10 @@ "privacy": "Public", "value": 100 }, - "rpc.server_address": { - "description": "IP:PORT of the node`s JSON-RPC server.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "rpc.starknet_gateway_retry_config.max_retries": { - "description": "For communicating with Starknet gateway, maximum number of retries before the node stops retrying.", - "privacy": "Public", - "value": 5 - }, - "rpc.starknet_gateway_retry_config.retry_base_millis": { - "description": "For communicating with Starknet gateway, base waiting time after a failed request. 
After that, the time increases exponentially.", + "rpc.port": { + "description": "The JSON RPC server port.", "privacy": "Public", - "value": 50 - }, - "rpc.starknet_gateway_retry_config.retry_max_delay_millis": { - "description": "For communicating with Starknet gateway, max waiting time after a failed request.", - "privacy": "Public", - "value": 1000 + "value": 8090 }, "rpc.starknet_url": { "description": "URL for communicating with Starknet in write_api methods.", @@ -499,6 +524,11 @@ "privacy": "Public", "value": 1000 }, + "sync.store_sierras_and_casms": { + "description": "Whether to store sierras and casms to the storage. This allows maintaining backward-compatibility with native-blockifier", + "privacy": "Public", + "value": true + }, "sync.verify_blocks": { "description": "Whether to verify incoming blocks.", "privacy": "Public", diff --git a/config/sequencer/default_config.json b/config/sequencer/default_config.json deleted file mode 100644 index 7d739be9b78..00000000000 --- a/config/sequencer/default_config.json +++ /dev/null @@ -1,1197 +0,0 @@ -{ - "batcher_config.block_builder_config.bouncer_config.block_max_capacity.l1_gas": { - "description": "An upper bound on the total l1_gas used in a block.", - "privacy": "Public", - "value": 2500000 - }, - "batcher_config.block_builder_config.bouncer_config.block_max_capacity.message_segment_length": { - "description": "An upper bound on the message segment length in a block.", - "privacy": "Public", - "value": 3700 - }, - "batcher_config.block_builder_config.bouncer_config.block_max_capacity.n_events": { - "description": "An upper bound on the total number of events generated in a block.", - "privacy": "Public", - "value": 5000 - }, - "batcher_config.block_builder_config.bouncer_config.block_max_capacity.sierra_gas": { - "description": "An upper bound on the total sierra_gas used in a block.", - "privacy": "Public", - "value": 400000000 - }, - "batcher_config.block_builder_config.bouncer_config.block_max_capacity.state_diff_size": { - "description": "An upper bound on the total state diff size in a block.", - "privacy": "Public", - "value": 4000 - }, - "batcher_config.block_builder_config.chain_info.chain_id": { - "description": "The chain ID of the StarkNet chain.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "batcher_config.block_builder_config.chain_info.fee_token_addresses.eth_fee_token_address": { - "description": "Address of the ETH fee token.", - "pointer_target": "eth_fee_token_address", - "privacy": "Public" - }, - "batcher_config.block_builder_config.chain_info.fee_token_addresses.strk_fee_token_address": { - "description": "Address of the STRK fee token.", - "pointer_target": "strk_fee_token_address", - "privacy": "Public" - }, - "batcher_config.block_builder_config.execute_config.concurrency_config.chunk_size": { - "description": "The size of the transaction chunk executed in parallel.", - "privacy": "Public", - "value": 0 - }, - "batcher_config.block_builder_config.execute_config.concurrency_config.enabled": { - "description": "Enables concurrency of transaction execution.", - "privacy": "Public", - "value": false - }, - "batcher_config.block_builder_config.execute_config.concurrency_config.n_workers": { - "description": "Number of parallel transaction execution workers.", - "privacy": "Public", - "value": 0 - }, - "batcher_config.block_builder_config.execute_config.stack_size": { - "description": "The thread stack size (proportional to the maximal gas of a transaction).", - "privacy": "Public", - "value": 
62914560 - }, - "batcher_config.block_builder_config.tx_chunk_size": { - "description": "The size of the transaction chunk.", - "privacy": "Public", - "value": 100 - }, - "batcher_config.block_builder_config.versioned_constants_overrides.invoke_tx_max_n_steps": { - "description": "Maximum number of steps the invoke function is allowed to run.", - "pointer_target": "versioned_constants_overrides.invoke_tx_max_n_steps", - "privacy": "Public" - }, - "batcher_config.block_builder_config.versioned_constants_overrides.max_n_events": { - "description": "Maximum number of events that can be emitted from the transation.", - "pointer_target": "versioned_constants_overrides.max_n_events", - "privacy": "Public" - }, - "batcher_config.block_builder_config.versioned_constants_overrides.max_recursion_depth": { - "description": "Maximum recursion depth for nested calls during blockifier validation.", - "pointer_target": "versioned_constants_overrides.max_recursion_depth", - "privacy": "Public" - }, - "batcher_config.block_builder_config.versioned_constants_overrides.validate_max_n_steps": { - "description": "Maximum number of steps the validation function is allowed to run.", - "pointer_target": "versioned_constants_overrides.validate_max_n_steps", - "privacy": "Public" - }, - "batcher_config.contract_class_manager_config.cairo_native_run_config.channel_size": { - "description": "The size of the compilation request channel.", - "privacy": "Public", - "value": 2000 - }, - "batcher_config.contract_class_manager_config.cairo_native_run_config.native_classes_whitelist": { - "description": "Contracts for Cairo Specifies whether to execute all class hashes or only a limited selection using Cairo native contracts. If limited, a specific list of class hashes is provided. compilation.", - "privacy": "Public", - "value": "All" - }, - "batcher_config.contract_class_manager_config.cairo_native_run_config.run_cairo_native": { - "description": "Enables Cairo native execution.", - "privacy": "Public", - "value": false - }, - "batcher_config.contract_class_manager_config.cairo_native_run_config.wait_on_native_compilation": { - "description": "Block Sequencer main program while compiling sierra, for testing.", - "privacy": "Public", - "value": false - }, - "batcher_config.contract_class_manager_config.contract_cache_size": { - "description": "The size of the global contract cache.", - "privacy": "Public", - "value": 600 - }, - "batcher_config.contract_class_manager_config.native_compiler_config.max_casm_bytecode_size": { - "description": "Limitation of compiled casm bytecode size.", - "privacy": "Public", - "value": 81920 - }, - "batcher_config.contract_class_manager_config.native_compiler_config.max_cpu_time": { - "description": "Limitation of compilation cpu time (seconds).", - "privacy": "Public", - "value": 20 - }, - "batcher_config.contract_class_manager_config.native_compiler_config.max_memory_usage": { - "description": "Limitation of compilation process's virtual memory (bytes).", - "privacy": "Public", - "value": 5368709120 - }, - "batcher_config.contract_class_manager_config.native_compiler_config.max_native_bytecode_size": { - "description": "Limitation of compiled native bytecode size.", - "privacy": "Public", - "value": 15728640 - }, - "batcher_config.contract_class_manager_config.native_compiler_config.optimization_level": { - "description": "The level of optimization to apply during compilation.", - "privacy": "Public", - "value": 2 - }, - 
"batcher_config.contract_class_manager_config.native_compiler_config.panic_on_compilation_failure": { - "description": "Whether to panic on compilation failure.", - "privacy": "Public", - "value": false - }, - "batcher_config.contract_class_manager_config.native_compiler_config.sierra_to_native_compiler_path": { - "description": "The path to the Sierra-to-Native compiler binary.", - "privacy": "Public", - "value": "" - }, - "batcher_config.contract_class_manager_config.native_compiler_config.sierra_to_native_compiler_path.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "batcher_config.input_stream_content_buffer_size": { - "description": "Sets the buffer size for the input transaction channel. Adding more transactions beyond this limit will block until space is available.", - "privacy": "Public", - "value": 400 - }, - "batcher_config.max_l1_handler_txs_per_block_proposal": { - "description": "The maximum number of L1 handler transactions to include in a block proposal.", - "privacy": "Public", - "value": 3 - }, - "batcher_config.outstream_content_buffer_size": { - "description": "The maximum number of items to include in a single get_proposal_content response.", - "privacy": "Public", - "value": 100 - }, - "batcher_config.storage.db_config.chain_id": { - "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "batcher_config.storage.db_config.enforce_file_exists": { - "description": "Whether to enforce that the path exists. If true, `open_env` fails when the mdbx.dat file does not exist.", - "privacy": "Public", - "value": true - }, - "batcher_config.storage.db_config.growth_step": { - "description": "The growth step in bytes, must be greater than zero to allow the database to grow.", - "privacy": "Public", - "value": 4294967296 - }, - "batcher_config.storage.db_config.max_size": { - "description": "The maximum size of the node's storage in bytes.", - "privacy": "Public", - "value": 1099511627776 - }, - "batcher_config.storage.db_config.min_size": { - "description": "The minimum size of the node's storage in bytes.", - "privacy": "Public", - "value": 1048576 - }, - "batcher_config.storage.db_config.path_prefix": { - "description": "Prefix of the path of the node's storage directory, the storage file path will be /. The path is not created automatically.", - "privacy": "Public", - "value": "." - }, - "batcher_config.storage.mmap_file_config.growth_step": { - "description": "The growth step in bytes, must be greater than max_object_size.", - "privacy": "Public", - "value": 1073741824 - }, - "batcher_config.storage.mmap_file_config.max_object_size": { - "description": "The maximum size of a single object in the file in bytes", - "privacy": "Public", - "value": 268435456 - }, - "batcher_config.storage.mmap_file_config.max_size": { - "description": "The maximum size of a memory mapped file in bytes. Must be greater than growth_step.", - "privacy": "Public", - "value": 1099511627776 - }, - "batcher_config.storage.scope": { - "description": "The categories of data saved in storage.", - "privacy": "Public", - "value": "StateOnly" - }, - "chain_id": { - "description": "A required param! The chain to follow. 
For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "param_type": "String", - "privacy": "TemporaryValue" - }, - "compiler_config.max_casm_bytecode_size": { - "description": "Limitation of compiled casm bytecode size.", - "privacy": "Public", - "value": 81920 - }, - "compiler_config.max_cpu_time": { - "description": "Limitation of compilation cpu time (seconds).", - "privacy": "Public", - "value": 20 - }, - "compiler_config.max_memory_usage": { - "description": "Limitation of compilation process's virtual memory (bytes).", - "privacy": "Public", - "value": 5368709120 - }, - "compiler_config.max_native_bytecode_size": { - "description": "Limitation of compiled native bytecode size.", - "privacy": "Public", - "value": 15728640 - }, - "compiler_config.optimization_level": { - "description": "The level of optimization to apply during compilation.", - "privacy": "Public", - "value": 2 - }, - "compiler_config.panic_on_compilation_failure": { - "description": "Whether to panic on compilation failure.", - "privacy": "Public", - "value": false - }, - "compiler_config.sierra_to_native_compiler_path": { - "description": "The path to the Sierra-to-Native compiler binary.", - "privacy": "Public", - "value": "" - }, - "compiler_config.sierra_to_native_compiler_path.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.batcher.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "LocalExecutionWithRemoteDisabled" - }, - "components.batcher.local_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": false - }, - "components.batcher.local_server_config.channel_buffer_size": { - "description": "The communication channel buffer size.", - "privacy": "Public", - "value": 32 - }, - "components.batcher.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.batcher.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.batcher.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.batcher.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.batcher.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.batcher.remote_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.batcher.remote_server_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.consensus_manager.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "Enabled" - }, - "components.consensus_manager.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.consensus_manager.remote_client_config.idle_connections": { - "description": "The 
maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.consensus_manager.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.consensus_manager.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.consensus_manager.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.gateway.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "LocalExecutionWithRemoteDisabled" - }, - "components.gateway.local_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": false - }, - "components.gateway.local_server_config.channel_buffer_size": { - "description": "The communication channel buffer size.", - "privacy": "Public", - "value": 32 - }, - "components.gateway.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.gateway.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.gateway.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.gateway.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.gateway.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.gateway.remote_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.gateway.remote_server_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.http_server.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "Enabled" - }, - "components.http_server.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.http_server.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.http_server.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.http_server.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.http_server.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.l1_provider.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "LocalExecutionWithRemoteDisabled" 
- }, - "components.l1_provider.local_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": false - }, - "components.l1_provider.local_server_config.channel_buffer_size": { - "description": "The communication channel buffer size.", - "privacy": "Public", - "value": 32 - }, - "components.l1_provider.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.l1_provider.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.l1_provider.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.l1_provider.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.l1_provider.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.l1_provider.remote_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.l1_provider.remote_server_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.mempool.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "LocalExecutionWithRemoteDisabled" - }, - "components.mempool.local_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": false - }, - "components.mempool.local_server_config.channel_buffer_size": { - "description": "The communication channel buffer size.", - "privacy": "Public", - "value": 32 - }, - "components.mempool.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.mempool.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.mempool.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.mempool.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.mempool.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.mempool.remote_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.mempool.remote_server_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.mempool_p2p.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "LocalExecutionWithRemoteDisabled" - }, - "components.mempool_p2p.local_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - 
"value": false - }, - "components.mempool_p2p.local_server_config.channel_buffer_size": { - "description": "The communication channel buffer size.", - "privacy": "Public", - "value": 32 - }, - "components.mempool_p2p.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.mempool_p2p.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.mempool_p2p.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.mempool_p2p.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.mempool_p2p.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.mempool_p2p.remote_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.mempool_p2p.remote_server_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.monitoring_endpoint.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "Enabled" - }, - "components.monitoring_endpoint.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.monitoring_endpoint.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.monitoring_endpoint.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 - }, - "components.monitoring_endpoint.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.monitoring_endpoint.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.state_sync.execution_mode": { - "description": "The component execution mode.", - "privacy": "Public", - "value": "LocalExecutionWithRemoteDisabled" - }, - "components.state_sync.local_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": false - }, - "components.state_sync.local_server_config.channel_buffer_size": { - "description": "The communication channel buffer size.", - "privacy": "Public", - "value": 32 - }, - "components.state_sync.remote_client_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.state_sync.remote_client_config.idle_connections": { - "description": "The maximum number of idle connections to keep alive.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "components.state_sync.remote_client_config.idle_timeout": { - "description": "The duration in seconds to keep an idle connection open before closing.", - "privacy": "Public", - "value": 90 
- }, - "components.state_sync.remote_client_config.retries": { - "description": "The max number of retries for sending a message.", - "privacy": "Public", - "value": 3 - }, - "components.state_sync.remote_client_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "components.state_sync.remote_server_config.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "components.state_sync.remote_server_config.socket": { - "description": "The remote component server socket.", - "privacy": "Public", - "value": "0.0.0.0:8080" - }, - "consensus_manager_config.consensus_config.chain_id": { - "description": "The chain id of the Starknet chain.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "consensus_manager_config.consensus_config.consensus_delay": { - "description": "Delay (seconds) before starting consensus to give time for network peering.", - "privacy": "Public", - "value": 5 - }, - "consensus_manager_config.consensus_config.network_config.advertised_multiaddr": { - "description": "The external address other peers see this node. If this is set, the node will not try to find out which addresses it has and will write this address as external instead", - "privacy": "Public", - "value": "" - }, - "consensus_manager_config.consensus_config.network_config.advertised_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "consensus_manager_config.consensus_config.network_config.bootstrap_peer_multiaddr": { - "description": "The multiaddress of the peer node. It should include the peer's id. For more info: https://docs.libp2p.io/concepts/fundamentals/peers/", - "privacy": "Public", - "value": "" - }, - "consensus_manager_config.consensus_config.network_config.bootstrap_peer_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "consensus_manager_config.consensus_config.network_config.chain_id": { - "description": "The chain to follow. 
For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "consensus_manager_config.consensus_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": { - "description": "The base delay in milliseconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 2 - }, - "consensus_manager_config.consensus_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": { - "description": "The factor for the exponential backoff strategy.", - "privacy": "Public", - "value": 5 - }, - "consensus_manager_config.consensus_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": { - "description": "The maximum delay in seconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 5 - }, - "consensus_manager_config.consensus_config.network_config.discovery_config.heartbeat_interval": { - "description": "The interval between each discovery (Kademlia) query in milliseconds.", - "privacy": "Public", - "value": 100 - }, - "consensus_manager_config.consensus_config.network_config.idle_connection_timeout": { - "description": "Amount of time in seconds that a connection with no active sessions will stay alive.", - "privacy": "Public", - "value": 120 - }, - "consensus_manager_config.consensus_config.network_config.peer_manager_config.malicious_timeout_seconds": { - "description": "The duration in seconds a peer is blacklisted after being marked as malicious.", - "privacy": "Public", - "value": 31536000 - }, - "consensus_manager_config.consensus_config.network_config.peer_manager_config.unstable_timeout_millis": { - "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.", - "privacy": "Public", - "value": 1000 - }, - "consensus_manager_config.consensus_config.network_config.quic_port": { - "description": "The port that the node listens on for incoming quic connections.", - "privacy": "Public", - "value": 10101 - }, - "consensus_manager_config.consensus_config.network_config.secret_key": { - "description": "The secret key used for building the peer id. 
If it's an empty string a random one will be used.", - "privacy": "Private", - "value": "" - }, - "consensus_manager_config.consensus_config.network_config.session_timeout": { - "description": "Maximal time in seconds that each session can take before failing on timeout.", - "privacy": "Public", - "value": 120 - }, - "consensus_manager_config.consensus_config.network_config.tcp_port": { - "description": "The port that the node listens on for incoming tcp connections.", - "privacy": "Public", - "value": 10100 - }, - "consensus_manager_config.consensus_config.network_topic": { - "description": "The network topic of the consensus.", - "privacy": "Public", - "value": "consensus" - }, - "consensus_manager_config.consensus_config.num_validators": { - "description": "The number of validators in the consensus.", - "privacy": "Public", - "value": 1 - }, - "consensus_manager_config.consensus_config.start_height": { - "description": "The height to start the consensus from.", - "privacy": "Public", - "value": 0 - }, - "consensus_manager_config.consensus_config.timeouts.precommit_timeout": { - "description": "The timeout (seconds) for a precommit.", - "privacy": "Public", - "value": 1.0 - }, - "consensus_manager_config.consensus_config.timeouts.prevote_timeout": { - "description": "The timeout (seconds) for a prevote.", - "privacy": "Public", - "value": 1.0 - }, - "consensus_manager_config.consensus_config.timeouts.proposal_timeout": { - "description": "The timeout (seconds) for a proposal.", - "privacy": "Public", - "value": 3.0 - }, - "consensus_manager_config.consensus_config.validator_id": { - "description": "The validator id of the node.", - "pointer_target": "validator_id", - "privacy": "Public" - }, - "eth_fee_token_address": { - "description": "A required param! 
Address of the ETH fee token.", - "param_type": "String", - "privacy": "TemporaryValue" - }, - "gateway_config.chain_info.chain_id": { - "description": "The chain ID of the StarkNet chain.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "gateway_config.chain_info.fee_token_addresses.eth_fee_token_address": { - "description": "Address of the ETH fee token.", - "pointer_target": "eth_fee_token_address", - "privacy": "Public" - }, - "gateway_config.chain_info.fee_token_addresses.strk_fee_token_address": { - "description": "Address of the STRK fee token.", - "pointer_target": "strk_fee_token_address", - "privacy": "Public" - }, - "gateway_config.stateful_tx_validator_config.max_nonce_for_validation_skip": { - "description": "Maximum nonce for which the validation is skipped.", - "privacy": "Public", - "value": "0x1" - }, - "gateway_config.stateful_tx_validator_config.versioned_constants_overrides.invoke_tx_max_n_steps": { - "description": "Maximum number of steps the invoke function is allowed to run.", - "pointer_target": "versioned_constants_overrides.invoke_tx_max_n_steps", - "privacy": "Public" - }, - "gateway_config.stateful_tx_validator_config.versioned_constants_overrides.max_n_events": { - "description": "Maximum number of events that can be emitted from the transation.", - "pointer_target": "versioned_constants_overrides.max_n_events", - "privacy": "Public" - }, - "gateway_config.stateful_tx_validator_config.versioned_constants_overrides.max_recursion_depth": { - "description": "Maximum recursion depth for nested calls during blockifier validation.", - "pointer_target": "versioned_constants_overrides.max_recursion_depth", - "privacy": "Public" - }, - "gateway_config.stateful_tx_validator_config.versioned_constants_overrides.validate_max_n_steps": { - "description": "Maximum number of steps the validation function is allowed to run.", - "pointer_target": "versioned_constants_overrides.validate_max_n_steps", - "privacy": "Public" - }, - "gateway_config.stateless_tx_validator_config.max_calldata_length": { - "description": "Limitation of calldata length.", - "privacy": "Public", - "value": 4000 - }, - "gateway_config.stateless_tx_validator_config.max_contract_class_object_size": { - "description": "Limitation of contract class object size.", - "privacy": "Public", - "value": 4089446 - }, - "gateway_config.stateless_tx_validator_config.max_sierra_version.major": { - "description": "The major version of the configuration.", - "privacy": "Public", - "value": 1 - }, - "gateway_config.stateless_tx_validator_config.max_sierra_version.minor": { - "description": "The minor version of the configuration.", - "privacy": "Public", - "value": 5 - }, - "gateway_config.stateless_tx_validator_config.max_sierra_version.patch": { - "description": "The patch version of the configuration.", - "privacy": "Public", - "value": 18446744073709551615 - }, - "gateway_config.stateless_tx_validator_config.max_signature_length": { - "description": "Limitation of signature length.", - "privacy": "Public", - "value": 4000 - }, - "gateway_config.stateless_tx_validator_config.min_sierra_version.major": { - "description": "The major version of the configuration.", - "privacy": "Public", - "value": 1 - }, - "gateway_config.stateless_tx_validator_config.min_sierra_version.minor": { - "description": "The minor version of the configuration.", - "privacy": "Public", - "value": 1 - }, - "gateway_config.stateless_tx_validator_config.min_sierra_version.patch": { - "description": "The patch version of the 
configuration.", - "privacy": "Public", - "value": 0 - }, - "gateway_config.stateless_tx_validator_config.validate_non_zero_l1_data_gas_fee": { - "description": "If true, validates that a transaction has non-zero L1 Data (Blob) resource bounds.", - "privacy": "Public", - "value": false - }, - "gateway_config.stateless_tx_validator_config.validate_non_zero_l1_gas_fee": { - "description": "If true, validates that a transaction has non-zero L1 resource bounds.", - "privacy": "Public", - "value": true - }, - "gateway_config.stateless_tx_validator_config.validate_non_zero_l2_gas_fee": { - "description": "If true, validates that a transaction has non-zero L2 resource bounds.", - "privacy": "Public", - "value": false - }, - "http_server_config.ip": { - "description": "The http server ip.", - "privacy": "Public", - "value": "0.0.0.0" - }, - "http_server_config.port": { - "description": "The http server port.", - "privacy": "Public", - "value": 8080 - }, - "l1_provider_config._poll_interval": { - "description": "Interval in milliseconds between each scraping attempt of L1.", - "privacy": "Public", - "value": 100 - }, - "mempool_p2p_config.network_buffer_size": { - "description": "Network buffer size.", - "privacy": "Public", - "value": 10000 - }, - "mempool_p2p_config.network_config.advertised_multiaddr": { - "description": "The external address other peers see this node. If this is set, the node will not try to find out which addresses it has and will write this address as external instead", - "privacy": "Public", - "value": "" - }, - "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": { - "description": "The multiaddress of the peer node. It should include the peer's id. For more info: https://docs.libp2p.io/concepts/fundamentals/peers/", - "privacy": "Public", - "value": "" - }, - "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "mempool_p2p_config.network_config.chain_id": { - "description": "The chain to follow. 
For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "mempool_p2p_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": { - "description": "The base delay in milliseconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 2 - }, - "mempool_p2p_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": { - "description": "The factor for the exponential backoff strategy.", - "privacy": "Public", - "value": 5 - }, - "mempool_p2p_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": { - "description": "The maximum delay in seconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 5 - }, - "mempool_p2p_config.network_config.discovery_config.heartbeat_interval": { - "description": "The interval between each discovery (Kademlia) query in milliseconds.", - "privacy": "Public", - "value": 100 - }, - "mempool_p2p_config.network_config.idle_connection_timeout": { - "description": "Amount of time in seconds that a connection with no active sessions will stay alive.", - "privacy": "Public", - "value": 120 - }, - "mempool_p2p_config.network_config.peer_manager_config.malicious_timeout_seconds": { - "description": "The duration in seconds a peer is blacklisted after being marked as malicious.", - "privacy": "Public", - "value": 31536000 - }, - "mempool_p2p_config.network_config.peer_manager_config.unstable_timeout_millis": { - "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.", - "privacy": "Public", - "value": 1000 - }, - "mempool_p2p_config.network_config.quic_port": { - "description": "The port that the node listens on for incoming quic connections.", - "privacy": "Public", - "value": 10001 - }, - "mempool_p2p_config.network_config.secret_key": { - "description": "The secret key used for building the peer id. If it's an empty string a random one will be used.", - "privacy": "Private", - "value": "" - }, - "mempool_p2p_config.network_config.session_timeout": { - "description": "Maximal time in seconds that each session can take before failing on timeout.", - "privacy": "Public", - "value": 120 - }, - "mempool_p2p_config.network_config.tcp_port": { - "description": "The port that the node listens on for incoming tcp connections.", - "privacy": "Public", - "value": 10000 - }, - "monitoring_endpoint_config.collect_metrics": { - "description": "If true, collect and return metrics in the monitoring endpoint.", - "privacy": "Public", - "value": false - }, - "monitoring_endpoint_config.ip": { - "description": "The monitoring endpoint ip address.", - "privacy": "Public", - "value": "0.0.0.0" - }, - "monitoring_endpoint_config.port": { - "description": "The monitoring endpoint port.", - "privacy": "Public", - "value": 8082 - }, - "rpc_state_reader_config.json_rpc_version": { - "description": "The json rpc version.", - "privacy": "Public", - "value": "2.0" - }, - "rpc_state_reader_config.url": { - "description": "The url of the rpc server.", - "privacy": "Public", - "value": "" - }, - "state_sync_config.network_config.advertised_multiaddr": { - "description": "The external address other peers see this node. 
If this is set, the node will not try to find out which addresses it has and will write this address as external instead", - "privacy": "Public", - "value": "" - }, - "state_sync_config.network_config.advertised_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "state_sync_config.network_config.bootstrap_peer_multiaddr": { - "description": "The multiaddress of the peer node. It should include the peer's id. For more info: https://docs.libp2p.io/concepts/fundamentals/peers/", - "privacy": "Public", - "value": "" - }, - "state_sync_config.network_config.bootstrap_peer_multiaddr.#is_none": { - "description": "Flag for an optional field.", - "privacy": "TemporaryValue", - "value": true - }, - "state_sync_config.network_config.chain_id": { - "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": { - "description": "The base delay in milliseconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 2 - }, - "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": { - "description": "The factor for the exponential backoff strategy.", - "privacy": "Public", - "value": 5 - }, - "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": { - "description": "The maximum delay in seconds for the exponential backoff strategy.", - "privacy": "Public", - "value": 5 - }, - "state_sync_config.network_config.discovery_config.heartbeat_interval": { - "description": "The interval between each discovery (Kademlia) query in milliseconds.", - "privacy": "Public", - "value": 100 - }, - "state_sync_config.network_config.idle_connection_timeout": { - "description": "Amount of time in seconds that a connection with no active sessions will stay alive.", - "privacy": "Public", - "value": 120 - }, - "state_sync_config.network_config.peer_manager_config.malicious_timeout_seconds": { - "description": "The duration in seconds a peer is blacklisted after being marked as malicious.", - "privacy": "Public", - "value": 31536000 - }, - "state_sync_config.network_config.peer_manager_config.unstable_timeout_millis": { - "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.", - "privacy": "Public", - "value": 1000 - }, - "state_sync_config.network_config.quic_port": { - "description": "The port that the node listens on for incoming quic connections.", - "privacy": "Public", - "value": 10001 - }, - "state_sync_config.network_config.secret_key": { - "description": "The secret key used for building the peer id. 
If it's an empty string a random one will be used.", - "privacy": "Private", - "value": "" - }, - "state_sync_config.network_config.session_timeout": { - "description": "Maximal time in seconds that each session can take before failing on timeout.", - "privacy": "Public", - "value": 120 - }, - "state_sync_config.network_config.tcp_port": { - "description": "The port that the node listens on for incoming tcp connections.", - "privacy": "Public", - "value": 12345 - }, - "state_sync_config.p2p_sync_client_config.buffer_size": { - "description": "Size of the buffer for read from the storage and for incoming responses.", - "privacy": "Public", - "value": 100000 - }, - "state_sync_config.p2p_sync_client_config.num_block_classes_per_query": { - "description": "The maximum amount of block's classes to ask from peers in each iteration.", - "privacy": "Public", - "value": 100 - }, - "state_sync_config.p2p_sync_client_config.num_block_state_diffs_per_query": { - "description": "The maximum amount of block's state diffs to ask from peers in each iteration.", - "privacy": "Public", - "value": 100 - }, - "state_sync_config.p2p_sync_client_config.num_block_transactions_per_query": { - "description": "The maximum amount of blocks to ask their transactions from peers in each iteration.", - "privacy": "Public", - "value": 100 - }, - "state_sync_config.p2p_sync_client_config.num_headers_per_query": { - "description": "The maximum amount of headers to ask from peers in each iteration.", - "privacy": "Public", - "value": 10000 - }, - "state_sync_config.p2p_sync_client_config.wait_period_for_new_data": { - "description": "Time in millisseconds to wait when a query returned with partial data before sending a new query", - "privacy": "Public", - "value": 50 - }, - "state_sync_config.storage_config.db_config.chain_id": { - "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", - "pointer_target": "chain_id", - "privacy": "Public" - }, - "state_sync_config.storage_config.db_config.enforce_file_exists": { - "description": "Whether to enforce that the path exists. If true, `open_env` fails when the mdbx.dat file does not exist.", - "privacy": "Public", - "value": false - }, - "state_sync_config.storage_config.db_config.growth_step": { - "description": "The growth step in bytes, must be greater than zero to allow the database to grow.", - "privacy": "Public", - "value": 4294967296 - }, - "state_sync_config.storage_config.db_config.max_size": { - "description": "The maximum size of the node's storage in bytes.", - "privacy": "Public", - "value": 1099511627776 - }, - "state_sync_config.storage_config.db_config.min_size": { - "description": "The minimum size of the node's storage in bytes.", - "privacy": "Public", - "value": 1048576 - }, - "state_sync_config.storage_config.db_config.path_prefix": { - "description": "Prefix of the path of the node's storage directory, the storage file path will be /. 
The path is not created automatically.", - "privacy": "Public", - "value": "./sequencer_data" - }, - "state_sync_config.storage_config.mmap_file_config.growth_step": { - "description": "The growth step in bytes, must be greater than max_object_size.", - "privacy": "Public", - "value": 1073741824 - }, - "state_sync_config.storage_config.mmap_file_config.max_object_size": { - "description": "The maximum size of a single object in the file in bytes", - "privacy": "Public", - "value": 268435456 - }, - "state_sync_config.storage_config.mmap_file_config.max_size": { - "description": "The maximum size of a memory mapped file in bytes. Must be greater than growth_step.", - "privacy": "Public", - "value": 1099511627776 - }, - "state_sync_config.storage_config.scope": { - "description": "The categories of data saved in storage.", - "privacy": "Public", - "value": "FullArchive" - }, - "strk_fee_token_address": { - "description": "A required param! Address of the STRK fee token.", - "param_type": "String", - "privacy": "TemporaryValue" - }, - "validator_id": { - "description": "A required param! The ID of the validator. Also the address of this validator as a starknet contract.", - "param_type": "String", - "privacy": "TemporaryValue" - }, - "versioned_constants_overrides.invoke_tx_max_n_steps": { - "description": "Maximum number of steps the invoke function is allowed to run.", - "privacy": "TemporaryValue", - "value": 10000000 - }, - "versioned_constants_overrides.max_n_events": { - "description": "Maximum number of events that can be emitted from the transation.", - "privacy": "TemporaryValue", - "value": 1000 - }, - "versioned_constants_overrides.max_recursion_depth": { - "description": "Maximum recursion depth for nested calls during blockifier validation.", - "privacy": "TemporaryValue", - "value": 50 - }, - "versioned_constants_overrides.validate_max_n_steps": { - "description": "Maximum number of steps the validation function is allowed to run.", - "privacy": "TemporaryValue", - "value": 1000000 - } -} diff --git a/config/sequencer/presets/config-batcher.json b/config/sequencer/presets/config-batcher.json deleted file mode 100644 index e8d0fa511c5..00000000000 --- a/config/sequencer/presets/config-batcher.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "chain_id": "0x5", - "eth_fee_token_address": "0x6", - "strk_fee_token_address": "0x7", - "batcher_config.storage.db_config.path_prefix": "/data", - "batcher_config.storage.db_config.enforce_file_exists": false, - "validator_id" : "0x1" -} diff --git a/config/sequencer/presets/config.json b/config/sequencer/presets/config.json deleted file mode 100644 index 310df1b2091..00000000000 --- a/config/sequencer/presets/config.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "chain_id": "0x5", - "eth_fee_token_address": "0x6", - "strk_fee_token_address": "0x7", - "components.batcher.execution_mode": "Disabled", - "components.batcher.local_server_config.#is_none": true, - "components.consensus_manager.execution_mode": "Disabled", - "components.gateway.execution_mode": "Disabled", - "components.http_server.execution_mode": "Disabled", - "components.mempool.execution_mode": "Disabled", - "components.mempool_p2p.execution_mode": "Disabled", - "components.consensus_manager.local_server_config.#is_none": true, - "components.gateway.local_server_config.#is_none": true, - "components.http_server.local_server_config.#is_none": true, - "components.mempool.local_server_config.#is_none": true, - "components.mempool_p2p.local_server_config.#is_none": true, - 
"components.http_server.remote_server_config.#is_none": true, - "batcher_config.storage.db_config.enforce_file_exists": false, - "batcher_config.storage.db_config.path_prefix": "/data" -} diff --git a/crates/apollo_batcher/Cargo.toml b/crates/apollo_batcher/Cargo.toml new file mode 100644 index 00000000000..b39108d7b21 --- /dev/null +++ b/crates/apollo_batcher/Cargo.toml @@ -0,0 +1,63 @@ +[package] +name = "apollo_batcher" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[features] +cairo_native = ["blockifier/cairo_native"] +testing = [] + +[lints] +workspace = true + +[dependencies] +apollo_batcher_types.workspace = true +apollo_class_manager_types.workspace = true +apollo_config.workspace = true +apollo_infra.workspace = true +apollo_infra_utils.workspace = true +apollo_l1_provider_types.workspace = true +apollo_mempool_types.workspace = true +apollo_metrics.workspace = true +apollo_reverts.workspace = true +apollo_starknet_client.workspace = true +apollo_state_reader.workspace = true +apollo_state_sync_types.workspace = true +apollo_storage.workspace = true +async-trait.workspace = true +blockifier.workspace = true +cairo-vm.workspace = true +chrono.workspace = true +futures.workspace = true +indexmap.workspace = true +reqwest = { workspace = true, features = ["json"] } +serde.workspace = true +starknet_api.workspace = true +thiserror.workspace = true +tokio.workspace = true +tracing.workspace = true +url = { workspace = true, features = ["serde"] } +validator.workspace = true + +[dev-dependencies] +apollo_class_manager_types = { workspace = true, features = ["testing"] } +apollo_infra_utils.workspace = true +apollo_l1_provider_types = { workspace = true, features = ["testing"] } +apollo_mempool_types = { workspace = true, features = ["testing"] } +apollo_metrics = { workspace = true, features = ["testing"] } +apollo_storage = { workspace = true, features = ["testing"] } +assert_matches.workspace = true +blockifier = { workspace = true, features = ["testing"] } +cairo-lang-starknet-classes.workspace = true +chrono = { workspace = true } +itertools.workspace = true +mempool_test_utils.workspace = true +metrics.workspace = true +metrics-exporter-prometheus.workspace = true +mockall.workspace = true +pretty_assertions.workspace = true +rstest.workspace = true +starknet-types-core.workspace = true +starknet_api = { workspace = true, features = ["testing"] } diff --git a/crates/apollo_batcher/src/batcher.rs b/crates/apollo_batcher/src/batcher.rs new file mode 100644 index 00000000000..c3c288f6f6d --- /dev/null +++ b/crates/apollo_batcher/src/batcher.rs @@ -0,0 +1,957 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use apollo_batcher_types::batcher_types::{ + BatcherResult, + CentralObjects, + DecisionReachedInput, + DecisionReachedResponse, + GetHeightResponse, + GetProposalContent, + GetProposalContentInput, + GetProposalContentResponse, + ProposalCommitment, + ProposalId, + ProposalStatus, + ProposeBlockInput, + RevertBlockInput, + SendProposalContent, + SendProposalContentInput, + SendProposalContentResponse, + StartHeightInput, + ValidateBlockInput, +}; +use apollo_batcher_types::errors::BatcherError; +use apollo_class_manager_types::transaction_converter::TransactionConverter; +use apollo_class_manager_types::SharedClassManagerClient; +use apollo_infra::component_definitions::{default_component_start_fn, ComponentStarter}; +use apollo_l1_provider_types::errors::{L1ProviderClientError, L1ProviderError}; +use 
apollo_l1_provider_types::{SessionState, SharedL1ProviderClient};
+use apollo_mempool_types::communication::SharedMempoolClient;
+use apollo_mempool_types::mempool_types::CommitBlockArgs;
+use apollo_reverts::revert_block;
+use apollo_state_sync_types::state_sync_types::SyncBlock;
+use apollo_storage::state::{StateStorageReader, StateStorageWriter};
+use async_trait::async_trait;
+use blockifier::concurrency::worker_pool::WorkerPool;
+use blockifier::state::contract_class_manager::ContractClassManager;
+use futures::FutureExt;
+use indexmap::IndexSet;
+#[cfg(test)]
+use mockall::automock;
+use starknet_api::block::{BlockHeaderWithoutHash, BlockNumber};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::core::{ContractAddress, Nonce};
+use starknet_api::state::ThinStateDiff;
+use starknet_api::transaction::TransactionHash;
+use tokio::sync::Mutex;
+use tracing::{debug, error, info, instrument, trace, Instrument};
+
+use crate::block_builder::{
+    BlockBuilderError,
+    BlockBuilderExecutionParams,
+    BlockBuilderFactory,
+    BlockBuilderFactoryTrait,
+    BlockBuilderTrait,
+    BlockExecutionArtifacts,
+    BlockMetadata,
+};
+use crate::cende_client_types::CendeBlockMetadata;
+use crate::config::BatcherConfig;
+use crate::metrics::{
+    register_metrics,
+    ProposalMetricsHandle,
+    BATCHED_TRANSACTIONS,
+    LAST_BATCHED_BLOCK,
+    LAST_PROPOSED_BLOCK,
+    LAST_SYNCED_BLOCK,
+    REJECTED_TRANSACTIONS,
+    REVERTED_BLOCKS,
+    REVERTED_TRANSACTIONS,
+    STORAGE_HEIGHT,
+    SYNCED_TRANSACTIONS,
+};
+use crate::pre_confirmed_block_writer::{
+    PreconfirmedBlockWriterFactory,
+    PreconfirmedBlockWriterFactoryTrait,
+    PreconfirmedBlockWriterTrait,
+};
+use crate::pre_confirmed_cende_client::PreconfirmedCendeClientTrait;
+use crate::transaction_provider::{ProposeTransactionProvider, ValidateTransactionProvider};
+use crate::utils::{
+    deadline_as_instant,
+    proposal_status_from,
+    verify_block_input,
+    ProposalResult,
+    ProposalTask,
+};
+
+type OutputStreamReceiver = tokio::sync::mpsc::UnboundedReceiver<InternalConsensusTransaction>;
+type InputStreamSender = tokio::sync::mpsc::Sender<InternalConsensusTransaction>;
+
+pub struct Batcher {
+    pub config: BatcherConfig,
+    pub storage_reader: Arc<dyn BatcherStorageReaderTrait>,
+    pub storage_writer: Box<dyn BatcherStorageWriterTrait>,
+    pub l1_provider_client: SharedL1ProviderClient,
+    pub mempool_client: SharedMempoolClient,
+    pub transaction_converter: TransactionConverter,
+
+    /// Used to create block builders.
+    /// Using the factory pattern to allow for easier testing.
+    block_builder_factory: Box<dyn BlockBuilderFactoryTrait>,
+
+    /// Used to create pre-confirmed block writers.
+    pre_confirmed_block_writer_factory: Box<dyn PreconfirmedBlockWriterFactoryTrait>,
+
+    /// The height that the batcher is currently working on.
+    /// All proposals are considered to be at this height.
+    active_height: Option<BlockNumber>,
+
+    /// The block proposal that is currently being built, if any.
+    /// At any given time, there can be only one proposal being actively executed (either proposed
+    /// or validated).
+    active_proposal: Arc<Mutex<Option<ProposalId>>>,
+    active_proposal_task: Option<ProposalTask>,
+
+    /// Holds all the proposals that completed execution in the current height.
+    executed_proposals: Arc<Mutex<HashMap<ProposalId, ProposalResult<BlockExecutionArtifacts>>>>,
+
+    /// The propose blocks transaction streams, used to stream out the proposal transactions.
+    /// Each stream is kept until all the transactions are streamed out, or a new height is
+    /// started.
+    propose_tx_streams: HashMap<ProposalId, OutputStreamReceiver>,
+
+    /// The validate blocks transaction streams, used to stream in the transactions to validate.
+    /// Each stream is kept until SendProposalContent::Finish/Abort is received, or a new height is
+    /// started.
+    validate_tx_streams: HashMap<ProposalId, InputStreamSender>,
+}
+
+impl Batcher {
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        config: BatcherConfig,
+        storage_reader: Arc<dyn BatcherStorageReaderTrait>,
+        storage_writer: Box<dyn BatcherStorageWriterTrait>,
+        l1_provider_client: SharedL1ProviderClient,
+        mempool_client: SharedMempoolClient,
+        transaction_converter: TransactionConverter,
+        block_builder_factory: Box<dyn BlockBuilderFactoryTrait>,
+        pre_confirmed_block_writer_factory: Box<dyn PreconfirmedBlockWriterFactoryTrait>,
+    ) -> Self {
+        Self {
+            config,
+            storage_reader,
+            storage_writer,
+            l1_provider_client,
+            mempool_client,
+            transaction_converter,
+            block_builder_factory,
+            pre_confirmed_block_writer_factory,
+            active_height: None,
+            active_proposal: Arc::new(Mutex::new(None)),
+            active_proposal_task: None,
+            executed_proposals: Arc::new(Mutex::new(HashMap::new())),
+            propose_tx_streams: HashMap::new(),
+            validate_tx_streams: HashMap::new(),
+        }
+    }
+
+    #[instrument(skip(self), err)]
+    pub async fn start_height(&mut self, input: StartHeightInput) -> BatcherResult<()> {
+        if self.active_height == Some(input.height) {
+            return Err(BatcherError::HeightInProgress);
+        }
+
+        let storage_height = self.get_height_from_storage()?;
+        if storage_height != input.height {
+            return Err(BatcherError::StorageHeightMarkerMismatch {
+                marker_height: storage_height,
+                requested_height: input.height,
+            });
+        }
+
+        self.abort_active_height().await;
+
+        info!("Starting to work on height {}.", input.height);
+        self.active_height = Some(input.height);
+
+        Ok(())
+    }
+
+    #[instrument(skip(self), err)]
+    pub async fn propose_block(
+        &mut self,
+        propose_block_input: ProposeBlockInput,
+    ) -> BatcherResult<()> {
+        let block_number = propose_block_input.block_info.block_number;
+        let proposal_metrics_handle = ProposalMetricsHandle::new();
+        let active_height = self.active_height.ok_or(BatcherError::NoActiveHeight)?;
+        verify_block_input(
+            active_height,
+            block_number,
+            propose_block_input.retrospective_block_hash,
+        )?;
+
+        // TODO(yair): extract function for the following calls, use join_all.
+        self.mempool_client.commit_block(CommitBlockArgs::default()).await.map_err(|err| {
+            error!(
+                "Mempool is not ready to start proposal {}: {}.",
+                propose_block_input.proposal_id, err
+            );
+            BatcherError::NotReady
+        })?;
+        self.mempool_client
+            .update_gas_price(
+                propose_block_input.block_info.gas_prices.strk_gas_prices.l2_gas_price.get(),
+            )
+            .await
+            .map_err(|err| {
+                error!("Failed to update gas price in mempool: {}", err);
+                BatcherError::InternalError
+            })?;
+        self.l1_provider_client
+            .start_block(SessionState::Propose, propose_block_input.block_info.block_number)
+            .await
+            .map_err(|err| {
+                error!(
+                    "L1 provider is not ready to start proposing block {}: {}. ",
+                    propose_block_input.block_info.block_number, err
+                );
+                BatcherError::NotReady
+            })?;
+
+        let tx_provider = ProposeTransactionProvider::new(
+            self.mempool_client.clone(),
+            self.l1_provider_client.clone(),
+            self.config.max_l1_handler_txs_per_block_proposal,
+            propose_block_input.block_info.block_number,
+        );
+
+        // A channel to receive the transactions included in the proposed block.
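The unbounded channel created just below is how the propose flow streams executed transactions out of the block builder; `get_proposal_content` later drains the receiver in chunks with `recv_many`. A minimal, self-contained sketch of that pattern (toy `u64` payloads instead of `InternalConsensusTransaction`, chunk size 3 instead of `outstream_content_buffer_size`; assumes a tokio version with `recv_many`):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<u64>();

    // Producer: stands in for the block builder emitting executed transactions.
    tokio::spawn(async move {
        for i in 0..10u64 {
            tx.send(i).expect("receiver is alive");
        }
        // Dropping `tx` here closes the stream, so the drain loop below ends.
    });

    // Consumer: drain in bounded chunks, as `get_proposal_content` does.
    let mut chunk = Vec::new();
    while rx.recv_many(&mut chunk, 3).await != 0 {
        println!("streaming chunk: {chunk:?}");
        chunk.clear();
    }
}
```

Presumably the sender side is naturally bounded by the block deadline and block capacity, which is why an unbounded channel is acceptable on the propose path while the validate path below uses a bounded one.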
+ let (output_tx_sender, output_tx_receiver) = tokio::sync::mpsc::unbounded_channel(); + + let cende_block_metadata = CendeBlockMetadata::new(propose_block_input.block_info.clone()); + let (pre_confirmed_block_writer, candidate_tx_sender, pre_confirmed_tx_sender) = + self.pre_confirmed_block_writer_factory.create( + propose_block_input.block_info.block_number, + propose_block_input.proposal_round, + cende_block_metadata, + ); + + let (block_builder, abort_signal_sender) = self + .block_builder_factory + .create_block_builder( + BlockMetadata { + block_info: propose_block_input.block_info, + retrospective_block_hash: propose_block_input.retrospective_block_hash, + }, + BlockBuilderExecutionParams { + deadline: deadline_as_instant(propose_block_input.deadline)?, + is_validator: false, + }, + Box::new(tx_provider), + Some(output_tx_sender), + Some(candidate_tx_sender), + Some(pre_confirmed_tx_sender), + tokio::runtime::Handle::current(), + ) + .map_err(|err| { + error!("Failed to get block builder: {}", err); + BatcherError::InternalError + })?; + + self.spawn_proposal( + propose_block_input.proposal_id, + block_builder, + abort_signal_sender, + None, + Some(pre_confirmed_block_writer), + proposal_metrics_handle, + ) + .await?; + + let proposal_already_exists = + self.propose_tx_streams.insert(propose_block_input.proposal_id, output_tx_receiver); + assert!( + proposal_already_exists.is_none(), + "Proposal {} already exists. This should have been checked when spawning the proposal.", + propose_block_input.proposal_id + ); + LAST_PROPOSED_BLOCK.set_lossy(block_number.0); + Ok(()) + } + + #[instrument(skip(self), err)] + pub async fn validate_block( + &mut self, + validate_block_input: ValidateBlockInput, + ) -> BatcherResult<()> { + let proposal_metrics_handle = ProposalMetricsHandle::new(); + let active_height = self.active_height.ok_or(BatcherError::NoActiveHeight)?; + verify_block_input( + active_height, + validate_block_input.block_info.block_number, + validate_block_input.retrospective_block_hash, + )?; + + self.l1_provider_client + .start_block(SessionState::Validate, validate_block_input.block_info.block_number) + .await + .map_err(|err| { + error!( + "L1 provider is not ready to start validating block {}: {}. ", + validate_block_input.block_info.block_number, err + ); + BatcherError::NotReady + })?; + + // A channel to send the transactions to include in the block being validated. 
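By contrast, the validate flow creates a *bounded* channel just below, sized by `input_stream_content_buffer_size`, so transactions streamed in via `send_proposal_content` get backpressure from the block builder. A toy demonstration of that behavior (buffer of 2 and a sleep standing in for block execution, both assumptions for illustration):

```rust
use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    // Buffer of 2 stands in for `input_stream_content_buffer_size`.
    let (tx, mut rx) = mpsc::channel::<u32>(2);

    tokio::spawn(async move {
        for i in 0..5u32 {
            // Suspends once the buffer is full: backpressure on the sender.
            tx.send(i).await.expect("receiver is alive");
            println!("sent {i}");
        }
    });

    // Deliberately slow consumer, standing in for transaction execution.
    while let Some(i) = rx.recv().await {
        sleep(Duration::from_millis(50)).await;
        println!("processed {i}");
    }
}
```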
+        let (input_tx_sender, input_tx_receiver) =
+            tokio::sync::mpsc::channel(self.config.input_stream_content_buffer_size);
+        let (final_n_executed_txs_sender, final_n_executed_txs_receiver) =
+            tokio::sync::oneshot::channel();
+
+        let tx_provider = ValidateTransactionProvider::new(
+            input_tx_receiver,
+            final_n_executed_txs_receiver,
+            self.l1_provider_client.clone(),
+            validate_block_input.block_info.block_number,
+        );
+
+        let (block_builder, abort_signal_sender) = self
+            .block_builder_factory
+            .create_block_builder(
+                BlockMetadata {
+                    block_info: validate_block_input.block_info,
+                    retrospective_block_hash: validate_block_input.retrospective_block_hash,
+                },
+                BlockBuilderExecutionParams {
+                    deadline: deadline_as_instant(validate_block_input.deadline)?,
+                    is_validator: true,
+                },
+                Box::new(tx_provider),
+                None,
+                None,
+                None,
+                tokio::runtime::Handle::current(),
+            )
+            .map_err(|err| {
+                error!("Failed to get block builder: {}", err);
+                BatcherError::InternalError
+            })?;
+
+        self.spawn_proposal(
+            validate_block_input.proposal_id,
+            block_builder,
+            abort_signal_sender,
+            Some(final_n_executed_txs_sender),
+            None,
+            proposal_metrics_handle,
+        )
+        .await?;
+
+        let validation_already_exists =
+            self.validate_tx_streams.insert(validate_block_input.proposal_id, input_tx_sender);
+        assert!(
+            validation_already_exists.is_none(),
+            "Proposal {} already exists. This should have been checked when spawning the proposal.",
+            validate_block_input.proposal_id
+        );
+
+        Ok(())
+    }
+
+    // This function assumes that requests are received in order, otherwise the content could
+    // be processed out of order.
+    #[instrument(skip(self), err)]
+    pub async fn send_proposal_content(
+        &mut self,
+        send_proposal_content_input: SendProposalContentInput,
+    ) -> BatcherResult<SendProposalContentResponse> {
+        let proposal_id = send_proposal_content_input.proposal_id;
+        if !self.validate_tx_streams.contains_key(&proposal_id) {
+            return Err(BatcherError::ProposalNotFound { proposal_id });
+        }
+
+        match send_proposal_content_input.content {
+            SendProposalContent::Txs(txs) => self.handle_send_txs_request(proposal_id, txs).await,
+            SendProposalContent::Finish(final_n_executed_txs) => {
+                self.handle_finish_proposal_request(proposal_id, final_n_executed_txs).await
+            }
+            SendProposalContent::Abort => self.handle_abort_proposal_request(proposal_id).await,
+        }
+    }
+
+    /// Clear all the proposals from the previous height.
+    async fn abort_active_height(&mut self) {
+        self.abort_active_proposal().await;
+        self.executed_proposals.lock().await.clear();
+        self.propose_tx_streams.clear();
+        self.validate_tx_streams.clear();
+        self.active_height = None;
+    }
+
+    async fn handle_send_txs_request(
+        &mut self,
+        proposal_id: ProposalId,
+        txs: Vec<InternalConsensusTransaction>,
+    ) -> BatcherResult<SendProposalContentResponse> {
+        if self.is_active(proposal_id).await {
+            // The proposal is active. Send the transactions through the tx provider.
+            let tx_provider_sender = &self
+                .validate_tx_streams
+                .get(&proposal_id)
+                .expect("Expecting tx_provider_sender to exist during batching.");
+            for tx in txs {
+                tx_provider_sender.send(tx).await.map_err(|err| {
+                    error!("Failed to send transaction to the tx provider: {}", err);
+                    BatcherError::InternalError
+                })?;
+            }
+            return Ok(SendProposalContentResponse { response: ProposalStatus::Processing });
+        }
+
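Note that failed proposals are stored as `Err(Arc<BlockBuilderError>)` in `executed_proposals` (see `handle_abort_proposal_request` and `spawn_proposal`). A plausible reason, sketched below with hypothetical stand-in types: the error type is not `Clone`, but the stored result is handed out to several later queries, so each reader clones the cheap `Arc` handle rather than the error itself.

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Hypothetical stand-in for `BlockBuilderError`.
#[derive(Debug)]
enum BuildError {
    Aborted,
}

fn main() {
    let mut executed: HashMap<u64, Result<String, Arc<BuildError>>> = HashMap::new();
    executed.insert(7, Err(Arc::new(BuildError::Aborted)));

    // Each query clones the `Arc` handle, not the (non-`Clone`) error itself.
    let first = executed.get(&7).cloned();
    let second = executed.get(&7).cloned();
    println!("{first:?}\n{second:?}");
}
```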
+        // The proposal is no longer active, can't send the transactions.
+        let proposal_result =
+            self.get_completed_proposal_result(proposal_id).await.expect("Proposal should exist.");
+        match proposal_result {
+            Ok(_) => panic!("Proposal finished validation before all transactions were sent."),
+            Err(err) => Ok(SendProposalContentResponse { response: proposal_status_from(err)? }),
+        }
+    }
+
+    async fn handle_finish_proposal_request(
+        &mut self,
+        proposal_id: ProposalId,
+        final_n_executed_txs: usize,
+    ) -> BatcherResult<SendProposalContentResponse> {
+        debug!("Send proposal content done for {}", proposal_id);
+
+        self.validate_tx_streams.remove(&proposal_id).expect("validate tx stream should exist.");
+        if self.is_active(proposal_id).await {
+            self.await_active_proposal(final_n_executed_txs).await?;
+        }
+
+        let proposal_result =
+            self.get_completed_proposal_result(proposal_id).await.expect("Proposal should exist.");
+        let proposal_status = match proposal_result {
+            Ok((commitment, _)) => ProposalStatus::Finished(commitment),
+            Err(err) => proposal_status_from(err)?,
+        };
+        Ok(SendProposalContentResponse { response: proposal_status })
+    }
+
+    async fn handle_abort_proposal_request(
+        &mut self,
+        proposal_id: ProposalId,
+    ) -> BatcherResult<SendProposalContentResponse> {
+        if self.is_active(proposal_id).await {
+            self.abort_active_proposal().await;
+
+            let proposal_already_exists = self
+                .executed_proposals
+                .lock()
+                .await
+                .insert(proposal_id, Err(Arc::new(BlockBuilderError::Aborted)));
+            assert!(proposal_already_exists.is_none(), "Duplicate proposal: {proposal_id}.");
+        }
+        self.validate_tx_streams.remove(&proposal_id);
+        Ok(SendProposalContentResponse { response: ProposalStatus::Aborted })
+    }
+
+    fn get_height_from_storage(&self) -> BatcherResult<BlockNumber> {
+        self.storage_reader.height().map_err(|err| {
+            error!("Failed to get height from storage: {}", err);
+            BatcherError::InternalError
+        })
+    }
+
+    #[instrument(skip(self), err)]
+    pub async fn get_height(&self) -> BatcherResult<GetHeightResponse> {
+        let height = self.get_height_from_storage()?;
+        Ok(GetHeightResponse { height })
+    }
+
+    #[instrument(skip(self), err)]
+    pub async fn get_proposal_content(
+        &mut self,
+        get_proposal_content_input: GetProposalContentInput,
+    ) -> BatcherResult<GetProposalContentResponse> {
+        let proposal_id = get_proposal_content_input.proposal_id;
+
+        let tx_stream = &mut self
+            .propose_tx_streams
+            .get_mut(&proposal_id)
+            .ok_or(BatcherError::ProposalNotFound { proposal_id })?;
+
+        // Blocking until we have some txs to stream or the proposal is done.
+        let mut txs = Vec::new();
+        let n_executed_txs =
+            tx_stream.recv_many(&mut txs, self.config.outstream_content_buffer_size).await;
+
+        if n_executed_txs != 0 {
+            debug!("Streaming {} txs", n_executed_txs);
+            return Ok(GetProposalContentResponse { content: GetProposalContent::Txs(txs) });
+        }
+
+        // Finished streaming all the transactions.
+        self.propose_tx_streams.remove(&proposal_id);
+        let (commitment, final_n_executed_txs) = self
+            .get_completed_proposal_result(proposal_id)
+            .await
+            .expect("Proposal should exist.")
+            .map_err(|err| {
+                error!("Failed to get commitment: {}", err);
+                BatcherError::InternalError
+            })?;
+
+        Ok(GetProposalContentResponse {
+            content: GetProposalContent::Finished { id: commitment, final_n_executed_txs },
+        })
+    }
+
+    #[instrument(skip(self, sync_block), err)]
+    pub async fn add_sync_block(&mut self, sync_block: SyncBlock) -> BatcherResult<()> {
+        trace!("Received sync block: {:?}", sync_block);
+        // TODO(AlonH): Use additional data from the sync block.
+        let SyncBlock {
+            state_diff,
+            account_transaction_hashes,
+            l1_transaction_hashes,
+            block_header_without_hash: BlockHeaderWithoutHash { block_number, .. },
+        } = sync_block;
+
+        let height = self.get_height_from_storage()?;
+        if height != block_number {
+            return Err(BatcherError::StorageHeightMarkerMismatch {
+                marker_height: height,
+                requested_height: block_number,
+            });
+        }
+
+        if let Some(height) = self.active_height {
+            info!("Aborting all work on height {} due to state sync.", height);
+            self.abort_active_height().await;
+        }
+
+        let address_to_nonce = state_diff.nonces.iter().map(|(k, v)| (*k, *v)).collect();
+        self.commit_proposal_and_block(
+            height,
+            state_diff,
+            address_to_nonce,
+            l1_transaction_hashes.iter().copied().collect(),
+            Default::default(),
+        )
+        .await?;
+        LAST_SYNCED_BLOCK.set_lossy(block_number.0);
+        SYNCED_TRANSACTIONS.increment(
+            (account_transaction_hashes.len() + l1_transaction_hashes.len()).try_into().unwrap(),
+        );
+        Ok(())
+    }
+
+    #[instrument(skip(self), err)]
+    pub async fn decision_reached(
+        &mut self,
+        input: DecisionReachedInput,
+    ) -> BatcherResult<DecisionReachedResponse> {
+        let height = self.active_height.ok_or(BatcherError::NoActiveHeight)?;
+
+        let proposal_id = input.proposal_id;
+        let proposal_result = self.executed_proposals.lock().await.remove(&proposal_id);
+        let block_execution_artifacts = proposal_result
+            .ok_or(BatcherError::ExecutedProposalNotFound { proposal_id })?
+            .map_err(|err| {
+                error!("Failed to get block execution artifacts: {}", err);
+                BatcherError::InternalError
+            })?;
+        let state_diff = block_execution_artifacts.thin_state_diff();
+        let n_txs = u64::try_from(block_execution_artifacts.tx_hashes().len())
+            .expect("Number of transactions should fit in u64");
+        let n_rejected_txs =
+            u64::try_from(block_execution_artifacts.execution_data.rejected_tx_hashes.len())
+                .expect("Number of rejected transactions should fit in u64");
+        let n_reverted_count = u64::try_from(
+            block_execution_artifacts
+                .execution_data
+                .execution_infos
+                .values()
+                .filter(|info| info.revert_error.is_some())
+                .count(),
+        )
+        .expect("Number of reverted transactions should fit in u64");
+        self.commit_proposal_and_block(
+            height,
+            state_diff.clone(),
+            block_execution_artifacts.address_to_nonce(),
+            block_execution_artifacts.execution_data.consumed_l1_handler_tx_hashes,
+            block_execution_artifacts.execution_data.rejected_tx_hashes,
+        )
+        .await?;
+        let execution_infos = block_execution_artifacts.execution_data.execution_infos;
+
+        LAST_BATCHED_BLOCK.set_lossy(height.0);
+        BATCHED_TRANSACTIONS.increment(n_txs);
+        REJECTED_TRANSACTIONS.increment(n_rejected_txs);
+        REVERTED_TRANSACTIONS.increment(n_reverted_count);
+
+        Ok(DecisionReachedResponse {
+            state_diff,
+            l2_gas_used: block_execution_artifacts.l2_gas_used,
+            central_objects: CentralObjects {
+                execution_infos,
+                bouncer_weights: block_execution_artifacts.bouncer_weights,
+                compressed_state_diff: block_execution_artifacts.compressed_state_diff,
+                casm_hash_computation_data_sierra_gas: block_execution_artifacts
+                    .casm_hash_computation_data_sierra_gas,
+                casm_hash_computation_data_proving_gas: block_execution_artifacts
+                    .casm_hash_computation_data_proving_gas,
+            },
+        })
+    }
+
+    async fn commit_proposal_and_block(
+        &mut self,
+        height: BlockNumber,
+        state_diff: ThinStateDiff,
+        address_to_nonce: HashMap<ContractAddress, Nonce>,
+        consumed_l1_handler_tx_hashes: IndexSet<TransactionHash>,
+        rejected_tx_hashes: IndexSet<TransactionHash>,
+    ) -> BatcherResult<()> {
+        info!(
+            "Committing block at height {} and notifying mempool & L1 event provider of the block.",
height + ); + trace!("Rejected transactions: {:#?}, State diff: {:#?}.", rejected_tx_hashes, state_diff); + + // Commit the proposal to the storage. + self.storage_writer.commit_proposal(height, state_diff).map_err(|err| { + error!("Failed to commit proposal to storage: {}", err); + BatcherError::InternalError + })?; + + // Notify the L1 provider of the new block. + let rejected_l1_handler_tx_hashes = rejected_tx_hashes + .iter() + .copied() + .filter(|tx_hash| consumed_l1_handler_tx_hashes.contains(tx_hash)) + .collect(); + + let l1_provider_result = self + .l1_provider_client + .commit_block(consumed_l1_handler_tx_hashes, rejected_l1_handler_tx_hashes, height) + .await; + + // Return error if the commit to the L1 provider failed. + if let Err(err) = l1_provider_result { + match err { + L1ProviderClientError::L1ProviderError(L1ProviderError::UnexpectedHeight { + expected_height, + got, + }) => { + error!( + "Unexpected height while committing block in L1 provider: expected={:?}, \ + got={:?}", + expected_height, got + ); + } + other_err => { + error!( + "Unexpected error while committing block in L1 provider: {:?}", + other_err + ); + } + } + // Rollback the state diff in the storage. + self.storage_writer.revert_block(height); + return Err(BatcherError::InternalError); + } + + // Notify the mempool of the new block. + let mempool_result = self + .mempool_client + .commit_block(CommitBlockArgs { address_to_nonce, rejected_tx_hashes }) + .await; + + if let Err(mempool_err) = mempool_result { + error!("Failed to commit block to mempool: {}", mempool_err); + // TODO(AlonH): Should we rollback the state diff and return an error? + }; + + STORAGE_HEIGHT.increment(1); + Ok(()) + } + + async fn is_active(&self, proposal_id: ProposalId) -> bool { + *self.active_proposal.lock().await == Some(proposal_id) + } + + // Sets a new active proposal task. + // Fails if there is another proposal being currently generated, or a proposal with the same ID + // already exists. + async fn set_active_proposal(&mut self, proposal_id: ProposalId) -> BatcherResult<()> { + if self.executed_proposals.lock().await.contains_key(&proposal_id) { + return Err(BatcherError::ProposalAlreadyExists { proposal_id }); + } + + let mut active_proposal = self.active_proposal.lock().await; + if let Some(active_proposal_id) = *active_proposal { + return Err(BatcherError::AnotherProposalInProgress { + active_proposal_id, + new_proposal_id: proposal_id, + }); + } + + debug!("Set proposal {} as the one being generated.", proposal_id); + *active_proposal = Some(proposal_id); + Ok(()) + } + + // Starts a new block proposal generation task for the given proposal_id. + // Uses the given block_builder to generate the proposal. 
+    async fn spawn_proposal(
+        &mut self,
+        proposal_id: ProposalId,
+        mut block_builder: Box<dyn BlockBuilderTrait>,
+        abort_signal_sender: tokio::sync::oneshot::Sender<()>,
+        final_n_executed_txs_sender: Option<tokio::sync::oneshot::Sender<usize>>,
+        pre_confirmed_block_writer: Option<Box<dyn PreconfirmedBlockWriterTrait>>,
+        mut proposal_metrics_handle: ProposalMetricsHandle,
+    ) -> BatcherResult<()> {
+        self.set_active_proposal(proposal_id).await?;
+        info!("Starting generation of a new proposal with id {}.", proposal_id);
+
+        let active_proposal = self.active_proposal.clone();
+        let executed_proposals = self.executed_proposals.clone();
+
+        let execution_join_handle = tokio::spawn(
+            async move {
+                let result = match block_builder.build_block().await {
+                    Ok(artifacts) => {
+                        proposal_metrics_handle.set_succeeded();
+                        Ok(artifacts)
+                    }
+                    Err(BlockBuilderError::Aborted) => {
+                        proposal_metrics_handle.set_aborted();
+                        Err(BlockBuilderError::Aborted)
+                    }
+                    Err(e) => Err(e),
+                }
+                .map_err(Arc::new);
+
+                // The proposal is done, clear the active proposal.
+                // Keep the proposal result only if it is the same as the active proposal.
+                // The active proposal might have changed if this proposal was aborted.
+                let mut active_proposal = active_proposal.lock().await;
+                if *active_proposal == Some(proposal_id) {
+                    active_proposal.take();
+                    let proposal_already_exists =
+                        executed_proposals.lock().await.insert(proposal_id, result);
+                    assert!(
+                        proposal_already_exists.is_none(),
+                        "Duplicate proposal: {proposal_id}."
+                    );
+                }
+            }
+            .in_current_span(),
+        );
+
+        let writer_join_handle =
+            pre_confirmed_block_writer.map(|mut pre_confirmed_block_writer| {
+                tokio::spawn(async move {
+                    // TODO(noamsp): add error handling
+                    pre_confirmed_block_writer.run().await.ok();
+                })
+            });
+
+        self.active_proposal_task = Some(ProposalTask {
+            abort_signal_sender,
+            final_n_executed_txs_sender,
+            execution_join_handle,
+            writer_join_handle,
+        });
+        Ok(())
+    }
+
+    // Returns a completed proposal result, either its commitment and final_n_executed_txs or an
+    // error if the proposal failed. If the proposal doesn't exist, or it's still active,
+    // returns None.
+    async fn get_completed_proposal_result(
+        &self,
+        proposal_id: ProposalId,
+    ) -> Option<ProposalResult<(ProposalCommitment, usize)>> {
+        let guard = self.executed_proposals.lock().await;
+        let proposal_result = guard.get(&proposal_id);
+        match proposal_result {
+            Some(Ok(artifacts)) => {
+                Some(Ok((artifacts.commitment(), artifacts.final_n_executed_txs)))
+            }
+            Some(Err(e)) => Some(Err(e.clone())),
+            None => None,
+        }
+    }
+
+    // Ends the current active proposal.
+    // This call is non-blocking.
+    async fn abort_active_proposal(&mut self) {
+        self.active_proposal.lock().await.take();
+        if let Some(proposal_task) = self.active_proposal_task.take() {
+            proposal_task.abort_signal_sender.send(()).ok();
+        }
+    }
+
+    pub async fn await_active_proposal(
+        &mut self,
+        final_n_executed_txs: usize,
+    ) -> BatcherResult<()> {
+        if let Some(ProposalTask {
+            execution_join_handle,
+            writer_join_handle,
+            final_n_executed_txs_sender,
+            ..
+        }) = self.active_proposal_task.take()
+        {
+            if let Some(final_n_executed_txs_sender) = final_n_executed_txs_sender {
+                final_n_executed_txs_sender.send(final_n_executed_txs).map_err(|err| {
+                    error!(
+                        "Failed to send final_n_executed_txs ({final_n_executed_txs}) to the tx \
+                         provider: {}",
+                        err
+                    );
+                    BatcherError::InternalError
+                })?;
+            }
+
+            let writer_future = writer_join_handle
+                .map(FutureExt::boxed)
+                .unwrap_or_else(|| futures::future::ready(Ok(())).boxed());
+            let _ = tokio::join!(execution_join_handle, writer_future);
+        }
+
+        Ok(())
+    }
+
+    #[instrument(skip(self), err)]
+    // This function will panic if there is a storage failure to revert the block.
+    pub async fn revert_block(&mut self, input: RevertBlockInput) -> BatcherResult<()> {
+        info!("Reverting block at height {}.", input.height);
+        let height = self.get_height_from_storage()?.prev().ok_or(
+            BatcherError::StorageHeightMarkerMismatch {
+                marker_height: BlockNumber(0),
+                requested_height: input.height,
+            },
+        )?;
+
+        if height != input.height {
+            return Err(BatcherError::StorageHeightMarkerMismatch {
+                marker_height: height.unchecked_next(),
+                requested_height: input.height,
+            });
+        }
+
+        if let Some(height) = self.active_height {
+            info!("Aborting all work on height {} due to a revert request.", height);
+            self.abort_active_height().await;
+        }
+
+        self.storage_writer.revert_block(height);
+        STORAGE_HEIGHT.decrement(1);
+        REVERTED_BLOCKS.increment(1);
+        Ok(())
+    }
+}
+
+pub fn create_batcher(
+    config: BatcherConfig,
+    mempool_client: SharedMempoolClient,
+    l1_provider_client: SharedL1ProviderClient,
+    class_manager_client: SharedClassManagerClient,
+    pre_confirmed_cende_client: Arc<dyn PreconfirmedCendeClientTrait>,
+) -> Batcher {
+    let (storage_reader, storage_writer) = apollo_storage::open_storage(config.storage.clone())
+        .expect("Failed to open batcher's storage");
+
+    let execute_config = &config.block_builder_config.execute_config;
+    let worker_pool = Arc::new(WorkerPool::start(execute_config));
+    let pre_confirmed_block_writer_factory = Box::new(PreconfirmedBlockWriterFactory {
+        config: config.pre_confirmed_block_writer_config,
+        cende_client: pre_confirmed_cende_client,
+    });
+    let block_builder_factory = Box::new(BlockBuilderFactory {
+        block_builder_config: config.block_builder_config.clone(),
+        storage_reader: storage_reader.clone(),
+        contract_class_manager: ContractClassManager::start(
+            config.contract_class_manager_config.clone(),
+        ),
+        class_manager_client: class_manager_client.clone(),
+        worker_pool,
+    });
+    let storage_reader = Arc::new(storage_reader);
+    let storage_writer = Box::new(storage_writer);
+    let transaction_converter =
+        TransactionConverter::new(class_manager_client, config.storage.db_config.chain_id.clone());
+
+    Batcher::new(
+        config,
+        storage_reader,
+        storage_writer,
+        l1_provider_client,
+        mempool_client,
+        transaction_converter,
+        block_builder_factory,
+        pre_confirmed_block_writer_factory,
+    )
+}
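The storage access below goes through narrow traits annotated with `#[cfg_attr(test, automock)]`, so the unit tests in `batcher_test.rs` (further down) can stub storage via `MockBatcherStorageReaderTrait`. A minimal sketch of the same pattern, using a hypothetical `MyReader` trait and assuming the `mockall` crate:

```rust
use mockall::automock;

// Hypothetical narrow storage trait, mirroring `BatcherStorageReaderTrait`.
#[automock]
trait MyReader {
    fn height(&self) -> u64;
}

// Production code depends only on the trait object.
fn next_height(reader: &dyn MyReader) -> u64 {
    reader.height() + 1
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stubs_the_reader() {
        // `MockMyReader` is generated by `#[automock]`.
        let mut mock = MockMyReader::new();
        mock.expect_height().returning(|| 41);
        assert_eq!(next_height(&mock), 42);
    }
}
```

Keeping the trait this small is what makes the mock cheap: the batcher never needs a full storage handle in tests, only the two or three operations it actually performs.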
+    fn height(&self) -> apollo_storage::StorageResult<BlockNumber>;
+}
+
+impl BatcherStorageReaderTrait for apollo_storage::StorageReader {
+    fn height(&self) -> apollo_storage::StorageResult<BlockNumber> {
+        self.begin_ro_txn()?.get_state_marker()
+    }
+}
+
+#[cfg_attr(test, automock)]
+pub trait BatcherStorageWriterTrait: Send + Sync {
+    fn commit_proposal(
+        &mut self,
+        height: BlockNumber,
+        state_diff: ThinStateDiff,
+    ) -> apollo_storage::StorageResult<()>;
+
+    fn revert_block(&mut self, height: BlockNumber);
+}
+
+impl BatcherStorageWriterTrait for apollo_storage::StorageWriter {
+    fn commit_proposal(
+        &mut self,
+        height: BlockNumber,
+        state_diff: ThinStateDiff,
+    ) -> apollo_storage::StorageResult<()> {
+        // TODO(AlonH): write casms.
+        self.begin_rw_txn()?.append_state_diff(height, state_diff)?.commit()
+    }
+
+    // This function will panic if there is a storage failure to revert the block.
+    fn revert_block(&mut self, height: BlockNumber) {
+        revert_block(self, height);
+    }
+}
+
+#[async_trait]
+impl ComponentStarter for Batcher {
+    async fn start(&mut self) {
+        default_component_start_fn::<Self>().await;
+        let storage_height = self
+            .storage_reader
+            .height()
+            .expect("Failed to get height from storage during batcher creation.");
+        register_metrics(storage_height);
+    }
+}
diff --git a/crates/apollo_batcher/src/batcher_test.rs b/crates/apollo_batcher/src/batcher_test.rs
new file mode 100644
index 00000000000..08b241ac67e
--- /dev/null
+++ b/crates/apollo_batcher/src/batcher_test.rs
@@ -0,0 +1,1186 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use apollo_batcher_types::batcher_types::{
+    DecisionReachedInput,
+    DecisionReachedResponse,
+    GetHeightResponse,
+    GetProposalContent,
+    GetProposalContentInput,
+    GetProposalContentResponse,
+    ProposalCommitment,
+    ProposalId,
+    ProposalStatus,
+    ProposeBlockInput,
+    RevertBlockInput,
+    SendProposalContent,
+    SendProposalContentInput,
+    SendProposalContentResponse,
+    StartHeightInput,
+    ValidateBlockInput,
+};
+use apollo_batcher_types::errors::BatcherError;
+use apollo_class_manager_types::transaction_converter::TransactionConverter;
+use apollo_class_manager_types::{EmptyClassManagerClient, SharedClassManagerClient};
+use apollo_infra::component_client::ClientError;
+use apollo_infra::component_definitions::ComponentStarter;
+use apollo_l1_provider_types::errors::{L1ProviderClientError, L1ProviderError};
+use apollo_l1_provider_types::{MockL1ProviderClient, SessionState};
+use apollo_mempool_types::communication::{MempoolClientError, MockMempoolClient};
+use apollo_mempool_types::mempool_types::CommitBlockArgs;
+use apollo_state_sync_types::state_sync_types::SyncBlock;
+use assert_matches::assert_matches;
+use blockifier::abi::constants;
+use indexmap::{indexmap, IndexSet};
+use metrics_exporter_prometheus::PrometheusBuilder;
+use mockall::predicate::eq;
+use rstest::rstest;
+use starknet_api::block::{BlockHeaderWithoutHash, BlockInfo, BlockNumber};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::core::{ContractAddress, Nonce};
+use starknet_api::state::ThinStateDiff;
+use starknet_api::test_utils::CHAIN_ID_FOR_TESTS;
+use starknet_api::transaction::TransactionHash;
+use starknet_api::{contract_address, nonce, tx_hash};
+use validator::Validate;
+
+use crate::batcher::{Batcher, MockBatcherStorageReaderTrait, MockBatcherStorageWriterTrait};
+use crate::block_builder::{
+    AbortSignalSender,
+    BlockBuilderConfig,
+    BlockBuilderError,
+    BlockBuilderResult,
+    BlockExecutionArtifacts,
+    FailOnErrorCause,
+    MockBlockBuilderFactoryTrait,
+};
+use crate::config::BatcherConfig;
+use crate::metrics::{
+    BATCHED_TRANSACTIONS,
+    LAST_SYNCED_BLOCK,
+    PROPOSAL_ABORTED,
+    PROPOSAL_FAILED,
+    PROPOSAL_STARTED,
+    PROPOSAL_SUCCEEDED,
+    REJECTED_TRANSACTIONS,
+    REVERTED_BLOCKS,
+    REVERTED_TRANSACTIONS,
+    STORAGE_HEIGHT,
+    SYNCED_TRANSACTIONS,
+};
+use crate::pre_confirmed_block_writer::{
+    MockPreconfirmedBlockWriterFactoryTrait,
+    MockPreconfirmedBlockWriterTrait,
+};
+use crate::test_utils::{
+    test_txs,
+    verify_indexed_execution_infos,
+    FakeProposeBlockBuilder,
+    FakeValidateBlockBuilder,
+    DUMMY_FINAL_N_EXECUTED_TXS,
+};
+
+const INITIAL_HEIGHT: BlockNumber = BlockNumber(3);
+const LATEST_BLOCK_IN_STORAGE: BlockNumber = BlockNumber(INITIAL_HEIGHT.0 - 1);
+const STREAMING_CHUNK_SIZE: usize = 3;
+const BLOCK_GENERATION_TIMEOUT: tokio::time::Duration = tokio::time::Duration::from_secs(1);
+const PROPOSAL_ID: ProposalId = ProposalId(0);
+const BUILD_BLOCK_FAIL_ON_ERROR: BlockBuilderError =
+    BlockBuilderError::FailOnError(FailOnErrorCause::BlockFull);
+
+fn proposal_commitment() -> ProposalCommitment {
+    BlockExecutionArtifacts::create_for_testing().commitment()
+}
+
+fn propose_block_input(proposal_id: ProposalId) -> ProposeBlockInput {
+    ProposeBlockInput {
+        proposal_id,
+        proposal_round: 0,
+        retrospective_block_hash: None,
+        deadline: chrono::Utc::now() + BLOCK_GENERATION_TIMEOUT,
+        block_info: BlockInfo { block_number: INITIAL_HEIGHT, ..BlockInfo::create_for_testing() },
+    }
+}
+
+fn validate_block_input(proposal_id: ProposalId) -> ValidateBlockInput {
+    ValidateBlockInput {
+        proposal_id,
+        retrospective_block_hash: None,
+        deadline: chrono::Utc::now() + BLOCK_GENERATION_TIMEOUT,
+        block_info: BlockInfo { block_number: INITIAL_HEIGHT, ..BlockInfo::create_for_testing() },
+    }
+}
+
+struct MockDependencies {
+    storage_reader: MockBatcherStorageReaderTrait,
+    storage_writer: MockBatcherStorageWriterTrait,
+    mempool_client: MockMempoolClient,
+    l1_provider_client: MockL1ProviderClient,
+    block_builder_factory: MockBlockBuilderFactoryTrait,
+    pre_confirmed_block_writer_factory: MockPreconfirmedBlockWriterFactoryTrait,
+    class_manager_client: SharedClassManagerClient,
+}
+
+impl Default for MockDependencies {
+    fn default() -> Self {
+        let mut storage_reader = MockBatcherStorageReaderTrait::new();
+        storage_reader.expect_height().returning(|| Ok(INITIAL_HEIGHT));
+        let mut mempool_client = MockMempoolClient::new();
+        let expected_gas_price = propose_block_input(PROPOSAL_ID)
+            .block_info
+            .gas_prices
+            .strk_gas_prices
+            .l2_gas_price
+            .get();
+        mempool_client.expect_update_gas_price().with(eq(expected_gas_price)).returning(|_| Ok(()));
+        mempool_client
+            .expect_commit_block()
+            .with(eq(CommitBlockArgs::default()))
+            .returning(|_| Ok(()));
+        let block_builder_factory = MockBlockBuilderFactoryTrait::new();
+        let mut pre_confirmed_block_writer_factory = MockPreconfirmedBlockWriterFactoryTrait::new();
+        pre_confirmed_block_writer_factory.expect_create().returning(|_, _, _| {
+            let (non_working_candidate_tx_sender, _) = tokio::sync::mpsc::channel(1);
+            let (non_working_pre_confirmed_tx_sender, _) = tokio::sync::mpsc::channel(1);
+            let mut mock_writer = Box::new(MockPreconfirmedBlockWriterTrait::new());
+            mock_writer.expect_run().return_once(|| Box::pin(async move { Ok(()) }));
+            (mock_writer, non_working_candidate_tx_sender, non_working_pre_confirmed_tx_sender)
+        });
+
+        Self {
+            storage_reader,
+            storage_writer: MockBatcherStorageWriterTrait::new(),
+            l1_provider_client: MockL1ProviderClient::new(),
+            mempool_client,
+            block_builder_factory,
+            pre_confirmed_block_writer_factory,
+            // TODO(noamsp): use MockClassManagerClient
+            class_manager_client: Arc::new(EmptyClassManagerClient),
+        }
+    }
+}
+
+async fn create_batcher(mock_dependencies: MockDependencies) -> Batcher {
+    let mut batcher = Batcher::new(
+        BatcherConfig { outstream_content_buffer_size: STREAMING_CHUNK_SIZE, ..Default::default() },
+        Arc::new(mock_dependencies.storage_reader),
+        Box::new(mock_dependencies.storage_writer),
+        Arc::new(mock_dependencies.l1_provider_client),
+        Arc::new(mock_dependencies.mempool_client),
+        TransactionConverter::new(
+            mock_dependencies.class_manager_client,
+            CHAIN_ID_FOR_TESTS.clone(),
+        ),
+        Box::new(mock_dependencies.block_builder_factory),
+        Box::new(mock_dependencies.pre_confirmed_block_writer_factory),
+    );
+    // Call post-creation functionality (e.g., metrics registration).
+    batcher.start().await;
+    batcher
+}
+
+fn abort_signal_sender() -> AbortSignalSender {
+    tokio::sync::oneshot::channel().0
+}
+
+async fn batcher_propose_and_commit_block(
+    mock_dependencies: MockDependencies,
+) -> Result<DecisionReachedResponse, BatcherError> {
+    let mut batcher = create_batcher(mock_dependencies).await;
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+    batcher.propose_block(propose_block_input(PROPOSAL_ID)).await.unwrap();
+    batcher.await_active_proposal(DUMMY_FINAL_N_EXECUTED_TXS).await.unwrap();
+    batcher.decision_reached(DecisionReachedInput { proposal_id: PROPOSAL_ID }).await
+}
+
+fn mock_create_builder_for_validate_block(
+    block_builder_factory: &mut MockBlockBuilderFactoryTrait,
+    build_block_result: BlockBuilderResult<BlockExecutionArtifacts>,
+) {
+    block_builder_factory.expect_create_block_builder().times(1).return_once(
+        |_, _, tx_provider, _, _, _, _| {
+            let block_builder = FakeValidateBlockBuilder {
+                tx_provider,
+                build_block_result: Some(build_block_result),
+            };
+            Ok((Box::new(block_builder), abort_signal_sender()))
+        },
+    );
+}
+
+fn mock_create_builder_for_propose_block(
+    block_builder_factory: &mut MockBlockBuilderFactoryTrait,
+    output_txs: Vec<InternalConsensusTransaction>,
+    build_block_result: BlockBuilderResult<BlockExecutionArtifacts>,
+) {
+    block_builder_factory.expect_create_block_builder().times(1).return_once(
+        move |_, _, _, output_content_sender, _, _, _| {
+            let block_builder = FakeProposeBlockBuilder {
+                output_content_sender: output_content_sender.unwrap(),
+                output_txs,
+                build_block_result: Some(build_block_result),
+            };
+            Ok((Box::new(block_builder), abort_signal_sender()))
+        },
+    );
+}
+
+async fn create_batcher_with_active_validate_block(
+    build_block_result: BlockBuilderResult<BlockExecutionArtifacts>,
+) -> Batcher {
+    let mut block_builder_factory = MockBlockBuilderFactoryTrait::new();
+    mock_create_builder_for_validate_block(&mut block_builder_factory, build_block_result);
+    start_batcher_with_active_validate(block_builder_factory).await
+}
+
+async fn start_batcher_with_active_validate(
+    block_builder_factory: MockBlockBuilderFactoryTrait,
+) -> Batcher {
+    let mut l1_provider_client = MockL1ProviderClient::new();
+    l1_provider_client.expect_start_block().returning(|_, _| Ok(()));
+
+    let mut batcher = create_batcher(MockDependencies {
+        block_builder_factory,
+        l1_provider_client,
+        ..Default::default()
+    })
+    .await;
+
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+
+    batcher.validate_block(validate_block_input(PROPOSAL_ID)).await.unwrap();
+
+    batcher
+}
+
+fn test_tx_hashes() -> IndexSet<TransactionHash> {
+    (0..5u8).map(|i| tx_hash!(i + 12)).collect()
+}
+
+fn test_contract_nonces() -> HashMap<ContractAddress, Nonce> {
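+    // For illustration, this yields: contract address 33 -> nonce 9, 34 -> 10, 35 -> 11.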
+    HashMap::from_iter((0..3u8).map(|i| (contract_address!(i + 33), nonce!(i + 9))))
+}
+
+pub fn test_state_diff() -> ThinStateDiff {
+    ThinStateDiff {
+        storage_diffs: indexmap! {
+            4u64.into() => indexmap! {
+                5u64.into() => 6u64.into(),
+                7u64.into() => 8u64.into(),
+            },
+            9u64.into() => indexmap! {
+                10u64.into() => 11u64.into(),
+            },
+        },
+        nonces: test_contract_nonces().into_iter().collect(),
+        ..Default::default()
+    }
+}
+
+fn verify_decision_reached_response(
+    response: &DecisionReachedResponse,
+    expected_artifacts: &BlockExecutionArtifacts,
+) {
+    assert_eq!(
+        response.state_diff.nonces,
+        expected_artifacts.commitment_state_diff.address_to_nonce
+    );
+    assert_eq!(
+        response.state_diff.storage_diffs,
+        expected_artifacts.commitment_state_diff.storage_updates
+    );
+    assert_eq!(
+        response.state_diff.declared_classes,
+        expected_artifacts.commitment_state_diff.class_hash_to_compiled_class_hash
+    );
+    assert_eq!(
+        response.state_diff.deployed_contracts,
+        expected_artifacts.commitment_state_diff.address_to_class_hash
+    );
+    assert_eq!(response.l2_gas_used, expected_artifacts.l2_gas_used);
+    assert_eq!(response.central_objects.bouncer_weights, expected_artifacts.bouncer_weights);
+    assert_eq!(
+        response.central_objects.execution_infos,
+        expected_artifacts.execution_data.execution_infos
+    );
+}
+
+fn assert_proposal_metrics(
+    metrics: &str,
+    expected_started: u64,
+    expected_succeeded: u64,
+    expected_failed: u64,
+    expected_aborted: u64,
+) {
+    let n_expected_active_proposals =
+        expected_started - (expected_succeeded + expected_failed + expected_aborted);
+    assert!(n_expected_active_proposals <= 1);
+    let started = PROPOSAL_STARTED.parse_numeric_metric::<u64>(metrics);
+    let succeeded = PROPOSAL_SUCCEEDED.parse_numeric_metric::<u64>(metrics);
+    let failed = PROPOSAL_FAILED.parse_numeric_metric::<u64>(metrics);
+    let aborted = PROPOSAL_ABORTED.parse_numeric_metric::<u64>(metrics);
+
+    assert_eq!(
+        started,
+        Some(expected_started),
+        "unexpected proposal_started value: expected {}, got {:?}",
+        expected_started,
+        started,
+    );
+    assert_eq!(
+        succeeded,
+        Some(expected_succeeded),
+        "unexpected proposal_succeeded value: expected {}, got {:?}",
+        expected_succeeded,
+        succeeded,
+    );
+    assert_eq!(
+        failed,
+        Some(expected_failed),
+        "unexpected proposal_failed value: expected {}, got {:?}",
+        expected_failed,
+        failed,
+    );
+    assert_eq!(
+        aborted,
+        Some(expected_aborted),
+        "unexpected proposal_aborted value: expected {}, got {:?}",
+        expected_aborted,
+        aborted,
+    );
+}
+
+#[tokio::test]
+async fn metrics_registered() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let _batcher = create_batcher(MockDependencies::default()).await;
+    let metrics = recorder.handle().render();
+    assert_eq!(STORAGE_HEIGHT.parse_numeric_metric::<u64>(&metrics), Some(INITIAL_HEIGHT.0));
+}
+
+#[rstest]
+#[tokio::test]
+async fn start_height_success() {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+    assert_eq!(batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await, Ok(()));
+}
+
+#[rstest]
+#[case::height_already_passed(
+    INITIAL_HEIGHT.prev().unwrap(),
+    BatcherError::StorageHeightMarkerMismatch {
+        marker_height: INITIAL_HEIGHT,
+        requested_height: INITIAL_HEIGHT.prev().unwrap()
+    }
+)]
+#[case::storage_not_synced(
+    INITIAL_HEIGHT.unchecked_next(),
+    BatcherError::StorageHeightMarkerMismatch {
+        marker_height: INITIAL_HEIGHT,
+        requested_height: INITIAL_HEIGHT.unchecked_next()
+    }
+)]
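+// In both cases the requested height must equal the storage height marker exactly; e.g. with
+// blocks 0..=2 committed the marker is BlockNumber(3), so only start_height(3) succeeds.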
+#[tokio::test]
+async fn start_height_fail(#[case] height: BlockNumber, #[case] expected_error: BatcherError) {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+    assert_eq!(batcher.start_height(StartHeightInput { height }).await, Err(expected_error));
+}
+
+#[rstest]
+#[tokio::test]
+async fn duplicate_start_height() {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+
+    let initial_height = StartHeightInput { height: INITIAL_HEIGHT };
+    assert_eq!(batcher.start_height(initial_height.clone()).await, Ok(()));
+    assert_eq!(batcher.start_height(initial_height).await, Err(BatcherError::HeightInProgress));
+}
+
+#[rstest]
+#[tokio::test]
+async fn no_active_height() {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+
+    // Calling `propose_block` and `validate_block` without starting a height should fail.
+
+    let result = batcher.propose_block(propose_block_input(PROPOSAL_ID)).await;
+    assert_eq!(result, Err(BatcherError::NoActiveHeight));
+
+    let result = batcher.validate_block(validate_block_input(PROPOSAL_ID)).await;
+    assert_eq!(result, Err(BatcherError::NoActiveHeight));
+}
+
+#[rstest]
+#[case::proposer(true)]
+#[case::validator(false)]
+#[tokio::test]
+async fn l1_handler_provider_not_ready(#[case] proposer: bool) {
+    let mut deps = MockDependencies::default();
+    deps.l1_provider_client.expect_start_block().returning(|_, _| {
+        // The heights are not important for the test.
+        let err = L1ProviderError::UnexpectedHeight {
+            expected_height: INITIAL_HEIGHT,
+            got: INITIAL_HEIGHT,
+        };
+        Err(err.into())
+    });
+    let mut batcher = create_batcher(deps).await;
+    assert_eq!(batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await, Ok(()));
+
+    if proposer {
+        assert_eq!(
+            batcher.propose_block(propose_block_input(PROPOSAL_ID)).await,
+            Err(BatcherError::NotReady)
+        );
+    } else {
+        assert_eq!(
+            batcher.validate_block(validate_block_input(PROPOSAL_ID)).await,
+            Err(BatcherError::NotReady)
+        );
+    }
+}
+
+#[rstest]
+#[tokio::test]
+async fn consecutive_heights_success() {
+    let mut storage_reader = MockBatcherStorageReaderTrait::new();
+    storage_reader.expect_height().times(1).returning(|| Ok(INITIAL_HEIGHT)); // metrics registration
+    storage_reader.expect_height().times(1).returning(|| Ok(INITIAL_HEIGHT)); // first start_height
+    storage_reader.expect_height().times(1).returning(|| Ok(INITIAL_HEIGHT.unchecked_next())); // second start_height
+
+    let mut block_builder_factory = MockBlockBuilderFactoryTrait::new();
+    for _ in 0..2 {
+        mock_create_builder_for_propose_block(
+            &mut block_builder_factory,
+            vec![],
+            Ok(BlockExecutionArtifacts::create_for_testing()),
+        );
+    }
+
+    let mut l1_provider_client = MockL1ProviderClient::new();
+    l1_provider_client.expect_start_block().times(2).returning(|_, _| Ok(()));
+    let mut batcher = create_batcher(MockDependencies {
+        block_builder_factory,
+        storage_reader,
+        l1_provider_client,
+        ..Default::default()
+    })
+    .await;
+
+    // Prepare the propose_block requests for the first and the second heights.
+    let first_propose_block_input = propose_block_input(PROPOSAL_ID);
+    let mut second_propose_block_input = first_propose_block_input.clone();
+    second_propose_block_input.block_info.block_number = INITIAL_HEIGHT.unchecked_next();
+
+    // Start the first height and propose block.
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+    batcher.propose_block(first_propose_block_input).await.unwrap();
+
+    // Start the second height, and make sure the previous height's proposal was cleared by
+    // trying to create a proposal with the same ID.
+    batcher
+        .start_height(StartHeightInput { height: INITIAL_HEIGHT.unchecked_next() })
+        .await
+        .unwrap();
+    batcher.propose_block(second_propose_block_input).await.unwrap();
+}
+
+#[rstest]
+#[tokio::test]
+async fn validate_block_full_flow() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut batcher = create_batcher_with_active_validate_block(Ok(
+        BlockExecutionArtifacts::create_for_testing(),
+    ))
+    .await;
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 1, 0, 0, 0);
+
+    let send_proposal_input_txs = SendProposalContentInput {
+        proposal_id: PROPOSAL_ID,
+        content: SendProposalContent::Txs(test_txs(0..1)),
+    };
+    assert_eq!(
+        batcher.send_proposal_content(send_proposal_input_txs).await.unwrap(),
+        SendProposalContentResponse { response: ProposalStatus::Processing }
+    );
+
+    let finish_proposal = SendProposalContentInput {
+        proposal_id: PROPOSAL_ID,
+        content: SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+    };
+    assert_eq!(
+        batcher.send_proposal_content(finish_proposal).await.unwrap(),
+        SendProposalContentResponse { response: ProposalStatus::Finished(proposal_commitment()) }
+    );
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 1, 1, 0, 0);
+}
+
+#[rstest]
+#[case::send_txs(SendProposalContent::Txs(test_txs(0..1)))]
+#[case::send_finish(SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS))]
+#[case::send_abort(SendProposalContent::Abort)]
+#[tokio::test]
+async fn send_content_to_unknown_proposal(#[case] content: SendProposalContent) {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+
+    let send_proposal_content_input =
+        SendProposalContentInput { proposal_id: PROPOSAL_ID, content };
+    let result = batcher.send_proposal_content(send_proposal_content_input).await;
+    assert_eq!(result, Err(BatcherError::ProposalNotFound { proposal_id: PROPOSAL_ID }));
+}
+
+#[rstest]
+#[case::send_txs(SendProposalContent::Txs(test_txs(0..1)), ProposalStatus::InvalidProposal("Block is full".to_string()))]
+#[case::send_finish(
+    SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+    ProposalStatus::InvalidProposal("Block is full".to_string())
+)]
+#[case::send_abort(SendProposalContent::Abort, ProposalStatus::Aborted)]
+#[tokio::test]
+async fn send_content_to_an_invalid_proposal(
+    #[case] content: SendProposalContent,
+    #[case] response: ProposalStatus,
+) {
+    let mut batcher =
+        create_batcher_with_active_validate_block(Err(BUILD_BLOCK_FAIL_ON_ERROR)).await;
+    batcher.await_active_proposal(DUMMY_FINAL_N_EXECUTED_TXS).await.unwrap();
+
+    let send_proposal_content_input =
+        SendProposalContentInput { proposal_id: PROPOSAL_ID, content };
+    let result = batcher.send_proposal_content(send_proposal_content_input).await.unwrap();
+    assert_eq!(result, SendProposalContentResponse { response });
+}
+
+#[rstest]
+#[case::send_txs_after_finish(SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS), SendProposalContent::Txs(test_txs(0..1)))]
+#[case::send_finish_after_finish(
+    SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+    SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS)
+)]
+#[case::send_abort_after_finish(
+    SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+    SendProposalContent::Abort
+)]
+#[case::send_txs_after_abort(SendProposalContent::Abort, SendProposalContent::Txs(test_txs(0..1)))]
+#[case::send_finish_after_abort(
+    SendProposalContent::Abort,
+    SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS)
+)]
+#[case::send_abort_after_abort(SendProposalContent::Abort, SendProposalContent::Abort)]
+#[tokio::test]
+async fn send_proposal_content_after_finish_or_abort(
+    #[case] end_proposal_content: SendProposalContent,
+    #[case] content: SendProposalContent,
+) {
+    let mut batcher = create_batcher_with_active_validate_block(Ok(
+        BlockExecutionArtifacts::create_for_testing(),
+    ))
+    .await;
+
+    // End the proposal.
+    let end_proposal =
+        SendProposalContentInput { proposal_id: PROPOSAL_ID, content: end_proposal_content };
+    batcher.send_proposal_content(end_proposal).await.unwrap();
+
+    // Send another request.
+    let send_proposal_content_input =
+        SendProposalContentInput { proposal_id: PROPOSAL_ID, content };
+    let result = batcher.send_proposal_content(send_proposal_content_input).await;
+    assert_eq!(result, Err(BatcherError::ProposalNotFound { proposal_id: PROPOSAL_ID }));
+}
+
+#[rstest]
+#[tokio::test]
+async fn send_proposal_content_abort() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut batcher =
+        create_batcher_with_active_validate_block(Err(BlockBuilderError::Aborted)).await;
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 1, 0, 0, 0);
+
+    let send_abort_proposal =
+        SendProposalContentInput { proposal_id: PROPOSAL_ID, content: SendProposalContent::Abort };
+    assert_eq!(
+        batcher.send_proposal_content(send_abort_proposal).await.unwrap(),
+        SendProposalContentResponse { response: ProposalStatus::Aborted }
+    );
+
+    // The block builder runs in a separate task, and the proposal metrics are emitted from that
+    // task, so we need to wait for them (there is no way to wait for the abort to complete).
+    // TODO(AlonH): Find a way to wait for the metrics to be emitted.
+    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 1, 0, 0, 1);
+}
+
+#[rstest]
+#[tokio::test]
+async fn propose_block_full_flow() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    // Expecting 3 chunks of streamed txs.
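+    // With STREAMING_CHUNK_SIZE = 3, the range 0..7 yields 7 txs that arrive as chunks of
+    // 3, 3 and 1, i.e. 7.div_ceil(3) == 3 chunks.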
+    let expected_streamed_txs = test_txs(0..STREAMING_CHUNK_SIZE * 2 + 1);
+
+    let mut block_builder_factory = MockBlockBuilderFactoryTrait::new();
+    mock_create_builder_for_propose_block(
+        &mut block_builder_factory,
+        expected_streamed_txs.clone(),
+        Ok(BlockExecutionArtifacts::create_for_testing()),
+    );
+
+    let mut l1_provider_client = MockL1ProviderClient::new();
+    l1_provider_client.expect_start_block().times(1).returning(|_, _| Ok(()));
+
+    let mut batcher = create_batcher(MockDependencies {
+        block_builder_factory,
+        l1_provider_client,
+        ..Default::default()
+    })
+    .await;
+
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+    batcher.propose_block(propose_block_input(PROPOSAL_ID)).await.unwrap();
+
+    let expected_n_chunks = expected_streamed_txs.len().div_ceil(STREAMING_CHUNK_SIZE);
+    let mut aggregated_streamed_txs = Vec::new();
+    for _ in 0..expected_n_chunks {
+        let content = batcher
+            .get_proposal_content(GetProposalContentInput { proposal_id: PROPOSAL_ID })
+            .await
+            .unwrap()
+            .content;
+        let mut txs = assert_matches!(content, GetProposalContent::Txs(txs) => txs);
+        assert!(txs.len() <= STREAMING_CHUNK_SIZE, "{} <= {}", txs.len(), STREAMING_CHUNK_SIZE);
+        aggregated_streamed_txs.append(&mut txs);
+    }
+    assert_eq!(aggregated_streamed_txs, expected_streamed_txs);
+
+    let commitment = batcher
+        .get_proposal_content(GetProposalContentInput { proposal_id: PROPOSAL_ID })
+        .await
+        .unwrap();
+    assert_eq!(
+        commitment,
+        GetProposalContentResponse {
+            content: GetProposalContent::Finished {
+                id: proposal_commitment(),
+                final_n_executed_txs: BlockExecutionArtifacts::create_for_testing()
+                    .final_n_executed_txs
+            }
+        }
+    );
+
+    let exhausted =
+        batcher.get_proposal_content(GetProposalContentInput { proposal_id: PROPOSAL_ID }).await;
+    assert_matches!(exhausted, Err(BatcherError::ProposalNotFound { .. }));
+
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 1, 1, 0, 0);
+}
+
+#[rstest]
+#[tokio::test]
+async fn get_height() {
+    let mut storage_reader = MockBatcherStorageReaderTrait::new();
+    storage_reader.expect_height().returning(|| Ok(INITIAL_HEIGHT));
+
+    let batcher = create_batcher(MockDependencies { storage_reader, ..Default::default() }).await;
+
+    let result = batcher.get_height().await.unwrap();
+    assert_eq!(result, GetHeightResponse { height: INITIAL_HEIGHT });
+}
+
+#[rstest]
+#[tokio::test]
+async fn propose_block_without_retrospective_block_hash() {
+    let mut storage_reader = MockBatcherStorageReaderTrait::new();
+    storage_reader
+        .expect_height()
+        .returning(|| Ok(BlockNumber(constants::STORED_BLOCK_HASH_BUFFER)));
+
+    let mut batcher =
+        create_batcher(MockDependencies { storage_reader, ..Default::default() }).await;
+
+    batcher
+        .start_height(StartHeightInput { height: BlockNumber(constants::STORED_BLOCK_HASH_BUFFER) })
+        .await
+        .unwrap();
+    let result = batcher.propose_block(propose_block_input(PROPOSAL_ID)).await;
+
+    assert_matches!(result, Err(BatcherError::MissingRetrospectiveBlockHash));
+}
+
+#[rstest]
+#[tokio::test]
+async fn get_content_from_unknown_proposal() {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+
+    let get_proposal_content_input = GetProposalContentInput { proposal_id: PROPOSAL_ID };
+    let result = batcher.get_proposal_content(get_proposal_content_input).await;
+    assert_eq!(result, Err(BatcherError::ProposalNotFound { proposal_id: PROPOSAL_ID }));
+}
+
+#[rstest]
+#[tokio::test]
+async fn consecutive_proposal_generation_success() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut block_builder_factory = MockBlockBuilderFactoryTrait::new();
+    for _ in 0..2 {
+        mock_create_builder_for_propose_block(
+            &mut block_builder_factory,
+            vec![],
+            Ok(BlockExecutionArtifacts::create_for_testing()),
+        );
+        mock_create_builder_for_validate_block(
+            &mut block_builder_factory,
+            Ok(BlockExecutionArtifacts::create_for_testing()),
+        );
+    }
+    let mut l1_provider_client = MockL1ProviderClient::new();
+    l1_provider_client.expect_start_block().times(4).returning(|_, _| Ok(()));
+    let mut batcher = create_batcher(MockDependencies {
+        block_builder_factory,
+        l1_provider_client,
+        ..Default::default()
+    })
+    .await;
+
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+
+    // Make sure we can generate 4 consecutive proposals.
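+    // Each iteration runs one propose flow (ProposalId(2 * i)) followed by one validate flow
+    // (ProposalId(2 * i + 1)), so two iterations give the 4 proposals.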
+    for i in 0..2 {
+        batcher.propose_block(propose_block_input(ProposalId(2 * i))).await.unwrap();
+        batcher.await_active_proposal(DUMMY_FINAL_N_EXECUTED_TXS).await.unwrap();
+
+        batcher.validate_block(validate_block_input(ProposalId(2 * i + 1))).await.unwrap();
+        let finish_proposal = SendProposalContentInput {
+            proposal_id: ProposalId(2 * i + 1),
+            content: SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+        };
+        batcher.send_proposal_content(finish_proposal).await.unwrap();
+        batcher.await_active_proposal(DUMMY_FINAL_N_EXECUTED_TXS).await.unwrap();
+    }
+
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 4, 4, 0, 0);
+}
+
+#[rstest]
+#[tokio::test]
+async fn concurrent_proposals_generation_fail() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut block_builder_factory = MockBlockBuilderFactoryTrait::new();
+    // Expecting the block builder factory to be called twice.
+    for _ in 0..2 {
+        mock_create_builder_for_validate_block(
+            &mut block_builder_factory,
+            Ok(BlockExecutionArtifacts::create_for_testing()),
+        );
+    }
+    let mut batcher = start_batcher_with_active_validate(block_builder_factory).await;
+
+    // Make sure another proposal can't be generated while the first one is still active.
+    let result = batcher.propose_block(propose_block_input(ProposalId(1))).await;
+
+    assert_matches!(result, Err(BatcherError::AnotherProposalInProgress { .. }));
+
+    // Finish the first proposal.
+    batcher
+        .send_proposal_content(SendProposalContentInput {
+            proposal_id: ProposalId(0),
+            content: SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+        })
+        .await
+        .unwrap();
+    batcher.await_active_proposal(DUMMY_FINAL_N_EXECUTED_TXS).await.unwrap();
+
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 2, 1, 1, 0);
+}
+
+#[rstest]
+#[tokio::test]
+async fn proposal_startup_failure_allows_new_proposals() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut block_builder_factory = MockBlockBuilderFactoryTrait::new();
+    mock_create_builder_for_validate_block(
+        &mut block_builder_factory,
+        Ok(BlockExecutionArtifacts::create_for_testing()),
+    );
+    let mut l1_provider_client = MockL1ProviderClient::new();
+    let error = L1ProviderClientError::L1ProviderError(L1ProviderError::UnexpectedHeight {
+        expected_height: BlockNumber(1),
+        got: BlockNumber(0),
+    });
+    l1_provider_client.expect_start_block().once().return_once(|_, _| Err(error));
+    l1_provider_client.expect_start_block().once().return_once(|_, _| Ok(()));
+    let mut batcher = create_batcher(MockDependencies {
+        block_builder_factory,
+        l1_provider_client,
+        ..Default::default()
+    })
+    .await;
+
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+
+    batcher
+        .propose_block(propose_block_input(ProposalId(0)))
+        .await
+        .expect_err("Expected to fail because of the first L1ProviderClient error");
+
+    batcher.validate_block(validate_block_input(ProposalId(1))).await.expect("Expected to succeed");
+    batcher
+        .send_proposal_content(SendProposalContentInput {
+            proposal_id: ProposalId(1),
+            content: SendProposalContent::Finish(DUMMY_FINAL_N_EXECUTED_TXS),
+        })
+        .await
+        .unwrap();
+    batcher.await_active_proposal(DUMMY_FINAL_N_EXECUTED_TXS).await.unwrap();
+
+    let metrics = recorder.handle().render();
+    assert_proposal_metrics(&metrics, 2, 1, 1, 0);
+}
+
+#[rstest]
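+// A synced block bypasses local block building: its state diff is committed straight to
+// storage, and the mempool and L1 provider are notified so they advance to the new height.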
+#[tokio::test]
+async fn add_sync_block() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let l1_transaction_hashes = test_tx_hashes();
+    let mut mock_dependencies = MockDependencies::default();
+
+    mock_dependencies
+        .storage_writer
+        .expect_commit_proposal()
+        .times(1)
+        .with(eq(INITIAL_HEIGHT), eq(test_state_diff()))
+        .returning(|_, _| Ok(()));
+
+    mock_dependencies
+        .mempool_client
+        .expect_commit_block()
+        .times(1)
+        .with(eq(CommitBlockArgs {
+            address_to_nonce: test_contract_nonces(),
+            rejected_tx_hashes: [].into(),
+        }))
+        .returning(|_| Ok(()));
+
+    mock_dependencies
+        .l1_provider_client
+        .expect_commit_block()
+        .times(1)
+        .with(eq(l1_transaction_hashes.clone()), eq(IndexSet::new()), eq(INITIAL_HEIGHT))
+        .returning(|_, _, _| Ok(()));
+
+    let mut batcher = create_batcher(mock_dependencies).await;
+
+    let n_synced_transactions = l1_transaction_hashes.len();
+
+    let sync_block = SyncBlock {
+        block_header_without_hash: BlockHeaderWithoutHash {
+            block_number: INITIAL_HEIGHT,
+            ..Default::default()
+        },
+        state_diff: test_state_diff(),
+        l1_transaction_hashes: l1_transaction_hashes.into_iter().collect(),
+        ..Default::default()
+    };
+    batcher.add_sync_block(sync_block).await.unwrap();
+    let metrics = recorder.handle().render();
+    assert_eq!(
+        STORAGE_HEIGHT.parse_numeric_metric::<u64>(&metrics),
+        Some(INITIAL_HEIGHT.unchecked_next().0)
+    );
+    assert_eq!(LAST_SYNCED_BLOCK.parse_numeric_metric::<u64>(&metrics), Some(INITIAL_HEIGHT.0));
+    assert_eq!(
+        SYNCED_TRANSACTIONS.parse_numeric_metric::<usize>(&metrics),
+        Some(n_synced_transactions)
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn add_sync_block_mismatch_block_number() {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+
+    let sync_block = SyncBlock {
+        block_header_without_hash: BlockHeaderWithoutHash {
+            block_number: INITIAL_HEIGHT.unchecked_next(),
+            ..Default::default()
+        },
+        ..Default::default()
+    };
+    let result = batcher.add_sync_block(sync_block).await;
+    assert_eq!(
+        result,
+        Err(BatcherError::StorageHeightMarkerMismatch {
+            marker_height: BlockNumber(3),
+            requested_height: BlockNumber(4)
+        })
+    )
+}
+
+#[tokio::test]
+async fn revert_block() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut mock_dependencies = MockDependencies::default();
+
+    mock_dependencies
+        .storage_writer
+        .expect_revert_block()
+        .times(1)
+        .with(eq(LATEST_BLOCK_IN_STORAGE))
+        .returning(|_| ());
+
+    let mut batcher = create_batcher(mock_dependencies).await;
+
+    let metrics = recorder.handle().render();
+    assert_eq!(STORAGE_HEIGHT.parse_numeric_metric::<u64>(&metrics), Some(INITIAL_HEIGHT.0));
+
+    let revert_input = RevertBlockInput { height: LATEST_BLOCK_IN_STORAGE };
+    batcher.revert_block(revert_input).await.unwrap();
+
+    let metrics = recorder.handle().render();
+    assert_eq!(STORAGE_HEIGHT.parse_numeric_metric::<u64>(&metrics), Some(INITIAL_HEIGHT.0 - 1));
+    assert_eq!(REVERTED_BLOCKS.parse_numeric_metric::<u64>(&metrics), Some(1));
+}
+
+#[tokio::test]
+async fn revert_block_mismatch_block_number() {
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+
+    let revert_input = RevertBlockInput { height: INITIAL_HEIGHT };
+    let result = batcher.revert_block(revert_input).await;
+    assert_eq!(
+        result,
+        Err(BatcherError::StorageHeightMarkerMismatch {
+            marker_height: BlockNumber(3),
+            requested_height: BlockNumber(3)
+        })
+    )
+}
+
+#[tokio::test]
+async fn revert_block_empty_storage() {
+    let mut storage_reader = MockBatcherStorageReaderTrait::new();
+    storage_reader.expect_height().returning(|| Ok(BlockNumber(0)));
+
+    let mock_dependencies = MockDependencies { storage_reader, ..Default::default() };
+    let mut batcher = create_batcher(mock_dependencies).await;
+
+    let revert_input = RevertBlockInput { height: BlockNumber(0) };
+    let result = batcher.revert_block(revert_input).await;
+    assert_eq!(
+        result,
+        Err(BatcherError::StorageHeightMarkerMismatch {
+            marker_height: BlockNumber(0),
+            requested_height: BlockNumber(0)
+        })
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn decision_reached() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    let mut mock_dependencies = MockDependencies::default();
+    let expected_artifacts = BlockExecutionArtifacts::create_for_testing();
+
+    mock_dependencies
+        .mempool_client
+        .expect_commit_block()
+        .times(1)
+        .with(eq(CommitBlockArgs {
+            address_to_nonce: expected_artifacts.address_to_nonce(),
+            rejected_tx_hashes: expected_artifacts.execution_data.rejected_tx_hashes.clone(),
+        }))
+        .returning(|_| Ok(()));
+
+    mock_dependencies
+        .l1_provider_client
+        .expect_start_block()
+        .times(1)
+        .with(eq(SessionState::Propose), eq(INITIAL_HEIGHT))
+        .returning(|_, _| Ok(()));
+
+    mock_dependencies
+        .l1_provider_client
+        .expect_commit_block()
+        .times(1)
+        .with(eq(IndexSet::new()), eq(IndexSet::new()), eq(INITIAL_HEIGHT))
+        .returning(|_, _, _| Ok(()));
+
+    mock_dependencies
+        .storage_writer
+        .expect_commit_proposal()
+        .times(1)
+        .with(eq(INITIAL_HEIGHT), eq(expected_artifacts.thin_state_diff()))
+        .returning(|_, _| Ok(()));
+
+    mock_create_builder_for_propose_block(
+        &mut mock_dependencies.block_builder_factory,
+        vec![],
+        Ok(BlockExecutionArtifacts::create_for_testing()),
+    );
+
+    let decision_reached_response =
+        batcher_propose_and_commit_block(mock_dependencies).await.unwrap();
+
+    verify_decision_reached_response(&decision_reached_response, &expected_artifacts);
+
+    let metrics = recorder.handle().render();
+    assert_eq!(
+        STORAGE_HEIGHT.parse_numeric_metric::<u64>(&metrics),
+        Some(INITIAL_HEIGHT.unchecked_next().0)
+    );
+    assert_eq!(
+        BATCHED_TRANSACTIONS.parse_numeric_metric::<usize>(&metrics),
+        Some(expected_artifacts.execution_data.execution_infos.len())
+    );
+    assert_eq!(
+        REJECTED_TRANSACTIONS.parse_numeric_metric::<usize>(&metrics),
+        Some(expected_artifacts.execution_data.rejected_tx_hashes.len())
+    );
+    assert_eq!(
+        REVERTED_TRANSACTIONS.parse_numeric_metric::<usize>(&metrics),
+        Some(
+            expected_artifacts
+                .execution_data
+                .execution_infos
+                .values()
+                .filter(|info| info.revert_error.is_some())
+                .count(),
+        )
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn decision_reached_no_executed_proposal() {
+    let expected_error = BatcherError::ExecutedProposalNotFound { proposal_id: PROPOSAL_ID };
+
+    let mut batcher = create_batcher(MockDependencies::default()).await;
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+
+    let decision_reached_result =
+        batcher.decision_reached(DecisionReachedInput { proposal_id: PROPOSAL_ID }).await;
+    assert_eq!(decision_reached_result, Err(expected_error));
+}
+
+// Test that the batcher returns the execution_infos in the same order as returned from the
+// block_builder.
+// It is crucial that the execution_infos are ordered exactly like the transactions in the
+// block; Starknet relies on this ordering for correct execution.
+// This test together with [block_builder_test::test_execution_info_order] covers this requirement.
+#[tokio::test]
+async fn test_execution_info_order_is_kept() {
+    let mut mock_dependencies = MockDependencies::default();
+    mock_dependencies.l1_provider_client.expect_start_block().returning(|_, _| Ok(()));
+    mock_dependencies.mempool_client.expect_commit_block().returning(|_| Ok(()));
+    mock_dependencies.l1_provider_client.expect_commit_block().returning(|_, _, _| Ok(()));
+    mock_dependencies.storage_writer.expect_commit_proposal().returning(|_, _| Ok(()));
+
+    let block_builder_result = BlockExecutionArtifacts::create_for_testing();
+    // Check that the execution_infos were initiated properly for this test.
+    verify_indexed_execution_infos(&block_builder_result.execution_data.execution_infos);
+
+    mock_create_builder_for_propose_block(
+        &mut mock_dependencies.block_builder_factory,
+        vec![],
+        Ok(block_builder_result.clone()),
+    );
+
+    let decision_reached_response =
+        batcher_propose_and_commit_block(mock_dependencies).await.unwrap();
+
+    // Verify that the execution_infos are in the same order as returned from the block_builder.
+    let expected_execution_infos = block_builder_result.execution_data.execution_infos;
+    assert_eq!(decision_reached_response.central_objects.execution_infos, expected_execution_infos);
+}
+
+#[tokio::test]
+async fn mempool_not_ready() {
+    let mut mock_dependencies = MockDependencies::default();
+    mock_dependencies.mempool_client.checkpoint();
+    mock_dependencies.mempool_client.expect_update_gas_price().returning(|_| {
+        Err(MempoolClientError::ClientError(ClientError::CommunicationFailure("".to_string())))
+    });
+    mock_dependencies
+        .mempool_client
+        .expect_commit_block()
+        .with(eq(CommitBlockArgs::default()))
+        .returning(|_| Ok(()));
+    mock_dependencies.l1_provider_client.expect_start_block().returning(|_, _| Ok(()));
+
+    let mut batcher = create_batcher(mock_dependencies).await;
+    batcher.start_height(StartHeightInput { height: INITIAL_HEIGHT }).await.unwrap();
+    let result = batcher.propose_block(propose_block_input(PROPOSAL_ID)).await;
+    assert_eq!(result, Err(BatcherError::InternalError));
+}
+
+#[test]
+fn validate_batcher_config_failure() {
+    let config = BatcherConfig {
+        input_stream_content_buffer_size: 99,
+        block_builder_config: BlockBuilderConfig { n_concurrent_txs: 100, ..Default::default() },
+        ..Default::default()
+    };
+
+    let error = config.validate().unwrap_err();
+    assert!(
+        error
+            .to_string()
+            .contains("input_stream_content_buffer_size must be at least n_concurrent_txs")
+    );
+}
+
+#[rstest]
+#[case::communication_failure(
+    L1ProviderClientError::ClientError(ClientError::CommunicationFailure("L1 commit failed".to_string()))
+)]
+#[case::unexpected_height(
+    L1ProviderClientError::L1ProviderError(L1ProviderError::UnexpectedHeight {
+        expected_height: INITIAL_HEIGHT,
+        got: INITIAL_HEIGHT,
+    })
+)]
+#[tokio::test]
+async fn decision_reached_return_error_when_l1_commit_block_fails(
+    #[case] l1_error: L1ProviderClientError,
+) {
+    let mut mock_dependencies = MockDependencies::default();
+
+    mock_dependencies.l1_provider_client.expect_start_block().returning(|_, _| Ok(()));
+
+    mock_dependencies
+        .l1_provider_client
+        .expect_commit_block()
+        .times(1)
+        .returning(move |_, _, _| Err(l1_error.clone()));
+
+    mock_dependencies.storage_writer.expect_commit_proposal().returning(|_, _| Ok(()));
+
+    mock_dependencies.storage_writer.expect_revert_block().returning(|_| ());
+
+    mock_create_builder_for_propose_block(
+        &mut mock_dependencies.block_builder_factory,
+        vec![],
+        Ok(BlockExecutionArtifacts::create_for_testing()),
+    );
+
+    let result = batcher_propose_and_commit_block(mock_dependencies).await;
+    assert!(result.is_err());
+}
diff --git a/crates/apollo_batcher/src/block_builder.rs b/crates/apollo_batcher/src/block_builder.rs
new file mode 100644
index 00000000000..e9f61deb25b
--- /dev/null
+++ b/crates/apollo_batcher/src/block_builder.rs
@@ -0,0 +1,769 @@
+use std::cmp::min;
+use std::collections::{BTreeMap, HashMap, HashSet};
+use std::sync::Arc;
+
+use apollo_batcher_types::batcher_types::ProposalCommitment;
+use apollo_class_manager_types::transaction_converter::{
+    TransactionConverter,
+    TransactionConverterError,
+    TransactionConverterResult,
+    TransactionConverterTrait,
+};
+use apollo_class_manager_types::SharedClassManagerClient;
+use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use apollo_infra_utils::tracing::LogCompatibleToStringExt;
+use apollo_state_reader::papyrus_state::{ClassReader, PapyrusReader};
+use apollo_storage::StorageReader;
+use async_trait::async_trait;
+use blockifier::blockifier::concurrent_transaction_executor::ConcurrentTransactionExecutor;
+use blockifier::blockifier::config::WorkerPoolConfig;
+use blockifier::blockifier::transaction_executor::{
+    BlockExecutionSummary,
+    TransactionExecutionOutput,
+    TransactionExecutorError as BlockifierTransactionExecutorError,
+    TransactionExecutorResult,
+};
+use blockifier::blockifier_versioned_constants::{VersionedConstants, VersionedConstantsOverrides};
+use blockifier::bouncer::{BouncerConfig, BouncerWeights, CasmHashComputationData};
+use blockifier::concurrency::worker_pool::WorkerPool;
+use blockifier::context::{BlockContext, ChainInfo};
+use blockifier::state::cached_state::{CachedState, CommitmentStateDiff};
+use blockifier::state::contract_class_manager::ContractClassManager;
+use blockifier::state::errors::StateError;
+use blockifier::state::state_reader_and_contract_manager::StateReaderAndContractManager;
+use blockifier::transaction::objects::TransactionExecutionInfo;
+use blockifier::transaction::transaction_execution::Transaction as BlockifierTransaction;
+use indexmap::{IndexMap, IndexSet};
+#[cfg(test)]
+use mockall::automock;
+use serde::{Deserialize, Serialize};
+use starknet_api::block::{BlockHashAndNumber, BlockInfo};
+use starknet_api::block_hash::state_diff_hash::calculate_state_diff_hash;
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::core::{ContractAddress, Nonce};
+use starknet_api::execution_resources::GasAmount;
+use starknet_api::state::ThinStateDiff;
+use starknet_api::transaction::TransactionHash;
+use thiserror::Error;
+use tokio::sync::{Mutex, MutexGuard};
+use tracing::{debug, error, info, trace, warn};
+
+use crate::block_builder::FailOnErrorCause::L1HandlerTransactionValidationFailed;
+use crate::cende_client_types::{StarknetClientStateDiff, StarknetClientTransactionReceipt};
+use crate::metrics::FULL_BLOCKS;
+use crate::pre_confirmed_block_writer::{CandidateTxSender, PreconfirmedTxSender};
+use crate::transaction_executor::TransactionExecutorTrait;
+use crate::transaction_provider::{TransactionProvider, TransactionProviderError};
+
+#[derive(Debug, Error)]
+pub enum BlockBuilderError {
+    #[error(transparent)]
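+    // `transparent` forwards Display and source() to the wrapped error, so callers see the
+    // underlying error message unchanged.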
+    BlockifierStateError(#[from] StateError),
+    #[error(transparent)]
+    ExecutorError(#[from] BlockifierTransactionExecutorError),
+    #[error(transparent)]
+    GetTransactionError(#[from] TransactionProviderError),
+    #[error(transparent)]
+    StreamTransactionsError(
+        #[from] tokio::sync::mpsc::error::SendError<InternalConsensusTransaction>,
+    ),
+    #[error(transparent)]
+    FailOnError(FailOnErrorCause),
+    #[error("The block builder was aborted.")]
+    Aborted,
+    #[error(transparent)]
+    TransactionConverterError(#[from] TransactionConverterError),
+}
+
+pub type BlockBuilderResult<T> = Result<T, BlockBuilderError>;
+
+#[derive(Debug, Error)]
+pub enum FailOnErrorCause {
+    #[error("Block is full")]
+    BlockFull,
+    #[error("Deadline has been reached")]
+    DeadlineReached,
+    #[error("Transaction failed: {0}")]
+    TransactionFailed(BlockifierTransactionExecutorError),
+    #[error("L1 Handler transaction validation failed")]
+    L1HandlerTransactionValidationFailed(TransactionProviderError),
+}
+
+enum AddTxsToExecutorResult {
+    NoNewTxs,
+    NewTxs,
+}
+
+#[cfg_attr(test, derive(Clone))]
+#[derive(Debug, PartialEq)]
+pub struct BlockExecutionArtifacts {
+    // Note: The execution_infos must be ordered to match the order of the transactions in the
+    // block.
+    pub execution_data: BlockTransactionExecutionData,
+    pub commitment_state_diff: CommitmentStateDiff,
+    pub compressed_state_diff: Option<CommitmentStateDiff>,
+    pub bouncer_weights: BouncerWeights,
+    pub l2_gas_used: GasAmount,
+    pub casm_hash_computation_data_sierra_gas: CasmHashComputationData,
+    pub casm_hash_computation_data_proving_gas: CasmHashComputationData,
+    // The number of transactions executed by the proposer out of the transactions that were sent.
+    // This value includes rejected transactions.
+    pub final_n_executed_txs: usize,
+}
+
+impl BlockExecutionArtifacts {
+    pub fn address_to_nonce(&self) -> HashMap<ContractAddress, Nonce> {
+        HashMap::from_iter(
+            self.commitment_state_diff
+                .address_to_nonce
+                .iter()
+                .map(|(address, nonce)| (*address, *nonce)),
+        )
+    }
+
+    pub fn tx_hashes(&self) -> HashSet<TransactionHash> {
+        HashSet::from_iter(self.execution_data.execution_infos.keys().copied())
+    }
+
+    pub fn thin_state_diff(&self) -> ThinStateDiff {
+        // TODO(Ayelet): Remove the clones.
+        let commitment_state_diff = self.commitment_state_diff.clone();
+        ThinStateDiff {
+            deployed_contracts: commitment_state_diff.address_to_class_hash,
+            storage_diffs: commitment_state_diff.storage_updates,
+            declared_classes: commitment_state_diff.class_hash_to_compiled_class_hash,
+            nonces: commitment_state_diff.address_to_nonce,
+            // TODO(AlonH): Remove this when the structure of storage diffs changes.
+            deprecated_declared_classes: Vec::new(),
+        }
+    }
+
+    pub fn commitment(&self) -> ProposalCommitment {
+        ProposalCommitment {
+            state_diff_commitment: calculate_state_diff_hash(&self.thin_state_diff()),
+        }
+    }
+}
+
+/// The BlockBuilderTrait is responsible for building a new block from transactions provided by the
+/// tx_provider. Block building stops when the deadline is reached.
+/// Transactions that are added to the block are streamed to the output_content_sender.
+#[cfg_attr(test, automock)]
+#[async_trait]
+pub trait BlockBuilderTrait: Send {
+    async fn build_block(&mut self) -> BlockBuilderResult<BlockExecutionArtifacts>;
+}
+
+pub struct BlockBuilderExecutionParams {
+    pub deadline: tokio::time::Instant,
+    pub is_validator: bool,
+}
+
+pub struct BlockBuilder {
+    // TODO(Yael 14/10/2024): make the executor thread safe and delete this mutex.
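+    // Only one task is expected to touch the executor at a time; `lock_executor` below relies
+    // on this and panics (via `try_lock`) if the lock is contended.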
+    executor: Arc<Mutex<dyn TransactionExecutorTrait>>,
+    tx_provider: Box<dyn TransactionProvider>,
+    output_content_sender:
+        Option<tokio::sync::mpsc::UnboundedSender<InternalConsensusTransaction>>,
+    /// The senders are utilized only during block proposal and not during block validation.
+    candidate_tx_sender: Option<CandidateTxSender>,
+    pre_confirmed_tx_sender: Option<PreconfirmedTxSender>,
+    abort_signal_receiver: tokio::sync::oneshot::Receiver<()>,
+    transaction_converter: TransactionConverter,
+    /// The number of transactions whose execution is completed.
+    n_executed_txs: usize,
+    /// The transactions whose execution started.
+    block_txs: Vec<InternalConsensusTransaction>,
+    execution_data: BlockTransactionExecutionData,
+
+    /// Parameters to configure the block builder behavior.
+    n_concurrent_txs: usize,
+    tx_polling_interval_millis: u64,
+    execution_params: BlockBuilderExecutionParams,
+}
+
+impl BlockBuilder {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        executor: impl TransactionExecutorTrait + 'static,
+        tx_provider: Box<dyn TransactionProvider>,
+        output_content_sender: Option<
+            tokio::sync::mpsc::UnboundedSender<InternalConsensusTransaction>,
+        >,
+        candidate_tx_sender: Option<CandidateTxSender>,
+        pre_confirmed_tx_sender: Option<PreconfirmedTxSender>,
+        abort_signal_receiver: tokio::sync::oneshot::Receiver<()>,
+        transaction_converter: TransactionConverter,
+        n_concurrent_txs: usize,
+        tx_polling_interval_millis: u64,
+        execution_params: BlockBuilderExecutionParams,
+    ) -> Self {
+        let executor = Arc::new(Mutex::new(executor));
+        Self {
+            executor,
+            tx_provider,
+            output_content_sender,
+            candidate_tx_sender,
+            pre_confirmed_tx_sender,
+            abort_signal_receiver,
+            transaction_converter,
+            n_executed_txs: 0,
+            block_txs: Vec::new(),
+            execution_data: BlockTransactionExecutionData::default(),
+            n_concurrent_txs,
+            tx_polling_interval_millis,
+            execution_params,
+        }
+    }
+}
+
+#[async_trait]
+impl BlockBuilderTrait for BlockBuilder {
+    async fn build_block(&mut self) -> BlockBuilderResult<BlockExecutionArtifacts> {
+        let res = self.build_block_inner().await;
+        if res.is_err() {
+            self.executor.lock().await.abort_block();
+        }
+        res
+    }
+}
+
+impl BlockBuilder {
+    async fn build_block_inner(&mut self) -> BlockBuilderResult<BlockExecutionArtifacts> {
+        let mut final_n_executed_txs: Option<usize> = None;
+        while !self.finished_block_txs(final_n_executed_txs) {
+            if tokio::time::Instant::now() >= self.execution_params.deadline {
+                info!("Block builder deadline reached.");
+                if self.execution_params.is_validator {
+                    return Err(BlockBuilderError::FailOnError(FailOnErrorCause::DeadlineReached));
+                }
+                break;
+            }
+            if final_n_executed_txs.is_none() {
+                if let Some(res) = self.tx_provider.get_final_n_executed_txs().await {
+                    info!("Received final number of transactions in block proposal: {res}.");
+                    final_n_executed_txs = Some(res);
+                }
+            }
+            if self.abort_signal_receiver.try_recv().is_ok() {
+                info!("Received abort signal. Aborting block builder.");
+                return Err(BlockBuilderError::Aborted);
+            }
+
+            self.handle_executed_txs().await?;
+
+            // Check if the block is full. This is only relevant in propose mode.
+            // In validate mode, this is ignored and we simply wait for the proposer to send the
+            // final number of transactions in the block.
+            if !self.execution_params.is_validator && lock_executor(&self.executor).is_done() {
+                // Call `handle_executed_txs()` once more to get the last results.
+                self.handle_executed_txs().await?;
+                info!("Block is full.");
+                FULL_BLOCKS.increment(1);
+                break;
+            }
+
+            match self.add_txs_to_executor().await? {
+                AddTxsToExecutorResult::NoNewTxs => self.sleep().await,
+                AddTxsToExecutorResult::NewTxs => {}
+            }
+        }
+
+        // The final number of transactions to consider for the block.
+        // Proposer: this is the number of transactions that were executed.
+        // Validator: the number of transactions we got from the proposer.
+        let final_n_executed_txs_nonopt = if self.execution_params.is_validator {
+            final_n_executed_txs.expect("final_n_executed_txs must be set in validate mode.")
+        } else {
+            assert!(
+                final_n_executed_txs.is_none(),
+                "final_n_executed_txs must be None in propose mode."
+            );
+            self.n_executed_txs
+        };
+
+        info!(
+            "Finished building block. Started executing {} transactions. Finished executing {} \
+             transactions. Final number of transactions (as set by the proposer): {}.",
+            self.block_txs.len(),
+            self.n_executed_txs,
+            final_n_executed_txs_nonopt,
+        );
+
+        // Move a clone of the executor into the closure.
+        let executor = self.executor.clone();
+        let block_summary = tokio::task::spawn_blocking(move || {
+            lock_executor(&executor).close_block(final_n_executed_txs_nonopt)
+        })
+        .await
+        .expect("Failed to spawn blocking executor task.")?;
+
+        let BlockExecutionSummary {
+            state_diff,
+            compressed_state_diff,
+            bouncer_weights,
+            casm_hash_computation_data_sierra_gas,
+            casm_hash_computation_data_proving_gas,
+        } = block_summary;
+        let mut execution_data = std::mem::take(&mut self.execution_data);
+        if let Some(final_n_executed_txs) = final_n_executed_txs {
+            // Remove the transactions that were executed, but eventually not included in the block.
+            // This can happen if the proposer sends some transactions but closes the block before
+            // including them, while the validator already executed those transactions.
+            let remove_tx_hashes: Vec<TransactionHash> =
+                self.block_txs[final_n_executed_txs..].iter().map(|tx| tx.tx_hash()).collect();
+            execution_data.remove_last_txs(&remove_tx_hashes);
+        }
+        let l2_gas_used = execution_data.l2_gas_used();
+        Ok(BlockExecutionArtifacts {
+            execution_data,
+            commitment_state_diff: state_diff,
+            compressed_state_diff,
+            bouncer_weights,
+            l2_gas_used,
+            casm_hash_computation_data_sierra_gas,
+            casm_hash_computation_data_proving_gas,
+            final_n_executed_txs: final_n_executed_txs_nonopt,
+        })
+    }
+
+    /// Returns the number of transactions that are currently being executed by the executor.
+    fn n_txs_in_progress(&self) -> usize {
+        self.block_txs.len() - self.n_executed_txs
+    }
+
+    /// Returns `true` if all the txs in the block were executed. This function always returns
+    /// `false` in propose mode.
+    fn finished_block_txs(&self, final_n_executed_txs: Option<usize>) -> bool {
+        if let Some(final_n_executed_txs) = final_n_executed_txs {
+            self.n_executed_txs >= final_n_executed_txs
+        } else {
+            // final_n_executed_txs is not known yet, so the block is not finished.
+            false
+        }
+    }
+
+    /// Adds new transactions (if there are any) from `tx_provider` to the executor.
+    ///
+    /// Returns whether any new transactions were added.
+    async fn add_txs_to_executor(&mut self) -> BlockBuilderResult<AddTxsToExecutorResult> {
+        // Restrict the number of transactions to fetch such that the number of transactions in
+        // progress is at most `n_concurrent_txs`.
+        let n_txs_to_fetch =
+            self.n_concurrent_txs - min(self.n_txs_in_progress(), self.n_concurrent_txs);
+
+        if n_txs_to_fetch == 0 {
+            return Ok(AddTxsToExecutorResult::NoNewTxs);
+        }
+
+        let next_txs = match self.tx_provider.get_txs(n_txs_to_fetch).await {
+            Err(e @ TransactionProviderError::L1HandlerTransactionValidationFailed { .. })
+                if self.execution_params.is_validator =>
+            {
+                warn!("Failed to validate L1 Handler transaction: {:?}", e);
+                return Err(BlockBuilderError::FailOnError(L1HandlerTransactionValidationFailed(
+                    e,
+                )));
+            }
+            Err(err) => {
+                error!("Failed to get transactions from the transaction provider: {:?}", err);
+                return Err(err.into());
+            }
+            Ok(result) => result,
+        };
+
+        if next_txs.is_empty() {
+            return Ok(AddTxsToExecutorResult::NoNewTxs);
+        }
+
+        let n_txs = next_txs.len();
+        debug!("Got {} transactions from the transaction provider.", n_txs);
+
+        self.send_candidate_txs(&next_txs);
+
+        self.block_txs.extend(next_txs.iter().cloned());
+
+        let tx_convert_futures = next_txs.iter().map(|tx| async {
+            convert_to_executable_blockifier_tx(&self.transaction_converter, tx.clone()).await
+        });
+        let executor_input_chunk = futures::future::try_join_all(tx_convert_futures).await?;
+
+        // Start the execution of the transactions on the worker pool.
+        info!("Starting execution of {} transactions.", n_txs);
+        lock_executor(&self.executor).add_txs_to_block(executor_input_chunk.as_slice());
+
+        if let Some(output_content_sender) = &self.output_content_sender {
+            // Send the transactions to the validators.
+            // Only reached in proposal flow.
+            for tx in next_txs.into_iter() {
+                output_content_sender.send(tx)?;
+            }
+        }
+
+        Ok(AddTxsToExecutorResult::NewTxs)
+    }
+
+    /// Handles the transactions that were executed so far by the executor.
+    async fn handle_executed_txs(&mut self) -> BlockBuilderResult<()> {
+        let results = lock_executor(&self.executor).get_new_results();
+
+        if results.is_empty() {
+            return Ok(());
+        }
+
+        info!("Finished execution of {} transactions.", results.len());
+
+        let old_n_executed_txs = self.n_executed_txs;
+        self.n_executed_txs += results.len();
+
+        collect_execution_results_and_stream_txs(
+            &self.block_txs[old_n_executed_txs..self.n_executed_txs],
+            results,
+            &mut self.execution_data,
+            &self.pre_confirmed_tx_sender,
+        )
+        .await
+    }
+
+    fn send_candidate_txs(&mut self, next_tx_chunk: &[InternalConsensusTransaction]) {
+        // Skip sending candidate transactions during the validation flow.
+        // In the validate flow, candidate_tx_sender is None.
+        let Some(candidate_tx_sender) = &self.candidate_tx_sender else {
+            return;
+        };
+
+        let txs = next_tx_chunk.to_vec();
+        let num_txs = txs.len();
+
+        trace!(
+            "Attempting to send a candidate transaction chunk with {num_txs} transactions to the \
+             PreconfirmedBlockWriter.",
+        );
+
+        match candidate_tx_sender.try_send(txs) {
+            Ok(_) => {
+                info!(
+                    "Successfully sent a candidate transaction chunk with {num_txs} transactions \
+                     to the PreconfirmedBlockWriter.",
+                );
+            }
+            // We continue with block building even if sending candidate transactions to
+            // the PreconfirmedBlockWriter fails, because it is not critical for the block
+            // building process.
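+            // `try_send` fails immediately (Full/Closed) instead of awaiting capacity, so a slow
+            // or dead writer cannot stall block building; the chunk is simply dropped.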
+            Err(err) => {
+                error!(
+                    "Failed to send a candidate transaction chunk with {num_txs} transactions to \
+                     the PreconfirmedBlockWriter: {:?}",
+                    err
+                );
+            }
+        }
+    }
+
+    async fn sleep(&mut self) {
+        tokio::time::sleep(tokio::time::Duration::from_millis(self.tx_polling_interval_millis))
+            .await;
+    }
+}
+
+fn lock_executor<'a>(
+    executor: &'a Arc<Mutex<dyn TransactionExecutorTrait>>,
+) -> MutexGuard<'a, dyn TransactionExecutorTrait> {
+    executor.try_lock().expect("Only a single task should use the executor.")
+}
+
+async fn convert_to_executable_blockifier_tx(
+    transaction_converter: &TransactionConverter,
+    tx: InternalConsensusTransaction,
+) -> TransactionConverterResult<BlockifierTransaction> {
+    let executable_tx =
+        transaction_converter.convert_internal_consensus_tx_to_executable_tx(tx).await?;
+    Ok(BlockifierTransaction::new_for_sequencing(executable_tx))
+}
+
+async fn collect_execution_results_and_stream_txs(
+    tx_chunk: &[InternalConsensusTransaction],
+    results: Vec<TransactionExecutorResult<TransactionExecutionOutput>>,
+    execution_data: &mut BlockTransactionExecutionData,
+    pre_confirmed_tx_sender: &Option,
+) -> BlockBuilderResult<()> {
+    assert!(
+        results.len() == tx_chunk.len(),
+        "The number of results must match the number of transactions."
+    );
+
+    for (input_tx, result) in tx_chunk.iter().zip(results.into_iter()) {
+        let optional_l1_handler_tx =
+            if let InternalConsensusTransaction::L1Handler(l1_handler_tx) = input_tx {
+                Some(l1_handler_tx.tx.clone())
+            } else {
+                None
+            };
+        let tx_hash = input_tx.tx_hash();
+
+        // Insert the tx_hash into the appropriate collection if it's an L1 handler transaction.
+        if let InternalConsensusTransaction::L1Handler(_) = input_tx {
+            let is_new_entry = execution_data.consumed_l1_handler_tx_hashes.insert(tx_hash);
+            // Even though the duplicate never makes it past the set insertion, it indicates a
+            // major, possibly reorg-producing bug, either in some batcher cache or in the L1
+            // provider.
+            assert!(is_new_entry, "Duplicate L1 handler transaction hash: {tx_hash}.");
+        }
+
+        match result {
+            Ok((tx_execution_info, state_maps)) => {
+                let (tx_index, duplicate_tx_hash) =
+                    execution_data.execution_infos.insert_full(tx_hash, tx_execution_info);
+                assert_eq!(duplicate_tx_hash, None, "Duplicate transaction: {tx_hash}.");
+
+                // Skip sending the pre-confirmed executed transactions, receipts and state diffs
+                // during the validation flow, where pre_confirmed_tx_sender is None.
+                if let Some(pre_confirmed_tx_sender) = pre_confirmed_tx_sender {
+                    let tx_receipt = StarknetClientTransactionReceipt::from((
+                        tx_hash,
+                        tx_index,
+                        // TODO(noamsp): Consider using tx_execution_info and moving the line that
+                        // consumes it below this (if it doesn't change functionality).
+                        &execution_data.execution_infos[&tx_hash],
+                        optional_l1_handler_tx,
+                    ));
+
+                    let tx_state_diff = StarknetClientStateDiff::from(state_maps).0;
+
+                    let result = pre_confirmed_tx_sender.try_send((
+                        input_tx.clone(),
+                        tx_receipt,
+                        tx_state_diff,
+                    ));
+                    if result.is_err() {
+                        // We continue with block building even if sending data to the
+                        // PreconfirmedBlockWriter fails, because it is not critical
+                        // for the block building process.
+ warn!("Sending data to preconfirmed block writer failed."); + } + } + } + Err(err) => { + info!( + "Transaction {} failed with error: {}.", + tx_hash, + err.log_compatible_to_string() + ); + let is_new_entry = execution_data.rejected_tx_hashes.insert(tx_hash); + assert!(is_new_entry, "Duplicate rejected transaction hash: {tx_hash}."); + } + } + } + + Ok(()) +} + +pub struct BlockMetadata { + pub block_info: BlockInfo, + pub retrospective_block_hash: Option, +} + +// Type definitions for the abort channel required to abort the block builder. +pub type AbortSignalSender = tokio::sync::oneshot::Sender<()>; +pub type BatcherWorkerPool = + Arc>>>; + +/// The BlockBuilderFactoryTrait is responsible for creating a new block builder. +#[cfg_attr(test, automock)] +pub trait BlockBuilderFactoryTrait: Send + Sync { + // TODO(noamsp): Investigate and remove this clippy warning. + #[allow(clippy::result_large_err, clippy::too_many_arguments)] + fn create_block_builder( + &self, + block_metadata: BlockMetadata, + execution_params: BlockBuilderExecutionParams, + tx_provider: Box, + output_content_sender: Option< + tokio::sync::mpsc::UnboundedSender, + >, + candidate_tx_sender: Option, + pre_confirmed_tx_sender: Option, + runtime: tokio::runtime::Handle, + ) -> BlockBuilderResult<(Box, AbortSignalSender)>; +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct BlockBuilderConfig { + pub chain_info: ChainInfo, + pub execute_config: WorkerPoolConfig, + pub bouncer_config: BouncerConfig, + pub n_concurrent_txs: usize, + pub tx_polling_interval_millis: u64, + pub versioned_constants_overrides: VersionedConstantsOverrides, +} + +impl Default for BlockBuilderConfig { + fn default() -> Self { + Self { + // TODO(AlonH): update the default values once the actual values are known. + chain_info: ChainInfo::default(), + execute_config: WorkerPoolConfig::default(), + bouncer_config: BouncerConfig::default(), + n_concurrent_txs: 100, + tx_polling_interval_millis: 1, + versioned_constants_overrides: VersionedConstantsOverrides::default(), + } + } +} + +impl SerializeConfig for BlockBuilderConfig { + fn dump(&self) -> BTreeMap { + let mut dump = prepend_sub_config_name(self.chain_info.dump(), "chain_info"); + dump.append(&mut prepend_sub_config_name(self.execute_config.dump(), "execute_config")); + dump.append(&mut prepend_sub_config_name(self.bouncer_config.dump(), "bouncer_config")); + dump.append(&mut BTreeMap::from([ser_param( + "n_concurrent_txs", + &self.n_concurrent_txs, + "Number of transactions in each request from the tx_provider.", + ParamPrivacyInput::Public, + )])); + dump.append(&mut BTreeMap::from([ser_param( + "tx_polling_interval_millis", + &self.tx_polling_interval_millis, + "Time to wait (in milliseconds) between transaction requests when the previous \ + request returned no transactions.", + ParamPrivacyInput::Public, + )])); + dump.append(&mut prepend_sub_config_name( + self.versioned_constants_overrides.dump(), + "versioned_constants_overrides", + )); + dump + } +} + +pub struct BlockBuilderFactory { + pub block_builder_config: BlockBuilderConfig, + pub storage_reader: StorageReader, + pub contract_class_manager: ContractClassManager, + pub class_manager_client: SharedClassManagerClient, + pub worker_pool: BatcherWorkerPool, +} + +impl BlockBuilderFactory { + // TODO(noamsp): Investigate and remove this clippy warning. 
+ #[allow(clippy::result_large_err)] + fn preprocess_and_create_transaction_executor( + &self, + block_metadata: BlockMetadata, + runtime: tokio::runtime::Handle, + ) -> BlockBuilderResult< + ConcurrentTransactionExecutor>, + > { + let height = block_metadata.block_info.block_number; + let block_builder_config = self.block_builder_config.clone(); + let versioned_constants = VersionedConstants::get_versioned_constants( + block_builder_config.versioned_constants_overrides, + ); + let block_context = BlockContext::new( + block_metadata.block_info, + block_builder_config.chain_info, + versioned_constants, + block_builder_config.bouncer_config, + ); + + let class_reader = Some(ClassReader { reader: self.class_manager_client.clone(), runtime }); + let papyrus_reader = + PapyrusReader::new_with_class_reader(self.storage_reader.clone(), height, class_reader); + let state_reader = StateReaderAndContractManager { + state_reader: papyrus_reader, + contract_class_manager: self.contract_class_manager.clone(), + }; + + let executor = ConcurrentTransactionExecutor::start_block( + state_reader, + block_context, + block_metadata.retrospective_block_hash, + self.worker_pool.clone(), + None, + )?; + + Ok(executor) + } +} + +impl BlockBuilderFactoryTrait for BlockBuilderFactory { + fn create_block_builder( + &self, + block_metadata: BlockMetadata, + execution_params: BlockBuilderExecutionParams, + tx_provider: Box, + output_content_sender: Option< + tokio::sync::mpsc::UnboundedSender, + >, + candidate_tx_sender: Option, + pre_confirmed_tx_sender: Option, + runtime: tokio::runtime::Handle, + ) -> BlockBuilderResult<(Box, AbortSignalSender)> { + let executor = self.preprocess_and_create_transaction_executor(block_metadata, runtime)?; + let (abort_signal_sender, abort_signal_receiver) = tokio::sync::oneshot::channel(); + let transaction_converter = TransactionConverter::new( + self.class_manager_client.clone(), + self.block_builder_config.chain_info.chain_id.clone(), + ); + let block_builder = Box::new(BlockBuilder::new( + executor, + tx_provider, + output_content_sender, + candidate_tx_sender, + pre_confirmed_tx_sender, + abort_signal_receiver, + transaction_converter, + self.block_builder_config.n_concurrent_txs, + self.block_builder_config.tx_polling_interval_millis, + execution_params, + )); + Ok((block_builder, abort_signal_sender)) + } +} + +/// Supplementary information for use by downstream services. +#[cfg_attr(test, derive(Clone))] +#[derive(Debug, Default, PartialEq)] +pub struct BlockTransactionExecutionData { + pub execution_infos: IndexMap, + pub rejected_tx_hashes: IndexSet, + pub consumed_l1_handler_tx_hashes: IndexSet, +} + +impl BlockTransactionExecutionData { + /// Removes the last txs with the given hashes from the execution data. + fn remove_last_txs(&mut self, tx_hashes: &[TransactionHash]) { + for tx_hash in tx_hashes.iter().rev() { + remove_last_map(&mut self.execution_infos, tx_hash); + remove_last_set(&mut self.rejected_tx_hashes, tx_hash); + remove_last_set(&mut self.consumed_l1_handler_tx_hashes, tx_hash); + } + } + + fn l2_gas_used(&self) -> GasAmount { + let mut res = GasAmount::ZERO; + for execution_info in self.execution_infos.values() { + res = + res.checked_add(execution_info.receipt.gas.l2_gas).expect("Total L2 gas overflow."); + } + + res + } +} + +/// Removes the tx_hash from the map, if it exists. +/// Verifies that the removed transaction is the last one in the map. 
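+/// A sketch of the invariant with hypothetical hashes: in an insertion-ordered map
+/// `{h0: e0, h1: e1}`, `swap_remove_full(&h1)` returns index 1, which equals `map.len()`
+/// (now 1) after the removal, so the assertion holds; removing `h0` first would yield
+/// index 0 != 1 and panic, catching out-of-order removals.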
+fn remove_last_map(map: &mut IndexMap, tx_hash: &TransactionHash) { + if let Some((idx, _, _)) = map.swap_remove_full(tx_hash) { + assert_eq!(idx, map.len(), "The removed txs must be the last ones."); + } +} + +/// Removes the tx_hash from the set, if it exists. +/// Verifies that the removed transaction is the last one in the set. +fn remove_last_set(set: &mut IndexSet, tx_hash: &TransactionHash) { + if let Some((idx, _)) = set.swap_remove_full(tx_hash) { + assert_eq!(idx, set.len(), "The removed txs must be the last ones."); + } +} diff --git a/crates/apollo_batcher/src/block_builder_test.rs b/crates/apollo_batcher/src/block_builder_test.rs new file mode 100644 index 00000000000..5c1fc5d4679 --- /dev/null +++ b/crates/apollo_batcher/src/block_builder_test.rs @@ -0,0 +1,1088 @@ +use std::sync::Arc; + +use apollo_class_manager_types::transaction_converter::TransactionConverter; +use apollo_class_manager_types::MockClassManagerClient; +use apollo_l1_provider_types::InvalidValidationStatus; +use apollo_l1_provider_types::InvalidValidationStatus::{ + AlreadyIncludedInProposedBlock, + AlreadyIncludedOnL2, + ConsumedOnL1OrUnknown, +}; +use assert_matches::assert_matches; +use blockifier::blockifier::transaction_executor::{ + BlockExecutionSummary, + TransactionExecutionOutput, + TransactionExecutorError, + TransactionExecutorResult, +}; +use blockifier::bouncer::{BouncerWeights, CasmHashComputationData}; +use blockifier::fee::fee_checks::FeeCheckError; +use blockifier::fee::receipt::TransactionReceipt; +use blockifier::state::cached_state::StateMaps; +use blockifier::state::errors::StateError; +use blockifier::transaction::objects::{RevertError, TransactionExecutionInfo}; +use blockifier::transaction::transaction_execution::Transaction as BlockifierTransaction; +use indexmap::{IndexMap, IndexSet}; +use itertools::chain; +use metrics_exporter_prometheus::PrometheusBuilder; +use mockall::predicate::eq; +use mockall::Sequence; +use pretty_assertions::assert_eq; +use rstest::rstest; +use starknet_api::consensus_transaction::InternalConsensusTransaction; +use starknet_api::execution_resources::{GasAmount, GasVector}; +use starknet_api::test_utils::CHAIN_ID_FOR_TESTS; +use starknet_api::transaction::fields::Fee; +use starknet_api::transaction::TransactionHash; +use starknet_api::tx_hash; +use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; + +use crate::block_builder::{ + BlockBuilder, + BlockBuilderError, + BlockBuilderExecutionParams, + BlockBuilderResult, + BlockBuilderTrait, + BlockExecutionArtifacts, + BlockTransactionExecutionData, + FailOnErrorCause, +}; +use crate::metrics::FULL_BLOCKS; +use crate::test_utils::{test_l1_handler_txs, test_txs}; +use crate::transaction_executor::MockTransactionExecutorTrait; +use crate::transaction_provider::TransactionProviderError::L1HandlerTransactionValidationFailed; +use crate::transaction_provider::{MockTransactionProvider, TransactionProviderError}; + +const BLOCK_GENERATION_DEADLINE_SECS: u64 = 1; +const BLOCK_GENERATION_LONG_DEADLINE_SECS: u64 = 5; +const TX_CHANNEL_SIZE: usize = 50; +const N_CONCURRENT_TXS: usize = 3; +const TX_POLLING_INTERVAL: u64 = 100; + +struct TestExpectations { + mock_transaction_executor: MockTransactionExecutorTrait, + mock_tx_provider: MockTransactionProvider, + expected_block_artifacts: BlockExecutionArtifacts, + expected_txs_output: Vec, + expected_full_blocks_metric: u64, +} + +fn output_channel() +-> (UnboundedSender, UnboundedReceiver) +{ + tokio::sync::mpsc::unbounded_channel() +} + +fn 
block_execution_artifacts( + execution_infos: IndexMap, + rejected_tx_hashes: IndexSet, + consumed_l1_handler_tx_hashes: IndexSet, + final_n_executed_txs: usize, +) -> BlockExecutionArtifacts { + let l2_gas_used = GasAmount(execution_infos.len().try_into().unwrap()); + BlockExecutionArtifacts { + execution_data: BlockTransactionExecutionData { + execution_infos, + rejected_tx_hashes, + consumed_l1_handler_tx_hashes, + }, + commitment_state_diff: Default::default(), + compressed_state_diff: Default::default(), + bouncer_weights: BouncerWeights { l1_gas: 100, ..BouncerWeights::empty() }, + // Each mock transaction uses 1 L2 gas so the total amount should be the number of txs. + l2_gas_used, + casm_hash_computation_data_sierra_gas: CasmHashComputationData::default(), + casm_hash_computation_data_proving_gas: CasmHashComputationData::default(), + final_n_executed_txs, + } +} + +// Filling the execution_info with some non-default values to make sure the block_builder uses them. +fn execution_info() -> TransactionExecutionInfo { + TransactionExecutionInfo { + revert_error: Some(RevertError::PostExecution(FeeCheckError::MaxFeeExceeded { + max_fee: Fee(100), + actual_fee: Fee(101), + })), + receipt: TransactionReceipt { + gas: GasVector { l2_gas: GasAmount(1), ..Default::default() }, + ..Default::default() + }, + ..Default::default() + } +} + +fn one_chunk_test_expectations() -> TestExpectations { + let input_txs = test_txs(0..3); + let block_size = input_txs.len(); + let (mock_transaction_executor, expected_block_artifacts) = + one_chunk_mock_executor(&input_txs, block_size, false); + + let mock_tx_provider = mock_tx_provider_limitless_calls(vec![input_txs.clone()]); + + TestExpectations { + mock_transaction_executor, + mock_tx_provider, + expected_block_artifacts, + expected_txs_output: input_txs, + expected_full_blocks_metric: 0, + } +} + +struct ExpectationHelper { + mock_transaction_executor: MockTransactionExecutorTrait, + seq: Sequence, +} + +impl ExpectationHelper { + fn new() -> Self { + Self { + mock_transaction_executor: MockTransactionExecutorTrait::new(), + seq: Sequence::new(), + } + } + + fn expect_add_txs_to_block(&mut self, input_txs: &[InternalConsensusTransaction]) { + let input_txs_cloned = input_txs.to_vec(); + self.mock_transaction_executor + .expect_add_txs_to_block() + .times(1) + .in_sequence(&mut self.seq) + .withf(move |blockifier_input| compare_tx_hashes(&input_txs_cloned, blockifier_input)) + .return_const(()); + } + + fn expect_get_new_results_with_results( + &mut self, + results: Vec>, + ) { + self.mock_transaction_executor + .expect_get_new_results() + .times(1) + .in_sequence(&mut self.seq) + .return_once(move || results); + } + + fn expect_successful_get_new_results(&mut self, n_txs: usize) { + self.expect_get_new_results_with_results( + (0..n_txs).map(|_| Ok((execution_info(), StateMaps::default()))).collect(), + ); + } + + fn expect_is_done(&mut self, is_done: bool) { + self.mock_transaction_executor + .expect_is_done() + .times(1) + .in_sequence(&mut self.seq) + .return_const(is_done); + } + + /// Adds the expectations required for a block whose deadline is reached. + /// For such a block, `get_new_results` and `is_done` will be called repeatedly until the + /// deadline is reached. 
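+    /// A minimal usage sketch (names from this file): a test queues its chunk-specific
+    /// expectations first and then ends with `helper.deadline_expectations();`, letting
+    /// the builder loop poll `get_new_results`/`is_done` until the deadline elapses.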
+ fn deadline_expectations(&mut self) { + self.mock_transaction_executor.expect_get_new_results().returning(Vec::new); + self.mock_transaction_executor.expect_is_done().return_const(false); + } +} + +fn one_chunk_mock_executor( + input_txs: &[InternalConsensusTransaction], + block_size: usize, + is_validator: bool, +) -> (MockTransactionExecutorTrait, BlockExecutionArtifacts) { + let mut helper = ExpectationHelper::new(); + + helper.expect_successful_get_new_results(0); + if !is_validator { + helper.expect_is_done(false); + } + helper.expect_add_txs_to_block(input_txs); + helper.expect_successful_get_new_results(block_size); + if !is_validator { + helper.expect_is_done(false); + } + helper.deadline_expectations(); + + let expected_block_artifacts = + set_close_block_expectations(&mut helper.mock_transaction_executor, block_size); + (helper.mock_transaction_executor, expected_block_artifacts) +} + +fn two_chunks_mock_executor( + is_validator: bool, +) -> ( + Vec, + Vec, + MockTransactionExecutorTrait, +) { + let input_txs = test_txs(0..6); + let first_chunk = input_txs[..N_CONCURRENT_TXS].to_vec(); + let second_chunk = input_txs[N_CONCURRENT_TXS..].to_vec(); + + let mut helper = ExpectationHelper::new(); + + helper.expect_successful_get_new_results(0); + if !is_validator { + helper.expect_is_done(false); + } + helper.expect_add_txs_to_block(&first_chunk); + helper.expect_successful_get_new_results(first_chunk.len()); + if !is_validator { + helper.expect_is_done(false); + } + helper.expect_add_txs_to_block(&second_chunk); + helper.expect_successful_get_new_results(second_chunk.len()); + if !is_validator { + helper.expect_is_done(false); + } + helper.deadline_expectations(); + + (first_chunk, second_chunk, helper.mock_transaction_executor) +} + +fn two_chunks_test_expectations() -> TestExpectations { + let (first_chunk, second_chunk, mut mock_transaction_executor) = + two_chunks_mock_executor(false); + let block_size = first_chunk.len() + second_chunk.len(); + + let expected_block_artifacts = + set_close_block_expectations(&mut mock_transaction_executor, block_size); + + let mock_tx_provider = + mock_tx_provider_limitless_calls(vec![first_chunk.clone(), second_chunk.clone()]); + + TestExpectations { + mock_transaction_executor, + mock_tx_provider, + expected_block_artifacts, + expected_txs_output: chain!(first_chunk.iter(), second_chunk.iter()).cloned().collect(), + expected_full_blocks_metric: 0, + } +} + +fn empty_block_test_expectations() -> TestExpectations { + let mut helper = ExpectationHelper::new(); + helper.deadline_expectations(); + helper.mock_transaction_executor.expect_add_txs_to_block().times(0); + + let expected_block_artifacts = + set_close_block_expectations(&mut helper.mock_transaction_executor, 0); + + let mock_tx_provider = mock_tx_provider_limitless_calls(vec![]); + + TestExpectations { + mock_transaction_executor: helper.mock_transaction_executor, + mock_tx_provider, + expected_block_artifacts, + expected_txs_output: vec![], + expected_full_blocks_metric: 0, + } +} + +fn block_full_test_expectations(before_is_done: bool) -> TestExpectations { + let input_txs = test_txs(0..3); + + let mut helper = ExpectationHelper::new(); + helper.expect_successful_get_new_results(0); + helper.expect_is_done(false); + helper.expect_add_txs_to_block(&input_txs); + // Only the first transaction fits in the block. 
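+    // `before_is_done` toggles whether the single fitting result is observed on the poll
+    // before `is_done` turns true or on the drain right after it; both orders must yield
+    // the same one-tx block.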
+ helper.expect_successful_get_new_results(if before_is_done { 1 } else { 0 }); + helper.expect_is_done(true); + helper.expect_successful_get_new_results(if before_is_done { 0 } else { 1 }); + + let mut mock_transaction_executor = helper.mock_transaction_executor; + let expected_block_artifacts = set_close_block_expectations(&mut mock_transaction_executor, 1); + + let mock_tx_provider = mock_tx_provider_limited_calls(vec![input_txs.clone()]); + + TestExpectations { + mock_transaction_executor, + mock_tx_provider, + expected_block_artifacts, + expected_txs_output: input_txs, + expected_full_blocks_metric: 1, + } +} + +fn mock_partial_transaction_execution( + first_chunk: &[InternalConsensusTransaction], + second_chunk: &[InternalConsensusTransaction], + n_completed_txs: usize, + is_validator: bool, +) -> MockTransactionExecutorTrait { + assert!(n_completed_txs <= first_chunk.len()); + let mut helper = ExpectationHelper::new(); + helper.expect_successful_get_new_results(0); + if !is_validator { + helper.expect_is_done(false); + } + helper.expect_add_txs_to_block(first_chunk); + if n_completed_txs > 0 { + helper.expect_successful_get_new_results(n_completed_txs); + if !is_validator { + helper.expect_is_done(false); + } + helper.expect_add_txs_to_block(second_chunk); + } + + // Do not return the results, simulating a deadline reached before the completion of the + // transaction execution. + helper.deadline_expectations(); + + helper.mock_transaction_executor +} + +fn test_expectations_partial_transaction_execution() -> TestExpectations { + let n_completed_txs = 1; + let input_txs = test_txs(0..N_CONCURRENT_TXS + n_completed_txs); + let first_chunk = input_txs[0..N_CONCURRENT_TXS].to_vec(); + // After the execution of the first transaction, one more transaction is fetched from the + // provider. 
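+    // (So the first chunk holds N_CONCURRENT_TXS txs and the second chunk holds the one
+    // extra tx that fits once a slot frees up.)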
+ let second_chunk = input_txs[N_CONCURRENT_TXS..].to_vec(); + let mut mock_transaction_executor = + mock_partial_transaction_execution(&first_chunk, &second_chunk, n_completed_txs, false); + + let expected_block_artifacts = + set_close_block_expectations(&mut mock_transaction_executor, n_completed_txs); + + let mock_tx_provider = mock_tx_provider_limited_calls(vec![first_chunk, second_chunk]); + + TestExpectations { + mock_transaction_executor, + mock_tx_provider, + expected_block_artifacts, + expected_txs_output: input_txs, + expected_full_blocks_metric: 0, + } +} + +fn transaction_failed_test_expectations() -> TestExpectations { + let n_txs = 6; + let input_invoke_txs = test_txs(0..3); + let input_l1_handler_txs = test_l1_handler_txs(3..n_txs); + let failed_tx_indices = [1, 4]; + let failed_tx_hashes: IndexSet = + failed_tx_indices.iter().map(|idx| tx_hash!(*idx)).collect(); + let consumed_l1_handler_tx_hashes: IndexSet<_> = + input_l1_handler_txs.iter().map(|tx| tx.tx_hash()).collect(); + let input_txs: Vec<_> = input_invoke_txs.iter().chain(&input_l1_handler_txs).cloned().collect(); + + let expected_txs_output: Vec<_> = + input_txs.iter().filter(|tx| !failed_tx_hashes.contains(&tx.tx_hash())).cloned().collect(); + + let mut helper = ExpectationHelper::new(); + helper.expect_successful_get_new_results(0); + helper.expect_is_done(false); + for start_idx in [0, 3] { + helper.expect_add_txs_to_block(&input_txs[start_idx..start_idx + 3]); + helper.expect_get_new_results_with_results( + (start_idx..start_idx + 3) + .map(|idx| { + if failed_tx_indices.contains(&idx) { + Err(TransactionExecutorError::StateError( + StateError::OutOfRangeContractAddress, + )) + } else { + Ok((execution_info(), StateMaps::default())) + } + }) + .collect(), + ); + helper.expect_is_done(false); + } + helper.deadline_expectations(); + + let execution_infos_mapping = + expected_txs_output.iter().map(|tx| (tx.tx_hash(), execution_info())).collect(); + + let expected_block_artifacts = block_execution_artifacts( + execution_infos_mapping, + failed_tx_hashes, + consumed_l1_handler_tx_hashes, + n_txs, + ); + let expected_block_artifacts_copy = expected_block_artifacts.clone(); + helper.mock_transaction_executor.expect_close_block().times(1).return_once(move |_| { + Ok(BlockExecutionSummary { + state_diff: expected_block_artifacts_copy.commitment_state_diff, + compressed_state_diff: None, + bouncer_weights: expected_block_artifacts_copy.bouncer_weights, + casm_hash_computation_data_sierra_gas: expected_block_artifacts_copy + .casm_hash_computation_data_sierra_gas, + casm_hash_computation_data_proving_gas: expected_block_artifacts_copy + .casm_hash_computation_data_proving_gas, + }) + }); + + let mock_tx_provider = + mock_tx_provider_limitless_calls(vec![input_invoke_txs, input_l1_handler_txs]); + + TestExpectations { + mock_transaction_executor: helper.mock_transaction_executor, + mock_tx_provider, + expected_block_artifacts, + expected_txs_output: input_txs, + expected_full_blocks_metric: 0, + } +} + +// Fill the executor outputs with some non-default values to make sure the block_builder uses +// them. 
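+// E.g. (hypothetical call): `block_builder_expected_output(3, 3)` yields artifacts with
+// execution infos for `tx_hash!(0)..=tx_hash!(2)`, empty rejected/consumed sets, and
+// `l2_gas_used == GasAmount(3)`, since each mock execution info burns 1 L2 gas.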
+fn block_builder_expected_output(
+    execution_info_len: usize,
+    final_n_executed_txs: usize,
+) -> BlockExecutionArtifacts {
+    let execution_info_len_u8 = u8::try_from(execution_info_len).unwrap();
+    let execution_infos_mapping =
+        (0..execution_info_len_u8).map(|i| (tx_hash!(i), execution_info())).collect();
+    block_execution_artifacts(
+        execution_infos_mapping,
+        Default::default(),
+        Default::default(),
+        final_n_executed_txs,
+    )
+}
+
+fn set_close_block_expectations(
+    mock_transaction_executor: &mut MockTransactionExecutorTrait,
+    block_size: usize,
+) -> BlockExecutionArtifacts {
+    let output_block_artifacts = block_builder_expected_output(block_size, block_size);
+    let output_block_artifacts_copy = output_block_artifacts.clone();
+    mock_transaction_executor.expect_close_block().times(1).return_once(move |_| {
+        Ok(BlockExecutionSummary {
+            state_diff: output_block_artifacts.commitment_state_diff,
+            compressed_state_diff: None,
+            bouncer_weights: output_block_artifacts.bouncer_weights,
+            casm_hash_computation_data_sierra_gas: output_block_artifacts
+                .casm_hash_computation_data_sierra_gas,
+            casm_hash_computation_data_proving_gas: output_block_artifacts
+                .casm_hash_computation_data_proving_gas,
+        })
+    });
+    output_block_artifacts_copy
+}
+
+/// Creates a mock tx provider that returns the given input chunks, one per `get_txs` query.
+fn mock_tx_provider_limited_calls(
+    input_chunks: Vec<Vec<InternalConsensusTransaction>>,
+) -> MockTransactionProvider {
+    mock_tx_provider_limited_calls_ex(input_chunks, None)
+}
+
+/// Same as `mock_tx_provider_limited_calls`, with an explicit final number of executed txs.
+fn mock_tx_provider_limited_calls_ex(
+    input_chunks: Vec<Vec<InternalConsensusTransaction>>,
+    final_n_executed_txs: Option<usize>,
+) -> MockTransactionProvider {
+    let mut mock_tx_provider = MockTransactionProvider::new();
+    let mut seq = Sequence::new();
+    for input_chunk in input_chunks {
+        mock_tx_provider
+            .expect_get_final_n_executed_txs()
+            .times(1)
+            .in_sequence(&mut seq)
+            .return_const(None);
+        mock_tx_provider
+            .expect_get_txs()
+            .times(1)
+            .with(eq(input_chunk.len()))
+            .in_sequence(&mut seq)
+            .returning(move |_n_txs| Ok(input_chunk.clone()));
+    }
+    mock_tx_provider.expect_get_final_n_executed_txs().return_const(final_n_executed_txs);
+    mock_tx_provider
+}
+
+fn mock_tx_provider_stream_done(
+    input_chunk: Vec<InternalConsensusTransaction>,
+) -> MockTransactionProvider {
+    let n_txs = input_chunk.len();
+    let mut mock_tx_provider = MockTransactionProvider::new();
+    let mut seq = Sequence::new();
+    mock_tx_provider
+        .expect_get_final_n_executed_txs()
+        .times(1)
+        .in_sequence(&mut seq)
+        .return_const(None);
+    mock_tx_provider
+        .expect_get_txs()
+        .times(1)
+        .in_sequence(&mut seq)
+        .with(eq(N_CONCURRENT_TXS))
+        .return_once(move |_n_txs| Ok(input_chunk));
+    mock_tx_provider
+        .expect_get_final_n_executed_txs()
+        .times(1)
+        .in_sequence(&mut seq)
+        .return_const(Some(n_txs));
+
+    // Continue to return empty chunks while the block is being built.
+    mock_tx_provider.expect_get_txs().times(1..).returning(|_n_txs| Ok(vec![]));
+    mock_tx_provider
+}
+
+/// Creates a mock tx provider that returns the input chunks and then empty chunks.
+fn mock_tx_provider_limitless_calls(
+    input_chunks: Vec<Vec<InternalConsensusTransaction>>,
+) -> MockTransactionProvider {
+    let mut mock_tx_provider = mock_tx_provider_limited_calls(input_chunks);
+
+    // The number of times the mempool will be called until the timeout is unpredictable.
+ add_limitless_empty_calls(&mut mock_tx_provider); + mock_tx_provider +} + +fn add_limitless_empty_calls(mock_tx_provider: &mut MockTransactionProvider) { + mock_tx_provider.expect_get_txs().with(eq(N_CONCURRENT_TXS)).returning(|_n_txs| Ok(Vec::new())); + mock_tx_provider.expect_get_final_n_executed_txs().return_const(None); +} + +/// Creates a `MockTransactionProvider` for less than (or exactly) N_CONCURRENT_TXS transactions. +fn mock_tx_provider_small_stream( + input_chunk: Vec, +) -> MockTransactionProvider { + let mut mock_tx_provider = MockTransactionProvider::new(); + + assert!(input_chunk.len() <= N_CONCURRENT_TXS); + mock_tx_provider + .expect_get_txs() + .times(1) + .with(eq(N_CONCURRENT_TXS)) + .returning(move |_n_txs| Ok(input_chunk.clone())); + mock_tx_provider.expect_get_final_n_executed_txs().return_const(None); + + mock_tx_provider +} + +fn mock_tx_provider_with_error(error: TransactionProviderError) -> MockTransactionProvider { + let mut mock_tx_provider = MockTransactionProvider::new(); + mock_tx_provider + .expect_get_txs() + .times(1) + .with(eq(N_CONCURRENT_TXS)) + .return_once(move |_n_txs| Err(error)); + mock_tx_provider.expect_get_final_n_executed_txs().return_const(None); + mock_tx_provider +} + +fn compare_tx_hashes( + input: &[InternalConsensusTransaction], + blockifier_input: &[BlockifierTransaction], +) -> bool { + let expected_tx_hashes: Vec = input.iter().map(|tx| tx.tx_hash()).collect(); + let input_tx_hashes: Vec = + blockifier_input.iter().map(BlockifierTransaction::tx_hash).collect(); + expected_tx_hashes == input_tx_hashes +} + +// TODO(yair): refactor to be a method of TestExpectations. +async fn verify_build_block_output( + expected_output_txs: Vec, + expected_block_artifacts: BlockExecutionArtifacts, + result_block_artifacts: BlockExecutionArtifacts, + mut output_stream_receiver: UnboundedReceiver, + expected_full_blocks_metric: u64, + metrics: &str, +) { + // Verify the transactions in the output channel. + let mut output_txs = vec![]; + output_stream_receiver.recv_many(&mut output_txs, TX_CHANNEL_SIZE).await; + assert_eq!(output_txs, expected_output_txs); + + // Verify the block artifacts. 
+ assert_eq!(result_block_artifacts, expected_block_artifacts); + + FULL_BLOCKS.assert_eq::(metrics, expected_full_blocks_metric); +} + +async fn run_build_block( + mock_transaction_executor: MockTransactionExecutorTrait, + tx_provider: MockTransactionProvider, + output_sender: Option>, + is_validator: bool, + abort_receiver: tokio::sync::oneshot::Receiver<()>, + deadline_secs: u64, +) -> BlockBuilderResult { + let deadline = tokio::time::Instant::now() + tokio::time::Duration::from_secs(deadline_secs); + let transaction_converter = TransactionConverter::new( + Arc::new(MockClassManagerClient::new()), + CHAIN_ID_FOR_TESTS.clone(), + ); + let mut block_builder = BlockBuilder::new( + mock_transaction_executor, + Box::new(tx_provider), + output_sender, + None, + None, + abort_receiver, + transaction_converter, + N_CONCURRENT_TXS, + TX_POLLING_INTERVAL, + BlockBuilderExecutionParams { deadline, is_validator }, + ); + + block_builder.build_block().await +} + +#[rstest] +#[case::one_chunk_block(one_chunk_test_expectations())] +#[case::two_chunks_block(two_chunks_test_expectations())] +#[case::empty_block(empty_block_test_expectations())] +#[case::block_full_before_is_done(block_full_test_expectations(true))] +#[case::block_full_after_is_done(block_full_test_expectations(false))] +#[case::deadline_reached_after_first_chunk(test_expectations_partial_transaction_execution())] +#[case::transaction_failed(transaction_failed_test_expectations())] +#[tokio::test] +async fn test_build_block(#[case] test_expectations: TestExpectations) { + let recorder = PrometheusBuilder::new().build_recorder(); + let _recorder_guard = metrics::set_default_local_recorder(&recorder); + FULL_BLOCKS.register(); + let metrics = recorder.handle().render(); + FULL_BLOCKS.assert_eq::(&metrics, 0); + + let (output_tx_sender, output_tx_receiver) = output_channel(); + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + + let result_block_artifacts = run_build_block( + test_expectations.mock_transaction_executor, + test_expectations.mock_tx_provider, + Some(output_tx_sender), + false, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await + .unwrap(); + + verify_build_block_output( + test_expectations.expected_txs_output, + test_expectations.expected_block_artifacts, + result_block_artifacts, + output_tx_receiver, + test_expectations.expected_full_blocks_metric, + &recorder.handle().render(), + ) + .await; +} + +#[tokio::test] +async fn test_validate_block() { + let input_txs = test_txs(0..3); + let (mock_transaction_executor, expected_block_artifacts) = + one_chunk_mock_executor(&input_txs, input_txs.len(), true); + let mock_tx_provider = mock_tx_provider_stream_done(input_txs); + + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + let result_block_artifacts = run_build_block( + mock_transaction_executor, + mock_tx_provider, + None, + true, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await + .unwrap(); + + assert_eq!(result_block_artifacts, expected_block_artifacts); +} + +/// Tests the case where the final number of transactions in the block is smaller than the number +/// of transactions that were executed. 
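+/// Concretely: the validator executes all 6 txs from the two chunks, the proposer closes
+/// the block at 5, and `remove_last_txs` drops the trailing tx hash from the execution
+/// data before the artifacts are returned.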
+#[tokio::test] +async fn test_validate_block_excluded_txs() { + let (first_chunk, second_chunk, mut mock_transaction_executor) = two_chunks_mock_executor(true); + let n_executed_txs = first_chunk.len() + second_chunk.len(); + let final_n_executed_txs = n_executed_txs - 1; + + let expected_block_artifacts = + set_close_block_expectations(&mut mock_transaction_executor, final_n_executed_txs); + + let mut mock_tx_provider = mock_tx_provider_limited_calls_ex( + vec![first_chunk, second_chunk], + Some(final_n_executed_txs), + ); + + mock_tx_provider.expect_get_txs().returning(move |_n_txs| Ok(vec![])); + + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + let result_block_artifacts = run_build_block( + mock_transaction_executor, + mock_tx_provider, + None, + true, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await + .unwrap(); + + assert_eq!(result_block_artifacts, expected_block_artifacts); +} + +#[rstest] +#[case::deadline_reached( + test_txs(0..3), mock_partial_transaction_execution(&input_txs, &[], 0, true), + FailOnErrorCause::DeadlineReached +)] +#[tokio::test] +async fn test_validate_block_with_error( + #[case] input_txs: Vec, + #[case] mut mock_transaction_executor: MockTransactionExecutorTrait, + #[case] expected_error: FailOnErrorCause, +) { + mock_transaction_executor.expect_close_block().times(0); + mock_transaction_executor.expect_abort_block().times(1).return_once(|| ()); + + let mock_tx_provider = mock_tx_provider_limited_calls(vec![input_txs]); + + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + let result = run_build_block( + mock_transaction_executor, + mock_tx_provider, + None, + true, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await + .unwrap_err(); + + assert_matches!( + result, BlockBuilderError::FailOnError(err) + if err.to_string() == expected_error.to_string() + ); +} + +#[rstest] +#[case::already_included_in_proposed_block(AlreadyIncludedInProposedBlock)] +#[case::already_included_on_l2(AlreadyIncludedOnL2)] +#[case::consumed_on_l1_or_unknown(ConsumedOnL1OrUnknown)] +#[tokio::test] +async fn test_validate_block_l1_handler_validation_error(#[case] status: InvalidValidationStatus) { + let tx_provider = mock_tx_provider_with_error(L1HandlerTransactionValidationFailed { + tx_hash: tx_hash!(0), + validation_status: status, + }); + + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + + let mut helper = ExpectationHelper::new(); + helper.deadline_expectations(); + + helper.mock_transaction_executor.expect_abort_block().times(1).return_once(|| ()); + + let result = run_build_block( + helper.mock_transaction_executor, + tx_provider, + None, + true, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await; + + assert_matches!( + result, + Err(BlockBuilderError::FailOnError( + FailOnErrorCause::L1HandlerTransactionValidationFailed( + TransactionProviderError::L1HandlerTransactionValidationFailed { .. } + ) + )), + "Expected FailOnError for validation status: {status:?}" + ); +} + +#[rstest] +#[tokio::test] +async fn test_build_block_abort() { + let n_txs = 3; + let mock_tx_provider = mock_tx_provider_limitless_calls(vec![test_txs(0..n_txs)]); + + // Expect one transaction chunk to be added to the block, and then abort. 
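+    // (The abort signal travels over the oneshot channel once the first tx shows up on
+    // the output stream; the builder must then call abort_block exactly once and return
+    // BlockBuilderError::Aborted.)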
+    let mut helper = ExpectationHelper::new();
+    helper.expect_successful_get_new_results(0);
+    helper.expect_is_done(false);
+    helper.expect_add_txs_to_block(&test_txs(0..3));
+    helper.expect_successful_get_new_results(3);
+    helper.expect_is_done(false);
+    helper.deadline_expectations();
+
+    helper.mock_transaction_executor.expect_close_block().times(0);
+    helper.mock_transaction_executor.expect_abort_block().times(1).return_once(|| ());
+
+    let (output_tx_sender, mut output_tx_receiver) = output_channel();
+    let (abort_sender, abort_receiver) = tokio::sync::oneshot::channel();
+
+    // Send the abort signal after the first tx is added to the block.
+    tokio::spawn(async move {
+        output_tx_receiver.recv().await.unwrap();
+        abort_sender.send(()).unwrap();
+    });
+
+    assert_matches!(
+        run_build_block(
+            helper.mock_transaction_executor,
+            mock_tx_provider,
+            Some(output_tx_sender),
+            false,
+            abort_receiver,
+            BLOCK_GENERATION_LONG_DEADLINE_SECS,
+        )
+        .await,
+        Err(BlockBuilderError::Aborted)
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_build_block_abort_immediately() {
+    // Expect no transactions to be requested from the provider, and none to be added to the
+    // block.
+    let mut mock_tx_provider = MockTransactionProvider::new();
+    mock_tx_provider.expect_get_txs().times(0);
+    mock_tx_provider.expect_get_final_n_executed_txs().return_const(None);
+    let mut mock_transaction_executor = MockTransactionExecutorTrait::new();
+    mock_transaction_executor.expect_add_txs_to_block().times(0);
+    mock_transaction_executor.expect_close_block().times(0);
+    mock_transaction_executor.expect_abort_block().times(1).return_once(|| ());
+
+    let (output_tx_sender, _output_tx_receiver) = output_channel();
+    let (abort_sender, abort_receiver) = tokio::sync::oneshot::channel();
+
+    // Send the abort signal before we start building the block.
+    abort_sender.send(()).unwrap();
+
+    assert_matches!(
+        run_build_block(
+            mock_transaction_executor,
+            mock_tx_provider,
+            Some(output_tx_sender),
+            false,
+            abort_receiver,
+            BLOCK_GENERATION_LONG_DEADLINE_SECS,
+        )
+        .await,
+        Err(BlockBuilderError::Aborted)
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_l2_gas_used() {
+    let n_txs = 3;
+    let input_txs = test_txs(0..n_txs);
+    let (mock_transaction_executor, _) = one_chunk_mock_executor(&input_txs, input_txs.len(), true);
+    let mock_tx_provider = mock_tx_provider_stream_done(input_txs);
+
+    let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel();
+    let result_block_artifacts = run_build_block(
+        mock_transaction_executor,
+        mock_tx_provider,
+        None,
+        true,
+        abort_receiver,
+        BLOCK_GENERATION_DEADLINE_SECS,
+    )
+    .await
+    .unwrap();
+
+    // Each mock transaction uses 1 L2 gas so the total amount should be the number of txs.
+    assert_eq!(result_block_artifacts.l2_gas_used, GasAmount(n_txs.try_into().unwrap()));
+}
+
+// Test that the BlockBuilder returns the execution_infos ordered in the same order as the
+// transactions are included in the block. This is crucial for the correct execution of Starknet.
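+// E.g. with chunks [tx0..tx2] and [tx3..tx5], iterating `execution_infos` must visit the
+// hashes of tx0..tx5 exactly in block order (IndexMap preserves insertion order).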
+#[tokio::test]
+async fn test_execution_info_order() {
+    let (first_chunk, second_chunk, mut mock_transaction_executor) =
+        two_chunks_mock_executor(false);
+    let input_txs = first_chunk.iter().chain(second_chunk.iter()).collect::<Vec<_>>();
+
+    set_close_block_expectations(&mut mock_transaction_executor, input_txs.len());
+
+    let mock_tx_provider =
+        mock_tx_provider_limitless_calls(vec![first_chunk.clone(), second_chunk.clone()]);
+    let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel();
+
+    let result_block_artifacts = run_build_block(
+        mock_transaction_executor,
+        mock_tx_provider,
+        None,
+        false,
+        abort_receiver,
+        BLOCK_GENERATION_DEADLINE_SECS,
+    )
+    .await
+    .unwrap();
+
+    // Verify that the execution_infos are ordered in the same order as the input_txs.
+    result_block_artifacts.execution_data.execution_infos.iter().zip(&input_txs).for_each(
+        |((tx_hash, _execution_info), tx)| {
+            assert_eq!(tx_hash, &tx.tx_hash());
+        },
+    );
+}
+
+#[rstest]
+#[tokio::test]
+async fn failed_l1_handler_transaction_consumed() {
+    let l1_handler_txs = test_l1_handler_txs(0..2);
+    let mock_tx_provider = mock_tx_provider_small_stream(l1_handler_txs.clone());
+
+    let mut helper = ExpectationHelper::new();
+    helper.expect_successful_get_new_results(0);
+    helper.expect_is_done(false);
+    helper.expect_add_txs_to_block(&l1_handler_txs);
+    helper.expect_get_new_results_with_results(vec![
+        Err(TransactionExecutorError::StateError(StateError::OutOfRangeContractAddress)),
+        Ok((execution_info(), StateMaps::default())),
+    ]);
+    helper.expect_is_done(true);
+    helper.expect_successful_get_new_results(0);
+
+    helper.mock_transaction_executor.expect_close_block().times(1).return_once(|_| {
+        Ok(BlockExecutionSummary {
+            state_diff: Default::default(),
+            compressed_state_diff: None,
+            bouncer_weights: BouncerWeights::empty(),
+            casm_hash_computation_data_sierra_gas: CasmHashComputationData::default(),
+            casm_hash_computation_data_proving_gas: CasmHashComputationData::default(),
+        })
+    });
+
+    let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel();
+    let result_block_artifacts = run_build_block(
+        helper.mock_transaction_executor,
+        mock_tx_provider,
+        None,
+        false,
+        abort_receiver,
+        BLOCK_GENERATION_DEADLINE_SECS,
+    )
+    .await
+    .unwrap();
+
+    // Verify that all L1 handler transactions are included in the consumed L1 transactions.
+    assert_eq!(
+        result_block_artifacts.execution_data.consumed_l1_handler_tx_hashes,
+        l1_handler_txs.iter().map(|tx| tx.tx_hash()).collect::<IndexSet<_>>()
+    );
+}
+
+#[tokio::test]
+async fn partial_chunk_execution_proposer() {
+    let input_txs = test_txs(0..3); // Assume 3 txs were sent.
+    let executed_txs = input_txs[..2].to_vec(); // Only 2 are processed, simulating a partial chunk execution.
+
+    let expected_execution_infos: IndexMap<_, _> =
+        executed_txs.iter().map(|tx| (tx.tx_hash(), execution_info())).collect();
+
+    let mut helper = ExpectationHelper::new();
+
+    helper.expect_successful_get_new_results(0);
+    helper.expect_is_done(false);
+    helper.expect_add_txs_to_block(&input_txs);
+    // Return only 2 txs, simulating a partial chunk execution.
+ helper.expect_successful_get_new_results(executed_txs.len()); + helper.expect_is_done(true); + helper.expect_successful_get_new_results(0); + + let expected_block_artifacts = block_execution_artifacts( + expected_execution_infos, + Default::default(), + Default::default(), + executed_txs.len(), + ); + + let expected_block_artifacts_copy = expected_block_artifacts.clone(); + helper.mock_transaction_executor.expect_close_block().times(1).return_once(move |_| { + Ok(BlockExecutionSummary { + state_diff: expected_block_artifacts.commitment_state_diff, + compressed_state_diff: None, + bouncer_weights: expected_block_artifacts.bouncer_weights, + casm_hash_computation_data_sierra_gas: expected_block_artifacts + .casm_hash_computation_data_sierra_gas, + casm_hash_computation_data_proving_gas: expected_block_artifacts + .casm_hash_computation_data_proving_gas, + }) + }); + + let mock_tx_provider = mock_tx_provider_limited_calls(vec![input_txs.clone()]); + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + + // Block should be built with the executed transactions without any errors. + let is_validator = false; + let result_block_artifacts = run_build_block( + helper.mock_transaction_executor, + mock_tx_provider, + None, + is_validator, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await + .unwrap(); + + assert_eq!(result_block_artifacts, expected_block_artifacts_copy); +} + +#[rstest] +#[case::success(true)] +#[case::fail(false)] +#[tokio::test] +async fn partial_chunk_execution_validator(#[case] successful: bool) { + let input_txs = test_txs(0..3); + + let mut helper = ExpectationHelper::new(); + helper.expect_successful_get_new_results(0); + helper.expect_add_txs_to_block(&input_txs); + // Return only 2 txs, simulating a partial chunk execution. + helper.expect_successful_get_new_results(2); + + let expected_block_artifacts = if successful { + helper.mock_transaction_executor.expect_abort_block().times(0); + Some(set_close_block_expectations(&mut helper.mock_transaction_executor, 2)) + } else { + // Validator continues the loop even after the scheduler is done. + helper.mock_transaction_executor.expect_get_new_results().times(1..).returning(Vec::new); + + helper.mock_transaction_executor.expect_close_block().times(0); + helper.mock_transaction_executor.expect_abort_block().times(1).return_once(|| ()); + None + }; + + // Success: the proposer suggests final_n_executed_txs=2, and since those were executed + // successfully, the validator succeeds. + // Fail: the proposer suggests final_n_executed_txs=3, and the validator fails. + let final_n_executed_txs = if successful { 2 } else { 3 }; + let mut mock_tx_provider = + mock_tx_provider_limited_calls_ex(vec![input_txs.clone()], Some(final_n_executed_txs)); + mock_tx_provider.expect_get_txs().with(eq(2)).returning(|_n_txs| Ok(Vec::new())); + + let (_abort_sender, abort_receiver) = tokio::sync::oneshot::channel(); + + let is_validator = true; + let result_block_artifacts = run_build_block( + helper.mock_transaction_executor, + mock_tx_provider, + None, + is_validator, + abort_receiver, + BLOCK_GENERATION_DEADLINE_SECS, + ) + .await; + + if successful { + assert_eq!(result_block_artifacts.unwrap(), expected_block_artifacts.unwrap()); + } else { + // Deadline is reached since the validator never completes 3 transactions. 
+ assert!(matches!( + result_block_artifacts, + Err(BlockBuilderError::FailOnError(FailOnErrorCause::DeadlineReached)) + )); + } +} diff --git a/crates/apollo_batcher/src/cende_client_types.rs b/crates/apollo_batcher/src/cende_client_types.rs new file mode 100644 index 00000000000..ef511ce5437 --- /dev/null +++ b/crates/apollo_batcher/src/cende_client_types.rs @@ -0,0 +1,682 @@ +//! Local copies of types from apollo_starknet_client for use in batcher, prefixed with +//! StarknetClient. +use std::collections::HashMap; + +use apollo_starknet_client::reader::objects::state::StateDiff; +use apollo_starknet_client::reader::objects::transaction::ReservedDataAvailabilityMode; +use apollo_starknet_client::reader::{DeclaredClassHashEntry, DeployedContract, StorageEntry}; +use blockifier::execution::call_info::OrderedEvent; +use blockifier::state::cached_state::{StateMaps, StorageView}; +// TODO(noamsp): find a way to share the TransactionReceipt from apollo_starknet_client and +// remove this module. +use blockifier::transaction::objects::TransactionExecutionInfo; +use cairo_vm::types::builtin_name::BuiltinName; +use indexmap::IndexMap; +use serde::{Deserialize, Serialize}; +use starknet_api::block::{ + BlockInfo, + BlockTimestamp, + GasPricePerToken, + GasPrices, + StarknetVersion, +}; +use starknet_api::consensus_transaction::InternalConsensusTransaction; +use starknet_api::core::{ + ClassHash, + CompiledClassHash, + ContractAddress, + EntryPointSelector, + EthAddress, + Nonce, +}; +use starknet_api::data_availability::L1DataAvailabilityMode; +use starknet_api::executable_transaction::L1HandlerTransaction as ExecutableL1HandlerTransaction; +use starknet_api::execution_resources::GasVector; +use starknet_api::hash::StarkHash; +use starknet_api::rpc_transaction::{ + InternalRpcDeployAccountTransaction, + InternalRpcTransaction, + RpcDeployAccountTransaction, + RpcInvokeTransaction, +}; +use starknet_api::transaction::fields::{ + AccountDeploymentData, + AllResourceBounds, + Calldata, + ContractAddressSalt, + Fee, + PaymasterData, + ResourceBounds, + Tip, + TransactionSignature, +}; +use starknet_api::transaction::{ + Event, + L1ToL2Payload, + L2ToL1Payload, + TransactionHash, + TransactionOffsetInBlock, + TransactionVersion, +}; + +#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Deserialize, Serialize, PartialOrd, Ord)] +pub struct L1ToL2Nonce(pub StarkHash); + +#[derive(Debug, Default, Deserialize, Serialize, Clone, Eq, PartialEq)] +pub struct L1ToL2Message { + pub from_address: EthAddress, + pub to_address: ContractAddress, + pub selector: EntryPointSelector, + pub payload: L1ToL2Payload, + #[serde(default)] + pub nonce: L1ToL2Nonce, +} + +// TODO(Arni): This code already appears somewhere else in the codebase, consider sharing it. 
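+// Calldata layout assumed by this conversion: calldata[0] is the L1 `from_address` and
+// calldata[1..] is the payload; e.g. a (hypothetical) calldata `[0xabc, x, y]` maps to
+// `from_address = 0xabc` and `payload = [x, y]`.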
+impl From for L1ToL2Message { + fn from(l1_handler_transaction: starknet_api::transaction::L1HandlerTransaction) -> Self { + let calldata = l1_handler_transaction.calldata; + let from_address = calldata.0[0].try_into().expect("Failed to convert EthAddress"); + let payload = L1ToL2Payload(calldata.0[1..].to_vec()); + Self { + from_address, + to_address: l1_handler_transaction.contract_address, + selector: l1_handler_transaction.entry_point_selector, + payload, + nonce: L1ToL2Nonce(l1_handler_transaction.nonce.0), + } + } +} + +#[derive(Debug, Default, Deserialize, Serialize, Clone, Eq, PartialEq)] +pub struct L2ToL1Message { + pub from_address: ContractAddress, + pub to_address: EthAddress, + pub payload: L2ToL1Payload, +} + +// Note: the serialization is different from the one in starknet_api. +#[derive(Hash, Debug, Deserialize, Serialize, Clone, Eq, PartialEq)] +pub enum Builtin { + #[serde(rename = "range_check_builtin")] + RangeCheck, + #[serde(rename = "pedersen_builtin")] + Pedersen, + #[serde(rename = "poseidon_builtin")] + Poseidon, + #[serde(rename = "ec_op_builtin")] + EcOp, + #[serde(rename = "ecdsa_builtin")] + Ecdsa, + #[serde(rename = "bitwise_builtin")] + Bitwise, + #[serde(rename = "keccak_builtin")] + Keccak, + // Note: in starknet_api this variant doesn't exist. + #[serde(rename = "output_builtin")] + Output, + #[serde(rename = "segment_arena_builtin")] + SegmentArena, + #[serde(rename = "add_mod_builtin")] + AddMod, + #[serde(rename = "mul_mod_builtin")] + MulMod, + #[serde(rename = "range_check96_builtin")] + RangeCheck96, +} + +impl From for Builtin { + fn from(builtin_name: BuiltinName) -> Self { + match builtin_name { + BuiltinName::range_check => Builtin::RangeCheck, + BuiltinName::pedersen => Builtin::Pedersen, + BuiltinName::poseidon => Builtin::Poseidon, + BuiltinName::ec_op => Builtin::EcOp, + BuiltinName::ecdsa => Builtin::Ecdsa, + BuiltinName::bitwise => Builtin::Bitwise, + BuiltinName::keccak => Builtin::Keccak, + BuiltinName::output => Builtin::Output, + BuiltinName::segment_arena => Builtin::SegmentArena, + BuiltinName::add_mod => Builtin::AddMod, + BuiltinName::mul_mod => Builtin::MulMod, + BuiltinName::range_check96 => Builtin::RangeCheck96, + } + } +} + +/// The execution resources used by a transaction. +#[derive(Debug, Default, Deserialize, Serialize, Clone, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +pub struct ExecutionResources { + // Note: in starknet_api this field is named `steps` + pub n_steps: u64, + pub builtin_instance_counter: HashMap, + // Note: in starknet_api this field is named `memory_holes` + pub n_memory_holes: u64, + // This field is missing in blocks created before v0.13.1, even if the feeder gateway is of + // that version + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub data_availability: Option, + // This field is missing in blocks created before v0.13.2, even if the feeder gateway is of + // that version + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub total_gas_consumed: Option, +} + +/// Transaction execution status. +#[derive(Debug, Clone, Eq, PartialEq, Hash, Deserialize, Serialize, PartialOrd, Ord, Default)] +pub enum TransactionExecutionStatus { + #[serde(rename = "SUCCEEDED")] + #[default] + Succeeded, + #[serde(rename = "REVERTED")] + Reverted, +} + +// TODO(Arni): Consider deleting derive default for this type. Same for members of this struct. 
+#[derive(Debug, Default, Deserialize, Serialize, Clone, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +pub struct StarknetClientTransactionReceipt { + pub transaction_index: TransactionOffsetInBlock, + pub transaction_hash: TransactionHash, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub l1_to_l2_consumed_message: Option, + pub l2_to_l1_messages: Vec, + pub events: Vec, + #[serde(default)] + pub execution_resources: ExecutionResources, + pub actual_fee: Fee, + // TODO(Yair): Check if we can remove the serde(default). + #[serde(default)] + pub execution_status: TransactionExecutionStatus, + // Note that in starknet_api this field is named `revert_reason`. + // Assumption: if the transaction execution status is Succeeded, then revert_error is None, and + // if the transaction execution status is Reverted, then revert_error is Some. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub revert_error: Option, +} + +// Conversion logic from blockifier types to StarknetClient types. +impl + From<( + TransactionHash, + // TODO(Arni): change the type of this parameter to TransactionOffsetInBlock + usize, + &TransactionExecutionInfo, + Option, + )> for StarknetClientTransactionReceipt +{ + fn from( + (tx_hash, tx_index, tx_execution_info, l1_handler): ( + TransactionHash, + usize, + &TransactionExecutionInfo, + Option, + ), + ) -> Self { + let l2_to_l1_messages = get_l2_to_l1_messages(tx_execution_info); + let events = get_events_from_execution_info(tx_execution_info); + let execution_resources = get_execution_resources(tx_execution_info); + let execution_status = if tx_execution_info.is_reverted() { + TransactionExecutionStatus::Reverted + } else { + TransactionExecutionStatus::Succeeded + }; + + // TODO(Arni): I assume this is not the correct way to fill this field. + let revert_error = + tx_execution_info.revert_error.as_ref().map(|revert_error| revert_error.to_string()); + + Self { + transaction_index: TransactionOffsetInBlock(tx_index), + transaction_hash: tx_hash, + // TODO(Arni): Fill this up. This is relevant only for L1 handler transactions. + l1_to_l2_consumed_message: l1_handler.map(L1ToL2Message::from), + l2_to_l1_messages, + events, + execution_resources, + actual_fee: tx_execution_info.receipt.fee, + execution_status, + revert_error, + } + } +} + +fn get_l2_to_l1_messages(execution_info: &TransactionExecutionInfo) -> Vec { + // TODO(Arni): Fix this call. The iterator returns all the call infos in the order: `validate`, + // `execute`, `fee_transfer`. For `deploy_account` transactions, the order is `execute`, + // `validate`, `fee_transfer`. + let call_info_iterator = execution_info.non_optional_call_infos(); + + let mut l2_to_l1_messages = vec![]; + for call in call_info_iterator { + let messages = + call.execution.l2_to_l1_messages.iter().map(|l2_to_l1_message| L2ToL1Message { + from_address: call.call.caller_address, + to_address: l2_to_l1_message.message.to_address, + payload: l2_to_l1_message.message.payload.clone(), + }); + l2_to_l1_messages.extend(messages); + } + + l2_to_l1_messages +} + +fn get_events_from_execution_info(execution_info: &TransactionExecutionInfo) -> Vec { + let call_info = if let Some(ref call_info) = execution_info.execute_call_info { + call_info + } else { + return vec![]; + }; + + // Collect all the events from the call infos, along with their order. 
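+    // (Each event carries an `order` field; events are gathered from every inner call and
+    // globally sorted by that order below, so cross-call emission order is preserved.)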
+ let mut accumulated_sortable_events = vec![]; + for call_info in call_info.iter() { + let sortable_events = call_info + .execution + .events + .iter() + .map(|orderable_event| (call_info.call.caller_address, orderable_event)); + accumulated_sortable_events.extend(sortable_events); + } + // Sort the events by their order. + accumulated_sortable_events.sort_by_key(|(_, OrderedEvent { order, .. })| *order); + + // Convert the sorted events into the StarknetClient Event type. + accumulated_sortable_events + .iter() + .map(|(from_address, OrderedEvent { event, .. })| Event { + from_address: *from_address, + content: event.clone(), + }) + .collect() +} + +fn get_execution_resources(execution_info: &TransactionExecutionInfo) -> ExecutionResources { + let receipt = &execution_info.receipt; + let resources = &receipt.resources.computation.total_vm_resources(); + let builtin_instance_counter = resources + .builtin_instance_counter + .iter() + .map(|(&builtin_name, &count)| { + (builtin_name.into(), count.try_into().expect("Failed to convert usize to u64")) + }) + .collect(); + + ExecutionResources { + n_steps: resources.n_steps.try_into().expect("Failed to convert usize to u64"), + builtin_instance_counter, + n_memory_holes: resources + .n_memory_holes + .try_into() + .expect("Failed to convert usize to u64"), + data_availability: Some(receipt.da_gas), + total_gas_consumed: Some(receipt.gas), + } +} + +// TODO(shahak): consider extracting common fields out (version, hash, type). +// This is a modified version of the enum +// `apollo_starknet_client::reader::objects::transaction::Transaction`. +// The main difference is that the `Deploy` variant is not present in this enum. +// Also a few modifications were made to the serialization format. +#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)] +#[serde(tag = "type")] +pub enum CendePreconfirmedTransaction { + #[serde(rename = "DECLARE")] + Declare(IntermediateDeclareTransaction), + #[serde(rename = "DEPLOY_ACCOUNT")] + DeployAccount(IntermediateDeployAccountTransaction), + #[serde(rename = "INVOKE_FUNCTION")] + Invoke(IntermediateInvokeTransaction), + #[serde(rename = "L1_HANDLER")] + L1Handler(L1HandlerTransaction), +} + +impl CendePreconfirmedTransaction { + pub fn transaction_hash(&self) -> TransactionHash { + match self { + CendePreconfirmedTransaction::Declare(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::DeployAccount(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::Invoke(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::L1Handler(tx) => tx.transaction_hash, + } + } +} + +impl From for CendePreconfirmedTransaction { + fn from(transaction: InternalConsensusTransaction) -> Self { + match transaction { + InternalConsensusTransaction::RpcTransaction(internal_rpc_transaction) => { + internal_rpc_transaction.into() + } + InternalConsensusTransaction::L1Handler(l1_handler_transaction) => { + l1_handler_transaction.into() + } + } + } +} + +// TODO(Arni): Share code with `crates/apollo_consensus_orchestrator/src/cende/central_objects.rs`. 
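+// Serialized shape (hypothetical values): `{"L1_GAS": {...}, "L2_GAS": {...},
+// "L1_DATA_GAS": {...}}`, matching the serde renames below.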
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, Eq)]
+pub struct CentralResourceBounds {
+    #[serde(rename = "L1_GAS")]
+    l1_gas: ResourceBounds,
+    #[serde(rename = "L2_GAS")]
+    l2_gas: ResourceBounds,
+    #[serde(rename = "L1_DATA_GAS")]
+    l1_data_gas: ResourceBounds,
+}
+
+impl From<AllResourceBounds> for CentralResourceBounds {
+    fn from(resource_bounds: AllResourceBounds) -> CentralResourceBounds {
+        CentralResourceBounds {
+            l1_gas: resource_bounds.l1_gas,
+            l2_gas: resource_bounds.l2_gas,
+            l1_data_gas: resource_bounds.l1_data_gas,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
+#[serde(deny_unknown_fields)]
+pub struct IntermediateDeclareTransaction {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_bounds: Option<CentralResourceBounds>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tip: Option<Tip>,
+    pub signature: TransactionSignature,
+    pub nonce: Nonce,
+    pub class_hash: ClassHash,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub compiled_class_hash: Option<CompiledClassHash>,
+    pub sender_address: ContractAddress,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub nonce_data_availability_mode: Option<ReservedDataAvailabilityMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fee_data_availability_mode: Option<ReservedDataAvailabilityMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub paymaster_data: Option<PaymasterData>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_deployment_data: Option<AccountDeploymentData>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_fee: Option<Fee>,
+    pub version: TransactionVersion,
+    pub transaction_hash: TransactionHash,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
+#[serde(deny_unknown_fields)]
+pub struct IntermediateDeployAccountTransaction {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_bounds: Option<CentralResourceBounds>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tip: Option<Tip>,
+    pub signature: TransactionSignature,
+    pub nonce: Nonce,
+    pub class_hash: ClassHash,
+    pub contract_address_salt: ContractAddressSalt,
+    pub constructor_calldata: Calldata,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub nonce_data_availability_mode: Option<ReservedDataAvailabilityMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fee_data_availability_mode: Option<ReservedDataAvailabilityMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub paymaster_data: Option<PaymasterData>,
+    // In early versions of starknet, the `sender_address` field was originally named
+    // `contract_address`.
+    #[serde(alias = "contract_address")]
+    pub sender_address: ContractAddress,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_fee: Option<Fee>,
+    pub transaction_hash: TransactionHash,
+    pub version: TransactionVersion,
+}
+
+#[derive(Debug, Default, Deserialize, Serialize, Clone, Eq, PartialEq)]
+#[serde(deny_unknown_fields)]
+pub struct IntermediateInvokeTransaction {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub resource_bounds: Option<CentralResourceBounds>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tip: Option<Tip>,
+    pub calldata: Calldata,
+    // In early versions of starknet, the `sender_address` field was originally named
+    // `contract_address`.
+    #[serde(alias = "contract_address")]
+    pub sender_address: ContractAddress,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub entry_point_selector: Option<EntryPointSelector>,
+    #[serde(default)]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub nonce: Option<Nonce>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_fee: Option<Fee>,
+    pub signature: TransactionSignature,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub nonce_data_availability_mode: Option<ReservedDataAvailabilityMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fee_data_availability_mode: Option<ReservedDataAvailabilityMode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub paymaster_data: Option<PaymasterData>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub account_deployment_data: Option<AccountDeploymentData>,
+    pub transaction_hash: TransactionHash,
+    pub version: TransactionVersion,
+}
+
+impl From<InternalRpcTransaction> for CendePreconfirmedTransaction {
+    fn from(internal_rpc_transaction: InternalRpcTransaction) -> Self {
+        let tx_hash = internal_rpc_transaction.tx_hash;
+        match internal_rpc_transaction.tx {
+            starknet_api::rpc_transaction::InternalRpcTransactionWithoutTxHash::Declare(
+                declare_transaction,
+            ) => {
+                let version = declare_transaction.version();
+                CendePreconfirmedTransaction::Declare(IntermediateDeclareTransaction {
+                    resource_bounds: Some(declare_transaction.resource_bounds.into()),
+                    tip: Some(declare_transaction.tip),
+                    signature: declare_transaction.signature,
+                    nonce: declare_transaction.nonce,
+                    class_hash: declare_transaction.class_hash,
+                    compiled_class_hash: Some(declare_transaction.compiled_class_hash),
+                    sender_address: declare_transaction.sender_address,
+                    nonce_data_availability_mode: Some(
+                        declare_transaction.nonce_data_availability_mode.into(),
+                    ),
+                    fee_data_availability_mode: Some(
+                        declare_transaction.fee_data_availability_mode.into(),
+                    ),
+                    paymaster_data: Some(declare_transaction.paymaster_data),
+                    account_deployment_data: Some(declare_transaction.account_deployment_data),
+                    version,
+                    transaction_hash: tx_hash,
+                    // Irrelevant for V3 declare transactions.
+                    max_fee: None,
+                })
+            }
+            starknet_api::rpc_transaction::InternalRpcTransactionWithoutTxHash::DeployAccount(
+                deploy_account_transaction,
+            ) => {
+                let version = deploy_account_transaction.version();
+                let InternalRpcDeployAccountTransaction {
+                    tx: RpcDeployAccountTransaction::V3(tx),
+                    contract_address,
+                } = deploy_account_transaction;
+                CendePreconfirmedTransaction::DeployAccount(IntermediateDeployAccountTransaction {
+                    resource_bounds: Some(tx.resource_bounds.into()),
+                    tip: Some(tx.tip),
+                    signature: tx.signature,
+                    nonce: tx.nonce,
+                    class_hash: tx.class_hash,
+                    contract_address_salt: tx.contract_address_salt,
+                    constructor_calldata: tx.constructor_calldata,
+                    nonce_data_availability_mode: Some(tx.nonce_data_availability_mode.into()),
+                    fee_data_availability_mode: Some(tx.fee_data_availability_mode.into()),
+                    paymaster_data: Some(tx.paymaster_data),
+                    sender_address: contract_address,
+                    transaction_hash: tx_hash,
+                    version,
+                    // Irrelevant for V3 deploy account transactions.
+                    max_fee: None,
+                })
+            }
+            starknet_api::rpc_transaction::InternalRpcTransactionWithoutTxHash::Invoke(
+                invoke_transaction,
+            ) => {
+                let version = invoke_transaction.version();
+                let RpcInvokeTransaction::V3(tx) = invoke_transaction;
+                CendePreconfirmedTransaction::Invoke(IntermediateInvokeTransaction {
+                    resource_bounds: Some(tx.resource_bounds.into()),
+                    tip: Some(tx.tip),
+                    calldata: tx.calldata,
+                    sender_address: tx.sender_address,
+                    nonce: Some(tx.nonce),
+                    signature: tx.signature,
+                    nonce_data_availability_mode: Some(tx.nonce_data_availability_mode.into()),
+                    fee_data_availability_mode: Some(tx.fee_data_availability_mode.into()),
+                    paymaster_data: Some(tx.paymaster_data),
+                    account_deployment_data: Some(tx.account_deployment_data),
+                    version,
+                    transaction_hash: tx_hash,
+                    // Irrelevant for V3 invoke transactions.
+                    entry_point_selector: None,
+                    max_fee: None,
+                })
+            }
+        }
+    }
+}
+
+#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Deserialize, Serialize, PartialOrd, Ord)]
+#[serde(deny_unknown_fields)]
+pub struct L1HandlerTransaction {
+    pub transaction_hash: TransactionHash,
+    pub version: TransactionVersion,
+    #[serde(default)]
+    pub nonce: Nonce,
+    pub contract_address: ContractAddress,
+    pub entry_point_selector: EntryPointSelector,
+    pub calldata: Calldata,
+}
+
+impl From<ExecutableL1HandlerTransaction> for CendePreconfirmedTransaction {
+    fn from(l1_handler_transaction: ExecutableL1HandlerTransaction) -> Self {
+        let ExecutableL1HandlerTransaction { tx, tx_hash, .. } = l1_handler_transaction;
+        CendePreconfirmedTransaction::L1Handler(L1HandlerTransaction {
+            transaction_hash: tx_hash,
+            version: tx.version,
+            nonce: tx.nonce,
+            contract_address: tx.contract_address,
+            entry_point_selector: tx.entry_point_selector,
+            calldata: tx.calldata,
+        })
+    }
+}
+
+const PRE_CONFIRMED_STATUS: &str = "PRE_CONFIRMED";
+
+#[derive(Serialize, Clone)]
+pub struct CendeBlockMetadata {
+    pub status: &'static str,
+    pub starknet_version: StarknetVersion,
+    pub l1_da_mode: L1DataAvailabilityMode,
+    pub l1_gas_price: GasPricePerToken,
+    pub l1_data_gas_price: GasPricePerToken,
+    pub l2_gas_price: GasPricePerToken,
+    pub timestamp: BlockTimestamp,
+    pub sequencer_address: ContractAddress,
+}
+
+impl CendeBlockMetadata {
+    pub fn new(block_info: BlockInfo) -> Self {
+        let l1_da_mode = match block_info.use_kzg_da {
+            true => L1DataAvailabilityMode::Blob,
+            false => L1DataAvailabilityMode::Calldata,
+        };
+
+        let (l1_gas_price, l1_data_gas_price, l2_gas_price) =
+            get_gas_prices(&block_info.gas_prices);
+
+        // TODO(noamsp): use correct version.
+        let starknet_version = StarknetVersion::default();
+
+        Self {
+            status: PRE_CONFIRMED_STATUS,
+            starknet_version,
+            l1_da_mode,
+            l1_gas_price,
+            l1_data_gas_price,
+            l2_gas_price,
+            timestamp: block_info.block_timestamp,
+            sequencer_address: block_info.sequencer_address,
+        }
+    }
+}
+
+fn get_gas_prices(
+    gas_prices: &GasPrices,
+) -> (GasPricePerToken, GasPricePerToken, GasPricePerToken) {
+    (
+        GasPricePerToken {
+            price_in_fri: gas_prices.strk_gas_prices.l1_gas_price.into(),
+            price_in_wei: gas_prices.eth_gas_prices.l1_gas_price.into(),
+        },
+        GasPricePerToken {
+            price_in_fri: gas_prices.strk_gas_prices.l1_data_gas_price.into(),
+            price_in_wei: gas_prices.eth_gas_prices.l1_data_gas_price.into(),
+        },
+        GasPricePerToken {
+            price_in_fri: gas_prices.strk_gas_prices.l2_gas_price.into(),
+            price_in_wei: gas_prices.eth_gas_prices.l2_gas_price.into(),
+        },
+    )
+}
+
+// The pre-confirmed block payload sent to the Cende recorder. The receipt and state-diff
+// vectors are index-aligned with `transactions`; entries are `None` for candidate
+// transactions whose execution results have not been received yet.
+#[derive(Serialize)]
+pub struct CendePreconfirmedBlock {
+    #[serde(flatten)]
+    pub metadata: CendeBlockMetadata,
+    pub transactions: Vec<CendePreconfirmedTransaction>,
+    pub transaction_receipts: Vec<Option<StarknetClientTransactionReceipt>>,
+    pub transaction_state_diffs: Vec<Option<StateDiff>>,
+}
+
+pub struct StarknetClientStateDiff(pub StateDiff);
+
+impl From<StateMaps> for StarknetClientStateDiff {
+    fn from(state_maps: StateMaps) -> Self {
+        StarknetClientStateDiff(StateDiff {
+            storage_diffs: IndexMap::from(StorageView(state_maps.storage))
+                .into_iter()
+                .map(|(address, entries)| {
+                    (
+                        address,
+                        entries
+                            .into_iter()
+                            .map(|(key, value)| StorageEntry { key, value })
+                            .collect(),
+                    )
+                })
+                .collect(),
+            deployed_contracts: state_maps
+                .class_hashes
+                .into_iter()
+                .map(|(address, class_hash)| DeployedContract { address, class_hash })
+                .collect(),
+            declared_classes: state_maps
+                .compiled_class_hashes
+                .into_iter()
+                .map(|(class_hash, compiled_class_hash)| DeclaredClassHashEntry {
+                    class_hash,
+                    compiled_class_hash,
+                })
+                .collect(),
+            old_declared_contracts: Default::default(),
+            nonces: state_maps.nonces.into_iter().collect(),
+            replaced_classes: Default::default(),
+        })
+    }
+}
diff --git a/crates/apollo_batcher/src/communication.rs b/crates/apollo_batcher/src/communication.rs
new file mode 100644
index 00000000000..b1843292cdd
--- /dev/null
+++ b/crates/apollo_batcher/src/communication.rs
@@ -0,0 +1,44 @@
+use apollo_batcher_types::communication::{BatcherRequest, BatcherResponse};
+use apollo_infra::component_definitions::ComponentRequestHandler;
+use apollo_infra::component_server::{LocalComponentServer, RemoteComponentServer};
+use async_trait::async_trait;
+
+use crate::batcher::Batcher;
+
+pub type LocalBatcherServer = LocalComponentServer<Batcher, BatcherRequest, BatcherResponse>;
+pub type RemoteBatcherServer = RemoteComponentServer<BatcherRequest, BatcherResponse>;
+
+#[async_trait]
+impl ComponentRequestHandler<BatcherRequest, BatcherResponse> for Batcher {
+    async fn handle_request(&mut self, request: BatcherRequest) -> BatcherResponse {
+        match request {
+            BatcherRequest::ProposeBlock(input) => {
+                BatcherResponse::ProposeBlock(self.propose_block(input).await)
+            }
+            BatcherRequest::GetCurrentHeight => {
+                BatcherResponse::GetCurrentHeight(self.get_height().await)
+            }
+            BatcherRequest::GetProposalContent(input) => {
+                BatcherResponse::GetProposalContent(self.get_proposal_content(input).await)
+            }
+            BatcherRequest::StartHeight(input) => {
+                BatcherResponse::StartHeight(self.start_height(input).await)
+            }
+            BatcherRequest::DecisionReached(input) => {
+                BatcherResponse::DecisionReached(self.decision_reached(input).await.map(Box::new))
+            }
+            BatcherRequest::ValidateBlock(input) => {
+                BatcherResponse::ValidateBlock(self.validate_block(input).await)
+            }
+            BatcherRequest::SendProposalContent(input) => {
+                BatcherResponse::SendProposalContent(self.send_proposal_content(input).await)
+            }
+            BatcherRequest::AddSyncBlock(sync_block) => {
+                BatcherResponse::AddSyncBlock(self.add_sync_block(sync_block).await)
+            }
+            BatcherRequest::RevertBlock(input) => {
+                BatcherResponse::RevertBlock(self.revert_block(input).await)
+            }
+        }
+    }
+}
diff --git a/crates/apollo_batcher/src/config.rs b/crates/apollo_batcher/src/config.rs
new file mode 100644
index 00000000000..3700738a61d
--- /dev/null
+++ b/crates/apollo_batcher/src/config.rs
@@ -0,0 +1,105 @@
+use std::collections::BTreeMap;
+
+use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use blockifier::blockifier::config::ContractClassManagerConfig;
+use serde::{Deserialize, Serialize};
+use validator::{Validate, ValidationError};
+
+use crate::block_builder::BlockBuilderConfig;
+use crate::pre_confirmed_block_writer::PreconfirmedBlockWriterConfig;
+use crate::pre_confirmed_cende_client::PreconfirmedCendeConfig;
+
+/// The batcher related configuration.
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)]
+#[validate(schema(function = "validate_batcher_config"))]
+pub struct BatcherConfig {
+    pub storage: apollo_storage::StorageConfig,
+    pub outstream_content_buffer_size: usize,
+    pub input_stream_content_buffer_size: usize,
+    pub block_builder_config: BlockBuilderConfig,
+    pub pre_confirmed_block_writer_config: PreconfirmedBlockWriterConfig,
+    pub contract_class_manager_config: ContractClassManagerConfig,
+    pub max_l1_handler_txs_per_block_proposal: usize,
+    pub pre_confirmed_cende_config: PreconfirmedCendeConfig,
+}
+
+impl SerializeConfig for BatcherConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        // TODO(yair): create nicer function to append sub configs.
+        let mut dump = BTreeMap::from([
+            ser_param(
+                "outstream_content_buffer_size",
+                &self.outstream_content_buffer_size,
+                "The maximum number of items to include in a single get_proposal_content response.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "input_stream_content_buffer_size",
+                &self.input_stream_content_buffer_size,
+                "Sets the buffer size for the input transaction channel. Adding more transactions \
+                 beyond this limit will block until space is available.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_l1_handler_txs_per_block_proposal",
+                &self.max_l1_handler_txs_per_block_proposal,
+                "The maximum number of L1 handler transactions to include in a block proposal.",
+                ParamPrivacyInput::Public,
+            ),
+        ]);
+        dump.append(&mut prepend_sub_config_name(self.storage.dump(), "storage"));
+        dump.append(&mut prepend_sub_config_name(
+            self.block_builder_config.dump(),
+            "block_builder_config",
+        ));
+        dump.append(&mut prepend_sub_config_name(
+            self.pre_confirmed_block_writer_config.dump(),
+            "pre_confirmed_block_writer_config",
+        ));
+        dump.append(&mut prepend_sub_config_name(
+            self.contract_class_manager_config.dump(),
+            "contract_class_manager_config",
+        ));
+        dump.append(&mut prepend_sub_config_name(
+            self.pre_confirmed_cende_config.dump(),
+            "pre_confirmed_cende_config",
+        ));
+        dump
+    }
+}
+
+impl Default for BatcherConfig {
+    fn default() -> Self {
+        Self {
+            storage: apollo_storage::StorageConfig {
+                db_config: apollo_storage::db::DbConfig {
+                    path_prefix: "/data/batcher".into(),
+                    enforce_file_exists: false,
+                    ..Default::default()
+                },
+                scope: apollo_storage::StorageScope::StateOnly,
+                ..Default::default()
+            },
+            // TODO(AlonH): set a more reasonable default value.
+            outstream_content_buffer_size: 100,
+            input_stream_content_buffer_size: 400,
+            block_builder_config: BlockBuilderConfig::default(),
+            pre_confirmed_block_writer_config: PreconfirmedBlockWriterConfig::default(),
+            contract_class_manager_config: ContractClassManagerConfig::default(),
+            max_l1_handler_txs_per_block_proposal: 3,
+            pre_confirmed_cende_config: PreconfirmedCendeConfig::default(),
+        }
+    }
+}
+
+fn validate_batcher_config(batcher_config: &BatcherConfig) -> Result<(), ValidationError> {
+    if batcher_config.input_stream_content_buffer_size
+        < batcher_config.block_builder_config.n_concurrent_txs
+    {
+        return Err(ValidationError::new(
+            "input_stream_content_buffer_size must be at least n_concurrent_txs",
+        ));
+    }
+    Ok(())
+}
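+
+// Construction sketch (illustrative values, not recommended defaults): the `Validate`
+// derive runs `validate_batcher_config`, so the input buffer must be at least
+// `block_builder_config.n_concurrent_txs`.
+//
+//     use validator::Validate;
+//
+//     let config = BatcherConfig {
+//         input_stream_content_buffer_size: 800,
+//         ..BatcherConfig::default()
+//     };
+//     assert!(config.validate().is_ok());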
diff --git a/crates/apollo_batcher/src/lib.rs b/crates/apollo_batcher/src/lib.rs
new file mode 100644
index 00000000000..0893e3257b9
--- /dev/null
+++ b/crates/apollo_batcher/src/lib.rs
@@ -0,0 +1,23 @@
+pub mod batcher;
+#[cfg(test)]
+mod batcher_test;
+pub mod block_builder;
+#[cfg(test)]
+mod block_builder_test;
+pub mod cende_client_types;
+pub mod communication;
+pub mod config;
+pub mod metrics;
+pub mod pre_confirmed_block_writer;
+pub mod pre_confirmed_cende_client;
+#[cfg(test)]
+mod test_utils;
+mod transaction_executor;
+mod transaction_provider;
+#[cfg(test)]
+mod transaction_provider_test;
+mod utils;
+
+// Re-export so it can be used in the general config of the sequencer node without depending on
+// blockifier.
+pub use blockifier::blockifier_versioned_constants::VersionedConstantsOverrides;
diff --git a/crates/apollo_batcher/src/metrics.rs b/crates/apollo_batcher/src/metrics.rs
new file mode 100644
index 00000000000..fb6bc06a850
--- /dev/null
+++ b/crates/apollo_batcher/src/metrics.rs
@@ -0,0 +1,86 @@
+use apollo_metrics::define_metrics;
+use starknet_api::block::BlockNumber;
+
+define_metrics!(
+    Batcher => {
+        // Heights
+        MetricGauge { STORAGE_HEIGHT, "batcher_storage_height", "The height of the batcher's storage" },
+        MetricGauge { LAST_BATCHED_BLOCK, "batcher_last_batched_block", "The last block received by batching" },
+        MetricGauge { LAST_SYNCED_BLOCK, "batcher_last_synced_block", "The last block received by syncing" },
+        MetricGauge { LAST_PROPOSED_BLOCK, "batcher_last_proposed_block", "The last block proposed by this sequencer" },
+        MetricCounter { REVERTED_BLOCKS, "batcher_reverted_blocks", "Counter of reverted blocks", init = 0 },
+        // Proposals
+        MetricCounter { PROPOSAL_STARTED, "batcher_proposal_started", "Counter of proposals started", init = 0 },
+        MetricCounter { PROPOSAL_SUCCEEDED, "batcher_proposal_succeeded", "Counter of successful proposals", init = 0 },
+        MetricCounter { PROPOSAL_FAILED, "batcher_proposal_failed", "Counter of failed proposals", init = 0 },
+        MetricCounter { PROPOSAL_ABORTED, "batcher_proposal_aborted", "Counter of aborted proposals", init = 0 },
+        // Transactions
+        MetricCounter { BATCHED_TRANSACTIONS, "batcher_batched_transactions", "Counter of batched transactions across all forks", init = 0 },
+        MetricCounter { REJECTED_TRANSACTIONS, "batcher_rejected_transactions", "Counter of rejected transactions", init = 0 },
+        MetricCounter { REVERTED_TRANSACTIONS, "batcher_reverted_transactions", "Counter of reverted transactions across all forks", init = 0 },
+        MetricCounter { SYNCED_TRANSACTIONS, "batcher_synced_transactions", "Counter of synced transactions", init = 0 },
+
+        MetricCounter { FULL_BLOCKS, "batcher_full_blocks", "Counter of blocks closed on full capacity", init = 0 },
+        MetricCounter { PRECONFIRMED_BLOCK_WRITTEN, "batcher_preconfirmed_block_written", "Counter of preconfirmed blocks written to storage", init = 0 },
+    },
+);
+
+pub fn register_metrics(storage_height: BlockNumber) {
+    STORAGE_HEIGHT.register();
+    STORAGE_HEIGHT.set_lossy(storage_height.0);
+    LAST_BATCHED_BLOCK.register();
+    LAST_SYNCED_BLOCK.register();
+    LAST_PROPOSED_BLOCK.register();
+    REVERTED_BLOCKS.register();
+
+    PROPOSAL_STARTED.register();
+    PROPOSAL_SUCCEEDED.register();
+    PROPOSAL_FAILED.register();
+    PROPOSAL_ABORTED.register();
+
+    BATCHED_TRANSACTIONS.register();
+    REJECTED_TRANSACTIONS.register();
+    REVERTED_TRANSACTIONS.register();
+    SYNCED_TRANSACTIONS.register();
+
+    FULL_BLOCKS.register();
+    PRECONFIRMED_BLOCK_WRITTEN.register();
+}
+
+/// A handle to update the proposal metrics when the proposal is created and dropped.
+#[derive(Debug)]
+pub(crate) struct ProposalMetricsHandle {
+    finish_status: ProposalFinishStatus,
+}
+
+impl ProposalMetricsHandle {
+    pub fn new() -> Self {
+        PROPOSAL_STARTED.increment(1);
+        Self { finish_status: ProposalFinishStatus::Failed }
+    }
+
+    pub fn set_succeeded(&mut self) {
+        self.finish_status = ProposalFinishStatus::Succeeded;
+    }
+
+    pub fn set_aborted(&mut self) {
+        self.finish_status = ProposalFinishStatus::Aborted;
+    }
+}
+
+#[derive(Debug)]
+enum ProposalFinishStatus {
+    Succeeded,
+    Aborted,
+    Failed,
+}
+
+impl Drop for ProposalMetricsHandle {
+    fn drop(&mut self) {
+        match self.finish_status {
+            ProposalFinishStatus::Succeeded => PROPOSAL_SUCCEEDED.increment(1),
+            ProposalFinishStatus::Aborted => PROPOSAL_ABORTED.increment(1),
+            ProposalFinishStatus::Failed => PROPOSAL_FAILED.increment(1),
+        }
+    }
+}
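+
+// Usage sketch: the handle reports metrics via RAII. `new` increments PROPOSAL_STARTED,
+// and dropping the handle counts PROPOSAL_FAILED unless a terminal status was set first.
+//
+//     let mut handle = ProposalMetricsHandle::new();
+//     // ... attempt to build the proposal ...
+//     handle.set_succeeded(); // or `set_aborted()`; otherwise the drop counts a failure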
diff --git a/crates/apollo_batcher/src/pre_confirmed_block_writer.rs b/crates/apollo_batcher/src/pre_confirmed_block_writer.rs
new file mode 100644
index 00000000000..ba5bedb169e
--- /dev/null
+++ b/crates/apollo_batcher/src/pre_confirmed_block_writer.rs
@@ -0,0 +1,345 @@
+use std::collections::BTreeMap;
+use std::sync::Arc;
+use std::time::Duration;
+
+use apollo_batcher_types::batcher_types::Round;
+use apollo_config::dumping::{ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use apollo_starknet_client::reader::StateDiff;
+use async_trait::async_trait;
+use futures::stream::FuturesUnordered;
+use futures::StreamExt;
+use indexmap::map::Entry;
+use indexmap::IndexMap;
+#[cfg(test)]
+use mockall::automock;
+use reqwest::StatusCode;
+use serde::{Deserialize, Serialize};
+use starknet_api::block::BlockNumber;
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::transaction::TransactionHash;
+use thiserror::Error;
+use tracing::{error, info};
+
+use crate::cende_client_types::{
+    CendeBlockMetadata,
+    CendePreconfirmedBlock,
+    CendePreconfirmedTransaction,
+    StarknetClientTransactionReceipt,
+};
+use crate::pre_confirmed_cende_client::{
+    CendeWritePreconfirmedBlock,
+    PreconfirmedCendeClientError,
+    PreconfirmedCendeClientTrait,
+};
+
+#[derive(Debug, Error)]
+pub enum BlockWriterError {
+    #[error(transparent)]
+    PreconfirmedCendeClientError(#[from] PreconfirmedCendeClientError),
+}
+
+pub type BlockWriterResult<T> = Result<T, BlockWriterError>;
+
+pub type CandidateTxReceiver = tokio::sync::mpsc::Receiver<Vec<InternalConsensusTransaction>>;
+pub type CandidateTxSender = tokio::sync::mpsc::Sender<Vec<InternalConsensusTransaction>>;
+
+pub type PreconfirmedTxReceiver = tokio::sync::mpsc::Receiver<(
+    InternalConsensusTransaction,
+    StarknetClientTransactionReceipt,
+    StateDiff,
+)>;
+
+pub type PreconfirmedTxSender = tokio::sync::mpsc::Sender<(
+    InternalConsensusTransaction,
+    StarknetClientTransactionReceipt,
+    StateDiff,
+)>;
+
+/// Coordinates the flow of pre-confirmed block data during block proposal.
+/// Listens for transaction updates from the block builder via dedicated channels and utilizes a
+/// Cende client to communicate the updates to the Cende recorder.
+#[async_trait]
+#[cfg_attr(test, automock)]
+pub trait PreconfirmedBlockWriterTrait: Send {
+    async fn run(&mut self) -> BlockWriterResult<()>;
+}
+
+pub struct PreconfirmedBlockWriter {
+    pre_confirmed_block_writer_input: PreconfirmedBlockWriterInput,
+    candidate_tx_receiver: CandidateTxReceiver,
+    pre_confirmed_tx_receiver: PreconfirmedTxReceiver,
+    cende_client: Arc<dyn PreconfirmedCendeClientTrait>,
+    write_block_interval_millis: u64,
+}
+
+impl PreconfirmedBlockWriter {
+    pub fn new(
+        pre_confirmed_block_writer_input: PreconfirmedBlockWriterInput,
+        candidate_tx_receiver: CandidateTxReceiver,
+        pre_confirmed_tx_receiver: PreconfirmedTxReceiver,
+        cende_client: Arc<dyn PreconfirmedCendeClientTrait>,
+        write_block_interval_millis: u64,
+    ) -> Self {
+        Self {
+            pre_confirmed_block_writer_input,
+            candidate_tx_receiver,
+            pre_confirmed_tx_receiver,
+            cende_client,
+            write_block_interval_millis,
+        }
+    }
+
+    fn create_pre_confirmed_block(
+        &self,
+        transactions_map: &IndexMap<
+            TransactionHash,
+            (
+                CendePreconfirmedTransaction,
+                Option<StarknetClientTransactionReceipt>,
+                Option<StateDiff>,
+            ),
+        >,
+        write_iteration: u64,
+    ) -> CendeWritePreconfirmedBlock {
+        let mut transactions = Vec::with_capacity(transactions_map.len());
+        let mut transaction_receipts = Vec::with_capacity(transactions_map.len());
+        let mut transaction_state_diffs = Vec::with_capacity(transactions_map.len());
+
+        for (tx, tx_receipt, tx_state_diff) in transactions_map.values() {
+            transactions.push(tx.clone());
+            transaction_receipts.push(tx_receipt.clone());
+            transaction_state_diffs.push(tx_state_diff.clone());
+        }
+
+        let pre_confirmed_block = CendePreconfirmedBlock {
+            metadata: self.pre_confirmed_block_writer_input.block_metadata.clone(),
+            transactions,
+            transaction_receipts,
+            transaction_state_diffs,
+        };
+
+        CendeWritePreconfirmedBlock {
+            block_number: self.pre_confirmed_block_writer_input.block_number,
+            round: self.pre_confirmed_block_writer_input.round,
+            write_iteration,
+            pre_confirmed_block,
+        }
+    }
+}
+
+#[async_trait]
+impl PreconfirmedBlockWriterTrait for PreconfirmedBlockWriter {
+    async fn run(&mut self) -> BlockWriterResult<()> {
+        let mut transactions_map: IndexMap<
+            TransactionHash,
+            (
+                CendePreconfirmedTransaction,
+                Option<StarknetClientTransactionReceipt>,
+                Option<StateDiff>,
+            ),
+        > = IndexMap::new();
+
+        let mut pending_tasks = FuturesUnordered::new();
+        let mut write_pre_confirmed_txs_timer =
+            tokio::time::interval(Duration::from_millis(self.write_block_interval_millis));
+
+        // We initially mark that we have pending changes so that the client will write to the
+        // Cende recorder that a new proposal round has started.
+        let mut pending_changes = true;
+        let mut next_write_iteration = 0;
+
+        loop {
+            tokio::select! {
+                _ = write_pre_confirmed_txs_timer.tick() => {
+                    // Only send if there are pending changes to avoid unnecessary calls
+                    if pending_changes {
+                        // TODO(noamsp): Extract to a function.
+                        let pre_confirmed_block = self.create_pre_confirmed_block(
+                            &transactions_map,
+                            next_write_iteration,
+                        );
+                        pending_tasks.push(self.cende_client.write_pre_confirmed_block(pre_confirmed_block));
+                        next_write_iteration += 1;
+                        pending_changes = false;
+                    }
+                }
+
+                Some(result) = pending_tasks.next() => {
+                    if let Err(error) = result {
+                        if is_round_mismatch_error(&error, next_write_iteration) {
+                            pending_tasks.clear();
+                            return Err(error.into());
+                        }
+                    }
+                }
+                msg = self.pre_confirmed_tx_receiver.recv() => {
+                    match msg {
+                        Some((tx, tx_receipt, tx_state_diff)) => {
+                            let tx = CendePreconfirmedTransaction::from(tx);
+                            let tx_hash = tx.transaction_hash();
+                            transactions_map.insert(tx_hash, (tx, Some(tx_receipt), Some(tx_state_diff)));
+                            pending_changes = true;
+                        }
+                        None => {
+                            info!("Pre confirmed tx channel closed");
+                            break;
+                        }
+                    }
+                }
+                msg = self.candidate_tx_receiver.recv() => {
+                    match msg {
+                        Some(txs) => {
+                            // Skip transactions that were already executed, to avoid an unnecessary write.
+                            for tx in txs {
+                                let tx = CendePreconfirmedTransaction::from(tx);
+                                match transactions_map.entry(tx.transaction_hash()) {
+                                    Entry::Vacant(entry) => {
+                                        entry.insert((tx, None, None));
+                                        pending_changes = true;
+                                    }
+                                    Entry::Occupied(_) => {}
+                                }
+                            }
+                        }
+                        None => {
+                            info!("Candidate tx channel closed");
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        if pending_changes {
+            let pre_confirmed_block =
+                self.create_pre_confirmed_block(&transactions_map, next_write_iteration);
+            self.cende_client.write_pre_confirmed_block(pre_confirmed_block).await?
+        }
+
+        // Wait for all pending tasks to complete gracefully.
+        // TODO(noamsp): Add timeout.
+        while let Some(result) = pending_tasks.next().await {
+            if let Err(error) = result {
+                if is_round_mismatch_error(&error, next_write_iteration) {
+                    pending_tasks.clear();
+                    return Err(error.into());
+                }
+            }
+        }
+        info!("Pre confirmed block writer finished");
+
+        Ok(())
+    }
+}
+
+fn is_round_mismatch_error(
+    error: &PreconfirmedCendeClientError,
+    next_write_iteration: u64,
+) -> bool {
+    let PreconfirmedCendeClientError::CendeRecorderError {
+        block_number,
+        round,
+        write_iteration,
+        status_code,
+    } = error
+    else {
+        return false;
+    };
+
+    // A bad request status indicates a round or write iteration mismatch. The latest request can
+    // receive a bad request status only if it is due to a round mismatch.
+    if *status_code == StatusCode::BAD_REQUEST && *write_iteration == next_write_iteration - 1 {
+        error!(
+            "A higher round was detected for block_number: {}. rejected round: {}. Stopping \
+             pre-confirmed block writer.",
+            block_number, round,
+        );
+        return true;
+    }
+    false
+}
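+
+// Example: if iterations 0..=3 were sent (so `next_write_iteration` is 4) and the
+// response for iteration 3 is 400 BAD_REQUEST, the writer treats it as a round mismatch
+// and stops; a 400 for an older, stale iteration is ignored.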
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug, Copy)]
+pub struct PreconfirmedBlockWriterConfig {
+    pub channel_buffer_capacity: usize,
+    pub write_block_interval_millis: u64,
+}
+
+impl Default for PreconfirmedBlockWriterConfig {
+    fn default() -> Self {
+        Self { channel_buffer_capacity: 1000, write_block_interval_millis: 50 }
+    }
+}
+
+impl SerializeConfig for PreconfirmedBlockWriterConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        BTreeMap::from_iter([
+            ser_param(
+                "channel_buffer_capacity",
+                &self.channel_buffer_capacity,
+                "The capacity of the channel buffer for receiving pre-confirmed transactions.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "write_block_interval_millis",
+                &self.write_block_interval_millis,
+                "Time interval (ms) between writing pre-confirmed blocks. Writes occur only when \
+                 block data changes.",
+                ParamPrivacyInput::Public,
+            ),
+        ])
+    }
+}
+
+#[cfg_attr(test, automock)]
+pub trait PreconfirmedBlockWriterFactoryTrait: Send + Sync {
+    fn create(
+        &self,
+        block_number: BlockNumber,
+        proposal_round: Round,
+        block_metadata: CendeBlockMetadata,
+    ) -> (Box<dyn PreconfirmedBlockWriterTrait>, CandidateTxSender, PreconfirmedTxSender);
+}
+
+pub struct PreconfirmedBlockWriterFactory {
+    pub config: PreconfirmedBlockWriterConfig,
+    pub cende_client: Arc<dyn PreconfirmedCendeClientTrait>,
+}
+
+impl PreconfirmedBlockWriterFactoryTrait for PreconfirmedBlockWriterFactory {
+    fn create(
+        &self,
+        block_number: BlockNumber,
+        round: Round,
+        block_metadata: CendeBlockMetadata,
+    ) -> (Box<dyn PreconfirmedBlockWriterTrait>, CandidateTxSender, PreconfirmedTxSender) {
+        // Initialize channels for communication between the pre confirmed block writer and the
+        // block builder.
+        let (pre_confirmed_tx_sender, pre_confirmed_tx_receiver) =
+            tokio::sync::mpsc::channel(self.config.channel_buffer_capacity);
+        let (candidate_tx_sender, candidate_tx_receiver) =
+            tokio::sync::mpsc::channel(self.config.channel_buffer_capacity);
+
+        let cende_client = self.cende_client.clone();
+
+        let pre_confirmed_block_writer_input =
+            PreconfirmedBlockWriterInput { block_number, round, block_metadata };
+
+        let pre_confirmed_block_writer = Box::new(PreconfirmedBlockWriter::new(
+            pre_confirmed_block_writer_input,
+            candidate_tx_receiver,
+            pre_confirmed_tx_receiver,
+            cende_client,
+            self.config.write_block_interval_millis,
+        ));
+        (pre_confirmed_block_writer, candidate_tx_sender, pre_confirmed_tx_sender)
+    }
+}
+
+// TODO(noamsp): find a better name for this struct.
+pub struct PreconfirmedBlockWriterInput {
+    pub block_number: BlockNumber,
+    pub round: Round,
+    pub block_metadata: CendeBlockMetadata,
+}
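+
+// Wiring sketch (assuming some `cende_client: Arc<dyn PreconfirmedCendeClientTrait>` and
+// block metadata are already available):
+//
+//     let factory = PreconfirmedBlockWriterFactory { config: Default::default(), cende_client };
+//     let (mut writer, candidate_tx_sender, pre_confirmed_tx_sender) =
+//         factory.create(block_number, round, block_metadata);
+//     tokio::spawn(async move { writer.run().await });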
diff --git a/crates/apollo_batcher/src/pre_confirmed_cende_client.rs b/crates/apollo_batcher/src/pre_confirmed_cende_client.rs
new file mode 100644
index 00000000000..932f958cd1b
--- /dev/null
+++ b/crates/apollo_batcher/src/pre_confirmed_cende_client.rs
@@ -0,0 +1,146 @@
+use std::collections::BTreeMap;
+
+use apollo_batcher_types::batcher_types::Round;
+use apollo_config::dumping::{ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use async_trait::async_trait;
+use reqwest::{Client, StatusCode};
+use serde::{Deserialize, Serialize};
+use starknet_api::block::BlockNumber;
+use thiserror::Error;
+use tracing::{debug, error, trace, warn};
+use url::Url;
+
+use crate::cende_client_types::CendePreconfirmedBlock;
+use crate::metrics::PRECONFIRMED_BLOCK_WRITTEN;
+
+#[derive(Debug, Error)]
+pub enum PreconfirmedCendeClientError {
+    #[error(transparent)]
+    RequestError(#[from] reqwest::Error),
+    #[error(
+        "Cende recorder returned an error. block_number: {block_number}, round: {round}, \
+         write_iteration: {write_iteration}, status_code: {status_code}."
+    )]
+    CendeRecorderError {
+        block_number: BlockNumber,
+        round: Round,
+        write_iteration: u64,
+        status_code: StatusCode,
+    },
+}
+
+pub type PreconfirmedCendeClientResult<T> = Result<T, PreconfirmedCendeClientError>;
+
+/// Interface for communicating pre-confirmed block data to the Cende recorder during block
+/// proposal.
+#[async_trait]
+pub trait PreconfirmedCendeClientTrait: Send + Sync {
+    /// Notifies the Cende recorder about a pre-confirmed block update.
+    async fn write_pre_confirmed_block(
+        &self,
+        pre_confirmed_block: CendeWritePreconfirmedBlock,
+    ) -> PreconfirmedCendeClientResult<()>;
+}
+
+pub struct PreconfirmedCendeClient {
+    write_pre_confirmed_block_url: Url,
+    client: Client,
+}
+
+// The endpoints for the Cende recorder.
+pub const RECORDER_WRITE_PRE_CONFIRMED_BLOCK_PATH: &str =
+    "/cende_recorder/write_pre_confirmed_block";
+
+impl PreconfirmedCendeClient {
+    pub fn new(config: PreconfirmedCendeConfig) -> Self {
+        let recorder_url = config.recorder_url;
+
+        Self {
+            write_pre_confirmed_block_url: recorder_url
+                .join(RECORDER_WRITE_PRE_CONFIRMED_BLOCK_PATH)
+                .expect("Failed to construct URL"),
+            client: Client::new(),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct PreconfirmedCendeConfig {
+    pub recorder_url: Url,
+}
+
+impl Default for PreconfirmedCendeConfig {
+    fn default() -> Self {
+        Self {
+            recorder_url: "https://recorder_url"
+                .parse()
+                .expect("recorder_url must be a valid Recorder URL"),
+        }
+    }
+}
+
+impl SerializeConfig for PreconfirmedCendeConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        BTreeMap::from([ser_param(
+            "recorder_url",
+            &self.recorder_url,
+            "The URL of the Pythonic cende_recorder",
+            ParamPrivacyInput::Private,
+        )])
+    }
+}
+
+#[derive(Serialize)]
+pub struct CendeWritePreconfirmedBlock {
+    pub block_number: BlockNumber,
+    pub round: Round,
+    pub write_iteration: u64,
+    pub pre_confirmed_block: CendePreconfirmedBlock,
+}
+
+#[async_trait]
+impl PreconfirmedCendeClientTrait for PreconfirmedCendeClient {
+    async fn write_pre_confirmed_block(
+        &self,
+        pre_confirmed_block: CendeWritePreconfirmedBlock,
+    ) -> PreconfirmedCendeClientResult<()> {
+        let block_number = pre_confirmed_block.block_number;
+        let round = pre_confirmed_block.round;
+        let write_iteration = pre_confirmed_block.write_iteration;
+        let number_of_txs = pre_confirmed_block.pre_confirmed_block.transactions.len();
+
+        let request_builder =
+            self.client.post(self.write_pre_confirmed_block_url.clone()).json(&pre_confirmed_block);
+
+        trace!(
+            "Sending write_pre_confirmed_block request to Cende recorder. \
+             block_number={block_number}, round={round}, write_iteration={write_iteration}. The \
+             block contains {number_of_txs} transactions.",
+        );
+
+        let response = request_builder.send().await?;
+
+        let response_status = response.status();
+        if response_status.is_success() {
+            debug!(
+                "write_pre_confirmed_block request succeeded. block_number={block_number}, \
+                 round={round}, write_iteration={write_iteration}, status={response_status}",
+            );
+            PRECONFIRMED_BLOCK_WRITTEN.increment(1);
+            Ok(())
+        } else {
+            warn!(
+                "write_pre_confirmed_block request failed. block_number={block_number}, \
+                 round={round}, write_iteration={write_iteration}, status={response_status}",
+            );
+
+            return Err(PreconfirmedCendeClientError::CendeRecorderError {
+                block_number,
+                round,
+                write_iteration,
+                status_code: response_status,
+            });
+        }
+    }
+}
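+
+// Call shape sketch: the client POSTs the payload as JSON to
+// `<recorder_url>/cende_recorder/write_pre_confirmed_block` and maps any non-success
+// status to `PreconfirmedCendeClientError::CendeRecorderError`.
+//
+//     let client = PreconfirmedCendeClient::new(PreconfirmedCendeConfig::default());
+//     client.write_pre_confirmed_block(payload).await?;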
diff --git a/crates/apollo_batcher/src/test_utils.rs b/crates/apollo_batcher/src/test_utils.rs
new file mode 100644
index 00000000000..b54a1c2d3d1
--- /dev/null
+++ b/crates/apollo_batcher/src/test_utils.rs
@@ -0,0 +1,152 @@
+use std::ops::Range;
+
+use async_trait::async_trait;
+use blockifier::bouncer::{BouncerWeights, CasmHashComputationData};
+use blockifier::fee::receipt::TransactionReceipt;
+use blockifier::state::cached_state::CommitmentStateDiff;
+use blockifier::transaction::objects::TransactionExecutionInfo;
+use indexmap::IndexMap;
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::execution_resources::GasAmount;
+use starknet_api::test_utils::invoke::{internal_invoke_tx, InvokeTxArgs};
+use starknet_api::test_utils::l1_handler::{executable_l1_handler_tx, L1HandlerTxArgs};
+use starknet_api::transaction::fields::Fee;
+use starknet_api::transaction::TransactionHash;
+use starknet_api::{class_hash, contract_address, nonce, tx_hash};
+use tokio::sync::mpsc::UnboundedSender;
+
+use crate::block_builder::{
+    BlockBuilderResult,
+    BlockBuilderTrait,
+    BlockExecutionArtifacts,
+    BlockTransactionExecutionData,
+};
+use crate::transaction_provider::TransactionProvider;
+
+pub const EXECUTION_INFO_LEN: usize = 10;
+pub const DUMMY_FINAL_N_EXECUTED_TXS: usize = 12;
+
+// A fake block builder for validate flow, that fetches transactions from the transaction provider
+// until it is exhausted.
+// This ensures the block builder (and specifically the tx_provider) is not dropped before all
+// transactions are processed. Otherwise, the batcher would fail during tests when attempting to
+// send transactions to it.
+pub(crate) struct FakeValidateBlockBuilder {
+    pub tx_provider: Box<dyn TransactionProvider>,
+    pub build_block_result: Option<BlockBuilderResult<BlockExecutionArtifacts>>,
+}
+
+#[async_trait]
+impl BlockBuilderTrait for FakeValidateBlockBuilder {
+    async fn build_block(&mut self) -> BlockBuilderResult<BlockExecutionArtifacts> {
+        // build_block should be called only once, so we can safely take the result.
+        let build_block_result = self.build_block_result.take().unwrap();
+
+        if build_block_result.is_ok() {
+            while self.tx_provider.get_final_n_executed_txs().await.is_none() {
+                self.tx_provider.get_txs(1).await.unwrap();
+                tokio::task::yield_now().await;
+            }
+        }
+        build_block_result
+    }
+}
+
+// A fake block builder for propose flow, that sends the given transactions to the output content
+// sender.
+pub(crate) struct FakeProposeBlockBuilder {
+    pub output_content_sender: UnboundedSender<InternalConsensusTransaction>,
+    pub output_txs: Vec<InternalConsensusTransaction>,
+    pub build_block_result: Option<BlockBuilderResult<BlockExecutionArtifacts>>,
+}
+
+#[async_trait]
+impl BlockBuilderTrait for FakeProposeBlockBuilder {
+    async fn build_block(&mut self) -> BlockBuilderResult<BlockExecutionArtifacts> {
+        for tx in &self.output_txs {
+            self.output_content_sender.send(tx.clone()).unwrap();
+        }
+
+        // build_block should be called only once, so we can safely take the result.
+        self.build_block_result.take().unwrap()
+    }
+}
+
+pub fn test_txs(tx_hash_range: Range<usize>) -> Vec<InternalConsensusTransaction> {
+    tx_hash_range
+        .map(|i| {
+            InternalConsensusTransaction::RpcTransaction(internal_invoke_tx(InvokeTxArgs {
+                tx_hash: tx_hash!(i),
+                ..Default::default()
+            }))
+        })
+        .collect()
+}
+
+pub fn test_l1_handler_txs(tx_hash_range: Range<usize>) -> Vec<InternalConsensusTransaction> {
+    tx_hash_range
+        .map(|i| {
+            InternalConsensusTransaction::L1Handler(executable_l1_handler_tx(L1HandlerTxArgs {
+                tx_hash: tx_hash!(i),
+                ..Default::default()
+            }))
+        })
+        .collect()
+}
+
+// Create `execution_infos` with an indexed field to enable verification of the order.
+fn indexed_execution_infos() -> IndexMap<TransactionHash, TransactionExecutionInfo> {
+    test_txs(0..EXECUTION_INFO_LEN)
+        .iter()
+        .enumerate()
+        .map(|(i, tx)| {
+            (
+                tx.tx_hash(),
+                TransactionExecutionInfo {
+                    receipt: TransactionReceipt {
+                        fee: Fee(i.try_into().unwrap()),
+                        ..Default::default()
+                    },
+                    ..Default::default()
+                },
+            )
+        })
+        .collect()
+}
+
+// Verify that `execution_infos` was initiated with indexed fields.
+pub fn verify_indexed_execution_infos(
+    execution_infos: &IndexMap<TransactionHash, TransactionExecutionInfo>,
+) {
+    for (i, execution_info) in execution_infos.iter().enumerate() {
+        assert_eq!(execution_info.1.receipt.fee, Fee(i.try_into().unwrap()));
+    }
+}
+
+impl BlockExecutionArtifacts {
+    pub fn create_for_testing() -> Self {
+        // Use a non-empty commitment_state_diff to get a valuable test verification of the result.
+        Self {
+            execution_data: BlockTransactionExecutionData {
+                execution_infos: indexed_execution_infos(),
+                rejected_tx_hashes: test_txs(10..15).iter().map(|tx| tx.tx_hash()).collect(),
+                consumed_l1_handler_tx_hashes: Default::default(),
+            },
+            commitment_state_diff: CommitmentStateDiff {
+                address_to_class_hash: IndexMap::from_iter([(
+                    contract_address!("0x7"),
+                    class_hash!("0x11111111"),
+                )]),
+                storage_updates: IndexMap::new(),
+                class_hash_to_compiled_class_hash: IndexMap::new(),
+                address_to_nonce: IndexMap::from_iter([(contract_address!("0x7"), nonce!(1_u64))]),
+            },
+            compressed_state_diff: Default::default(),
+            bouncer_weights: BouncerWeights::empty(),
+            l2_gas_used: GasAmount::default(),
+            casm_hash_computation_data_sierra_gas: CasmHashComputationData::empty(),
+            casm_hash_computation_data_proving_gas: CasmHashComputationData::empty(),
+            final_n_executed_txs: DUMMY_FINAL_N_EXECUTED_TXS,
+        }
+    }
+}
diff --git a/crates/apollo_batcher/src/transaction_executor.rs b/crates/apollo_batcher/src/transaction_executor.rs
new file mode 100644
index 00000000000..2e857996d93
--- /dev/null
+++ b/crates/apollo_batcher/src/transaction_executor.rs
@@ -0,0 +1,68 @@
+use blockifier::blockifier::concurrent_transaction_executor::ConcurrentTransactionExecutor;
+use blockifier::blockifier::transaction_executor::{
+    BlockExecutionSummary,
+    TransactionExecutionOutput,
+    TransactionExecutorResult,
+};
+use blockifier::state::state_api::StateReader;
+use blockifier::transaction::transaction_execution::Transaction as BlockifierTransaction;
+#[cfg(test)]
+use mockall::automock;
+
+#[cfg_attr(test, automock)]
+pub trait TransactionExecutorTrait: Send {
+    /// Starts executing the given transactions.
+    fn add_txs_to_block(&mut self, txs: &[BlockifierTransaction]);
+
+    /// Returns the new execution results of the transactions that were executed so far, starting
+    /// from the last call to `get_new_results`.
+    fn get_new_results(&mut self) -> Vec<TransactionExecutorResult<TransactionExecutionOutput>>;
+
+    /// Returns true if the block is full or the deadline is reached.
+    fn is_done(&self) -> bool;
+
+    /// Finalizes the block creation and returns the commitment state diff, visited
+    /// segments mapping and bouncer.
+    ///
+    /// Every block must be closed with either `close_block` or `abort_block`.
+    #[allow(clippy::result_large_err)]
+    fn close_block(
+        &mut self,
+        final_n_executed_txs: usize,
+    ) -> TransactionExecutorResult<BlockExecutionSummary>;
+
+    /// Notifies the transaction executor that the block is aborted.
+    /// This allows the worker threads to continue to the next block.
+    ///
+    /// Every block must be closed with either `close_block` or `abort_block`.
+    fn abort_block(&mut self);
+}
+
+/// See [TransactionExecutorTrait] for documentation.
+impl<S: StateReader + Send + Sync + 'static> TransactionExecutorTrait
+    for ConcurrentTransactionExecutor<S>
+{
+    fn add_txs_to_block(&mut self, txs: &[BlockifierTransaction]) {
+        self.add_txs(txs);
+    }
+
+    fn get_new_results(&mut self) -> Vec<TransactionExecutorResult<TransactionExecutionOutput>> {
+        ConcurrentTransactionExecutor::get_new_results(self)
+    }
+
+    fn is_done(&self) -> bool {
+        ConcurrentTransactionExecutor::is_done(self)
+    }
+
+    #[allow(clippy::result_large_err)]
+    fn close_block(
+        &mut self,
+        final_n_executed_txs: usize,
+    ) -> TransactionExecutorResult<BlockExecutionSummary> {
+        ConcurrentTransactionExecutor::close_block(self, final_n_executed_txs)
+    }
+
+    fn abort_block(&mut self) {
+        ConcurrentTransactionExecutor::abort_block(self)
+    }
+}
diff --git a/crates/apollo_batcher/src/transaction_provider.rs b/crates/apollo_batcher/src/transaction_provider.rs
new file mode 100644
index 00000000000..fb34f8324cf
--- /dev/null
+++ b/crates/apollo_batcher/src/transaction_provider.rs
@@ -0,0 +1,197 @@
+use std::cmp::min;
+use std::vec;
+
+use apollo_l1_provider_types::errors::L1ProviderClientError;
+use apollo_l1_provider_types::{
+    InvalidValidationStatus as L1InvalidValidationStatus,
+    SharedL1ProviderClient,
+    ValidationStatus as L1ValidationStatus,
+};
+use apollo_mempool_types::communication::{MempoolClientError, SharedMempoolClient};
+use async_trait::async_trait;
+#[cfg(test)]
+use mockall::automock;
+use starknet_api::block::BlockNumber;
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::transaction::TransactionHash;
+use thiserror::Error;
+
+type TransactionProviderResult<T> = Result<T, TransactionProviderError>;
+
+#[derive(Clone, Debug, Error)]
+pub enum TransactionProviderError {
+    #[error(transparent)]
+    MempoolError(#[from] MempoolClientError),
+    #[error(
+        "L1Handler transaction validation failed for tx with hash {} status {:?}.",
+        tx_hash.0.to_hex_string(),
+        validation_status
+    )]
+    L1HandlerTransactionValidationFailed {
+        tx_hash: TransactionHash,
+        validation_status: L1InvalidValidationStatus,
+    },
+    #[error(transparent)]
+    L1ProviderError(#[from] L1ProviderClientError),
+}
+
+pub type NextTxs = Vec<InternalConsensusTransaction>;
+
+#[cfg_attr(test, automock)]
+#[async_trait]
+pub trait TransactionProvider: Send {
+    async fn get_txs(&mut self, n_txs: usize) -> TransactionProviderResult<NextTxs>;
+    /// In validate mode ([ValidateTransactionProvider]) returns the final number of transactions
+    /// in the block once it is known, or `None` if it is not known yet.
+    /// Once `Some` is returned for the first time, future calls to this method may return `None`.
+    /// Returns `None` in propose mode ([ProposeTransactionProvider]).
+    async fn get_final_n_executed_txs(&mut self) -> Option<usize>;
+}
+
+#[derive(Clone)]
+pub struct ProposeTransactionProvider {
+    pub mempool_client: SharedMempoolClient,
+    pub l1_provider_client: SharedL1ProviderClient,
+    pub max_l1_handler_txs_per_block: usize,
+    pub height: BlockNumber,
+    phase: TxProviderPhase,
+    n_l1handler_txs_so_far: usize,
+}
+
+// Keeps track of whether we need to fetch L1 handler transactions or mempool transactions.
+#[derive(Clone, Debug, PartialEq)]
+enum TxProviderPhase {
+    L1,
+    Mempool,
+}
+
+impl ProposeTransactionProvider {
+    pub fn new(
+        mempool_client: SharedMempoolClient,
+        l1_provider_client: SharedL1ProviderClient,
+        max_l1_handler_txs_per_block: usize,
+        height: BlockNumber,
+    ) -> Self {
+        Self {
+            mempool_client,
+            l1_provider_client,
+            max_l1_handler_txs_per_block,
+            height,
+            phase: TxProviderPhase::L1,
+            n_l1handler_txs_so_far: 0,
+        }
+    }
+
+    async fn get_l1_handler_txs(
+        &mut self,
+        n_txs: usize,
+    ) -> TransactionProviderResult<Vec<InternalConsensusTransaction>> {
+        Ok(self
+            .l1_provider_client
+            .get_txs(n_txs, self.height)
+            .await?
+            .into_iter()
+            .map(InternalConsensusTransaction::L1Handler)
+            .collect())
+    }
+
+    async fn get_mempool_txs(
+        &mut self,
+        n_txs: usize,
+    ) -> TransactionProviderResult<Vec<InternalConsensusTransaction>> {
+        Ok(self
+            .mempool_client
+            .get_txs(n_txs)
+            .await?
+            .into_iter()
+            .map(InternalConsensusTransaction::RpcTransaction)
+            .collect())
+    }
+}
+
+#[async_trait]
+impl TransactionProvider for ProposeTransactionProvider {
+    async fn get_txs(&mut self, n_txs: usize) -> TransactionProviderResult<NextTxs> {
+        assert!(n_txs > 0, "The number of transactions requested must be greater than zero.");
+        let mut txs = vec![];
+        if self.phase == TxProviderPhase::L1 {
+            let n_l1handler_txs_to_get =
+                min(self.max_l1_handler_txs_per_block - self.n_l1handler_txs_so_far, n_txs);
+            let mut l1handler_txs = self.get_l1_handler_txs(n_l1handler_txs_to_get).await?;
+            self.n_l1handler_txs_so_far += l1handler_txs.len();
+
+            // Determine whether we need to switch to mempool phase.
+            let no_more_l1handler_in_provider = l1handler_txs.len() < n_l1handler_txs_to_get;
+            let reached_max_l1handler_txs_in_block =
+                self.n_l1handler_txs_so_far == self.max_l1_handler_txs_per_block;
+            if no_more_l1handler_in_provider || reached_max_l1handler_txs_in_block {
+                self.phase = TxProviderPhase::Mempool;
+            }
+
+            txs.append(&mut l1handler_txs);
+            if txs.len() == n_txs {
+                return Ok(txs);
+            }
+        }
+
+        let mut mempool_txs = self.get_mempool_txs(n_txs - txs.len()).await?;
+        txs.append(&mut mempool_txs);
+        Ok(txs)
+    }
+
+    async fn get_final_n_executed_txs(&mut self) -> Option<usize> {
+        None
+    }
+}
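+
+// Fetch-order example (as exercised by the tests in transaction_provider_test.rs): with
+// `max_l1_handler_txs_per_block = 15` and fetches of 10, the first call returns 10 L1
+// handler txs, the second returns the remaining 5 L1 handler txs plus 5 mempool txs, and
+// later calls are mempool-only.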
+
+pub struct ValidateTransactionProvider {
+    tx_receiver: tokio::sync::mpsc::Receiver<InternalConsensusTransaction>,
+    final_n_executed_txs_receiver: tokio::sync::oneshot::Receiver<usize>,
+    l1_provider_client: SharedL1ProviderClient,
+    height: BlockNumber,
+}
+
+impl ValidateTransactionProvider {
+    pub fn new(
+        tx_receiver: tokio::sync::mpsc::Receiver<InternalConsensusTransaction>,
+        final_n_executed_txs_receiver: tokio::sync::oneshot::Receiver<usize>,
+        l1_provider_client: SharedL1ProviderClient,
+        height: BlockNumber,
+    ) -> Self {
+        Self { tx_receiver, final_n_executed_txs_receiver, l1_provider_client, height }
+    }
+}
+
+#[async_trait]
+impl TransactionProvider for ValidateTransactionProvider {
+    async fn get_txs(&mut self, n_txs: usize) -> TransactionProviderResult<NextTxs> {
+        assert!(n_txs > 0, "The number of transactions requested must be greater than zero.");
+
+        if self.tx_receiver.is_empty() {
+            // Return immediately to avoid blocking the caller.
+            return Ok(vec![]);
+        }
+
+        let mut buffer = Vec::with_capacity(n_txs);
+        self.tx_receiver.recv_many(&mut buffer, n_txs).await;
+
+        for tx in &buffer {
+            if let InternalConsensusTransaction::L1Handler(tx) = tx {
+                let l1_validation_status =
+                    self.l1_provider_client.validate(tx.tx_hash, self.height).await?;
+                if let L1ValidationStatus::Invalid(validation_status) = l1_validation_status {
+                    return Err(TransactionProviderError::L1HandlerTransactionValidationFailed {
+                        tx_hash: tx.tx_hash,
+                        validation_status,
+                    });
+                }
+            }
+        }
+        Ok(buffer)
+    }
+
+    async fn get_final_n_executed_txs(&mut self) -> Option<usize> {
+        // Return None if the receiver is empty or closed unexpectedly.
+        self.final_n_executed_txs_receiver.try_recv().ok()
+    }
+}
diff --git a/crates/apollo_batcher/src/transaction_provider_test.rs b/crates/apollo_batcher/src/transaction_provider_test.rs
new file mode 100644
index 00000000000..caa4d97f92d
--- /dev/null
+++ b/crates/apollo_batcher/src/transaction_provider_test.rs
@@ -0,0 +1,277 @@
+use std::sync::Arc;
+
+use apollo_l1_provider_types::{
+    InvalidValidationStatus,
+    MockL1ProviderClient,
+    ValidationStatus as L1ValidationStatus,
+};
+use apollo_mempool_types::communication::MockMempoolClient;
+use assert_matches::assert_matches;
+use mockall::predicate::eq;
+use rstest::{fixture, rstest};
+use starknet_api::block::BlockNumber;
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::executable_transaction::L1HandlerTransaction;
+use starknet_api::test_utils::invoke::{internal_invoke_tx, InvokeTxArgs};
+use starknet_api::tx_hash;
+
+use crate::transaction_provider::{
+    ProposeTransactionProvider,
+    TransactionProvider,
+    TransactionProviderError,
+    ValidateTransactionProvider,
+};
+
+const MAX_L1_HANDLER_TXS_PER_BLOCK: usize = 15;
+const HEIGHT: BlockNumber = BlockNumber(1);
+const MAX_TXS_PER_FETCH: usize = 10;
+const VALIDATE_BUFFER_SIZE: usize = 30;
+
+struct MockDependencies {
+    mempool_client: MockMempoolClient,
+    l1_provider_client: MockL1ProviderClient,
+    tx_sender: tokio::sync::mpsc::Sender<InternalConsensusTransaction>,
+    tx_receiver: tokio::sync::mpsc::Receiver<InternalConsensusTransaction>,
+    final_n_executed_txs_sender: tokio::sync::oneshot::Sender<usize>,
+    final_n_executed_txs_receiver: tokio::sync::oneshot::Receiver<usize>,
+}
+
+impl MockDependencies {
+    fn expect_get_l1_handler_txs(&mut self, n_to_request: usize, n_to_return: usize) {
+        self.l1_provider_client
+            .expect_get_txs()
+            .with(eq(n_to_request), eq(HEIGHT))
+            .returning(move |_, _| Ok(vec![L1HandlerTransaction::default(); n_to_return]));
+    }
+
+    fn expect_get_mempool_txs(&mut self, n_to_request: usize) {
+        self.mempool_client.expect_get_txs().with(eq(n_to_request)).returning(move |n_requested| {
+            Ok(vec![internal_invoke_tx(InvokeTxArgs::default()); n_requested])
+        });
+    }
+
+    fn expect_validate_l1handler(&mut self, tx: L1HandlerTransaction, result: L1ValidationStatus) {
+        self.l1_provider_client
+            .expect_validate()
+            .withf(move |tx_arg, height| tx_arg == &tx.tx_hash && *height == HEIGHT)
+            .returning(move |_, _| Ok(result));
+    }
+
+    async fn simulate_input_txs(&mut self, txs: Vec<InternalConsensusTransaction>) {
+        for tx in txs {
+            self.tx_sender.send(tx).await.unwrap();
+        }
+    }
+
+    fn propose_tx_provider(self) -> ProposeTransactionProvider {
+        ProposeTransactionProvider::new(
+            Arc::new(self.mempool_client),
+            Arc::new(self.l1_provider_client),
+            MAX_L1_HANDLER_TXS_PER_BLOCK,
+            HEIGHT,
+        )
+    }
+
+    fn validate_tx_provider(self) -> ValidateTransactionProvider {
+        self.validate_tx_provider_with_final_n_executed_txs().0
+    }
+
+    fn validate_tx_provider_with_final_n_executed_txs(
+        self,
+    ) -> (ValidateTransactionProvider, tokio::sync::oneshot::Sender<usize>) {
+        let validate_tx_provider = ValidateTransactionProvider::new(
+            self.tx_receiver,
+            self.final_n_executed_txs_receiver,
+            Arc::new(self.l1_provider_client),
+            HEIGHT,
+        );
+        (validate_tx_provider, self.final_n_executed_txs_sender)
+    }
+}
+
+#[fixture]
+fn mock_dependencies(
+    tx_channel: (
+        tokio::sync::mpsc::Sender<InternalConsensusTransaction>,
+        tokio::sync::mpsc::Receiver<InternalConsensusTransaction>,
+    ),
+    final_n_executed_txs_channel: (
+        tokio::sync::oneshot::Sender<usize>,
+        tokio::sync::oneshot::Receiver<usize>,
+    ),
+) -> MockDependencies {
+    let (tx_sender, tx_receiver) = tx_channel;
+    let (final_n_executed_txs_sender, final_n_executed_txs_receiver) =
+        final_n_executed_txs_channel;
+    MockDependencies {
+        mempool_client: MockMempoolClient::new(),
+        l1_provider_client: MockL1ProviderClient::new(),
+        tx_sender,
+        tx_receiver,
+        final_n_executed_txs_sender,
+        final_n_executed_txs_receiver,
+    }
+}
+
+#[fixture]
+fn tx_channel() -> (
+    tokio::sync::mpsc::Sender<InternalConsensusTransaction>,
+    tokio::sync::mpsc::Receiver<InternalConsensusTransaction>,
+) {
+    tokio::sync::mpsc::channel(VALIDATE_BUFFER_SIZE)
+}
+
+#[fixture]
+fn final_n_executed_txs_channel()
+-> (tokio::sync::oneshot::Sender<usize>, tokio::sync::oneshot::Receiver<usize>) {
+    tokio::sync::oneshot::channel()
+}
+
+fn test_l1handler_tx() -> L1HandlerTransaction {
+    L1HandlerTransaction { tx_hash: tx_hash!(1), ..Default::default() }
+}
+
+#[rstest]
+#[tokio::test]
+async fn fill_max_l1_handler(mut mock_dependencies: MockDependencies) {
+    // Set values so fetches will be done in multiple steps:
+    // 1. Fetch 10 l1 handler transactions.
+    // 2. Fetch 5 l1 handler transactions (reach max_l1_handler_txs_per_block) + 5 mempool txs.
+    // 3. Fetch 10 mempool txs.
+    mock_dependencies.expect_get_l1_handler_txs(MAX_TXS_PER_FETCH, MAX_TXS_PER_FETCH);
+    let n_l1handler_left = MAX_L1_HANDLER_TXS_PER_BLOCK - MAX_TXS_PER_FETCH;
+    mock_dependencies.expect_get_l1_handler_txs(n_l1handler_left, n_l1handler_left);
+    let n_mempool_left_after_l1 = MAX_TXS_PER_FETCH - n_l1handler_left;
+    mock_dependencies.expect_get_mempool_txs(n_mempool_left_after_l1);
+    mock_dependencies.expect_get_mempool_txs(MAX_TXS_PER_FETCH);
+
+    let mut tx_provider = mock_dependencies.propose_tx_provider();
+
+    let txs = tx_provider.get_txs(MAX_TXS_PER_FETCH).await.unwrap();
+    let data = assert_matches!(txs, txs if txs.len() == MAX_TXS_PER_FETCH => txs);
+    assert!(data.iter().all(|tx| matches!(tx, InternalConsensusTransaction::L1Handler(_))));
+
+    let txs = tx_provider.get_txs(MAX_TXS_PER_FETCH).await.unwrap();
+    let data = assert_matches!(txs, txs if txs.len() == MAX_TXS_PER_FETCH => txs);
+    assert!(
+        data[..n_l1handler_left]
+            .iter()
+            .all(|tx| matches!(tx, InternalConsensusTransaction::L1Handler(_)))
+    );
+    assert!(
+        data[n_l1handler_left..]
+            .iter()
+            .all(|tx| matches!(tx, InternalConsensusTransaction::RpcTransaction(_)))
+    );
+
+    let txs = tx_provider.get_txs(MAX_TXS_PER_FETCH).await.unwrap();
+    let data = assert_matches!(txs, txs if txs.len() == MAX_TXS_PER_FETCH => txs);
+    assert!(data.iter().all(|tx| matches!(tx, InternalConsensusTransaction::RpcTransaction(_))));
+}
+
+#[rstest]
+#[tokio::test]
+async fn no_more_l1_handler(mut mock_dependencies: MockDependencies) {
+    // Request more l1 handler transactions than the provider can provide.
+    // Expecting the following behavior:
+    // 1. Request 10 l1 handler transactions, get 5 (no more l1 handler txs from provider).
+    //    Request 5 more from mempool.
+    // 2. Request 10 mempool transactions.
+    const NUM_L1_HANDLER_TXS_IN_PROVIDER: usize = 5;
+
+    mock_dependencies.expect_get_l1_handler_txs(MAX_TXS_PER_FETCH, NUM_L1_HANDLER_TXS_IN_PROVIDER);
+    let n_mempool_left = MAX_TXS_PER_FETCH - NUM_L1_HANDLER_TXS_IN_PROVIDER;
+    mock_dependencies.expect_get_mempool_txs(n_mempool_left);
+    mock_dependencies.expect_get_mempool_txs(MAX_TXS_PER_FETCH);
+
+    let mut tx_provider = mock_dependencies.propose_tx_provider();
+
+    let txs = tx_provider.get_txs(MAX_TXS_PER_FETCH).await.unwrap();
+    let data = assert_matches!(txs, txs if txs.len() == MAX_TXS_PER_FETCH => txs);
+    assert!(
+        data[..NUM_L1_HANDLER_TXS_IN_PROVIDER]
+            .iter()
+            .all(|tx| matches!(tx, InternalConsensusTransaction::L1Handler(_)))
+    );
+    assert!(
+        data[NUM_L1_HANDLER_TXS_IN_PROVIDER..]
+            .iter()
+            .all(|tx| { matches!(tx, InternalConsensusTransaction::RpcTransaction(_)) })
+    );
+
+    let txs = tx_provider.get_txs(MAX_TXS_PER_FETCH).await.unwrap();
+    let data = assert_matches!(txs, txs if txs.len() == MAX_TXS_PER_FETCH => txs);
+    assert!(data.iter().all(|tx| matches!(tx, InternalConsensusTransaction::RpcTransaction(_))));
+}
+
+#[rstest]
+#[tokio::test]
+async fn validate_flow(mut mock_dependencies: MockDependencies) {
+    let test_tx = test_l1handler_tx();
+    mock_dependencies.expect_validate_l1handler(test_tx.clone(), L1ValidationStatus::Validated);
+    mock_dependencies
+        .simulate_input_txs(vec![
+            InternalConsensusTransaction::L1Handler(test_tx),
+            InternalConsensusTransaction::RpcTransaction(internal_invoke_tx(
+                InvokeTxArgs::default(),
+            )),
+        ])
+        .await;
+    let mut validate_tx_provider = mock_dependencies.validate_tx_provider();
+
+    let txs = validate_tx_provider.get_txs(MAX_TXS_PER_FETCH).await.unwrap();
+    let data = assert_matches!(txs, txs => txs);
+    assert_eq!(data.len(), 2);
+    assert!(matches!(data[0], InternalConsensusTransaction::L1Handler(_)));
+    assert!(matches!(data[1], InternalConsensusTransaction::RpcTransaction(_)));
+}
+
+#[rstest]
+#[tokio::test]
+async fn get_final_n_executed_txs(mock_dependencies: MockDependencies) {
+    let (mut validate_tx_provider, final_n_executed_txs_sender) =
+        mock_dependencies.validate_tx_provider_with_final_n_executed_txs();
+
+    // Calling `get_final_n_executed_txs` before sending the number of transactions returns
+    // `None`.
+    assert_eq!(validate_tx_provider.get_final_n_executed_txs().await, None);
+
+    // Send the number of transactions and verify that it is returned.
+    final_n_executed_txs_sender.send(10).unwrap();
+    assert_eq!(validate_tx_provider.get_final_n_executed_txs().await, Some(10));
+
+    // Future calls to `get_final_n_executed_txs` return `None`.
+#[rstest]
+#[tokio::test]
+async fn validate_fails(
+    mut mock_dependencies: MockDependencies,
+    #[values(
+        InvalidValidationStatus::AlreadyIncludedInProposedBlock,
+        InvalidValidationStatus::AlreadyIncludedOnL2,
+        InvalidValidationStatus::ConsumedOnL1OrUnknown
+    )]
+    expected_validation_status: InvalidValidationStatus,
+) {
+    let test_tx = test_l1handler_tx();
+    mock_dependencies.expect_validate_l1handler(
+        test_tx.clone(),
+        L1ValidationStatus::Invalid(expected_validation_status),
+    );
+    mock_dependencies
+        .simulate_input_txs(vec![
+            InternalConsensusTransaction::L1Handler(test_tx),
+            InternalConsensusTransaction::RpcTransaction(internal_invoke_tx(
+                InvokeTxArgs::default(),
+            )),
+        ])
+        .await;
+    let mut validate_tx_provider = mock_dependencies.validate_tx_provider();
+
+    let result = validate_tx_provider.get_txs(MAX_TXS_PER_FETCH).await;
+    assert_matches!(
+        result,
+        Err(TransactionProviderError::L1HandlerTransactionValidationFailed { validation_status, .. })
+        if validation_status == expected_validation_status
+    );
+}
diff --git a/crates/apollo_batcher/src/utils.rs b/crates/apollo_batcher/src/utils.rs
new file mode 100644
index 00000000000..0e2c6e79e5e
--- /dev/null
+++ b/crates/apollo_batcher/src/utils.rs
@@ -0,0 +1,80 @@
+use std::sync::Arc;
+
+use apollo_batcher_types::batcher_types::{BatcherResult, ProposalStatus};
+use apollo_batcher_types::errors::BatcherError;
+use blockifier::abi::constants;
+use chrono::Utc;
+use starknet_api::block::{BlockHashAndNumber, BlockNumber};
+
+use crate::block_builder::BlockBuilderError;
+
+// BlockBuilderError is wrapped in an Arc since it doesn't implement Clone.
+pub(crate) type ProposalResult<T> = Result<T, Arc<BlockBuilderError>>;
+
+// Represents a spawned task building a new block proposal.
+pub(crate) struct ProposalTask {
+    pub abort_signal_sender: tokio::sync::oneshot::Sender<()>,
+    pub final_n_executed_txs_sender: Option<tokio::sync::oneshot::Sender<usize>>,
+    // Handle for awaiting completion of the block proposal execution task.
+    pub execution_join_handle: tokio::task::JoinHandle<()>,
+    // Optional handle for awaiting completion of the pre-confirmed block writer task,
+    // which streams transaction execution states to Cende during block construction.
+    pub writer_join_handle: Option<tokio::task::JoinHandle<()>>,
+}
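Why wrap the error in an `Arc`? `Result<T, E>` is `Clone` only if `E` is, so handing one failure to several observers requires a cloneable handle around the non-`Clone` builder error. A minimal sketch, with a hypothetical stand-in error type:

```rust
use std::sync::Arc;

#[derive(Debug)] // Deliberately not Clone, like BlockBuilderError.
struct NonCloneError(String);

// Mirrors the ProposalResult alias above, using the stand-in error.
type ProposalResult<T> = Result<T, Arc<NonCloneError>>;

fn main() {
    let res: ProposalResult<u64> = Err(Arc::new(NonCloneError("fail on error".into())));
    // Cloning is possible only because the error sits behind an Arc.
    let shared = res.clone();
    assert!(matches!(shared, Err(e) if e.0 == "fail on error"));
}
```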
+
+pub(crate) fn deadline_as_instant(
+    deadline: chrono::DateTime<Utc>,
+) -> BatcherResult<tokio::time::Instant> {
+    let time_to_deadline = deadline - chrono::Utc::now();
+    let as_duration =
+        time_to_deadline.to_std().map_err(|_| BatcherError::TimeToDeadlineError { deadline })?;
+    Ok((std::time::Instant::now() + as_duration).into())
+}
+
+pub(crate) fn verify_block_input(
+    height: BlockNumber,
+    block_number: BlockNumber,
+    retrospective_block_hash: Option<BlockHashAndNumber>,
+) -> BatcherResult<()> {
+    verify_non_empty_retrospective_block_hash(height, retrospective_block_hash)?;
+    verify_block_number(height, block_number)?;
+    Ok(())
+}
+
+pub(crate) fn verify_non_empty_retrospective_block_hash(
+    height: BlockNumber,
+    retrospective_block_hash: Option<BlockHashAndNumber>,
+) -> BatcherResult<()> {
+    if height >= BlockNumber(constants::STORED_BLOCK_HASH_BUFFER)
+        && retrospective_block_hash.is_none()
+    {
+        return Err(BatcherError::MissingRetrospectiveBlockHash);
+    }
+    Ok(())
+}
+
+pub(crate) fn verify_block_number(
+    height: BlockNumber,
+    block_number: BlockNumber,
+) -> BatcherResult<()> {
+    if block_number != height {
+        return Err(BatcherError::InvalidBlockNumber { active_height: height, block_number });
+    }
+    Ok(())
+}
+
+// Returns the appropriate ProposalStatus for a given BlockBuilderError.
+pub(crate) fn proposal_status_from(
+    block_builder_error: Arc<BlockBuilderError>,
+) -> BatcherResult<ProposalStatus> {
+    match block_builder_error.as_ref() {
+        // FailOnError means the proposal either failed due to bad input (e.g. invalid
+        // transactions), or couldn't finish in time.
+        BlockBuilderError::FailOnError(err) => Ok(ProposalStatus::InvalidProposal(err.to_string())),
+        BlockBuilderError::Aborted => Err(BatcherError::ProposalAborted),
+        _ => {
+            tracing::error!("Unexpected error: {}", block_builder_error);
+            Err(BatcherError::InternalError)
+        }
+    }
+}
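A hypothetical illustration of `deadline_as_instant`'s two paths: a future deadline converts to a positive `std::time::Duration`, while a past deadline makes `to_std()` fail on the negative `chrono` duration, which the function maps to `TimeToDeadlineError`:

```rust
use chrono::{Duration, Utc};

fn main() {
    // A deadline in the future yields a positive duration that converts cleanly.
    let future_deadline = Utc::now() + Duration::seconds(1);
    assert!((future_deadline - Utc::now()).to_std().is_ok());

    // A deadline in the past yields a negative chrono duration, which `to_std()`
    // rejects; `deadline_as_instant` maps that rejection to TimeToDeadlineError.
    let past_deadline = Utc::now() - Duration::seconds(1);
    assert!((past_deadline - Utc::now()).to_std().is_err());
}
```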
diff --git a/crates/apollo_batcher_types/Cargo.toml b/crates/apollo_batcher_types/Cargo.toml
new file mode 100644
index 00000000000..a7a331a2594
--- /dev/null
+++ b/crates/apollo_batcher_types/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "apollo_batcher_types"
+version.workspace = true
+edition.workspace = true
+license.workspace = true
+repository.workspace = true
+
+[features]
+testing = ["mockall"]
+
+[lints]
+workspace = true
+
+[dependencies]
+apollo_infra.workspace = true
+apollo_proc_macros.workspace = true
+apollo_state_sync_types.workspace = true
+async-trait.workspace = true
+blockifier = { workspace = true, features = ["transaction_serde"] }
+chrono = { workspace = true, features = ["serde"] }
+derive_more.workspace = true
+indexmap.workspace = true
+mockall = { workspace = true, optional = true }
+serde = { workspace = true, features = ["derive"] }
+starknet_api.workspace = true
+strum_macros.workspace = true
+thiserror.workspace = true
+
+[dev-dependencies]
+mockall.workspace = true
diff --git a/crates/apollo_batcher_types/src/batcher_types.rs b/crates/apollo_batcher_types/src/batcher_types.rs
new file mode 100644
index 00000000000..d00e81ea939
--- /dev/null
+++ b/crates/apollo_batcher_types/src/batcher_types.rs
@@ -0,0 +1,149 @@
+use std::fmt::Debug;
+
+use blockifier::bouncer::{BouncerWeights, CasmHashComputationData};
+use blockifier::state::cached_state::CommitmentStateDiff;
+use blockifier::transaction::objects::TransactionExecutionInfo;
+use chrono::prelude::*;
+use indexmap::IndexMap;
+use serde::{Deserialize, Serialize};
+use starknet_api::block::{BlockHashAndNumber, BlockInfo, BlockNumber};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::core::StateDiffCommitment;
+use starknet_api::execution_resources::GasAmount;
+use starknet_api::state::ThinStateDiff;
+use starknet_api::transaction::TransactionHash;
+
+use crate::errors::BatcherError;
+
+// TODO(Matan): decide on the id structure
+#[derive(
+    Copy,
+    Clone,
+    Debug,
+    Serialize,
+    Deserialize,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Default,
+    derive_more::Display,
+    Hash,
+)]
+pub struct ProposalId(pub u64);
+
+pub type Round = u32;
+
+#[derive(Clone, Debug, Copy, Default, Eq, PartialEq, Serialize, Deserialize)]
+pub struct ProposalCommitment {
+    pub state_diff_commitment: StateDiffCommitment,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ProposeBlockInput {
+    pub proposal_id: ProposalId,
+    pub deadline: chrono::DateTime<Utc>,
+    pub retrospective_block_hash: Option<BlockHashAndNumber>,
+    pub block_info: BlockInfo,
+    pub proposal_round: Round,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct GetProposalContentInput {
+    // TBD: We don't really need the proposal_id because there is only one proposal at a time.
+    pub proposal_id: ProposalId,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct GetHeightResponse {
+    pub height: BlockNumber,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub struct GetProposalContentResponse {
+    pub content: GetProposalContent,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub enum GetProposalContent {
+    Txs(Vec<InternalConsensusTransaction>),
+    Finished { id: ProposalCommitment, final_n_executed_txs: usize },
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+// TODO(Dan): Consider unifying with BuildProposalInput as they have the same fields.
+pub struct ValidateBlockInput {
+    pub proposal_id: ProposalId,
+    pub deadline: chrono::DateTime<Utc>,
+    pub retrospective_block_hash: Option<BlockHashAndNumber>,
+    pub block_info: BlockInfo,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct SendProposalContentInput {
+    pub proposal_id: ProposalId,
+    pub content: SendProposalContent,
+}
+
+/// The content of the stream that the consensus sends to the batcher.
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+pub enum SendProposalContent {
+    Txs(Vec<InternalConsensusTransaction>),
+    /// Contains the final number of transactions in the block.
+    Finish(usize),
+    Abort,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+pub struct SendProposalContentResponse {
+    pub response: ProposalStatus,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+#[cfg_attr(any(test, feature = "testing"), derive(Default))]
+pub struct CentralObjects {
+    pub execution_infos: IndexMap<TransactionHash, TransactionExecutionInfo>,
+    pub bouncer_weights: BouncerWeights,
+    pub compressed_state_diff: Option<CommitmentStateDiff>,
+    pub casm_hash_computation_data_sierra_gas: CasmHashComputationData,
+    pub casm_hash_computation_data_proving_gas: CasmHashComputationData,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq)]
+#[cfg_attr(any(test, feature = "testing"), derive(Default))]
+pub struct DecisionReachedResponse {
+    // TODO(Yael): Consider passing the state_diff as CommitmentStateDiff inside CentralObjects.
+    // Today the ThinStateDiff is used for the state sync but it may not be needed in the future.
+    pub state_diff: ThinStateDiff,
+    pub l2_gas_used: GasAmount,
+    pub central_objects: CentralObjects,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+pub enum ProposalStatus {
+    Processing,
+    // Only sent in response to `Finish`.
+    Finished(ProposalCommitment),
+    // Only sent in response to `Abort`.
+    Aborted,
+    // May be caused by the handling of a previous item of the new proposal.
+    // In this case, the proposal is aborted and no additional content will be processed.
+    InvalidProposal(String),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct StartHeightInput {
+    pub height: BlockNumber,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct DecisionReachedInput {
+    pub proposal_id: ProposalId,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+pub struct RevertBlockInput {
+    pub height: BlockNumber,
+}
+
+pub type BatcherResult<T> = Result<T, BatcherError>;
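A hedged sketch of the validate-side protocol these types encode (a stream of `Txs` batches closed by `Finish`, each answered with a `ProposalStatus`); `stream_and_finish` is a hypothetical helper, and the `BatcherClient` trait it relies on is defined in the next file:

```rust
use apollo_batcher_types::batcher_types::{
    ProposalId, ProposalStatus, SendProposalContent, SendProposalContentInput,
};
use apollo_batcher_types::communication::{BatcherClientResult, SharedBatcherClient};

// Hypothetical helper: closes a proposal's content stream and returns the
// batcher's verdict.
async fn stream_and_finish(
    client: SharedBatcherClient,
    proposal_id: ProposalId,
    final_n_txs: usize,
) -> BatcherClientResult<ProposalStatus> {
    // Content batches would be sent first via `SendProposalContent::Txs(...)`;
    // this sketch skips straight to closing the stream.
    let input = SendProposalContentInput {
        proposal_id,
        content: SendProposalContent::Finish(final_n_txs),
    };
    // Per the enum docs: `Finished` only answers `Finish`, `Aborted` only
    // answers `Abort`, and `InvalidProposal` may answer any send.
    Ok(client.send_proposal_content(input).await?.response)
}
```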
diff --git a/crates/apollo_batcher_types/src/communication.rs b/crates/apollo_batcher_types/src/communication.rs
new file mode 100644
index 00000000000..fe79bf9797c
--- /dev/null
+++ b/crates/apollo_batcher_types/src/communication.rs
@@ -0,0 +1,230 @@
+use std::sync::Arc;
+
+use apollo_infra::component_client::{ClientError, LocalComponentClient, RemoteComponentClient};
+use apollo_infra::component_definitions::{ComponentClient, ComponentRequestAndResponseSender};
+use apollo_infra::impl_debug_for_infra_requests_and_responses;
+use apollo_proc_macros::handle_all_response_variants;
+use apollo_state_sync_types::state_sync_types::SyncBlock;
+use async_trait::async_trait;
+#[cfg(any(feature = "testing", test))]
+use mockall::automock;
+use serde::{Deserialize, Serialize};
+use strum_macros::AsRefStr;
+use thiserror::Error;
+
+use crate::batcher_types::{
+    BatcherResult,
+    DecisionReachedInput,
+    DecisionReachedResponse,
+    GetHeightResponse,
+    GetProposalContentInput,
+    GetProposalContentResponse,
+    ProposeBlockInput,
+    RevertBlockInput,
+    SendProposalContentInput,
+    SendProposalContentResponse,
+    StartHeightInput,
+    ValidateBlockInput,
+};
+use crate::errors::BatcherError;
+
+pub type LocalBatcherClient = LocalComponentClient<BatcherRequest, BatcherResponse>;
+pub type RemoteBatcherClient = RemoteComponentClient<BatcherRequest, BatcherResponse>;
+pub type BatcherClientResult<T> = Result<T, BatcherClientError>;
+pub type BatcherRequestAndResponseSender =
+    ComponentRequestAndResponseSender<BatcherRequest, BatcherResponse>;
+pub type SharedBatcherClient = Arc<dyn BatcherClient>;
+
+/// Serves as the batcher's shared interface. Requires `Send + Sync` to allow transferring and
+/// sharing resources (inputs, futures) across threads.
+#[cfg_attr(any(test, feature = "testing"), automock)]
+#[async_trait]
+pub trait BatcherClient: Send + Sync {
+    /// Starts the process of building a proposal.
+    async fn propose_block(&self, input: ProposeBlockInput) -> BatcherClientResult<()>;
+    /// Gets the first height that is not yet written to storage.
+    async fn get_height(&self) -> BatcherClientResult<GetHeightResponse>;
+    /// Gets the next available content from the proposal stream (only relevant when building a
+    /// proposal).
+    async fn get_proposal_content(
+        &self,
+        input: GetProposalContentInput,
+    ) -> BatcherClientResult<GetProposalContentResponse>;
+    /// Starts the process of validating a proposal.
+    async fn validate_block(&self, input: ValidateBlockInput) -> BatcherClientResult<()>;
+    /// Sends the content of a proposal. Only relevant when validating a proposal.
+    /// Note:
+    /// * This call can be blocking if the batcher has too many unprocessed transactions.
+    /// * The next send might receive an `InvalidProposal` response for the previous send.
+    /// * If this marks the end of the content, i.e. `SendProposalContent::Finish` is received,
+    ///   the batcher will block until the proposal has finished processing before responding.
+    async fn send_proposal_content(
+        &self,
+        input: SendProposalContentInput,
+    ) -> BatcherClientResult<SendProposalContentResponse>;
+    /// Starts the process of a new height.
+    /// From this point onwards, the batcher will accept requests only for proposals associated
+    /// with this height.
+    async fn start_height(&self, input: StartHeightInput) -> BatcherClientResult<()>;
+    /// Adds a block from the state sync. Updates the batcher's state and commits the
+    /// transactions to the mempool.
+    async fn add_sync_block(&self, sync_block: SyncBlock) -> BatcherClientResult<()>;
+    /// Notifies the batcher that a decision has been reached.
+    /// This closes the process of the given height, and the accepted proposal is committed.
+    async fn decision_reached(
+        &self,
+        input: DecisionReachedInput,
+    ) -> BatcherClientResult<DecisionReachedResponse>;
+    /// Reverts the block with the given block number, only if it is the last in the storage.
+    async fn revert_block(&self, input: RevertBlockInput) -> BatcherClientResult<()>;
+}
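And the propose-side counterpart of the earlier sketch, polling `get_proposal_content` until `Finished` arrives; `drain_proposal` is likewise a hypothetical helper, not part of the crate:

```rust
use apollo_batcher_types::batcher_types::{
    GetProposalContent, GetProposalContentInput, ProposalId,
};
use apollo_batcher_types::communication::{BatcherClientResult, SharedBatcherClient};

// Drains one proposal's content stream. Returns the number of transactions
// streamed and the final executed count reported with `Finished`.
async fn drain_proposal(
    client: SharedBatcherClient,
    proposal_id: ProposalId,
) -> BatcherClientResult<(usize, usize)> {
    let mut streamed = 0;
    loop {
        let response =
            client.get_proposal_content(GetProposalContentInput { proposal_id }).await?;
        match response.content {
            GetProposalContent::Txs(txs) => streamed += txs.len(),
            GetProposalContent::Finished { final_n_executed_txs, .. } => {
                return Ok((streamed, final_n_executed_txs));
            }
        }
    }
}
```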
+
+#[derive(Serialize, Deserialize, Clone, AsRefStr)]
+pub enum BatcherRequest {
+    ProposeBlock(ProposeBlockInput),
+    GetProposalContent(GetProposalContentInput),
+    ValidateBlock(ValidateBlockInput),
+    SendProposalContent(SendProposalContentInput),
+    StartHeight(StartHeightInput),
+    GetCurrentHeight,
+    DecisionReached(DecisionReachedInput),
+    AddSyncBlock(SyncBlock),
+    RevertBlock(RevertBlockInput),
+}
+impl_debug_for_infra_requests_and_responses!(BatcherRequest);
+
+#[derive(Serialize, Deserialize, AsRefStr)]
+pub enum BatcherResponse {
+    ProposeBlock(BatcherResult<()>),
+    GetCurrentHeight(BatcherResult<GetHeightResponse>),
+    GetProposalContent(BatcherResult<GetProposalContentResponse>),
+    ValidateBlock(BatcherResult<()>),
+    SendProposalContent(BatcherResult<SendProposalContentResponse>),
+    StartHeight(BatcherResult<()>),
+    DecisionReached(BatcherResult<Box<DecisionReachedResponse>>),
+    AddSyncBlock(BatcherResult<()>),
+    RevertBlock(BatcherResult<()>),
+}
+impl_debug_for_infra_requests_and_responses!(BatcherResponse);
+
+#[derive(Clone, Debug, Error)]
+pub enum BatcherClientError {
+    #[error(transparent)]
+    ClientError(#[from] ClientError),
+    #[error(transparent)]
+    BatcherError(#[from] BatcherError),
+}
+
+#[async_trait]
+impl<ComponentClientType> BatcherClient for ComponentClientType
+where
+    ComponentClientType: Send + Sync + ComponentClient<BatcherRequest, BatcherResponse>,
+{
+    async fn propose_block(&self, input: ProposeBlockInput) -> BatcherClientResult<()> {
+        let request = BatcherRequest::ProposeBlock(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            ProposeBlock,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn get_proposal_content(
+        &self,
+        input: GetProposalContentInput,
+    ) -> BatcherClientResult<GetProposalContentResponse> {
+        let request = BatcherRequest::GetProposalContent(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            GetProposalContent,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn validate_block(&self, input: ValidateBlockInput) -> BatcherClientResult<()> {
+        let request = BatcherRequest::ValidateBlock(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            ValidateBlock,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn send_proposal_content(
+        &self,
+        input: SendProposalContentInput,
+    ) -> BatcherClientResult<SendProposalContentResponse> {
+        let request = BatcherRequest::SendProposalContent(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            SendProposalContent,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn start_height(&self, input: StartHeightInput) -> BatcherClientResult<()> {
+        let request = BatcherRequest::StartHeight(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            StartHeight,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn get_height(&self) -> BatcherClientResult<GetHeightResponse> {
+        let request = BatcherRequest::GetCurrentHeight;
+        handle_all_response_variants!(
+            BatcherResponse,
+            GetCurrentHeight,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn decision_reached(
+        &self,
+        input: DecisionReachedInput,
+    ) -> BatcherClientResult<DecisionReachedResponse> {
+        let request = BatcherRequest::DecisionReached(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            DecisionReached,
+            BatcherClientError,
+            BatcherError,
+            Boxed
+        )
+    }
+
+    async fn add_sync_block(&self, sync_block: SyncBlock) -> BatcherClientResult<()> {
+        let request = BatcherRequest::AddSyncBlock(sync_block);
+        handle_all_response_variants!(
+            BatcherResponse,
+            AddSyncBlock,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+
+    async fn revert_block(&self, input: RevertBlockInput) -> BatcherClientResult<()> {
+        let request = BatcherRequest::RevertBlock(input);
+        handle_all_response_variants!(
+            BatcherResponse,
+            RevertBlock,
+            BatcherClientError,
+            BatcherError,
+            Direct
+        )
+    }
+}
diff --git a/crates/apollo_batcher_types/src/errors.rs b/crates/apollo_batcher_types/src/errors.rs
new file mode 100644
index 00000000000..59b39bbc9ec
--- /dev/null
+++ b/crates/apollo_batcher_types/src/errors.rs
@@ -0,0 +1,53 @@
+use chrono::prelude::*;
+use serde::{Deserialize, Serialize};
+use starknet_api::block::BlockNumber;
+use thiserror::Error;
+
+use crate::batcher_types::ProposalId;
+
+#[derive(Clone, Debug, Error, PartialEq, Eq, Serialize, Deserialize)]
+pub enum BatcherError {
+    #[error(
+        "There is already an active proposal {}, can't start proposal {}.",
+        active_proposal_id,
+        new_proposal_id
+    )]
+    AnotherProposalInProgress { active_proposal_id: ProposalId, new_proposal_id: ProposalId },
+    #[error(
+        "Decision reached for proposal with ID {proposal_id} that does not exist (it might \
+         still be executing)."
+    )]
+    ExecutedProposalNotFound { proposal_id: ProposalId },
+    #[error("Height is in progress.")]
+    HeightInProgress,
+    #[error("Internal server error.")]
+    InternalError,
+    #[error("Invalid block number. The active height is {active_height}, got {block_number}.")]
+    InvalidBlockNumber { active_height: BlockNumber, block_number: BlockNumber },
+    #[error("Missing retrospective block hash.")]
+    MissingRetrospectiveBlockHash,
+    #[error("Attempt to start proposal with no active height.")]
+    NoActiveHeight,
+    #[error("Not ready to begin work on proposal.")]
+    NotReady,
+    #[error("Proposal aborted.")]
+    ProposalAborted,
+    #[error("Proposal with ID {proposal_id} already exists.")]
+    ProposalAlreadyExists { proposal_id: ProposalId },
+    #[error(
+        "Proposal with ID {proposal_id} is already done processing and cannot get more \
+         transactions."
+    )]
+    ProposalAlreadyFinished { proposal_id: ProposalId },
+    #[error("Proposal failed.")]
+    ProposalFailed,
+    #[error("Proposal with ID {proposal_id} not found.")]
+    ProposalNotFound { proposal_id: ProposalId },
+    #[error(
+        "Storage height marker mismatch. Storage marker (first unwritten height): \
+         {marker_height}, requested height: {requested_height}."
+    )]
+    StorageHeightMarkerMismatch { marker_height: BlockNumber, requested_height: BlockNumber },
+    #[error("Time to deadline is out of range. Got {deadline}.")]
+    TimeToDeadlineError { deadline: chrono::DateTime<Utc> },
+}
diff --git a/crates/starknet_batcher_types/src/lib.rs b/crates/apollo_batcher_types/src/lib.rs
similarity index 100%
rename from crates/starknet_batcher_types/src/lib.rs
rename to crates/apollo_batcher_types/src/lib.rs
diff --git a/crates/apollo_central_sync/Cargo.toml b/crates/apollo_central_sync/Cargo.toml
new file mode 100644
index 00000000000..8fdaacc4064
--- /dev/null
+++ b/crates/apollo_central_sync/Cargo.toml
@@ -0,0 +1,57 @@
+[package]
+name = "apollo_central_sync"
+version.workspace = true
+edition.workspace = true
+repository.workspace = true
+license-file.workspace = true
+
+[features]
+testing = []
+
+[dependencies]
+apollo_class_manager_types.workspace = true
+apollo_config.workspace = true
+apollo_proc_macros.workspace = true
+apollo_starknet_client.workspace = true
+apollo_state_sync_metrics.workspace = true
+apollo_storage.workspace = true
+async-stream.workspace = true
+async-trait.workspace = true
+cairo-lang-starknet-classes.workspace = true
+chrono.workspace = true
+futures.workspace = true
+futures-util.workspace = true
+validator.workspace = true
+indexmap = { workspace = true, features = ["serde"] }
+itertools.workspace = true
+lru.workspace = true
+metrics.workspace = true
+papyrus_base_layer.workspace = true
+papyrus_common.workspace = true
+reqwest = { workspace = true, features = ["blocking", "json"] }
+url.workspace = true
+serde = { workspace = true, features = ["derive"] }
+starknet-types-core.workspace = true
+starknet_api.workspace = true
+thiserror.workspace = true
+tokio = { workspace = true, features = ["full", "sync"] }
+tracing.workspace = true
+
+[dev-dependencies]
+apollo_class_manager_types = { workspace = true, features = ["testing"] }
+apollo_starknet_client = { workspace = true, features = ["testing"] }
+apollo_storage = { workspace = true, features = ["testing"] }
+apollo_test_utils.workspace = true
+assert_matches.workspace = true
+mockall.workspace = true
+pretty_assertions.workspace = true
+simple_logger.workspace = true
+starknet_api = { workspace = true, features = ["testing"] }
+tokio-stream.workspace = true
+
+[package.metadata.cargo-machete]
+# `metrics` is used in `latency_histogram` but is falsely detected as unused.
+ignored = ["metrics"] + +[lints] +workspace = true diff --git a/crates/apollo_central_sync/src/lib.rs b/crates/apollo_central_sync/src/lib.rs new file mode 100644 index 00000000000..8a359b4289c --- /dev/null +++ b/crates/apollo_central_sync/src/lib.rs @@ -0,0 +1,1102 @@ +// config compiler to support coverage_attribute feature when running coverage in nightly mode +// within this crate +#![cfg_attr(coverage_nightly, feature(coverage_attribute))] + +mod pending_sync; +pub mod sources; +#[cfg(test)] +mod sync_test; + +use std::cmp::min; +use std::collections::BTreeMap; +use std::sync::Arc; +use std::time::Duration; + +use apollo_class_manager_types::{ClassManagerClientError, SharedClassManagerClient}; +use apollo_config::converters::deserialize_seconds_to_duration; +use apollo_config::dumping::{ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use apollo_proc_macros::latency_histogram; +use apollo_starknet_client::reader::PendingData; +use apollo_state_sync_metrics::metrics::{ + CENTRAL_SYNC_BASE_LAYER_MARKER, + CENTRAL_SYNC_CENTRAL_BLOCK_MARKER, + STATE_SYNC_BODY_MARKER, + STATE_SYNC_CLASS_MANAGER_MARKER, + STATE_SYNC_COMPILED_CLASS_MARKER, + STATE_SYNC_HEADER_LATENCY_SEC, + STATE_SYNC_HEADER_MARKER, + STATE_SYNC_PROCESSED_TRANSACTIONS, + STATE_SYNC_STATE_MARKER, +}; +use apollo_storage::base_layer::{BaseLayerStorageReader, BaseLayerStorageWriter}; +use apollo_storage::body::BodyStorageWriter; +use apollo_storage::class::{ClassStorageReader, ClassStorageWriter}; +use apollo_storage::class_manager::{ClassManagerStorageReader, ClassManagerStorageWriter}; +use apollo_storage::compiled_class::{CasmStorageReader, CasmStorageWriter}; +use apollo_storage::db::DbError; +use apollo_storage::header::{HeaderStorageReader, HeaderStorageWriter}; +use apollo_storage::state::{StateStorageReader, StateStorageWriter}; +use apollo_storage::{StorageError, StorageReader, StorageWriter}; +use async_stream::try_stream; +use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; +use chrono::{TimeZone, Utc}; +use futures::future::pending; +use futures::stream; +use futures_util::{pin_mut, select, Stream, StreamExt}; +use indexmap::IndexMap; +use papyrus_common::pending_classes::PendingClasses; +use serde::{Deserialize, Serialize}; +use sources::base_layer::BaseLayerSourceError; +use starknet_api::block::{ + Block, + BlockHash, + BlockHashAndNumber, + BlockNumber, + BlockSignature, + StarknetVersion, +}; +use starknet_api::contract_class::{ContractClass, SierraVersion}; +use starknet_api::core::{ClassHash, CompiledClassHash, SequencerPublicKey}; +use starknet_api::deprecated_contract_class::ContractClass as DeprecatedContractClass; +use starknet_api::state::{StateDiff, ThinStateDiff}; +use tokio::sync::{Mutex, RwLock}; +use tokio::task::{spawn_blocking, JoinError}; +use tracing::{debug, error, info, instrument, trace, warn}; + +use crate::pending_sync::sync_pending_data; +use crate::sources::base_layer::{BaseLayerSourceTrait, EthereumBaseLayerSource}; +use crate::sources::central::{CentralError, CentralSource, CentralSourceTrait}; +use crate::sources::pending::{PendingError, PendingSource, PendingSourceTrait}; + +// TODO(shahak): Consider adding genesis hash to the config to support chains that have +// different genesis hash. +// TODO(Shahak): Consider moving to a more general place. +pub const GENESIS_HASH: &str = "0x0"; + +// TODO(dvir): add to config. +// Sleep duration between polling for pending data. 
+const PENDING_SLEEP_DURATION: Duration = Duration::from_millis(500); + +// Sleep duration, in seconds, between sync progress checks. +const SLEEP_TIME_SYNC_PROGRESS: Duration = Duration::from_secs(300); + +// The first starknet version where we can send sierras to the class manager without casms and it +// will compile them, in a backward-compatible manner. +const STARKNET_VERSION_TO_COMPILE_FROM: StarknetVersion = StarknetVersion::V0_12_0; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] +pub struct SyncConfig { + #[serde(deserialize_with = "deserialize_seconds_to_duration")] + pub block_propagation_sleep_duration: Duration, + #[serde(deserialize_with = "deserialize_seconds_to_duration")] + pub base_layer_propagation_sleep_duration: Duration, + #[serde(deserialize_with = "deserialize_seconds_to_duration")] + pub recoverable_error_sleep_duration: Duration, + pub blocks_max_stream_size: u32, + pub state_updates_max_stream_size: u32, + pub verify_blocks: bool, + pub collect_pending_data: bool, + pub store_sierras_and_casms: bool, +} + +impl SerializeConfig for SyncConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from_iter([ + ser_param( + "block_propagation_sleep_duration", + &self.block_propagation_sleep_duration.as_secs(), + "Time in seconds before checking for a new block after the node is synchronized.", + ParamPrivacyInput::Public, + ), + ser_param( + "base_layer_propagation_sleep_duration", + &self.base_layer_propagation_sleep_duration.as_secs(), + "Time in seconds to poll the base layer to get the latest proved block.", + ParamPrivacyInput::Public, + ), + ser_param( + "recoverable_error_sleep_duration", + &self.recoverable_error_sleep_duration.as_secs(), + "Waiting time in seconds before restarting synchronization after a recoverable \ + error.", + ParamPrivacyInput::Public, + ), + ser_param( + "blocks_max_stream_size", + &self.blocks_max_stream_size, + "Max amount of blocks to download in a stream.", + ParamPrivacyInput::Public, + ), + ser_param( + "state_updates_max_stream_size", + &self.state_updates_max_stream_size, + "Max amount of state updates to download in a stream.", + ParamPrivacyInput::Public, + ), + ser_param( + "verify_blocks", + &self.verify_blocks, + "Whether to verify incoming blocks.", + ParamPrivacyInput::Public, + ), + ser_param( + "collect_pending_data", + &self.collect_pending_data, + "Whether to collect data on pending blocks.", + ParamPrivacyInput::Public, + ), + ser_param( + "store_sierras_and_casms", + &self.store_sierras_and_casms, + "Whether to store sierras and casms to the storage. This allows maintaining \ + backward-compatibility with native-blockifier", + ParamPrivacyInput::Public, + ), + ]) + } +} + +impl Default for SyncConfig { + fn default() -> Self { + SyncConfig { + block_propagation_sleep_duration: Duration::from_secs(2), + base_layer_propagation_sleep_duration: Duration::from_secs(10), + recoverable_error_sleep_duration: Duration::from_secs(3), + blocks_max_stream_size: 1000, + state_updates_max_stream_size: 1000, + verify_blocks: true, + collect_pending_data: false, + store_sierras_and_casms: false, + } + } +} + +// Orchestrates specific network interfaces (e.g. central, p2p, l1) and writes to Storage and shared +// memory. 
+pub struct GenericStateSync<
+    TCentralSource: CentralSourceTrait + Sync + Send,
+    TPendingSource: PendingSourceTrait + Sync + Send,
+    TBaseLayerSource: BaseLayerSourceTrait + Sync + Send,
+> {
+    config: SyncConfig,
+    shared_highest_block: Arc<RwLock<Option<BlockHashAndNumber>>>,
+    pending_data: Arc<RwLock<PendingData>>,
+    central_source: Arc<TCentralSource>,
+    pending_source: Arc<TPendingSource>,
+    pending_classes: Arc<RwLock<PendingClasses>>,
+    base_layer_source: Option<Arc<TBaseLayerSource>>,
+    reader: StorageReader,
+    writer: Arc<Mutex<StorageWriter>>,
+    sequencer_pub_key: Option<SequencerPublicKey>,
+    class_manager_client: Option<SharedClassManagerClient>,
+}
+
+pub type StateSyncResult = Result<(), StateSyncError>;
+
+// TODO(DanB): Sort alphabetically.
+// TODO(DanB): Change this to CentralStateSyncError
+#[derive(thiserror::Error, Debug)]
+pub enum StateSyncError {
+    #[error("Sync stopped progress.")]
+    NoProgress,
+    #[error(transparent)]
+    StorageError(#[from] StorageError),
+    #[error(transparent)]
+    CentralSourceError(#[from] CentralError),
+    #[error(transparent)]
+    PendingSourceError(#[from] PendingError),
+    #[error(
+        "Parent block hash of block {block_number} is not consistent with the stored block. \
+         Expected {expected_parent_block_hash}, found {stored_parent_block_hash}."
+    )]
+    ParentBlockHashMismatch {
+        block_number: BlockNumber,
+        expected_parent_block_hash: BlockHash,
+        stored_parent_block_hash: BlockHash,
+    },
+    #[error("Header for block {block_number} wasn't found when trying to store base layer block.")]
+    BaseLayerBlockWithoutMatchingHeader { block_number: BlockNumber },
+    #[error(transparent)]
+    BaseLayerSourceError(#[from] BaseLayerSourceError),
+    #[error(
+        "Base layer and L2 don't match for block {block_number}. Base layer hash: \
+         {base_layer_hash}, L2 hash: {l2_hash}."
+    )]
+    BaseLayerHashMismatch {
+        block_number: BlockNumber,
+        base_layer_hash: BlockHash,
+        l2_hash: BlockHash,
+    },
+    #[error("Sequencer public key changed from {old:?} to {new:?}.")]
+    SequencerPubKeyChanged { old: SequencerPublicKey, new: SequencerPublicKey },
+    #[error(transparent)]
+    ClassManagerClientError(#[from] ClassManagerClientError),
+    #[error(transparent)]
+    JoinError(#[from] JoinError),
+}
+
+#[allow(clippy::large_enum_variant)]
+#[derive(Debug)]
+pub enum SyncEvent {
+    NoProgress,
+    BlockAvailable {
+        block_number: BlockNumber,
+        block: Block,
+        signature: BlockSignature,
+    },
+    StateDiffAvailable {
+        block_number: BlockNumber,
+        block_hash: BlockHash,
+        state_diff: StateDiff,
+        // TODO(anatg): Remove once there are no more deployed contracts with undeclared classes.
+        // Class definitions of deployed contracts with classes that were not declared in this
+        // state diff.
+        // Note: Since 0.11 new classes can not be implicitly declared.
+        deployed_contract_class_definitions: IndexMap<ClassHash, DeprecatedContractClass>,
+    },
+    CompiledClassAvailable {
+        class_hash: ClassHash,
+        compiled_class_hash: CompiledClassHash,
+        compiled_class: CasmContractClass,
+        is_compiler_backward_compatible: bool,
+    },
+    NewBaseLayerBlock {
+        block_number: BlockNumber,
+        block_hash: BlockHash,
+    },
+}
+
+impl<
+    TCentralSource: CentralSourceTrait + Sync + Send + 'static,
+    TPendingSource: PendingSourceTrait + Sync + Send + 'static,
+    TBaseLayerSource: BaseLayerSourceTrait + Sync + Send,
+> GenericStateSync<TCentralSource, TPendingSource, TBaseLayerSource>
+{
+    pub async fn run(mut self) -> StateSyncResult {
+        info!("State sync started.");
+        loop {
+            match self.sync_while_ok().await {
+                // A recoverable error occurred. Sleep and try syncing again.
+                Err(err) if is_recoverable(&err) => {
+                    warn!("Recoverable error encountered while syncing, error: {}", err);
+                    tokio::time::sleep(self.config.recoverable_error_sleep_duration).await;
+                    continue;
+                }
+                // Unrecoverable errors.
+ Err(err) => { + error!("Fatal error while syncing: {}", err); + return Err(err); + } + Ok(_) => { + unreachable!("Sync should either return with an error or continue forever.") + } + } + } + + // Whitelisting of errors from which we might be able to recover. + fn is_recoverable(err: &StateSyncError) -> bool { + // We don't use here catch-all pattern to enforce conscious decision for each error + // kind. + match err { + StateSyncError::StorageError(error) => { + matches!(error, StorageError::InnerError(_)) + } + StateSyncError::NoProgress + | StateSyncError::CentralSourceError(_) + | StateSyncError::PendingSourceError(_) + | StateSyncError::BaseLayerSourceError(_) + | StateSyncError::ParentBlockHashMismatch { .. } + | StateSyncError::BaseLayerHashMismatch { .. } + | StateSyncError::ClassManagerClientError(_) + | StateSyncError::BaseLayerBlockWithoutMatchingHeader { .. } + | StateSyncError::JoinError(_) => true, + StateSyncError::SequencerPubKeyChanged { .. } => false, + } + } + } + + async fn track_sequencer_public_key_changes(&mut self) -> StateSyncResult { + let sequencer_pub_key = self.central_source.get_sequencer_pub_key().await?; + match self.sequencer_pub_key { + // First time setting the sequencer public key. + None => { + info!("Sequencer public key set to {sequencer_pub_key:?}."); + self.sequencer_pub_key = Some(sequencer_pub_key); + } + Some(cur_key) => { + if cur_key != sequencer_pub_key { + warn!( + "Sequencer public key changed from {cur_key:?} to {sequencer_pub_key:?}." + ); + // TODO(Yair): Add alert. + self.sequencer_pub_key = Some(sequencer_pub_key); + return Err(StateSyncError::SequencerPubKeyChanged { + old: cur_key, + new: sequencer_pub_key, + }); + } + } + }; + Ok(()) + } + + // Sync until encountering an error: + // 1. If needed, revert blocks from the end of the chain. + // 2. Create infinite block and state diff streams to fetch data from the central source. + // 3. Fetch data from the streams with unblocking wait while there is no new data. + async fn sync_while_ok(&mut self) -> StateSyncResult { + if self.config.verify_blocks { + self.track_sequencer_public_key_changes().await?; + } + self.handle_block_reverts().await?; + let block_stream = stream_new_blocks( + self.reader.clone(), + self.central_source.clone(), + self.pending_source.clone(), + self.shared_highest_block.clone(), + self.pending_data.clone(), + self.pending_classes.clone(), + self.config.block_propagation_sleep_duration, + self.config.collect_pending_data, + PENDING_SLEEP_DURATION, + self.config.blocks_max_stream_size, + ) + .fuse(); + let state_diff_stream = stream_new_state_diffs( + self.reader.clone(), + self.central_source.clone(), + self.config.block_propagation_sleep_duration, + self.config.state_updates_max_stream_size, + ) + .fuse(); + let compiled_class_stream = stream_new_compiled_classes( + self.reader.clone(), + self.central_source.clone(), + self.config.block_propagation_sleep_duration, + // TODO(yair): separate config param. + self.config.state_updates_max_stream_size, + self.config.store_sierras_and_casms, + ) + .fuse(); + let base_layer_block_stream = match &self.base_layer_source { + Some(base_layer_source) => stream_new_base_layer_block( + self.reader.clone(), + base_layer_source.clone(), + self.config.base_layer_propagation_sleep_duration, + ) + .boxed() + .fuse(), + None => stream::pending().boxed().fuse(), + }; + // TODO(dvir): try use interval instead of stream. + // TODO(DvirYo): fix the bug and remove this check. 
+ let check_sync_progress = + check_sync_progress(self.reader.clone(), self.config.store_sierras_and_casms).fuse(); + pin_mut!( + block_stream, + state_diff_stream, + compiled_class_stream, + base_layer_block_stream, + check_sync_progress + ); + + loop { + debug!("Selecting between block sync and state diff sync."); + let sync_event = select! { + res = block_stream.next() => res, + res = state_diff_stream.next() => res, + res = compiled_class_stream.next() => res, + res = base_layer_block_stream.next() => res, + res = check_sync_progress.next() => res, + complete => break, + } + .expect("Received None as a sync event.")?; + self.process_sync_event(sync_event).await?; + debug!("Finished processing sync event."); + } + unreachable!("Fetching data loop should never return."); + } + + // Tries to store the incoming data. + async fn process_sync_event(&mut self, sync_event: SyncEvent) -> StateSyncResult { + match sync_event { + SyncEvent::BlockAvailable { block_number, block, signature } => { + self.store_block(block_number, block, signature).await + } + SyncEvent::StateDiffAvailable { + block_number, + block_hash, + state_diff, + deployed_contract_class_definitions, + } => { + self.store_state_diff( + block_number, + block_hash, + state_diff, + deployed_contract_class_definitions, + ) + .await + } + SyncEvent::CompiledClassAvailable { + class_hash, + compiled_class_hash, + compiled_class, + is_compiler_backward_compatible, + } => { + self.store_compiled_class( + class_hash, + compiled_class_hash, + compiled_class, + is_compiler_backward_compatible, + ) + .await + } + SyncEvent::NewBaseLayerBlock { block_number, block_hash } => { + self.store_base_layer_block(block_number, block_hash).await + } + SyncEvent::NoProgress => Err(StateSyncError::NoProgress), + } + } + + #[latency_histogram("sync_store_block_latency_seconds", false)] + #[instrument( + skip(self, block), + level = "debug", + fields(block_hash = format_args!("{:#064x}", block.header.block_hash.0)), + err + )] + #[allow(clippy::as_conversions)] // FIXME: use int metrics so `as f64` may be removed. + async fn store_block( + &mut self, + block_number: BlockNumber, + block: Block, + signature: BlockSignature, + ) -> StateSyncResult { + // Assuming the central source is trusted, detect reverts by comparing the incoming block's + // parent hash to the current hash. + self.verify_parent_block_hash(block_number, &block)?; + + debug!("Storing block number: {block_number}, block header: {:?}", block.header); + trace!("Block data: {block:#?}, signature: {signature:?}"); + let num_txs = + block.body.transactions.len().try_into().expect("Failed to convert usize to u64"); + let timestamp = block.header.block_header_without_hash.timestamp; + self.perform_storage_writes(move |writer| { + let mut txn = writer + .begin_rw_txn()? + .append_header(block_number, &block.header)? + .append_block_signature(block_number, &signature)? 
+ .append_body(block_number, block.body)?; + if block.header.block_header_without_hash.starknet_version + < STARKNET_VERSION_TO_COMPILE_FROM + { + txn = txn.update_compiler_backward_compatibility_marker( + &block_number.unchecked_next(), + )?; + } + txn.commit()?; + Ok(()) + }) + .await?; + STATE_SYNC_HEADER_MARKER.set_lossy(block_number.unchecked_next().0); + STATE_SYNC_BODY_MARKER.set_lossy(block_number.unchecked_next().0); + STATE_SYNC_PROCESSED_TRANSACTIONS.increment(num_txs); + let time_delta = Utc::now() + - Utc + .timestamp_opt(timestamp.0 as i64, 0) + .single() + .expect("block timestamp should be valid"); + let header_latency = time_delta.num_seconds(); + debug!("Header latency: {}.", header_latency); + if header_latency >= 0 { + STATE_SYNC_HEADER_LATENCY_SEC.set_lossy(header_latency); + } + + Ok(()) + } + + #[latency_histogram("sync_store_state_diff_latency_seconds", false)] + #[instrument(skip(self, state_diff, deployed_contract_class_definitions), level = "debug", err)] + async fn store_state_diff( + &mut self, + block_number: BlockNumber, + block_hash: BlockHash, + state_diff: StateDiff, + deployed_contract_class_definitions: IndexMap, + ) -> StateSyncResult { + // TODO(dan): verifications - verify state diff against stored header. + debug!("Storing state diff."); + trace!("StateDiff data: {state_diff:#?}"); + + // TODO(shahak): split the state diff stream to 2 separate streams for blocks and for + // classes. + let (thin_state_diff, classes, deprecated_classes) = + ThinStateDiff::from_state_diff(state_diff); + + // Sending to class manager before updating the storage so that if the class manager send + // fails we retry the same block. + if let Some(class_manager_client) = &self.class_manager_client { + // Blocks smaller than compiler_backward_compatibility marker are added to class + // manager via the compiled classes stream. + // We're sure that if the current block is above the compiler_backward_compatibility + // marker then the compiler_backward_compatibility will not advance anymore, because + // the compiler_backward_compatibility marker advances in the header stream and this + // stream is behind the header stream + // The compiled classes stream is always behind the compiler_backward_compatibility + // marker + // TODO(shahak): Consider storing a boolean and updating it to true once + // compiler_backward_compatibility_marker <= block_number and avoiding the check if the + // boolean is true. + let compiler_backward_compatibility_marker = + self.reader.begin_ro_txn()?.get_compiler_backward_compatibility_marker()?; + + if compiler_backward_compatibility_marker <= block_number { + for (expected_class_hash, class) in &classes { + let class_hash = + class_manager_client.add_class(class.clone()).await?.class_hash; + if class_hash != *expected_class_hash { + panic!( + "Class hash mismatch. Expected: {expected_class_hash}, got: \ + {class_hash}." + ); + } + } + } + + for (class_hash, deprecated_class) in &deprecated_classes { + class_manager_client + .add_deprecated_class(*class_hash, deprecated_class.clone()) + .await?; + } + } + let has_class_manager = self.class_manager_client.is_some(); + let store_sierras_and_casms = self.config.store_sierras_and_casms; + self.perform_storage_writes(move |writer| { + if has_class_manager { + writer + .begin_rw_txn()? + .update_class_manager_block_marker(&block_number.unchecked_next())? 
+                .commit()?;
+                STATE_SYNC_CLASS_MANAGER_MARKER.set_lossy(block_number.unchecked_next().0);
+            }
+            let mut txn = writer.begin_rw_txn()?;
+            txn = txn.append_state_diff(block_number, thin_state_diff)?;
+            if store_sierras_and_casms {
+                txn = txn.append_classes(
+                    block_number,
+                    &classes
+                        .iter()
+                        .map(|(class_hash, class)| (*class_hash, class))
+                        .collect::<Vec<_>>(),
+                    &deprecated_classes
+                        .iter()
+                        .chain(deployed_contract_class_definitions.iter())
+                        .map(|(class_hash, deprecated_class)| (*class_hash, deprecated_class))
+                        .collect::<Vec<_>>(),
+                )?;
+            }
+            txn.commit()?;
+            Ok(())
+        })
+        .await?;
+
+        let compiled_class_marker = self.reader.begin_ro_txn()?.get_compiled_class_marker()?;
+        STATE_SYNC_STATE_MARKER.set_lossy(block_number.unchecked_next().0);
+        STATE_SYNC_COMPILED_CLASS_MARKER.set_lossy(compiled_class_marker.0);
+
+        // Inform the user that the block was synced once all the data is stored.
+        info!("SYNC_NEW_BLOCK: Added block {} with hash {:#064x}.", block_number, block_hash.0);
+
+        Ok(())
+    }
+
+    #[latency_histogram("sync_store_compiled_class_latency_seconds", false)]
+    #[instrument(skip(self, compiled_class), level = "debug", err)]
+    async fn store_compiled_class(
+        &mut self,
+        class_hash: ClassHash,
+        compiled_class_hash: CompiledClassHash,
+        compiled_class: CasmContractClass,
+        is_compiler_backward_compatible: bool,
+    ) -> StateSyncResult {
+        if !is_compiler_backward_compatible {
+            if let Some(class_manager_client) = &self.class_manager_client {
+                let class = self.reader.begin_ro_txn()?.get_class(&class_hash)?.expect(
+                    "Compiled classes stream gave class hash that doesn't appear in storage.",
+                );
+                let sierra_version = SierraVersion::extract_from_program(&class.sierra_program)
+                    .expect("Failed reading sierra version from program.");
+                let contract_class = ContractClass::V1((compiled_class.clone(), sierra_version));
+                class_manager_client
+                    .add_class_and_executable_unsafe(
+                        class_hash,
+                        class,
+                        compiled_class_hash,
+                        contract_class,
+                    )
+                    .await
+                    .expect("Failed adding class and compiled class to class manager.");
+            }
+        }
+        if !self.config.store_sierras_and_casms {
+            return Ok(());
+        }
+        let result = self
+            .perform_storage_writes(move |writer| {
+                writer.begin_rw_txn()?.append_casm(&class_hash, &compiled_class)?.commit()?;
+                Ok(())
+            })
+            .await;
+        // TODO(Yair): verifications - verify casm corresponds to a class on storage.
+        match result {
+            Ok(()) => {
+                let compiled_class_marker =
+                    self.reader.begin_ro_txn()?.get_compiled_class_marker()?;
+                // Write class and casm to class manager.
+                STATE_SYNC_COMPILED_CLASS_MARKER.set_lossy(compiled_class_marker.0);
+                debug!("Added compiled class.");
+                Ok(())
+            }
+            // TODO(yair): Modify the stream so it skips already stored classes.
+            // Compiled classes rewrite is valid because the stream downloads from the beginning
+            // of the block instead of the last downloaded class.
+            Err(StateSyncError::StorageError(StorageError::InnerError(
+                DbError::KeyAlreadyExists(..),
+            ))) => {
+                debug!("Compiled class of {class_hash} already stored.");
+                Ok(())
+            }
+            Err(err) => Err(err),
+        }
+    }
+
+    #[instrument(skip(self), level = "debug", err)]
+    // In case of a mismatch between the base layer and L2, an error is returned; the sync will
+    // then revert blocks if needed, based on the L2 central source. This approach works as long
+    // as L2 is trusted, so all reverts can be detected through it.
+ async fn store_base_layer_block( + &mut self, + block_number: BlockNumber, + block_hash: BlockHash, + ) -> StateSyncResult { + self.perform_storage_writes(move |writer| { + let txn = writer.begin_rw_txn()?; + // Missing header can be because of a base layer reorg, the matching header may be + // reverted. + let expected_hash = txn + .get_block_header(block_number)? + .ok_or(StateSyncError::BaseLayerBlockWithoutMatchingHeader { block_number })? + .block_hash; + // Can be caused because base layer reorg or l2 reverts. + if expected_hash != block_hash { + return Err(StateSyncError::BaseLayerHashMismatch { + block_number, + base_layer_hash: block_hash, + l2_hash: expected_hash, + }); + } + if txn.get_base_layer_block_marker()? != block_number.unchecked_next() { + info!("Verified block {block_number} hash against base layer."); + txn.update_base_layer_block_marker(&block_number.unchecked_next())?.commit()?; + CENTRAL_SYNC_BASE_LAYER_MARKER.set_lossy(block_number.unchecked_next().0); + } + Ok(()) + }) + .await + } + + // Compares the block's parent hash to the stored block. + fn verify_parent_block_hash( + &self, + block_number: BlockNumber, + block: &Block, + ) -> StateSyncResult { + let prev_block_number = match block_number.prev() { + None => return Ok(()), + Some(bn) => bn, + }; + let prev_hash = self + .reader + .begin_ro_txn()? + .get_block_header(prev_block_number)? + .ok_or(StorageError::DBInconsistency { + msg: format!( + "Missing block {prev_block_number} in the storage (for verifying block \ + {block_number}).", + ), + })? + .block_hash; + + if prev_hash != block.header.block_header_without_hash.parent_hash { + // A revert detected, log and restart sync loop. + info!( + "Detected revert while processing block {}. Parent hash of the incoming block is \ + {}, current block hash is {}.", + block_number, block.header.block_header_without_hash.parent_hash, prev_hash + ); + return Err(StateSyncError::ParentBlockHashMismatch { + block_number, + expected_parent_block_hash: block.header.block_header_without_hash.parent_hash, + stored_parent_block_hash: prev_hash, + }); + } + + Ok(()) + } + + // Reverts data if needed. + async fn handle_block_reverts(&mut self) -> Result<(), StateSyncError> { + debug!("Handling block reverts."); + let header_marker = self.reader.begin_ro_txn()?.get_header_marker()?; + + // Revert last blocks if needed. + let mut last_block_in_storage = header_marker.prev(); + while let Some(block_number) = last_block_in_storage { + if self.should_revert_block(block_number).await? { + self.revert_block(block_number).await?; + last_block_in_storage = block_number.prev(); + } else { + break; + } + } + Ok(()) + } + + // TODO(dan): update necessary metrics. + // Deletes the block data from the storage. 
+ #[allow(clippy::expect_fun_call)] + #[instrument(skip(self), level = "debug", err)] + async fn revert_block(&mut self, block_number: BlockNumber) -> StateSyncResult { + debug!("Reverting block."); + + self.perform_storage_writes(move |writer| { + let mut txn = writer.begin_rw_txn()?; + txn = txn.try_revert_base_layer_marker(block_number)?; + let res = txn.revert_header(block_number)?; + txn = res.0; + let mut reverted_block_hash: Option = None; + if let Some(header) = res.1 { + reverted_block_hash = Some(header.block_hash); + + let res = txn.revert_body(block_number)?; + txn = res.0; + + let res = txn.revert_state_diff(block_number)?; + txn = res.0; + } + + txn.commit()?; + if let Some(hash) = reverted_block_hash { + info!(%hash, %block_number, "Reverted block."); + } + Ok(()) + }) + .await + } + + /// Checks if centrals block hash at the block number is different from ours (or doesn't exist). + /// If so, a revert is required. + async fn should_revert_block(&self, block_number: BlockNumber) -> Result { + if let Some(central_block_hash) = self.central_source.get_block_hash(block_number).await? { + let storage_block_header = + self.reader.begin_ro_txn()?.get_block_header(block_number)?; + + match storage_block_header { + Some(block_header) => Ok(block_header.block_hash != central_block_hash), + None => Ok(false), + } + } else { + // Block number doesn't exist in central, revert. + Ok(true) + } + } + + async fn perform_storage_writes< + F: FnOnce(&mut StorageWriter) -> Result<(), StateSyncError> + Send + 'static, + >( + &mut self, + f: F, + ) -> Result<(), StateSyncError> { + let writer = self.writer.clone(); + spawn_blocking(move || f(&mut (writer.blocking_lock()))).await? + } +} +// TODO(dvir): consider gathering in a single pending argument instead. +#[allow(clippy::too_many_arguments)] +fn stream_new_blocks< + TCentralSource: CentralSourceTrait + Sync + Send + 'static, + TPendingSource: PendingSourceTrait + Sync + Send + 'static, +>( + reader: StorageReader, + central_source: Arc, + pending_source: Arc, + shared_highest_block: Arc>>, + pending_data: Arc>, + pending_classes: Arc>, + block_propagation_sleep_duration: Duration, + collect_pending_data: bool, + pending_sleep_duration: Duration, + max_stream_size: u32, +) -> impl Stream> { + try_stream! { + loop { + let header_marker = reader.begin_ro_txn()?.get_header_marker()?; + let latest_central_block = central_source.get_latest_block().await?; + *shared_highest_block.write().await = latest_central_block; + let central_block_marker = latest_central_block.map_or( + BlockNumber::default(), |block_hash_and_number| block_hash_and_number.number.unchecked_next() + ); + CENTRAL_SYNC_CENTRAL_BLOCK_MARKER.set_lossy(central_block_marker.0); + if header_marker == central_block_marker { + // Only if the node have the last block and state (without casms), sync pending data. + if collect_pending_data && reader.begin_ro_txn()?.get_state_marker()? == header_marker{ + // Here is the only place we update the pending data. 
+ debug!("Start polling for pending data of block {:?}.", header_marker); + sync_pending_data( + reader.clone(), + central_source.clone(), + pending_source.clone(), + pending_data.clone(), + pending_classes.clone(), + pending_sleep_duration, + ).await?; + } + else{ + trace!("Blocks syncing reached the last known block {:?}, waiting for blockchain to advance.", header_marker.prev()); + tokio::time::sleep(block_propagation_sleep_duration).await; + }; + continue; + } + let up_to = min(central_block_marker, BlockNumber(header_marker.0 + u64::from(max_stream_size))); + debug!("Downloading blocks [{} - {}).", header_marker, up_to); + let block_stream = + central_source.stream_new_blocks(header_marker, up_to).fuse(); + pin_mut!(block_stream); + while let Some(maybe_block) = block_stream.next().await { + let (block_number, block, signature) = maybe_block?; + yield SyncEvent::BlockAvailable { block_number, block , signature }; + } + } + } +} + +fn stream_new_state_diffs( + reader: StorageReader, + central_source: Arc, + block_propagation_sleep_duration: Duration, + max_stream_size: u32, +) -> impl Stream> { + try_stream! { + loop { + let txn = reader.begin_ro_txn()?; + let state_marker = txn.get_state_marker()?; + let last_block_number = txn.get_header_marker()?; + drop(txn); + if state_marker == last_block_number { + trace!("State updates syncing reached the last downloaded block {:?}, waiting for more blocks.", state_marker.prev()); + tokio::time::sleep(block_propagation_sleep_duration).await; + continue; + } + let up_to = min(last_block_number, BlockNumber(state_marker.0 + u64::from(max_stream_size))); + debug!("Downloading state diffs [{} - {}).", state_marker, up_to); + let state_diff_stream = + central_source.stream_state_updates(state_marker, up_to).fuse(); + pin_mut!(state_diff_stream); + + while let Some(maybe_state_diff) = state_diff_stream.next().await { + let ( + block_number, + block_hash, + mut state_diff, + deployed_contract_class_definitions, + ) = maybe_state_diff?; + sort_state_diff(&mut state_diff); + yield SyncEvent::StateDiffAvailable { + block_number, + block_hash, + state_diff, + deployed_contract_class_definitions, + }; + } + } + } +} + +pub fn sort_state_diff(diff: &mut StateDiff) { + diff.declared_classes.sort_unstable_keys(); + diff.deprecated_declared_classes.sort_unstable_keys(); + diff.deployed_contracts.sort_unstable_keys(); + diff.nonces.sort_unstable_keys(); + diff.storage_diffs.sort_unstable_keys(); + for storage_entries in diff.storage_diffs.values_mut() { + storage_entries.sort_unstable_keys(); + } +} + +pub type StateSync = GenericStateSync; + +impl StateSync { + #[allow(clippy::too_many_arguments)] + pub fn new( + config: SyncConfig, + shared_highest_block: Arc>>, + pending_data: Arc>, + pending_classes: Arc>, + central_source: CentralSource, + pending_source: PendingSource, + base_layer_source: Option, + reader: StorageReader, + writer: StorageWriter, + class_manager_client: Option, + ) -> Self { + let base_layer_source = base_layer_source.map(Arc::new); + Self { + config, + shared_highest_block, + pending_data, + pending_classes, + central_source: Arc::new(central_source), + pending_source: Arc::new(pending_source), + base_layer_source, + reader, + writer: Arc::new(Mutex::new(writer)), + sequencer_pub_key: None, + class_manager_client, + } + } +} + +fn stream_new_compiled_classes( + reader: StorageReader, + central_source: Arc, + block_propagation_sleep_duration: Duration, + max_stream_size: u32, + store_sierras_and_casms: bool, +) -> impl Stream> { + 
try_stream! { + loop { + let txn = reader.begin_ro_txn()?; + let mut from = txn.get_compiled_class_marker()?; + let state_marker = txn.get_state_marker()?; + let compiler_backward_compatibility_marker = txn.get_compiler_backward_compatibility_marker()?; + // Avoid starting streams from blocks without declared classes. + while from < state_marker { + let state_diff = txn.get_state_diff(from)?.expect("Expecting to have state diff up to the marker."); + if state_diff.declared_classes.is_empty() { + from = from.unchecked_next(); + } + else { + break; + } + } + + if from == state_marker { + debug!( + "Compiled classes syncing reached the last downloaded state update{:?}, waiting \ + for more state updates.", state_marker.prev() + ); + tokio::time::sleep(block_propagation_sleep_duration).await; + continue; + } + let mut up_to = min(state_marker, BlockNumber(from.0 + u64::from(max_stream_size))); + let are_casms_backward_compatible = from >= compiler_backward_compatibility_marker; + // We want that the stream will either have all compiled classes as backward compatible + // or all as not backward compatible. If needed we'll decrease up_to + if from < compiler_backward_compatibility_marker && up_to > compiler_backward_compatibility_marker { + up_to = compiler_backward_compatibility_marker; + } + + // No point in downloading casms if we don't store them and don't send them to the + // class manager + if are_casms_backward_compatible && !store_sierras_and_casms { + info!("Compiled classes stream reached a block that has backward compatibility for \ + the compiler, and store_sierras_and_casms is set to false. \ + Finishing the compiled class stream"); + pending::<()>().await; + continue; + } + + debug!("Downloading compiled classes of blocks [{} - {}).", from, up_to); + let compiled_classes_stream = + central_source.stream_compiled_classes(from, up_to).fuse(); + pin_mut!(compiled_classes_stream); + + while let Some(maybe_compiled_class) = compiled_classes_stream.next().await { + let (class_hash, compiled_class_hash, compiled_class) = maybe_compiled_class?; + yield SyncEvent::CompiledClassAvailable { + class_hash, + compiled_class_hash, + compiled_class, + is_compiler_backward_compatible: are_casms_backward_compatible, + }; + } + } + } +} + +// TODO(dvir): consider combine this function and store_base_layer_block. +fn stream_new_base_layer_block( + reader: StorageReader, + base_layer_source: Arc, + base_layer_propagation_sleep_duration: Duration, +) -> impl Stream> { + try_stream! { + loop { + tokio::time::sleep(base_layer_propagation_sleep_duration).await; + let txn = reader.begin_ro_txn()?; + let header_marker = txn.get_header_marker()?; + match base_layer_source.latest_proved_block().await? { + Some((block_number, _block_hash)) if header_marker <= block_number => { + debug!( + "Sync headers ({header_marker}) is behind the base layer tip \ + ({block_number}), waiting for sync to advance." + ); + } + Some((block_number, block_hash)) => { + debug!("Returns a block from the base layer. Block number: {block_number}."); + yield SyncEvent::NewBaseLayerBlock { block_number, block_hash } + } + None => { + debug!( + "No blocks were proved on the base layer, waiting for blockchain to \ + advance." + ); + } + } + } + } +} + +// This function is used to check if the sync is stuck. +// TODO(DvirYo): fix the bug and remove this function. +// TODO(dvir): add a test for this scenario. +fn check_sync_progress( + reader: StorageReader, + store_sierras_and_casms: bool, +) -> impl Stream> { + try_stream! 
+    {
+        let mut txn = reader.begin_ro_txn()?;
+        let mut header_marker = txn.get_header_marker()?;
+        let mut state_marker = txn.get_state_marker()?;
+        let mut casm_marker = txn.get_compiled_class_marker()?;
+        loop {
+            tokio::time::sleep(SLEEP_TIME_SYNC_PROGRESS).await;
+            debug!("Checking whether the sync has stopped progressing.");
+            txn = reader.begin_ro_txn()?;
+            let new_header_marker = txn.get_header_marker()?;
+            let new_state_marker = txn.get_state_marker()?;
+            let new_casm_marker = txn.get_compiled_class_marker()?;
+            let compiler_backward_compatibility_marker = txn.get_compiler_backward_compatibility_marker()?;
+            let is_casm_stuck = casm_marker == new_casm_marker && (new_casm_marker < compiler_backward_compatibility_marker || store_sierras_and_casms);
+            if header_marker == new_header_marker || state_marker == new_state_marker || is_casm_stuck {
+                debug!("No progress in the sync. Returning a NoProgress event. Header marker: {header_marker}, \
+                        State marker: {state_marker}, Casm marker: {casm_marker}.");
+                yield SyncEvent::NoProgress;
+            }
+            header_marker = new_header_marker;
+            state_marker = new_state_marker;
+            casm_marker = new_casm_marker;
+        }
+    }
+}
diff --git a/crates/papyrus_sync/src/pending_sync.rs b/crates/apollo_central_sync/src/pending_sync.rs
similarity index 96%
rename from crates/papyrus_sync/src/pending_sync.rs
rename to crates/apollo_central_sync/src/pending_sync.rs
index 0a6afbce3d1..4aa686c7838 100644
--- a/crates/papyrus_sync/src/pending_sync.rs
+++ b/crates/apollo_central_sync/src/pending_sync.rs
@@ -2,14 +2,14 @@ use std::collections::HashSet;
 use std::sync::Arc;
 use std::time::Duration;
 
+use apollo_starknet_client::reader::{DeclaredClassHashEntry, PendingData};
+use apollo_storage::header::HeaderStorageReader;
+use apollo_storage::StorageReader;
 use futures::stream::FuturesUnordered;
 use futures_util::{FutureExt, StreamExt};
 use papyrus_common::pending_classes::{PendingClasses, PendingClassesTrait};
-use papyrus_storage::header::HeaderStorageReader;
-use papyrus_storage::StorageReader;
 use starknet_api::block::{BlockHash, BlockNumber};
 use starknet_api::core::ClassHash;
-use starknet_client::reader::{DeclaredClassHashEntry, PendingData};
 use starknet_types_core::felt::Felt;
 use tokio::sync::RwLock;
 use tracing::{debug, trace};
@@ -32,7 +32,7 @@ pub(crate) async fn sync_pending_data<
 ) -> Result<(), StateSyncError> {
     let txn = reader.begin_ro_txn()?;
     let header_marker = txn.get_header_marker()?;
-    // TODO: Consider extracting this functionality to a different function.
+    // TODO(Shahak): Consider extracting this functionality to a different function.
     let latest_block_hash = match header_marker {
         BlockNumber(0) => BlockHash(Felt::from_hex_unchecked(crate::GENESIS_HASH)),
         _ => {
@@ -63,7 +63,7 @@ pub(crate) async fn sync_pending_data<
             PendingSyncTaskResult::PendingSyncFinished => return Ok(()),
             PendingSyncTaskResult::DownloadedNewPendingData => {
                 let (declared_classes, old_declared_contracts) = {
-                    // TODO (shahak): Consider getting the pending data from the task result instead
+                    // TODO(shahak): Consider getting the pending data from the task result instead
                     // of reading from the lock.
let pending_state_diff = &pending_data.read().await.state_update.state_diff; ( diff --git a/crates/papyrus_sync/src/sources/base_layer.rs b/crates/apollo_central_sync/src/sources/base_layer.rs similarity index 100% rename from crates/papyrus_sync/src/sources/base_layer.rs rename to crates/apollo_central_sync/src/sources/base_layer.rs diff --git a/crates/papyrus_sync/src/sources/central.rs b/crates/apollo_central_sync/src/sources/central.rs similarity index 89% rename from crates/papyrus_sync/src/sources/central.rs rename to crates/apollo_central_sync/src/sources/central.rs index a1fa122bf89..938fddb4c55 100644 --- a/crates/papyrus_sync/src/sources/central.rs +++ b/crates/apollo_central_sync/src/sources/central.rs @@ -7,6 +7,18 @@ use std::collections::{BTreeMap, HashMap}; use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; +use apollo_config::converters::{deserialize_optional_map, serialize_optional_map}; +use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use apollo_starknet_client::reader::{ + BlockSignatureData, + ReaderClientError, + StarknetFeederGatewayClient, + StarknetReader, +}; +use apollo_starknet_client::{ClientCreationError, RetryConfig}; +use apollo_storage::state::StateStorageReader; +use apollo_storage::{StorageError, StorageReader}; use async_stream::stream; use async_trait::async_trait; use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; @@ -18,11 +30,6 @@ use lru::LruCache; #[cfg(test)] use mockall::automock; use papyrus_common::pending_classes::ApiContractClass; -use papyrus_config::converters::{deserialize_optional_map, serialize_optional_map}; -use papyrus_config::dumping::{append_sub_config_name, ser_param, SerializeConfig}; -use papyrus_config::{ParamPath, ParamPrivacyInput, SerializedParam}; -use papyrus_storage::state::StateStorageReader; -use papyrus_storage::{StorageError, StorageReader}; use serde::{Deserialize, Serialize}; use starknet_api::block::{Block, BlockHash, BlockHashAndNumber, BlockNumber, BlockSignature}; use starknet_api::core::{ClassHash, CompiledClassHash, SequencerPublicKey}; @@ -30,22 +37,17 @@ use starknet_api::crypto::utils::Signature; use starknet_api::deprecated_contract_class::ContractClass as DeprecatedContractClass; use starknet_api::state::StateDiff; use starknet_api::StarknetApiError; -use starknet_client::reader::{ - BlockSignatureData, - ReaderClientError, - StarknetFeederGatewayClient, - StarknetReader, -}; -use starknet_client::{ClientCreationError, RetryConfig}; use tracing::{debug, trace}; +use url::Url; +use validator::Validate; use self::state_update_stream::{StateUpdateStream, StateUpdateStreamConfig}; type CentralResult = Result; -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Validate)] pub struct CentralSourceConfig { pub concurrent_requests: usize, - pub starknet_url: String, + pub starknet_url: Url, #[serde(deserialize_with = "deserialize_optional_map")] pub http_headers: Option>, pub max_state_updates_to_download: usize, @@ -60,7 +62,8 @@ impl Default for CentralSourceConfig { fn default() -> Self { CentralSourceConfig { concurrent_requests: 10, - starknet_url: String::from("https://alpha-mainnet.starknet.io/"), + starknet_url: Url::parse("https://alpha-mainnet.starknet.io/") + .expect("Unable to parse default URL, this should never happen."), http_headers: None, max_state_updates_to_download: 20, 
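A minimal sketch (illustration only, not part of this change): typing `starknet_url` as `url::Url` above moves validation to config-parse time, so a malformed URL fails loudly when the config is read instead of at the first HTTP request. The function name is hypothetical.

fn url_validation_demo() {
    // Well-formed URLs parse successfully.
    assert!(url::Url::parse("https://alpha-mainnet.starknet.io/").is_ok());
    // Malformed values are rejected up front, at config-parsing time.
    assert!(url::Url::parse("not a url").is_err());
}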
max_state_updates_to_store_in_memory: 20, @@ -122,14 +125,14 @@ impl SerializeConfig for CentralSourceConfig { ParamPrivacyInput::Public, ), ]); - chain!(self_params_dump, append_sub_config_name(self.retry_config.dump(), "retry_config")) + chain!(self_params_dump, prepend_sub_config_name(self.retry_config.dump(), "retry_config")) .collect() } } pub struct GenericCentralSource { pub concurrent_requests: usize, - pub starknet_client: Arc, + pub apollo_starknet_client: Arc, pub storage_reader: StorageReader, pub state_update_stream_config: StateUpdateStreamConfig, pub(crate) class_cache: Arc>>, @@ -214,9 +217,15 @@ impl CentralSourceTrait { // Returns the block hash and the block number of the latest block from the central source. async fn get_latest_block(&self) -> Result, CentralError> { - self.starknet_client.latest_block().await.map_err(Arc::new)?.map_or(Ok(None), |block| { - Ok(Some(BlockHashAndNumber { hash: block.block_hash(), number: block.block_number() })) - }) + self.apollo_starknet_client.latest_block().await.map_err(Arc::new)?.map_or( + Ok(None), + |block| { + Ok(Some(BlockHashAndNumber { + hash: block.block_hash(), + number: block.block_number(), + })) + }, + ) } // Returns the current block hash of the given block number from the central source. @@ -224,7 +233,7 @@ impl CentralSourceTrait &self, block_number: BlockNumber, ) -> Result, CentralError> { - self.starknet_client + self.apollo_starknet_client .block(block_number) .await .map_err(Arc::new)? @@ -240,7 +249,7 @@ impl CentralSourceTrait StateUpdateStream::new( initial_block_number, up_to_block_number, - self.starknet_client.clone(), + self.apollo_starknet_client.clone(), self.storage_reader.clone(), self.state_update_stream_config.clone(), self.class_cache.clone(), @@ -261,8 +270,8 @@ impl CentralSourceTrait futures_util::stream::iter(initial_block_number.iter_up_to(up_to_block_number)) .map(|bn| async move { let block_and_signature = futures_util::try_join!( - self.starknet_client.block(bn), - self.starknet_client.block_signature(bn) + self.apollo_starknet_client.block(bn), + self.apollo_starknet_client.block_signature(bn) ); (bn, block_and_signature) }) @@ -352,7 +361,7 @@ impl CentralSourceTrait } } let client_class = - self.starknet_client.class_by_hash(class_hash).await.map_err(Arc::new)?; + self.apollo_starknet_client.class_by_hash(class_hash).await.map_err(Arc::new)?; match client_class { None => Err(CentralError::ClassNotFound), Some(class) => { @@ -377,7 +386,7 @@ impl CentralSourceTrait return Ok(class.clone()); } } - match self.starknet_client.compiled_class_by_hash(class_hash).await { + match self.apollo_starknet_client.compiled_class_by_hash(class_hash).await { Ok(Some(compiled_class)) => { let mut compiled_class_cache = self.compiled_class_cache.lock().expect("Failed to lock class cache."); @@ -390,7 +399,7 @@ impl CentralSourceTrait } async fn get_sequencer_pub_key(&self) -> Result { - Ok(self.starknet_client.sequencer_pub_key().await.map_err(Arc::new)?) + Ok(self.apollo_starknet_client.sequencer_pub_key().await.map_err(Arc::new)?) 
} } @@ -398,8 +407,8 @@ fn client_to_central_block( current_block_number: BlockNumber, maybe_client_block: Result< ( - Option, - Option, + Option, + Option, ), ReaderClientError, >, @@ -444,8 +453,8 @@ impl CentralSource { node_version: &'static str, storage_reader: StorageReader, ) -> Result { - let starknet_client = StarknetFeederGatewayClient::new( - &config.starknet_url, + let apollo_starknet_client = StarknetFeederGatewayClient::new( + config.starknet_url.as_ref(), config.http_headers, node_version, config.retry_config, @@ -453,7 +462,7 @@ impl CentralSource { Ok(CentralSource { concurrent_requests: config.concurrent_requests, - starknet_client: Arc::new(starknet_client), + apollo_starknet_client: Arc::new(apollo_starknet_client), storage_reader, state_update_stream_config: StateUpdateStreamConfig { max_state_updates_to_download: config.max_state_updates_to_download, diff --git a/crates/papyrus_sync/src/sources/central/state_update_stream.rs b/crates/apollo_central_sync/src/sources/central/state_update_stream.rs similarity index 92% rename from crates/papyrus_sync/src/sources/central/state_update_stream.rs rename to crates/apollo_central_sync/src/sources/central/state_update_stream.rs index db811567c69..254e46b9948 100644 --- a/crates/papyrus_sync/src/sources/central/state_update_stream.rs +++ b/crates/apollo_central_sync/src/sources/central/state_update_stream.rs @@ -3,16 +3,16 @@ use std::pin::Pin; use std::sync::{Arc, Mutex}; use std::task::Poll; +use apollo_starknet_client::reader::{ReaderClientResult, StarknetReader, StateUpdate}; +use apollo_storage::state::StateStorageReader; +use apollo_storage::StorageReader; use futures_util::stream::FuturesOrdered; use futures_util::{Future, Stream, StreamExt}; use indexmap::IndexMap; use lru::LruCache; -use papyrus_storage::state::StateStorageReader; -use papyrus_storage::StorageReader; use starknet_api::block::BlockNumber; use starknet_api::core::ClassHash; use starknet_api::state::{StateDiff, StateNumber}; -use starknet_client::reader::{ReaderClientResult, StarknetReader, StateUpdate}; use tracing::log::trace; use tracing::{debug, instrument}; @@ -32,7 +32,7 @@ pub struct StateUpdateStreamConfig { pub(crate) struct StateUpdateStream { initial_block_number: BlockNumber, up_to_block_number: BlockNumber, - starknet_client: Arc, + apollo_starknet_client: Arc, storage_reader: StorageReader, download_state_update_tasks: TasksQueue<(BlockNumber, ReaderClientResult>)>, // Contains NumberOfClasses so we don't need to calculate it from the StateUpdate. 
@@ -88,7 +88,7 @@ impl StateUpdateStream< pub fn new( initial_block_number: BlockNumber, up_to_block_number: BlockNumber, - starknet_client: Arc, + apollo_starknet_client: Arc, storage_reader: StorageReader, config: StateUpdateStreamConfig, class_cache: Arc>>, @@ -96,7 +96,7 @@ impl StateUpdateStream< StateUpdateStream { initial_block_number, up_to_block_number, - starknet_client, + apollo_starknet_client, storage_reader, download_state_update_tasks: futures::stream::FuturesOrdered::new(), downloaded_state_updates: VecDeque::with_capacity( @@ -153,13 +153,13 @@ impl StateUpdateStream< let Some(class_hash) = self.classes_to_download.pop_front() else { break; }; - let starknet_client = self.starknet_client.clone(); + let apollo_starknet_client = self.apollo_starknet_client.clone(); let storage_reader = self.storage_reader.clone(); let cache = self.class_cache.clone(); self.download_class_tasks.push_back(Box::pin(download_class_if_necessary( cache, class_hash, - starknet_client, + apollo_starknet_client, storage_reader, ))); *should_poll_again = true; @@ -199,10 +199,13 @@ impl StateUpdateStream< && self.download_state_update_tasks.len() < self.config.max_state_updates_to_download { let current_block_number = self.initial_block_number; - let starknet_client = self.starknet_client.clone(); + let apollo_starknet_client = self.apollo_starknet_client.clone(); *should_poll_again = true; self.download_state_update_tasks.push_back(Box::pin(async move { - (current_block_number, starknet_client.state_update(current_block_number).await) + ( + current_block_number, + apollo_starknet_client.state_update(current_block_number).await, + ) })); self.initial_block_number = self.initial_block_number.unchecked_next(); } @@ -251,7 +254,7 @@ fn client_to_central_state_update( match maybe_client_state_update { Ok((state_update, mut declared_classes)) => { // Destruct the state diff to avoid partial move. - let starknet_client::reader::StateDiff { + let apollo_starknet_client::reader::StateDiff { storage_diffs, deployed_contracts, declared_classes: declared_class_hashes, @@ -268,10 +271,14 @@ fn client_to_central_state_update( let deployed_contract_class_definitions = deprecated_classes.split_off(n_deprecated_declared_classes); + let mut deployed_contracts = IndexMap::from_iter( + deployed_contracts.into_iter().map(|dc| (dc.address, dc.class_hash)), + ); + replaced_classes.into_iter().for_each(|rc| { + deployed_contracts.insert(rc.address, rc.class_hash); + }); let state_diff = StateDiff { - deployed_contracts: IndexMap::from_iter( - deployed_contracts.iter().map(|dc| (dc.address, dc.class_hash)), - ), + deployed_contracts, storage_diffs: IndexMap::from_iter(storage_diffs.into_iter().map( |(address, entries)| { (address, entries.into_iter().map(|se| (se.key, se.value)).collect()) @@ -298,10 +305,6 @@ fn client_to_central_state_update( }) .collect(), nonces, - replaced_classes: replaced_classes - .into_iter() - .map(|replaced_class| (replaced_class.address, replaced_class.class_hash)) - .collect(), }; // Filter out deployed contracts of new classes because since 0.11 new classes can not // be implicitly declared by deployment. @@ -334,11 +337,11 @@ fn client_to_central_state_update( // Given a class hash, returns the corresponding class definition. // First tries to retrieve the class from the storage. // If not found in the storage, the class is downloaded. 
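A minimal sketch (illustration only, not part of this change) of the lookup order described above: the in-memory cache is consulted first, then local storage, and only then is the class downloaded. All names here (`lookup_class`, the map-based cache and storage) are hypothetical stand-ins for the crate's types.

use std::collections::HashMap;

fn lookup_class<V: Clone>(
    cache: &mut HashMap<u64, V>,
    storage: &HashMap<u64, V>,
    download: impl FnOnce(u64) -> Option<V>,
    class_hash: u64,
) -> Option<V> {
    if let Some(class) = cache.get(&class_hash) {
        return Some(class.clone()); // Cache hit: cheapest path.
    }
    // Fall back to storage, then to a network download.
    let class = storage.get(&class_hash).cloned().or_else(|| download(class_hash))?;
    cache.insert(class_hash, class.clone()); // Memoize for subsequent lookups.
    Some(class)
}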
-#[instrument(skip(starknet_client, storage_reader), level = "debug", err)] +#[instrument(skip(apollo_starknet_client, storage_reader), level = "debug", err)] async fn download_class_if_necessary( cache: Arc>>, class_hash: ClassHash, - starknet_client: Arc, + apollo_starknet_client: Arc, storage_reader: StorageReader, ) -> CentralResult> { { @@ -377,7 +380,7 @@ async fn download_class_if_necessary( // Class not found in storage - download. trace!("Downloading class {:?}.", class_hash); - let client_class = starknet_client.class_by_hash(class_hash).await.map_err(Arc::new)?; + let client_class = apollo_starknet_client.class_by_hash(class_hash).await.map_err(Arc::new)?; match client_class { None => Ok(None), Some(class) => { diff --git a/crates/papyrus_sync/src/sources/central_sync_test.rs b/crates/apollo_central_sync/src/sources/central_sync_test.rs similarity index 93% rename from crates/papyrus_sync/src/sources/central_sync_test.rs rename to crates/apollo_central_sync/src/sources/central_sync_test.rs index 581de1263e5..bad44b9687a 100644 --- a/crates/papyrus_sync/src/sources/central_sync_test.rs +++ b/crates/apollo_central_sync/src/sources/central_sync_test.rs @@ -1,6 +1,14 @@ +use core::panic; use std::sync::Arc; use std::time::Duration; +use apollo_class_manager_types::ClassManagerClient; +use apollo_starknet_client::reader::PendingData; +use apollo_storage::base_layer::BaseLayerStorageReader; +use apollo_storage::header::HeaderStorageReader; +use apollo_storage::state::StateStorageReader; +use apollo_storage::test_utils::get_test_storage; +use apollo_storage::{StorageError, StorageReader, StorageWriter}; use assert_matches::assert_matches; use async_stream::stream; use async_trait::async_trait; @@ -8,11 +16,6 @@ use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; use futures::StreamExt; use indexmap::IndexMap; use papyrus_common::pending_classes::{ApiContractClass, PendingClasses}; -use papyrus_storage::base_layer::BaseLayerStorageReader; -use papyrus_storage::header::HeaderStorageReader; -use papyrus_storage::state::StateStorageReader; -use papyrus_storage::test_utils::get_test_storage; -use papyrus_storage::{StorageError, StorageReader, StorageWriter}; use starknet_api::block::{ Block, BlockBody, @@ -27,8 +30,8 @@ use starknet_api::core::{ClassHash, SequencerPublicKey}; use starknet_api::crypto::utils::PublicKey; use starknet_api::felt; use starknet_api::state::StateDiff; -use starknet_client::reader::PendingData; use tokio::sync::{Mutex, RwLock}; +use tokio::time::sleep; use tracing::{debug, error}; use super::pending::MockPendingSourceTrait; @@ -51,7 +54,7 @@ use crate::{ const SYNC_SLEEP_DURATION: Duration = Duration::from_millis(100); // 100ms const BASE_LAYER_SLEEP_DURATION: Duration = Duration::from_millis(10); // 10ms const DURATION_BEFORE_CHECKING_STORAGE: Duration = SYNC_SLEEP_DURATION.saturating_mul(2); // 200ms twice the sleep duration of the sync loop. -const MAX_CHECK_STORAGE_ITERATIONS: u8 = 3; +const MAX_CHECK_STORAGE_ITERATIONS: u8 = 5; const STREAM_SIZE: u32 = 1000; // TODO(dvir): separate this file to flow tests and unit tests. @@ -103,6 +106,8 @@ fn get_test_sync_config(verify_blocks: bool) -> SyncConfig { state_updates_max_stream_size: STREAM_SIZE, verify_blocks, collect_pending_data: false, + // TODO(Shahak): Add test where store_sierras_and_casms is set to false. 
+ store_sierras_and_casms: true, } } @@ -113,6 +118,7 @@ async fn run_sync( central: impl CentralSourceTrait + Send + Sync + 'static, base_layer: impl BaseLayerSourceTrait + Send + Sync, config: SyncConfig, + class_manager_client: Option>, ) -> StateSyncResult { // Mock to the pending source that always returns the default pending data. let mut pending_source = MockPendingSourceTrait::new(); @@ -125,10 +131,15 @@ async fn run_sync( central_source: Arc::new(central), pending_source: Arc::new(pending_source), pending_classes: Arc::new(RwLock::new(PendingClasses::default())), - base_layer_source: Arc::new(base_layer), + base_layer_source: Some(Arc::new(base_layer)), reader, - writer, + writer: Arc::new(Mutex::new(writer)), sequencer_pub_key: None, + // TODO(shahak): Add test with mock class manager client. + // TODO(shahak): Add test with post 0.14.0 block and mock class manager client and see that + // up until that block we call add_class_and_executable_unsafe and from that block we call + // add_class. + class_manager_client, }; state_sync.run().await?; @@ -148,12 +159,14 @@ async fn sync_empty_chain() { base_layer_mock.expect_latest_proved_block().returning(|| Ok(None)); let ((reader, writer), _temp_dir) = get_test_storage(); + let class_manager_client = None; let sync_future = run_sync( reader.clone(), writer, central_mock, base_layer_mock, get_test_sync_config(false), + class_manager_client, ); // Check that the header marker is 0. @@ -215,6 +228,7 @@ async fn sync_happy_flow() { central_mock.expect_stream_state_updates().returning(move |initial, up_to| { let state_stream: StateUpdatesStream<'_> = stream! { for block_number in initial.iter_up_to(up_to) { + // TODO(Eitan): test classes were added to class manager by including declared classes and deprecated declared classes if block_number.0 >= N_BLOCKS { yield Err(CentralError::BlockNotFound { block_number }) } @@ -256,6 +270,7 @@ async fn sync_happy_flow() { central_mock, base_layer_mock, get_test_sync_config(false), + None, ); // Check that the storage reached N_BLOCKS within MAX_TIME_TO_SYNC_MS. @@ -293,6 +308,7 @@ async fn sync_happy_flow() { }); tokio::select! { + _ = sleep(Duration::from_secs(1)) => panic!("Test timed out."), sync_result = sync_future => sync_result.unwrap(), storage_check_result = check_storage_future => assert!(storage_check_result), } @@ -313,13 +329,21 @@ async fn sync_with_revert() { let mock = MockedCentralWithRevert { reverted: reverted_mutex.clone() }; let mut base_layer_mock = MockBaseLayerSourceTrait::new(); base_layer_mock.expect_latest_proved_block().returning(|| Ok(None)); - let sync_future = - run_sync(reader.clone(), writer, mock, base_layer_mock, get_test_sync_config(false)); + let class_manager_client = None; + let sync_future = run_sync( + reader.clone(), + writer, + mock, + base_layer_mock, + get_test_sync_config(false), + class_manager_client, + ); // Prepare functions that check that the sync worked up to N_BLOCKS_BEFORE_REVERT and then // reacted correctly to the revert. const N_BLOCKS_BEFORE_REVERT: u64 = 8; - const MAX_TIME_TO_SYNC_BEFORE_REVERT_MS: u64 = 100; + // FIXME: (Shahak) analyze and set a lower value. + const MAX_TIME_TO_SYNC_BEFORE_REVERT_MS: u64 = 900; const CHAIN_FORK_BLOCK_NUMBER: u64 = 5; const N_BLOCKS_AFTER_REVERT: u64 = 10; // FIXME: (Omri) analyze and set a lower value. @@ -403,7 +427,7 @@ async fn sync_with_revert() { return CheckStoragePredicateResult::Error; } - // TODO: add checks to the state diff. + // TODO(Yair): add checks to the state diff. 
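A minimal sketch (illustration only, not part of this change) of the polling pattern these tests use: a storage predicate is evaluated at a fixed interval until it passes, fails, or the deadline elapses. `poll_until` and the simplified `Check` enum are hypothetical stand-ins for the test utilities and `CheckStoragePredicateResult` above.

use std::time::{Duration, Instant};

enum Check {
    Passed,
    InProgress,
    Error,
}

fn poll_until(deadline: Duration, interval: Duration, mut predicate: impl FnMut() -> Check) -> bool {
    let start = Instant::now();
    while start.elapsed() < deadline {
        match predicate() {
            Check::Passed => return true, // Storage reached the expected state.
            Check::Error => return false, // Unrecoverable mismatch; fail fast.
            // The async tests above would use tokio::time::sleep instead.
            Check::InProgress => std::thread::sleep(interval),
        }
    }
    false // Timed out before the predicate passed.
}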
} CheckStoragePredicateResult::Passed @@ -652,12 +676,14 @@ async fn test_unrecoverable_sync_error_flow() { .returning(|_| Ok(Some(create_block_hash(WRONG_BLOCK_NUMBER, false)))); let ((reader, writer), _temp_dir) = get_test_storage(); + let class_manager_client = None; let sync_future = run_sync( reader.clone(), writer, mock, MockBaseLayerSourceTrait::new(), get_test_sync_config(false), + class_manager_client, ); let sync_res = tokio::join! {sync_future}; assert!(sync_res.0.is_err()); @@ -692,7 +718,15 @@ async fn sequencer_pub_key_management() { let ((reader, writer), _temp_dir) = get_test_storage(); let config = get_test_sync_config(true); - let sync_future = run_sync(reader.clone(), writer, central_mock, base_layer_mock, config); + let class_manager_client = None; + let sync_future = run_sync( + reader.clone(), + writer, + central_mock, + base_layer_mock, + config, + class_manager_client, + ); let sync_result = tokio::time::timeout(config.block_propagation_sleep_duration * 4, sync_future) diff --git a/crates/papyrus_sync/src/sources/central_test.rs b/crates/apollo_central_sync/src/sources/central_test.rs similarity index 96% rename from crates/papyrus_sync/src/sources/central_test.rs rename to crates/apollo_central_sync/src/sources/central_test.rs index c8c593ee10d..c6512a25321 100644 --- a/crates/papyrus_sync/src/sources/central_test.rs +++ b/crates/apollo_central_sync/src/sources/central_test.rs @@ -1,15 +1,30 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; +use apollo_starknet_client::reader::objects::block::BlockPostV0_13_1; +use apollo_starknet_client::reader::{ + Block, + BlockSignatureData, + ContractClass, + DeclaredClassHashEntry, + DeployedContract, + GenericContractClass, + MockStarknetReader, + ReaderClientError, + ReplacedClass, + StateUpdate, + StorageEntry, +}; +use apollo_starknet_client::ClientError; +use apollo_storage::class::ClassStorageWriter; +use apollo_storage::state::StateStorageWriter; +use apollo_storage::test_utils::get_test_storage; use assert_matches::assert_matches; use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; use futures_util::pin_mut; use indexmap::{indexmap, IndexMap}; use lru::LruCache; use mockall::predicate; -use papyrus_storage::class::ClassStorageWriter; -use papyrus_storage::state::StateStorageWriter; -use papyrus_storage::test_utils::get_test_storage; use pretty_assertions::assert_eq; use reqwest::StatusCode; use starknet_api::block::{BlockHash, BlockNumber}; @@ -19,21 +34,6 @@ use starknet_api::deprecated_contract_class::ContractClass as DeprecatedContract use starknet_api::hash::StarkHash; use starknet_api::state::{SierraContractClass as sn_api_ContractClass, ThinStateDiff}; use starknet_api::{class_hash, contract_address, felt, storage_key}; -use starknet_client::reader::objects::block::BlockPostV0_13_1; -use starknet_client::reader::{ - Block, - BlockSignatureData, - ContractClass, - DeclaredClassHashEntry, - DeployedContract, - GenericContractClass, - MockStarknetReader, - ReaderClientError, - ReplacedClass, - StateUpdate, - StorageEntry, -}; -use starknet_client::ClientError; use tokio_stream::StreamExt; use super::state_update_stream::StateUpdateStreamConfig; @@ -57,7 +57,7 @@ async fn last_block_number() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), concurrent_requests: TEST_CONCURRENT_REQUESTS, storage_reader: reader, state_update_stream_config: 
state_update_stream_config_for_test(), @@ -97,7 +97,7 @@ async fn stream_block_headers() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), @@ -177,7 +177,7 @@ async fn stream_block_headers_some_are_missing() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), @@ -241,7 +241,7 @@ async fn stream_block_headers_error() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), @@ -312,7 +312,7 @@ async fn stream_state_updates() { compiled_class_hash: compiled_class_hash2, }; - let client_state_diff1 = starknet_client::reader::StateDiff { + let client_state_diff1 = apollo_starknet_client::reader::StateDiff { storage_diffs: IndexMap::from([(contract_address1, vec![StorageEntry { key, value }])]), deployed_contracts: vec![ DeployedContract { address: contract_address1, class_hash: class_hash2 }, @@ -326,7 +326,7 @@ async fn stream_state_updates() { class_hash: class_hash4, }], }; - let client_state_diff2 = starknet_client::reader::StateDiff::default(); + let client_state_diff2 = apollo_starknet_client::reader::StateDiff::default(); let block_state_update1 = StateUpdate { block_hash: block_hash1, @@ -379,7 +379,7 @@ async fn stream_state_updates() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), // TODO(shahak): Check that downloaded classes appear in the cache. @@ -406,7 +406,11 @@ async fn stream_state_updates() { ); assert_eq!( - IndexMap::from([(contract_address1, class_hash2), (contract_address2, class_hash3)]), + IndexMap::from([ + (contract_address1, class_hash2), + (contract_address2, class_hash3), + (contract_address3, class_hash4) + ]), state_diff.deployed_contracts ); assert_eq!( @@ -440,7 +444,6 @@ async fn stream_state_updates() { state_diff.declared_classes, ); assert_eq!(IndexMap::from([(contract_address1, nonce1)]), state_diff.nonces); - assert_eq!(IndexMap::from([(contract_address3, class_hash4)]), state_diff.replaced_classes); let Some(Ok(state_diff_tuple)) = stream.next().await else { panic!("Match of streamed state_update failed!"); @@ -471,7 +474,6 @@ async fn stream_compiled_classes() { }, deprecated_declared_classes: vec![], nonces: indexmap! {}, - replaced_classes: indexmap! {}, }, ) .unwrap() @@ -486,7 +488,6 @@ async fn stream_compiled_classes() { }, deprecated_declared_classes: vec![], nonces: indexmap! {}, - replaced_classes: indexmap! 
{}, }, ) .unwrap() @@ -532,7 +533,7 @@ async fn stream_compiled_classes() { let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), @@ -586,7 +587,7 @@ async fn get_class() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), @@ -631,7 +632,7 @@ async fn get_compiled_class() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), @@ -655,7 +656,7 @@ async fn get_sequencer_pub_key() { let ((reader, _), _temp_dir) = get_test_storage(); let central_source = GenericCentralSource { concurrent_requests: TEST_CONCURRENT_REQUESTS, - starknet_client: Arc::new(mock), + apollo_starknet_client: Arc::new(mock), storage_reader: reader, state_update_stream_config: state_update_stream_config_for_test(), class_cache: get_test_class_cache(), diff --git a/crates/papyrus_sync/src/sources/mod.rs b/crates/apollo_central_sync/src/sources/mod.rs similarity index 100% rename from crates/papyrus_sync/src/sources/mod.rs rename to crates/apollo_central_sync/src/sources/mod.rs diff --git a/crates/apollo_central_sync/src/sources/pending.rs b/crates/apollo_central_sync/src/sources/pending.rs new file mode 100644 index 00000000000..286d738d4fd --- /dev/null +++ b/crates/apollo_central_sync/src/sources/pending.rs @@ -0,0 +1,69 @@ +#[cfg(test)] +#[path = "pending_test.rs"] +mod pending_test; + +use std::sync::Arc; + +use apollo_starknet_client::reader::{ + PendingData, + ReaderClientError, + StarknetFeederGatewayClient, + StarknetReader, +}; +use apollo_starknet_client::ClientCreationError; +use async_trait::async_trait; +#[cfg(test)] +use mockall::automock; + +// TODO(dvir): add pending config. 
+use super::central::CentralSourceConfig;
+
+pub struct GenericPendingSource<TStarknetClient: StarknetReader + Send + Sync> {
+    pub apollo_starknet_client: Arc<TStarknetClient>,
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum PendingError {
+    #[error(transparent)]
+    ClientCreation(#[from] ClientCreationError),
+    #[error(transparent)]
+    ClientError(#[from] Arc<ReaderClientError>),
+    #[error("Pending block not found")]
+    PendingBlockNotFound,
+}
+#[cfg_attr(test, automock)]
+#[async_trait]
+pub trait PendingSourceTrait {
+    async fn get_pending_data(&self) -> Result<PendingData, PendingError>;
+}
+
+#[async_trait]
+impl<TStarknetClient: StarknetReader + Send + Sync + 'static> PendingSourceTrait
+    for GenericPendingSource<TStarknetClient>
+{
+    async fn get_pending_data(&self) -> Result<PendingData, PendingError> {
+        match self.apollo_starknet_client.pending_data().await {
+            Ok(Some(pending_data)) => Ok(pending_data),
+            Ok(None) => Err(PendingError::PendingBlockNotFound),
+            Err(err) => Err(PendingError::ClientError(Arc::new(err))),
+        }
+    }
+}
+
+pub type PendingSource = GenericPendingSource<StarknetFeederGatewayClient>;
+
+impl PendingSource {
+    pub fn new(
+        config: CentralSourceConfig,
+        node_version: &'static str,
+    ) -> Result<PendingSource, ClientCreationError> {
+        let apollo_starknet_client = StarknetFeederGatewayClient::new(
+            config.starknet_url.as_ref(),
+            config.http_headers,
+            node_version,
+            config.retry_config,
+        )?;
+
+        Ok(PendingSource { apollo_starknet_client: Arc::new(apollo_starknet_client) })
+    }
+}
diff --git a/crates/papyrus_sync/src/sources/pending_test.rs b/crates/apollo_central_sync/src/sources/pending_test.rs
similarity index 78%
rename from crates/papyrus_sync/src/sources/pending_test.rs
rename to crates/apollo_central_sync/src/sources/pending_test.rs
index 68e0140397a..185fddeadc8 100644
--- a/crates/papyrus_sync/src/sources/pending_test.rs
+++ b/crates/apollo_central_sync/src/sources/pending_test.rs
@@ -1,7 +1,7 @@
 use std::sync::Arc;
 
+use apollo_starknet_client::reader::{MockStarknetReader, PendingData};
 use pretty_assertions::assert_eq;
-use starknet_client::reader::{MockStarknetReader, PendingData};
 
 use crate::sources::pending::{GenericPendingSource, PendingSourceTrait};
 
@@ -13,7 +13,7 @@ async fn get_pending_data() {
     // TODO(dvir): use pending_data which isn't the default.
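A minimal sketch (illustration only, not part of this change) of the mapping `get_pending_data` performs above: the client's `Result<Option<T>, E>` is flattened so that a missing pending block becomes an explicit error. `flatten_pending` is a hypothetical helper, not the crate's API.

fn flatten_pending<T, E>(
    response: Result<Option<T>, E>,
    missing: impl FnOnce() -> E,
) -> Result<T, E> {
    match response {
        Ok(Some(data)) => Ok(data), // Pending block available.
        Ok(None) => Err(missing()), // No pending block: surfaced as PendingBlockNotFound above.
        Err(err) => Err(err),       // Client/transport error propagated as-is.
    }
}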
client_mock.expect_pending_data().times(1).returning(|| Ok(Some(PendingData::default()))); - let pending_source = GenericPendingSource { starknet_client: Arc::new(client_mock) }; + let pending_source = GenericPendingSource { apollo_starknet_client: Arc::new(client_mock) }; let pending_data = pending_source.get_pending_data().await.unwrap(); assert_eq!(pending_data, PendingData::default()); diff --git a/crates/papyrus_sync/src/sync_test.rs b/crates/apollo_central_sync/src/sync_test.rs similarity index 95% rename from crates/papyrus_sync/src/sync_test.rs rename to crates/apollo_central_sync/src/sync_test.rs index 7a2796d2f0d..1dcf0587321 100644 --- a/crates/papyrus_sync/src/sync_test.rs +++ b/crates/apollo_central_sync/src/sync_test.rs @@ -1,16 +1,25 @@ use std::sync::Arc; use std::time::Duration; +use apollo_starknet_client::reader::objects::pending_data::{ + AcceptedOnL2ExtraData, + DeprecatedPendingBlock, + PendingBlockOrDeprecated, + PendingStateUpdate, +}; +use apollo_starknet_client::reader::objects::state::StateDiff as ClientStateDiff; +use apollo_starknet_client::reader::objects::transaction::Transaction as ClientTransaction; +use apollo_starknet_client::reader::{DeclaredClassHashEntry, PendingData}; +use apollo_storage::base_layer::BaseLayerStorageReader; +use apollo_storage::header::HeaderStorageWriter; +use apollo_storage::test_utils::get_test_storage; +use apollo_storage::{StorageReader, StorageWriter}; +use apollo_test_utils::{get_rng, GetTestInstance}; use assert_matches::assert_matches; use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; use futures_util::StreamExt; use indexmap::IndexMap; use papyrus_common::pending_classes::{ApiContractClass, PendingClasses, PendingClassesTrait}; -use papyrus_storage::base_layer::BaseLayerStorageReader; -use papyrus_storage::header::HeaderStorageWriter; -use papyrus_storage::test_utils::get_test_storage; -use papyrus_storage::{StorageReader, StorageWriter}; -use papyrus_test_utils::{get_rng, GetTestInstance}; use pretty_assertions::assert_eq; use starknet_api::block::{BlockHash, BlockHeader, BlockHeaderWithoutHash, BlockNumber}; use starknet_api::core::{ClassHash, CompiledClassHash, Nonce}; @@ -18,16 +27,7 @@ use starknet_api::deprecated_contract_class::ContractClass as DeprecatedContract use starknet_api::hash::StarkHash; use starknet_api::state::{SierraContractClass, StateDiff}; use starknet_api::{contract_address, felt, storage_key}; -use starknet_client::reader::objects::pending_data::{ - AcceptedOnL2ExtraData, - DeprecatedPendingBlock, - PendingBlockOrDeprecated, - PendingStateUpdate, -}; -use starknet_client::reader::objects::state::StateDiff as ClientStateDiff; -use starknet_client::reader::objects::transaction::Transaction as ClientTransaction; -use starknet_client::reader::{DeclaredClassHashEntry, PendingData}; -use tokio::sync::RwLock; +use tokio::sync::{Mutex, RwLock}; use crate::sources::base_layer::MockBaseLayerSourceTrait; use crate::sources::central::MockCentralSourceTrait; @@ -64,8 +64,6 @@ fn state_sorted() { let deprecated_declared_1 = (ClassHash(hash1), DeprecatedContractClass::default()); let nonce_0 = (contract_address_0, Nonce(hash0)); let nonce_1 = (contract_address_1, Nonce(hash1)); - let replaced_class_0 = (contract_address_0, ClassHash(hash0)); - let replaced_class_1 = (contract_address_1, ClassHash(hash1)); let unsorted_deployed_contracts = IndexMap::from([dep_contract_1, dep_contract_0]); let unsorted_declared_classes = @@ -78,7 +76,6 @@ fn state_sorted() { (contract_address_1, 
unsorted_storage_entries.clone()), (contract_address_0, unsorted_storage_entries), ]); - let unsorted_replaced_classes = IndexMap::from([replaced_class_1, replaced_class_0]); let mut state_diff = StateDiff { deployed_contracts: unsorted_deployed_contracts, @@ -86,7 +83,6 @@ fn state_sorted() { deprecated_declared_classes: unsorted_deprecated_declared, declared_classes: unsorted_declared_classes, nonces: unsorted_nonces, - replaced_classes: unsorted_replaced_classes, }; let sorted_deployed_contracts = IndexMap::from([dep_contract_0, dep_contract_1]); @@ -98,7 +94,6 @@ fn state_sorted() { (contract_address_0, sorted_storage_entries.clone()), (contract_address_1, sorted_storage_entries.clone()), ]); - let sorted_replaced_classes = IndexMap::from([replaced_class_0, replaced_class_1]); sort_state_diff(&mut state_diff); assert_eq!( @@ -122,10 +117,6 @@ fn state_sorted() { sorted_storage_entries.get_index(0).unwrap(), ); assert_eq!(state_diff.nonces.get_index(0).unwrap(), sorted_nonces.get_index(0).unwrap()); - assert_eq!( - state_diff.replaced_classes.get_index(0).unwrap(), - sorted_replaced_classes.get_index(0).unwrap(), - ); } #[tokio::test] @@ -171,8 +162,8 @@ async fn stream_new_base_layer_block_no_blocks_on_base_layer() { assert_matches!(event, SyncEvent::NewBaseLayerBlock { block_number: BlockNumber(1), .. }); } -#[test] -fn store_base_layer_block_test() { +#[tokio::test] +async fn store_base_layer_block_test() { let (reader, mut writer) = get_test_storage().0; let header_hash = BlockHash(felt!("0x0")); @@ -199,22 +190,24 @@ fn store_base_layer_block_test() { central_source: Arc::new(MockCentralSourceTrait::new()), pending_source: Arc::new(MockPendingSourceTrait::new()), pending_classes: Arc::new(RwLock::new(PendingClasses::default())), - base_layer_source: Arc::new(MockBaseLayerSourceTrait::new()), + base_layer_source: Some(Arc::new(MockBaseLayerSourceTrait::new())), reader, - writer, + writer: Arc::new(Mutex::new(writer)), sequencer_pub_key: None, + class_manager_client: None, }; // Trying to store a block without a header in the storage. - let res = gen_state_sync.store_base_layer_block(BlockNumber(1), BlockHash::default()); + let res = gen_state_sync.store_base_layer_block(BlockNumber(1), BlockHash::default()).await; assert_matches!(res, Err(StateSyncError::BaseLayerBlockWithoutMatchingHeader { .. })); // Trying to store a block with mismatching header. - let res = gen_state_sync.store_base_layer_block(BlockNumber(0), BlockHash(felt!("0x666"))); + let res = + gen_state_sync.store_base_layer_block(BlockNumber(0), BlockHash(felt!("0x666"))).await; assert_matches!(res, Err(StateSyncError::BaseLayerHashMismatch { .. })); // Happy flow. 
- let res = gen_state_sync.store_base_layer_block(BlockNumber(0), header_hash); + let res = gen_state_sync.store_base_layer_block(BlockNumber(0), header_hash).await; assert!(res.is_ok()); let base_layer_marker = gen_state_sync.reader.begin_ro_txn().unwrap().get_base_layer_block_marker().unwrap(); diff --git a/crates/apollo_class_manager/Cargo.toml b/crates/apollo_class_manager/Cargo.toml new file mode 100644 index 00000000000..9c07cd708c3 --- /dev/null +++ b/crates/apollo_class_manager/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "apollo_class_manager" +edition.workspace = true +license.workspace = true +repository.workspace = true +version.workspace = true + +[features] +testing = [] + +[lints] +workspace = true + +[dependencies] +apollo_class_manager_types.workspace = true +apollo_compile_to_casm_types.workspace = true +apollo_config.workspace = true +apollo_infra.workspace = true +apollo_metrics.workspace = true +apollo_storage.workspace = true +async-trait.workspace = true +hex.workspace = true +serde.workspace = true +starknet_api.workspace = true +strum.workspace = true +strum_macros.workspace = true +tempfile.workspace = true +thiserror.workspace = true +tracing.workspace = true +validator.workspace = true + +[dev-dependencies] +apollo_compile_to_casm_types = { workspace = true, features = ["testing"] } +assert_matches.workspace = true +mockall.workspace = true +starknet_api = { workspace = true, features = ["testing"] } +tokio.workspace = true diff --git a/crates/apollo_class_manager/src/class_manager.rs b/crates/apollo_class_manager/src/class_manager.rs new file mode 100644 index 00000000000..cf748e4fb7d --- /dev/null +++ b/crates/apollo_class_manager/src/class_manager.rs @@ -0,0 +1,148 @@ +use apollo_class_manager_types::{ + ClassHashes, + ClassId, + ClassManagerError, + ClassManagerResult, + ExecutableClassHash, +}; +use apollo_compile_to_casm_types::{ + RawClass, + RawExecutableClass, + SharedSierraCompilerClient, + SierraCompilerClientError, +}; +use apollo_infra::component_definitions::{default_component_start_fn, ComponentStarter}; +use async_trait::async_trait; +use starknet_api::state::SierraContractClass; +use tracing::instrument; + +use crate::class_storage::{CachedClassStorage, ClassStorage, FsClassStorage}; +use crate::config::{ClassManagerConfig, FsClassManagerConfig}; +use crate::metrics::register_metrics; +use crate::FsClassManager; + +#[cfg(test)] +#[path = "class_manager_test.rs"] +pub mod class_manager_test; + +pub struct ClassManager { + pub config: ClassManagerConfig, + pub compiler: SharedSierraCompilerClient, + pub classes: CachedClassStorage, +} + +impl ClassManager { + pub fn new( + config: ClassManagerConfig, + compiler: SharedSierraCompilerClient, + storage: S, + ) -> Self { + let cached_class_storage_config = config.cached_class_storage_config.clone(); + Self { + config, + compiler, + classes: CachedClassStorage::new(cached_class_storage_config, storage), + } + } + + #[instrument(skip(self, class), ret, err)] + pub async fn add_class(&mut self, class: RawClass) -> ClassManagerResult { + // TODO(Elin): think how to not clone the class to deserialize. + let sierra_class = SierraContractClass::try_from(class.clone())?; + let class_hash = sierra_class.calculate_class_hash(); + if let Ok(Some(executable_class_hash)) = self.classes.get_executable_class_hash(class_hash) + { + // Class already exists. 
+            return Ok(ClassHashes { class_hash, executable_class_hash });
+        }
+
+        let (raw_executable_class, executable_class_hash) =
+            self.compiler.compile(class.clone()).await.map_err(|err| match err {
+                SierraCompilerClientError::SierraCompilerError(error) => {
+                    ClassManagerError::SierraCompiler { class_hash, error }
+                }
+                SierraCompilerClientError::ClientError(error) => {
+                    ClassManagerError::Client(error.to_string())
+                }
+            })?;
+
+        self.validate_class_length(&raw_executable_class)?;
+
+        self.classes.set_class(class_hash, class, executable_class_hash, raw_executable_class)?;
+
+        let class_hashes = ClassHashes { class_hash, executable_class_hash };
+        Ok(class_hashes)
+    }
+
+    #[instrument(skip(self), err)]
+    pub fn get_executable(
+        &self,
+        class_id: ClassId,
+    ) -> ClassManagerResult<Option<RawExecutableClass>> {
+        Ok(self.classes.get_executable(class_id)?)
+    }
+
+    #[instrument(skip(self), err)]
+    pub fn get_sierra(&self, class_id: ClassId) -> ClassManagerResult<Option<RawClass>> {
+        Ok(self.classes.get_sierra(class_id)?)
+    }
+
+    #[instrument(skip(self, class), ret, err)]
+    pub fn add_deprecated_class(
+        &mut self,
+        class_id: ClassId,
+        class: RawExecutableClass,
+    ) -> ClassManagerResult<()> {
+        self.classes.set_deprecated_class(class_id, class)?;
+        Ok(())
+    }
+
+    #[instrument(skip(self, class, executable_class), ret, err)]
+    pub fn add_class_and_executable_unsafe(
+        &mut self,
+        class_id: ClassId,
+        class: RawClass,
+        executable_class_id: ExecutableClassHash,
+        executable_class: RawExecutableClass,
+    ) -> ClassManagerResult<()> {
+        Ok(self.classes.set_class(class_id, class, executable_class_id, executable_class)?)
+    }
+
+    fn validate_class_length(
+        &self,
+        serialized_class: &RawExecutableClass,
+    ) -> ClassManagerResult<()> {
+        // Note: The class bytecode length is validated in the compiler.
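A minimal sketch (illustration only, not part of this change) of the guard implemented below: the serialized object size is compared against a configured maximum. `validate_size` and `max_size` are hypothetical stand-ins for `validate_class_length` and `max_compiled_contract_class_object_size`.

fn validate_size(serialized_len: usize, max_size: usize) -> Result<(), String> {
    if serialized_len > max_size {
        // Mirrors ClassManagerError::ContractClassObjectSizeTooLarge above.
        return Err(format!("class object size {serialized_len} exceeds maximum {max_size}"));
    }
    Ok(())
}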
+ + let contract_class_object_size = + serialized_class.size().expect("Unexpected error serializing contract class."); + if contract_class_object_size > self.config.max_compiled_contract_class_object_size { + return Err(ClassManagerError::ContractClassObjectSizeTooLarge { + contract_class_object_size, + max_contract_class_object_size: self.config.max_compiled_contract_class_object_size, + }); + } + + Ok(()) + } +} + +pub fn create_class_manager( + config: FsClassManagerConfig, + compiler_client: SharedSierraCompilerClient, +) -> FsClassManager { + let FsClassManagerConfig { class_manager_config, class_storage_config } = config; + let fs_class_storage = + FsClassStorage::new(class_storage_config).expect("Failed to create class storage."); + let class_manager = ClassManager::new(class_manager_config, compiler_client, fs_class_storage); + + FsClassManager(class_manager) +} + +#[async_trait] +impl ComponentStarter for FsClassManager { + async fn start(&mut self) { + default_component_start_fn::().await; + register_metrics(); + } +} diff --git a/crates/apollo_class_manager/src/class_manager_test.rs b/crates/apollo_class_manager/src/class_manager_test.rs new file mode 100644 index 00000000000..9adb0816613 --- /dev/null +++ b/crates/apollo_class_manager/src/class_manager_test.rs @@ -0,0 +1,153 @@ +use std::sync::Arc; + +use apollo_class_manager_types::{ClassHashes, ClassManagerError}; +use apollo_compile_to_casm_types::{MockSierraCompilerClient, RawClass, RawExecutableClass}; +use assert_matches::assert_matches; +use mockall::predicate::eq; +use starknet_api::contract_class::ContractClass; +use starknet_api::core::{ClassHash, CompiledClassHash}; +use starknet_api::felt; +use starknet_api::state::SierraContractClass; + +use crate::class_manager::ClassManager; +use crate::class_storage::{create_tmp_dir, CachedClassStorageConfig, FsClassStorage}; +use crate::config::ClassManagerConfig; + +impl ClassManager { + fn new_for_testing(compiler: MockSierraCompilerClient, config: ClassManagerConfig) -> Self { + let storage = + FsClassStorage::new_for_testing(&create_tmp_dir().unwrap(), &create_tmp_dir().unwrap()); + + ClassManager::new(config, Arc::new(compiler), storage) + } +} + +fn mock_compile_expectations( + compiler: &mut MockSierraCompilerClient, + class: RawClass, +) -> (RawExecutableClass, CompiledClassHash) { + let compile_output = ( + RawExecutableClass::try_from(ContractClass::test_casm_contract_class()).unwrap(), + CompiledClassHash(felt!("0x5678")), + ); + let cloned_compiled_output = compile_output.clone(); + + compiler + .expect_compile() + .with(eq(class.clone())) + .times(1) + .return_once(move |_| Ok(compile_output)); + + cloned_compiled_output +} + +// TODO(Elin): consider sharing setup code, keeping it clear for the test reader how the compiler is +// mocked per test. + +#[tokio::test] +async fn class_manager() { + // Setup. + + // Prepare mock compiler. + let mut compiler = MockSierraCompilerClient::new(); + let class = RawClass::try_from(SierraContractClass::default()).unwrap(); + let (expected_executable_class, expected_executable_class_hash) = + mock_compile_expectations(&mut compiler, class.clone()); + + // Prepare class manager. + let cached_class_storage_config = + CachedClassStorageConfig { class_cache_size: 10, deprecated_class_cache_size: 10 }; + let mut class_manager = ClassManager::new_for_testing( + compiler, + ClassManagerConfig { cached_class_storage_config, ..Default::default() }, + ); + + // Test. + + // Non-existent class. 
+ let class_id = SierraContractClass::try_from(class.clone()).unwrap().calculate_class_hash(); + assert_eq!(class_manager.get_sierra(class_id), Ok(None)); + assert_eq!(class_manager.get_executable(class_id), Ok(None)); + + // Add new class. + let class_hashes = class_manager.add_class(class.clone()).await.unwrap(); + let expected_class_hashes = + ClassHashes { class_hash: class_id, executable_class_hash: expected_executable_class_hash }; + assert_eq!(class_hashes, expected_class_hashes); + + // Get class. + assert_eq!(class_manager.get_sierra(class_id).unwrap(), Some(class.clone())); + assert_eq!(class_manager.get_executable(class_id).unwrap(), Some(expected_executable_class)); + + // Add existing class; response returned immediately, without invoking compilation. + let class_hashes = class_manager.add_class(class).await.unwrap(); + assert_eq!(class_hashes, expected_class_hashes); +} + +#[tokio::test] +#[ignore = "Test deprecated class API"] +async fn class_manager_deprecated_class_api() { + todo!("Test deprecated class API"); +} + +#[tokio::test] +async fn class_manager_get_executable() { + // Setup. + + // Prepare mock compiler. + let mut compiler = MockSierraCompilerClient::new(); + let class = RawClass::try_from(SierraContractClass::default()).unwrap(); + let (expected_executable_class, _) = mock_compile_expectations(&mut compiler, class.clone()); + + // Prepare class manager. + let cached_class_storage_config = + CachedClassStorageConfig { class_cache_size: 10, deprecated_class_cache_size: 10 }; + let mut class_manager = ClassManager::new_for_testing( + compiler, + ClassManagerConfig { cached_class_storage_config, ..Default::default() }, + ); + + // Test. + + // Add classes: deprecated and non-deprecated, under different hashes. + let ClassHashes { class_hash, executable_class_hash: _ } = + class_manager.add_class(class.clone()).await.unwrap(); + + let deprecated_class_hash = ClassHash(felt!("0x1806")); + let deprecated_executable_class = RawExecutableClass::new_unchecked(vec![1, 2, 3].into()); + class_manager + .add_deprecated_class(deprecated_class_hash, deprecated_executable_class.clone()) + .unwrap(); + + // Get both executable classes. + assert_eq!(class_manager.get_executable(class_hash).unwrap(), Some(expected_executable_class)); + assert_eq!( + class_manager.get_executable(deprecated_class_hash).unwrap(), + Some(deprecated_executable_class) + ); +} + +#[tokio::test] +async fn class_manager_class_length_validation() { + // Setup. + + // Prepare mock compiler. + let mut compiler = MockSierraCompilerClient::new(); + let class = RawClass::try_from(SierraContractClass::default()).unwrap(); + let (expected_executable_class, _) = mock_compile_expectations(&mut compiler, class.clone()); + + // Prepare class manager. + let mut class_manager = ClassManager::new_for_testing( + compiler, + ClassManagerConfig { + max_compiled_contract_class_object_size: expected_executable_class.size().unwrap() - 1, + ..Default::default() + }, + ); + + // Test. + assert_matches!( + class_manager.add_class(class).await, + Err(ClassManagerError::ContractClassObjectSizeTooLarge { .. 
}) + ); +} diff --git a/crates/apollo_class_manager/src/class_storage.rs b/crates/apollo_class_manager/src/class_storage.rs new file mode 100644 index 00000000000..a5dbfe3ce60 --- /dev/null +++ b/crates/apollo_class_manager/src/class_storage.rs @@ -0,0 +1,579 @@ +use std::collections::BTreeMap; +use std::error::Error; +use std::mem; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, Mutex, MutexGuard}; + +use apollo_class_manager_types::{CachedClassStorageError, ClassId, ExecutableClassHash}; +use apollo_compile_to_casm_types::{RawClass, RawClassError, RawExecutableClass}; +use apollo_config::dumping::{ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use apollo_storage::class_hash::{ClassHashStorageReader, ClassHashStorageWriter}; +use apollo_storage::StorageConfig; +use serde::{Deserialize, Serialize}; +use starknet_api::class_cache::GlobalContractCache; +use starknet_api::contract_class::ContractClass; +use thiserror::Error; +use tracing::instrument; + +use crate::config::{ClassHashStorageConfig, FsClassStorageConfig}; +use crate::metrics::{increment_n_classes, record_class_size, CairoClassType, ClassObjectType}; + +#[cfg(test)] +#[path = "class_storage_test.rs"] +mod class_storage_test; + +// TODO(Elin): restrict visibility once this code is used. + +pub trait ClassStorage: Send + Sync { + type Error: Error; + + fn set_class( + &mut self, + class_id: ClassId, + class: RawClass, + executable_class_hash: ExecutableClassHash, + executable_class: RawExecutableClass, + ) -> Result<(), Self::Error>; + + fn get_sierra(&self, class_id: ClassId) -> Result, Self::Error>; + + fn get_executable(&self, class_id: ClassId) -> Result, Self::Error>; + + fn get_executable_class_hash( + &self, + class_id: ClassId, + ) -> Result, Self::Error>; + + fn set_deprecated_class( + &mut self, + class_id: ClassId, + class: RawExecutableClass, + ) -> Result<(), Self::Error>; + + fn get_deprecated_class( + &self, + class_id: ClassId, + ) -> Result, Self::Error>; +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct CachedClassStorageConfig { + pub class_cache_size: usize, + pub deprecated_class_cache_size: usize, +} + +// TODO(Elin): provide default values for the fields. +impl Default for CachedClassStorageConfig { + fn default() -> Self { + Self { class_cache_size: 10, deprecated_class_cache_size: 10 } + } +} + +impl SerializeConfig for CachedClassStorageConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ + ser_param( + "class_cache_size", + &self.class_cache_size, + "Contract classes cache size.", + ParamPrivacyInput::Public, + ), + ser_param( + "deprecated_class_cache_size", + &self.deprecated_class_cache_size, + "Deprecated contract classes cache size.", + ParamPrivacyInput::Public, + ), + ]) + } +} + +pub struct CachedClassStorage { + storage: S, + + // Cache. 
+ classes: GlobalContractCache, + executable_classes: GlobalContractCache, + executable_class_hashes: GlobalContractCache, + deprecated_classes: GlobalContractCache, +} + +impl CachedClassStorage { + pub fn new(config: CachedClassStorageConfig, storage: S) -> Self { + Self { + storage, + classes: GlobalContractCache::new(config.class_cache_size), + executable_classes: GlobalContractCache::new(config.class_cache_size), + executable_class_hashes: GlobalContractCache::new(config.class_cache_size), + deprecated_classes: GlobalContractCache::new(config.deprecated_class_cache_size), + } + } + + pub fn class_cached(&self, class_id: ClassId) -> bool { + self.executable_class_hashes.get(&class_id).is_some() + } + + pub fn deprecated_class_cached(&self, class_id: ClassId) -> bool { + self.deprecated_classes.get(&class_id).is_some() + } +} + +impl ClassStorage for CachedClassStorage { + type Error = CachedClassStorageError; + + #[instrument(skip(self, class, executable_class), level = "debug", ret, err)] + fn set_class( + &mut self, + class_id: ClassId, + class: RawClass, + executable_class_hash: ExecutableClassHash, + executable_class: RawExecutableClass, + ) -> Result<(), Self::Error> { + if self.class_cached(class_id) { + return Ok(()); + } + + self.storage.set_class( + class_id, + class.clone(), + executable_class_hash, + executable_class.clone(), + )?; + + increment_n_classes(CairoClassType::Regular); + record_class_size(ClassObjectType::Sierra, &class); + record_class_size(ClassObjectType::Casm, &executable_class); + + // Cache the class. + // Done after successfully writing to storage as an optimization; + // does not require atomicity. + self.classes.set(class_id, class); + self.executable_classes.set(class_id, executable_class); + // Cache the executable class hash last; acts as an existence marker. + self.executable_class_hashes.set(class_id, executable_class_hash); + + Ok(()) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_sierra(&self, class_id: ClassId) -> Result, Self::Error> { + if let Some(class) = self.classes.get(&class_id) { + return Ok(Some(class)); + } + + let Some(class) = self.storage.get_sierra(class_id)? else { + return Ok(None); + }; + + self.classes.set(class_id, class.clone()); + Ok(Some(class)) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_executable(&self, class_id: ClassId) -> Result, Self::Error> { + if let Some(class) = self + .executable_classes + .get(&class_id) + .or_else(|| self.deprecated_classes.get(&class_id)) + { + return Ok(Some(class)); + } + + let Some(class) = self.storage.get_executable(class_id)? else { + return Ok(None); + }; + + // TODO(Elin): separate Cairo0<>1 getters to avoid deserializing here. + match ContractClass::try_from(class.clone()).unwrap() { + ContractClass::V0(_) => { + self.deprecated_classes.set(class_id, class.clone()); + } + ContractClass::V1(_) => { + self.executable_classes.set(class_id, class.clone()); + } + } + + Ok(Some(class)) + } + + #[instrument(skip(self), level = "debug", ret, err)] + fn get_executable_class_hash( + &self, + class_id: ClassId, + ) -> Result, Self::Error> { + if let Some(class_hash) = self.executable_class_hashes.get(&class_id) { + return Ok(Some(class_hash)); + } + + let Some(class_hash) = self.storage.get_executable_class_hash(class_id)? 
else { + return Ok(None); + }; + + self.executable_class_hashes.set(class_id, class_hash); + Ok(Some(class_hash)) + } + + #[instrument(skip(self, class), level = "debug", ret, err)] + fn set_deprecated_class( + &mut self, + class_id: ClassId, + class: RawExecutableClass, + ) -> Result<(), Self::Error> { + if self.deprecated_class_cached(class_id) { + return Ok(()); + } + + self.storage.set_deprecated_class(class_id, class.clone())?; + + increment_n_classes(CairoClassType::Deprecated); + record_class_size(ClassObjectType::DeprecatedCasm, &class); + + self.deprecated_classes.set(class_id, class); + + Ok(()) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_deprecated_class( + &self, + class_id: ClassId, + ) -> Result, Self::Error> { + if let Some(class) = self.deprecated_classes.get(&class_id) { + return Ok(Some(class)); + } + + let Some(class) = self.storage.get_deprecated_class(class_id)? else { + return Ok(None); + }; + + self.deprecated_classes.set(class_id, class.clone()); + Ok(Some(class)) + } +} + +impl Clone for CachedClassStorage { + fn clone(&self) -> Self { + Self { + storage: self.storage.clone(), + classes: self.classes.clone(), + executable_classes: self.executable_classes.clone(), + executable_class_hashes: self.executable_class_hashes.clone(), + deprecated_classes: self.deprecated_classes.clone(), + } + } +} + +#[derive(Debug, Error)] +pub enum ClassHashStorageError { + #[error(transparent)] + Storage(#[from] apollo_storage::StorageError), +} + +type ClassHashStorageResult = Result; +type LockedWriter<'a> = MutexGuard<'a, apollo_storage::StorageWriter>; + +#[derive(Clone)] +pub struct ClassHashStorage { + reader: apollo_storage::StorageReader, + writer: Arc>, +} + +impl ClassHashStorage { + pub fn new(config: ClassHashStorageConfig) -> ClassHashStorageResult { + let storage_config = StorageConfig::from(config); + let (reader, writer) = apollo_storage::open_storage(storage_config)?; + + Ok(Self { reader, writer: Arc::new(Mutex::new(writer)) }) + } + + fn writer(&self) -> ClassHashStorageResult> { + Ok(self.writer.lock().expect("Writer is poisoned.")) + } + + #[instrument(skip(self), level = "debug", ret, err)] + fn get_executable_class_hash( + &self, + class_id: ClassId, + ) -> ClassHashStorageResult> { + Ok(self.reader.begin_ro_txn()?.get_executable_class_hash(&class_id)?) 
+ } + + #[instrument(skip(self), level = "debug", ret, err)] + fn set_executable_class_hash( + &mut self, + class_id: ClassId, + executable_class_hash: ExecutableClassHash, + ) -> ClassHashStorageResult<()> { + let mut writer = self.writer()?; + let txn = + writer.begin_rw_txn()?.set_executable_class_hash(&class_id, executable_class_hash)?; + txn.commit()?; + + Ok(()) + } +} + +type FsClassStorageResult = Result; + +#[derive(Clone)] +pub struct FsClassStorage { + pub persistent_root: PathBuf, + pub class_hash_storage: ClassHashStorage, +} + +#[derive(Debug, Error)] +pub enum FsClassStorageError { + #[error(transparent)] + ClassHashStorage(#[from] ClassHashStorageError), + #[error("Class of hash {class_id} not found.")] + ClassNotFound { class_id: ClassId }, + #[error(transparent)] + IoError(#[from] std::io::Error), + #[error(transparent)] + RawClass(#[from] RawClassError), +} + +impl FsClassStorage { + pub fn new(config: FsClassStorageConfig) -> FsClassStorageResult { + let class_hash_storage = ClassHashStorage::new(config.class_hash_storage_config)?; + Ok(Self { persistent_root: config.persistent_root, class_hash_storage }) + } + + fn contains_class(&self, class_id: ClassId) -> FsClassStorageResult { + Ok(self.get_executable_class_hash(class_id)?.is_some()) + } + + // TODO(Elin): make this more robust; checking file existence is not enough, since by reading + // it can be deleted. + fn contains_deprecated_class(&self, class_id: ClassId) -> bool { + self.get_deprecated_executable_path(class_id).exists() + } + + /// Returns the directory that will hold classes related to the given class ID. + /// For a class ID: 0xa1b2c3d4... (rest of hash), the structure is: + /// a1/ + /// └── b2/ + /// └── a1b2c3d4.../ + fn get_class_dir(&self, class_id: ClassId) -> PathBuf { + let class_id = hex::encode(class_id.to_bytes_be()); + let (first_msb_byte, second_msb_byte, _rest_of_bytes) = + (&class_id[..2], &class_id[2..4], &class_id[4..]); + + PathBuf::from(first_msb_byte).join(second_msb_byte).join(class_id) + } + + fn get_persistent_dir(&self, class_id: ClassId) -> PathBuf { + self.persistent_root.join(self.get_class_dir(class_id)) + } + + fn get_persistent_dir_with_create(&self, class_id: ClassId) -> FsClassStorageResult { + let path = self.get_persistent_dir(class_id); + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + + Ok(path) + } + + fn get_sierra_path(&self, class_id: ClassId) -> PathBuf { + concat_sierra_filename(&self.get_persistent_dir(class_id)) + } + + fn get_executable_path(&self, class_id: ClassId) -> PathBuf { + concat_executable_filename(&self.get_persistent_dir(class_id)) + } + + fn get_deprecated_executable_path(&self, class_id: ClassId) -> PathBuf { + concat_deprecated_executable_filename(&self.get_persistent_dir(class_id)) + } + + fn mark_class_id_as_existent( + &mut self, + class_id: ClassId, + executable_class_hash: ExecutableClassHash, + ) -> FsClassStorageResult<()> { + Ok(self.class_hash_storage.set_executable_class_hash(class_id, executable_class_hash)?) 
+ } + + fn write_class( + &self, + class_id: ClassId, + class: RawClass, + executable_class: RawExecutableClass, + ) -> FsClassStorageResult<()> { + let persistent_dir = self.get_persistent_dir_with_create(class_id)?; + class.write_to_file(concat_sierra_filename(&persistent_dir))?; + executable_class.write_to_file(concat_executable_filename(&persistent_dir))?; + + Ok(()) + } + + fn write_deprecated_class( + &self, + class_id: ClassId, + class: RawExecutableClass, + ) -> FsClassStorageResult<()> { + let persistent_dir = self.get_persistent_dir_with_create(class_id)?; + class.write_to_file(concat_deprecated_executable_filename(&persistent_dir))?; + + Ok(()) + } + + // TODO(Elin): restore use of `write_[deprecated_]class_atomically`, but tmpdir + // should be located inside the PVC to prevent linking errors. + #[allow(dead_code)] + fn write_class_atomically( + &self, + class_id: ClassId, + class: RawClass, + executable_class: RawExecutableClass, + ) -> FsClassStorageResult<()> { + // Write classes to a temporary directory. + let tmp_dir = create_tmp_dir()?; + let tmp_dir = tmp_dir.path().join(self.get_class_dir(class_id)); + class.write_to_file(concat_sierra_filename(&tmp_dir))?; + executable_class.write_to_file(concat_executable_filename(&tmp_dir))?; + + // Atomically rename directory to persistent one. + let persistent_dir = self.get_persistent_dir_with_create(class_id)?; + std::fs::rename(tmp_dir, persistent_dir)?; + + Ok(()) + } + + #[allow(dead_code)] + fn write_deprecated_class_atomically( + &self, + class_id: ClassId, + class: RawExecutableClass, + ) -> FsClassStorageResult<()> { + // Write class to a temporary directory. + let tmp_dir = create_tmp_dir()?; + let tmp_dir = tmp_dir.path().join(self.get_class_dir(class_id)); + class.write_to_file(concat_deprecated_executable_filename(&tmp_dir))?; + + // Atomically rename directory to persistent one. + let persistent_dir = self.get_persistent_dir_with_create(class_id)?; + std::fs::rename(tmp_dir, persistent_dir)?; + + Ok(()) + } +} + +impl ClassStorage for FsClassStorage { + type Error = FsClassStorageError; + + #[instrument(skip(self, class, executable_class), level = "debug", ret, err)] + fn set_class( + &mut self, + class_id: ClassId, + class: RawClass, + executable_class_hash: ExecutableClassHash, + executable_class: RawExecutableClass, + ) -> Result<(), Self::Error> { + if self.contains_class(class_id)? { + return Ok(()); + } + + self.write_class(class_id, class, executable_class)?; + self.mark_class_id_as_existent(class_id, executable_class_hash)?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_sierra(&self, class_id: ClassId) -> Result, Self::Error> { + if !self.contains_class(class_id)? { + return Ok(None); + } + + let path = self.get_sierra_path(class_id); + let class = + RawClass::from_file(path)?.ok_or(FsClassStorageError::ClassNotFound { class_id })?; + + Ok(Some(class)) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_executable(&self, class_id: ClassId) -> Result, Self::Error> { + let path = if self.contains_class(class_id)? { + self.get_executable_path(class_id) + } else if self.contains_deprecated_class(class_id) { + self.get_deprecated_executable_path(class_id) + } else { + // Class does not exist in storage. + return Ok(None); + }; + + let class = RawExecutableClass::from_file(path)? 
+ .ok_or(FsClassStorageError::ClassNotFound { class_id })?; + Ok(Some(class)) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_executable_class_hash( + &self, + class_id: ClassId, + ) -> Result, Self::Error> { + Ok(self.class_hash_storage.get_executable_class_hash(class_id)?) + } + + #[instrument(skip(self, class), level = "debug", ret, err)] + fn set_deprecated_class( + &mut self, + class_id: ClassId, + class: RawExecutableClass, + ) -> Result<(), Self::Error> { + if self.contains_deprecated_class(class_id) { + return Ok(()); + } + + self.write_deprecated_class(class_id, class)?; + + Ok(()) + } + + #[instrument(skip(self), level = "debug", err)] + fn get_deprecated_class( + &self, + class_id: ClassId, + ) -> Result, Self::Error> { + if !self.contains_deprecated_class(class_id) { + return Ok(None); + } + + let path = self.get_deprecated_executable_path(class_id); + let class = RawExecutableClass::from_file(path)? + .ok_or(FsClassStorageError::ClassNotFound { class_id })?; + + Ok(Some(class)) + } +} + +impl PartialEq for FsClassStorageError { + fn eq(&self, other: &Self) -> bool { + // Only compare enum variants; no need to compare the error values. + mem::discriminant(self) == mem::discriminant(other) + } +} + +// Utils. + +fn concat_sierra_filename(path: &Path) -> PathBuf { + path.join("sierra") +} + +fn concat_executable_filename(path: &Path) -> PathBuf { + path.join("casm") +} + +fn concat_deprecated_executable_filename(path: &Path) -> PathBuf { + path.join("deprecated_casm") +} + +// Creates a tmp directory and returns a owned representation of it. +// As long as the returned directory object is lived, the directory is not deleted. +pub(crate) fn create_tmp_dir() -> FsClassStorageResult { + Ok(tempfile::tempdir()?) +} diff --git a/crates/apollo_class_manager/src/class_storage_test.rs b/crates/apollo_class_manager/src/class_storage_test.rs new file mode 100644 index 00000000000..0d489916b3c --- /dev/null +++ b/crates/apollo_class_manager/src/class_storage_test.rs @@ -0,0 +1,140 @@ +use apollo_compile_to_casm_types::{RawClass, RawExecutableClass}; +use starknet_api::core::{ClassHash, CompiledClassHash}; +use starknet_api::felt; +use starknet_api::state::SierraContractClass; + +use crate::class_storage::{ + create_tmp_dir, + ClassHashStorage, + ClassHashStorageConfig, + ClassStorage, + FsClassStorage, + FsClassStorageError, +}; +use crate::config::ClassHashDbConfig; + +// TODO(Elin): consider creating an empty Casm instead of vec (doesn't implement default). + +#[cfg(test)] +impl ClassHashStorage { + pub fn new_for_testing(path_prefix: &tempfile::TempDir) -> Self { + let config = ClassHashStorageConfig { + class_hash_db_config: ClassHashDbConfig { + path_prefix: path_prefix.path().to_path_buf(), + enforce_file_exists: false, + max_size: 1 << 30, // 1GB. + min_size: 1 << 10, // 1KB. + growth_step: 1 << 26, // 64MB. 
+ }, + ..Default::default() + }; + Self::new(config).unwrap() + } +} + +#[cfg(test)] +impl FsClassStorage { + pub fn new_for_testing( + persistent_root: &tempfile::TempDir, + class_hash_storage_path_prefix: &tempfile::TempDir, + ) -> Self { + let class_hash_storage = ClassHashStorage::new_for_testing(class_hash_storage_path_prefix); + Self { persistent_root: persistent_root.path().to_path_buf(), class_hash_storage } + } +} + +#[test] +fn fs_storage() { + let persistent_root = create_tmp_dir().unwrap(); + let class_hash_storage_path_prefix = create_tmp_dir().unwrap(); + let mut storage = + FsClassStorage::new_for_testing(&persistent_root, &class_hash_storage_path_prefix); + + // Non-existent class. + let class_id = ClassHash(felt!("0x1234")); + assert_eq!(storage.get_sierra(class_id), Ok(None)); + assert_eq!(storage.get_executable(class_id), Ok(None)); + assert_eq!(storage.get_executable_class_hash(class_id), Ok(None)); + + // Add new class. + let class = RawClass::try_from(SierraContractClass::default()).unwrap(); + let executable_class = RawExecutableClass::new_unchecked(vec![4, 5, 6].into()); + let executable_class_hash = CompiledClassHash(felt!("0x5678")); + storage + .set_class(class_id, class.clone(), executable_class_hash, executable_class.clone()) + .unwrap(); + + // Get class. + assert_eq!(storage.get_sierra(class_id).unwrap(), Some(class.clone())); + assert_eq!(storage.get_executable(class_id).unwrap(), Some(executable_class.clone())); + assert_eq!(storage.get_executable_class_hash(class_id).unwrap(), Some(executable_class_hash)); + + // Add existing class. + storage + .set_class(class_id, class.clone(), executable_class_hash, executable_class.clone()) + .unwrap(); +} + +#[test] +fn fs_storage_deprecated_class_api() { + let persistent_root = create_tmp_dir().unwrap(); + let class_hash_storage_path_prefix = create_tmp_dir().unwrap(); + let mut storage = + FsClassStorage::new_for_testing(&persistent_root, &class_hash_storage_path_prefix); + + // Non-existent class. + let class_id = ClassHash(felt!("0x1234")); + assert_eq!(storage.get_deprecated_class(class_id), Ok(None)); + + // Add new class. + let executable_class = RawExecutableClass::new_unchecked(vec![4, 5, 6].into()); + storage.set_deprecated_class(class_id, executable_class.clone()).unwrap(); + + // Get class. + assert_eq!(storage.get_deprecated_class(class_id).unwrap(), Some(executable_class.clone())); + + // Add existing class. + storage.set_deprecated_class(class_id, executable_class).unwrap(); +} + +// TODO(Elin): check a nonexistent persistent root (should be created). +// TODO(Elin): add unimplemented skeletons for test above and rest of missing tests. + +/// This scenario simulates a (manual) DB corruption; e.g., files were deleted. +// TODO(Elin): should this flow return an error? +#[test] +fn fs_storage_partial_write_only_atomic_marker() { + let persistent_root = create_tmp_dir().unwrap(); + let class_hash_storage_path_prefix = create_tmp_dir().unwrap(); + let mut storage = + FsClassStorage::new_for_testing(&persistent_root, &class_hash_storage_path_prefix); + + // Write only atomic marker, no class files. + let class_id = ClassHash(felt!("0x1234")); + let executable_class_hash = CompiledClassHash(felt!("0x5678")); + storage.mark_class_id_as_existent(class_id, executable_class_hash).unwrap(); + + // Query class, should be considered an erroneous flow. 
+ let class_not_found_error = FsClassStorageError::ClassNotFound { class_id }; + assert_eq!(storage.get_sierra(class_id).unwrap_err(), class_not_found_error); + assert_eq!(storage.get_executable(class_id).unwrap_err(), class_not_found_error); +} + +#[test] +fn fs_storage_partial_write_no_atomic_marker() { + let persistent_root = create_tmp_dir().unwrap(); + let class_hash_storage_path_prefix = create_tmp_dir().unwrap(); + let storage = + FsClassStorage::new_for_testing(&persistent_root, &class_hash_storage_path_prefix); + + // Fully write class files, without atomic marker. + let class_id = ClassHash(felt!("0x1234")); + let class = RawClass::try_from(SierraContractClass::default()).unwrap(); + let executable_class = RawExecutableClass::new_unchecked(vec![4, 5, 6].into()); + storage.write_class_atomically(class_id, class, executable_class).unwrap(); + assert_eq!(storage.get_executable_class_hash(class_id), Ok(None)); + + // Query class, should be considered non-existent. + assert_eq!(storage.get_sierra(class_id), Ok(None)); + assert_eq!(storage.get_executable(class_id), Ok(None)); +} diff --git a/crates/apollo_class_manager/src/communication.rs b/crates/apollo_class_manager/src/communication.rs new file mode 100644 index 00000000000..10df6b04551 --- /dev/null +++ b/crates/apollo_class_manager/src/communication.rs @@ -0,0 +1,58 @@ +use apollo_class_manager_types::{ClassManagerRequest, ClassManagerResponse}; +use apollo_infra::component_definitions::ComponentRequestHandler; +use apollo_infra::component_server::{ConcurrentLocalComponentServer, RemoteComponentServer}; +use async_trait::async_trait; +use starknet_api::contract_class::ContractClass; + +use crate::ClassManager; + +pub type LocalClassManagerServer = + ConcurrentLocalComponentServer; +pub type RemoteClassManagerServer = + RemoteComponentServer; + +// TODO(Elin): change the request and response the server sees to raw types; remove conversions and +// unwraps. 
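The `ComponentRequestHandler` impl below pairs every request variant with the response variant of the same name. A minimal, self-contained sketch of that dispatch pattern, using toy types rather than the real `apollo_infra` plumbing:

```rust
use std::collections::HashMap;

// Toy stand-ins for a component's request/response enums (hypothetical types,
// not part of this PR).
enum Request {
    Get(u64),
    Put(u64, String),
}

enum Response {
    Get(Option<String>),
    Put,
}

struct Handler {
    store: HashMap<u64, String>,
}

impl Handler {
    // Mirrors the shape of `ComponentRequestHandler::handle_request`: match on
    // the request variant and answer with the matching response variant.
    fn handle_request(&mut self, request: Request) -> Response {
        match request {
            Request::Get(key) => Response::Get(self.store.get(&key).cloned()),
            Request::Put(key, value) => {
                self.store.insert(key, value);
                Response::Put
            }
        }
    }
}

fn main() {
    let mut handler = Handler { store: HashMap::new() };
    let _ = handler.handle_request(Request::Put(1, "class".to_string()));
    if let Response::Get(Some(class)) = handler.handle_request(Request::Get(1)) {
        println!("got {class}");
    }
}
```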
+#[async_trait] +impl ComponentRequestHandler for ClassManager { + async fn handle_request(&mut self, request: ClassManagerRequest) -> ClassManagerResponse { + match request { + ClassManagerRequest::AddClass(class) => { + ClassManagerResponse::AddClass(self.0.add_class(class.try_into().unwrap()).await) + } + ClassManagerRequest::AddClassAndExecutableUnsafe( + class_id, + class, + executable_class_id, + executable_class, + ) => ClassManagerResponse::AddClassAndExecutableUnsafe( + self.0.add_class_and_executable_unsafe( + class_id, + class.try_into().unwrap(), + executable_class_id, + executable_class.try_into().unwrap(), + ), + ), + ClassManagerRequest::AddDeprecatedClass(class_id, class) => { + let class = ContractClass::V0(class).try_into().unwrap(); + ClassManagerResponse::AddDeprecatedClass( + self.0.add_deprecated_class(class_id, class), + ) + } + ClassManagerRequest::GetExecutable(class_id) => { + let result = self + .0 + .get_executable(class_id) + .map(|optional_class| optional_class.map(|class| class.try_into().unwrap())); + ClassManagerResponse::GetExecutable(result) + } + ClassManagerRequest::GetSierra(class_id) => { + let result = self + .0 + .get_sierra(class_id) + .map(|optional_class| optional_class.map(|class| class.try_into().unwrap())); + ClassManagerResponse::GetSierra(result) + } + } + } +} diff --git a/crates/apollo_class_manager/src/config.rs b/crates/apollo_class_manager/src/config.rs new file mode 100644 index 00000000000..2e9445cefde --- /dev/null +++ b/crates/apollo_class_manager/src/config.rs @@ -0,0 +1,209 @@ +use std::collections::BTreeMap; +use std::path::PathBuf; + +use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use apollo_storage::mmap_file::MmapFileConfig; +use apollo_storage::{StorageConfig, StorageScope}; +use serde::{Deserialize, Serialize}; +use starknet_api::core::ChainId; +use validator::Validate; + +use crate::class_storage::CachedClassStorageConfig; + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Validate)] +pub struct ClassHashDbConfig { + pub path_prefix: PathBuf, + pub enforce_file_exists: bool, + pub min_size: usize, + pub max_size: usize, + pub growth_step: isize, +} + +impl SerializeConfig for ClassHashDbConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from_iter([ + ser_param( + "path_prefix", + &self.path_prefix, + "Prefix of the path of the node's storage directory.", + ParamPrivacyInput::Public, + ), + ser_param( + "enforce_file_exists", + &self.enforce_file_exists, + "Whether to enforce that the path exists. 
If true, `open_env` fails when the \ + mdbx.dat file does not exist.", + ParamPrivacyInput::Public, + ), + ser_param( + "min_size", + &self.min_size, + "The minimum size of the node's storage in bytes.", + ParamPrivacyInput::Public, + ), + ser_param( + "max_size", + &self.max_size, + "The maximum size of the node's storage in bytes.", + ParamPrivacyInput::Public, + ), + ser_param( + "growth_step", + &self.growth_step, + "The growth step in bytes, must be greater than zero to allow the database to \ + grow.", + ParamPrivacyInput::Public, + ), + ]) + } +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, Validate)] +pub struct ClassHashStorageConfig { + #[validate] + pub class_hash_db_config: ClassHashDbConfig, + #[validate] + pub mmap_file_config: MmapFileConfig, + pub scope: StorageScope, +} + +impl Default for ClassHashStorageConfig { + fn default() -> Self { + Self { + class_hash_db_config: ClassHashDbConfig { + path_prefix: "/data/class_hash_storage".into(), + enforce_file_exists: false, + min_size: 1 << 20, // 1MB + max_size: 1 << 40, // 1TB + growth_step: 1 << 32, // 4GB + }, + mmap_file_config: MmapFileConfig { + max_size: 1 << 30, // 1GB. + growth_step: 1 << 20, // 1MB. + max_object_size: 1 << 10, // 1KB; a class hash is 32B. + }, + scope: StorageScope::StateOnly, + } + } +} + +impl From for StorageConfig { + fn from(value: ClassHashStorageConfig) -> Self { + Self { + db_config: apollo_storage::db::DbConfig { + // TODO(Noamsp): move the chain id into the config and use StorageConfig instead of + // ClassHashStorageConfig + chain_id: ChainId::Other("UnusedChainID".to_string()), + path_prefix: value.class_hash_db_config.path_prefix, + enforce_file_exists: value.class_hash_db_config.enforce_file_exists, + min_size: value.class_hash_db_config.min_size, + max_size: value.class_hash_db_config.max_size, + growth_step: value.class_hash_db_config.growth_step, + }, + scope: value.scope, + mmap_file_config: value.mmap_file_config, + } + } +} + +impl SerializeConfig for ClassHashStorageConfig { + fn dump(&self) -> BTreeMap { + let mut dumped_config = BTreeMap::from([ser_param( + "scope", + &self.scope, + "The categories of data saved in storage.", + ParamPrivacyInput::Public, + )]); + dumped_config + .append(&mut prepend_sub_config_name(self.mmap_file_config.dump(), "mmap_file_config")); + dumped_config.append(&mut prepend_sub_config_name( + self.class_hash_db_config.dump(), + "class_hash_db_config", + )); + dumped_config + } +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct FsClassStorageConfig { + pub persistent_root: PathBuf, + pub class_hash_storage_config: ClassHashStorageConfig, +} + +impl Default for FsClassStorageConfig { + fn default() -> Self { + Self { + persistent_root: "/data/classes".into(), + class_hash_storage_config: Default::default(), + } + } +} + +impl SerializeConfig for FsClassStorageConfig { + fn dump(&self) -> BTreeMap { + let mut dump = BTreeMap::from([ser_param( + "persistent_root", + &self.persistent_root, + "Path to the node's class storage directory.", + ParamPrivacyInput::Public, + )]); + dump.append(&mut prepend_sub_config_name( + self.class_hash_storage_config.dump(), + "class_hash_storage_config", + )); + dump + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)] +pub struct ClassManagerConfig { + pub cached_class_storage_config: CachedClassStorageConfig, + pub max_compiled_contract_class_object_size: usize, +} + +impl Default for ClassManagerConfig { + fn default() -> Self { + ClassManagerConfig { + 
cached_class_storage_config: CachedClassStorageConfig::default(), + max_compiled_contract_class_object_size: 4089446, + } + } +} + +impl SerializeConfig for ClassManagerConfig { + fn dump(&self) -> BTreeMap { + let mut dump = BTreeMap::from([ser_param( + "max_compiled_contract_class_object_size", + &self.max_compiled_contract_class_object_size, + "Limitation of compiled contract class object size.", + ParamPrivacyInput::Public, + )]); + dump.append(&mut prepend_sub_config_name( + self.cached_class_storage_config.dump(), + "cached_class_storage_config", + )); + dump + } +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, Validate, PartialEq)] +pub struct FsClassManagerConfig { + pub class_manager_config: ClassManagerConfig, + pub class_storage_config: FsClassStorageConfig, +} + +impl SerializeConfig for FsClassManagerConfig { + fn dump(&self) -> BTreeMap { + let mut dump = BTreeMap::new(); + dump.append(&mut prepend_sub_config_name( + self.class_manager_config.dump(), + "class_manager_config", + )); + dump.append(&mut prepend_sub_config_name( + self.class_storage_config.dump(), + "class_storage_config", + )); + dump + } +} diff --git a/crates/apollo_class_manager/src/lib.rs b/crates/apollo_class_manager/src/lib.rs new file mode 100644 index 00000000000..c9a6e6a3c3d --- /dev/null +++ b/crates/apollo_class_manager/src/lib.rs @@ -0,0 +1,27 @@ +pub mod class_manager; +pub mod class_storage; +pub mod communication; +pub mod config; +pub mod metrics; + +use crate::class_manager::ClassManager as GenericClassManager; +use crate::class_storage::FsClassStorage; + +pub struct FsClassManager(pub GenericClassManager); + +impl Clone for FsClassManager { + fn clone(&self) -> Self { + let GenericClassManager { config, compiler, classes } = &self.0; + + FsClassManager(GenericClassManager { + config: config.clone(), + compiler: compiler.clone(), + classes: classes.clone(), + }) + } +} + +pub use FsClassManager as ClassManager; + +#[cfg(any(feature = "testing", test))] +pub mod test_utils; diff --git a/crates/apollo_class_manager/src/metrics.rs b/crates/apollo_class_manager/src/metrics.rs new file mode 100644 index 00000000000..bbc9eb1e2fb --- /dev/null +++ b/crates/apollo_class_manager/src/metrics.rs @@ -0,0 +1,75 @@ +use apollo_compile_to_casm_types::SerializedClass; +use apollo_metrics::{define_metrics, generate_permutation_labels}; +use strum::VariantNames; + +const CAIRO_CLASS_TYPE_LABEL: &str = "class_type"; + +#[derive(strum_macros::EnumVariantNames, strum_macros::IntoStaticStr)] +#[strum(serialize_all = "snake_case")] +pub(crate) enum CairoClassType { + Regular, + Deprecated, +} + +generate_permutation_labels! { + CAIRO_CLASS_TYPE_LABELS, + (CAIRO_CLASS_TYPE_LABEL, CairoClassType), +} + +const CLASS_OBJECT_TYPE_LABEL: &str = "class_object_type"; + +#[derive( + Debug, strum_macros::Display, strum_macros::EnumVariantNames, strum_macros::IntoStaticStr, +)] +#[strum(serialize_all = "snake_case")] +pub(crate) enum ClassObjectType { + Sierra, + Casm, + DeprecatedCasm, +} + +generate_permutation_labels! 
{ + CLASS_OBJECT_TYPE_LABELS, + (CLASS_OBJECT_TYPE_LABEL, ClassObjectType), +} + +define_metrics!( + ClassManager => { + LabeledMetricCounter { + N_CLASSES, + "class_manager_n_classes", "Number of classes, by label (regular, deprecated)", + init = 0 , + labels = CAIRO_CLASS_TYPE_LABELS + }, + LabeledMetricHistogram { + CLASS_SIZES, + "class_manager_class_sizes", + "Size of the classes in bytes, labeled by type (sierra, casm, deprecated casm)", + labels = CLASS_OBJECT_TYPE_LABELS + }, + }, +); + +pub(crate) fn increment_n_classes(cls_type: CairoClassType) { + N_CLASSES.increment(1, &[(CAIRO_CLASS_TYPE_LABEL, cls_type.into())]); +} + +pub(crate) fn record_class_size(class_type: ClassObjectType, class: &SerializedClass) { + let class_size = class.size().unwrap_or_else(|_| { + panic!("Illegally formatted {} class, should not have gotten into the system.", class_type) + }); + let class_size = u32::try_from(class_size).unwrap_or_else(|_| { + panic!( + "{} class size {} is bigger than what is allowed, + should not have gotten into the system.", + class_type, class_size + ) + }); + + CLASS_SIZES.record(class_size, &[(CLASS_OBJECT_TYPE_LABEL, class_type.into())]); +} + +pub(crate) fn register_metrics() { + N_CLASSES.register(); + CLASS_SIZES.register(); +} diff --git a/crates/apollo_class_manager/src/test_utils.rs b/crates/apollo_class_manager/src/test_utils.rs new file mode 100644 index 00000000000..3075c509bfa --- /dev/null +++ b/crates/apollo_class_manager/src/test_utils.rs @@ -0,0 +1,58 @@ +use std::path::PathBuf; + +use tempfile::TempDir; + +use crate::class_storage::{ClassHashStorage, FsClassStorage}; +use crate::config::{ClassHashDbConfig, ClassHashStorageConfig, FsClassStorageConfig}; + +pub type FileHandles = (TempDir, TempDir); + +pub struct FsClassStorageBuilderForTesting { + config: FsClassStorageConfig, + handles: Option, +} + +impl Default for FsClassStorageBuilderForTesting { + fn default() -> Self { + let class_hash_storage_handle = tempfile::tempdir().unwrap(); + let persistent_root_handle = tempfile::tempdir().unwrap(); + let persistent_root = persistent_root_handle.path().to_path_buf(); + let config = FsClassStorageConfig { + persistent_root, + class_hash_storage_config: ClassHashStorageConfig { + class_hash_db_config: ClassHashDbConfig { + path_prefix: class_hash_storage_handle.path().to_path_buf(), + enforce_file_exists: false, + max_size: 1 << 30, // 1GB. + min_size: 1 << 10, // 1KB. + growth_step: 1 << 26, // 64MB. 
+ }, + ..Default::default() + }, + }; + Self { config, handles: Some((class_hash_storage_handle, persistent_root_handle)) } + } +} + +impl FsClassStorageBuilderForTesting { + pub fn with_existing_paths( + mut self, + class_hash_storage_path_prefix: PathBuf, + persistent_path: PathBuf, + ) -> Self { + self.config.class_hash_storage_config.class_hash_db_config.path_prefix = + class_hash_storage_path_prefix; + self.config.persistent_root = persistent_path; + self.handles = None; + self + } + + pub fn build(self) -> (FsClassStorage, FsClassStorageConfig, Option) { + let Self { config, handles } = self; + let class_hash_storage = + ClassHashStorage::new(config.class_hash_storage_config.clone()).unwrap(); + let fs_class_storage = + FsClassStorage { persistent_root: config.persistent_root.clone(), class_hash_storage }; + (fs_class_storage, config, handles) + } +} diff --git a/crates/apollo_class_manager_types/Cargo.toml b/crates/apollo_class_manager_types/Cargo.toml new file mode 100644 index 00000000000..e88cc7c5d14 --- /dev/null +++ b/crates/apollo_class_manager_types/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "apollo_class_manager_types" +edition.workspace = true +license.workspace = true +repository.workspace = true +version.workspace = true + +[features] +testing = ["mockall"] + +[lints] +workspace = true + +[dependencies] +apollo_compile_to_casm_types.workspace = true +apollo_infra.workspace = true +apollo_proc_macros.workspace = true +async-trait.workspace = true +mockall = { workspace = true, optional = true } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +starknet_api.workspace = true +strum_macros.workspace = true +thiserror.workspace = true + +[dev-dependencies] +mockall.workspace = true +starknet_api = { workspace = true, features = ["testing"] } diff --git a/crates/apollo_class_manager_types/src/lib.rs b/crates/apollo_class_manager_types/src/lib.rs new file mode 100644 index 00000000000..04ca2cc50e1 --- /dev/null +++ b/crates/apollo_class_manager_types/src/lib.rs @@ -0,0 +1,264 @@ +pub mod transaction_converter; + +use std::error::Error; +use std::sync::Arc; + +use apollo_compile_to_casm_types::SierraCompilerError; +use apollo_infra::component_client::{ClientError, LocalComponentClient, RemoteComponentClient}; +use apollo_infra::component_definitions::{ComponentClient, ComponentRequestAndResponseSender}; +use apollo_infra::impl_debug_for_infra_requests_and_responses; +use apollo_proc_macros::handle_all_response_variants; +use async_trait::async_trait; +#[cfg(feature = "testing")] +use mockall::automock; +use serde::{Deserialize, Serialize}; +use starknet_api::contract_class::ContractClass; +use starknet_api::core::{ClassHash, CompiledClassHash}; +use starknet_api::deprecated_contract_class::ContractClass as DeprecatedClass; +use starknet_api::state::SierraContractClass; +use strum_macros::AsRefStr; +use thiserror::Error; + +pub type ClassManagerResult = Result; +pub type ClassManagerClientResult = Result; + +pub type LocalClassManagerClient = LocalComponentClient; +pub type RemoteClassManagerClient = + RemoteComponentClient; + +pub type SharedClassManagerClient = Arc; +pub type ClassManagerRequestAndResponseSender = + ComponentRequestAndResponseSender; + +// TODO(Elin): export. 
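As a usage sketch for the `FsClassStorageBuilderForTesting` defined in `test_utils.rs` above (requires the crate's `testing` feature; the test name and assertions are illustrative, not taken from this PR):

```rust
use apollo_class_manager::test_utils::FsClassStorageBuilderForTesting;

#[test]
fn fs_storage_builder_defaults() {
    // By default the builder provisions fresh temp dirs and returns their
    // handles; keep them alive so the dirs are not deleted mid-test.
    let (storage, config, handles) = FsClassStorageBuilderForTesting::default().build();
    assert!(handles.is_some());
    assert_eq!(storage.persistent_root, config.persistent_root);
}
```

Calling `with_existing_paths` instead makes the builder reuse caller-supplied directories and return `None` handles, which is useful for simulating a node restart over the same storage.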
+pub type ClassId = ClassHash; +pub type Class = SierraContractClass; +pub type ExecutableClass = ContractClass; +pub type ExecutableClassHash = CompiledClassHash; + +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct ClassHashes { + pub class_hash: ClassHash, + pub executable_class_hash: ExecutableClassHash, +} + +/// Serves as the class manager's shared interface. +/// Requires `Send + Sync` to allow transferring and sharing resources (inputs, futures) across +/// threads. +#[cfg_attr(feature = "testing", automock)] +#[async_trait] +pub trait ClassManagerClient: Send + Sync { + async fn add_class(&self, class: Class) -> ClassManagerClientResult; + + // TODO(Elin): separate V0 and V1 APIs; remove Sierra version. + async fn get_executable( + &self, + class_id: ClassId, + ) -> ClassManagerClientResult>; + + async fn get_sierra(&self, class_id: ClassId) -> ClassManagerClientResult>; + + async fn add_deprecated_class( + &self, + class_id: ClassId, + class: DeprecatedClass, + ) -> ClassManagerClientResult<()>; + + // This method should only be used through state sync. + // It acts as a writer to the class storage, and bypasses compilation - thus unsafe. + async fn add_class_and_executable_unsafe( + &self, + class_id: ClassId, + class: Class, + executable_class_id: ExecutableClassHash, + executable_class: ExecutableClass, + ) -> ClassManagerClientResult<()>; +} + +#[derive(Clone, Debug, Error, Eq, PartialEq, Serialize, Deserialize)] +pub enum CachedClassStorageError { + // TODO(Elin): remove from, it's too permissive. + #[error(transparent)] + Storage(#[from] E), +} + +#[derive(Clone, Debug, Error, Eq, PartialEq, Serialize, Deserialize)] +pub enum ClassManagerError { + #[error("Internal client error: {0}")] + Client(String), + #[error("Failed to deserialize Sierra class: {0}")] + ClassSerde(String), + #[error("Class storage error: {0}")] + ClassStorage(String), + #[error("Sierra compiler error for class hash {class_hash}: {error}")] + SierraCompiler { + class_hash: ClassHash, + #[source] + error: SierraCompilerError, + }, + #[error( + "Cannot declare contract class with size of {contract_class_object_size}; max allowed \ + size: {max_contract_class_object_size}." 
+ )] + ContractClassObjectSizeTooLarge { + contract_class_object_size: usize, + max_contract_class_object_size: usize, + }, +} + +impl From> for ClassManagerError { + fn from(error: CachedClassStorageError) -> Self { + ClassManagerError::ClassStorage(error.to_string()) + } +} + +impl From for ClassManagerError { + fn from(error: serde_json::Error) -> Self { + ClassManagerError::ClassSerde(error.to_string()) + } +} + +#[derive(Clone, Debug, Error)] +pub enum ClassManagerClientError { + #[error(transparent)] + ClientError(#[from] ClientError), + #[error(transparent)] + ClassManagerError(#[from] ClassManagerError), +} + +#[derive(Clone, Serialize, Deserialize, AsRefStr)] +pub enum ClassManagerRequest { + AddClass(Class), + AddClassAndExecutableUnsafe(ClassId, Class, ExecutableClassHash, ExecutableClass), + AddDeprecatedClass(ClassId, DeprecatedClass), + GetExecutable(ClassId), + GetSierra(ClassId), +} +impl_debug_for_infra_requests_and_responses!(ClassManagerRequest); + +#[derive(Clone, Serialize, Deserialize, AsRefStr)] +pub enum ClassManagerResponse { + AddClass(ClassManagerResult), + AddClassAndExecutableUnsafe(ClassManagerResult<()>), + AddDeprecatedClass(ClassManagerResult<()>), + GetExecutable(ClassManagerResult>), + GetSierra(ClassManagerResult>), +} +impl_debug_for_infra_requests_and_responses!(ClassManagerResponse); + +#[async_trait] +impl ClassManagerClient for ComponentClientType +where + ComponentClientType: Send + Sync + ComponentClient, +{ + async fn add_class(&self, class: Class) -> ClassManagerClientResult { + let request = ClassManagerRequest::AddClass(class); + handle_all_response_variants!( + ClassManagerResponse, + AddClass, + ClassManagerClientError, + ClassManagerError, + Direct + ) + } + + async fn add_deprecated_class( + &self, + class_id: ClassId, + class: DeprecatedClass, + ) -> ClassManagerClientResult<()> { + let request = ClassManagerRequest::AddDeprecatedClass(class_id, class); + handle_all_response_variants!( + ClassManagerResponse, + AddDeprecatedClass, + ClassManagerClientError, + ClassManagerError, + Direct + ) + } + + async fn get_executable( + &self, + class_id: ClassId, + ) -> ClassManagerClientResult> { + let request = ClassManagerRequest::GetExecutable(class_id); + handle_all_response_variants!( + ClassManagerResponse, + GetExecutable, + ClassManagerClientError, + ClassManagerError, + Direct + ) + } + + async fn get_sierra(&self, class_id: ClassId) -> ClassManagerClientResult> { + let request = ClassManagerRequest::GetSierra(class_id); + handle_all_response_variants!( + ClassManagerResponse, + GetSierra, + ClassManagerClientError, + ClassManagerError, + Direct + ) + } + + async fn add_class_and_executable_unsafe( + &self, + class_id: ClassId, + class: Class, + executable_class_id: ExecutableClassHash, + executable_class: ExecutableClass, + ) -> ClassManagerClientResult<()> { + let request = ClassManagerRequest::AddClassAndExecutableUnsafe( + class_id, + class, + executable_class_id, + executable_class, + ); + handle_all_response_variants!( + ClassManagerResponse, + AddClassAndExecutableUnsafe, + ClassManagerClientError, + ClassManagerError, + Direct + ) + } +} + +pub struct EmptyClassManagerClient; + +#[async_trait] +impl ClassManagerClient for EmptyClassManagerClient { + async fn add_class(&self, _class: Class) -> ClassManagerClientResult { + Ok(Default::default()) + } + + async fn add_deprecated_class( + &self, + _class_id: ClassId, + _class: DeprecatedClass, + ) -> ClassManagerClientResult<()> { + Ok(()) + } + + async fn get_executable( + &self, + 
_class_id: ClassId, + ) -> ClassManagerClientResult> { + Ok(Some(ExecutableClass::V0(Default::default()))) + } + + async fn get_sierra(&self, _class_id: ClassId) -> ClassManagerClientResult> { + Ok(Some(Default::default())) + } + + async fn add_class_and_executable_unsafe( + &self, + _class_id: ClassId, + _class: Class, + _executable_class_id: ExecutableClassHash, + _executable_class: ExecutableClass, + ) -> ClassManagerClientResult<()> { + Ok(()) + } +} diff --git a/crates/apollo_class_manager_types/src/transaction_converter.rs b/crates/apollo_class_manager_types/src/transaction_converter.rs new file mode 100644 index 00000000000..2737d4cfbec --- /dev/null +++ b/crates/apollo_class_manager_types/src/transaction_converter.rs @@ -0,0 +1,280 @@ +use std::str::FromStr; + +use async_trait::async_trait; +#[cfg(any(feature = "testing", test))] +use mockall::automock; +use starknet_api::consensus_transaction::{ConsensusTransaction, InternalConsensusTransaction}; +use starknet_api::contract_class::{ClassInfo, ContractClass, SierraVersion}; +use starknet_api::core::{ChainId, ClassHash}; +use starknet_api::executable_transaction::{ + AccountTransaction, + Transaction as ExecutableTransaction, + ValidateCompiledClassHashError, +}; +use starknet_api::rpc_transaction::{ + InternalRpcDeclareTransactionV3, + InternalRpcDeployAccountTransaction, + InternalRpcTransaction, + InternalRpcTransactionWithoutTxHash, + RpcDeclareTransaction, + RpcDeclareTransactionV3, + RpcDeployAccountTransaction, + RpcTransaction, +}; +use starknet_api::state::SierraContractClass; +use starknet_api::transaction::fields::Fee; +use starknet_api::transaction::CalculateContractAddress; +use starknet_api::{executable_transaction, transaction, StarknetApiError}; +use thiserror::Error; + +use crate::{ClassHashes, ClassManagerClientError, SharedClassManagerClient}; + +#[derive(Error, Debug, Clone)] +pub enum TransactionConverterError { + #[error(transparent)] + ClassManagerClientError(#[from] ClassManagerClientError), + #[error("Class of hash: {class_hash} not found")] + ClassNotFound { class_hash: ClassHash }, + #[error(transparent)] + StarknetApiError(#[from] StarknetApiError), + #[error(transparent)] + ValidateCompiledClassHashError(#[from] ValidateCompiledClassHashError), +} + +pub type TransactionConverterResult = Result; + +#[cfg_attr(any(test, feature = "testing"), automock)] +#[async_trait] +pub trait TransactionConverterTrait: Send + Sync { + async fn convert_internal_consensus_tx_to_consensus_tx( + &self, + tx: InternalConsensusTransaction, + ) -> TransactionConverterResult; + + async fn convert_consensus_tx_to_internal_consensus_tx( + &self, + tx: ConsensusTransaction, + ) -> TransactionConverterResult; + + async fn convert_internal_rpc_tx_to_rpc_tx( + &self, + tx: InternalRpcTransaction, + ) -> TransactionConverterResult; + + async fn convert_rpc_tx_to_internal_rpc_tx( + &self, + tx: RpcTransaction, + ) -> TransactionConverterResult; + + async fn convert_internal_rpc_tx_to_executable_tx( + &self, + tx: InternalRpcTransaction, + ) -> TransactionConverterResult; + + async fn convert_internal_consensus_tx_to_executable_tx( + &self, + tx: InternalConsensusTransaction, + ) -> TransactionConverterResult; +} + +#[derive(Clone)] +pub struct TransactionConverter { + class_manager_client: SharedClassManagerClient, + chain_id: ChainId, +} + +impl TransactionConverter { + pub fn new(class_manager_client: SharedClassManagerClient, chain_id: ChainId) -> Self { + Self { class_manager_client, chain_id } + } + + async fn get_sierra( + 
&self, + class_hash: ClassHash, + ) -> TransactionConverterResult { + self.class_manager_client + .get_sierra(class_hash) + .await? + .ok_or(TransactionConverterError::ClassNotFound { class_hash }) + } + + async fn get_executable( + &self, + class_hash: ClassHash, + ) -> TransactionConverterResult { + self.class_manager_client + .get_executable(class_hash) + .await? + .ok_or(TransactionConverterError::ClassNotFound { class_hash }) + } +} + +#[async_trait] +impl TransactionConverterTrait for TransactionConverter { + async fn convert_internal_consensus_tx_to_consensus_tx( + &self, + tx: InternalConsensusTransaction, + ) -> TransactionConverterResult { + match tx { + InternalConsensusTransaction::RpcTransaction(tx) => self + .convert_internal_rpc_tx_to_rpc_tx(tx) + .await + .map(ConsensusTransaction::RpcTransaction), + InternalConsensusTransaction::L1Handler(tx) => { + Ok(ConsensusTransaction::L1Handler(tx.tx)) + } + } + } + + async fn convert_consensus_tx_to_internal_consensus_tx( + &self, + tx: ConsensusTransaction, + ) -> TransactionConverterResult { + match tx { + ConsensusTransaction::RpcTransaction(tx) => self + .convert_rpc_tx_to_internal_rpc_tx(tx) + .await + .map(InternalConsensusTransaction::RpcTransaction), + ConsensusTransaction::L1Handler(tx) => self + .convert_consensus_l1_handler_to_internal_l1_handler(tx) + .map(InternalConsensusTransaction::L1Handler), + } + } + + async fn convert_internal_rpc_tx_to_rpc_tx( + &self, + tx: InternalRpcTransaction, + ) -> TransactionConverterResult { + match tx.tx { + InternalRpcTransactionWithoutTxHash::Invoke(tx) => Ok(RpcTransaction::Invoke(tx)), + InternalRpcTransactionWithoutTxHash::Declare(tx) => { + Ok(RpcTransaction::Declare(RpcDeclareTransaction::V3(RpcDeclareTransactionV3 { + sender_address: tx.sender_address, + compiled_class_hash: tx.compiled_class_hash, + signature: tx.signature, + nonce: tx.nonce, + contract_class: self.get_sierra(tx.class_hash).await?, + resource_bounds: tx.resource_bounds, + tip: tx.tip, + paymaster_data: tx.paymaster_data, + account_deployment_data: tx.account_deployment_data, + nonce_data_availability_mode: tx.nonce_data_availability_mode, + fee_data_availability_mode: tx.fee_data_availability_mode, + }))) + } + InternalRpcTransactionWithoutTxHash::DeployAccount( + InternalRpcDeployAccountTransaction { tx, .. 
}, + ) => Ok(RpcTransaction::DeployAccount(tx)), + } + } + + async fn convert_rpc_tx_to_internal_rpc_tx( + &self, + tx: RpcTransaction, + ) -> TransactionConverterResult { + let tx_without_hash = match tx { + RpcTransaction::Invoke(tx) => InternalRpcTransactionWithoutTxHash::Invoke(tx), + RpcTransaction::Declare(RpcDeclareTransaction::V3(tx)) => { + let ClassHashes { class_hash, executable_class_hash } = + self.class_manager_client.add_class(tx.contract_class).await?; + if tx.compiled_class_hash != executable_class_hash { + return Err(TransactionConverterError::ValidateCompiledClassHashError( + ValidateCompiledClassHashError::CompiledClassHashMismatch { + computed_class_hash: executable_class_hash, + supplied_class_hash: tx.compiled_class_hash, + }, + )); + } + InternalRpcTransactionWithoutTxHash::Declare(InternalRpcDeclareTransactionV3 { + sender_address: tx.sender_address, + compiled_class_hash: tx.compiled_class_hash, + signature: tx.signature, + nonce: tx.nonce, + class_hash, + resource_bounds: tx.resource_bounds, + tip: tx.tip, + paymaster_data: tx.paymaster_data, + account_deployment_data: tx.account_deployment_data, + nonce_data_availability_mode: tx.nonce_data_availability_mode, + fee_data_availability_mode: tx.fee_data_availability_mode, + }) + } + RpcTransaction::DeployAccount(RpcDeployAccountTransaction::V3(tx)) => { + let contract_address = tx.calculate_contract_address()?; + InternalRpcTransactionWithoutTxHash::DeployAccount( + InternalRpcDeployAccountTransaction { + tx: RpcDeployAccountTransaction::V3(tx), + contract_address, + }, + ) + } + }; + let tx_hash = tx_without_hash.calculate_transaction_hash(&self.chain_id)?; + + Ok(InternalRpcTransaction { tx: tx_without_hash, tx_hash }) + } + + async fn convert_internal_rpc_tx_to_executable_tx( + &self, + InternalRpcTransaction { tx, tx_hash }: InternalRpcTransaction, + ) -> TransactionConverterResult { + match tx { + InternalRpcTransactionWithoutTxHash::Invoke(tx) => { + Ok(AccountTransaction::Invoke(executable_transaction::InvokeTransaction { + tx: tx.into(), + tx_hash, + })) + } + InternalRpcTransactionWithoutTxHash::Declare(tx) => { + let sierra = self.get_sierra(tx.class_hash).await?; + let class_info = ClassInfo { + contract_class: self.get_executable(tx.class_hash).await?, + sierra_program_length: sierra.sierra_program.len(), + abi_length: sierra.abi.len(), + sierra_version: SierraVersion::from_str(&sierra.contract_class_version)?, + }; + + Ok(AccountTransaction::Declare(executable_transaction::DeclareTransaction { + tx: tx.into(), + tx_hash, + class_info, + })) + } + InternalRpcTransactionWithoutTxHash::DeployAccount( + InternalRpcDeployAccountTransaction { tx, contract_address }, + ) => Ok(AccountTransaction::DeployAccount( + executable_transaction::DeployAccountTransaction { + tx: tx.into(), + contract_address, + tx_hash, + }, + )), + } + } + + async fn convert_internal_consensus_tx_to_executable_tx( + &self, + tx: InternalConsensusTransaction, + ) -> TransactionConverterResult { + match tx { + InternalConsensusTransaction::RpcTransaction(tx) => Ok(ExecutableTransaction::Account( + self.convert_internal_rpc_tx_to_executable_tx(tx).await?, + )), + InternalConsensusTransaction::L1Handler(tx) => Ok(ExecutableTransaction::L1Handler(tx)), + } + } +} + +impl TransactionConverter { + fn convert_consensus_l1_handler_to_internal_l1_handler( + &self, + tx: transaction::L1HandlerTransaction, + ) -> TransactionConverterResult { + Ok(executable_transaction::L1HandlerTransaction::create( + tx, + &self.chain_id, + // TODO(Gilad): 
Change this once we put real value in paid_fee_on_l1. + Fee(1), + )?) + } +} diff --git a/crates/apollo_compilation_utils/Cargo.toml b/crates/apollo_compilation_utils/Cargo.toml new file mode 100644 index 00000000000..d4f03db24d0 --- /dev/null +++ b/crates/apollo_compilation_utils/Cargo.toml @@ -0,0 +1,36 @@ +[package] +edition.workspace = true +license.workspace = true +name = "apollo_compilation_utils" +repository.workspace = true +version.workspace = true +description = "A utility crate for Sierra compilation related code." + +[features] +cairo_native = ["dep:cairo-native"] +testing = [] + +[lints] +workspace = true + +[dependencies] +cairo-lang-sierra.workspace = true +cairo-lang-starknet-classes.workspace = true +cairo-lang-utils.workspace = true +cairo-native = { workspace = true, optional = true } +rlimit.workspace = true +serde.workspace = true +serde_json.workspace = true +starknet-types-core.workspace = true +starknet_api.workspace = true +tempfile.workspace = true +thiserror.workspace = true + +[dev-dependencies] +apollo_infra_utils.workspace = true +assert_matches.workspace = true +rstest.workspace = true + +[build-dependencies] +apollo_infra_utils.workspace = true +tempfile.workspace = true diff --git a/crates/apollo_compilation_utils/src/build_utils.rs b/crates/apollo_compilation_utils/src/build_utils.rs new file mode 100644 index 00000000000..b420de3b773 --- /dev/null +++ b/crates/apollo_compilation_utils/src/build_utils.rs @@ -0,0 +1,68 @@ +use std::process::Command; + +use tempfile::TempDir; + +use crate::paths::{binary_path, shared_folder_dir}; + +pub fn install_compiler_binary( + binary_name: &str, + required_version: &str, + cargo_install_args: &[&str], + out_dir: &std::path::Path, +) { + let binary_path = binary_path(out_dir, binary_name); + println!("cargo:rerun-if-changed={}", binary_path.to_str().unwrap()); + + match Command::new(&binary_path).args(["--version"]).output() { + Ok(binary_version) => { + let binary_version = String::from_utf8(binary_version.stdout) + .expect("Failed to convert the binary version to a string."); + if binary_version.contains(required_version) { + println!("The {binary_name} binary is up to date."); + return; + } else { + println!( + "The {binary_name} binary is not up to date. Installing the required version." + ); + std::fs::remove_file(&binary_path).expect("Failed to remove the old binary."); + } + } + Err(_) => { + println!("The {binary_name} binary is not installed. Installing the required version."); + } + } + + let temp_cargo_path = TempDir::new().expect("Failed to create a temporary directory."); + let post_install_file_path = temp_cargo_path.path().join("bin").join(binary_name); + + let install_command_status = Command::new("cargo") + .args([ + "install", + "--root", + temp_cargo_path.path().to_str().expect("Failed to convert cargo_path to str"), + "--locked", + ]) + .args(cargo_install_args) + .status() + .unwrap_or_else(|_| panic!("Failed to install {binary_name}")); + + if !install_command_status.success() { + panic!("Failed to install {}", binary_name); + } + + // Move the '{binary_name}' executable to a shared location. 
+ std::fs::create_dir_all(shared_folder_dir(out_dir)) + .expect("Failed to create shared executables folder"); + let move_command_status = Command::new("mv") + .args([post_install_file_path.as_os_str(), binary_path.as_os_str()]) + .status() + .expect("Failed to perform mv command."); + + if !move_command_status.success() { + panic!("Failed to move the {} binary to the shared folder.", binary_name); + } + + std::fs::remove_dir_all(temp_cargo_path).expect("Failed to remove the cargo directory."); + + println!("Successfully set executable file: {:?}", binary_path.display()); +} diff --git a/crates/apollo_compilation_utils/src/class_utils.rs b/crates/apollo_compilation_utils/src/class_utils.rs new file mode 100644 index 00000000000..ceacb18f2f1 --- /dev/null +++ b/crates/apollo_compilation_utils/src/class_utils.rs @@ -0,0 +1,67 @@ +use std::clone::Clone; + +use cairo_lang_starknet_classes::contract_class::{ + ContractClass as CairoLangContractClass, + ContractEntryPoint as CairoLangContractEntryPoint, + ContractEntryPoints as CairoLangContractEntryPoints, +}; +use cairo_lang_utils::bigint::BigUintAsHex; +use starknet_api::rpc_transaction::EntryPointByType as StarknetApiEntryPointByType; +use starknet_api::state::{ + EntryPoint as StarknetApiEntryPoint, + SierraContractClass as StarknetApiContractClass, +}; +use starknet_types_core::felt::Felt; + +/// Returns a [`CairoLangContractClass`] struct ready for Sierra compilation. Note the `abi` field +/// is None as it is not relevant for the compilation. +pub fn into_contract_class_for_compilation( + rpc_contract_class: &StarknetApiContractClass, +) -> CairoLangContractClass { + let sierra_program = + sierra_program_as_felts_to_big_uint_as_hex(&rpc_contract_class.sierra_program); + let entry_points_by_type = + into_cairo_lang_contract_entry_points(&rpc_contract_class.entry_points_by_type); + + CairoLangContractClass { + sierra_program, + sierra_program_debug_info: None, + contract_class_version: rpc_contract_class.contract_class_version.clone(), + entry_points_by_type, + abi: None, + } +} + +fn into_cairo_lang_contract_entry_points( + entry_points_by_type: &StarknetApiEntryPointByType, +) -> CairoLangContractEntryPoints { + let StarknetApiEntryPointByType { constructor, external, l1handler } = entry_points_by_type; + CairoLangContractEntryPoints { + external: into_cairo_lang_contract_entry_points_vec(external), + l1_handler: into_cairo_lang_contract_entry_points_vec(l1handler), + constructor: into_cairo_lang_contract_entry_points_vec(constructor), + } +} + +fn into_cairo_lang_contract_entry_points_vec( + entry_points: &[StarknetApiEntryPoint], +) -> Vec { + entry_points.iter().map(into_cairo_lang_contract_entry_point).collect() +} + +fn into_cairo_lang_contract_entry_point( + entry_point: &StarknetApiEntryPoint, +) -> CairoLangContractEntryPoint { + CairoLangContractEntryPoint { + selector: entry_point.selector.0.to_biguint(), + function_idx: entry_point.function_idx.0, + } +} + +pub fn sierra_program_as_felts_to_big_uint_as_hex(sierra_program: &[Felt]) -> Vec { + sierra_program.iter().map(felt_to_big_uint_as_hex).collect() +} + +fn felt_to_big_uint_as_hex(felt: &Felt) -> BigUintAsHex { + BigUintAsHex { value: felt.to_biguint() } +} diff --git a/crates/apollo_compilation_utils/src/compiler_utils.rs b/crates/apollo_compilation_utils/src/compiler_utils.rs new file mode 100644 index 00000000000..44b08db8ae7 --- /dev/null +++ b/crates/apollo_compilation_utils/src/compiler_utils.rs @@ -0,0 +1,61 @@ +use std::io::Write; +// TODO(Avi, 01/06/2025): Adapt 
this import to make the crate compile on windows.
+use std::os::unix::process::ExitStatusExt;
+use std::path::Path;
+use std::process::Command;
+
+use cairo_lang_starknet_classes::contract_class::ContractClass;
+use tempfile::NamedTempFile;
+
+use crate::errors::CompilationUtilError;
+use crate::resource_limits::ResourceLimits;
+
+pub fn compile_with_args(
+    compiler_binary_path: &Path,
+    contract_class: ContractClass,
+    additional_args: &[&str],
+    resource_limits: ResourceLimits,
+) -> Result<Vec<u8>, CompilationUtilError> {
+    // Create a temporary file to store the Sierra contract class.
+    let serialized_contract_class = serde_json::to_string(&contract_class)?;
+
+    let mut temp_file = NamedTempFile::new()?;
+    temp_file.write_all(serialized_contract_class.as_bytes())?;
+    let temp_file_path = temp_file.path().to_str().ok_or(CompilationUtilError::UnexpectedError(
+        "Failed to get temporary file path".to_owned(),
+    ))?;
+
+    // Set the parameters for the compile process.
+    let mut command = Command::new(compiler_binary_path.as_os_str());
+    command.arg(temp_file_path).args(additional_args);
+
+    // Apply the resource limits to the command.
+    resource_limits.apply(&mut command);
+
+    // Run the compile process.
+    let compile_output = command.output()?;
+
+    if !compile_output.status.success() {
+        let signal_info = match compile_output.status.signal() {
+            Some(9) => {
+                "SIGKILL (9): Process was forcefully killed (for example, because it exceeded CPU \
+                 limit)."
+            }
+            Some(25) => "SIGXFSZ (25): File size limit exceeded.",
+            None => {
+                "Process exited with non-zero status but no signal (likely a handled error, e.g., \
+                 memory allocation failure)."
+            }
+            Some(sig) => &format!("Process terminated by unexpected signal: {}", sig),
+        };
+
+        let stderr_output = String::from_utf8(compile_output.stderr)
+            .unwrap_or_else(|_| "Failed to decode stderr output".to_string());
+
+        return Err(CompilationUtilError::CompilationError(format!(
+            "Exit status: {}\nStderr: {}\nSignal info: {}",
+            compile_output.status, stderr_output, signal_info
+        )));
+    }
+    Ok(compile_output.stdout)
+}
diff --git a/crates/starknet_sierra_multicompile/src/errors.rs b/crates/apollo_compilation_utils/src/errors.rs
similarity index 100%
rename from crates/starknet_sierra_multicompile/src/errors.rs
rename to crates/apollo_compilation_utils/src/errors.rs
diff --git a/crates/apollo_compilation_utils/src/lib.rs b/crates/apollo_compilation_utils/src/lib.rs
new file mode 100644
index 00000000000..8763babc34d
--- /dev/null
+++ b/crates/apollo_compilation_utils/src/lib.rs
@@ -0,0 +1,11 @@
+//! A utility lib for Sierra compilation.
+
+pub mod build_utils;
+pub mod class_utils;
+pub mod compiler_utils;
+pub mod errors;
+pub mod paths;
+pub mod resource_limits;
+
+#[cfg(feature = "testing")]
+pub mod test_utils;
diff --git a/crates/apollo_compilation_utils/src/paths.rs b/crates/apollo_compilation_utils/src/paths.rs
new file mode 100644
index 00000000000..2d1230a77f8
--- /dev/null
+++ b/crates/apollo_compilation_utils/src/paths.rs
@@ -0,0 +1,19 @@
+// Note: This module includes path resolution functions that are needed during build and run times.
+// It must not contain functionality that is available in only one of these modes. Specifically,
+// it must avoid relying on env variables such as 'CARGO_*' or 'OUT_DIR'.
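To make the ancestor arithmetic below concrete, here is a worked example under the standard Cargo layout (`target/<profile>/build/<crate>-<hash>/out`); the hash and binary name are illustrative:

```rust
use std::path::Path;

fn main() {
    // A typical OUT_DIR handed to a build script (hypothetical hash).
    let out_dir = Path::new("target/debug/build/apollo_compile_to_casm-1a2b3c/out");

    // ancestors(): out -> <crate>-<hash> -> build -> <profile>.
    let target_dir = out_dir.ancestors().nth(3).unwrap();
    assert_eq!(target_dir, Path::new("target/debug"));

    // Where install_compiler_binary places the shared executables.
    let binary = target_dir.join("shared_executables").join("starknet-sierra-compile");
    println!("{}", binary.display());
}
```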
+
+fn target_dir(out_dir: &std::path::Path) -> std::path::PathBuf {
+    out_dir
+        .ancestors()
+        .nth(3)
+        .expect("Failed to navigate up three levels from OUT_DIR")
+        .to_path_buf()
+}
+
+pub fn shared_folder_dir(out_dir: &std::path::Path) -> std::path::PathBuf {
+    target_dir(out_dir).join("shared_executables")
+}
+
+pub fn binary_path(out_dir: &std::path::Path, binary_name: &str) -> std::path::PathBuf {
+    shared_folder_dir(out_dir).join(binary_name)
+}
diff --git a/crates/starknet_sierra_multicompile/src/resource_limits.rs b/crates/apollo_compilation_utils/src/resource_limits.rs
similarity index 100%
rename from crates/starknet_sierra_multicompile/src/resource_limits.rs
rename to crates/apollo_compilation_utils/src/resource_limits.rs
diff --git a/crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_test.rs b/crates/apollo_compilation_utils/src/resource_limits/resource_limits_test.rs
similarity index 78%
rename from crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_test.rs
rename to crates/apollo_compilation_utils/src/resource_limits/resource_limits_test.rs
index 67530b16b3a..39636212b07 100644
--- a/crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_test.rs
+++ b/crates/apollo_compilation_utils/src/resource_limits/resource_limits_test.rs
@@ -1,3 +1,4 @@
+use std::os::unix::process::ExitStatusExt;
 use std::process::Command;
 use std::time::Instant;
 
@@ -15,8 +16,10 @@ fn test_cpu_time_limit() {
     let mut command = Command::new("bash");
     command.args(["-c", "while true; do :; done;"]);
     cpu_time_rlimit.apply(&mut command);
-    command.spawn().expect("Failed to start CPU consuming process").wait().unwrap();
+    let status = command.spawn().expect("Failed to start CPU consuming process").wait().unwrap();
     assert!(start.elapsed().as_secs() <= cpu_limit);
+    let signal = status.signal();
+    assert_eq!(signal, Some(9), "Process should terminate with SIGKILL (9), got {:?}", signal);
 }
 
 #[rstest]
@@ -29,7 +32,12 @@ fn test_memory_size_limit() {
     command.stderr(std::process::Stdio::piped());
     memory_size_rlimit.apply(&mut command);
     let output = command.output().expect("Failed to start memory consuming process");
+
+    let signal = output.status.signal();
+    assert!(signal.is_none(), "Exceeding memory usage should not cause a signal, got {:?}", signal);
+
     let stderr = String::from_utf8_lossy(&output.stderr);
+
     for line in stderr.lines() {
         if line.starts_with("bash: xmalloc: cannot allocate") {
             println!(
@@ -55,8 +63,10 @@ fn test_file_size_limit() {
     let mut command = Command::new("bash");
     command.args(["-c", format!("while true; do echo 0 >> {temp_file_path}; done;").as_str()]);
     file_size_rlimit.apply(&mut command);
-    command.spawn().expect("Failed to start disk consuming process").wait().unwrap();
+    let status = command.spawn().expect("Failed to start disk consuming process").wait().unwrap();
     assert_eq!(std::fs::metadata(temp_file_path).unwrap().len(), file_limit);
+    let signal = status.signal();
+    assert!(signal == Some(25), "Process should terminate with SIGXFSZ (25), got {:?}", signal);
 }
 
 #[rstest]
@@ -75,7 +85,10 @@ fn test_successful_resource_limited_command() {
     command.args(["-c", format!("echo '{print_message}' > {temp_file_path}").as_str()]);
     resource_limits.apply(&mut command);
     let exit_status = command.spawn().expect("Failed to start process").wait().unwrap();
-
-    assert!(exit_status.success());
+    assert!(
+        exit_status.success(),
+        "Process did not complete successfully: signal={:?}",
+        exit_status.signal()
+    );
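+    // The file should contain exactly the message printed by the resource-limited command.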
     assert_eq!(std::fs::read_to_string(temp_file_path).unwrap(), format!("{print_message}\n"));
 }
diff --git a/crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_unix.rs b/crates/apollo_compilation_utils/src/resource_limits/resource_limits_unix.rs
similarity index 94%
rename from crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_unix.rs
rename to crates/apollo_compilation_utils/src/resource_limits/resource_limits_unix.rs
index 5499dc0e17a..f549adde873 100644
--- a/crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_unix.rs
+++ b/crates/apollo_compilation_utils/src/resource_limits/resource_limits_unix.rs
@@ -24,9 +24,10 @@ struct RLimit {
 impl RLimit {
     /// Set the resource limit for the current process.
     fn set(&self) -> io::Result<()> {
-        // Use `println!` and not a logger because this method is called in an unsafe block, and we
-        // don't want to risk unexpected behavior.
-        println!(
+        // Use `eprintln!` and not a logger because this method is called in an unsafe block, and we
+        // don't want to risk unexpected behavior. Use `eprintln!` and not `println!` because the
+        // latter writes to stdout, which is deserialized later and would be corrupted.
+        eprintln!(
             "Setting {:?} limits: {} {} soft limit; {} {} hard limit.",
             self.resource, self.soft_limit, self.units, self.hard_limit, self.units
         );
diff --git a/crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_windows.rs b/crates/apollo_compilation_utils/src/resource_limits/resource_limits_windows.rs
similarity index 100%
rename from crates/starknet_sierra_multicompile/src/resource_limits/resource_limits_windows.rs
rename to crates/apollo_compilation_utils/src/resource_limits/resource_limits_windows.rs
diff --git a/crates/apollo_compilation_utils/src/test_utils.rs b/crates/apollo_compilation_utils/src/test_utils.rs
new file mode 100644
index 00000000000..9ead9a4f9bf
--- /dev/null
+++ b/crates/apollo_compilation_utils/src/test_utils.rs
@@ -0,0 +1,33 @@
+use std::fs;
+use std::path::Path;
+
+use cairo_lang_starknet_classes::contract_class::{ContractClass, ContractEntryPoints};
+use cairo_lang_utils::bigint::BigUintAsHex;
+use serde::Deserialize;
+
+/// Same as `ContractClass`, but ignores unnecessary fields like `abi` in deserialization.
+#[derive(Deserialize)]
+struct DeserializedContractClass {
+    pub sierra_program: Vec<BigUintAsHex>,
+    pub sierra_program_debug_info: Option<cairo_lang_sierra::debug_info::DebugInfo>,
+    pub contract_class_version: String,
+    pub entry_points_by_type: ContractEntryPoints,
+}
+
+pub fn contract_class_from_file<P: AsRef<Path>>(path: P) -> ContractClass {
+    let DeserializedContractClass {
+        sierra_program,
+        sierra_program_debug_info,
+        contract_class_version,
+        entry_points_by_type,
+    } = serde_json::from_str(&fs::read_to_string(path).expect("Failed to read input file."))
+        .expect("Deserialization failed.");
+
+    ContractClass {
+        sierra_program,
+        sierra_program_debug_info,
+        contract_class_version,
+        entry_points_by_type,
+        abi: None,
+    }
+}
diff --git a/crates/apollo_compile_to_casm/Cargo.toml b/crates/apollo_compile_to_casm/Cargo.toml
new file mode 100644
index 00000000000..605b14b5692
--- /dev/null
+++ b/crates/apollo_compile_to_casm/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+edition.workspace = true
+license.workspace = true
+name = "apollo_compile_to_casm"
+repository.workspace = true
+version.workspace = true
+description = "A utility crate for compiling Sierra code into CASM."
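+# Note: compilation is performed by invoking the `starknet-sierra-compile` binary as a
+# subprocess (see src/compiler.rs); the binary is installed at build time.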
+
+[features]
+testing = []
+
+[lints]
+workspace = true
+
+[dependencies]
+apollo_compilation_utils.workspace = true
+apollo_compile_to_casm_types.workspace = true
+apollo_config.workspace = true
+apollo_infra.workspace = true
+apollo_metrics.workspace = true
+apollo_proc_macros.workspace = true
+async-trait.workspace = true
+cairo-lang-starknet-classes.workspace = true
+pretty_assertions.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+starknet_api.workspace = true
+thiserror.workspace = true
+tracing.workspace = true
+validator.workspace = true
+
+[dev-dependencies]
+apollo_compilation_utils = { workspace = true, features = ["testing"] }
+apollo_infra_utils.workspace = true
+assert_matches.workspace = true
+mempool_test_utils.workspace = true
+
+[build-dependencies]
+apollo_compilation_utils.workspace = true
+apollo_infra_utils.workspace = true
diff --git a/crates/apollo_compile_to_casm/src/allowed_libfuncs.json b/crates/apollo_compile_to_casm/src/allowed_libfuncs.json
new file mode 100644
index 00000000000..d42a987af0c
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/allowed_libfuncs.json
@@ -0,0 +1,249 @@
+{
+  "allowed_libfuncs": [
+    "add_circuit_input",
+    "alloc_local",
+    "array_append",
+    "array_get",
+    "array_len",
+    "array_new",
+    "array_pop_front",
+    "array_pop_front_consume",
+    "array_slice",
+    "array_snapshot_multi_pop_back",
+    "array_snapshot_multi_pop_front",
+    "array_snapshot_pop_back",
+    "array_snapshot_pop_front",
+    "bitwise",
+    "bool_and_impl",
+    "bool_not_impl",
+    "bool_or_impl",
+    "bool_to_felt252",
+    "bool_xor_impl",
+    "bounded_int_add",
+    "bounded_int_constrain",
+    "bounded_int_div_rem",
+    "bounded_int_is_zero",
+    "bounded_int_mul",
+    "bounded_int_sub",
+    "bounded_int_trim_max",
+    "bounded_int_trim_min",
+    "bounded_int_wrap_non_zero",
+    "box_forward_snapshot",
+    "branch_align",
+    "bytes31_const",
+    "bytes31_to_felt252",
+    "bytes31_try_from_felt252",
+    "call_contract_syscall",
+    "circuit_failure_guarantee_verify",
+    "class_hash_const",
+    "class_hash_to_felt252",
+    "class_hash_try_from_felt252",
+    "const_as_box",
+    "const_as_immediate",
+    "contract_address_const",
+    "contract_address_to_felt252",
+    "contract_address_try_from_felt252",
+    "coupon_buy",
+    "coupon_call",
+    "coupon_refund",
+    "deploy_syscall",
+    "disable_ap_tracking",
+    "downcast",
+    "drop",
+    "dup",
+    "ec_neg",
+    "ec_point_from_x_nz",
+    "ec_point_is_zero",
+    "ec_point_try_new_nz",
+    "ec_point_unwrap",
+    "ec_point_zero",
+    "ec_state_add",
+    "ec_state_add_mul",
+    "ec_state_init",
+    "ec_state_try_finalize_nz",
+    "emit_event_syscall",
+    "enable_ap_tracking",
+    "enum_from_bounded_int",
+    "enum_init",
+    "enum_match",
+    "enum_snapshot_match",
+    "eval_circuit",
+    "felt252_add",
+    "felt252_const",
+    "felt252_dict_entry_finalize",
+    "felt252_dict_entry_get",
+    "felt252_dict_new",
+    "felt252_dict_squash",
+    "felt252_div",
+    "felt252_is_zero",
+    "felt252_mul",
+    "felt252_sub",
+    "finalize_locals",
+    "function_call",
+    "get_block_hash_syscall",
+    "get_builtin_costs",
+    "get_circuit_descriptor",
+    "get_circuit_output",
+    "get_class_hash_at_syscall",
+    "get_execution_info_syscall",
+    "get_execution_info_v2_syscall",
+    "hades_permutation",
+    "i128_const",
+    "i128_diff",
+    "i128_eq",
+    "i128_overflowing_add_impl",
+    "i128_overflowing_sub_impl",
+    "i128_to_felt252",
+    "i128_try_from_felt252",
+    "i16_const",
+    "i16_diff",
+    "i16_eq",
+    "i16_overflowing_add_impl",
+    "i16_overflowing_sub_impl",
+    "i16_to_felt252",
+    "i16_try_from_felt252",
+    "i16_wide_mul",
+    "i32_const",
+    "i32_diff",
+    "i32_eq",
"i32_overflowing_add_impl", + "i32_overflowing_sub_impl", + "i32_to_felt252", + "i32_try_from_felt252", + "i32_wide_mul", + "i64_const", + "i64_diff", + "i64_eq", + "i64_overflowing_add_impl", + "i64_overflowing_sub_impl", + "i64_to_felt252", + "i64_try_from_felt252", + "i64_wide_mul", + "i8_const", + "i8_diff", + "i8_eq", + "i8_overflowing_add_impl", + "i8_overflowing_sub_impl", + "i8_to_felt252", + "i8_try_from_felt252", + "i8_wide_mul", + "init_circuit_data", + "int_range_pop_front", + "int_range_try_new", + "into_box", + "into_u96_guarantee", + "jump", + "keccak_syscall", + "library_call_syscall", + "match_nullable", + "meta_tx_v0_syscall", + "null", + "nullable_forward_snapshot", + "nullable_from_box", + "pedersen", + "redeposit_gas", + "rename", + "replace_class_syscall", + "revoke_ap_tracking", + "secp256k1_add_syscall", + "secp256k1_get_point_from_x_syscall", + "secp256k1_get_xy_syscall", + "secp256k1_mul_syscall", + "secp256k1_new_syscall", + "secp256r1_add_syscall", + "secp256r1_get_point_from_x_syscall", + "secp256r1_get_xy_syscall", + "secp256r1_mul_syscall", + "secp256r1_new_syscall", + "send_message_to_l1_syscall", + "sha256_process_block_syscall", + "sha256_state_handle_digest", + "sha256_state_handle_init", + "snapshot_take", + "span_from_tuple", + "storage_address_from_base", + "storage_address_from_base_and_offset", + "storage_address_to_felt252", + "storage_address_try_from_felt252", + "storage_base_address_const", + "storage_base_address_from_felt252", + "storage_read_syscall", + "storage_write_syscall", + "store_local", + "store_temp", + "struct_construct", + "struct_deconstruct", + "struct_snapshot_deconstruct", + "try_into_circuit_modulus", + "tuple_from_span", + "u128_byte_reverse", + "u128_const", + "u128_eq", + "u128_guarantee_mul", + "u128_is_zero", + "u128_mul_guarantee_verify", + "u128_overflowing_add", + "u128_overflowing_sub", + "u128_safe_divmod", + "u128_sqrt", + "u128_to_felt252", + "u128s_from_felt252", + "u16_bitwise", + "u16_const", + "u16_eq", + "u16_is_zero", + "u16_overflowing_add", + "u16_overflowing_sub", + "u16_safe_divmod", + "u16_sqrt", + "u16_to_felt252", + "u16_try_from_felt252", + "u16_wide_mul", + "u256_guarantee_inv_mod_n", + "u256_is_zero", + "u256_safe_divmod", + "u256_sqrt", + "u32_bitwise", + "u32_const", + "u32_eq", + "u32_is_zero", + "u32_overflowing_add", + "u32_overflowing_sub", + "u32_safe_divmod", + "u32_sqrt", + "u32_to_felt252", + "u32_try_from_felt252", + "u32_wide_mul", + "u512_safe_divmod_by_u256", + "u64_bitwise", + "u64_const", + "u64_eq", + "u64_is_zero", + "u64_overflowing_add", + "u64_overflowing_sub", + "u64_safe_divmod", + "u64_sqrt", + "u64_to_felt252", + "u64_try_from_felt252", + "u64_wide_mul", + "u8_bitwise", + "u8_const", + "u8_eq", + "u8_is_zero", + "u8_overflowing_add", + "u8_overflowing_sub", + "u8_safe_divmod", + "u8_sqrt", + "u8_to_felt252", + "u8_try_from_felt252", + "u8_wide_mul", + "u96_guarantee_verify", + "u96_limbs_less_than_guarantee_verify", + "u96_single_limb_less_than_guarantee_verify", + "unbox", + "unwrap_non_zero", + "upcast", + "withdraw_gas", + "withdraw_gas_all" + ] +} diff --git a/crates/apollo_compile_to_casm/src/communication.rs b/crates/apollo_compile_to_casm/src/communication.rs new file mode 100644 index 00000000000..7ae918899db --- /dev/null +++ b/crates/apollo_compile_to_casm/src/communication.rs @@ -0,0 +1,28 @@ +use apollo_compile_to_casm_types::{ + SierraCompilerError, + SierraCompilerRequest, + SierraCompilerResponse, +}; +use 
+use apollo_infra::component_server::{ConcurrentLocalComponentServer, RemoteComponentServer};
+use async_trait::async_trait;
+
+use crate::SierraCompiler;
+
+pub type LocalSierraCompilerServer =
+    ConcurrentLocalComponentServer<SierraCompiler, SierraCompilerRequest, SierraCompilerResponse>;
+pub type RemoteSierraCompilerServer =
+    RemoteComponentServer<SierraCompilerRequest, SierraCompilerResponse>;
+
+#[async_trait]
+impl ComponentRequestHandler<SierraCompilerRequest, SierraCompilerResponse> for SierraCompiler {
+    async fn handle_request(&mut self, request: SierraCompilerRequest) -> SierraCompilerResponse {
+        match request {
+            SierraCompilerRequest::Compile(contract_class) => {
+                let compilation_result =
+                    self.compile(contract_class).map_err(SierraCompilerError::from);
+                SierraCompilerResponse::Compile(compilation_result)
+            }
+        }
+    }
+}
diff --git a/crates/apollo_compile_to_casm/src/compile_test.rs b/crates/apollo_compile_to_casm/src/compile_test.rs
new file mode 100644
index 00000000000..e2ce7bdcc1a
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/compile_test.rs
@@ -0,0 +1,164 @@
+use apollo_compilation_utils::errors::CompilationUtilError;
+use apollo_compilation_utils::test_utils::contract_class_from_file;
+use apollo_infra_utils::path::resolve_project_relative_path;
+use assert_matches::assert_matches;
+use cairo_lang_starknet_classes::allowed_libfuncs::{
+    lookup_allowed_libfuncs_list,
+    AllowedLibfuncs,
+    ListSelector,
+    BUILTIN_AUDITED_LIBFUNCS_LIST,
+};
+use cairo_lang_starknet_classes::contract_class::ContractClass as CairoLangContractClass;
+use mempool_test_utils::{FAULTY_ACCOUNT_CLASS_FILE, TEST_FILES_FOLDER};
+use pretty_assertions::assert_eq;
+use starknet_api::contract_class::{ContractClass, SierraVersion};
+use starknet_api::state::SierraContractClass;
+
+use crate::compiler::SierraToCasmCompiler;
+use crate::config::{SierraCompilationConfig, DEFAULT_MAX_BYTECODE_SIZE, DEFAULT_MAX_MEMORY_USAGE};
+use crate::{RawClass, SierraCompiler};
+
+const SIERRA_COMPILATION_CONFIG: SierraCompilationConfig = SierraCompilationConfig {
+    max_bytecode_size: DEFAULT_MAX_BYTECODE_SIZE,
+    max_memory_usage: None,
+};
+
+fn compiler() -> SierraToCasmCompiler {
+    SierraToCasmCompiler::new(SIERRA_COMPILATION_CONFIG)
+}
+
+fn get_test_contract() -> CairoLangContractClass {
+    let sierra_path =
+        resolve_project_relative_path(TEST_FILES_FOLDER).unwrap().join(FAULTY_ACCOUNT_CLASS_FILE);
+    contract_class_from_file(sierra_path)
+}
+
+fn get_faulty_test_contract() -> CairoLangContractClass {
+    let mut contract_class = get_test_contract();
+    // Truncate the sierra program to trigger an error.
+    contract_class.sierra_program = contract_class.sierra_program[..100].to_vec();
+    contract_class
+}
+
+#[test]
+fn test_compile_sierra_to_casm() {
+    let compiler = compiler();
+    let expected_casm_contract_length = 72305;
+
+    let contract_class = get_test_contract();
+    let casm_contract = compiler.compile(contract_class).unwrap();
+    let serialized_casm = serde_json::to_string_pretty(&casm_contract).unwrap().into_bytes();
+
+    assert_eq!(serialized_casm.len(), expected_casm_contract_length);
+}
+
+// TODO(Arni, 1/5/2024): Add a test for a panic result.
+#[test]
+fn test_negative_flow_compile_sierra_to_casm() {
+    let compiler = compiler();
+    let contract_class = get_faulty_test_contract();
+
+    let result = compiler.compile(contract_class);
+    assert_matches!(result, Err(CompilationUtilError::CompilationError(..)));
+}
+
+#[test]
+fn test_max_bytecode_size() {
+    let contract_class = get_test_contract();
+    let expected_casm_bytecode_length = 1965;
+
+    // Positive flow.
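+    // A limit equal to the expected bytecode length is just enough for compilation to succeed.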
+    let compiler = SierraToCasmCompiler::new(SierraCompilationConfig {
+        max_bytecode_size: expected_casm_bytecode_length,
+        max_memory_usage: None,
+    });
+    let casm_contract_class = compiler
+        .compile(contract_class.clone())
+        .expect("Failed to compile contract class. Probably an issue with the max_bytecode_size.");
+    assert_eq!(casm_contract_class.bytecode.len(), expected_casm_bytecode_length);
+
+    // Negative flow.
+    let compiler = SierraToCasmCompiler::new(SierraCompilationConfig {
+        max_bytecode_size: expected_casm_bytecode_length - 1,
+        max_memory_usage: None,
+    });
+    let result = compiler.compile(contract_class);
+    assert_matches!(result, Err(CompilationUtilError::CompilationError(string))
+        if string.contains("Code size limit exceeded.")
+    );
+}
+
+// TODO(Elin): mock compiler.
+#[test]
+fn test_sierra_compiler() {
+    // Setup.
+    let compiler = compiler();
+    let class = get_test_contract();
+    let expected_executable_class = compiler.compile(class.clone()).unwrap();
+
+    let compiler = SierraCompiler::new(compiler);
+    let class = SierraContractClass::from(class);
+    let sierra_version = SierraVersion::extract_from_program(&class.sierra_program).unwrap();
+    let expected_executable_class = ContractClass::V1((expected_executable_class, sierra_version));
+
+    // Test.
+    let raw_class = RawClass::try_from(class).unwrap();
+    let (raw_executable_class, executable_class_hash) = compiler.compile(raw_class).unwrap();
+    let executable_class = ContractClass::try_from(raw_executable_class).unwrap();
+
+    // Assert.
+    assert_eq!(executable_class, expected_executable_class);
+    assert_eq!(executable_class_hash, expected_executable_class.compiled_class_hash());
+}
+
+#[test]
+fn allowed_libfuncs_aligned_to_audited() {
+    let libfuncs_list_selector = ListSelector::ListName(BUILTIN_AUDITED_LIBFUNCS_LIST.to_string());
+    let expected = lookup_allowed_libfuncs_list(libfuncs_list_selector).unwrap().allowed_libfuncs;
+
+    let actual = include_str!("allowed_libfuncs.json").to_string();
+    let actual = serde_json::from_str::<AllowedLibfuncs>(&actual).unwrap().allowed_libfuncs;
+
+    // Audited libfuncs are usually added as versions progress, but can also be deprecated;
+    // test both directions.
+    let missing: Vec<_> = expected.difference(&actual).map(ToString::to_string).collect();
+    let extra: Vec<_> = actual.difference(&expected).map(ToString::to_string).collect();
+    assert_eq!(
+        (missing, extra),
+        (Vec::<String>::new(), Vec::<String>::new()),
+        "Audited libfuncs mismatch: (missing, extra)"
+    );
+}
+
+#[test]
+fn test_max_memory_usage() {
+    let contract_class = get_test_contract();
+
+    // Compile the contract class without any memory usage limit to get the expected output.
+    let compiler = SierraToCasmCompiler::new(SierraCompilationConfig {
+        max_bytecode_size: DEFAULT_MAX_BYTECODE_SIZE,
+        max_memory_usage: None,
+    });
+    let expected_executable_class = compiler.compile(contract_class.clone()).unwrap();
+
+    // Positive flow.
+    let compiler = SierraToCasmCompiler::new(SierraCompilationConfig {
+        max_bytecode_size: DEFAULT_MAX_BYTECODE_SIZE,
+        max_memory_usage: Some(DEFAULT_MAX_MEMORY_USAGE),
+    });
+    let executable_class = compiler.compile(contract_class.clone()).unwrap();
+    assert_eq!(executable_class, expected_executable_class);
+
+    // Negative flow.
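+    // 8 MiB of address space is far below what the compiler needs, so allocation should fail.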
+    let compiler = SierraToCasmCompiler::new(SierraCompilationConfig {
+        max_bytecode_size: DEFAULT_MAX_BYTECODE_SIZE,
+        max_memory_usage: Some(8 * 1024 * 1024),
+    });
+    let compilation_result = compiler.compile(contract_class);
+    assert_matches!(compilation_result, Err(CompilationUtilError::CompilationError(string))
+        if string.contains("memory allocation failure")
+    );
+}
+
+// TODO(Noamsp): Add a test to ensure that applying resource limits doesn't corrupt the
+// compilation process output.
diff --git a/crates/apollo_compile_to_casm/src/compiler.rs b/crates/apollo_compile_to_casm/src/compiler.rs
new file mode 100644
index 00000000000..06a8b80610c
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/compiler.rs
@@ -0,0 +1,55 @@
+use std::path::PathBuf;
+
+use apollo_compilation_utils::compiler_utils::compile_with_args;
+use apollo_compilation_utils::errors::CompilationUtilError;
+use apollo_compilation_utils::paths::binary_path;
+use apollo_compilation_utils::resource_limits::ResourceLimits;
+use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass;
+use cairo_lang_starknet_classes::contract_class::ContractClass;
+use tracing::info;
+
+use crate::config::SierraCompilationConfig;
+use crate::constants::CAIRO_LANG_BINARY_NAME;
+
+#[derive(Clone)]
+pub struct SierraToCasmCompiler {
+    pub config: SierraCompilationConfig,
+    path_to_binary: PathBuf,
+}
+
+impl SierraToCasmCompiler {
+    pub fn new(config: SierraCompilationConfig) -> Self {
+        let path_to_binary = binary_path(&out_dir(), CAIRO_LANG_BINARY_NAME);
+        info!("Using Sierra compiler binary at: {:?}", path_to_binary);
+        Self { config, path_to_binary }
+    }
+
+    pub fn compile(
+        &self,
+        contract_class: ContractClass,
+    ) -> Result<CasmContractClass, CompilationUtilError> {
+        let compiler_binary_path = &self.path_to_binary;
+        let additional_args = &[
+            "--add-pythonic-hints",
+            "--max-bytecode-size",
+            &self.config.max_bytecode_size.to_string(),
+            // TODO(Shahak, Elin): Fix this in a safe way.
+            "--allowed-libfuncs-list-name",
+            "audited",
+        ];
+        let resource_limits = ResourceLimits::new(None, None, self.config.max_memory_usage);
+
+        let stdout = compile_with_args(
+            compiler_binary_path,
+            contract_class,
+            additional_args,
+            resource_limits,
+        )?;
+        Ok(serde_json::from_slice::<CasmContractClass>(&stdout)?)
+    }
+}
+
+// Returns the OUT_DIR. This function is only operable at run time.
+fn out_dir() -> PathBuf {
+    env!("RUNTIME_ACCESSIBLE_OUT_DIR").into()
+}
diff --git a/crates/apollo_compile_to_casm/src/config.rs b/crates/apollo_compile_to_casm/src/config.rs
new file mode 100644
index 00000000000..b70e8516811
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/config.rs
@@ -0,0 +1,46 @@
+use std::collections::BTreeMap;
+
+use apollo_config::dumping::{ser_optional_param, ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use serde::{Deserialize, Serialize};
+use validator::Validate;
+
+// TODO(Noa): Reconsider the default values.
+pub const DEFAULT_MAX_BYTECODE_SIZE: usize = 80 * 1024;
+pub const DEFAULT_MAX_MEMORY_USAGE: u64 = 5 * 1024 * 1024 * 1024;
+
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)]
+pub struct SierraCompilationConfig {
+    /// CASM bytecode size limit (in felts).
+    pub max_bytecode_size: usize,
+    /// Compilation process’s virtual memory (address space) byte limit.
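+    /// `None` means no memory limit is applied to the compilation subprocess.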
+    pub max_memory_usage: Option<u64>,
+}
+
+impl Default for SierraCompilationConfig {
+    fn default() -> Self {
+        Self {
+            max_bytecode_size: DEFAULT_MAX_BYTECODE_SIZE,
+            max_memory_usage: Some(DEFAULT_MAX_MEMORY_USAGE),
+        }
+    }
+}
+
+impl SerializeConfig for SierraCompilationConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        let mut dump = BTreeMap::from([ser_param(
+            "max_bytecode_size",
+            &self.max_bytecode_size,
+            "Limitation of compiled CASM bytecode size (felts).",
+            ParamPrivacyInput::Public,
+        )]);
+        dump.extend(ser_optional_param(
+            &self.max_memory_usage,
+            DEFAULT_MAX_MEMORY_USAGE,
+            "max_memory_usage",
+            "Limitation of compilation process's virtual memory (bytes).",
+            ParamPrivacyInput::Public,
+        ));
+        dump
+    }
+}
diff --git a/crates/apollo_compile_to_casm/src/constants.rs b/crates/apollo_compile_to_casm/src/constants.rs
new file mode 100644
index 00000000000..2a3d9cbb742
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/constants.rs
@@ -0,0 +1,5 @@
+// Note: This module includes constants that are needed during build and run times. It must
+// not contain functionality that is available in only one of these modes. Specifically, it
+// must avoid relying on env variables such as 'CARGO_*' or 'OUT_DIR'.
+
+pub(crate) const CAIRO_LANG_BINARY_NAME: &str = "starknet-sierra-compile";
diff --git a/crates/apollo_compile_to_casm/src/lib.rs b/crates/apollo_compile_to_casm/src/lib.rs
new file mode 100644
index 00000000000..c7ce2d69401
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/lib.rs
@@ -0,0 +1,89 @@
+//! A lib for compiling Sierra into Casm.
+use apollo_compilation_utils::class_utils::into_contract_class_for_compilation;
+use apollo_compilation_utils::errors::CompilationUtilError;
+use apollo_compile_to_casm_types::{RawClass, RawExecutableClass, RawExecutableHashedClass};
+use apollo_infra::component_definitions::{default_component_start_fn, ComponentStarter};
+use apollo_proc_macros::sequencer_latency_histogram;
+use async_trait::async_trait;
+use starknet_api::contract_class::{ContractClass, SierraVersion};
+use starknet_api::core::CompiledClassHash;
+use starknet_api::state::SierraContractClass;
+use starknet_api::StarknetApiError;
+use thiserror::Error;
+use tracing::instrument;
+
+use crate::compiler::SierraToCasmCompiler;
+use crate::config::SierraCompilationConfig;
+use crate::metrics::{register_metrics, COMPILATION_DURATION};
+
+pub mod communication;
+pub mod compiler;
+pub mod config;
+pub mod constants;
+pub mod metrics;
+
+#[cfg(test)]
+#[path = "compile_test.rs"]
+pub mod compile_test;
+
+pub type SierraCompilerResult<T> = Result<T, SierraCompilerError>;
+
+#[derive(Debug, Error)]
+pub enum SierraCompilerError {
+    #[error(transparent)]
+    ClassSerde(#[from] serde_json::Error),
+    #[error(transparent)]
+    CompilationFailed(#[from] CompilationUtilError),
+    #[error("Failed to parse Sierra version: {0}")]
+    SierraVersionFormat(StarknetApiError),
+}
+
+impl From<SierraCompilerError> for apollo_compile_to_casm_types::SierraCompilerError {
+    fn from(error: SierraCompilerError) -> Self {
+        apollo_compile_to_casm_types::SierraCompilerError::CompilationFailed(error.to_string())
+    }
+}
+
+// TODO(Elin): consider generalizing the compiler if invocation implementations are added.
+#[derive(Clone)]
+pub struct SierraCompiler {
+    compiler: SierraToCasmCompiler,
+}
+
+impl SierraCompiler {
+    pub fn new(compiler: SierraToCasmCompiler) -> Self {
+        Self { compiler }
+    }
+
+    // TODO(Elin): move (de)serialization to infra. layer.
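+    // Compiles a serialized Sierra class, returning the serialized executable class together
+    // with its compiled class hash.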
+    #[instrument(skip(self, class), err)]
+    #[sequencer_latency_histogram(COMPILATION_DURATION, true)]
+    pub fn compile(&self, class: RawClass) -> SierraCompilerResult<RawExecutableHashedClass> {
+        let class = SierraContractClass::try_from(class)?;
+        let sierra_version = SierraVersion::extract_from_program(&class.sierra_program)
+            .map_err(SierraCompilerError::SierraVersionFormat)?;
+        let class = into_contract_class_for_compilation(&class);
+
+        // TODO(Elin): handle resources (whether here or an infra. layer load-balancing).
+        let executable_class = self.compiler.compile(class)?;
+        // TODO(Elin): consider spawning a worker for hash calculation.
+        let executable_class_hash = CompiledClassHash(executable_class.compiled_class_hash());
+        let executable_class = ContractClass::V1((executable_class, sierra_version));
+        let executable_class = RawExecutableClass::try_from(executable_class)?;
+
+        Ok((executable_class, executable_class_hash))
+    }
+}
+
+pub fn create_sierra_compiler(config: SierraCompilationConfig) -> SierraCompiler {
+    let compiler = SierraToCasmCompiler::new(config);
+    SierraCompiler::new(compiler)
+}
+
+#[async_trait]
+impl ComponentStarter for SierraCompiler {
+    async fn start(&mut self) {
+        default_component_start_fn::<Self>().await;
+        register_metrics();
+    }
+}
diff --git a/crates/apollo_compile_to_casm/src/metrics.rs b/crates/apollo_compile_to_casm/src/metrics.rs
new file mode 100644
index 00000000000..89403ceb1fa
--- /dev/null
+++ b/crates/apollo_compile_to_casm/src/metrics.rs
@@ -0,0 +1,11 @@
+use apollo_metrics::define_metrics;
+
+define_metrics!(
+    CompileToCasm => {
+        MetricHistogram { COMPILATION_DURATION, "compile_to_casm_compilation_duration", "Server-side compilation to casm duration in seconds" },
+    },
+);
+
+pub(crate) fn register_metrics() {
+    COMPILATION_DURATION.register();
+}
diff --git a/crates/apollo_compile_to_casm_types/Cargo.toml b/crates/apollo_compile_to_casm_types/Cargo.toml
new file mode 100644
index 00000000000..0f84567bd34
--- /dev/null
+++ b/crates/apollo_compile_to_casm_types/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "apollo_compile_to_casm_types"
+edition.workspace = true
+license.workspace = true
+repository.workspace = true
+version.workspace = true
+
+[lints]
+workspace = true
+
+[features]
+testing = ["mockall"]
+
+[dependencies]
+apollo_infra.workspace = true
+apollo_proc_macros.workspace = true
+async-trait.workspace = true
+mockall = { workspace = true, optional = true }
+serde = { workspace = true, features = ["derive"] }
+serde_json.workspace = true
+starknet_api.workspace = true
+thiserror.workspace = true
+
+[dev-dependencies]
+mockall.workspace = true
diff --git a/crates/apollo_compile_to_casm_types/src/lib.rs b/crates/apollo_compile_to_casm_types/src/lib.rs
new file mode 100644
index 00000000000..3324df68cf9
--- /dev/null
+++ b/crates/apollo_compile_to_casm_types/src/lib.rs
@@ -0,0 +1,189 @@
+use std::fs::{File, OpenOptions};
+use std::io::{BufReader, BufWriter};
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use apollo_infra::component_client::{ClientError, LocalComponentClient, RemoteComponentClient};
+use apollo_infra::component_definitions::{ComponentClient, ComponentRequestAndResponseSender};
+use apollo_proc_macros::handle_all_response_variants;
+use async_trait::async_trait;
+#[cfg(any(feature = "testing", test))]
+use mockall::automock;
+use serde::{Deserialize, Serialize};
+use starknet_api::contract_class::ContractClass;
+use starknet_api::core::CompiledClassHash;
+use starknet_api::state::SierraContractClass;
+use thiserror::Error;
+
+pub type SierraCompilerResult<T> = Result<T, SierraCompilerError>;
+pub type SierraCompilerClientResult<T> = Result<T, SierraCompilerClientError>;
+
+pub type RawExecutableHashedClass = (RawExecutableClass, CompiledClassHash);
+
+pub type LocalSierraCompilerClient =
+    LocalComponentClient<SierraCompilerRequest, SierraCompilerResponse>;
+pub type RemoteSierraCompilerClient =
+    RemoteComponentClient<SierraCompilerRequest, SierraCompilerResponse>;
+pub type SharedSierraCompilerClient = Arc<dyn SierraCompilerClient>;
+pub type SierraCompilerRequestAndResponseSender =
+    ComponentRequestAndResponseSender<SierraCompilerRequest, SierraCompilerResponse>;
+
+// TODO(Elin): change to a more efficient serde (bytes, or something similar).
+// A prerequisite for this is to solve the lack of serde-untagged support.
+
+type RawClassResult<T> = Result<T, RawClassError>;
+pub type RawClass = SerializedClass<SierraContractClass>;
+pub type RawExecutableClass = SerializedClass<ContractClass>;
+
+#[derive(Debug, Error)]
+pub enum RawClassError {
+    #[error(transparent)]
+    IoError(#[from] std::io::Error),
+    #[error(transparent)]
+    WriteError(#[from] serde_json::Error),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub struct SerializedClass<T>(serde_json::Value, std::marker::PhantomData<T>);
+
+impl<T> SerializedClass<T> {
+    pub fn into_value(self) -> serde_json::Value {
+        self.0
+    }
+
+    pub fn size(&self) -> RawClassResult<usize> {
+        Ok(serde_json::to_string_pretty(&self.0)?.len())
+    }
+
+    fn new(value: serde_json::Value) -> Self {
+        Self(value, std::marker::PhantomData)
+    }
+
+    pub fn from_file(path: PathBuf) -> RawClassResult<Option<Self>> {
+        let file = match File::open(path) {
+            Ok(file) => file,
+            Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
+            Err(e) => return Err(e.into()),
+        };
+
+        match serde_json::from_reader(BufReader::new(file)) {
+            Ok(value) => Ok(Some(Self::new(value))),
+            // In case the file was deleted/tampered with until the actual read is done.
+            Err(e) if e.is_io() && e.to_string().contains("No such file or directory") => Ok(None),
+            Err(e) => Err(e.into()),
+        }
+    }
+
+    pub fn write_to_file(self, path: PathBuf) -> RawClassResult<()> {
+        if let Some(parent) = path.parent() {
+            std::fs::create_dir_all(parent)?;
+        }
+
+        // Open a file for writing, deleting any existing content.
+        let file = OpenOptions::new()
+            .create(true)
+            .write(true)
+            .truncate(true)
+            .open(path)
+            .expect("Failing to open file with given options is impossible");
+
+        let writer = BufWriter::new(file);
+        serde_json::to_writer_pretty(writer, &self.into_value())?;
+
+        Ok(())
+    }
+
+    #[cfg(any(feature = "testing", test))]
+    pub fn new_unchecked(value: serde_json::Value) -> Self {
+        Self::new(value)
+    }
+}
+
+impl TryFrom<SierraContractClass> for RawClass {
+    type Error = serde_json::Error;
+
+    fn try_from(class: SierraContractClass) -> Result<Self, Self::Error> {
+        Ok(Self::new(serde_json::to_value(class)?))
+    }
+}
+
+impl TryFrom<RawClass> for SierraContractClass {
+    type Error = serde_json::Error;
+
+    fn try_from(class: RawClass) -> Result<Self, Self::Error> {
+        serde_json::from_value(class.0)
+    }
+}
+
+impl TryFrom<ContractClass> for RawExecutableClass {
+    type Error = serde_json::Error;
+
+    fn try_from(class: ContractClass) -> Result<Self, Self::Error> {
+        Ok(Self::new(serde_json::to_value(class)?))
+    }
+}
+
+impl TryFrom<RawExecutableClass> for ContractClass {
+    type Error = serde_json::Error;
+
+    fn try_from(class: RawExecutableClass) -> Result<Self, Self::Error> {
+        serde_json::from_value(class.0)
+    }
+}
+
+/// Serves as the Sierra compilation unit's shared interface.
+/// Requires `Send + Sync` to allow transferring and sharing resources (inputs, futures) across
+/// threads.
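+/// Implemented by both local and remote component clients via the blanket impl below.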
+#[cfg_attr(any(feature = "testing", test), automock)]
+#[async_trait]
+pub trait SierraCompilerClient: Send + Sync {
+    async fn compile(
+        &self,
+        class: RawClass,
+    ) -> SierraCompilerClientResult<RawExecutableHashedClass>;
+}
+
+#[derive(Clone, Debug, Error, Eq, PartialEq, Serialize, Deserialize)]
+pub enum SierraCompilerError {
+    #[error("Compilation failed: {0}")]
+    CompilationFailed(String),
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum SierraCompilerClientError {
+    #[error(transparent)]
+    ClientError(#[from] ClientError),
+    #[error(transparent)]
+    SierraCompilerError(#[from] SierraCompilerError),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum SierraCompilerRequest {
+    Compile(RawClass),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum SierraCompilerResponse {
+    Compile(SierraCompilerResult<RawExecutableHashedClass>),
+}
+
+#[async_trait]
+impl<ComponentClientType> SierraCompilerClient for ComponentClientType
+where
+    ComponentClientType:
+        Send + Sync + ComponentClient<SierraCompilerRequest, SierraCompilerResponse>,
+{
+    async fn compile(
+        &self,
+        class: RawClass,
+    ) -> SierraCompilerClientResult<RawExecutableHashedClass> {
+        let request = SierraCompilerRequest::Compile(class);
+        handle_all_response_variants!(
+            SierraCompilerResponse,
+            Compile,
+            SierraCompilerClientError,
+            SierraCompilerError,
+            Direct
+        )
+    }
+}
diff --git a/crates/apollo_compile_to_native/Cargo.toml b/crates/apollo_compile_to_native/Cargo.toml
new file mode 100644
index 00000000000..fdd7a2822c4
--- /dev/null
+++ b/crates/apollo_compile_to_native/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+edition.workspace = true
+license.workspace = true
+name = "apollo_compile_to_native"
+repository.workspace = true
+version.workspace = true
+description = "A utility crate for compiling Sierra code into Cairo native."
+
+[features]
+cairo_native = [
+    "apollo_compilation_utils/cairo_native",
+    "dep:apollo_compilation_utils",
+    "dep:apollo_infra_utils",
+    "dep:cairo-lang-starknet-classes",
+    "dep:cairo-native",
+    "dep:tempfile",
+]
+
+[lints]
+workspace = true
+
+[dependencies]
+apollo_compilation_utils = { workspace = true, optional = true }
+apollo_config.workspace = true
+cairo-lang-starknet-classes = { workspace = true, optional = true }
+cairo-native = { workspace = true, optional = true }
+serde.workspace = true
+tempfile = { workspace = true, optional = true }
+validator.workspace = true
+
+[dev-dependencies]
+apollo_compilation_utils = { workspace = true, features = ["testing"] }
+apollo_infra_utils.workspace = true
+assert_matches.workspace = true
+mempool_test_utils.workspace = true
+rstest.workspace = true
+toml_test_utils.workspace = true
+
+[build-dependencies]
+apollo_compilation_utils = { workspace = true, optional = true }
+apollo_infra_utils = { workspace = true, optional = true }
diff --git a/crates/apollo_compile_to_native/build.rs b/crates/apollo_compile_to_native/build.rs
new file mode 100644
index 00000000000..1bdd745113a
--- /dev/null
+++ b/crates/apollo_compile_to_native/build.rs
@@ -0,0 +1,7 @@
+#[cfg(not(feature = "cairo_native"))]
+fn main() {
+    // Cairo Native is not enabled, so we don't need to do anything.
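+    // The actual build logic lives in `build_with_cairo_native.rs`, included below when the
+    // `cairo_native` feature is enabled.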
+}
+
+#[cfg(feature = "cairo_native")]
+include!("build_with_cairo_native.rs");
diff --git a/crates/apollo_compile_to_native/build_with_cairo_native.rs b/crates/apollo_compile_to_native/build_with_cairo_native.rs
new file mode 100644
index 00000000000..36e65ec9eda
--- /dev/null
+++ b/crates/apollo_compile_to_native/build_with_cairo_native.rs
@@ -0,0 +1,36 @@
+use apollo_compilation_utils::build_utils::install_compiler_binary;
+
+include!("src/constants.rs");
+
+fn main() {
+    println!("cargo:rerun-if-changed=../../Cargo.lock");
+    println!("cargo:rerun-if-changed=build.rs");
+
+    set_run_time_out_dir_env_var();
+    install_starknet_native_compile();
+}
+
+/// Installs the `starknet-native-compile` binary from the Cairo Native crate and moves the binary
+/// to the `target` directory. The `starknet-native-compile` binary is used to compile Sierra to
+/// Native. The binary is executed as a subprocess whenever Sierra-to-Native compilation is
+/// required.
+fn install_starknet_native_compile() {
+    let binary_name = CAIRO_NATIVE_BINARY_NAME;
+    let required_version = REQUIRED_CAIRO_NATIVE_VERSION;
+
+    let cargo_install_args = &["cairo-native", "--version", required_version, "--bin", binary_name];
+    install_compiler_binary(binary_name, required_version, cargo_install_args, &out_dir());
+}
+
+// Sets the `RUNTIME_ACCESSIBLE_OUT_DIR` environment variable to the `OUT_DIR` value, which will be
+// available only after the build is completed. Most importantly, it is available during runtime.
+fn set_run_time_out_dir_env_var() {
+    let out_dir = std::env::var("OUT_DIR").expect("OUT_DIR is not set");
+    println!("cargo:rustc-env=RUNTIME_ACCESSIBLE_OUT_DIR={}", out_dir);
+}
+
+// Returns the OUT_DIR. This function is only operable at build time.
+fn out_dir() -> std::path::PathBuf {
+    std::env::var("OUT_DIR")
+        .expect("Failed to get the build time OUT_DIR environment variable")
+        .into()
+}
diff --git a/crates/apollo_compile_to_native/src/compile_test.rs b/crates/apollo_compile_to_native/src/compile_test.rs
new file mode 100644
index 00000000000..43ce866e564
--- /dev/null
+++ b/crates/apollo_compile_to_native/src/compile_test.rs
@@ -0,0 +1,57 @@
+use apollo_compilation_utils::errors::CompilationUtilError;
+use apollo_compilation_utils::test_utils::contract_class_from_file;
+use apollo_infra_utils::path::resolve_project_relative_path;
+use assert_matches::assert_matches;
+use cairo_lang_starknet_classes::contract_class::ContractClass;
+use mempool_test_utils::{FAULTY_ACCOUNT_CLASS_FILE, TEST_FILES_FOLDER};
+
+use crate::compiler::SierraToNativeCompiler;
+use crate::config::{
+    SierraCompilationConfig,
+    DEFAULT_MAX_CPU_TIME,
+    DEFAULT_MAX_FILE_SIZE,
+    DEFAULT_MAX_MEMORY_USAGE,
+    DEFAULT_OPTIMIZATION_LEVEL,
+};
+
+const SIERRA_COMPILATION_CONFIG: SierraCompilationConfig = SierraCompilationConfig {
+    compiler_binary_path: None,
+    max_file_size: Some(DEFAULT_MAX_FILE_SIZE),
+    max_cpu_time: Some(DEFAULT_MAX_CPU_TIME),
+    max_memory_usage: Some(DEFAULT_MAX_MEMORY_USAGE),
+    optimization_level: DEFAULT_OPTIMIZATION_LEVEL,
+};
+
+fn compiler() -> SierraToNativeCompiler {
+    SierraToNativeCompiler::new(SIERRA_COMPILATION_CONFIG)
+}
+
+fn get_test_contract() -> ContractClass {
+    let sierra_path =
+        resolve_project_relative_path(TEST_FILES_FOLDER).unwrap().join(FAULTY_ACCOUNT_CLASS_FILE);
+    contract_class_from_file(sierra_path)
+}
+
+fn get_faulty_test_contract() -> ContractClass {
+    let mut contract_class = get_test_contract();
+    // Truncate the sierra program to trigger an error.
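+    // A 100-felt prefix cannot be a valid Sierra program, so compilation is expected to fail.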
+    contract_class.sierra_program = contract_class.sierra_program[..100].to_vec();
+    contract_class
+}
+
+#[test]
+fn test_compile_sierra_to_native() {
+    let compiler = compiler();
+    let contract_class = get_test_contract();
+
+    let _native_contract_executor = compiler.compile(contract_class).unwrap();
+}
+
+#[test]
+fn test_negative_flow_compile_sierra_to_native() {
+    let compiler = compiler();
+    let contract_class = get_faulty_test_contract();
+
+    let result = compiler.compile(contract_class);
+    assert_matches!(result, Err(CompilationUtilError::CompilationError(..)));
+}
diff --git a/crates/apollo_compile_to_native/src/compiler.rs b/crates/apollo_compile_to_native/src/compiler.rs
new file mode 100644
index 00000000000..f656b1f3eae
--- /dev/null
+++ b/crates/apollo_compile_to_native/src/compiler.rs
@@ -0,0 +1,60 @@
+use std::path::{Path, PathBuf};
+
+use apollo_compilation_utils::compiler_utils::compile_with_args;
+use apollo_compilation_utils::errors::CompilationUtilError;
+use apollo_compilation_utils::paths::binary_path;
+use apollo_compilation_utils::resource_limits::ResourceLimits;
+use cairo_lang_starknet_classes::contract_class::ContractClass;
+use cairo_native::executor::AotContractExecutor;
+use tempfile::NamedTempFile;
+
+use crate::config::SierraCompilationConfig;
+use crate::constants::CAIRO_NATIVE_BINARY_NAME;
+
+#[derive(Clone)]
+pub struct SierraToNativeCompiler {
+    pub config: SierraCompilationConfig,
+    path_to_binary: PathBuf,
+}
+
+impl SierraToNativeCompiler {
+    pub fn new(config: SierraCompilationConfig) -> Self {
+        let path_to_binary = match &config.compiler_binary_path {
+            Some(path) => path.clone(),
+            None => binary_path(&out_dir(), CAIRO_NATIVE_BINARY_NAME),
+        };
+        Self { config, path_to_binary }
+    }
+
+    pub fn compile(
+        &self,
+        contract_class: ContractClass,
+    ) -> Result<AotContractExecutor, CompilationUtilError> {
+        let compiler_binary_path = &self.path_to_binary;
+
+        let output_file = NamedTempFile::new()?;
+        let output_file_path = output_file.path().to_str().ok_or(
+            CompilationUtilError::UnexpectedError("Failed to get output file path".to_owned()),
+        )?;
+        let optimization_level = self.config.optimization_level.to_string();
+        let additional_args = [output_file_path, "--opt-level", &optimization_level];
+        let resource_limits = ResourceLimits::new(
+            self.config.max_cpu_time,
+            self.config.max_file_size,
+            self.config.max_memory_usage,
+        );
+        let _stdout = compile_with_args(
+            compiler_binary_path,
+            contract_class,
+            &additional_args,
+            resource_limits,
+        )?;
+
+        Ok(AotContractExecutor::from_path(Path::new(&output_file_path))?.unwrap())
+    }
+}
+
+// Returns the OUT_DIR. This function is only operable at run time.
+fn out_dir() -> PathBuf {
+    env!("RUNTIME_ACCESSIBLE_OUT_DIR").into()
+}
diff --git a/crates/apollo_compile_to_native/src/config.rs b/crates/apollo_compile_to_native/src/config.rs
new file mode 100644
index 00000000000..724dbf1a89d
--- /dev/null
+++ b/crates/apollo_compile_to_native/src/config.rs
@@ -0,0 +1,79 @@
+use std::collections::BTreeMap;
+use std::path::PathBuf;
+
+use apollo_config::dumping::{ser_optional_param, ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use serde::{Deserialize, Serialize};
+use validator::Validate;
+
+// TODO(Noa): Reconsider the default values.
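+// Defaults below: 15 MiB output file, 20 seconds of CPU time, 5 GiB of address space, and
+// optimization level 2.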
+pub const DEFAULT_MAX_FILE_SIZE: u64 = 15 * 1024 * 1024;
+pub const DEFAULT_MAX_CPU_TIME: u64 = 20;
+pub const DEFAULT_MAX_MEMORY_USAGE: u64 = 5 * 1024 * 1024 * 1024;
+pub const DEFAULT_OPTIMIZATION_LEVEL: u8 = 2;
+
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)]
+pub struct SierraCompilationConfig {
+    /// Cairo Native file size limit (in bytes).
+    pub max_file_size: Option<u64>,
+    /// Compilation CPU time limit (in seconds).
+    pub max_cpu_time: Option<u64>,
+    /// Compilation process’s virtual memory (address space) byte limit.
+    pub max_memory_usage: Option<u64>,
+    /// The level of optimization to apply during compilation.
+    pub optimization_level: u8,
+    /// Compiler binary path.
+    pub compiler_binary_path: Option<PathBuf>,
+}
+
+impl Default for SierraCompilationConfig {
+    fn default() -> Self {
+        Self {
+            compiler_binary_path: None,
+            max_file_size: Some(DEFAULT_MAX_FILE_SIZE),
+            max_cpu_time: Some(DEFAULT_MAX_CPU_TIME),
+            max_memory_usage: Some(DEFAULT_MAX_MEMORY_USAGE),
+            optimization_level: DEFAULT_OPTIMIZATION_LEVEL,
+        }
+    }
+}
+
+impl SerializeConfig for SierraCompilationConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        let mut dump = BTreeMap::from([ser_param(
+            "optimization_level",
+            &self.optimization_level,
+            "The level of optimization to apply during compilation.",
+            ParamPrivacyInput::Public,
+        )]);
+        dump.extend(ser_optional_param(
+            &self.compiler_binary_path,
+            "".into(),
+            "compiler_binary_path",
+            "The path to the Sierra-to-Native compiler binary.",
+            ParamPrivacyInput::Public,
+        ));
+        dump.extend(ser_optional_param(
+            &self.max_file_size,
+            DEFAULT_MAX_FILE_SIZE,
+            "max_file_size",
+            "Limitation of compiled Cairo Native file size (bytes).",
+            ParamPrivacyInput::Public,
+        ));
+        dump.extend(ser_optional_param(
+            &self.max_cpu_time,
+            DEFAULT_MAX_CPU_TIME,
+            "max_cpu_time",
+            "Limitation of compilation cpu time (seconds).",
+            ParamPrivacyInput::Public,
+        ));
+        dump.extend(ser_optional_param(
+            &self.max_memory_usage,
+            DEFAULT_MAX_MEMORY_USAGE,
+            "max_memory_usage",
+            "Limitation of compilation process's virtual memory (bytes).",
+            ParamPrivacyInput::Public,
+        ));
+        dump
+    }
+}
diff --git a/crates/apollo_compile_to_native/src/constants.rs b/crates/apollo_compile_to_native/src/constants.rs
new file mode 100644
index 00000000000..eb57b960f7a
--- /dev/null
+++ b/crates/apollo_compile_to_native/src/constants.rs
@@ -0,0 +1,7 @@
+// Note: This module includes constants that are needed during build and run times. It must
+// not contain functionality that is available in only one of these modes. Specifically, it
+// must avoid relying on env variables such as 'CARGO_*' or 'OUT_DIR'.
+
+pub(crate) const CAIRO_NATIVE_BINARY_NAME: &str = "starknet-native-compile";
+
+pub const REQUIRED_CAIRO_NATIVE_VERSION: &str = "0.5.0-rc.6";
diff --git a/crates/apollo_compile_to_native/src/constants_test.rs b/crates/apollo_compile_to_native/src/constants_test.rs
new file mode 100644
index 00000000000..b3acf1ead48
--- /dev/null
+++ b/crates/apollo_compile_to_native/src/constants_test.rs
@@ -0,0 +1,17 @@
+use toml_test_utils::{DependencyValue, ROOT_TOML};
+
+use crate::constants::REQUIRED_CAIRO_NATIVE_VERSION;
+
+#[test]
+fn required_cairo_native_version_test() {
+    let cairo_native_version = ROOT_TOML
+        .dependencies()
+        .filter_map(|(name, value)| match (name.as_str(), value) {
+            ("cairo-native", DependencyValue::Object { version, .. }) => version.as_ref(),
+            ("cairo-native", DependencyValue::String(version)) => Some(version),
+            _ => None,
+        })
+        .next()
+        .expect("cairo-native dependency not found in root toml file.");
+    assert_eq!(REQUIRED_CAIRO_NATIVE_VERSION, cairo_native_version);
+}
diff --git a/crates/apollo_compile_to_native/src/lib.rs b/crates/apollo_compile_to_native/src/lib.rs
new file mode 100644
index 00000000000..f2662877913
--- /dev/null
+++ b/crates/apollo_compile_to_native/src/lib.rs
@@ -0,0 +1,18 @@
+//! A lib for compiling Sierra into Native.
+
+// When cairo_native is not active, this crate just exposes the config.
+pub mod config;
+
+// Include the rest of the crate when cairo_native is active.
+#[cfg(feature = "cairo_native")]
+pub mod compiler;
+#[cfg(feature = "cairo_native")]
+pub mod constants;
+
+#[cfg(all(feature = "cairo_native", test))]
+#[path = "compile_test.rs"]
+pub mod compile_test;
+
+#[cfg(all(feature = "cairo_native", test))]
+#[path = "constants_test.rs"]
+pub mod constants_test;
diff --git a/crates/apollo_config/Cargo.toml b/crates/apollo_config/Cargo.toml
new file mode 100644
index 00000000000..b9544415fce
--- /dev/null
+++ b/crates/apollo_config/Cargo.toml
@@ -0,0 +1,32 @@
+[package]
+name = "apollo_config"
+version.workspace = true
+edition.workspace = true
+repository.workspace = true
+license-file.workspace = true
+description = "A library for handling node configuration."
+
+[dependencies]
+apollo_infra_utils.workspace = true
+clap = { workspace = true, features = ["env", "string"] }
+const_format.workspace = true
+itertools.workspace = true
+serde = { workspace = true, features = ["derive"] }
+serde_json = { workspace = true, features = ["arbitrary_precision"] }
+strum_macros.workspace = true
+thiserror.workspace = true
+tracing.workspace = true
+url = { workspace = true, features = ["serde"] }
+validator = { workspace = true, features = ["derive"] }
+
+[dev-dependencies]
+apollo_infra_utils.workspace = true
+apollo_test_utils.workspace = true
+assert_matches.workspace = true
+itertools.workspace = true
+lazy_static.workspace = true
+starknet_api.workspace = true
+tempfile.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/apollo_config/README.md b/crates/apollo_config/README.md
new file mode 100644
index 00000000000..a5a0f8a97e2
--- /dev/null
+++ b/crates/apollo_config/README.md
@@ -0,0 +1,34 @@
+# apollo_config
+
+## Description
+
+apollo_config (formerly papyrus-config) is a flexible and powerful layered configuration system, originally designed for Papyrus, a Starknet node. It allows you to easily manage your node's configuration by combining several sources and providing additional helpful features.
+
+## Configuration sources
+
+Supports multiple configuration sources, in ascending order of overriding priority:
+
+- Default values
+- Configuration files (from first to last)
+- Environment variables
+- Command-line arguments
+
+## Additional features
+
+- **Support for Nested Configuration Components:** Organize your configurations into nested components, making it easy to manage complex settings for different aspects of the application.
+
+- **Usage of Pointers:** Use pointers to merge parameters that are common to multiple components. This capability helps in streamlining configurations and avoiding duplication of settings.
+
+- **Automatically-Generated Command-Line Parser:** To simplify the handling of command-line arguments, the system automatically generates a command-line parser. This means you don't have to write complex argument-parsing code; it's ready to use out of the box.
+
+- **Automatically-Generated Reference Configuration File:** Makes setup easier for users by generating a reference configuration file. This file serves as a template that highlights all available configuration options and their default values, enabling users to customize their configurations efficiently.
+
+## Documentation
+
+Developer reference documentation is available at https://docs.rs/apollo_config/. The documentation on this site is updated periodically.
+
+To view the most up-to-date documentation, enter the following command at the root directory of the project:
+
+```shell
+cargo doc --open -p apollo_config
+```
\ No newline at end of file
diff --git a/crates/papyrus_config/resources/custom_config_example.json b/crates/apollo_config/resources/custom_config_example.json
similarity index 100%
rename from crates/papyrus_config/resources/custom_config_example.json
rename to crates/apollo_config/resources/custom_config_example.json
diff --git a/crates/apollo_config/src/command.rs b/crates/apollo_config/src/command.rs
new file mode 100644
index 00000000000..45c1558dffc
--- /dev/null
+++ b/crates/apollo_config/src/command.rs
@@ -0,0 +1,91 @@
+use std::collections::BTreeMap;
+use std::path::PathBuf;
+
+use clap::{value_parser, Arg, ArgMatches, Command};
+use serde_json::{json, Value};
+
+use crate::loading::update_config_map;
+use crate::{ConfigError, ParamPath, SerializationType, SerializedParam, CONFIG_FILE_ARG_NAME};
+
+pub(crate) fn get_command_matches(
+    config_map: &BTreeMap<ParamPath, SerializedParam>,
+    command: Command,
+    command_input: Vec<String>,
+) -> Result<ArgMatches, ConfigError> {
+    Ok(command.args(build_args_parser(config_map)).try_get_matches_from(command_input)?)
+}
+
+// Takes matched arguments from the command line interface and env variables and updates the config
+// map.
+// Supports f64, u64, i64, bool and String.
+pub(crate) fn update_config_map_by_command_args(
+    config_map: &mut BTreeMap<ParamPath, Value>,
+    types_map: &BTreeMap<ParamPath, SerializationType>,
+    arg_match: &ArgMatches,
+) -> Result<(), ConfigError> {
+    for param_path_id in arg_match.ids() {
+        let param_path = param_path_id.as_str();
+        let new_value = get_arg_by_type(types_map, arg_match, param_path)?;
+        update_config_map(config_map, types_map, param_path, new_value)?;
+    }
+    Ok(())
+}
+
+// Builds the parser for the command line flags and env variables according to the types of the
+// values in the config map.
+fn build_args_parser(config_map: &BTreeMap<ParamPath, SerializedParam>) -> Vec<Arg> {
+    let mut args_parser = vec![
+        // Custom_config_file_path.
+        Arg::new(CONFIG_FILE_ARG_NAME)
+            .long(CONFIG_FILE_ARG_NAME)
+            .short('f')
+            .value_delimiter(',')
+            .help("Optionally sets a config file to use")
+            .value_parser(value_parser!(PathBuf))
+            .num_args(1..) // Allow multiple values
+            .action(clap::ArgAction::Append), // Collect multiple occurrences
+    ];
+
+    for (param_path, serialized_param) in config_map.iter() {
+        let Some(serialization_type) = serialized_param.content.get_serialization_type() else {
+            continue; // Pointer target
+        };
+        let clap_parser = match serialization_type {
+            SerializationType::Boolean => clap::value_parser!(bool),
+            SerializationType::Float => clap::value_parser!(f64).into(),
+            SerializationType::NegativeInteger => clap::value_parser!(i64).into(),
+            SerializationType::PositiveInteger => clap::value_parser!(u64).into(),
+            SerializationType::String => clap::value_parser!(String),
+        };
+
+        let arg = Arg::new(param_path)
+            .long(param_path)
+            .env(to_env_var_name(param_path))
+            .help(&serialized_param.description)
+            .value_parser(clap_parser)
+            .allow_negative_numbers(true);
+
+        args_parser.push(arg);
+    }
+    args_parser
+}
+
+// Converts clap arg_matches into json values.
+fn get_arg_by_type(
+    types_map: &BTreeMap<ParamPath, SerializationType>,
+    arg_match: &ArgMatches,
+    param_path: &str,
+) -> Result<Value, ConfigError> {
+    let serialization_type = types_map.get(param_path).expect("missing type");
+    match serialization_type {
+        SerializationType::Boolean => Ok(json!(arg_match.try_get_one::<bool>(param_path)?)),
+        SerializationType::Float => Ok(json!(arg_match.try_get_one::<f64>(param_path)?)),
+        SerializationType::NegativeInteger => Ok(json!(arg_match.try_get_one::<i64>(param_path)?)),
+        SerializationType::PositiveInteger => Ok(json!(arg_match.try_get_one::<u64>(param_path)?)),
+        SerializationType::String => Ok(json!(arg_match.try_get_one::<String>(param_path)?)),
+    }
+}
+
+fn to_env_var_name(param_path: &str) -> String {
+    param_path.replace("#is_none", "__is_none__").to_uppercase().replace('.', "__")
+}
diff --git a/crates/apollo_config/src/config_test.rs b/crates/apollo_config/src/config_test.rs
new file mode 100644
index 00000000000..18a10ebacc1
--- /dev/null
+++ b/crates/apollo_config/src/config_test.rs
@@ -0,0 +1,971 @@
+use std::collections::{BTreeMap, HashSet};
+use std::env;
+use std::fs::File;
+use std::path::PathBuf;
+use std::time::Duration;
+
+use apollo_infra_utils::path::resolve_project_relative_path;
+use assert_matches::assert_matches;
+use clap::Command;
+use itertools::chain;
+use lazy_static::lazy_static;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use tempfile::{NamedTempFile, TempDir};
+use url::Url;
+use validator::Validate;
+
+use crate::command::{get_command_matches, update_config_map_by_command_args};
+use crate::converters::{
+    deserialize_milliseconds_to_duration,
+    deserialize_optional_list_with_url_and_headers,
+    serialize_optional_list_with_url_and_headers,
+    UrlAndHeaders,
+};
+use crate::dumping::{
+    combine_config_map_and_pointers,
+    generate_struct_pointer,
+    prepend_sub_config_name,
+    required_param_description,
+    ser_generated_param,
+    ser_optional_param,
+    ser_optional_sub_config,
+    ser_param,
+    ser_pointer_target_param,
+    ser_pointer_target_required_param,
+    ser_required_param,
+    set_pointing_param_paths,
+    SerializeConfig,
+};
+use crate::loading::{
+    load,
+    load_and_process_config,
+    split_pointers_map,
+    split_values_and_types,
+    update_config_map_by_pointers,
+    update_optional_values,
+};
+use crate::presentation::get_config_presentation;
+use crate::{
+    ConfigError,
+    ParamPath,
+    ParamPrivacy,
+    ParamPrivacyInput,
+    SerializationType,
+    SerializedContent,
+    SerializedParam,
+    CONFIG_FILE_ARG,
+};
+
+lazy_static! {
{ + static ref CUSTOM_CONFIG_PATH: PathBuf = + resolve_project_relative_path("crates/apollo_config/resources/custom_config_example.json") + .unwrap(); +} + +#[derive(Clone, Copy, Default, Serialize, Deserialize, Debug, PartialEq, Validate)] +struct InnerConfig { + #[validate(range(min = 0, max = 10))] + o: usize, +} + +impl SerializeConfig for InnerConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ser_param("o", &self.o, "This is o.", ParamPrivacyInput::Public)]) + } +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Validate)] +struct OuterConfig { + opt_elem: Option, + opt_config: Option, + #[validate] + inner_config: InnerConfig, +} + +impl SerializeConfig for OuterConfig { + fn dump(&self) -> BTreeMap { + chain!( + ser_optional_param( + &self.opt_elem, + 1, + "opt_elem", + "This is elem.", + ParamPrivacyInput::Public + ), + ser_optional_sub_config(&self.opt_config, "opt_config"), + prepend_sub_config_name(self.inner_config.dump(), "inner_config"), + ) + .collect() + } +} + +#[test] +fn dump_and_load_config() { + let some_outer_config = OuterConfig { + opt_elem: Some(2), + opt_config: Some(InnerConfig { o: 3 }), + inner_config: InnerConfig { o: 4 }, + }; + let none_outer_config = + OuterConfig { opt_elem: None, opt_config: None, inner_config: InnerConfig { o: 5 } }; + + for outer_config in [some_outer_config, none_outer_config] { + let (mut dumped, _) = split_values_and_types(outer_config.dump()); + update_optional_values(&mut dumped); + let loaded_config = load::(&dumped).unwrap(); + assert_eq!(loaded_config, outer_config); + } +} + +#[test] +fn test_validation() { + let outer_config = + OuterConfig { opt_elem: None, opt_config: None, inner_config: InnerConfig { o: 20 } }; + assert!(outer_config.validate().is_err()); +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +struct TypicalConfig { + #[serde(deserialize_with = "deserialize_milliseconds_to_duration")] + a: Duration, + b: String, + c: bool, + d: i64, + e: u64, + f: f64, +} + +impl SerializeConfig for TypicalConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ + ser_param( + "a", + &self.a.as_millis(), + "This is a as milliseconds.", + ParamPrivacyInput::Public, + ), + ser_param("b", &self.b, "This is b.", ParamPrivacyInput::Public), + ser_param("c", &self.c, "This is c.", ParamPrivacyInput::Private), + ser_param("d", &self.d, "This is d.", ParamPrivacyInput::Public), + ser_param("e", &self.e, "This is e.", ParamPrivacyInput::Public), + ser_param("f", &self.f, "This is f.", ParamPrivacyInput::Public), + ]) + } +} + +#[test] +fn test_update_dumped_config() { + let command = Command::new("Testing"); + let dumped_config = TypicalConfig { + a: Duration::from_secs(1), + b: "bbb".to_owned(), + c: false, + d: -1, + e: 10, + f: 1.5, + } + .dump(); + let args = vec!["Testing", "--a", "1234", "--b", "15", "--d", "-2", "--e", "20", "--f", "0.5"]; + env::set_var("C", "true"); + let args: Vec = args.into_iter().map(|s| s.to_owned()).collect(); + + let arg_matches = get_command_matches(&dumped_config, command, args).unwrap(); + let (mut config_map, required_map) = split_values_and_types(dumped_config); + update_config_map_by_command_args(&mut config_map, &required_map, &arg_matches).unwrap(); + + assert_eq!(json!(1234), config_map["a"]); + assert_eq!(json!("15"), config_map["b"]); + assert_eq!(json!(true), config_map["c"]); + assert_eq!(json!(-2), config_map["d"]); + assert_eq!(json!(20), config_map["e"]); + assert_eq!(json!(0.5), config_map["f"]); + + let loaded_config: TypicalConfig = 
load(&config_map).unwrap(); + assert_eq!(Duration::from_millis(1234), loaded_config.a); +} + +#[test] +fn test_env_nested_params() { + let command = Command::new("Testing"); + let dumped_config = OuterConfig { + opt_elem: Some(1), + opt_config: Some(InnerConfig { o: 2 }), + inner_config: InnerConfig { o: 3 }, + } + .dump(); + let args = vec!["Testing", "--opt_elem", "1234"]; + env::set_var("OPT_CONFIG____IS_NONE__", "true"); + env::set_var("INNER_CONFIG__O", "4"); + let args: Vec = args.into_iter().map(|s| s.to_owned()).collect(); + + let arg_matches = get_command_matches(&dumped_config, command, args).unwrap(); + let (mut config_map, required_map) = split_values_and_types(dumped_config); + update_config_map_by_command_args(&mut config_map, &required_map, &arg_matches).unwrap(); + + assert_eq!(json!(1234), config_map["opt_elem"]); + assert_eq!(json!(true), config_map["opt_config.#is_none"]); + assert_eq!(json!(4), config_map["inner_config.o"]); + + update_optional_values(&mut config_map); + + let loaded_config: OuterConfig = load(&config_map).unwrap(); + assert_eq!(Some(1234), loaded_config.opt_elem); + assert_eq!(None, loaded_config.opt_config); + assert_eq!(4, loaded_config.inner_config.o); +} + +#[test] +fn test_config_presentation() { + let config = TypicalConfig { + a: Duration::from_secs(1), + b: "bbb".to_owned(), + c: false, + d: -1, + e: 10, + f: 0.5, + }; + let presentation = get_config_presentation(&config, true).unwrap(); + let keys: Vec<_> = presentation.as_object().unwrap().keys().collect(); + assert_eq!(keys, vec!["a", "b", "c", "d", "e", "f"]); + + let public_presentation = get_config_presentation(&config, false).unwrap(); + let keys: Vec<_> = public_presentation.as_object().unwrap().keys().collect(); + assert_eq!(keys, vec!["a", "b", "d", "e", "f"]); +} + +#[test] +fn test_nested_config_presentation() { + let configs = vec![ + OuterConfig { + opt_elem: Some(1), + opt_config: Some(InnerConfig { o: 2 }), + inner_config: InnerConfig { o: 3 }, + }, + OuterConfig { + opt_elem: None, + opt_config: Some(InnerConfig { o: 2 }), + inner_config: InnerConfig { o: 3 }, + }, + OuterConfig { opt_elem: Some(1), opt_config: None, inner_config: InnerConfig { o: 3 } }, + ]; + + for config in configs { + let presentation = get_config_presentation(&config, true).unwrap(); + let keys: Vec<_> = presentation.as_object().unwrap().keys().collect(); + assert_eq!(keys, vec!["inner_config", "opt_config", "opt_elem"]); + let public_presentation = get_config_presentation(&config, false).unwrap(); + let keys: Vec<_> = public_presentation.as_object().unwrap().keys().collect(); + assert_eq!(keys, vec!["inner_config", "opt_config", "opt_elem"]); + } +} + +#[test] +fn test_pointers_flow() { + const TARGET_PARAM_NAME: &str = "a"; + const TARGET_PARAM_DESCRIPTION: &str = "This is common a."; + const POINTING_PARAM_DESCRIPTION: &str = "This is a."; + const PUBLIC_POINTING_PARAM_NAME: &str = "public_a.a"; + const PRIVATE_POINTING_PARAM_NAME: &str = "private_a.a"; + const WHITELISTED_POINTING_PARAM_NAME: &str = "non_pointing.a"; + const VALUE: usize = 5; + + let config_map = BTreeMap::from([ + ser_param( + PUBLIC_POINTING_PARAM_NAME, + &json!(VALUE), + POINTING_PARAM_DESCRIPTION, + ParamPrivacyInput::Public, + ), + ser_param( + PRIVATE_POINTING_PARAM_NAME, + &json!(VALUE), + POINTING_PARAM_DESCRIPTION, + ParamPrivacyInput::Private, + ), + ser_param( + WHITELISTED_POINTING_PARAM_NAME, + &json!(VALUE), + POINTING_PARAM_DESCRIPTION, + ParamPrivacyInput::Private, + ), + ]); + let pointers = vec![( + 
ser_pointer_target_param(TARGET_PARAM_NAME, &json!(10), TARGET_PARAM_DESCRIPTION), + HashSet::from([ + PUBLIC_POINTING_PARAM_NAME.to_string(), + PRIVATE_POINTING_PARAM_NAME.to_string(), + ]), + )]; + let non_pointer_params = HashSet::from([WHITELISTED_POINTING_PARAM_NAME.to_string()]); + let stored_map = + combine_config_map_and_pointers(config_map, &pointers, &non_pointer_params).unwrap(); + + // Assert the pointing parameters are correctly set. + assert_eq!( + stored_map[PUBLIC_POINTING_PARAM_NAME], + json!(SerializedParam { + description: POINTING_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::PointerTarget(TARGET_PARAM_NAME.to_owned()), + privacy: ParamPrivacy::Public, + }) + ); + assert_eq!( + stored_map[PRIVATE_POINTING_PARAM_NAME], + json!(SerializedParam { + description: POINTING_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::PointerTarget(TARGET_PARAM_NAME.to_owned()), + privacy: ParamPrivacy::Private, + }) + ); + + // Assert the whitelisted parameter is correctly set. + assert_eq!( + stored_map[WHITELISTED_POINTING_PARAM_NAME], + json!(SerializedParam { + description: POINTING_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::DefaultValue(json!(VALUE)), + privacy: ParamPrivacy::Private, + }) + ); + + // Assert the pointed parameter is correctly set as a required parameter. + assert_eq!( + stored_map[TARGET_PARAM_NAME], + json!(SerializedParam { + description: TARGET_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::DefaultValue(json!(10)), + privacy: ParamPrivacy::TemporaryValue, + }) + ); + let serialized = serde_json::to_string(&stored_map).unwrap(); + let loaded = serde_json::from_str(&serialized).unwrap(); + let (loaded_config_map, loaded_pointers_map) = split_pointers_map(loaded); + let (mut config_map, _) = split_values_and_types(loaded_config_map); + update_config_map_by_pointers(&mut config_map, &loaded_pointers_map).unwrap(); + assert_eq!(config_map[PUBLIC_POINTING_PARAM_NAME], json!(10)); + assert_eq!(config_map[PUBLIC_POINTING_PARAM_NAME], config_map[PRIVATE_POINTING_PARAM_NAME]); +} + +#[test] +fn test_required_pointers_flow() { + // Set up the config map and pointers. + const REQUIRED_PARAM_NAME: &str = "b"; + const REQUIRED_PARAM_DESCRIPTION: &str = "This is common required b."; + const POINTING_PARAM_DESCRIPTION: &str = "This is b."; + const PUBLIC_POINTING_PARAM_NAME: &str = "public_b.b"; + const PRIVATE_POINTING_PARAM_NAME: &str = "private_b.b"; + const WHITELISTED_POINTING_PARAM_NAME: &str = "non_pointing.b"; + const VALUE: usize = 6; + + let config_map = BTreeMap::from([ + ser_param( + PUBLIC_POINTING_PARAM_NAME, + &json!(VALUE), + POINTING_PARAM_DESCRIPTION, + ParamPrivacyInput::Public, + ), + ser_param( + PRIVATE_POINTING_PARAM_NAME, + &json!(VALUE), + POINTING_PARAM_DESCRIPTION, + ParamPrivacyInput::Private, + ), + ser_param( + WHITELISTED_POINTING_PARAM_NAME, + &json!(VALUE), + POINTING_PARAM_DESCRIPTION, + ParamPrivacyInput::Private, + ), + ]); + let pointers = vec![( + ser_pointer_target_required_param( + REQUIRED_PARAM_NAME, + SerializationType::PositiveInteger, + REQUIRED_PARAM_DESCRIPTION, + ), + HashSet::from([ + PUBLIC_POINTING_PARAM_NAME.to_string(), + PRIVATE_POINTING_PARAM_NAME.to_string(), + ]), + )]; + let non_pointer_params = HashSet::from([WHITELISTED_POINTING_PARAM_NAME.to_string()]); + let stored_map = + combine_config_map_and_pointers(config_map, &pointers, &non_pointer_params).unwrap(); + + // Assert the pointing parameters are correctly set. 
+ assert_eq!( + stored_map[PUBLIC_POINTING_PARAM_NAME], + json!(SerializedParam { + description: POINTING_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::PointerTarget(REQUIRED_PARAM_NAME.to_owned()), + privacy: ParamPrivacy::Public, + }) + ); + assert_eq!( + stored_map[PRIVATE_POINTING_PARAM_NAME], + json!(SerializedParam { + description: POINTING_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::PointerTarget(REQUIRED_PARAM_NAME.to_owned()), + privacy: ParamPrivacy::Private, + }) + ); + + // Assert the whitelisted parameter is correctly set. + assert_eq!( + stored_map[WHITELISTED_POINTING_PARAM_NAME], + json!(SerializedParam { + description: POINTING_PARAM_DESCRIPTION.to_owned(), + content: SerializedContent::DefaultValue(json!(VALUE)), + privacy: ParamPrivacy::Private, + }) + ); + + // Assert the pointed parameter is correctly set as a required parameter. + assert_eq!( + stored_map[REQUIRED_PARAM_NAME], + json!(SerializedParam { + description: required_param_description(REQUIRED_PARAM_DESCRIPTION).to_owned(), + content: SerializedContent::ParamType(SerializationType::PositiveInteger), + privacy: ParamPrivacy::TemporaryValue, + }) + ); +} + +#[test] +#[should_panic( + expected = "The target param should_be_pointing.c should point to c, or to be whitelisted." +)] +fn test_missing_pointer_flow() { + const TARGET_PARAM_NAME: &str = "c"; + const TARGET_PARAM_DESCRIPTION: &str = "This is common c."; + const PARAM_DESCRIPTION: &str = "This is c."; + const NON_POINTING_PARAM_NAME: &str = "should_be_pointing.c"; + + // Define a non-pointing parameter and a target pointer such that the parameter name matches the + // target. + let config_map = BTreeMap::from([ser_param( + NON_POINTING_PARAM_NAME, + &json!(7), + PARAM_DESCRIPTION, + ParamPrivacyInput::Private, + )]); + let pointers = vec![( + ser_pointer_target_param(TARGET_PARAM_NAME, &json!(10), TARGET_PARAM_DESCRIPTION), + HashSet::new(), + )]; + // Do not whitelist the non-pointing parameter. + let non_pointer_params = HashSet::new(); + + // Attempt to combine the config map and pointers. This should panic. + combine_config_map_and_pointers(config_map, &pointers, &non_pointer_params).unwrap(); +} + +#[test] +fn test_replace_pointers() { + let (mut config_map, _) = split_values_and_types(BTreeMap::from([ser_param( + "a", + &json!(5), + "This is a.", + ParamPrivacyInput::Public, + )])); + let pointers_map = + BTreeMap::from([("b".to_owned(), "a".to_owned()), ("c".to_owned(), "a".to_owned())]); + update_config_map_by_pointers(&mut config_map, &pointers_map).unwrap(); + assert_eq!(config_map["a"], config_map["b"]); + assert_eq!(config_map["a"], config_map["c"]); + + let err = update_config_map_by_pointers(&mut BTreeMap::default(), &pointers_map).unwrap_err(); + assert_matches!(err, ConfigError::PointerTargetNotFound { .. }); +} + +#[test] +fn test_struct_pointers() { + const TARGET_PREFIX: &str = "base"; + let target_value = + RequiredConfig { param_path: "Not a default param_path.".to_owned(), num: 10 }; + let config_map = StructPointersConfig::default().dump(); + + let pointers = generate_struct_pointer( + TARGET_PREFIX.to_owned(), + &target_value, + set_pointing_param_paths(&["a", "b"]), + ); + let stored_map = + combine_config_map_and_pointers(config_map, &pointers, &HashSet::default()).unwrap(); + + // Assert the pointing parameters are correctly set. 
+ assert_eq!( + stored_map["a.param_path"], + json!(SerializedParam { + description: required_param_description(RequiredConfig::param_path_description()) + .to_owned(), + content: SerializedContent::PointerTarget( + format!("{TARGET_PREFIX}.param_path").to_owned() + ), + privacy: ParamPrivacy::Public, + }) + ); + assert_eq!( + stored_map["a.num"], + json!(SerializedParam { + description: RequiredConfig::num_description().to_owned(), + content: SerializedContent::PointerTarget(format!("{TARGET_PREFIX}.num").to_owned()), + privacy: ParamPrivacy::Public, + }) + ); + assert_eq!( + stored_map["b.param_path"], + json!(SerializedParam { + description: required_param_description(RequiredConfig::param_path_description()) + .to_owned(), + content: SerializedContent::PointerTarget( + format!("{TARGET_PREFIX}.param_path").to_owned() + ), + privacy: ParamPrivacy::Public, + }) + ); + assert_eq!( + stored_map["b.num"], + json!(SerializedParam { + description: RequiredConfig::num_description().to_owned(), + content: SerializedContent::PointerTarget(format!("{TARGET_PREFIX}.num").to_owned()), + privacy: ParamPrivacy::Public, + }) + ); + + // Assert the pointed parameter is correctly set. + assert_eq!( + stored_map[format!("{TARGET_PREFIX}.param_path").to_owned()], + json!(SerializedParam { + description: required_param_description(RequiredConfig::param_path_description()) + .to_owned(), + content: SerializedContent::ParamType(SerializationType::String), + privacy: ParamPrivacy::TemporaryValue, + }) + ); + assert_eq!( + stored_map[format!("{TARGET_PREFIX}.num").to_owned()], + json!(SerializedParam { + description: RequiredConfig::num_description().to_owned(), + content: SerializedContent::DefaultValue(json!(10)), + privacy: ParamPrivacy::TemporaryValue, + }) + ); +} + +#[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq)] +struct StructPointersConfig { + pub a: RequiredConfig, + pub b: RequiredConfig, +} +impl SerializeConfig for StructPointersConfig { + fn dump(&self) -> BTreeMap { + let mut dump = BTreeMap::new(); + dump.append(&mut prepend_sub_config_name(self.a.dump(), "a")); + dump.append(&mut prepend_sub_config_name(self.b.dump(), "b")); + dump + } +} + +#[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq)] +struct CustomConfig { + param_path: String, + #[serde(default)] + seed: usize, +} + +impl SerializeConfig for CustomConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ + ser_param( + "param_path", + &self.param_path, + "This is param_path.", + ParamPrivacyInput::Public, + ), + ser_generated_param( + "seed", + SerializationType::PositiveInteger, + "A dummy seed with generated default = 0.", + ParamPrivacyInput::Public, + ), + ]) + } +} + +// Loads CustomConfig from args. 
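The `seed` field of `CustomConfig` above pairs `ser_generated_param` with `#[serde(default)]`: when no value reaches the flattened map, deserialization falls back to the type's default rather than failing (this is what `test_generated_type` below exercises). A minimal standalone sketch of that serde behavior, independent of this crate:

```rust
use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize, Debug, PartialEq)]
struct Generated {
    param_path: String,
    // Mirrors CustomConfig::seed: an absent value falls back to Default (0 for usize).
    #[serde(default)]
    seed: usize,
}

fn main() {
    let absent: Generated =
        serde_json::from_value(json!({ "param_path": "default value" })).unwrap();
    assert_eq!(absent.seed, 0); // The generated default.

    let explicit: Generated =
        serde_json::from_value(json!({ "param_path": "default value", "seed": 7 })).unwrap();
    assert_eq!(explicit.seed, 7); // An explicit value wins.
}
```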
+fn load_custom_config(args: Vec<&str>) -> CustomConfig { + let dir = TempDir::new().unwrap(); + let file_path = dir.path().join("config.json"); + CustomConfig { param_path: "default value".to_owned(), seed: 5 } + .dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap()) + .unwrap(); + + load_and_process_config::( + File::open(file_path).unwrap(), + Command::new("Program"), + args.into_iter().map(|s| s.to_owned()).collect(), + false, + ) + .unwrap() +} + +#[test] +fn test_load_default_config() { + let args = vec!["Testing"]; + let param_path = load_custom_config(args).param_path; + assert_eq!(param_path, "default value"); +} + +#[test] +fn test_load_custom_config_file() { + let args = vec!["Testing", "-f", CUSTOM_CONFIG_PATH.to_str().unwrap()]; + let param_path = load_custom_config(args).param_path; + assert_eq!(param_path, "custom value"); +} + +#[test] +fn test_load_custom_config_file_and_args() { + let args = vec![ + "Testing", + CONFIG_FILE_ARG, + CUSTOM_CONFIG_PATH.to_str().unwrap(), + "--param_path", + "command value", + ]; + let param_path = load_custom_config(args).param_path; + assert_eq!(param_path, "command value"); +} + +#[test] +fn test_load_many_custom_config_files() { + let custom_config_path = CUSTOM_CONFIG_PATH.to_str().unwrap(); + let cli_config_param = format!("{custom_config_path},{custom_config_path}"); + let args = vec!["Testing", "-f", cli_config_param.as_str()]; + let param_path = load_custom_config(args).param_path; + assert_eq!(param_path, "custom value"); +} + +// Make sure that if we have a field `foo_bar` and an optional field called `foo` with a value of +// None, we don't remove the foo_bar field from the config. +// This test was added following bug #37984 (see bug for more details). +#[test] +fn load_config_allows_optional_fields_can_be_prefixes_of_other_fields() { + #[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq)] + struct ConfigWithOptionalAndPrefixField { + foo: Option, + foo_non_optional: String, + } + impl SerializeConfig for ConfigWithOptionalAndPrefixField { + fn dump(&self) -> BTreeMap { + let mut res = BTreeMap::from([ser_param( + "foo_non_optional", + &self.foo_non_optional, + "This is foo_non_optional.", + ParamPrivacyInput::Public, + )]); + res.extend(ser_optional_param( + &self.foo, + "foo".to_string(), + "foo", + "This is foo.", + ParamPrivacyInput::Public, + )); + res + } + } + + let config_file = NamedTempFile::new().expect("Failed to create test config file"); + let file_path = config_file.path(); + ConfigWithOptionalAndPrefixField { + foo: None, + foo_non_optional: "bar non optional".to_string(), + } + .dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap()) + .unwrap(); + + load_and_process_config::( + File::open(file_path).unwrap(), + Command::new("Program"), + vec![], + false, + ) + .expect("Unexpected error from loading test config."); +} + +#[test] +fn test_generated_type() { + let args = vec!["Testing"]; + assert_eq!(load_custom_config(args).seed, 0); + + let args = vec!["Testing", "--seed", "7"]; + assert_eq!(load_custom_config(args).seed, 7); +} + +#[test] +fn serialization_precision() { + let input = + "{\"value\":244116128358498188146337218061232635775543270890529169229936851982759783745}"; + let serialized = serde_json::from_str::(input).unwrap(); + let deserialized = serde_json::to_string(&serialized).unwrap(); + assert_eq!(input, deserialized); +} + +#[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq)] +struct RequiredConfig { + param_path: String, + num: usize, +} + 
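`RequiredConfig` above dumps `param_path` via `ser_required_param`: the dump carries only a type, so the value must arrive from the command line or a custom config file. Required-ness is encoded as a fixed description prefix (see `required_param_description` and `SerializedParam::is_required` elsewhere in this diff); a standalone sketch of that convention:

```rust
// The prefix constant and helpers mirror the definitions in this diff.
const REQUIRED_PARAM_DESCRIPTION_PREFIX: &str = "A required param!";

fn required_param_description(description: &str) -> String {
    format!("{} {}", REQUIRED_PARAM_DESCRIPTION_PREFIX, description)
}

fn is_required(description: &str) -> bool {
    description.starts_with(REQUIRED_PARAM_DESCRIPTION_PREFIX)
}

fn main() {
    let desc = required_param_description("This is param_path.");
    assert_eq!(desc, "A required param! This is param_path.");
    assert!(is_required(&desc));
}
```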
+impl RequiredConfig { + pub const fn param_path_description() -> &'static str { + "This is param_path." + } + pub const fn num_description() -> &'static str { + "This is num." + } +} + +impl SerializeConfig for RequiredConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ + ser_required_param( + "param_path", + SerializationType::String, + Self::param_path_description(), + ParamPrivacyInput::Public, + ), + ser_param("num", &self.num, Self::num_description(), ParamPrivacyInput::Public), + ]) + } +} + +// Loads param_path of RequiredConfig from args. +fn load_required_param_path(args: Vec<&str>) -> String { + let dir = TempDir::new().unwrap(); + let file_path = dir.path().join("config.json"); + RequiredConfig { param_path: "default value".to_owned(), num: 3 } + .dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap()) + .unwrap(); + + let loaded_config = load_and_process_config::( + File::open(file_path).unwrap(), + Command::new("Program"), + args.into_iter().map(|s| s.to_owned()).collect(), + false, + ) + .unwrap(); + loaded_config.param_path +} + +#[test] +fn test_negative_required_param() { + let dumped_config = RequiredConfig { param_path: "0".to_owned(), num: 3 }.dump(); + let (config_map, _) = split_values_and_types(dumped_config); + let err = load::(&config_map).unwrap_err(); + assert_matches!(err, ConfigError::MissingParam { .. }); +} + +#[test] +fn test_required_param_from_command() { + let args = vec!["Testing", "--param_path", "1234"]; + let param_path = load_required_param_path(args); + assert_eq!(param_path, "1234"); +} + +#[test] +fn test_required_param_from_file() { + let args = vec!["Testing", CONFIG_FILE_ARG, CUSTOM_CONFIG_PATH.to_str().unwrap()]; + let param_path = load_required_param_path(args); + assert_eq!(param_path, "custom value"); +} + +#[test] +fn deeply_nested_optionals() { + #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Default)] + struct Level0 { + level0_value: u8, + level1: Option, + } + + impl SerializeConfig for Level0 { + fn dump(&self) -> BTreeMap { + let mut res = BTreeMap::from([ser_param( + "level0_value", + &self.level0_value, + "This is level0_value.", + ParamPrivacyInput::Public, + )]); + res.extend(ser_optional_sub_config(&self.level1, "level1")); + res + } + } + + #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Default)] + struct Level1 { + pub level1_value: u8, + pub level2: Option, + } + + impl SerializeConfig for Level1 { + fn dump(&self) -> BTreeMap { + let mut res = BTreeMap::from([ser_param( + "level1_value", + &self.level1_value, + "This is level1_value.", + ParamPrivacyInput::Public, + )]); + res.extend(ser_optional_sub_config(&self.level2, "level2")); + res + } + } + + #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Default)] + struct Level2 { + pub level2_value: Option, + } + + impl SerializeConfig for Level2 { + fn dump(&self) -> BTreeMap { + ser_optional_param( + &self.level2_value, + 1, + "level2_value", + "This is level2_value.", + ParamPrivacyInput::Public, + ) + } + } + + let dir = TempDir::new().unwrap(); + let file_path = dir.path().join("config2.json"); + Level0 { level0_value: 1, level1: None } + .dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap()) + .unwrap(); + + let l0 = load_and_process_config::( + File::open(file_path.clone()).unwrap(), + Command::new("Testing"), + Vec::new(), + false, + ) + .unwrap(); + assert_eq!(l0, Level0 { level0_value: 1, level1: None }); + + let l1 = load_and_process_config::( + File::open(file_path.clone()).unwrap(), + 
Command::new("Testing"), + vec!["Testing".to_owned(), "--level1.#is_none".to_owned(), "false".to_owned()], + false, + ) + .unwrap(); + assert_eq!( + l1, + Level0 { level0_value: 1, level1: Some(Level1 { level1_value: 0, level2: None }) } + ); + + let l2 = load_and_process_config::( + File::open(file_path.clone()).unwrap(), + Command::new("Testing"), + vec![ + "Testing".to_owned(), + "--level1.#is_none".to_owned(), + "false".to_owned(), + "--level1.level2.#is_none".to_owned(), + "false".to_owned(), + ], + false, + ) + .unwrap(); + assert_eq!( + l2, + Level0 { + level0_value: 1, + level1: Some(Level1 { level1_value: 0, level2: Some(Level2 { level2_value: None }) }), + } + ); + + let l2_value = load_and_process_config::( + File::open(file_path).unwrap(), + Command::new("Testing"), + vec![ + "Testing".to_owned(), + "--level1.#is_none".to_owned(), + "false".to_owned(), + "--level1.level2.#is_none".to_owned(), + "false".to_owned(), + "--level1.level2.level2_value.#is_none".to_owned(), + "false".to_owned(), + ], + false, + ) + .unwrap(); + assert_eq!( + l2_value, + Level0 { + level0_value: 1, + level1: Some(Level1 { + level1_value: 0, + level2: Some(Level2 { level2_value: Some(1) }), + }), + } + ); +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Default)] +struct TestConfigWithNestedJson { + #[serde(deserialize_with = "deserialize_optional_list_with_url_and_headers")] + list_of_maps: Option>, +} + +impl SerializeConfig for TestConfigWithNestedJson { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ser_param( + "list_of_maps", + &serialize_optional_list_with_url_and_headers(&self.list_of_maps), + "A list of nested JSON values.", + ParamPrivacyInput::Public, + )]) + } +} + +#[test] +fn optional_list_nested_btreemaps() { + let config = TestConfigWithNestedJson { + list_of_maps: Some(vec![ + UrlAndHeaders { + url: Url::parse("http://a.com/").unwrap(), + headers: BTreeMap::from([ + ("key1".to_owned(), "value 1".to_owned()), + ("key2".to_owned(), "value 2".to_owned()), + ]), + }, + UrlAndHeaders { + url: Url::parse("http://b.com/").unwrap(), + headers: BTreeMap::from([ + ("key3".to_owned(), "value 3".to_owned()), + ("key4".to_owned(), "value 4".to_owned()), + ]), + }, + UrlAndHeaders { + url: Url::parse("http://c.com/").unwrap(), + headers: BTreeMap::from([]), + }, + UrlAndHeaders { + url: Url::parse("http://d.com/").unwrap(), + headers: BTreeMap::from([("key5".to_owned(), "value 5".to_owned())]), + }, + ]), + }; + let dumped = config.dump(); + let (config_map, _) = split_values_and_types(dumped); + let loaded_config = load::(&config_map).unwrap(); + // println!("{:#?}", loaded_config); + assert_eq!(loaded_config.list_of_maps, config.list_of_maps); + let serialized = serde_json::to_string(&config_map).unwrap(); + assert_eq!( + serialized, + r#"{"list_of_maps":"http://a.com/,key1^value 1,key2^value 2|http://b.com/,key3^value 3,key4^value 4|http://c.com/|http://d.com/,key5^value 5"}"# /* r#"{"list_of_maps":[{"url":"http://a.com/","headers":{"inner1":"1","inner2":"2"}},{"url":"http://b.com/","headers":{"inner3":"3","inner4":"4"}},{"url":"http://c.com/","headers":{}},{"url":"http://d.com/","headers":{"inner5":"5"}}]}"# */ + ); +} diff --git a/crates/apollo_config/src/converters.rs b/crates/apollo_config/src/converters.rs new file mode 100644 index 00000000000..80aa7e26993 --- /dev/null +++ b/crates/apollo_config/src/converters.rs @@ -0,0 +1,265 @@ +//! Utils for serialization and deserialization of nested config fields into simple types. +//! 
These conversions let the command line updater (which supports only numbers, strings, and
+//! booleans) handle these fields.
+//!
+//! # Example
+//!
+//! ```
+//! use std::collections::BTreeMap;
+//! use std::time::Duration;
+//!
+//! use apollo_config::converters::deserialize_milliseconds_to_duration;
+//! use apollo_config::loading::load;
+//! use serde::Deserialize;
+//! use serde_json::json;
+//!
+//! #[derive(Clone, Deserialize, Debug, PartialEq)]
+//! struct DurationConfig {
+//!     #[serde(deserialize_with = "deserialize_milliseconds_to_duration")]
+//!     dur: Duration,
+//! }
+//!
+//! let dumped_config = BTreeMap::from([("dur".to_owned(), json!(1000))]);
+//! let loaded_config = load::<DurationConfig>(&dumped_config).unwrap();
+//! assert_eq!(loaded_config.dur.as_secs(), 1);
+//! ```
+
+use std::collections::{BTreeMap, HashMap};
+use std::str::FromStr;
+use std::time::Duration;
+
+use serde::de::Error;
+use serde::{Deserialize, Deserializer, Serialize};
+use url::Url;
+
+/// Deserializes milliseconds to duration object.
+pub fn deserialize_milliseconds_to_duration<'de, D>(de: D) -> Result<Duration, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let millis: u64 = Deserialize::deserialize(de)?;
+    Ok(Duration::from_millis(millis))
+}
+
+/// Deserializes seconds to duration object.
+pub fn deserialize_seconds_to_duration<'de, D>(de: D) -> Result<Duration, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let secs: u64 = Deserialize::deserialize(de)?;
+    Ok(Duration::from_secs(secs))
+}
+
+/// Deserializes float seconds to duration object.
+pub fn deserialize_float_seconds_to_duration<'de, D>(de: D) -> Result<Duration, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let secs: f64 = Deserialize::deserialize(de)?;
+    Ok(Duration::from_secs_f64(secs))
+}
+
+/// Serializes a map to "k1:v1 k2:v2" string structure.
+pub fn serialize_optional_map(optional_map: &Option<HashMap<String, String>>) -> String {
+    match optional_map {
+        None => "".to_owned(),
+        Some(map) => {
+            map.iter().map(|(k, v)| format!("{k}:{v}")).collect::<Vec<String>>().join(" ")
+        }
+    }
+}
+
+/// Deserializes a map from "k1:v1 k2:v2" string structure.
+pub fn deserialize_optional_map<'de, D>(de: D) -> Result<Option<HashMap<String, String>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let raw_str: String = Deserialize::deserialize(de)?;
+    if raw_str.is_empty() {
+        return Ok(None);
+    }
+
+    let mut map = HashMap::new();
+    for raw_pair in raw_str.split(' ') {
+        let split: Vec<&str> = raw_pair.split(':').collect();
+        if split.len() != 2 {
+            return Err(D::Error::custom(format!(
+                "pair \"{raw_pair}\" is not valid. The expected format is name:value"
+            )));
+        }
+        map.insert(split[0].to_string(), split[1].to_string());
+    }
+    Ok(Some(map))
+}
+
+/// A struct containing a URL and its associated headers.
+#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
+pub struct UrlAndHeaders {
+    /// The base URL.
+    pub url: Url,
+    /// A map of header keyword-value pairs.
+    pub headers: BTreeMap<String, String>,
+}
+
+impl UrlAndHeaders {
+    /// Reserved characters that are not allowed in keys or values.
+    const RESERVED_CHARS: [char; 2] = ['^', ','];
+
+    /// Serialize into: url,key1^val1,key2^val2,...
+    pub fn to_custom_string(&self) -> Result<String, String> {
+        for (k, v) in &self.headers {
+            Self::validate_component(k, "key")?;
+            Self::validate_component(v, "value")?;
+        }
+
+        let mut output = self.url.as_str().to_string();
+        for (key, value) in &self.headers {
+            output.push(',');
+            output.push_str(key);
+            output.push('^');
+            output.push_str(value);
+        }
+        Ok(output)
+    }
+
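A usage sketch of the format produced by `to_custom_string`, assuming the crate builds as shown in this diff (the expected strings match the round-trip test in `config_test.rs`):

```rust
use std::collections::BTreeMap;

use apollo_config::converters::UrlAndHeaders;
use url::Url;

fn main() -> Result<(), String> {
    let entry = UrlAndHeaders {
        url: Url::parse("http://a.com/").map_err(|e| e.to_string())?,
        headers: BTreeMap::from([("key1".to_owned(), "value 1".to_owned())]),
    };
    // Headers are appended to the URL as ",key^value" pairs.
    let encoded = entry.to_custom_string()?;
    assert_eq!(encoded, "http://a.com/,key1^value 1");
    // `from_custom_string` (below) is the exact inverse.
    assert_eq!(UrlAndHeaders::from_custom_string(&encoded)?, entry);
    Ok(())
}
```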
+    /// Deserialize from: url,key1^val1,key2^val2,...
+    pub fn from_custom_string(s: &str) -> Result<Self, String> {
+        // Split the string into URL and headers on the first comma.
+        let mut parts = s.splitn(2, ',');
+        let url_str = parts.next().ok_or("Missing URL")?;
+        let rest = parts.next().unwrap_or("");
+
+        let url = Url::parse(url_str).map_err(|e| format!("Invalid URL: {}", e))?;
+
+        let mut headers = BTreeMap::new();
+        if !rest.is_empty() {
+            for pair in rest.split(',') {
+                let mut kv = pair.splitn(2, '^');
+                let k = kv.next().ok_or("Missing header key")?;
+                let v = kv.next().ok_or("Missing header value")?;
+
+                Self::validate_component(k, "key")?;
+                Self::validate_component(v, "value")?;
+
+                headers.insert(k.to_string(), v.to_string());
+            }
+        }
+
+        Ok(UrlAndHeaders { url, headers })
+    }
+
+    fn validate_component(value: &str, label: &str) -> Result<(), String> {
+        if let Some(c) = value.chars().find(|c| Self::RESERVED_CHARS.contains(c)) {
+            return Err(format!("Invalid character '{}' in header {}: '{}'", c, label, value));
+        }
+        Ok(())
+    }
+}
+
+/// Serializes a vector of UrlAndHeaders structs into a '|'-separated string.
+pub fn serialize_optional_list_with_url_and_headers(
+    list: &Option<Vec<UrlAndHeaders>>,
+) -> String {
+    match list {
+        None => "".to_owned(),
+        Some(list) => list
+            .iter()
+            .map(|item| {
+                UrlAndHeaders::to_custom_string(item).expect("Failed to serialize UrlAndHeaders")
+            })
+            .collect::<Vec<String>>()
+            .join("|"),
+    }
+}
+
+/// Deserializes a '|'-separated string into a vector of UrlAndHeaders structs.
+/// Returns an error if any of the substrings cannot be parsed into a valid struct.
+pub fn deserialize_optional_list_with_url_and_headers<'de, D>(
+    de: D,
+) -> Result<Option<Vec<UrlAndHeaders>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let raw: String = Deserialize::deserialize(de)?;
+    if raw.trim().is_empty() {
+        return Ok(None);
+    }
+    let items = raw.split('|');
+    let number_of_items = items.clone().count();
+    let mut output = Vec::with_capacity(number_of_items);
+    for item in items {
+        let value: UrlAndHeaders = UrlAndHeaders::from_custom_string(item).map_err(|e| {
+            D::Error::custom(format!("Invalid UrlAndHeaders formatting '{}': {}", item, e))
+        })?;
+        output.push(value);
+    }
+    Ok(Some(output))
+}
+
+/// Serializes a byte vector into a hex string prefixed with "0x".
+pub fn serialize_optional_vec_u8(optional_vector: &Option<Vec<u8>>) -> String {
+    match optional_vector {
+        None => "".to_owned(),
+        Some(vector) => {
+            format!(
+                "0x{}",
+                vector.iter().map(|num| format!("{:02x}", num)).collect::<Vec<String>>().join("")
+            )
+        }
+    }
+}
+
+/// Deserializes a byte vector from a hex string prefixed with "0x", two hex digits per byte.
+pub fn deserialize_optional_vec_u8<'de, D>(de: D) -> Result<Option<Vec<u8>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let raw_str: String = Deserialize::deserialize(de)?;
+    if raw_str.is_empty() {
+        return Ok(None);
+    }
+
+    if !raw_str.starts_with("0x") {
+        return Err(D::Error::custom(
+            "Couldn't deserialize vector. Expected hex string starting with \"0x\"",
+        ));
+    }
+
+    let hex_str = &raw_str[2..]; // Strip the "0x" prefix
+
+    // An odd number of hex digits cannot encode whole bytes; report it as an error
+    // instead of panicking on an out-of-bounds slice below.
+    if hex_str.len() % 2 != 0 {
+        return Err(D::Error::custom(
+            "Couldn't deserialize vector. Expected an even number of hex digits",
+        ));
+    }
+
+    let mut vector = Vec::new();
+    for i in (0..hex_str.len()).step_by(2) {
+        let byte_str = &hex_str[i..i + 2];
+        let byte = u8::from_str_radix(byte_str, 16).map_err(|e| {
+            D::Error::custom(format!(
+                "Couldn't deserialize vector. Failed to parse byte: {} {}",
+                byte_str, e
+            ))
+        })?;
+        vector.push(byte);
+    }
+    Ok(Some(vector))
+}
+
+/// Serializes a slice of string-like items (e.g. `&[Url]`) into a single space-separated string.
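The pair defined below round-trips whitespace-separated lists; a standalone approximation (the real functions are generic over `AsRef<str>` and `FromStr`):

```rust
fn serialize_slice(items: &[&str]) -> String {
    items.join(" ")
}

fn deserialize_vec(raw: &str) -> Vec<String> {
    raw.split_whitespace().map(str::to_owned).collect()
}

fn main() {
    let urls = ["http://a.com/", "http://b.com/"];
    let encoded = serialize_slice(&urls);
    assert_eq!(encoded, "http://a.com/ http://b.com/");
    assert_eq!(deserialize_vec(&encoded), vec!["http://a.com/", "http://b.com/"]);
}
```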
+pub fn serialize_slice>(vector: &[T]) -> String { + vector.iter().map(|item| item.as_ref()).collect::>().join(" ") +} + +/// Deserializes a space-separated string into a `Vec`. +/// Returns an error if any of the substrings cannot be parsed into `T`. +pub fn deserialize_vec<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: std::fmt::Display, +{ + let raw: String = ::deserialize(de)?; + + if raw.trim().is_empty() { + return Ok(Vec::new()); + } + + raw.split_whitespace() + .map(|s| { + T::from_str(s).map_err(|e| D::Error::custom(format!("Invalid value '{}': {}", s, e))) + }) + .collect() +} diff --git a/crates/apollo_config/src/dumping.rs b/crates/apollo_config/src/dumping.rs new file mode 100644 index 00000000000..b4dc998a7a1 --- /dev/null +++ b/crates/apollo_config/src/dumping.rs @@ -0,0 +1,431 @@ +//! Utils for serializing config objects into flatten map and json file. +//! The elements structure is: +//! +//! ```json +//! "conf1.conf2.conf3.param_name": { +//! "description": "Param description.", +//! "value": json_value +//! } +//! ``` +//! In addition, supports pointers in the map, with the structure: +//! +//! ```json +//! "conf1.conf2.conf3.param_name": { +//! "description": "Param description.", +//! "pointer_target": "target_param_path" +//! } +//! ``` +//! +//! Supports required params. A required param has no default value, but the type of value that the +//! user must set: +//! ```json +//! "conf1.conf2.conf3.param_name: { +//! "description": "Param description.", +//! "required_type": Number +//! } +//! ``` +//! +//! Supports flags for optional params and sub-configs. An optional param / sub-config has an +//! "#is_none" indicator that determines whether to take its value or to deserialize it to None: +//! ```json +//! "conf1.conf2.#is_none": { +//! "description": "Flag for an optional field.", +//! "value": true +//! } +//! ``` + +use std::collections::{BTreeMap, HashSet}; + +use apollo_infra_utils::dumping::serialize_to_file; +use itertools::chain; +use serde::Serialize; +use serde_json::{json, Value}; + +use crate::{ + ConfigError, + ParamPath, + ParamPrivacy, + ParamPrivacyInput, + SerializationType, + SerializedContent, + SerializedParam, + FIELD_SEPARATOR, + IS_NONE_MARK, +}; + +/// Type alias for a pointer parameter and its serialized representation. +type PointerTarget = (ParamPath, SerializedParam); + +/// Type alias for a set of pointing parameters. +pub type Pointers = HashSet; + +/// Detailing pointers in the config map. +pub type ConfigPointers = Vec<(PointerTarget, Pointers)>; + +/// Given a set of paths that are configuration of the same struct type, makes all the paths point +/// to the same target. +pub fn generate_struct_pointer( + target_prefix: ParamPath, + default_instance: &T, + pointer_prefixes: HashSet, +) -> ConfigPointers { + let mut res = ConfigPointers::new(); + for (param_path, serialized_param) in default_instance.dump() { + let pointer_target = serialized_param_to_pointer_target( + target_prefix.clone(), + ¶m_path, + &serialized_param, + ); + let pointers = pointer_prefixes + .iter() + .map(|pointer| chain_param_paths(&[pointer, ¶m_path])) + .collect(); + + res.push((pointer_target, pointers)); + } + res +} + +// Converts a serialized param to a pointer target. 
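To make the flattened-map scheme described above concrete, here is a sketch of a nested config dump, assuming the crate builds as shown in this diff: the inner field surfaces under the dotted path `inner.o`.

```rust
use std::collections::BTreeMap;

use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig};
use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
use serde::{Deserialize, Serialize};

#[derive(Clone, Serialize, Deserialize)]
struct Inner {
    o: usize,
}

impl SerializeConfig for Inner {
    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
        BTreeMap::from([ser_param("o", &self.o, "This is o.", ParamPrivacyInput::Public)])
    }
}

#[derive(Clone, Serialize, Deserialize)]
struct Outer {
    inner: Inner,
}

impl SerializeConfig for Outer {
    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
        prepend_sub_config_name(self.inner.dump(), "inner")
    }
}

fn main() {
    // The nested field flattens to a "conf.param"-style dotted key.
    let dump = Outer { inner: Inner { o: 3 } }.dump();
    assert!(dump.contains_key("inner.o"));
}
```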
+fn serialized_param_to_pointer_target( + target_prefix: ParamPath, + param_path: &ParamPath, + serialized_param: &SerializedParam, +) -> PointerTarget { + let full_param_path = chain_param_paths(&[&target_prefix, param_path]); + if serialized_param.is_required() { + let description = serialized_param + .description + .strip_prefix(REQUIRED_PARAM_DESCRIPTION_PREFIX) + .unwrap_or(&serialized_param.description) + .trim_start(); + ser_pointer_target_required_param( + &full_param_path, + serialized_param.content.get_serialization_type().unwrap(), + description, + ) + } else { + let default_value = match &serialized_param.content { + SerializedContent::DefaultValue(value) => value, + SerializedContent::PointerTarget(_) => panic!("Pointers to pointer is not supported."), + // We already checked that the param is not required, so it must be a generated param. + SerializedContent::ParamType(_) => { + panic!("Generated pointer targets are not supported.") + } + }; + ser_pointer_target_param(&full_param_path, default_value, &serialized_param.description) + } +} + +fn chain_param_paths(param_paths: &[&str]) -> ParamPath { + param_paths.join(FIELD_SEPARATOR) +} + +/// Serialization for configs. +pub trait SerializeConfig { + /// Conversion of a configuration to a mapping of flattened parameters to their descriptions and + /// values. + /// Note, in the case of a None sub configs, its elements will not included in the flatten map. + fn dump(&self) -> BTreeMap; + + /// Serialization of a configuration into a JSON file. + /// Takes a vector of {target pointer params, SerializedParam, and vector of pointing params}, + /// adds the target pointer params with the description and a value, and replaces the value of + /// the pointing params to contain only the name of the target they point to. + /// Fails if a param is not pointing to a same-named pointer target nor whitelisted. + /// + /// # Example + /// + /// ``` + /// # use std::collections::{BTreeMap, HashSet}; + /// + /// # use apollo_config::dumping::{ser_param, SerializeConfig}; + /// # use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; + /// # use serde::{Deserialize, Serialize}; + /// # use tempfile::TempDir; + /// + /// #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] + /// struct ConfigExample { + /// key: usize, + /// } + /// + /// impl SerializeConfig for ConfigExample { + /// fn dump(&self) -> BTreeMap { + /// BTreeMap::from([ser_param( + /// "key", + /// &self.key, + /// "This is key description.", + /// ParamPrivacyInput::Public, + /// )]) + /// } + /// } + /// + /// let dir = TempDir::new().unwrap(); + /// let file_path = dir.path().join("config.json"); + /// ConfigExample { key: 42 }.dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap()); + /// ``` + /// Note, in the case of a None sub configs, its elements will not be included in the file. + fn dump_to_file( + &self, + config_pointers: &ConfigPointers, + non_pointer_params: &Pointers, + file_path: &str, + ) -> Result<(), ConfigError> { + let combined_map = + combine_config_map_and_pointers(self.dump(), config_pointers, non_pointer_params)?; + serialize_to_file(combined_map, file_path); + Ok(()) + } +} + +/// Prepends `sub_config_name` to the ParamPath for each entry in `sub_config_dump`. +/// In order to load from a dump properly, `sub_config_name` must match the field's name for the +/// struct this function is called from. 
+pub fn prepend_sub_config_name( + sub_config_dump: BTreeMap, + sub_config_name: &str, +) -> BTreeMap { + BTreeMap::from_iter( + sub_config_dump.into_iter().map(|(field_name, val)| { + (format!("{sub_config_name}{FIELD_SEPARATOR}{field_name}"), val) + }), + ) +} + +// Serializes a parameter of a config. +fn common_ser_param( + name: &str, + content: SerializedContent, + description: &str, + privacy: ParamPrivacy, +) -> (String, SerializedParam) { + (name.to_owned(), SerializedParam { description: description.to_owned(), content, privacy }) +} + +/// Serializes a single param of a config. +/// The returned pair is designed to be an input to a dumped config map. +pub fn ser_param( + name: &str, + value: &T, + description: &str, + privacy: ParamPrivacyInput, +) -> (String, SerializedParam) { + common_ser_param( + name, + SerializedContent::DefaultValue(json!(value)), + description, + privacy.into(), + ) +} + +/// Serializes expected type for a single required param of a config. +/// The returned pair is designed to be an input to a dumped config map. +pub fn ser_required_param( + name: &str, + serialization_type: SerializationType, + description: &str, + privacy: ParamPrivacyInput, +) -> (String, SerializedParam) { + common_ser_param( + name, + SerializedContent::ParamType(serialization_type), + required_param_description(description).as_str(), + privacy.into(), + ) +} + +/// Serializes expected type for a single param of a config that the system may generate. The +/// generation should be defined as serde default field attribute. +/// The returned pair is designed to be an input to a dumped config map. +pub fn ser_generated_param( + name: &str, + serialization_type: SerializationType, + description: &str, + privacy: ParamPrivacyInput, +) -> (String, SerializedParam) { + common_ser_param( + name, + SerializedContent::ParamType(serialization_type), + format!("{} If no value is provided, the system will generate one.", description).as_str(), + privacy.into(), + ) +} + +/// Serializes optional sub-config fields (or default fields for None sub-config) and adds an +/// "#is_none" flag. +pub fn ser_optional_sub_config( + optional_config: &Option, + name: &str, +) -> BTreeMap { + chain!( + BTreeMap::from_iter([ser_is_param_none(name, optional_config.is_none())]), + prepend_sub_config_name( + match optional_config { + None => T::default().dump(), + Some(config) => config.dump(), + }, + name, + ), + ) + .collect() +} + +/// Serializes optional param value (or default value for None param) and adds an "#is_none" flag. +pub fn ser_optional_param( + optional_param: &Option, + default_value: T, + name: &str, + description: &str, + privacy: ParamPrivacyInput, +) -> BTreeMap { + BTreeMap::from([ + ser_is_param_none(name, optional_param.is_none()), + ser_param( + name, + match optional_param { + Some(param) => param, + None => &default_value, + }, + description, + privacy, + ), + ]) +} + +/// Serializes is_none flag for a param. +pub fn ser_is_param_none(name: &str, is_none: bool) -> (String, SerializedParam) { + common_ser_param( + format!("{name}{FIELD_SEPARATOR}{IS_NONE_MARK}").as_str(), + SerializedContent::DefaultValue(json!(is_none)), + "Flag for an optional field.", + ParamPrivacy::TemporaryValue, + ) +} + +/// Serializes a pointer target param of a config. 
+/// +/// # Example +/// Create config_pointers vector to be used in `dump_to_file`: +/// ``` +/// # use apollo_config::dumping::ser_pointer_target_param; +/// +/// let pointer_target_param = ser_pointer_target_param( +/// "shared_param", +/// &("param".to_string()), +/// "A string parameter description.", +/// ); +/// let pointer_param_paths = +/// vec!["conf1.conf2.same_param".to_owned(), "conf3.same_param".to_owned()]; +/// let config_pointers = vec![(pointer_target_param, pointer_param_paths)]; +/// ``` +pub fn ser_pointer_target_param( + name: &str, + value: &T, + description: &str, +) -> (String, SerializedParam) { + common_ser_param( + name, + SerializedContent::DefaultValue(json!(value)), + description, + ParamPrivacy::TemporaryValue, + ) +} + +/// Serializes a pointer target for a required param of a config. +pub fn ser_pointer_target_required_param( + name: &str, + serialization_type: SerializationType, + description: &str, +) -> (String, SerializedParam) { + common_ser_param( + name, + SerializedContent::ParamType(serialization_type), + required_param_description(description).as_str(), + ParamPrivacy::TemporaryValue, + ) +} + +/// Takes a config map and a vector of target parameters with their serialized representations. +/// Adds each target param to the config map. +/// Updates entries in the map to point to these targets, replacing values of entries that match +/// the target parameter paths to contain only the name of the target they point to. +/// Fails if a param is not pointing to a same-named pointer target nor whitelisted. +pub fn combine_config_map_and_pointers( + mut config_map: BTreeMap, + pointers: &ConfigPointers, + non_pointer_params: &Pointers, +) -> Result { + // Update config with target params. + for ((target_param, serialized_pointer), pointing_params_vec) in pointers { + // Insert target param. + config_map.insert(target_param.clone(), serialized_pointer.clone()); + + // Update pointing params to point at the target param. + for pointing_param in pointing_params_vec { + let pointing_serialized_param = + config_map.get(pointing_param).ok_or(ConfigError::PointerSourceNotFound { + pointing_param: pointing_param.to_owned(), + })?; + config_map.insert( + pointing_param.to_owned(), + SerializedParam { + description: pointing_serialized_param.description.clone(), + content: SerializedContent::PointerTarget(target_param.to_owned()), + privacy: pointing_serialized_param.privacy.clone(), + }, + ); + } + } + + verify_pointing_params_by_name(&config_map, pointers, non_pointer_params); + + Ok(json!(config_map)) +} + +/// Creates a set of pointing params, ensuring no duplications. +pub fn set_pointing_param_paths(param_path_list: &[&str]) -> Pointers { + let mut param_paths = HashSet::new(); + for ¶m_path in param_path_list { + assert!( + param_paths.insert(param_path.to_string()), + "Duplicate parameter path found: {}", + param_path + ); + } + param_paths +} + +/// Prefix for required params description. +pub(crate) const REQUIRED_PARAM_DESCRIPTION_PREFIX: &str = "A required param!"; + +pub(crate) fn required_param_description(description: &str) -> String { + format!("{} {}", REQUIRED_PARAM_DESCRIPTION_PREFIX, description) +} + +/// Verifies that params whose name matches a pointer target either point at it, or are whitelisted. 
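The verification rule documented above, as a usage sketch (assuming the crate builds as shown in this diff): a param whose name collides with a pointer target must either point at it or be listed in `non_pointer_params`.

```rust
use std::collections::{BTreeMap, HashSet};

use apollo_config::dumping::{combine_config_map_and_pointers, ser_param, ser_pointer_target_param};
use apollo_config::ParamPrivacyInput;
use serde_json::json;

fn main() {
    // "standalone.port" ends with ".port" but intentionally does not share the
    // "port" target, so it has to be whitelisted.
    let config_map = BTreeMap::from([ser_param(
        "standalone.port",
        &json!(8080),
        "This is port.",
        ParamPrivacyInput::Public,
    )]);
    let pointers = vec![(
        ser_pointer_target_param("port", &json!(9090), "Shared port."),
        HashSet::new(), // No param points at the target in this sketch.
    )];
    let whitelist = HashSet::from(["standalone.port".to_string()]);
    // Without the whitelist entry, this call would panic in
    // `verify_pointing_params_by_name`.
    combine_config_map_and_pointers(config_map, &pointers, &whitelist).unwrap();
}
```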
+fn verify_pointing_params_by_name(
+    config_map: &BTreeMap<ParamPath, SerializedParam>,
+    pointers: &ConfigPointers,
+    non_pointer_params: &Pointers,
+) {
+    // Iterate over the config, check that all parameters whose name matches a pointer target
+    // either point at it or are in the whitelist.
+    config_map.iter().for_each(|(param_path, serialized_param)| {
+        for ((target_param, _), _) in pointers {
+            // Check if the param name matches a pointer target, and that it is not in the
+            // whitelist.
+            if param_path.ends_with(format!("{FIELD_SEPARATOR}{target_param}").as_str())
+                && !non_pointer_params.contains(param_path)
+            {
+                // Check that the param points to the target param.
+                assert!(
+                    serialized_param.content
+                        == SerializedContent::PointerTarget(target_param.to_owned()),
+                    "The target param {} should point to {}, or to be whitelisted.",
+                    param_path,
+                    target_param
+                );
+            };
+        }
+    });
+}
diff --git a/crates/apollo_config/src/lib.rs b/crates/apollo_config/src/lib.rs
new file mode 100644
index 00000000000..cfceff0e962
--- /dev/null
+++ b/crates/apollo_config/src/lib.rs
@@ -0,0 +1,207 @@
+// Configure the compiler to support the `coverage_attribute` feature when running coverage in
+// nightly mode within this crate.
+#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
+#![warn(missing_docs)]
+//! Configuration utilities for a Starknet node.
+//!
+//! # Example
+//!
+//! ```
+//! use std::collections::{BTreeMap, HashSet};
+//! use std::fs::File;
+//! use std::path::Path;
+//!
+//! use apollo_config::dumping::{ser_param, SerializeConfig};
+//! use apollo_config::loading::load_and_process_config;
+//! use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+//! use clap::Command;
+//! use serde::{Deserialize, Serialize};
+//! use tempfile::TempDir;
+//!
+//! #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
+//! struct ConfigExample {
+//!     key: usize,
+//! }
+//!
+//! impl SerializeConfig for ConfigExample {
+//!     fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+//!         BTreeMap::from([ser_param(
+//!             "key",
+//!             &self.key,
+//!             "This is key description.",
+//!             ParamPrivacyInput::Public,
+//!         )])
+//!     }
+//! }
+//!
+//! let dir = TempDir::new().unwrap();
+//! let file_path = dir.path().join("config.json");
+//! ConfigExample { key: 42 }.dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap());
+//! let file = File::open(file_path).unwrap();
+//! let loaded_config = load_and_process_config::<ConfigExample>(
+//!     file,
+//!     Command::new("Program"),
+//!     vec!["Program".to_owned(), "--key".to_owned(), "770".to_owned()],
+//!     false,
+//! )
+//! .unwrap();
+//! assert_eq!(loaded_config.key, 770);
+//! ```
+
+use clap::parser::MatchesError;
+use const_format::formatcp;
+use dumping::REQUIRED_PARAM_DESCRIPTION_PREFIX;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+use validator::ValidationError;
+use validators::ParsedValidationErrors;
+
+/// Arg name for providing a configuration file.
+pub const CONFIG_FILE_ARG_NAME: &str = "config_file";
+/// The config file arg name prepended with a double dash.
+pub const CONFIG_FILE_ARG: &str = formatcp!("--{}", CONFIG_FILE_ARG_NAME);
+
+pub(crate) const IS_NONE_MARK: &str = "#is_none";
+pub(crate) const FIELD_SEPARATOR: &str = ".";
+
+/// A nested path of a configuration parameter.
+pub type ParamPath = String;
+/// A description of a configuration parameter.
+pub type Description = String; + +#[cfg(test)] +mod config_test; + +mod command; +pub mod converters; +pub mod dumping; +pub mod loading; +pub mod presentation; +pub mod validators; + +/// The privacy level of a config parameter, that received as input from the configs. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +pub enum ParamPrivacyInput { + /// The field is visible only by a secret. + Private, + /// The field is visible only to node's users. + Public, +} + +/// The privacy level of a config parameter. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +enum ParamPrivacy { + /// The field is visible only by a secret. + Private, + /// The field is visible only to node's users. + Public, + /// The field is not a part of the final config. + TemporaryValue, +} + +impl From for ParamPrivacy { + fn from(user_param_privacy: ParamPrivacyInput) -> Self { + match user_param_privacy { + ParamPrivacyInput::Private => ParamPrivacy::Private, + ParamPrivacyInput::Public => ParamPrivacy::Public, + } + } +} + +/// A serialized content of a configuration parameter. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum SerializedContent { + /// Serialized JSON default value. + #[serde(rename = "value")] + DefaultValue(Value), + /// The target from which to take the JSON value of a configuration parameter. + PointerTarget(ParamPath), + /// Type of a configuration parameter. + ParamType(SerializationType), +} + +impl SerializedContent { + fn get_serialization_type(&self) -> Option { + match self { + SerializedContent::DefaultValue(value) => match value { + // JSON "Number" is handled as PosInt(u64), NegInt(i64), or Float(f64). + Value::Number(num) => { + if num.is_f64() { + Some(SerializationType::Float) + } else if num.is_u64() { + Some(SerializationType::PositiveInteger) + } else { + Some(SerializationType::NegativeInteger) + } + } + Value::Bool(_) => Some(SerializationType::Boolean), + Value::String(_) => Some(SerializationType::String), + _ => None, + }, + SerializedContent::PointerTarget(_) => None, + SerializedContent::ParamType(ser_type) => Some(*ser_type), + } + } +} + +/// A description and serialized content of a configuration parameter. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +pub struct SerializedParam { + /// The description of the parameter. + pub description: Description, + /// The content of the parameter. + #[serde(flatten)] + pub content: SerializedContent, + pub(crate) privacy: ParamPrivacy, +} + +impl SerializedParam { + /// Whether the parameter is required. + // TODO(yair): Find a better way to identify required params - maybe add to the dump. + pub fn is_required(&self) -> bool { + self.description.starts_with(REQUIRED_PARAM_DESCRIPTION_PREFIX) + } + + /// Whether the parameter is private. + pub fn is_private(&self) -> bool { + self.privacy == ParamPrivacy::Private + } +} + +/// A serialized type of a configuration parameter. +#[derive(Clone, Copy, Serialize, Deserialize, Debug, PartialEq, strum_macros::Display)] +#[allow(missing_docs)] +pub enum SerializationType { + Boolean, + Float, + NegativeInteger, + PositiveInteger, + String, +} + +/// Errors at the configuration dumping and loading process. 
+#[allow(missing_docs)]
+#[derive(thiserror::Error, Debug)]
+pub enum ConfigError {
+    #[error(transparent)]
+    CommandInput(#[from] clap::error::Error),
+    #[error(transparent)]
+    MissingParam(#[from] serde_json::Error),
+    #[error(transparent)]
+    CommandMatches(#[from] MatchesError),
+    #[error(transparent)]
+    IOError(#[from] std::io::Error),
+    #[error("Received an unexpected parameter: {param_path}.")]
+    UnexpectedParam { param_path: String },
+    #[error("{target_param} is not found.")]
+    PointerTargetNotFound { target_param: String },
+    #[error("{pointing_param} is not found.")]
+    PointerSourceNotFound { pointing_param: String },
+    #[error("Changing {param_path} from required type {required} to {given} is not allowed.")]
+    ChangeRequiredParamType { param_path: String, required: SerializationType, given: Value },
+    #[error(transparent)]
+    ValidationError(#[from] ValidationError),
+    #[error(transparent)]
+    ConfigValidationError(#[from] ParsedValidationErrors),
+}
diff --git a/crates/apollo_config/src/loading.rs b/crates/apollo_config/src/loading.rs
new file mode 100644
index 00000000000..152dcdf0221
--- /dev/null
+++ b/crates/apollo_config/src/loading.rs
@@ -0,0 +1,282 @@
+//! Loads a configuration object, and sets values for its fields in the following order of
+//! priority:
+//! * Command line arguments.
+//! * Environment variables (capital letters).
+//! * Custom config files, separated by ',' (comma), from last to first.
+
+use std::collections::{BTreeMap, HashSet};
+use std::fs::File;
+use std::ops::IndexMut;
+use std::path::PathBuf;
+
+use clap::parser::Values;
+use clap::Command;
+use command::{get_command_matches, update_config_map_by_command_args};
+use itertools::any;
+use serde::Deserialize;
+use serde_json::{json, Map, Value};
+use tracing::{error, info, instrument};
+
+use crate::validators::validate_path_exists;
+use crate::{
+    command,
+    ConfigError,
+    ParamPath,
+    SerializationType,
+    SerializedContent,
+    SerializedParam,
+    CONFIG_FILE_ARG_NAME,
+    FIELD_SEPARATOR,
+    IS_NONE_MARK,
+};
+
+/// Deserializes config from a flattened JSON map.
+/// For an explanation of `for<'a> Deserialize<'a>` see
+/// ``.
+#[instrument(skip(config_map))]
+pub fn load<T: for<'a> Deserialize<'a>>(
+    config_map: &BTreeMap<ParamPath, Value>,
+) -> Result<T, ConfigError> {
+    let mut nested_map = json!({});
+    for (param_path, value) in config_map {
+        let mut entry = &mut nested_map;
+        for config_name in param_path.split('.') {
+            entry = entry.index_mut(config_name);
+        }
+        *entry = value.clone();
+    }
+    Ok(serde_json::from_value(nested_map)?)
+}
+
+/// Deserializes a json config file, updates the values by the given arguments for the command, and
+/// sets values for the pointers.
+pub fn load_and_process_config<T: for<'a> Deserialize<'a>>(
+    config_schema_file: File,
+    command: Command,
+    args: Vec<String>,
+    ignore_default_values: bool,
+) -> Result<T, ConfigError> {
+    let deserialized_config_schema: Map<String, Value> =
+        serde_json::from_reader(&config_schema_file)?;
+    // Store the pointers separately from the default values. The pointers will receive a value
+    // only at the end of the process.
+    let (config_map, pointers_map) = split_pointers_map(deserialized_config_schema.clone());
+    // Take param paths with corresponding descriptions, and get the matching arguments.
+    let mut arg_matches = get_command_matches(&config_map, command, args)?;
+    // Retaining values from the default config map for backward compatibility.
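On the environment-variable path mentioned above: variables are matched by name through clap's `Arg::env`, with names derived from param paths by `to_env_var_name` in command.rs. A standalone copy of that mapping:

```rust
// Copied from command.rs in this diff: "#is_none" -> "__is_none__", then
// uppercase, then "." -> "__".
fn to_env_var_name(param_path: &str) -> String {
    param_path.replace("#is_none", "__is_none__").to_uppercase().replace('.', "__")
}

fn main() {
    assert_eq!(to_env_var_name("inner_config.o"), "INNER_CONFIG__O");
    // The variable set in `test_env_nested_params`.
    assert_eq!(to_env_var_name("opt_config.#is_none"), "OPT_CONFIG____IS_NONE__");
}
```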
+    let (mut values_map, types_map) = split_values_and_types(config_map);
+    if ignore_default_values {
+        info!("Ignoring default values by overriding with an empty map.");
+        values_map = BTreeMap::new();
+    }
+    // If the config_file arg is given, updates the values map according to these files.
+    if let Some(custom_config_paths) = arg_matches.remove_many::<PathBuf>(CONFIG_FILE_ARG_NAME) {
+        update_config_map_by_custom_configs(&mut values_map, &types_map, custom_config_paths)?;
+    };
+    // Updates the values map according to the args.
+    update_config_map_by_command_args(&mut values_map, &types_map, &arg_matches)?;
+    // Set values to the pointers.
+    update_config_map_by_pointers(&mut values_map, &pointers_map)?;
+    // Set values according to the is-none marks.
+    update_optional_values(&mut values_map);
+    // Build the Config object.
+    let load_result = load(&values_map);
+    // In case of an error, print the error and the missing keys.
+    if load_result.is_err() {
+        error!("Loading the config resulted with an error: {:?}", load_result.as_ref().err());
+        // Obtain the loaded and schema keys.
+        let loaded_config_keys = values_map.keys().cloned().collect::<HashSet<_>>();
+        let schema_keys = deserialized_config_schema.keys().cloned().collect::<HashSet<_>>();
+
+        // Obtain the loaded and schema keys that are not in the other.
+        let mut keys_only_in_loaded_config: HashSet<_> =
+            loaded_config_keys.difference(&schema_keys).collect();
+        let mut keys_only_in_schema: HashSet<_> =
+            schema_keys.difference(&loaded_config_keys).collect();
+
+        // Address optional None value discrepancies:
+        // 1. Find None (null) values in the config entries.
+        let null_config_entries = values_map
+            .iter()
+            .filter(|(_, value)| value.is_null())
+            .map(|(key, _)| key.clone())
+            .collect::<HashSet<String>>();
+
+        // 2. Filter out None-value keys in the loaded config entries.
+        keys_only_in_loaded_config.retain(|item| !null_config_entries.contains(item.as_str()));
+
+        // 3. Filter out None-value keys in the schema entries.
+        let optional_param_suffix = format!("{FIELD_SEPARATOR}{IS_NONE_MARK}");
+        keys_only_in_schema.retain(|item| {
+            // Consider a schema key only if both:
+            // - it does NOT start with any prefix in `null_config_entries` (i.e., a None value)
+            // - it does NOT end with the optional param suffix (i.e., a None value indicator)
+            let has_prefix = null_config_entries.iter().any(|prefix| item.starts_with(prefix));
+            let has_suffix = item.ends_with(optional_param_suffix.as_str());
+            !has_prefix && !has_suffix
+        });
+
+        // Log the keys that are only in the loaded config.
+        if !keys_only_in_loaded_config.is_empty() {
+            error!(
+                "Detected loaded-schema config difference. Keys missing in schema: {:?}",
+                keys_only_in_loaded_config
+            );
+        }
+
+        // Log the keys that are only in the schema.
+        if !keys_only_in_schema.is_empty() {
+            error!(
+                "Detected loaded-schema config difference. Keys missing in loaded config: {:?}",
+                keys_only_in_schema
+            );
+        }
+    }
+    // Return the loaded config result.
+    load_result
+}
+
+// Separates a json map into a config map of the raw values and a pointers map.
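+// (For intuition, a sketch of the two entry shapes, per `SerializedContent`'s serde attributes;
+// the param names here are hypothetical:
+//   "consensus.startup_delay": { "description": "...", "value": 5, "privacy": "Public" }
+//   "chain_id": { "description": "...", "pointer_target": "network.chain_id", "privacy": "Public" }
+// The second form is routed to the pointers map, the first to the config map.)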
+pub(crate) fn split_pointers_map(
+    json_map: Map<String, Value>,
+) -> (BTreeMap<ParamPath, SerializedParam>, BTreeMap<ParamPath, ParamPath>) {
+    let mut config_map: BTreeMap<ParamPath, SerializedParam> = BTreeMap::new();
+    let mut pointers_map: BTreeMap<ParamPath, ParamPath> = BTreeMap::new();
+    for (param_path, stored_param) in json_map {
+        let Ok(ser_param) = serde_json::from_value::<SerializedParam>(stored_param.clone()) else {
+            unreachable!("Invalid type in the json config map")
+        };
+        match ser_param.content {
+            SerializedContent::PointerTarget(pointer_target) => {
+                pointers_map.insert(param_path, pointer_target);
+            }
+            _ => {
+                config_map.insert(param_path, ser_param);
+            }
+        };
+    }
+    (config_map, pointers_map)
+}
+
+// Removes the descriptions from the config map, and splits it into a map of the default values
+// and a map of the types of both the default and the required values.
+// The types map includes required params, which do not have a value yet.
+pub(crate) fn split_values_and_types(
+    config_map: BTreeMap<ParamPath, SerializedParam>,
+) -> (BTreeMap<ParamPath, Value>, BTreeMap<ParamPath, SerializationType>) {
+    let mut values_map: BTreeMap<ParamPath, Value> = BTreeMap::new();
+    let mut types_map: BTreeMap<ParamPath, SerializationType> = BTreeMap::new();
+    for (param_path, serialized_param) in config_map {
+        let Some(serialization_type) = serialized_param.content.get_serialization_type() else {
+            continue;
+        };
+        types_map.insert(param_path.clone(), serialization_type);
+
+        if let SerializedContent::DefaultValue(value) = serialized_param.content {
+            values_map.insert(param_path, value);
+        };
+    }
+    (values_map, types_map)
+}
+
+// Updates the config map with the param path to value pairs taken from custom json files.
+pub(crate) fn update_config_map_by_custom_configs(
+    config_map: &mut BTreeMap<ParamPath, Value>,
+    types_map: &BTreeMap<ParamPath, SerializationType>,
+    custom_config_paths: Values<PathBuf>,
+) -> Result<(), ConfigError> {
+    for config_path in custom_config_paths {
+        info!("Loading custom config file: {:?}", config_path);
+        validate_path_exists(&config_path)?;
+        let file = std::fs::File::open(config_path)?;
+        let custom_config: Map<String, Value> = serde_json::from_reader(file)?;
+        for (param_path, json_value) in custom_config {
+            update_config_map(config_map, types_map, param_path.as_str(), json_value)?;
+        }
+    }
+    Ok(())
+}
+
+// Sets values in the config map to the params in the pointers map.
+pub(crate) fn update_config_map_by_pointers(
+    config_map: &mut BTreeMap<ParamPath, Value>,
+    pointers_map: &BTreeMap<ParamPath, ParamPath>,
+) -> Result<(), ConfigError> {
+    for (param_path, target_param_path) in pointers_map {
+        let Some(target_value) = config_map.get(target_param_path) else {
+            return Err(ConfigError::PointerTargetNotFound {
+                target_param: target_param_path.to_owned(),
+            });
+        };
+        config_map.insert(param_path.to_owned(), target_value.clone());
+    }
+    Ok(())
+}
+
+// Removes the none marks, and sets null for the params marked as None instead of the inner params.
+pub(crate) fn update_optional_values(config_map: &mut BTreeMap<ParamPath, Value>) {
+    let optional_params: Vec<_> = config_map
+        .keys()
+        .filter_map(|param_path| param_path.strip_suffix(&format!(".{IS_NONE_MARK}")))
+        .map(|param_path| param_path.to_owned())
+        .collect();
+    let mut none_params = vec![];
+    for optional_param in optional_params {
+        let value = config_map
+            .remove(&format!("{optional_param}.{IS_NONE_MARK}"))
+            .expect("Optional param not found");
+        if value == json!(true) {
+            none_params.push(optional_param);
+        }
+    }
+    // Remove param paths that start with any None param.
+    config_map.retain(|param_path, _| {
+        !any(&none_params, |none_param| {
+            param_path.starts_with(format!("{none_param}{FIELD_SEPARATOR}").as_str())
+                || param_path == none_param
+        })
+    });
+
+    // Set null for the None params.
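+    // (Only the outermost None param keeps an explicit null: params nested under a None param
+    // were already removed by the retain above, and nested None params are skipped below.)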
+    for none_param in &none_params {
+        let mut is_nested_in_outer_none_config = false;
+        for other_none_param in &none_params {
+            if none_param.starts_with(other_none_param) && none_param != other_none_param {
+                is_nested_in_outer_none_config = true;
+            }
+        }
+        if is_nested_in_outer_none_config {
+            continue;
+        }
+        config_map.insert(none_param.clone(), Value::Null);
+    }
+}
+
+pub(crate) fn update_config_map(
+    config_map: &mut BTreeMap<ParamPath, Value>,
+    types_map: &BTreeMap<ParamPath, SerializationType>,
+    param_path: &str,
+    new_value: Value,
+) -> Result<(), ConfigError> {
+    let Some(serialization_type) = types_map.get(param_path) else {
+        return Err(ConfigError::UnexpectedParam { param_path: param_path.to_string() });
+    };
+    let is_type_matched = match serialization_type {
+        SerializationType::Boolean => new_value.is_boolean(),
+        SerializationType::Float => new_value.is_number(),
+        SerializationType::NegativeInteger => new_value.is_number(),
+        SerializationType::PositiveInteger => new_value.is_number(),
+        SerializationType::String => new_value.is_string(),
+    };
+    if !is_type_matched {
+        return Err(ConfigError::ChangeRequiredParamType {
+            param_path: param_path.to_string(),
+            required: serialization_type.to_owned(),
+            given: new_value,
+        });
+    }
+
+    config_map.insert(param_path.to_owned(), new_value);
+    Ok(())
+}
diff --git a/crates/papyrus_config/src/presentation.rs b/crates/apollo_config/src/presentation.rs
similarity index 97%
rename from crates/papyrus_config/src/presentation.rs
rename to crates/apollo_config/src/presentation.rs
index ea8f3702470..19c8fb9eefd 100644
--- a/crates/papyrus_config/src/presentation.rs
+++ b/crates/apollo_config/src/presentation.rs
@@ -56,7 +56,7 @@ fn remove_path_from_json(
     // be "v1".
     let mut path_to_entry = param_path.split('.').collect_vec();
     let Some(entry_to_remove) = path_to_entry.pop() else {
-        // TODO: Can we expect this to never happen?
+        // TODO(Yair): Can we expect this to never happen?
         return Ok(()); // Empty param path.
     };
     let mut inner_json = json;
diff --git a/crates/papyrus_config/src/validators.rs b/crates/apollo_config/src/validators.rs
similarity index 98%
rename from crates/papyrus_config/src/validators.rs
rename to crates/apollo_config/src/validators.rs
index ec2fc27ea12..93b2fcab6b3 100644
--- a/crates/papyrus_config/src/validators.rs
+++ b/crates/apollo_config/src/validators.rs
@@ -15,7 +15,7 @@ pub fn validate_ascii(name: &impl ToString) -> Result<(), ValidationError> {
 }
 
 /// Custom validation for file or directory path existence.
-pub fn validate_path_exists(file_path: &Path) -> Result<(), ValidationError> {
+pub(crate) fn validate_path_exists(file_path: &Path) -> Result<(), ValidationError> {
     if !file_path.exists() {
         let mut error = ValidationError::new("file or directory not found");
         error.message = Some(
diff --git a/crates/apollo_consensus/Cargo.toml b/crates/apollo_consensus/Cargo.toml
new file mode 100644
index 00000000000..a9ae6baacf8
--- /dev/null
+++ b/crates/apollo_consensus/Cargo.toml
@@ -0,0 +1,44 @@
+[package]
+name = "apollo_consensus"
+version.workspace = true
+edition.workspace = true
+repository.workspace = true
+license-file.workspace = true
+description = "Reach consensus for Starknet"
+
+[features]
+testing = []
+
+[dependencies]
+apollo_config.workspace = true
+apollo_metrics.workspace = true
+apollo_network.workspace = true
+apollo_network_types.workspace = true
+apollo_protobuf.workspace = true
+apollo_time = { workspace = true, features = ["tokio"] }
+async-trait.workspace = true
+futures.workspace = true
+lazy_static.workspace = true
+lru.workspace = true
+prost.workspace = true
+serde = { workspace = true, features = ["derive"] }
+starknet-types-core.workspace = true
+starknet_api.workspace = true
+strum.workspace = true
+strum_macros.workspace = true
+thiserror.workspace = true
+tokio = { workspace = true, features = ["sync"] }
+tracing.workspace = true
+validator.workspace = true
+
+[dev-dependencies]
+apollo_network = { workspace = true, features = ["testing"] }
+apollo_network_types = { workspace = true, features = ["testing"] }
+apollo_storage = { workspace = true, features = ["testing"] }
+apollo_test_utils.workspace = true
+enum-as-inner.workspace = true
+mockall.workspace = true
+test-case.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/apollo_consensus/README.md b/crates/apollo_consensus/README.md
new file mode 100644
index 00000000000..b85bdeb4086
--- /dev/null
+++ b/crates/apollo_consensus/README.md
@@ -0,0 +1,36 @@
+# apollo-consensus
+
+This crate provides an implementation of consensus for a Starknet node.
+
+### Disclaimer
+This crate is still under development and does not keep backwards compatibility with previous
+versions. Breaking changes are expected in the near future.
+
+## How to run
+1. You must turn consensus on and provide a validator ID by passing: `--consensus.#is_none false --consensus.validator_id 0x<VALIDATOR_ID>`
+2. Start by running all nodes that are validators for `consensus.start_height` (0 by default), so they do not miss the proposal.
+   1. You can change the default number of validators by passing: `--consensus.num_validators <NUM_VALIDATORS>`
+   2. You can change the default topic by passing: `--consensus.topic "TOPIC"`
+   3. You can test the consensus under simulated network conditions by passing: `--consensus.test.#is_none false`
+      1. Optional arguments:
+         `--consensus.test.cache_size <CACHE_SIZE>`
+         `--consensus.test.random_seed <SEED>`
+         `--consensus.test.drop_probability 0` (set to 0 for now)
+         `--consensus.test.invalid_probability <PROBABILITY>` (0 to 1)
+
+#### Bootstrap Node
+This must be run first:
+```
+cargo run --package papyrus_node --bin papyrus_node -- --base_layer.node_url <ETH_NODE_URL> --network.#is_none false --consensus.#is_none false --consensus.validator_id 0x1 --storage.db_config.path_prefix <UNIQUE>
+```
+- This will log `local_peer_id`, which is used by other nodes. (Alternatively, pass `network.secret_key` to have a fixed peer id.)
+
+#### Other Nodes
+Run each of the other nodes separately, using different `consensus.validator_id` values {`0x2`, `0x3`, `0x0`}:
+
+```
+cargo run --package papyrus_node --bin papyrus_node -- --base_layer.node_url <ETH_NODE_URL> --network.#is_none false --consensus.#is_none false --consensus.validator_id 0x<VALIDATOR_ID> --network.port <UNIQUE> --network.bootstrap_peer_multiaddr.#is_none false --rpc.server_address 127.0.0.1:<UNIQUE> --monitoring_gateway.server_address 127.0.0.1:<UNIQUE> --storage.db_config.path_prefix <UNIQUE> --network.bootstrap_peer_multiaddr /ip4/127.0.0.1/tcp/10000/p2p/<BOOT_NODE_PEER_ID>
+```
+- Node 0 is the first proposer and should be run last.
+
+UNIQUE - a value unique among all nodes running locally.
diff --git a/crates/apollo_consensus/src/config.rs b/crates/apollo_consensus/src/config.rs
new file mode 100644
index 00000000000..3aad64eeea0
--- /dev/null
+++ b/crates/apollo_consensus/src/config.rs
@@ -0,0 +1,181 @@
+//! This module contains the configuration for consensus, including the `ConsensusConfig` struct
+//! and its implementation of the `SerializeConfig` trait. The configuration includes parameters
+//! such as the validator ID, the network topic of the consensus, and the starting block height.
+
+use std::collections::BTreeMap;
+use std::time::Duration;
+
+use apollo_config::converters::{
+    deserialize_float_seconds_to_duration,
+    deserialize_seconds_to_duration,
+};
+use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use apollo_protobuf::consensus::DEFAULT_VALIDATOR_ID;
+use serde::{Deserialize, Serialize};
+use validator::Validate;
+
+use crate::types::ValidatorId;
+
+/// Configuration for consensus.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Validate)]
+pub struct ConsensusConfig {
+    /// The validator ID of the node.
+    pub validator_id: ValidatorId,
+    /// The delay (seconds) before starting consensus to give time for network peering.
+    #[serde(deserialize_with = "deserialize_seconds_to_duration")]
+    pub startup_delay: Duration,
+    /// Timeouts configuration for consensus.
+    pub timeouts: TimeoutsConfig,
+    /// The duration (seconds) between sync attempts.
+    #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
+    pub sync_retry_interval: Duration,
+    /// How many heights in the future should we cache.
+    pub future_height_limit: u32,
+    /// How many rounds in the future (for the current height) should we cache.
+    pub future_round_limit: u32,
+    /// How many rounds should we cache for future heights.
+    pub future_height_round_limit: u32,
+}
+
+impl SerializeConfig for ConsensusConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        let mut config = BTreeMap::from_iter([
+            ser_param(
+                "validator_id",
+                &self.validator_id,
+                "The validator ID of the node.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "startup_delay",
+                &self.startup_delay.as_secs(),
+                "Delay (seconds) before starting consensus to give time for network peering.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "sync_retry_interval",
+                &self.sync_retry_interval.as_secs_f64(),
+                "The duration (seconds) between sync attempts.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "future_height_limit",
+                &self.future_height_limit,
+                "How many heights in the future should we cache.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "future_round_limit",
+                &self.future_round_limit,
+                "How many rounds in the future (for the current height) should we cache.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "future_height_round_limit",
+                &self.future_height_round_limit,
+                "How many rounds should we cache for future heights.",
+                ParamPrivacyInput::Public,
+            ),
+        ]);
+        config.extend(prepend_sub_config_name(self.timeouts.dump(), "timeouts"));
+        config
+    }
+}
+
+impl Default for ConsensusConfig {
+    fn default() -> Self {
+        Self {
+            validator_id: ValidatorId::from(DEFAULT_VALIDATOR_ID),
+            startup_delay: Duration::from_secs(5),
+            timeouts: TimeoutsConfig::default(),
+            sync_retry_interval: Duration::from_secs_f64(1.0),
+            future_height_limit: 10,
+            future_round_limit: 10,
+            future_height_round_limit: 1,
+        }
+    }
+}
+
+/// Configuration for consensus timeouts.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct TimeoutsConfig {
+    /// The timeout for a proposal.
+    #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
+    pub proposal_timeout: Duration,
+    /// The timeout for a prevote.
+    #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
+    pub prevote_timeout: Duration,
+    /// The timeout for a precommit.
+    #[serde(deserialize_with = "deserialize_float_seconds_to_duration")]
+    pub precommit_timeout: Duration,
+}
+
+impl SerializeConfig for TimeoutsConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        BTreeMap::from_iter([
+            ser_param(
+                "proposal_timeout",
+                &self.proposal_timeout.as_secs_f64(),
+                "The timeout (seconds) for a proposal.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "prevote_timeout",
+                &self.prevote_timeout.as_secs_f64(),
+                "The timeout (seconds) for a prevote.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "precommit_timeout",
+                &self.precommit_timeout.as_secs_f64(),
+                "The timeout (seconds) for a precommit.",
+                ParamPrivacyInput::Public,
+            ),
+        ])
+    }
+}
+
+impl Default for TimeoutsConfig {
+    fn default() -> Self {
+        Self {
+            proposal_timeout: Duration::from_secs_f64(3.0),
+            prevote_timeout: Duration::from_secs_f64(1.0),
+            precommit_timeout: Duration::from_secs_f64(1.0),
+        }
+    }
+}
+
+/// Configuration for the `StreamHandler`.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct StreamHandlerConfig {
+    /// The capacity of the channel buffer for stream messages.
+    pub channel_buffer_capacity: usize,
+    /// The maximum number of streams that can be open at the same time.
+    pub max_streams: usize,
+}
+
+impl Default for StreamHandlerConfig {
+    fn default() -> Self {
+        Self { channel_buffer_capacity: 1000, max_streams: 100 }
+    }
+}
+
+impl SerializeConfig for StreamHandlerConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        BTreeMap::from_iter([
+            ser_param(
+                "channel_buffer_capacity",
+                &self.channel_buffer_capacity,
+                "The capacity of the channel buffer for stream messages.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_streams",
+                &self.max_streams,
+                "The maximum number of streams that can be open at the same time.",
+                ParamPrivacyInput::Public,
+            ),
+        ])
+    }
+}
diff --git a/crates/apollo_consensus/src/lib.rs b/crates/apollo_consensus/src/lib.rs
new file mode 100644
index 00000000000..86f0f1af5c9
--- /dev/null
+++ b/crates/apollo_consensus/src/lib.rs
@@ -0,0 +1,46 @@
+#![warn(missing_docs)]
+// TODO(Matan): Add links to the spec.
+// TODO(Matan): fix #[allow(missing_docs)].
+//! A consensus implementation for a [Starknet](https://www.starknet.io/) node. The consensus
+//! algorithm is based on [Tendermint](https://arxiv.org/pdf/1807.04938).
+//!
+//! Consensus communicates with other nodes via a gossip network: it sends and receives votes on
+//! one topic and streams proposals on a separate topic. [details](https://github.com/starknet-io/starknet-p2p-specs/tree/main/p2p/proto/consensus).
+//!
+//! In addition to the network inputs, consensus reaches out to the rest of the node via the
+//! [`Context`](types::ConsensusContext) API.
+//!
+//! Consensus is generic over the content of the proposals, and merely requires an identifier to be
+//! produced by the Context.
+//!
+//! Consensus operates in two modes:
+//! 1. Observer - Receives consensus messages and updates the node when a decision is reached.
+//! 2. Active - In addition to receiving messages, the node can also send messages to the network.
+//!
+//! Observer mode offers lower latency compared to sync, as proposals and votes are processed in
+//! real-time rather than after a decision has been made.
+//!
+//! Consensus is an active component; it doesn't follow the server/client model:
+//! 1. The outbound messages are not sent as responses to the inbound messages.
+//! 2. It generates and runs its own events (e.g. timeouts).
+
+pub mod config;
+#[allow(missing_docs)]
+pub mod types;
+pub use manager::{run_consensus, RunConsensusArguments};
+#[allow(missing_docs)]
+pub mod metrics;
+#[allow(missing_docs)]
+pub mod simulation_network_receiver;
+pub mod stream_handler;
+
+mod manager;
+#[allow(missing_docs)]
+mod single_height_consensus;
+#[allow(missing_docs)]
+mod state_machine;
+#[allow(missing_docs)]
+pub mod votes_threshold;
+
+#[cfg(test)]
+pub(crate) mod test_utils;
diff --git a/crates/apollo_consensus/src/manager.rs b/crates/apollo_consensus/src/manager.rs
new file mode 100644
index 00000000000..5fef0cb0f21
--- /dev/null
+++ b/crates/apollo_consensus/src/manager.rs
@@ -0,0 +1,521 @@
+//! Top level of consensus, used to run multiple heights of consensus.
+//!
+//! [`run_consensus`] - This is the primary entrypoint for running the consensus component.
+//!
+//! [`MultiHeightManager`] - Runs consensus repeatedly across different heights using
+//! [`run_height`](MultiHeightManager::run_height).
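+//!
+//! A minimal sketch of the entrypoint call (illustrative only; `context`, `vote_channel`, and
+//! `proposal_receiver` are assumed to be constructed by the embedding node):
+//!
+//! ```ignore
+//! let args = RunConsensusArguments {
+//!     start_active_height: BlockNumber(0),
+//!     start_observe_height: BlockNumber(0),
+//!     validator_id,
+//!     consensus_delay: Duration::from_secs(5),
+//!     timeouts: TimeoutsConfig::default(),
+//!     sync_retry_interval: Duration::from_secs(1),
+//!     quorum_type: QuorumType::Byzantine,
+//! };
+//! run_consensus(args, context, vote_channel, proposal_receiver).await?;
+//! ```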
+
+#[cfg(test)]
+#[path = "manager_test.rs"]
+mod manager_test;
+
+use std::collections::BTreeMap;
+use std::time::Duration;
+
+use apollo_network::network_manager::BroadcastTopicClientTrait;
+use apollo_network_types::network_types::BroadcastedMessageMetadata;
+use apollo_protobuf::consensus::{ProposalInit, Vote};
+use apollo_protobuf::converters::ProtobufConversionError;
+use apollo_time::time::{sleep_until, Clock, DefaultClock};
+use futures::channel::mpsc;
+use futures::stream::FuturesUnordered;
+use futures::{FutureExt, StreamExt};
+use starknet_api::block::BlockNumber;
+use tracing::{debug, error, info, instrument, trace};
+
+use crate::config::TimeoutsConfig;
+use crate::metrics::{
+    register_metrics,
+    CONSENSUS_BLOCK_NUMBER,
+    CONSENSUS_CACHED_VOTES,
+    CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS,
+    CONSENSUS_DECISIONS_REACHED_BY_SYNC,
+    CONSENSUS_MAX_CACHED_BLOCK_NUMBER,
+    CONSENSUS_PROPOSALS_RECEIVED,
+};
+use crate::single_height_consensus::{ShcReturn, SingleHeightConsensus};
+use crate::types::{
+    BroadcastVoteChannel,
+    ConsensusContext,
+    ConsensusError,
+    Decision,
+    Round,
+    ValidatorId,
+};
+use crate::votes_threshold::QuorumType;
+
+/// Arguments for running consensus.
+#[derive(Clone, Debug)]
+pub struct RunConsensusArguments {
+    /// The height at which the node may participate in consensus (if it is a validator).
+    pub start_active_height: BlockNumber,
+    /// The height at which the node begins to run consensus.
+    pub start_observe_height: BlockNumber,
+    /// The ID of this node.
+    pub validator_id: ValidatorId,
+    /// Delay before starting consensus, allowing the network to connect to peers.
+    pub consensus_delay: Duration,
+    /// The timeouts for the consensus algorithm.
+    pub timeouts: TimeoutsConfig,
+    /// The interval to wait between sync retries.
+    pub sync_retry_interval: Duration,
+    /// Set to Byzantine by default. Using Honest means we trust all validators. Use with caution!
+    pub quorum_type: QuorumType,
+}
+
+/// Run consensus indefinitely.
+///
+/// If a decision is reached via consensus, the context is updated. If a decision is learned via
+/// the sync protocol, consensus silently moves on to the next height.
+///
+/// Inputs:
+/// - `run_consensus_args`: Configuration arguments for consensus. See [`RunConsensusArguments`]
+///   for detailed documentation.
+/// - `context`: The API for consensus to reach out to the rest of the node.
+/// - `vote_receiver`: The channels to receive votes from the network. These are self-contained
+///   messages.
+/// - `proposals_receiver`: The channel to receive proposals from the network. Proposals are
+///   represented as streams (ProposalInit, Content.*, ProposalFin).
+// Always print the validator ID since some tests collate multiple consensus logs in a single file.
+#[instrument(skip_all, fields(validator_id=%run_consensus_args.validator_id), level = "error")]
+pub async fn run_consensus<ContextT>(
+    run_consensus_args: RunConsensusArguments,
+    mut context: ContextT,
+    mut vote_receiver: BroadcastVoteChannel,
+    mut proposals_receiver: mpsc::Receiver<mpsc::Receiver<ContextT::ProposalPart>>,
+) -> Result<(), ConsensusError>
+where
+    ContextT: ConsensusContext,
+{
+    info!("Running consensus, args: {:?}", run_consensus_args.clone());
+    register_metrics();
+    // Add a short delay to allow peers to connect and avoid "InsufficientPeers" errors.
+    tokio::time::sleep(run_consensus_args.consensus_delay).await;
+    assert!(run_consensus_args.start_observe_height <= run_consensus_args.start_active_height);
+    let mut current_height = run_consensus_args.start_observe_height;
+    let mut manager = MultiHeightManager::new(
+        run_consensus_args.validator_id,
+        run_consensus_args.sync_retry_interval,
+        run_consensus_args.quorum_type,
+        run_consensus_args.timeouts,
+    );
+    loop {
+        let must_observer = current_height < run_consensus_args.start_active_height;
+        match manager
+            .run_height(
+                &mut context,
+                current_height,
+                must_observer,
+                &mut vote_receiver,
+                &mut proposals_receiver,
+            )
+            .await?
+        {
+            RunHeightRes::Decision(decision) => {
+                // We expect there to be under 100 validators, so this is a reasonable number of
+                // precommits to print.
+                let round = decision.precommits[0].round;
+                let proposer = context.proposer(current_height, round);
+                info!(
+                    "DECISION_REACHED: Decision reached for round {} with proposer {}. {:?}",
+                    round, proposer, decision
+                );
+                CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS.increment(1);
+                context.decision_reached(decision.block, decision.precommits).await?;
+            }
+            RunHeightRes::Sync => {
+                info!(height = current_height.0, "Decision learned via sync protocol.");
+                CONSENSUS_DECISIONS_REACHED_BY_SYNC.increment(1);
+            }
+        }
+        current_height = current_height.unchecked_next();
+    }
+}
+
+/// Run height can end either when consensus reaches a decision or when we learn, via sync, of the
+/// decision.
+#[derive(Debug, PartialEq)]
+pub enum RunHeightRes {
+    /// Decision reached.
+    Decision(Decision),
+    /// Decision learned via sync.
+    Sync,
+}
+
+type ProposalReceiverTuple<T> = (ProposalInit, mpsc::Receiver<T>);
+
+/// Runs Tendermint repeatedly across different heights. Handles issues which are not explicitly
+/// part of the single height consensus algorithm (e.g. messages from future heights).
+#[derive(Debug, Default)]
+struct MultiHeightManager<ContextT: ConsensusContext> {
+    validator_id: ValidatorId,
+    future_votes: BTreeMap<u64, Vec<Vote>>,
+    sync_retry_interval: Duration,
+    quorum_type: QuorumType,
+    // Mapping: { Height : { Round : (Init, Receiver) } }
+    cached_proposals: BTreeMap<u64, BTreeMap<Round, ProposalReceiverTuple<ContextT::ProposalPart>>>,
+    timeouts: TimeoutsConfig,
+}
+
+impl<ContextT: ConsensusContext> MultiHeightManager<ContextT> {
+    /// Create a new consensus manager.
+    pub(crate) fn new(
+        validator_id: ValidatorId,
+        sync_retry_interval: Duration,
+        quorum_type: QuorumType,
+        timeouts: TimeoutsConfig,
+    ) -> Self {
+        Self {
+            validator_id,
+            sync_retry_interval,
+            quorum_type,
+            future_votes: BTreeMap::new(),
+            cached_proposals: BTreeMap::new(),
+            timeouts,
+        }
+    }
+
+    /// Run the consensus algorithm for a single height.
+    ///
+    /// A height of consensus ends when the node learns of a decision, either by consensus
+    /// directly or via the sync protocol.
+    /// - An error implies that consensus cannot continue, not just that the current height failed.
+    ///
+    /// This is the "top level" task of consensus, which is able to multiplex across activities:
+    /// network messages and self-generated events.
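+    /// (Internally this multiplexing is a `tokio::select!` over the vote channel, the proposal
+    /// channel, self-generated SHC events, and a periodic sync poll; see `run_height_inner`.)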
+    ///
+    /// Assumes that `height` is monotonically increasing across calls.
+    ///
+    /// Inputs - see [`run_consensus`].
+    /// - `must_observer`: Whether the node must observe or if it is allowed to be active (assuming
+    ///   it is in the validator set).
+    #[instrument(skip_all, fields(height=%height.0), level = "error")]
+    pub(crate) async fn run_height(
+        &mut self,
+        context: &mut ContextT,
+        height: BlockNumber,
+        must_observer: bool,
+        broadcast_channels: &mut BroadcastVoteChannel,
+        proposals_receiver: &mut mpsc::Receiver<mpsc::Receiver<ContextT::ProposalPart>>,
+    ) -> Result<RunHeightRes, ConsensusError> {
+        let res = self
+            .run_height_inner(
+                context,
+                height,
+                must_observer,
+                broadcast_channels,
+                proposals_receiver,
+            )
+            .await?;
+
+        // Clear any existing votes and proposals for previous heights as well as the current,
+        // just completed, height.
+        //
+        // The networking layer assumes messages are handled in a timely fashion; otherwise we may
+        // build up a backlog of useless messages. Similarly, we don't want to waste space on old
+        // messages. This is particularly important when there is a significant lag and we
+        // continually finish heights immediately due to sync.
+
+        // We use get_current_height_votes for its side effect of removing votes for lower
+        // heights (we don't care about the actual votes).
+        self.get_current_height_votes(height);
+        while let Some(message) =
+            broadcast_channels.broadcasted_messages_receiver.next().now_or_never()
+        {
+            // Discard any votes for this height or lower by passing None instead of an SHC.
+            self.handle_vote(context, height, None, message, broadcast_channels).await?;
+        }
+        // We call this method to filter out any proposals for previous/current heights (we don't
+        // care about the returned proposals).
+        self.get_current_height_proposals(height);
+        while let Ok(content_receiver) = proposals_receiver.try_next() {
+            self.handle_proposal(context, height, None, content_receiver).await?;
+        }
+
+        Ok(res)
+    }
+
+    async fn run_height_inner(
+        &mut self,
+        context: &mut ContextT,
+        height: BlockNumber,
+        must_observer: bool,
+        broadcast_channels: &mut BroadcastVoteChannel,
+        proposals_receiver: &mut mpsc::Receiver<mpsc::Receiver<ContextT::ProposalPart>>,
+    ) -> Result<RunHeightRes, ConsensusError> {
+        self.report_max_cached_block_number_metric(height);
+        if context.try_sync(height).await {
+            return Ok(RunHeightRes::Sync);
+        }
+
+        let validators = context.validators(height).await;
+        let is_observer = must_observer || !validators.contains(&self.validator_id);
+        info!(
+            "START_HEIGHT: running consensus for height {:?}. is_observer: {}, validators: {:?}",
+            height, is_observer, validators,
+        );
+        CONSENSUS_BLOCK_NUMBER.set_lossy(height.0);
+
+        let mut shc = SingleHeightConsensus::new(
+            height,
+            is_observer,
+            self.validator_id,
+            validators,
+            self.quorum_type,
+            self.timeouts.clone(),
+        );
+        let mut shc_events = FuturesUnordered::new();
+
+        match self.start_height(context, height, &mut shc).await? {
+            ShcReturn::Decision(decision) => {
+                return Ok(RunHeightRes::Decision(decision));
+            }
+            ShcReturn::Tasks(tasks) => {
+                for task in tasks {
+                    shc_events.push(task.run());
+                }
+            }
+        }
+
+        // Loop over incoming proposals, messages, and self-generated events.
+        let clock = DefaultClock;
+        let mut sync_poll_deadline = clock.now() + self.sync_retry_interval;
+        loop {
+            self.report_max_cached_block_number_metric(height);
+            let shc_return = tokio::select! {
+                message = broadcast_channels.broadcasted_messages_receiver.next() => {
+                    self.handle_vote(
+                        context, height, Some(&mut shc), message, broadcast_channels).await?
+                },
+                content_receiver = proposals_receiver.next() => {
+                    self.handle_proposal(context, height, Some(&mut shc), content_receiver).await?
+                },
+                Some(shc_event) = shc_events.next() => {
+                    shc.handle_event(context, shc_event).await?
+                },
+                // Using sleep_until to make sure that we won't restart the sleep due to other
+                // events occurring.
+                _ = sleep_until(sync_poll_deadline, &clock) => {
+                    sync_poll_deadline += self.sync_retry_interval;
+                    if context.try_sync(height).await {
+                        return Ok(RunHeightRes::Sync);
+                    }
+                    continue;
+                }
+            };
+
+            match shc_return {
+                ShcReturn::Decision(decision) => return Ok(RunHeightRes::Decision(decision)),
+                ShcReturn::Tasks(tasks) => {
+                    for task in tasks {
+                        shc_events.push(task.run());
+                    }
+                }
+            }
+        }
+    }
+
+    async fn start_height(
+        &mut self,
+        context: &mut ContextT,
+        height: BlockNumber,
+        shc: &mut SingleHeightConsensus,
+    ) -> Result<ShcReturn, ConsensusError> {
+        CONSENSUS_CACHED_VOTES.set_lossy(self.future_votes.entry(height.0).or_default().len());
+        let mut tasks = match shc.start(context).await? {
+            decision @ ShcReturn::Decision(_) => {
+                // Start should generate either TimeoutProposal (validator) or GetProposal
+                // (proposer). We do not enforce this since the Manager is intentionally not
+                // meant to understand consensus in detail.
+                error!("Decision reached at start of height. {:?}", decision);
+                return Ok(decision);
+            }
+            ShcReturn::Tasks(tasks) => tasks,
+        };
+
+        let cached_proposals = self.get_current_height_proposals(height);
+        trace!("Cached proposals for height {}: {:?}", height, cached_proposals);
+        for (init, content_receiver) in cached_proposals {
+            match shc.handle_proposal(context, init, content_receiver).await? {
+                decision @ ShcReturn::Decision(_) => return Ok(decision),
+                ShcReturn::Tasks(new_tasks) => tasks.extend(new_tasks),
+            }
+        }
+
+        let cached_votes = self.get_current_height_votes(height);
+        trace!("Cached votes for height {}: {:?}", height, cached_votes);
+        for msg in cached_votes {
+            match shc.handle_vote(context, msg).await? {
+                decision @ ShcReturn::Decision(_) => return Ok(decision),
+                ShcReturn::Tasks(new_tasks) => tasks.extend(new_tasks),
+            }
+        }
+
+        Ok(ShcReturn::Tasks(tasks))
+    }
+
+    // Handle a new proposal receiver from the network.
+    // shc - None if the height was just completed and we should drop the message.
+    async fn handle_proposal(
+        &mut self,
+        context: &mut ContextT,
+        height: BlockNumber,
+        shc: Option<&mut SingleHeightConsensus>,
+        content_receiver: Option<mpsc::Receiver<ContextT::ProposalPart>>,
+    ) -> Result<ShcReturn, ConsensusError> {
+        CONSENSUS_PROPOSALS_RECEIVED.increment(1);
+        // Get the first message to verify the init was sent.
+        let Some(mut content_receiver) = content_receiver else {
+            return Err(ConsensusError::InternalNetworkError(
+                "proposal receiver should never be closed".to_string(),
+            ));
+        };
+        let Some(first_part) = content_receiver.try_next().map_err(|_| {
+            ConsensusError::InternalNetworkError(
+                "Stream handler must fill the first message before sending the stream".to_string(),
+            )
+        })?
+        else {
+            return Err(ConsensusError::InternalNetworkError(
+                "Content receiver closed".to_string(),
+            ));
+        };
+        let proposal_init: ProposalInit = first_part.try_into()?;
+
+        match proposal_init.height.cmp(&height) {
+            std::cmp::Ordering::Greater => {
+                debug!("Received a proposal for a future height. {:?}", proposal_init);
+                // Note: new proposals with the same height/round will be ignored.
+                //
+                // TODO(matan): This only works for trusted peers. In the case of possibly
+                // malicious peers this is a potential DoS attack (malicious users can insert
+                // invalid/bad/malicious proposals before "good" nodes can propose).
+                //
+                // When moving to version 1.0, make sure this is addressed.
+                self.cached_proposals
+                    .entry(proposal_init.height.0)
+                    .or_default()
+                    .entry(proposal_init.round)
+                    .or_insert((proposal_init, content_receiver));
+                Ok(ShcReturn::Tasks(Vec::new()))
+            }
+            std::cmp::Ordering::Less => {
+                trace!("Drop proposal from past height. {:?}", proposal_init);
+                Ok(ShcReturn::Tasks(Vec::new()))
+            }
+            std::cmp::Ordering::Equal => match shc {
+                Some(shc) => shc.handle_proposal(context, proposal_init, content_receiver).await,
+                None => {
+                    trace!("Drop proposal from just completed height. {:?}", proposal_init);
+                    Ok(ShcReturn::Tasks(Vec::new()))
+                }
+            },
+        }
+    }
+
+    // Handle a single consensus message.
+    // shc - None if the height was just completed and we should drop the message.
+    async fn handle_vote(
+        &mut self,
+        context: &mut ContextT,
+        height: BlockNumber,
+        shc: Option<&mut SingleHeightConsensus>,
+        vote: Option<(Result<Vote, ProtobufConversionError>, BroadcastedMessageMetadata)>,
+        broadcast_channels: &mut BroadcastVoteChannel,
+    ) -> Result<ShcReturn, ConsensusError> {
+        let message = match vote {
+            None => Err(ConsensusError::InternalNetworkError(
+                "NetworkReceiver should never be closed".to_string(),
+            )),
+            Some((Ok(msg), metadata)) => {
+                // TODO(matan): Hold onto report_sender for use in later errors by SHC.
+                if broadcast_channels
+                    .broadcast_topic_client
+                    .continue_propagation(&metadata)
+                    .now_or_never()
+                    .is_none()
+                {
+                    error!("Unable to send continue_propagation. {:?}", metadata);
+                }
+                Ok(msg)
+            }
+            Some((Err(e), metadata)) => {
+                // Failed to parse the consensus message.
+                if broadcast_channels
+                    .broadcast_topic_client
+                    .report_peer(metadata.clone())
+                    .now_or_never()
+                    .is_none()
+                {
+                    error!("Unable to send report_peer. {:?}", metadata)
+                }
+                Err(e.into())
+            }
+        }?;
+
+        // TODO(matan): We need to figure out an actual caching strategy under 2 constraints:
+        // 1. Malicious - must be capped so a malicious peer can't DoS us.
+        // 2. Parallel proposals - we may send/receive a proposal for (H+1, 0).
+        match message.height.cmp(&height.0) {
+            std::cmp::Ordering::Greater => {
+                debug!("Cache message for a future height. {:?}", message);
+                self.future_votes.entry(message.height).or_default().push(message);
+                Ok(ShcReturn::Tasks(Vec::new()))
+            }
+            std::cmp::Ordering::Less => {
+                trace!("Drop message from past height. {:?}", message);
+                Ok(ShcReturn::Tasks(Vec::new()))
+            }
+            std::cmp::Ordering::Equal => match shc {
+                Some(shc) => shc.handle_vote(context, message).await,
+                None => {
+                    trace!("Drop message from just completed height. {:?}", message);
+                    Ok(ShcReturn::Tasks(Vec::new()))
+                }
+            },
+        }
+    }
+
+    /// Checks if a cached proposal already exists (with the correct height):
+    /// - returns the proposals for the height if they exist and removes them from the cache.
+    /// - cleans up any proposals from earlier heights.
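+    /// (Relies on `cached_proposals` being a `BTreeMap` keyed by height, so `first_entry` always
+    /// yields the lowest cached height.)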
+    fn get_current_height_proposals(
+        &mut self,
+        height: BlockNumber,
+    ) -> Vec<(ProposalInit, mpsc::Receiver<ContextT::ProposalPart>)> {
+        loop {
+            let Some(entry) = self.cached_proposals.first_entry() else {
+                return Vec::new();
+            };
+            match entry.key().cmp(&height.0) {
+                std::cmp::Ordering::Greater => return Vec::new(),
+                std::cmp::Ordering::Equal => {
+                    let round_to_proposals = entry.remove();
+                    return round_to_proposals.into_values().collect();
+                }
+                std::cmp::Ordering::Less => {
+                    entry.remove();
+                }
+            }
+        }
+    }
+
+    /// Filters the cached messages:
+    /// - returns (and removes from stored votes) all of the current height votes.
+    /// - drops votes from earlier heights.
+    /// - retains future votes in the cache.
+    fn get_current_height_votes(&mut self, height: BlockNumber) -> Vec<Vote> {
+        // Depends on `future_votes` being sorted by height.
+        loop {
+            let Some(entry) = self.future_votes.first_entry() else {
+                return Vec::new();
+            };
+            match entry.key().cmp(&height.0) {
+                std::cmp::Ordering::Greater => return Vec::new(),
+                std::cmp::Ordering::Equal => return entry.remove(),
+                std::cmp::Ordering::Less => {
+                    entry.remove();
+                }
+            }
+        }
+    }
+
+    fn report_max_cached_block_number_metric(&self, height: BlockNumber) {
+        // If nothing is cached, use the current height as "max".
+        let max_cached_block_number = self.cached_proposals.keys().max().unwrap_or(&height.0);
+        CONSENSUS_MAX_CACHED_BLOCK_NUMBER.set_lossy(*max_cached_block_number);
+    }
+}
diff --git a/crates/apollo_consensus/src/manager_test.rs b/crates/apollo_consensus/src/manager_test.rs
new file mode 100644
index 00000000000..409b5334ca8
--- /dev/null
+++ b/crates/apollo_consensus/src/manager_test.rs
@@ -0,0 +1,328 @@
+use std::time::Duration;
+use std::vec;
+
+use apollo_network::network_manager::test_utils::{
+    mock_register_broadcast_topic,
+    MockBroadcastedMessagesSender,
+    TestSubscriberChannels,
+};
+use apollo_network_types::network_types::BroadcastedMessageMetadata;
+use apollo_protobuf::consensus::{Vote, DEFAULT_VALIDATOR_ID};
+use apollo_test_utils::{get_rng, GetTestInstance};
+use futures::channel::{mpsc, oneshot};
+use futures::{FutureExt, SinkExt};
+use lazy_static::lazy_static;
+use starknet_api::block::{BlockHash, BlockNumber};
+use starknet_types_core::felt::Felt;
+
+use super::{run_consensus, MultiHeightManager, RunHeightRes};
+use crate::config::TimeoutsConfig;
+use crate::test_utils::{precommit, prevote, proposal_init, MockTestContext, TestProposalPart};
+use crate::types::ValidatorId;
+use crate::votes_threshold::QuorumType;
+use crate::RunConsensusArguments;
+
+lazy_static! {
+    static ref PROPOSER_ID: ValidatorId = DEFAULT_VALIDATOR_ID.into();
+    static ref VALIDATOR_ID: ValidatorId = (DEFAULT_VALIDATOR_ID + 1).into();
+    static ref VALIDATOR_ID_2: ValidatorId = (DEFAULT_VALIDATOR_ID + 2).into();
+    static ref VALIDATOR_ID_3: ValidatorId = (DEFAULT_VALIDATOR_ID + 3).into();
+    static ref TIMEOUTS: TimeoutsConfig = TimeoutsConfig {
+        prevote_timeout: Duration::from_millis(100),
+        precommit_timeout: Duration::from_millis(100),
+        proposal_timeout: Duration::from_millis(100),
+    };
+}
+
+const CHANNEL_SIZE: usize = 10;
+const SYNC_RETRY_INTERVAL: Duration = Duration::from_millis(100);
+
+async fn send(sender: &mut MockBroadcastedMessagesSender<Vote>, msg: Vote) {
+    let broadcasted_message_metadata =
+        BroadcastedMessageMetadata::get_test_instance(&mut get_rng());
+    sender.send((msg, broadcasted_message_metadata)).await.unwrap();
+}
+
+async fn send_proposal(
+    proposal_receiver_sender: &mut mpsc::Sender<mpsc::Receiver<TestProposalPart>>,
+    content: Vec<TestProposalPart>,
+) {
+    let (mut proposal_sender, proposal_receiver) = mpsc::channel(CHANNEL_SIZE);
+    proposal_receiver_sender.send(proposal_receiver).await.unwrap();
+    for item in content {
+        proposal_sender.send(item).await.unwrap();
+    }
+}
+
+fn expect_validate_proposal(context: &mut MockTestContext, block_hash: Felt, times: usize) {
+    context
+        .expect_validate_proposal()
+        .returning(move |_, _, _| {
+            let (block_sender, block_receiver) = oneshot::channel();
+            block_sender.send(BlockHash(block_hash)).unwrap();
+            block_receiver
+        })
+        .times(times);
+}
+
+fn assert_decision(res: RunHeightRes, id: Felt) {
+    match res {
+        RunHeightRes::Decision(decision) => assert_eq!(decision.block, BlockHash(id)),
+        _ => panic!("Expected decision"),
+    }
+}
+
+#[tokio::test]
+async fn manager_multiple_heights_unordered() {
+    let TestSubscriberChannels { mock_network, subscriber_channels } =
+        mock_register_broadcast_topic().unwrap();
+    let mut sender = mock_network.broadcasted_messages_sender;
+
+    let (mut proposal_receiver_sender, mut proposal_receiver_receiver) =
+        mpsc::channel(CHANNEL_SIZE);
+
+    // Send messages for height 2 followed by those for height 1.
+    send_proposal(
+        &mut proposal_receiver_sender,
+        vec![TestProposalPart::Init(proposal_init(2, 0, *PROPOSER_ID))],
+    )
+    .await;
+    send(&mut sender, prevote(Some(Felt::TWO), 2, 0, *PROPOSER_ID)).await;
+    send(&mut sender, precommit(Some(Felt::TWO), 2, 0, *PROPOSER_ID)).await;
+
+    send_proposal(
+        &mut proposal_receiver_sender,
+        vec![TestProposalPart::Init(proposal_init(1, 0, *PROPOSER_ID))],
+    )
+    .await;
+    send(&mut sender, prevote(Some(Felt::ONE), 1, 0, *PROPOSER_ID)).await;
+    send(&mut sender, precommit(Some(Felt::ONE), 1, 0, *PROPOSER_ID)).await;
+
+    let mut context = MockTestContext::new();
+    // Run the manager for height 1.
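+    // (Note: mockall expectations registered with `returning` and no `times` cap allow any
+    // number of calls, so these expectations remain valid for height 2 below as well.)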
+ context.expect_try_sync().returning(|_| false); + expect_validate_proposal(&mut context, Felt::ONE, 1); + context.expect_validators().returning(move |_| vec![*PROPOSER_ID, *VALIDATOR_ID]); + context.expect_proposer().returning(move |_, _| *PROPOSER_ID); + context.expect_set_height_and_round().returning(move |_, _| ()); + context.expect_broadcast().returning(move |_| Ok(())); + + let mut manager = MultiHeightManager::new( + *VALIDATOR_ID, + SYNC_RETRY_INTERVAL, + QuorumType::Byzantine, + TIMEOUTS.clone(), + ); + let mut subscriber_channels = subscriber_channels.into(); + let decision = manager + .run_height( + &mut context, + BlockNumber(1), + false, + &mut subscriber_channels, + &mut proposal_receiver_receiver, + ) + .await + .unwrap(); + assert_decision(decision, Felt::ONE); + + // Run the manager for height 2. + expect_validate_proposal(&mut context, Felt::TWO, 1); + let decision = manager + .run_height( + &mut context, + BlockNumber(2), + false, + &mut subscriber_channels, + &mut proposal_receiver_receiver, + ) + .await + .unwrap(); + assert_decision(decision, Felt::TWO); +} + +#[tokio::test] +async fn run_consensus_sync() { + // Set expectations. + let mut context = MockTestContext::new(); + let (decision_tx, decision_rx) = oneshot::channel(); + + let (mut proposal_receiver_sender, proposal_receiver_receiver) = mpsc::channel(CHANNEL_SIZE); + + expect_validate_proposal(&mut context, Felt::TWO, 1); + context.expect_validators().returning(move |_| vec![*PROPOSER_ID, *VALIDATOR_ID]); + context.expect_proposer().returning(move |_, _| *PROPOSER_ID); + context.expect_set_height_and_round().returning(move |_, _| ()); + context.expect_broadcast().returning(move |_| Ok(())); + context + .expect_decision_reached() + .withf(move |block, votes| *block == BlockHash(Felt::TWO) && votes[0].height == 2) + .return_once(move |_, _| { + decision_tx.send(()).unwrap(); + Ok(()) + }); + context + .expect_try_sync() + .withf(move |height| *height == BlockNumber(1)) + .times(1) + .returning(|_| true); + context.expect_try_sync().returning(|_| false); + + // Send messages for height 2. + send_proposal( + &mut proposal_receiver_sender, + vec![TestProposalPart::Init(proposal_init(2, 0, *PROPOSER_ID))], + ) + .await; + let TestSubscriberChannels { mock_network, subscriber_channels } = + mock_register_broadcast_topic().unwrap(); + let mut network_sender = mock_network.broadcasted_messages_sender; + send(&mut network_sender, prevote(Some(Felt::TWO), 2, 0, *PROPOSER_ID)).await; + send(&mut network_sender, precommit(Some(Felt::TWO), 2, 0, *PROPOSER_ID)).await; + let run_consensus_args = RunConsensusArguments { + start_active_height: BlockNumber(1), + start_observe_height: BlockNumber(1), + validator_id: *VALIDATOR_ID, + consensus_delay: Duration::ZERO, + timeouts: TIMEOUTS.clone(), + sync_retry_interval: SYNC_RETRY_INTERVAL, + quorum_type: QuorumType::Byzantine, + }; + // Start at height 1. + tokio::spawn(async move { + run_consensus( + run_consensus_args, + context, + subscriber_channels.into(), + proposal_receiver_receiver, + ) + .await + }); + + // Decision for height 2. 
+ decision_rx.await.unwrap(); +} + +#[tokio::test] +async fn test_timeouts() { + let TestSubscriberChannels { mock_network, subscriber_channels } = + mock_register_broadcast_topic().unwrap(); + let mut sender = mock_network.broadcasted_messages_sender; + + let (mut proposal_receiver_sender, mut proposal_receiver_receiver) = + mpsc::channel(CHANNEL_SIZE); + + send_proposal( + &mut proposal_receiver_sender, + vec![TestProposalPart::Init(proposal_init(1, 0, *PROPOSER_ID))], + ) + .await; + send(&mut sender, prevote(None, 1, 0, *VALIDATOR_ID_2)).await; + send(&mut sender, prevote(None, 1, 0, *VALIDATOR_ID_3)).await; + send(&mut sender, precommit(None, 1, 0, *VALIDATOR_ID_2)).await; + send(&mut sender, precommit(None, 1, 0, *VALIDATOR_ID_3)).await; + + let mut context = MockTestContext::new(); + context.expect_set_height_and_round().returning(move |_, _| ()); + expect_validate_proposal(&mut context, Felt::ONE, 2); + context + .expect_validators() + .returning(move |_| vec![*PROPOSER_ID, *VALIDATOR_ID, *VALIDATOR_ID_2, *VALIDATOR_ID_3]); + context.expect_proposer().returning(move |_, _| *PROPOSER_ID); + context.expect_try_sync().returning(|_| false); + + let (timeout_send, timeout_receive) = oneshot::channel(); + // Node handled Timeout events and responded with NIL vote. + context + .expect_broadcast() + .times(1) + .withf(move |msg: &Vote| msg == &prevote(None, 1, 1, *VALIDATOR_ID)) + .return_once(move |_| { + timeout_send.send(()).unwrap(); + Ok(()) + }); + context.expect_broadcast().returning(move |_| Ok(())); + + let mut manager = MultiHeightManager::new( + *VALIDATOR_ID, + SYNC_RETRY_INTERVAL, + QuorumType::Byzantine, + TIMEOUTS.clone(), + ); + let manager_handle = tokio::spawn(async move { + let decision = manager + .run_height( + &mut context, + BlockNumber(1), + false, + &mut subscriber_channels.into(), + &mut proposal_receiver_receiver, + ) + .await + .unwrap(); + assert_decision(decision, Felt::ONE); + }); + + // Wait for the timeout to be triggered. + timeout_receive.await.unwrap(); + // Show that after the timeout is triggered we can still precommit in favor of the block and + // reach a decision. + send_proposal( + &mut proposal_receiver_sender, + vec![TestProposalPart::Init(proposal_init(1, 1, *PROPOSER_ID))], + ) + .await; + send(&mut sender, prevote(Some(Felt::ONE), 1, 1, *PROPOSER_ID)).await; + send(&mut sender, prevote(Some(Felt::ONE), 1, 1, *VALIDATOR_ID_2)).await; + send(&mut sender, prevote(Some(Felt::ONE), 1, 1, *VALIDATOR_ID_3)).await; + send(&mut sender, precommit(Some(Felt::ONE), 1, 1, *VALIDATOR_ID_2)).await; + send(&mut sender, precommit(Some(Felt::ONE), 1, 1, *VALIDATOR_ID_3)).await; + + manager_handle.await.unwrap(); +} + +#[tokio::test] +async fn timely_message_handling() { + // TODO(matan): Make run_height more generic so don't need mock network? + // Check that, even when sync is immediately ready, consensus still handles queued messages. + let mut context = MockTestContext::new(); + context.expect_try_sync().returning(|_| true); + + // Send messages + let (mut proposal_receiver_sender, mut proposal_receiver_receiver) = mpsc::channel(0); + let (mut content_sender, content_receiver) = mpsc::channel(0); + content_sender.try_send(TestProposalPart::Init(proposal_init(1, 0, *PROPOSER_ID))).unwrap(); + proposal_receiver_sender.try_send(content_receiver).unwrap(); + + // Fill up the sender. 
+ let TestSubscriberChannels { mock_network, subscriber_channels } = + mock_register_broadcast_topic().unwrap(); + let mut subscriber_channels = subscriber_channels.into(); + let mut vote_sender = mock_network.broadcasted_messages_sender; + let metadata = BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + let vote = prevote(Some(Felt::TWO), 1, 0, *PROPOSER_ID); + // Fill up the buffer. + while vote_sender.send((vote.clone(), metadata.clone())).now_or_never().is_some() {} + + let mut manager = MultiHeightManager::new( + *VALIDATOR_ID, + SYNC_RETRY_INTERVAL, + QuorumType::Byzantine, + TIMEOUTS.clone(), + ); + let res = manager + .run_height( + &mut context, + BlockNumber(1), + false, + &mut subscriber_channels, + &mut proposal_receiver_receiver, + ) + .await; + assert_eq!(res, Ok(RunHeightRes::Sync)); + + // Try sending another proposal, to check that, even though sync was known at the beginning of + // the height and so consensus was not actually run, the inbound channels are cleared. + proposal_receiver_sender.try_send(mpsc::channel(1).1).unwrap(); + assert!(vote_sender.send((vote.clone(), metadata.clone())).now_or_never().is_some()); +} diff --git a/crates/apollo_consensus/src/metrics.rs b/crates/apollo_consensus/src/metrics.rs new file mode 100644 index 00000000000..113fbd9153d --- /dev/null +++ b/crates/apollo_consensus/src/metrics.rs @@ -0,0 +1,73 @@ +use apollo_metrics::{define_metrics, generate_permutation_labels}; +use strum::{EnumVariantNames, VariantNames}; +use strum_macros::{EnumIter, IntoStaticStr}; + +define_metrics!( + Consensus => { + MetricGauge { CONSENSUS_BLOCK_NUMBER, "consensus_block_number", "The block number consensus is working to decide" }, + MetricGauge { CONSENSUS_ROUND, "consensus_round", "The round of the state machine"}, + MetricGauge { CONSENSUS_MAX_CACHED_BLOCK_NUMBER, "consensus_max_cached_block_number", "How many blocks after current are cached"}, + MetricGauge { CONSENSUS_CACHED_VOTES, "consensus_cached_votes", "How many votes are cached when starting to work on a new block number" }, + MetricCounter { CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS, "consensus_decisions_reached_by_consensus", "The total number of decisions reached by way of consensus", init=0}, + MetricCounter { CONSENSUS_DECISIONS_REACHED_BY_SYNC, "consensus_decisions_reached_by_sync", "The total number of decisions reached by way of sync", init=0}, + MetricCounter { CONSENSUS_PROPOSALS_RECEIVED, "consensus_proposals_received", "The total number of proposals received", init=0}, + MetricCounter { CONSENSUS_PROPOSALS_VALID_INIT, "consensus_proposals_valid_init", "The total number of proposals received with a valid init", init=0}, + MetricCounter { CONSENSUS_PROPOSALS_VALIDATED, "consensus_proposals_validated", "The total number of complete, valid proposals received", init=0}, + MetricCounter { CONSENSUS_PROPOSALS_INVALID, "consensus_proposals_invalid", "The total number of proposals that failed validation", init=0}, + MetricCounter { CONSENSUS_BUILD_PROPOSAL_TOTAL, "consensus_build_proposal_total", "The total number of proposals built", init=0}, + MetricCounter { CONSENSUS_BUILD_PROPOSAL_FAILED, "consensus_build_proposal_failed", "The number of proposals that failed to be built", init=0}, + MetricCounter { CONSENSUS_REPROPOSALS, "consensus_reproposals", "The number of reproposals sent", init=0}, + MetricCounter { CONSENSUS_NEW_VALUE_LOCKS, "consensus_new_value_locks", "The number of times consensus has attained a lock on a new value", init=0}, + MetricCounter { CONSENSUS_HELD_LOCKS, 
"consensus_held_locks", "The number of times consensus progressed to a new round while holding a lock", init=0}, + MetricCounter { CONSENSUS_OUTBOUND_STREAM_STARTED, "consensus_outbound_stream_started", "The total number of outbound streams started", init=0 }, + MetricCounter { CONSENSUS_OUTBOUND_STREAM_FINISHED, "consensus_outbound_stream_finished", "The total number of outbound streams finished", init=0 }, + MetricCounter { CONSENSUS_INBOUND_STREAM_STARTED, "consensus_inbound_stream_started", "The total number of inbound streams started", init=0 }, + MetricCounter { CONSENSUS_INBOUND_STREAM_EVICTED, "consensus_inbound_stream_evicted", "The total number of inbound streams evicted due to cache capacity", init=0 }, + MetricCounter { CONSENSUS_INBOUND_STREAM_FINISHED, "consensus_inbound_stream_finished", "The total number of inbound streams finished", init=0 }, + // TODO(Matan): remove this metric. + MetricCounter { CONSENSUS_ROUND_ABOVE_ZERO, "consensus_round_above_zero", "The number of times the consensus round has increased above zero", init=0 }, + MetricCounter { CONSENSUS_CONFLICTING_VOTES, "consensus_conflicting_votes", "The number of times consensus has received conflicting votes", init=0 }, + LabeledMetricCounter { CONSENSUS_TIMEOUTS, "consensus_timeouts", "The number of times consensus has timed out", init=0, labels = CONSENSUS_TIMEOUT_LABELS }, + }, +); + +pub const LABEL_NAME_TIMEOUT_REASON: &str = "timeout_reason"; + +#[derive(IntoStaticStr, EnumIter, EnumVariantNames)] +#[strum(serialize_all = "snake_case")] +pub(crate) enum TimeoutReason { + Propose, + Prevote, + Precommit, +} + +generate_permutation_labels! { + CONSENSUS_TIMEOUT_LABELS, + (LABEL_NAME_TIMEOUT_REASON, TimeoutReason), +} + +pub(crate) fn register_metrics() { + CONSENSUS_BLOCK_NUMBER.register(); + CONSENSUS_ROUND.register(); + CONSENSUS_MAX_CACHED_BLOCK_NUMBER.register(); + CONSENSUS_CACHED_VOTES.register(); + CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS.register(); + CONSENSUS_DECISIONS_REACHED_BY_SYNC.register(); + CONSENSUS_PROPOSALS_RECEIVED.register(); + CONSENSUS_PROPOSALS_VALID_INIT.register(); + CONSENSUS_PROPOSALS_VALIDATED.register(); + CONSENSUS_PROPOSALS_INVALID.register(); + CONSENSUS_BUILD_PROPOSAL_TOTAL.register(); + CONSENSUS_BUILD_PROPOSAL_FAILED.register(); + CONSENSUS_NEW_VALUE_LOCKS.register(); + CONSENSUS_HELD_LOCKS.register(); + CONSENSUS_REPROPOSALS.register(); + CONSENSUS_INBOUND_STREAM_STARTED.register(); + CONSENSUS_INBOUND_STREAM_EVICTED.register(); + CONSENSUS_INBOUND_STREAM_FINISHED.register(); + CONSENSUS_OUTBOUND_STREAM_STARTED.register(); + CONSENSUS_OUTBOUND_STREAM_FINISHED.register(); + CONSENSUS_ROUND_ABOVE_ZERO.register(); + CONSENSUS_CONFLICTING_VOTES.register(); + CONSENSUS_TIMEOUTS.register(); +} diff --git a/crates/sequencing/papyrus_consensus/src/simulation_network_receiver.rs b/crates/apollo_consensus/src/simulation_network_receiver.rs similarity index 78% rename from crates/sequencing/papyrus_consensus/src/simulation_network_receiver.rs rename to crates/apollo_consensus/src/simulation_network_receiver.rs index daa5da4bb54..301a92fadc0 100644 --- a/crates/sequencing/papyrus_consensus/src/simulation_network_receiver.rs +++ b/crates/apollo_consensus/src/simulation_network_receiver.rs @@ -7,13 +7,12 @@ use std::hash::{Hash, Hasher}; use std::num::NonZeroUsize; use std::task::Poll; +use apollo_network::network_manager::BroadcastTopicServer; +use apollo_network_types::network_types::BroadcastedMessageMetadata; +use apollo_protobuf::consensus::Vote; +use 
apollo_protobuf::converters::ProtobufConversionError;
 use futures::{Stream, StreamExt};
 use lru::LruCache;
-use papyrus_network::network_manager::BroadcastTopicServer;
-use papyrus_network_types::network_types::BroadcastedMessageMetadata;
-use papyrus_protobuf::consensus::ConsensusMessage;
-use papyrus_protobuf::converters::ProtobufConversionError;
-use starknet_api::block::BlockHash;
 use starknet_api::core::{ContractAddress, PatriciaKey};
 use tracing::{debug, instrument};
 
@@ -25,10 +24,10 @@ use tracing::{debug, instrument};
 /// messages all the same, meaning that a dropped message would always be dropped. To avoid this we
 /// have the cache, which allows us to treat resends of a specific message differently.
 pub struct NetworkReceiver {
-    pub broadcasted_messages_receiver: BroadcastTopicServer<ConsensusMessage>,
+    pub broadcasted_messages_receiver: BroadcastTopicServer<Vote>,
     // Cache is used so that repeat sends of a message can be processed differently. For example,
     // if a message is dropped resending it should result in a new decision.
-    pub cache: LruCache<ConsensusMessage, u32>,
+    pub cache: LruCache<Vote, u32>,
     pub seed: u64,
     // Probability of dropping a message [0, 1].
     pub drop_probability: f64,
@@ -46,7 +45,7 @@ impl NetworkReceiver {
     /// - `drop_probability`: Probability of dropping a message [0, 1].
     /// - `invalid_probability`: Probability of making a message invalid [0, 1].
     pub fn new(
-        broadcasted_messages_receiver: BroadcastTopicServer<ConsensusMessage>,
+        broadcasted_messages_receiver: BroadcastTopicServer<Vote>,
         cache_size: usize,
         seed: u64,
         drop_probability: f64,
@@ -63,13 +62,13 @@ impl NetworkReceiver {
         }
     }
 
-    /// Determine how to handle a message. If None then the message is silently droppeds. If some,
+    /// Determine how to handle a message. If None then the message is silently dropped. If some,
     /// the returned message is what is sent to the consensus crate.
     ///
     /// Applies `drop_probability` followed by `invalid_probability`. So the probability of an
    /// invalid message is `(1- drop_probability) * invalid_probability`.
     #[instrument(skip(self), level = "debug")]
-    pub fn filter_msg(&mut self, msg: ConsensusMessage) -> Option<ConsensusMessage> {
+    pub fn filter_msg(&mut self, msg: Vote) -> Option<Vote> {
         let msg_hash = self.calculate_msg_hash(&msg);
 
         if self.should_drop_msg(msg_hash) {
@@ -80,7 +79,7 @@ impl NetworkReceiver {
         Some(self.maybe_invalidate_msg(msg, msg_hash))
     }
 
-    fn calculate_msg_hash(&mut self, msg: &ConsensusMessage) -> u64 {
+    fn calculate_msg_hash(&mut self, msg: &Vote) -> u64 {
         let count = if let Some(count) = self.cache.get_mut(msg) {
             *count += 1;
             *count
@@ -102,31 +101,20 @@ impl NetworkReceiver {
         prob <= self.drop_probability
     }
 
-    fn maybe_invalidate_msg(
-        &mut self,
-        mut msg: ConsensusMessage,
-        msg_hash: u64,
-    ) -> ConsensusMessage {
+    fn maybe_invalidate_msg(&mut self, mut msg: Vote, msg_hash: u64) -> Vote {
         #[allow(clippy::as_conversions)]
         if (msg_hash as f64) / (u64::MAX as f64) > self.invalid_probability {
             return msg;
         }
         debug!("Invalidating message");
         // TODO(matan): Allow for invalid votes based on signature.
- match msg { - ConsensusMessage::Proposal(ref mut proposal) => { - proposal.block_hash = BlockHash(proposal.block_hash.0 + 1); - } - ConsensusMessage::Vote(ref mut vote) => { - vote.voter = ContractAddress(PatriciaKey::from(msg_hash)); - } - } + msg.voter = ContractAddress(PatriciaKey::from(msg_hash)); msg } } impl Stream for NetworkReceiver { - type Item = (Result, BroadcastedMessageMetadata); + type Item = (Result, BroadcastedMessageMetadata); fn poll_next( mut self: std::pin::Pin<&mut Self>, diff --git a/crates/apollo_consensus/src/simulation_network_receiver_test.rs b/crates/apollo_consensus/src/simulation_network_receiver_test.rs new file mode 100644 index 00000000000..a3b7eb9dc48 --- /dev/null +++ b/crates/apollo_consensus/src/simulation_network_receiver_test.rs @@ -0,0 +1,80 @@ +use apollo_network::network_manager::test_utils::{ + mock_register_broadcast_topic, + TestSubscriberChannels, +}; +use apollo_network_types::network_types::BroadcastedMessageMetadata; +use apollo_protobuf::consensus::Vote; +use apollo_test_utils::{get_rng, GetTestInstance}; +use futures::{SinkExt, StreamExt}; +use test_case::test_case; + +use super::NetworkReceiver; + +const CACHE_SIZE: usize = 10; +const SEED: u64 = 123; +const DROP_PROBABILITY: f64 = 0.5; +const INVALID_PROBABILITY: f64 = 0.5; + +#[test_case(true; "distinct_vote")] +#[test_case(false; "repeat_vote")] +#[tokio::test] +async fn test_invalid(distinct_messages: bool) { + let TestSubscriberChannels { subscriber_channels, mut mock_network } = + mock_register_broadcast_topic().unwrap(); + let mut receiver = NetworkReceiver::new( + subscriber_channels.broadcasted_messages_receiver, + CACHE_SIZE, + SEED, + 0.0, + INVALID_PROBABILITY, + ); + let mut invalid_messages = 0; + + for height in 0..1000 { + let msg = Vote { height: if distinct_messages { height } else { 0 }, ..Default::default() }; + let broadcasted_message_metadata = + BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + mock_network + .broadcasted_messages_sender + .send((msg.clone(), broadcasted_message_metadata)) + .await + .unwrap(); + if receiver.next().await.unwrap().0.unwrap() != msg { + invalid_messages += 1; + } + } + assert!((400..=600).contains(&invalid_messages), "num_invalid={invalid_messages}"); +} + +#[test_case(true; "distinct_vote")] +#[test_case(false; "repeat_vote")] +#[tokio::test] +async fn test_drops(distinct_messages: bool) { + let TestSubscriberChannels { subscriber_channels, mut mock_network } = + mock_register_broadcast_topic().unwrap(); + let mut receiver = NetworkReceiver::new( + subscriber_channels.broadcasted_messages_receiver, + CACHE_SIZE, + SEED, + DROP_PROBABILITY, + 0.0, + ); + let mut num_received = 0; + + for height in 0..1000 { + let msg = Vote { height: if distinct_messages { height } else { 0 }, ..Default::default() }; + let broadcasted_message_metadata = + BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + mock_network + .broadcasted_messages_sender + .send((msg.clone(), broadcasted_message_metadata)) + .await + .unwrap(); + } + drop(mock_network.broadcasted_messages_sender); + + while receiver.next().await.is_some() { + num_received += 1; + } + assert!((400..=600).contains(&num_received), "num_received={num_received}"); +} diff --git a/crates/apollo_consensus/src/single_height_consensus.rs b/crates/apollo_consensus/src/single_height_consensus.rs new file mode 100644 index 00000000000..c90a2659fcf --- /dev/null +++ b/crates/apollo_consensus/src/single_height_consensus.rs @@ -0,0 +1,636 @@ +//! 
Run a single height of consensus. +//! +//! [`SingleHeightConsensus`] (SHC) - run consensus for a single height. +//! +//! [`ShcTask`] - a task which should be run without blocking consensus. +//! +//! [`ShcEvent`] - an event, generated from an `ShcTask`, which should be handled by the SHC. + +#[cfg(test)] +#[path = "single_height_consensus_test.rs"] +mod single_height_consensus_test; + +use std::collections::hash_map::Entry; +use std::collections::{HashMap, VecDeque}; +use std::time::Duration; + +use apollo_protobuf::consensus::{ProposalInit, Vote, VoteType}; +#[cfg(test)] +use enum_as_inner::EnumAsInner; +use futures::channel::{mpsc, oneshot}; +use serde::{Deserialize, Serialize}; +use starknet_api::block::BlockNumber; +use tracing::{debug, info, instrument, trace, warn}; + +use crate::config::TimeoutsConfig; +use crate::metrics::{ + CONSENSUS_BUILD_PROPOSAL_FAILED, + CONSENSUS_BUILD_PROPOSAL_TOTAL, + CONSENSUS_CONFLICTING_VOTES, + CONSENSUS_PROPOSALS_INVALID, + CONSENSUS_PROPOSALS_VALIDATED, + CONSENSUS_PROPOSALS_VALID_INIT, + CONSENSUS_REPROPOSALS, +}; +use crate::state_machine::{StateMachine, StateMachineEvent}; +use crate::types::{ + ConsensusContext, + ConsensusError, + Decision, + ProposalCommitment, + Round, + ValidatorId, +}; +use crate::votes_threshold::QuorumType; + +/// The SHC can either inform the manager of a decision or return tasks that should be run without +/// blocking further calls to itself. +#[derive(Debug, PartialEq)] +#[cfg_attr(test, derive(EnumAsInner))] +pub enum ShcReturn { + Tasks(Vec<ShcTask>), + Decision(Decision), +} + +/// Events produced from tasks for the SHC to handle. +#[derive(Debug, Clone)] +pub enum ShcEvent { + TimeoutPropose(StateMachineEvent), + TimeoutPrevote(StateMachineEvent), + TimeoutPrecommit(StateMachineEvent), + Prevote(StateMachineEvent), + Precommit(StateMachineEvent), + BuildProposal(StateMachineEvent), + // TODO(Matan): Replace ProposalCommitment with the unvalidated signature from the proposer. + ValidateProposal(StateMachineEvent), +} + +/// A task which should be run without blocking calls to SHC. +#[derive(Debug)] +#[cfg_attr(test, derive(EnumAsInner))] +pub enum ShcTask { + TimeoutPropose(Duration, StateMachineEvent), + TimeoutPrevote(Duration, StateMachineEvent), + TimeoutPrecommit(Duration, StateMachineEvent), + Prevote(Duration, StateMachineEvent), + Precommit(Duration, StateMachineEvent), + /// Building a proposal is handled in 3 stages: + /// 1. The SHC requests a block to be built from the context. + /// 2. SHC returns, allowing the context to build the block while the Manager awaits the result + /// without blocking consensus. + /// 3. Once building is complete, the manager returns the built block to the SHC as an event, + /// which can be sent to the SM. + /// * During this process, the SM is frozen; it will accept and buffer other events, only + /// processing them once it receives the built proposal. + BuildProposal(Round, oneshot::Receiver<ProposalCommitment>), + /// Validating a proposal is handled in 3 stages: + /// 1. The SHC validates `ProposalInit`, then starts block validation within the context. + /// 2. SHC returns, allowing the context to validate the content while the Manager awaits the + /// result without blocking consensus. + /// 3. Once validation is complete, the manager returns the validated proposal to the SHC as an + /// event, which can be sent to the SM.
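// Illustrative sketch (not part of the diff): the non-blocking 3-stage flow the
// docs above describe, reduced to its oneshot-channel skeleton (assumes the
// `futures` crate, which this module already imports). Stage 1 hands out a
// receiver, stage 2 completes elsewhere without blocking consensus, and stage 3
// awaits the result and re-injects it as an event.
async fn build_flow_skeleton() {
    let (sender, receiver) = futures::channel::oneshot::channel::<u64>();
    // Stage 2: the context finishes building/validating in the background.
    sender.send(42).expect("receiver still alive");
    // Stage 3: the manager awaits the receiver (held inside an ShcTask) and
    // turns the result into an ShcEvent for the state machine.
    let proposal_id = receiver.await.ok();
    assert_eq!(proposal_id, Some(42));
}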
+ ValidateProposal(ProposalInit, oneshot::Receiver<ProposalCommitment>), +} + +impl PartialEq for ShcTask { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (ShcTask::TimeoutPropose(d1, e1), ShcTask::TimeoutPropose(d2, e2)) + | (ShcTask::TimeoutPrevote(d1, e1), ShcTask::TimeoutPrevote(d2, e2)) + | (ShcTask::TimeoutPrecommit(d1, e1), ShcTask::TimeoutPrecommit(d2, e2)) + | (ShcTask::Prevote(d1, e1), ShcTask::Prevote(d2, e2)) + | (ShcTask::Precommit(d1, e1), ShcTask::Precommit(d2, e2)) => d1 == d2 && e1 == e2, + (ShcTask::BuildProposal(r1, _), ShcTask::BuildProposal(r2, _)) => r1 == r2, + (ShcTask::ValidateProposal(pi1, _), ShcTask::ValidateProposal(pi2, _)) => pi1 == pi2, + _ => false, + } + } +} + +impl ShcTask { + pub async fn run(self) -> ShcEvent { + trace!("Running task: {:?}", self); + match self { + ShcTask::TimeoutPropose(duration, event) => { + tokio::time::sleep(duration).await; + ShcEvent::TimeoutPropose(event) + } + ShcTask::TimeoutPrevote(duration, event) => { + tokio::time::sleep(duration).await; + ShcEvent::TimeoutPrevote(event) + } + ShcTask::TimeoutPrecommit(duration, event) => { + tokio::time::sleep(duration).await; + ShcEvent::TimeoutPrecommit(event) + } + ShcTask::Prevote(duration, event) => { + tokio::time::sleep(duration).await; + ShcEvent::Prevote(event) + } + ShcTask::Precommit(duration, event) => { + tokio::time::sleep(duration).await; + ShcEvent::Precommit(event) + } + ShcTask::BuildProposal(round, receiver) => { + let proposal_id = receiver.await.ok(); + ShcEvent::BuildProposal(StateMachineEvent::GetProposal(proposal_id, round)) + } + ShcTask::ValidateProposal(init, block_receiver) => { + // TODO(Asmaa): Consider if we want to differentiate between an interrupt and other + // failures. + let proposal_id = block_receiver.await.ok(); + ShcEvent::ValidateProposal(StateMachineEvent::Proposal( + proposal_id, + init.round, + init.valid_round, + )) + } + } + } +} + +/// Represents a single height of consensus. It is responsible for mapping between the idealized +/// view of consensus represented in the StateMachine and the real-world implementation. +/// +/// Example: +/// - Timeouts: the SM returns a timeout event, which the SHC then maps to a task that can be run +/// by the Manager. The Manager, though, is unaware of the specific task, as it has minimal +/// consensus logic. +/// +/// Each height is begun with a call to `start`, with no further calls to it. +/// +/// SHC is not a top-level task; it is called directly and returns values (it doesn't directly run +/// sub tasks). SHC does have side effects, such as sending messages to the network via the +/// context. +#[derive(Serialize, Deserialize)] +pub(crate) struct SingleHeightConsensus { + height: BlockNumber, + validators: Vec<ValidatorId>, + id: ValidatorId, + timeouts: TimeoutsConfig, + state_machine: StateMachine, + proposals: HashMap<Round, Option<ProposalCommitment>>, + prevotes: HashMap<(Round, ValidatorId), Vote>, + precommits: HashMap<(Round, ValidatorId), Vote>, + last_prevote: Option<Vote>, + last_precommit: Option<Vote>, +} + +impl SingleHeightConsensus { + pub(crate) fn new( + height: BlockNumber, + is_observer: bool, + id: ValidatorId, + validators: Vec<ValidatorId>, + quorum_type: QuorumType, + timeouts: TimeoutsConfig, + ) -> Self { + // TODO(matan): Use actual weights, not just `len`.
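// Illustrative sketch (not part of the diff): every timeout variant of
// `ShcTask::run` above follows the same shape, so a reduced form is just "sleep,
// then hand the buffered event back" (assumes a tokio runtime, as the real code
// uses tokio::time::sleep).
async fn timeout_task<E>(duration: std::time::Duration, event: E) -> E {
    tokio::time::sleep(duration).await;
    event
}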
+ let n_validators = + u64::try_from(validators.len()).expect("Should have way less than u64::MAX validators"); + let state_machine = StateMachine::new(id, n_validators, is_observer, quorum_type); + Self { + height, + validators, + id, + timeouts, + state_machine, + proposals: HashMap::new(), + prevotes: HashMap::new(), + precommits: HashMap::new(), + last_prevote: None, + last_precommit: None, + } + } + + #[instrument(skip_all)] + pub(crate) async fn start<ContextT: ConsensusContext>( + &mut self, + context: &mut ContextT, + ) -> Result<ShcReturn, ConsensusError> { + context.set_height_and_round(self.height, self.state_machine.round()).await; + let leader_fn = |round: Round| -> ValidatorId { context.proposer(self.height, round) }; + let events = self.state_machine.start(&leader_fn); + let ret = self.handle_state_machine_events(context, events).await; + // Defensive programming. We don't expect the height and round to have changed from the + // start of this method. + context.set_height_and_round(self.height, self.state_machine.round()).await; + ret + } + + /// Process the proposal init and initiate block validation. See [`ShcTask::ValidateProposal`] + /// for more details on the full proposal flow. + #[instrument(skip_all)] + pub(crate) async fn handle_proposal<ContextT: ConsensusContext>( + &mut self, + context: &mut ContextT, + init: ProposalInit, + p2p_messages_receiver: mpsc::Receiver<ContextT::ProposalPart>, + ) -> Result<ShcReturn, ConsensusError> { + debug!("Received {init:?}"); + let proposer_id = context.proposer(self.height, init.round); + if init.height != self.height { + warn!("Invalid proposal height: expected {:?}, got {:?}", self.height, init.height); + return Ok(ShcReturn::Tasks(Vec::new())); + } + if init.proposer != proposer_id { + warn!("Invalid proposer: expected {:?}, got {:?}", proposer_id, init.proposer); + return Ok(ShcReturn::Tasks(Vec::new())); + } + let Entry::Vacant(proposal_entry) = self.proposals.entry(init.round) else { + warn!("Round {} already has a proposal, ignoring", init.round); + return Ok(ShcReturn::Tasks(Vec::new())); + }; + let timeout = self.timeouts.proposal_timeout; + info!( + "Accepting {init:?}. node_round: {}, timeout: {timeout:?}", + self.state_machine.round() + ); + CONSENSUS_PROPOSALS_VALID_INIT.increment(1); + + // Since validating the proposal is non-blocking, we want to avoid validating the same round + // twice in parallel. This could be caused by a network repeat or a malicious spam attack. + proposal_entry.insert(None); + let block_receiver = context.validate_proposal(init, timeout, p2p_messages_receiver).await; + context.set_height_and_round(self.height, self.state_machine.round()).await; + Ok(ShcReturn::Tasks(vec![ShcTask::ValidateProposal(init, block_receiver)])) + } + + #[instrument(skip_all)] + pub async fn handle_event<ContextT: ConsensusContext>( + &mut self, + context: &mut ContextT, + event: ShcEvent, + ) -> Result<ShcReturn, ConsensusError> { + trace!("Received ShcEvent: {:?}", event); + let ret = match event { + ShcEvent::TimeoutPropose(event) + | ShcEvent::TimeoutPrevote(event) + | ShcEvent::TimeoutPrecommit(event) => self.handle_timeout(context, event).await, + ShcEvent::Prevote(StateMachineEvent::Prevote(proposal_id, round)) => { + let Some(last_vote) = &self.last_prevote else { + return Err(ConsensusError::InternalInconsistency( + "No prevote to send".to_string(), + )); + }; + if last_vote.round > round { + // Only replay the newest prevote.
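// Illustrative sketch (not part of the diff): the guard above in reduced form.
// A scheduled rebroadcast is skipped when a newer vote (for a higher round) has
// been sent since the task was created, so only the newest vote keeps being
// resent.
fn should_rebroadcast(last_sent_round: u32, scheduled_round: u32) -> bool {
    last_sent_round <= scheduled_round
}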
+ return Ok(ShcReturn::Tasks(Vec::new())); + } + trace!("Rebroadcasting {last_vote:?}"); + context.broadcast(last_vote.clone()).await?; + Ok(ShcReturn::Tasks(vec![ShcTask::Prevote( + self.timeouts.prevote_timeout, + StateMachineEvent::Prevote(proposal_id, round), + )])) + } + ShcEvent::Precommit(StateMachineEvent::Precommit(proposal_id, round)) => { + let Some(last_vote) = &self.last_precommit else { + return Err(ConsensusError::InternalInconsistency( + "No precommit to send".to_string(), + )); + }; + if last_vote.round > round { + // Only replay the newest precommit. + return Ok(ShcReturn::Tasks(Vec::new())); + } + debug!("Rebroadcasting {last_vote:?}"); + context.broadcast(last_vote.clone()).await?; + Ok(ShcReturn::Tasks(vec![ShcTask::Precommit( + self.timeouts.precommit_timeout, + StateMachineEvent::Precommit(proposal_id, round), + )])) + } + ShcEvent::ValidateProposal(StateMachineEvent::Proposal( + proposal_id, + round, + valid_round, + )) => { + let leader_fn = + |round: Round| -> ValidatorId { context.proposer(self.height, round) }; + debug!( + proposer = %leader_fn(round), + %round, + ?valid_round, + proposal_commitment = ?proposal_id, + node_round = self.state_machine.round(), + "Validated proposal.", + ); + if proposal_id.is_some() { + CONSENSUS_PROPOSALS_VALIDATED.increment(1); + } else { + CONSENSUS_PROPOSALS_INVALID.increment(1); + } + + // Retaining the entry for this round prevents us from receiving another proposal on + // this round. While this prevents spam attacks it also prevents re-receiving after + // a network issue. + let old = self.proposals.insert(round, proposal_id); + let old = old.unwrap_or_else(|| { + panic!("Proposal entry should exist from init. round={round}") + }); + assert!(old.is_none(), "Proposal already exists for this round={round}. {old:?}"); + let sm_events = self.state_machine.handle_event( + StateMachineEvent::Proposal(proposal_id, round, valid_round), + &leader_fn, + ); + self.handle_state_machine_events(context, sm_events).await + } + ShcEvent::BuildProposal(StateMachineEvent::GetProposal(proposal_id, round)) => { + if proposal_id.is_none() { + CONSENSUS_BUILD_PROPOSAL_FAILED.increment(1); + } + let old = self.proposals.insert(round, proposal_id); + assert!(old.is_none(), "There should be no entry for round {round} when proposing"); + assert_eq!( + round, + self.state_machine.round(), + "State machine should not progress while awaiting proposal" + ); + debug!(%round, proposal_commitment = ?proposal_id, "Built proposal."); + let leader_fn = + |round: Round| -> ValidatorId { context.proposer(self.height, round) }; + let sm_events = self + .state_machine + .handle_event(StateMachineEvent::GetProposal(proposal_id, round), &leader_fn); + self.handle_state_machine_events(context, sm_events).await + } + _ => unimplemented!("Unexpected event: {:?}", event), + }; + context.set_height_and_round(self.height, self.state_machine.round()).await; + ret + } + + async fn handle_timeout( + &mut self, + context: &mut ContextT, + event: StateMachineEvent, + ) -> Result { + let leader_fn = |round: Round| -> ValidatorId { context.proposer(self.height, round) }; + let sm_events = self.state_machine.handle_event(event, &leader_fn); + self.handle_state_machine_events(context, sm_events).await + } + + /// Handle vote messages from peer nodes. 
+ #[instrument(skip_all)] + pub(crate) async fn handle_vote( + &mut self, + context: &mut ContextT, + vote: Vote, + ) -> Result { + trace!("Received {:?}", vote); + if !self.validators.contains(&vote.voter) { + debug!("Ignoring vote from non validator: vote={:?}", vote); + return Ok(ShcReturn::Tasks(Vec::new())); + } + + let (votes, sm_vote) = match vote.vote_type { + VoteType::Prevote => { + (&mut self.prevotes, StateMachineEvent::Prevote(vote.block_hash, vote.round)) + } + VoteType::Precommit => { + (&mut self.precommits, StateMachineEvent::Precommit(vote.block_hash, vote.round)) + } + }; + + match votes.entry((vote.round, vote.voter)) { + Entry::Vacant(entry) => { + entry.insert(vote.clone()); + } + Entry::Occupied(entry) => { + let old = entry.get(); + if old.block_hash != vote.block_hash { + warn!("Conflicting votes: old={:?}, new={:?}", old, vote); + CONSENSUS_CONFLICTING_VOTES.increment(1); + return Ok(ShcReturn::Tasks(Vec::new())); + } else { + // Replay, ignore. + return Ok(ShcReturn::Tasks(Vec::new())); + } + } + } + info!("Accepting {:?}", vote); + let leader_fn = |round: Round| -> ValidatorId { context.proposer(self.height, round) }; + let sm_events = self.state_machine.handle_event(sm_vote, &leader_fn); + let ret = self.handle_state_machine_events(context, sm_events).await; + context.set_height_and_round(self.height, self.state_machine.round()).await; + ret + } + + // Handle events output by the state machine. + async fn handle_state_machine_events( + &mut self, + context: &mut ContextT, + mut events: VecDeque, + ) -> Result { + let mut ret_val = Vec::new(); + while let Some(event) = events.pop_front() { + trace!("Handling sm event: {:?}", event); + match event { + StateMachineEvent::GetProposal(proposal_id, round) => { + ret_val.extend( + self.handle_state_machine_get_proposal(context, proposal_id, round).await, + ); + } + StateMachineEvent::Proposal(proposal_id, round, valid_round) => { + self.handle_state_machine_proposal(context, proposal_id, round, valid_round) + .await; + } + StateMachineEvent::Decision(proposal_id, round) => { + return self.handle_state_machine_decision(proposal_id, round).await; + } + StateMachineEvent::Prevote(proposal_id, round) => { + ret_val.extend( + self.handle_state_machine_vote( + context, + proposal_id, + round, + VoteType::Prevote, + ) + .await?, + ); + } + StateMachineEvent::Precommit(proposal_id, round) => { + ret_val.extend( + self.handle_state_machine_vote( + context, + proposal_id, + round, + VoteType::Precommit, + ) + .await?, + ); + } + StateMachineEvent::TimeoutPropose(_) => { + ret_val.push(ShcTask::TimeoutPropose(self.timeouts.proposal_timeout, event)); + } + StateMachineEvent::TimeoutPrevote(_) => { + ret_val.push(ShcTask::TimeoutPrevote(self.timeouts.prevote_timeout, event)); + } + StateMachineEvent::TimeoutPrecommit(_) => { + ret_val.push(ShcTask::TimeoutPrecommit(self.timeouts.precommit_timeout, event)); + } + } + } + Ok(ShcReturn::Tasks(ret_val)) + } + + /// Initiate block building. See [`ShcTask::BuildProposal`] for more details on the full + /// proposal flow. + async fn handle_state_machine_get_proposal( + &mut self, + context: &mut ContextT, + proposal_id: Option, + round: Round, + ) -> Vec { + assert!( + proposal_id.is_none(), + "StateMachine is requesting a new proposal, but provided a content id." + ); + + // TODO(Matan): Figure out how to handle failed proposal building. I believe this should be + // handled by applying timeoutPropose when we are the leader. 
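// Illustrative sketch (not part of the diff): the bookkeeping `handle_vote`
// above performs on its (round, voter) vote maps, reduced to a standalone form.
// A repeat of an identical vote is ignored, while a different value from the
// same key counts as equivocation and bumps CONSENSUS_CONFLICTING_VOTES.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum VoteOutcome {
    New,
    Repeat,
    Conflicting,
}

fn classify_vote(
    votes: &mut HashMap<(u32, u64), Option<u64>>, // (round, voter) -> block hash
    key: (u32, u64),
    block_hash: Option<u64>,
) -> VoteOutcome {
    match votes.get(&key) {
        None => {
            votes.insert(key, block_hash);
            VoteOutcome::New
        }
        Some(old) if *old == block_hash => VoteOutcome::Repeat,
        Some(_) => VoteOutcome::Conflicting,
    }
}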
+ let init = + ProposalInit { height: self.height, round, proposer: self.id, valid_round: None }; + CONSENSUS_BUILD_PROPOSAL_TOTAL.increment(1); + let fin_receiver = context.build_proposal(init, self.timeouts.proposal_timeout).await; + vec![ShcTask::BuildProposal(round, fin_receiver)] + } + + async fn handle_state_machine_proposal<ContextT: ConsensusContext>( + &mut self, + context: &mut ContextT, + proposal_id: Option<ProposalCommitment>, + round: Round, + valid_round: Option<Round>, + ) { + let Some(valid_round) = valid_round else { + // Newly built proposals are handled by the BuildProposal flow. + return; + }; + let proposal_id = proposal_id.expect("Reproposal must have a valid ID"); + + let id = self + .proposals + .get(&valid_round) + .unwrap_or_else(|| panic!("A proposal should exist for valid_round: {valid_round}")) + .unwrap_or_else(|| { + panic!("A valid proposal should exist for valid_round: {valid_round}") + }); + assert_eq!(id, proposal_id, "reproposal should match the stored proposal"); + let old = self.proposals.insert(round, Some(proposal_id)); + assert!(old.is_none(), "There should be no proposal for round {round}."); + let init = ProposalInit { + height: self.height, + round, + proposer: self.id, + valid_round: Some(valid_round), + }; + CONSENSUS_REPROPOSALS.increment(1); + context.repropose(id, init).await; + } + + async fn handle_state_machine_vote<ContextT: ConsensusContext>( + &mut self, + context: &mut ContextT, + proposal_id: Option<ProposalCommitment>, + round: Round, + vote_type: VoteType, + ) -> Result<Vec<ShcTask>, ConsensusError> { + let (votes, last_vote, task) = match vote_type { + VoteType::Prevote => ( + &mut self.prevotes, + &mut self.last_prevote, + ShcTask::Prevote( + self.timeouts.prevote_timeout, + StateMachineEvent::Prevote(proposal_id, round), + ), + ), + VoteType::Precommit => ( + &mut self.precommits, + &mut self.last_precommit, + ShcTask::Precommit( + self.timeouts.precommit_timeout, + StateMachineEvent::Precommit(proposal_id, round), + ), + ), + }; + let vote = Vote { + vote_type, + height: self.height.0, + round, + block_hash: proposal_id, + voter: self.id, + }; + if let Some(old) = votes.insert((round, self.id), vote.clone()) { + return Err(ConsensusError::InternalInconsistency(format!( + "State machine should not send repeat votes: old={:?}, new={:?}", + old, vote + ))); + } + *last_vote = match last_vote { + None => Some(vote.clone()), + Some(last_vote) if round > last_vote.round => Some(vote.clone()), + Some(_) => { + // According to the Tendermint paper, the state machine should only vote for its + // current round. It should monotonically increase its round. It should only vote + // once per step. + return Err(ConsensusError::InternalInconsistency(format!( + "State machine must progress in time: last_vote: {:?} new_vote: {:?}", + last_vote, vote, + ))); + } + }; + + info!("Broadcasting {vote:?}"); + context.broadcast(vote).await?; + Ok(vec![task]) + } + + async fn handle_state_machine_decision( + &mut self, + proposal_id: ProposalCommitment, + round: Round, + ) -> Result<ShcReturn, ConsensusError> { + let invalid_decision = |msg: String| { + ConsensusError::InternalInconsistency(format!( + "Invalid decision: sm_proposal_id={proposal_id}, round={round}. {msg}", + )) + }; + let block = self + .proposals + .remove(&round) + .ok_or_else(|| invalid_decision("No proposal entry for this round".to_string()))? + .ok_or_else(|| { + invalid_decision( + "Proposal is invalid or validations haven't yet completed".to_string(), + ) + })?; + if block != proposal_id { + return Err(invalid_decision(format!( + "StateMachine block hash should match the stored block.
Shc.block_id: {block}" + ))); + } + let supporting_precommits: Vec = self + .validators + .iter() + .filter_map(|v| { + let vote = self.precommits.get(&(round, *v))?; + if vote.block_hash == Some(proposal_id) { Some(vote.clone()) } else { None } + }) + .collect(); + + // TODO(matan): Check actual weights. + let vote_weight = u64::try_from(supporting_precommits.len()) + .expect("Should have way less than u64::MAX supporting votes"); + let total_weight = self.state_machine.total_weight(); + + if !self.state_machine.quorum().is_met(vote_weight, total_weight) { + let msg = format!( + "Not enough supporting votes. num_supporting_votes: {vote_weight} out of \ + {total_weight}. supporting_votes: {supporting_precommits:?}", + ); + return Err(invalid_decision(msg)); + } + Ok(ShcReturn::Decision(Decision { precommits: supporting_precommits, block })) + } +} diff --git a/crates/sequencing/papyrus_consensus/src/single_height_consensus_test.rs b/crates/apollo_consensus/src/single_height_consensus_test.rs similarity index 75% rename from crates/sequencing/papyrus_consensus/src/single_height_consensus_test.rs rename to crates/apollo_consensus/src/single_height_consensus_test.rs index 7648c1d5933..3602208d269 100644 --- a/crates/sequencing/papyrus_consensus/src/single_height_consensus_test.rs +++ b/crates/apollo_consensus/src/single_height_consensus_test.rs @@ -1,12 +1,7 @@ +use apollo_protobuf::consensus::{ProposalFin, ProposalInit, Vote, DEFAULT_VALIDATOR_ID}; use futures::channel::{mpsc, oneshot}; use futures::SinkExt; use lazy_static::lazy_static; -use papyrus_protobuf::consensus::{ - ConsensusMessage, - ProposalFin, - ProposalInit, - DEFAULT_VALIDATOR_ID, -}; use starknet_api::block::{BlockHash, BlockNumber}; use starknet_types_core::felt::Felt; use test_case::test_case; @@ -16,7 +11,8 @@ use crate::config::TimeoutsConfig; use crate::single_height_consensus::{ShcEvent, ShcReturn, ShcTask}; use crate::state_machine::StateMachineEvent; use crate::test_utils::{precommit, prevote, MockTestContext, TestBlock, TestProposalPart}; -use crate::types::{ConsensusError, ValidatorId}; +use crate::types::ValidatorId; +use crate::votes_threshold::QuorumType; lazy_static! { static ref PROPOSER_ID: ValidatorId = DEFAULT_VALIDATOR_ID.into(); @@ -31,9 +27,8 @@ lazy_static! { static ref TIMEOUTS: TimeoutsConfig = TimeoutsConfig::default(); static ref VALIDATE_PROPOSAL_EVENT: ShcEvent = ShcEvent::ValidateProposal( StateMachineEvent::Proposal(Some(BLOCK.id), PROPOSAL_INIT.round, PROPOSAL_INIT.valid_round,), - Some(ProposalFin { proposal_content_id: BLOCK.id }), ); - static ref PROPOSAL_FIN: ProposalFin = ProposalFin { proposal_content_id: BLOCK.id }; + static ref PROPOSAL_FIN: ProposalFin = ProposalFin { proposal_commitment: BLOCK.id }; } const CHANNEL_SIZE: usize = 1; @@ -83,6 +78,7 @@ async fn proposer() { false, *PROPOSER_ID, VALIDATORS.to_vec(), + QuorumType::Byzantine, TIMEOUTS.clone(), ); @@ -96,7 +92,7 @@ async fn proposer() { context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| msg == &prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) + .withf(move |msg: &Vote| msg == &prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) .returning(move |_| Ok(())); // Sends proposal and prevote. 
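// Illustrative sketch (not part of the diff): how `handle_state_machine_decision`
// above counts support, assuming an equal voting weight of 1 per validator (see
// the "Check actual weights" TODO). Only precommits that exactly match the
// decided value count toward the quorum.
fn supporting_weight(precommit_hashes: &[Option<u64>], decided: u64) -> u64 {
    let n = precommit_hashes.iter().filter(|h| **h == Some(decided)).count();
    u64::try_from(n).expect("far fewer than u64::MAX votes")
}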
let shc_ret = shc.start(&mut context).await.unwrap(); @@ -110,20 +106,18 @@ async fn proposer() { Ok(ShcReturn::Tasks(vec![prevote_task(Some(BLOCK.id.0), 0)])) ); assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)).await, Ok(ShcReturn::Tasks(Vec::new())) ); // 3 of 4 Prevotes is enough to send a Precommit. context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| { - msg == &precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID) - }) + .withf(move |msg: &Vote| msg == &precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) .returning(move |_| Ok(())); // The Node got a Prevote quorum. assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, Ok(ShcReturn::Tasks(vec![timeout_prevote_task(0), precommit_task(Some(BLOCK.id.0), 0),])) ); @@ -134,27 +128,22 @@ async fn proposer() { precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID), ]; assert_eq!( - shc.handle_message(&mut context, precommits[0].clone()).await, + shc.handle_vote(&mut context, precommits[0].clone()).await, Ok(ShcReturn::Tasks(Vec::new())) ); // The disagreeing vote counts towards the timeout, which uses a heterogeneous quorum, but not // the decision, which uses a homogenous quorum. assert_eq!( - shc.handle_message(&mut context, precommits[1].clone()).await, + shc.handle_vote(&mut context, precommits[1].clone()).await, Ok(ShcReturn::Tasks(vec![timeout_precommit_task(0),])) ); let ShcReturn::Decision(decision) = - shc.handle_message(&mut context, precommits[2].clone()).await.unwrap() + shc.handle_vote(&mut context, precommits[2].clone()).await.unwrap() else { panic!("Expected decision"); }; assert_eq!(decision.block, BLOCK.id); - assert!( - decision - .precommits - .into_iter() - .all(|item| precommits.contains(&ConsensusMessage::Vote(item))) - ); + assert!(decision.precommits.into_iter().all(|item| precommits.contains(&item))); } #[test_case(false; "single_proposal")] @@ -169,22 +158,21 @@ async fn validator(repeat_proposal: bool) { false, *VALIDATOR_ID_1, VALIDATORS.to_vec(), + QuorumType::Byzantine, TIMEOUTS.clone(), ); context.expect_proposer().returning(move |_, _| *PROPOSER_ID); context.expect_validate_proposal().times(1).returning(move |_, _, _| { let (block_sender, block_receiver) = oneshot::channel(); - block_sender.send((BLOCK.id, PROPOSAL_FIN.clone())).unwrap(); + block_sender.send(BLOCK.id).unwrap(); block_receiver }); context.expect_set_height_and_round().returning(move |_, _| ()); context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| { - msg == &prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1) - }) + .withf(move |msg: &Vote| msg == &prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) .returning(move |_| Ok(())); let shc_ret = handle_proposal(&mut shc, &mut context).await; assert_eq!(shc_ret.as_tasks().unwrap()[0].as_validate_proposal().unwrap().0, &*PROPOSAL_INIT); @@ -198,20 +186,18 @@ async fn validator(repeat_proposal: bool) { assert_eq!(shc_ret, ShcReturn::Tasks(Vec::new())); } assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)).await, Ok(ShcReturn::Tasks(Vec::new())) ); // 3 of 4 Prevotes is enough to send a Precommit. 
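// Illustrative sketch (not part of the diff): the arithmetic behind the
// "3 of 4 Prevotes is enough" comments in these tests. The exact rule lives in
// votes_threshold.rs, which this diff does not show; a strict two-thirds
// majority is assumed here. With total weight 4, 3 votes meet it while 2 do not.
fn byzantine_quorum_met(votes: u64, total_weight: u64) -> bool {
    3 * votes > 2 * total_weight
}

fn three_of_four_examples() {
    assert!(byzantine_quorum_met(3, 4)); // 9 > 8: quorum reached.
    assert!(!byzantine_quorum_met(2, 4)); // 6 <= 8: not enough.
}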
context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| { - msg == &precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1) - }) + .withf(move |msg: &Vote| msg == &precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) .returning(move |_| Ok(())); // The Node got a Prevote quorum. assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, Ok(ShcReturn::Tasks(vec![timeout_prevote_task(0), precommit_task(Some(BLOCK.id.0), 0)])) ); @@ -221,21 +207,16 @@ async fn validator(repeat_proposal: bool) { precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1), ]; assert_eq!( - shc.handle_message(&mut context, precommits[0].clone()).await, + shc.handle_vote(&mut context, precommits[0].clone()).await, Ok(ShcReturn::Tasks(Vec::new())) ); let ShcReturn::Decision(decision) = - shc.handle_message(&mut context, precommits[1].clone()).await.unwrap() + shc.handle_vote(&mut context, precommits[1].clone()).await.unwrap() else { panic!("Expected decision"); }; assert_eq!(decision.block, BLOCK.id); - assert!( - decision - .precommits - .into_iter() - .all(|item| precommits.contains(&ConsensusMessage::Vote(item))) - ); + assert!(decision.precommits.into_iter().all(|item| precommits.contains(&item))); } #[test_case(true; "repeat")] @@ -249,20 +230,21 @@ async fn vote_twice(same_vote: bool) { false, *VALIDATOR_ID_1, VALIDATORS.to_vec(), + QuorumType::Byzantine, TIMEOUTS.clone(), ); context.expect_proposer().times(1).returning(move |_, _| *PROPOSER_ID); context.expect_validate_proposal().times(1).returning(move |_, _, _| { let (block_sender, block_receiver) = oneshot::channel(); - block_sender.send((BLOCK.id, PROPOSAL_FIN.clone())).unwrap(); + block_sender.send(BLOCK.id).unwrap(); block_receiver }); context.expect_set_height_and_round().returning(move |_, _| ()); context .expect_broadcast() .times(1) // Shows the repeat vote is ignored. - .withf(move |msg: &ConsensusMessage| msg == &prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) + .withf(move |msg: &Vote| msg == &prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) .returning(move |_| Ok(())); let shc_ret = handle_proposal(&mut shc, &mut context).await; assert_eq!(shc_ret.as_tasks().unwrap()[0].as_validate_proposal().unwrap().0, &*PROPOSAL_INIT,); @@ -271,16 +253,15 @@ async fn vote_twice(same_vote: bool) { Ok(ShcReturn::Tasks(vec![prevote_task(Some(BLOCK.id.0), 0)])) ); - let res = shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)).await; + let res = shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)).await; assert_eq!(res, Ok(ShcReturn::Tasks(Vec::new()))); context .expect_broadcast() .times(1) // Shows the repeat vote is ignored. - .withf(move |msg: &ConsensusMessage| msg == &precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) + .withf(move |msg: &Vote| msg == &precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) .returning(move |_| Ok(())); - let res = - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await; + let res = shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await; // The Node got a Prevote quorum. 
assert_eq!( res, @@ -288,20 +269,16 @@ async fn vote_twice(same_vote: bool) { ); let first_vote = precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID); - let res = shc.handle_message(&mut context, first_vote.clone()).await; + let res = shc.handle_vote(&mut context, first_vote.clone()).await; assert_eq!(res, Ok(ShcReturn::Tasks(Vec::new()))); let second_vote = if same_vote { first_vote.clone() } else { precommit(Some(Felt::TWO), 0, 0, *PROPOSER_ID) }; - let res = shc.handle_message(&mut context, second_vote.clone()).await; - if same_vote { - assert_eq!(res, Ok(ShcReturn::Tasks(Vec::new()))); - } else { - assert!(matches!(res, Err(ConsensusError::Equivocation(_, _, _)))); - } + let res = shc.handle_vote(&mut context, second_vote.clone()).await; + assert_eq!(res, Ok(ShcReturn::Tasks(Vec::new()))); let ShcReturn::Decision(decision) = shc - .handle_message(&mut context, precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)) + .handle_vote(&mut context, precommit(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)) .await .unwrap() else { @@ -319,6 +296,7 @@ async fn rebroadcast_votes() { false, *PROPOSER_ID, VALIDATORS.to_vec(), + QuorumType::Byzantine, TIMEOUTS.clone(), ); @@ -332,7 +310,7 @@ async fn rebroadcast_votes() { context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| msg == &prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) + .withf(move |msg: &Vote| msg == &prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) .returning(move |_| Ok(())); // Sends proposal and prevote. let shc_ret = shc.start(&mut context).await.unwrap(); @@ -346,20 +324,20 @@ async fn rebroadcast_votes() { Ok(ShcReturn::Tasks(vec![prevote_task(Some(BLOCK.id.0), 0)])) ); assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)).await, Ok(ShcReturn::Tasks(Vec::new())) ); // 3 of 4 Prevotes is enough to send a Precommit. context .expect_broadcast() .times(2) // vote rebroadcast - .withf(move |msg: &ConsensusMessage| { + .withf(move |msg: &Vote| { msg == &precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID) }) .returning(move |_| Ok(())); // The Node got a Prevote quorum. assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, Ok(ShcReturn::Tasks(vec![timeout_prevote_task(0), precommit_task(Some(BLOCK.id.0), 0),])) ); // Re-broadcast vote. @@ -382,6 +360,7 @@ async fn repropose() { false, *PROPOSER_ID, VALIDATORS.to_vec(), + QuorumType::Byzantine, TIMEOUTS.clone(), ); @@ -395,7 +374,7 @@ async fn repropose() { context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| msg == &prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) + .withf(move |msg: &Vote| msg == &prevote(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) .returning(move |_| Ok(())); // Sends proposal and prevote. 
shc.start(&mut context).await.unwrap(); @@ -405,19 +384,15 @@ async fn repropose() { ) .await .unwrap(); - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)) - .await - .unwrap(); + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_1)).await.unwrap(); context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| { - msg == &precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID) - }) + .withf(move |msg: &Vote| msg == &precommit(Some(BLOCK.id.0), 0, 0, *PROPOSER_ID)) .returning(move |_| Ok(())); // The Node got a Prevote quorum, and set valid proposal. assert_eq!( - shc.handle_message(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, + shc.handle_vote(&mut context, prevote(Some(BLOCK.id.0), 0, 0, *VALIDATOR_ID_2)).await, Ok(ShcReturn::Tasks(vec![timeout_prevote_task(0), precommit_task(Some(BLOCK.id.0), 0),])) ); // Advance to the next round. @@ -426,8 +401,8 @@ async fn repropose() { precommit(None, 0, 0, *VALIDATOR_ID_2), precommit(None, 0, 0, *VALIDATOR_ID_3), ]; - shc.handle_message(&mut context, precommits[0].clone()).await.unwrap(); - shc.handle_message(&mut context, precommits[1].clone()).await.unwrap(); + shc.handle_vote(&mut context, precommits[0].clone()).await.unwrap(); + shc.handle_vote(&mut context, precommits[1].clone()).await.unwrap(); // After NIL precommits, the proposer should re-propose. context.expect_repropose().returning(move |id, init| { assert_eq!(init.height, BlockNumber(0)); @@ -436,9 +411,9 @@ async fn repropose() { context .expect_broadcast() .times(1) - .withf(move |msg: &ConsensusMessage| msg == &prevote(Some(BLOCK.id.0), 0, 1, *PROPOSER_ID)) + .withf(move |msg: &Vote| msg == &prevote(Some(BLOCK.id.0), 0, 1, *PROPOSER_ID)) .returning(move |_| Ok(())); - shc.handle_message(&mut context, precommits[2].clone()).await.unwrap(); + shc.handle_vote(&mut context, precommits[2].clone()).await.unwrap(); shc.handle_event( &mut context, ShcEvent::TimeoutPrecommit(StateMachineEvent::TimeoutPrecommit(0)), @@ -451,18 +426,13 @@ async fn repropose() { precommit(Some(BLOCK.id.0), 0, 1, *VALIDATOR_ID_2), precommit(Some(BLOCK.id.0), 0, 1, *VALIDATOR_ID_3), ]; - shc.handle_message(&mut context, precommits[0].clone()).await.unwrap(); - shc.handle_message(&mut context, precommits[1].clone()).await.unwrap(); + shc.handle_vote(&mut context, precommits[0].clone()).await.unwrap(); + shc.handle_vote(&mut context, precommits[1].clone()).await.unwrap(); let ShcReturn::Decision(decision) = - shc.handle_message(&mut context, precommits[2].clone()).await.unwrap() + shc.handle_vote(&mut context, precommits[2].clone()).await.unwrap() else { panic!("Expected decision"); }; assert_eq!(decision.block, BLOCK.id); - assert!( - decision - .precommits - .into_iter() - .all(|item| precommits.contains(&ConsensusMessage::Vote(item))) - ); + assert!(decision.precommits.into_iter().all(|item| precommits.contains(&item))); } diff --git a/crates/sequencing/papyrus_consensus/src/state_machine.rs b/crates/apollo_consensus/src/state_machine.rs similarity index 79% rename from crates/sequencing/papyrus_consensus/src/state_machine.rs rename to crates/apollo_consensus/src/state_machine.rs index 810b833116e..44eddbe052f 100644 --- a/crates/sequencing/papyrus_consensus/src/state_machine.rs +++ b/crates/apollo_consensus/src/state_machine.rs @@ -9,29 +9,40 @@ mod state_machine_test; use std::collections::{HashMap, HashSet, VecDeque}; -use tracing::trace; - -use crate::types::{ProposalContentId, Round, ValidatorId}; +use 
serde::{Deserialize, Serialize}; +use tracing::{debug, info, trace, warn}; + +use crate::metrics::{ + TimeoutReason, + CONSENSUS_HELD_LOCKS, + CONSENSUS_NEW_VALUE_LOCKS, + CONSENSUS_ROUND, + CONSENSUS_ROUND_ABOVE_ZERO, + CONSENSUS_TIMEOUTS, + LABEL_NAME_TIMEOUT_REASON, +}; +use crate::types::{ProposalCommitment, Round, ValidatorId}; +use crate::votes_threshold::{QuorumType, VotesThreshold, ROUND_SKIP_THRESHOLD}; /// Events which the state machine sends/receives. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum StateMachineEvent { - /// Sent by the state machine when a block is required to propose (ProposalContentId is always + /// Sent by the state machine when a block is required to propose (ProposalCommitment is always /// None). While waiting for the response of GetProposal, the state machine will buffer all /// other events. The caller *must* respond with a valid proposal id for this height to the /// state machine, and the same round sent out. - GetProposal(Option, Round), + GetProposal(Option, Round), /// Consensus message, can be both sent from and to the state machine. // (proposal_id, round, valid_round) - Proposal(Option, Round, Option), + Proposal(Option, Round, Option), /// Consensus message, can be both sent from and to the state machine. - Prevote(Option, Round), + Prevote(Option, Round), /// Consensus message, can be both sent from and to the state machine. - Precommit(Option, Round), + Precommit(Option, Round), /// The state machine returns this event to the caller when a decision is reached. Not /// expected as an inbound message. We presume that the caller is able to recover the set of /// precommits which led to this decision from the information returned here. - Decision(ProposalContentId, Round), + Decision(ProposalCommitment, Round), /// Timeout events, can be both sent from and to the state machine. TimeoutPropose(Round), /// Timeout events, can be both sent from and to the state machine. @@ -40,7 +51,7 @@ pub enum StateMachineEvent { TimeoutPrecommit(Round), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum Step { Propose, Prevote, @@ -52,24 +63,26 @@ pub enum Step { /// 2. SM must handle "out of order" messages (E.g. vote arrives before proposal). /// /// Each height is begun with a call to `start`, with no further calls to it. +#[derive(Serialize, Deserialize)] pub struct StateMachine { id: ValidatorId, round: Round, step: Step, - quorum: u32, - round_skip_threshold: u32, + quorum: VotesThreshold, + round_skip_threshold: VotesThreshold, + total_weight: u64, is_observer: bool, // {round: (proposal_id, valid_round)} - proposals: HashMap, Option)>, + proposals: HashMap, Option)>, // {round: {proposal_id: vote_count} - prevotes: HashMap, u32>>, - precommits: HashMap, u32>>, + prevotes: HashMap, u32>>, + precommits: HashMap, u32>>, // When true, the state machine will wait for a GetProposal event, buffering all other input // events in `events_queue`. 
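// Hypothetical sketch (not part of the diff): `VotesThreshold` is defined in
// votes_threshold.rs, which this diff does not show. A ratio-based rule of the
// form "votes must strictly exceed numerator/denominator of the total weight"
// would reproduce the behavior the comments above describe: Byzantine = more
// than 2/3, Honest = more than 1/2, round skip = more than 1/3.
struct RatioThreshold {
    numerator: u128,
    denominator: u128,
}

impl RatioThreshold {
    fn is_met(&self, votes: u64, total_weight: u64) -> bool {
        // Cross-multiplication avoids floating point and overflow for u64 inputs.
        u128::from(votes) * self.denominator > u128::from(total_weight) * self.numerator
    }
}

const BYZANTINE_QUORUM: RatioThreshold = RatioThreshold { numerator: 2, denominator: 3 };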
awaiting_get_proposal: bool, events_queue: VecDeque, - locked_value_round: Option<(ProposalContentId, Round)>, - valid_value_round: Option<(ProposalContentId, Round)>, + locked_value_round: Option<(ProposalCommitment, Round)>, + valid_value_round: Option<(ProposalCommitment, Round)>, prevote_quorum: HashSet, mixed_prevote_quorum: HashSet, mixed_precommit_quorum: HashSet, @@ -77,13 +90,21 @@ pub struct StateMachine { impl StateMachine { /// total_weight - the total voting weight of all validators for this height. - pub fn new(id: ValidatorId, total_weight: u32, is_observer: bool) -> Self { + pub fn new( + id: ValidatorId, + total_weight: u64, + is_observer: bool, + quorum_type: QuorumType, + ) -> Self { Self { id, round: 0, step: Step::Propose, - quorum: (2 * total_weight / 3) + 1, - round_skip_threshold: total_weight / 3 + 1, + // Byzantine: 2/3 votes, Honest: 1/2 votes. + quorum: VotesThreshold::from_quorum_type(quorum_type), + // Skip round threshold is 1/3 of the total weight. + round_skip_threshold: ROUND_SKIP_THRESHOLD, + total_weight, is_observer, proposals: HashMap::new(), prevotes: HashMap::new(), @@ -102,8 +123,12 @@ impl StateMachine { self.round } - pub fn quorum_size(&self) -> u32 { - self.quorum + pub fn total_weight(&self) -> u64 { + self.total_weight + } + + pub fn quorum(&self) -> &VotesThreshold { + &self.quorum } /// Starts the state machine, effectively calling `StartRound(0)` from the paper. This is @@ -133,7 +158,6 @@ impl StateMachine { where LeaderFn: Fn(Round) -> ValidatorId, { - trace!("Handling event: {:?}", event); // Mimic LOC 18 in the paper; the state machine doesn't // handle any events until `getValue` completes. if self.awaiting_get_proposal { @@ -202,6 +226,7 @@ impl StateMachine { where LeaderFn: Fn(Round) -> ValidatorId, { + trace!("Processing event: {:?}", event); if self.awaiting_get_proposal { assert!(matches!(event, StateMachineEvent::GetProposal(_, _)), "{:?}", event); } @@ -234,7 +259,7 @@ impl StateMachine { fn handle_get_proposal( &mut self, - proposal_id: Option, + proposal_id: Option, round: u32, ) -> VecDeque { // TODO(matan): Will we allow other events (timeoutPropose) to exit this state? @@ -247,7 +272,7 @@ impl StateMachine { // A proposal from a peer (or self) node. fn handle_proposal( &mut self, - proposal_id: Option, + proposal_id: Option, round: u32, valid_round: Option, leader_fn: &LeaderFn, @@ -264,6 +289,9 @@ impl StateMachine { if self.step != Step::Propose || round != self.round { return VecDeque::new(); }; + warn!("Proposal failed. Applying TimeoutPropose for round={round}."); + CONSENSUS_TIMEOUTS + .increment(1, &[(LABEL_NAME_TIMEOUT_REASON, TimeoutReason::Propose.into())]); let mut output = VecDeque::from([StateMachineEvent::Prevote(None, round)]); output.append(&mut self.advance_to_step(Step::Prevote)); output @@ -272,7 +300,7 @@ impl StateMachine { // A prevote from a peer (or self) node. fn handle_prevote( &mut self, - proposal_id: Option, + proposal_id: Option, round: u32, leader_fn: &LeaderFn, ) -> VecDeque @@ -289,6 +317,9 @@ impl StateMachine { if self.step != Step::Prevote || round != self.round { return VecDeque::new(); }; + debug!("Applying TimeoutPrevote for round={round}."); + CONSENSUS_TIMEOUTS + .increment(1, &[(LABEL_NAME_TIMEOUT_REASON, TimeoutReason::Prevote.into())]); let mut output = VecDeque::from([StateMachineEvent::Precommit(None, round)]); output.append(&mut self.advance_to_step(Step::Precommit)); output @@ -297,7 +328,7 @@ impl StateMachine { // A precommit from a peer (or self) node. 
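// Illustrative sketch (not part of the diff): how the labeled timeout counter
// above gets its label value. The snake_case IntoStaticStr derive (from strum,
// already used by TimeoutReason in metrics.rs) turns a variant into the static
// string passed alongside LABEL_NAME_TIMEOUT_REASON.
use strum_macros::IntoStaticStr;

#[derive(IntoStaticStr)]
#[strum(serialize_all = "snake_case")]
enum TimeoutReasonSketch {
    Propose,
    Prevote,
    Precommit,
}

fn label_value(reason: TimeoutReasonSketch) -> &'static str {
    reason.into() // e.g. TimeoutReasonSketch::Prevote becomes "prevote".
}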
fn handle_precommit( &mut self, - proposal_id: Option, + proposal_id: Option, round: u32, leader_fn: &LeaderFn, ) -> VecDeque @@ -322,6 +353,9 @@ impl StateMachine { if round != self.round { return VecDeque::new(); }; + debug!("Applying TimeoutPrecommit for round={round}."); + CONSENSUS_TIMEOUTS + .increment(1, &[(LABEL_NAME_TIMEOUT_REASON, TimeoutReason::Precommit.into())]); self.advance_to_round(round + 1, leader_fn) } @@ -334,9 +368,18 @@ impl StateMachine { where LeaderFn: Fn(Round) -> ValidatorId, { + CONSENSUS_ROUND.set(round); + // Count how many times consensus advanced above round 0. + if round == 1 { + CONSENSUS_ROUND_ABOVE_ZERO.increment(1); + } + if self.locked_value_round.is_some() { + CONSENSUS_HELD_LOCKS.increment(1); + } self.round = round; self.step = Step::Propose; let mut output = if !self.is_observer && self.id == leader_fn(self.round) { + info!("START_ROUND_PROPOSER: Starting round {round} as Proposer"); // Leader. match self.valid_value_round { Some((proposal_id, valid_round)) => VecDeque::from([StateMachineEvent::Proposal( @@ -351,6 +394,7 @@ impl StateMachine { } } } else { + info!("START_ROUND_VALIDATOR: Starting round {round} as Validator"); VecDeque::from([StateMachineEvent::TimeoutPropose(self.round)]) }; output.append(&mut self.current_round_upons()); @@ -359,6 +403,7 @@ impl StateMachine { fn advance_to_step(&mut self, step: Step) -> VecDeque { assert_ne!(step, Step::Propose, "Advancing to Propose is done by advancing rounds"); + info!("Advancing step: from {:?} to {step:?} in round={}", self.step, self.round); self.step = step; self.current_round_upons() } @@ -434,7 +479,7 @@ impl StateMachine { if valid_round >= &self.round { return VecDeque::new(); } - if !value_has_enough_votes(&self.prevotes, *valid_round, proposal_id, self.quorum) { + if !self.value_has_enough_votes(&self.prevotes, *valid_round, proposal_id, &self.quorum) { return VecDeque::new(); } let mut output = if proposal_id.is_some_and(|v| { @@ -455,7 +500,7 @@ impl StateMachine { if self.step != Step::Prevote { return VecDeque::new(); } - if !round_has_enough_votes(&self.prevotes, self.round, self.quorum) { + if !self.round_has_enough_votes(&self.prevotes, self.round, &self.quorum) { return VecDeque::new(); } // Getting mixed prevote quorum for the first time. @@ -473,7 +518,12 @@ impl StateMachine { let Some((Some(proposal_id), _)) = self.proposals.get(&self.round) else { return VecDeque::new(); }; - if !value_has_enough_votes(&self.prevotes, self.round, &Some(*proposal_id), self.quorum) { + if !self.value_has_enough_votes( + &self.prevotes, + self.round, + &Some(*proposal_id), + &self.quorum, + ) { return VecDeque::new(); } // Getting prevote quorum for the first time. 
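// Illustrative sketch (not part of the diff): the decision table implemented by
// `advance_to_round` above. The round's leader reproposes a stored valid value
// if it has one and asks for a fresh block otherwise; everyone else (including
// observers) just schedules TimeoutPropose.
enum StartRound {
    ProposeNew,
    Repropose { valid_round: u32 },
    ScheduleTimeoutPropose,
}

fn start_round(is_leader: bool, is_observer: bool, valid_round: Option<u32>) -> StartRound {
    if is_leader && !is_observer {
        match valid_round {
            Some(valid_round) => StartRound::Repropose { valid_round },
            None => StartRound::ProposeNew,
        }
    } else {
        StartRound::ScheduleTimeoutPropose
    }
}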
@@ -484,7 +534,11 @@ impl StateMachine { if self.step != Step::Prevote { return VecDeque::new(); } - self.locked_value_round = Some((*proposal_id, self.round)); + let new_value = Some((*proposal_id, self.round)); + if new_value != self.locked_value_round { + CONSENSUS_NEW_VALUE_LOCKS.increment(1); + } + self.locked_value_round = new_value; let mut output = VecDeque::from([StateMachineEvent::Precommit(Some(*proposal_id), self.round)]); output.append(&mut self.advance_to_step(Step::Precommit)); @@ -496,7 +550,7 @@ impl StateMachine { if self.step != Step::Prevote { return VecDeque::new(); } - if !value_has_enough_votes(&self.prevotes, self.round, &None, self.quorum) { + if !self.value_has_enough_votes(&self.prevotes, self.round, &None, &self.quorum) { return VecDeque::new(); } let mut output = VecDeque::from([StateMachineEvent::Precommit(None, self.round)]); @@ -506,7 +560,7 @@ impl StateMachine { // LOC 47 in the paper. fn maybe_initiate_timeout_precommit(&mut self) -> VecDeque { - if !round_has_enough_votes(&self.precommits, self.round, self.quorum) { + if !self.round_has_enough_votes(&self.precommits, self.round, &self.quorum) { return VecDeque::new(); } // Getting mixed precommit quorum for the first time. @@ -521,7 +575,8 @@ impl StateMachine { let Some((Some(proposal_id), _)) = self.proposals.get(&round) else { return VecDeque::new(); }; - if !value_has_enough_votes(&self.precommits, round, &Some(*proposal_id), self.quorum) { + if !self.value_has_enough_votes(&self.precommits, round, &Some(*proposal_id), &self.quorum) + { return VecDeque::new(); } @@ -537,29 +592,35 @@ impl StateMachine { where LeaderFn: Fn(Round) -> ValidatorId, { - if round_has_enough_votes(&self.prevotes, round, self.round_skip_threshold) - || round_has_enough_votes(&self.precommits, round, self.round_skip_threshold) + if self.round_has_enough_votes(&self.prevotes, round, &self.round_skip_threshold) + || self.round_has_enough_votes(&self.precommits, round, &self.round_skip_threshold) { self.advance_to_round(round, leader_fn) } else { VecDeque::new() } } -} -fn round_has_enough_votes( - votes: &HashMap, u32>>, - round: u32, - threshold: u32, -) -> bool { - votes.get(&round).map_or(0, |v| v.values().sum()) >= threshold -} + fn round_has_enough_votes( + &self, + votes: &HashMap, u32>>, + round: u32, + threshold: &VotesThreshold, + ) -> bool { + threshold + .is_met(votes.get(&round).map_or(0, |v| v.values().sum()).into(), self.total_weight) + } -fn value_has_enough_votes( - votes: &HashMap, u32>>, - round: u32, - value: &Option, - threshold: u32, -) -> bool { - votes.get(&round).map_or(0, |v| *v.get(value).unwrap_or(&0)) >= threshold + fn value_has_enough_votes( + &self, + votes: &HashMap, u32>>, + round: u32, + value: &Option, + threshold: &VotesThreshold, + ) -> bool { + threshold.is_met( + votes.get(&round).map_or(0, |v| *v.get(value).unwrap_or(&0)).into(), + self.total_weight, + ) + } } diff --git a/crates/sequencing/papyrus_consensus/src/state_machine_test.rs b/crates/apollo_consensus/src/state_machine_test.rs similarity index 79% rename from crates/sequencing/papyrus_consensus/src/state_machine_test.rs rename to crates/apollo_consensus/src/state_machine_test.rs index ff91f999dbe..44a1888dfbc 100644 --- a/crates/sequencing/papyrus_consensus/src/state_machine_test.rs +++ b/crates/apollo_consensus/src/state_machine_test.rs @@ -1,21 +1,22 @@ use std::collections::VecDeque; +use apollo_protobuf::consensus::DEFAULT_VALIDATOR_ID; use lazy_static::lazy_static; -use papyrus_protobuf::consensus::DEFAULT_VALIDATOR_ID; use 
starknet_api::block::BlockHash; use starknet_types_core::felt::Felt; use test_case::test_case; use super::Round; use crate::state_machine::{StateMachine, StateMachineEvent}; -use crate::types::{ProposalContentId, ValidatorId}; +use crate::types::{ProposalCommitment, ValidatorId}; +use crate::votes_threshold::QuorumType; lazy_static! { static ref PROPOSER_ID: ValidatorId = DEFAULT_VALIDATOR_ID.into(); static ref VALIDATOR_ID: ValidatorId = (DEFAULT_VALIDATOR_ID + 1).into(); } -const PROPOSAL_ID: Option = Some(BlockHash(Felt::ONE)); +const PROPOSAL_ID: Option = Some(BlockHash(Felt::ONE)); const ROUND: Round = 0; struct TestWrapper ValidatorId> { @@ -25,9 +26,15 @@ struct TestWrapper ValidatorId> { } impl ValidatorId> TestWrapper { - pub fn new(id: ValidatorId, total_weight: u32, leader_fn: LeaderFn, is_observer: bool) -> Self { + pub fn new( + id: ValidatorId, + total_weight: u64, + leader_fn: LeaderFn, + is_observer: bool, + quorum_type: QuorumType, + ) -> Self { Self { - state_machine: StateMachine::new(id, total_weight, is_observer), + state_machine: StateMachine::new(id, total_weight, is_observer, quorum_type), leader_fn, events: VecDeque::new(), } @@ -41,19 +48,19 @@ impl ValidatorId> TestWrapper { self.events.append(&mut self.state_machine.start(&self.leader_fn)) } - pub fn send_get_proposal(&mut self, proposal_id: Option, round: Round) { + pub fn send_get_proposal(&mut self, proposal_id: Option, round: Round) { self.send_event(StateMachineEvent::GetProposal(proposal_id, round)) } - pub fn send_proposal(&mut self, proposal_id: Option, round: Round) { + pub fn send_proposal(&mut self, proposal_id: Option, round: Round) { self.send_event(StateMachineEvent::Proposal(proposal_id, round, None)) } - pub fn send_prevote(&mut self, proposal_id: Option, round: Round) { + pub fn send_prevote(&mut self, proposal_id: Option, round: Round) { self.send_event(StateMachineEvent::Prevote(proposal_id, round)) } - pub fn send_precommit(&mut self, proposal_id: Option, round: Round) { + pub fn send_precommit(&mut self, proposal_id: Option, round: Round) { self.send_event(StateMachineEvent::Precommit(proposal_id, round)) } @@ -78,7 +85,8 @@ impl ValidatorId> TestWrapper { #[test_case(false; "validator")] fn events_arrive_in_ideal_order(is_proposer: bool) { let id = if is_proposer { *PROPOSER_ID } else { *VALIDATOR_ID }; - let mut wrapper = TestWrapper::new(id, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(id, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); if is_proposer { @@ -121,7 +129,8 @@ fn events_arrive_in_ideal_order(is_proposer: bool) { #[test] fn validator_receives_votes_first() { - let mut wrapper = TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); // Waiting for the proposal. 
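// Illustrative sketch (not part of the diff): the `leader_fn` these tests pass
// (`|_: Round| *PROPOSER_ID`) is a constant leader-election function. The state
// machine only ever consults the leader through such a closure, which keeps it
// agnostic of the real election scheme.
fn fixed_leader<Id: Copy>(proposer: Id) -> impl Fn(u32) -> Id {
    move |_round: u32| proposer
}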
@@ -154,8 +163,9 @@ fn validator_receives_votes_first() { #[test_case(PROPOSAL_ID ; "valid_proposal")] #[test_case(None ; "invalid_proposal")] -fn buffer_events_during_get_proposal(vote: Option) { - let mut wrapper = TestWrapper::new(*PROPOSER_ID, 4, |_: Round| *PROPOSER_ID, false); +fn buffer_events_during_get_proposal(vote: Option) { + let mut wrapper = + TestWrapper::new(*PROPOSER_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::GetProposal(None, 0)); @@ -180,7 +190,8 @@ fn buffer_events_during_get_proposal(vote: Option) { #[test] fn only_send_precommit_with_prevote_quorum_and_proposal() { - let mut wrapper = TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); // Waiting for the proposal. @@ -203,7 +214,8 @@ fn only_send_precommit_with_prevote_quorum_and_proposal() { #[test] fn only_decide_with_prcommit_quorum_and_proposal() { - let mut wrapper = TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); // Waiting for the proposal. @@ -233,7 +245,8 @@ fn only_decide_with_prcommit_quorum_and_proposal() { #[test] fn advance_to_the_next_round() { - let mut wrapper = TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); // Waiting for the proposal. @@ -259,7 +272,8 @@ fn advance_to_the_next_round() { #[test] fn prevote_when_receiving_proposal_in_current_round() { - let mut wrapper = TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::TimeoutPropose(ROUND)); @@ -283,15 +297,16 @@ fn prevote_when_receiving_proposal_in_current_round() { #[test_case(true ; "send_proposal")] #[test_case(false ; "send_timeout_propose")] -fn mixed_quorum(send_prposal: bool) { - let mut wrapper = TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false); +fn mixed_quorum(send_proposal: bool) { + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); // Waiting for the proposal. 
assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::TimeoutPropose(ROUND)); assert!(wrapper.events.is_empty()); - if send_prposal { + if send_proposal { wrapper.send_proposal(PROPOSAL_ID, ROUND); assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::Prevote(PROPOSAL_ID, ROUND)); } else { @@ -314,7 +329,8 @@ fn mixed_quorum(send_prposal: bool) { #[test] fn dont_handle_enqueued_while_awaiting_get_proposal() { - let mut wrapper = TestWrapper::new(*PROPOSER_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*PROPOSER_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::GetProposal(None, ROUND)); @@ -359,7 +375,8 @@ fn dont_handle_enqueued_while_awaiting_get_proposal() { #[test] fn return_proposal_if_locked_value_is_set() { - let mut wrapper = TestWrapper::new(*PROPOSER_ID, 4, |_: Round| *PROPOSER_ID, false); + let mut wrapper = + TestWrapper::new(*PROPOSER_ID, 4, |_: Round| *PROPOSER_ID, false, QuorumType::Byzantine); wrapper.start(); assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::GetProposal(None, ROUND)); @@ -394,7 +411,7 @@ fn return_proposal_if_locked_value_is_set() { #[test] fn observer_node_reaches_decision() { let id = *VALIDATOR_ID; - let mut wrapper = TestWrapper::new(id, 4, |_: Round| *PROPOSER_ID, true); + let mut wrapper = TestWrapper::new(id, 4, |_: Round| *PROPOSER_ID, true, QuorumType::Byzantine); wrapper.start(); @@ -416,3 +433,61 @@ fn observer_node_reaches_decision() { ); assert!(wrapper.next_event().is_none()); } + +#[test_case(QuorumType::Byzantine; "byzantine")] +#[test_case(QuorumType::Honest; "honest")] +fn number_of_required_votes(quorum_type: QuorumType) { + let mut wrapper = + TestWrapper::new(*VALIDATOR_ID, 3, |_: Round| *PROPOSER_ID, false, quorum_type); + + wrapper.start(); + // Waiting for the proposal. + assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::TimeoutPropose(ROUND)); + assert!(wrapper.next_event().is_none()); + wrapper.send_proposal(PROPOSAL_ID, ROUND); + + // The node says this proposal is valid (vote 1). + assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::Prevote(PROPOSAL_ID, ROUND)); + assert!(wrapper.next_event().is_none()); + + // Another node sends a Prevote (vote 2). + wrapper.send_prevote(PROPOSAL_ID, ROUND); + + // Byzantine quorum requires 3 votes, so we need one more vote. + if quorum_type == QuorumType::Byzantine { + // Not enough votes for a quorum yet. + assert!(wrapper.next_event().is_none()); + + // Another node sends a Prevote (vote 3). + wrapper.send_prevote(PROPOSAL_ID, ROUND); + } + // In honest case, the second vote is enough for a quorum. + + // The Node got a Prevote quorum. + assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::TimeoutPrevote(ROUND)); + + // The Node sends a Precommit (vote 1). + assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::Precommit(PROPOSAL_ID, ROUND)); + assert!(wrapper.next_event().is_none()); + + // Another node sends a Precommit (vote 2). + wrapper.send_precommit(PROPOSAL_ID, ROUND); + + // Byzantine quorum requires 3 votes, so we need one more vote. + if quorum_type == QuorumType::Byzantine { + // Not enough votes for a quorum yet. + assert!(wrapper.next_event().is_none()); + + // Another node sends a Precommit (vote 3). + wrapper.send_precommit(PROPOSAL_ID, ROUND); + } + // In honest case, the second vote is enough for a quorum. + + // The Node got a Precommit quorum. 
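As an aside on the counts in this test: a Byzantine quorum requires strictly more than 2/3 of the total weight and an honest quorum strictly more than 1/2, via the cross-multiplied comparison implemented by `VotesThreshold::is_met` later in this diff. A minimal, self-contained sketch of that arithmetic (ratios inlined for illustration):

// Illustrative only: mirrors the strict `votes * den > total * num` rule.
fn is_met(votes: u64, total: u64, num: u64, den: u64) -> bool {
    votes * den > total * num
}

fn main() {
    // Byzantine quorum (2/3) with total weight 3: two votes are not enough.
    assert!(!is_met(2, 3, 2, 3)); // 2 * 3 = 6 is not > 3 * 2 = 6
    assert!(is_met(3, 3, 2, 3)); // all three votes are required
    // Honest quorum (1/2) with total weight 3: the second vote suffices.
    assert!(is_met(2, 3, 1, 2)); // 2 * 2 = 4 > 3 * 1 = 3
}

With total weight 3 this yields 3 required votes in the Byzantine case and 2 in the honest case, matching the two branches above; the test's closing assertions follow.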
+ assert_eq!(wrapper.next_event().unwrap(), StateMachineEvent::TimeoutPrecommit(ROUND)); + assert_eq!( + wrapper.next_event().unwrap(), + StateMachineEvent::Decision(PROPOSAL_ID.unwrap(), ROUND) + ); + assert!(wrapper.next_event().is_none()); +} diff --git a/crates/apollo_consensus/src/stream_handler.rs b/crates/apollo_consensus/src/stream_handler.rs new file mode 100644 index 00000000000..e1f9daaf0ff --- /dev/null +++ b/crates/apollo_consensus/src/stream_handler.rs @@ -0,0 +1,482 @@ +//! Overlay streaming logic onto individual messages. + +use std::cmp::Ordering; +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::{BTreeMap, HashMap}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; +use std::num::NonZeroUsize; + +use apollo_network::network_manager::{BroadcastTopicClientTrait, ReceivedBroadcastedMessage}; +use apollo_network::utils::StreamMap; +use apollo_network_types::network_types::{BroadcastedMessageMetadata, OpaquePeerId}; +use apollo_protobuf::consensus::{StreamMessage, StreamMessageBody}; +use apollo_protobuf::converters::ProtobufConversionError; +use futures::channel::mpsc; +use futures::never::Never; +use futures::StreamExt; +use lru::LruCache; +use tracing::{info, instrument, warn}; + +use crate::config::StreamHandlerConfig; +use crate::metrics::{ + CONSENSUS_INBOUND_STREAM_EVICTED, + CONSENSUS_INBOUND_STREAM_FINISHED, + CONSENSUS_INBOUND_STREAM_STARTED, + CONSENSUS_OUTBOUND_STREAM_FINISHED, + CONSENSUS_OUTBOUND_STREAM_STARTED, +}; + +#[cfg(test)] +#[path = "stream_handler_test.rs"] +mod stream_handler_test; + +type PeerId = OpaquePeerId; +type MessageId = u64; + +/// Errors which cause the stream handler to stop functioning. +#[derive(thiserror::Error, PartialEq, Debug)] +pub enum StreamHandlerError { + /// Client has closed their sender, so no more outbound streams can be sent. + #[error("Client has closed their sender, so no more outbound streams can be sent.")] + OutboundChannelClosed, + /// Network has closed their sender, so no more inbound streams can be sent. + #[error("Network has closed their sender, so no more inbound streams can be sent.")] + InboundChannelClosed, + /// Client sent a StreamId that is already in use by an existing stream. + #[error("Client sent a StreamId that is already in use by an existing stream: {0}")] + StreamIdReused(String), +} + +/// A combination of trait bounds needed for the content of the stream. +pub trait StreamContentTrait: + Clone + Into<Vec<u8>> + TryFrom<Vec<u8>, Error = ProtobufConversionError> + Send +{ +} +impl<StreamContent> StreamContentTrait for StreamContent where + StreamContent: Clone + Into<Vec<u8>> + TryFrom<Vec<u8>, Error = ProtobufConversionError> + Send +{ +} +/// A combination of trait bounds needed for the stream ID. +pub trait StreamIdTrait: + Into<Vec<u8>> + + TryFrom<Vec<u8>, Error = ProtobufConversionError> + + Eq + + Hash + + Clone + + Unpin + + Display + + Debug + + Send + + Ord +{ +} +impl<StreamId> StreamIdTrait for StreamId where + StreamId: Into<Vec<u8>> + + TryFrom<Vec<u8>, Error = ProtobufConversionError> + + Eq + + Hash + + Clone + + Unpin + + Display + + Debug + + Send + + Ord +{ +}
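A side note on the `StreamContentTrait`/`StreamIdTrait` pair just defined: they use the blanket-impl "trait alias" idiom, an empty trait plus an impl for every type satisfying the bounds, so the long bound list is written once. A minimal standalone sketch of the same pattern (names here are illustrative, not from this crate):

use std::fmt::Display;
use std::hash::Hash;

// An empty marker trait bundling the bounds we want to write once.
trait StreamKey: Eq + Hash + Clone + Display {}
// Blanket impl: anything satisfying the bounds is automatically a StreamKey.
impl<T> StreamKey for T where T: Eq + Hash + Clone + Display {}

// Generic code can now name one bound instead of repeating four.
fn describe<K: StreamKey>(key: K) -> String {
    format!("stream {key}")
}

fn main() {
    assert_eq!(describe(7u64), "stream 7");
}

Any type meeting the bounds gets the marker trait for free, which is how `TestStreamId` in the test file below qualifies as a stream ID.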
+ +// Use this struct for each inbound stream. +// Drop the struct when: +// (1) receiver on the other end is dropped, +// (2) fin message is received and all messages are sent. +#[derive(Debug)] +struct StreamData<StreamContent: StreamContentTrait, StreamId: StreamIdTrait> { + next_message_id: MessageId, + // Last message ID. If None, it means we have not yet gotten to it. + fin_message_id: Option<MessageId>, + max_message_id_received: MessageId, + // Keep the receiver until it is time to send it to the application. + receiver: Option<mpsc::Receiver<StreamContent>>, + sender: mpsc::Sender<StreamContent>, + // A buffer for messages that were received out of order. + message_buffer: HashMap<MessageId, StreamMessage<StreamContent, StreamId>>, +} + +impl<StreamContent: StreamContentTrait, StreamId: StreamIdTrait> + StreamData<StreamContent, StreamId> +{ + fn new(channel_buffer_capacity: usize) -> Self { + let (sender, receiver) = mpsc::channel(channel_buffer_capacity); + StreamData { + next_message_id: 0, + fin_message_id: None, + max_message_id_received: 0, + sender, + receiver: Some(receiver), + message_buffer: HashMap::new(), + } + } +} + +/// A StreamHandler is responsible for: +/// - Buffering inbound messages and reporting them to the application in order. +/// - Sending outbound messages to the network, wrapped in StreamMessage. +pub struct StreamHandler<StreamContent, StreamId, InboundReceiverT, OutboundSenderT> +where + StreamContent: StreamContentTrait, + StreamId: StreamIdTrait, + InboundReceiverT: Unpin + + StreamExt<Item = ReceivedBroadcastedMessage<StreamMessage<StreamContent, StreamId>>>, + OutboundSenderT: BroadcastTopicClientTrait<StreamMessage<StreamContent, StreamId>>, +{ + config: StreamHandlerConfig, + // For each stream ID from the network, send the application a Receiver + // that will yield that stream's messages in order. The outer channel is how + // those Receivers are handed over to the application. + inbound_channel_sender: mpsc::Sender<mpsc::Receiver<StreamContent>>, + // This receives messages from the network. + inbound_receiver: InboundReceiverT, + // An LRU cache mapping (peer_id, stream_id) to a struct that contains all the information + // about the stream. This includes both the message buffer and some metadata + // (like the latest message ID). + inbound_stream_data: LruCache<(PeerId, StreamId), StreamData<StreamContent, StreamId>>, + // Whenever the application wants to start a new stream, it must send out a + // (stream_id, Receiver) pair. Each receiver gets messages that should + // be sent out to the network. + outbound_channel_receiver: mpsc::Receiver<(StreamId, mpsc::Receiver<StreamContent>)>, + // A map where the above Receivers are stored. + outbound_stream_receivers: StreamMap<StreamId, mpsc::Receiver<StreamContent>>, + // A network sender that allows sending StreamMessages to peers. + outbound_sender: OutboundSenderT, + // For each stream, keep track of the message_id of the last message sent. + outbound_stream_number: HashMap<StreamId, MessageId>, +} + +impl<StreamContent, StreamId, InboundReceiverT, OutboundSenderT> + StreamHandler<StreamContent, StreamId, InboundReceiverT, OutboundSenderT> +where + StreamContent: StreamContentTrait, + StreamId: StreamIdTrait, + InboundReceiverT: Unpin + + StreamExt<Item = ReceivedBroadcastedMessage<StreamMessage<StreamContent, StreamId>>>, + OutboundSenderT: BroadcastTopicClientTrait<StreamMessage<StreamContent, StreamId>>, +{ + /// Create a new StreamHandler. + pub fn new( + config: StreamHandlerConfig, + inbound_channel_sender: mpsc::Sender<mpsc::Receiver<StreamContent>>, + inbound_receiver: InboundReceiverT, + outbound_channel_receiver: mpsc::Receiver<(StreamId, mpsc::Receiver<StreamContent>)>, + outbound_sender: OutboundSenderT, + ) -> Self { + let cache = LruCache::new( + NonZeroUsize::new(config.max_streams).expect("max_streams must be non-zero"), + ); + + Self { + config, + inbound_channel_sender, + inbound_receiver, + inbound_stream_data: cache, + outbound_channel_receiver, + outbound_sender, + outbound_stream_receivers: StreamMap::new(BTreeMap::new()), + outbound_stream_number: HashMap::new(), + } + } + + /// Run the stream handler indefinitely. + pub async fn run(mut self) -> Result<Never, StreamHandlerError> { + loop { + self.handle_next_msg().await? + } + } + + /// Listen for a single message coming from the network or from an application. + /// - Outbound messages are wrapped as StreamMessage and sent to the network directly. + /// - Inbound messages are stripped of StreamMessage and buffered until they can be sent in the + /// correct order to the application. + /// + /// Expects to live forever, returning an Error if the client or network close their sender. + pub async fn handle_next_msg(&mut self) -> Result<(), StreamHandlerError> { + tokio::select!( + // New outbound stream.
+ outbound_stream = self.outbound_channel_receiver.next() => { + self.handle_new_stream(outbound_stream).await + } + // New message on an existing outbound stream. + output = self.outbound_stream_receivers.next() => { + self.handle_outbound_message(output).await; + Ok(()) + } + // New inbound message from the network. + message = self.inbound_receiver.next() => { + self.handle_inbound_message(message) + } + ) + } + + async fn handle_new_stream( + &mut self, + outbound_stream: Option<(StreamId, mpsc::Receiver<StreamContent>)>, + ) -> Result<(), StreamHandlerError> { + let Some((stream_id, receiver)) = outbound_stream else { + warn!("Outbound streams channel closed. No new outbound streams can be started."); + return Err(StreamHandlerError::OutboundChannelClosed); + }; + if self.outbound_stream_receivers.insert(stream_id.clone(), receiver).is_some() { + warn!(%stream_id, "Outbound stream ID reused."); + return Err(StreamHandlerError::StreamIdReused(format!("{stream_id}"))); + } + CONSENSUS_OUTBOUND_STREAM_STARTED.increment(1); + info!(%stream_id, "Outbound stream started."); + Ok(()) + } + + async fn handle_outbound_message( + &mut self, + message: Option<(StreamId, Option<StreamContent>)>, + ) { + match message { + Some((key, Some(msg))) => self.broadcast(key, msg).await, + Some((key, None)) => self.broadcast_fin(key).await, + None => { + panic!("StreamMap should never be closed") + } + } + } + + fn inbound_send( + &mut self, + data: &mut StreamData<StreamContent, StreamId>, + message: StreamMessage<StreamContent, StreamId>, + ) -> bool { + // TODO(guyn): reconsider the "expect" here. + let sender = &mut data.sender; + if let StreamMessageBody::Content(content) = message.message { + match sender.try_send(content) { + Ok(_) => {} + Err(e) => { + if e.is_disconnected() { + warn!( + "Sender is disconnected, dropping the message. StreamId: {}, \ MessageId: {}", + message.stream_id, message.message_id + ); + return true; + } else if e.is_full() { + // TODO(guyn): replace panic with buffering of the message. + panic!( + "Sender is full. StreamId: {}, MessageId: {}", + message.stream_id, message.message_id + ); + } else { + // TODO(guyn): replace panic with more graceful error handling + panic!("Unexpected error: {:?}", e); + } + } + }; + // Send the receiver only once the first message has been sent. + if message.message_id == 0 { + // TODO(guyn): consider the expect in both cases. + let receiver = data.receiver.take().expect("Receiver should exist"); + // Send the receiver to the application. + self.inbound_channel_sender.try_send(receiver).expect("Send should succeed"); + } + data.next_message_id += 1; + return false; + } + // A Fin message is not sent. This is a no-op, can safely return true. + true + } + + // Send the message to the network. + async fn broadcast(&mut self, stream_id: StreamId, message: StreamContent) { + // TODO(guyn): add a random nonce to the outbound stream ID, + // such that even if the client sends the same stream ID, + // (e.g., after a crash) this will be treated as a new stream. + let message = StreamMessage { + message: StreamMessageBody::Content(message), + stream_id: stream_id.clone(), + message_id: *self.outbound_stream_number.get(&stream_id).unwrap_or(&0), + }; + // TODO(guyn): reconsider the "expect" here. + self.outbound_sender.broadcast_message(message).await.expect("Send should succeed"); + self.outbound_stream_number.insert( + stream_id.clone(), + self.outbound_stream_number.get(&stream_id).unwrap_or(&0) + 1, + ); + }
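The `broadcast` method above relies on `outbound_stream_number` to hand out consecutive message IDs per stream; a minimal sketch of that bookkeeping, assuming a plain `HashMap` keyed by a numeric stream ID:

use std::collections::HashMap;

// Illustrative only: assign consecutive message ids per stream, as the
// outbound path does with `outbound_stream_number`.
fn next_message_id(counters: &mut HashMap<u64, u64>, stream_id: u64) -> u64 {
    let counter = counters.entry(stream_id).or_insert(0);
    let id = *counter;
    *counter += 1;
    id
}

fn main() {
    let mut counters = HashMap::new();
    assert_eq!(next_message_id(&mut counters, 5), 0);
    assert_eq!(next_message_id(&mut counters, 5), 1);
    assert_eq!(next_message_id(&mut counters, 9), 0); // independent per stream
}

`broadcast_fin` (next hunk) reads the same counter for the Fin's message ID and then removes the entry, ending that stream's numbering.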
+ + // Send a fin message to the network. + async fn broadcast_fin(&mut self, stream_id: StreamId) { + let message = StreamMessage { + message: StreamMessageBody::Fin, + stream_id: stream_id.clone(), + message_id: *self.outbound_stream_number.get(&stream_id).unwrap_or(&0), + }; + self.outbound_sender.broadcast_message(message).await.expect("Send should succeed"); + self.outbound_stream_number.remove(&stream_id); + CONSENSUS_OUTBOUND_STREAM_FINISHED.increment(1); + info!(%stream_id, "Outbound stream finished."); + } + + // Handle a message that was received from the network. + #[instrument(skip_all, level = "warn")] + #[allow(clippy::type_complexity)] + fn handle_inbound_message( + &mut self, + message: Option<( + Result<StreamMessage<StreamContent, StreamId>, ProtobufConversionError>, + BroadcastedMessageMetadata, + )>, + ) -> Result<(), StreamHandlerError> { + let (message, metadata) = match message { + None => return Err(StreamHandlerError::InboundChannelClosed), + Some((Ok(message), metadata)) => (message, metadata), + Some((Err(e), _)) => { + // TODO(guy): switch to debug when network is opened to "all". + warn!("Error converting message: {:?}", e); + return Ok(()); + } + }; + + let peer_id = metadata.originator_id.clone(); + let stream_id = message.stream_id.clone(); + let key = (peer_id.clone(), stream_id.clone()); + + // Try to get the stream data from the cache. + let data = match self.inbound_stream_data.pop(&key) { + Some(data) => data, + None => { + info!(?peer_id, ?stream_id, "Inbound stream started"); + CONSENSUS_INBOUND_STREAM_STARTED.increment(1); + StreamData::new(self.config.channel_buffer_capacity) + } + }; + if let Some(data) = self.handle_message_inner(message, metadata, data) { + if let Some((evicted_key, _)) = self.inbound_stream_data.push(key, data) { + CONSENSUS_INBOUND_STREAM_EVICTED.increment(1); + warn!(?evicted_key, "Evicted inbound stream due to capacity"); + } + } + Ok(()) + } + + /// Returns the StreamData struct if it should be put back into the LRU cache. None if the data + /// should be dropped. + fn handle_message_inner( + &mut self, + message: StreamMessage<StreamContent, StreamId>, + metadata: BroadcastedMessageMetadata, + mut data: StreamData<StreamContent, StreamId>, + ) -> Option<StreamData<StreamContent, StreamId>> { + let peer_id = metadata.originator_id; + let stream_id = message.stream_id.clone(); + let key = (peer_id.clone(), stream_id.clone()); + let message_id = message.message_id; + + if data.max_message_id_received < message_id { + data.max_message_id_received = message_id; + } + + // Check for Fin type message. + match message.message { + StreamMessageBody::Content(_) => {} + StreamMessageBody::Fin => { + data.fin_message_id = Some(message_id); + if data.max_message_id_received > message_id { + // TODO(guyn): replace warnings with more graceful error handling + warn!( + "Received fin message with id that is smaller than a previous message! \ key: {:?}, fin_message_id: {}, max_message_id_received: {}", + key.clone(), + message_id, + data.max_message_id_received + ); + return None; + } + } + } + + if message_id > data.fin_message_id.unwrap_or(u64::MAX) { + // TODO(guyn): replace warnings with more graceful error handling + warn!( + "Received message with id that is bigger than the id of the fin message!
key: \ {:?}, message_id: {}, fin_message_id: {}", + key.clone(), + message_id, + data.fin_message_id.unwrap_or(u64::MAX) + ); + return None; + } + + // This means we can just send the message without buffering it. + match message_id.cmp(&data.next_message_id) { + Ordering::Equal => { + let mut receiver_dropped = self.inbound_send(&mut data, message); + if !receiver_dropped { + receiver_dropped = self.process_buffer(&mut data); + } + + if data.message_buffer.is_empty() && data.fin_message_id.is_some() + || receiver_dropped + { + data.sender.close_channel(); + CONSENSUS_INBOUND_STREAM_FINISHED.increment(1); + info!(?peer_id, ?stream_id, "Inbound stream finished."); + return None; + } + } + Ordering::Greater => { + Self::store(&mut data, key.clone(), message); + } + Ordering::Less => { + // TODO(guyn): replace warnings with more graceful error handling + warn!( + "Received message with id that is smaller than the next message expected! \ key: {:?}, message_id: {}, next_message_id: {}", + key.clone(), + message_id, + data.next_message_id + ); + return None; + } + } + Some(data) + } + + // Store an inbound message in the buffer. + fn store( + data: &mut StreamData<StreamContent, StreamId>, + key: (PeerId, StreamId), + message: StreamMessage<StreamContent, StreamId>, + ) { + let message_id = message.message_id; + + match data.message_buffer.entry(message_id) { + Vacant(e) => { + e.insert(message); + } + Occupied(_) => { + // TODO(guyn): replace warnings with more graceful error handling + warn!( + "Two messages with the same message_id in buffer! key: {:?}, message_id: {}", + key, message_id + ); + } + } + }
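Together, `store` above and `process_buffer` below implement a classic reorder buffer: out-of-order messages are parked in a map keyed by message ID, and a cursor drains consecutive IDs as they become available. A minimal self-contained sketch of the idea (illustrative types only):

use std::collections::HashMap;

// Illustrative reorder buffer: deliver messages strictly in id order.
struct Reorder {
    next_id: u64,
    parked: HashMap<u64, &'static str>,
}

impl Reorder {
    // Returns every message that became deliverable after this one arrived.
    fn accept(&mut self, id: u64, msg: &'static str) -> Vec<&'static str> {
        self.parked.insert(id, msg);
        let mut out = Vec::new();
        while let Some(m) = self.parked.remove(&self.next_id) {
            out.push(m);
            self.next_id += 1;
        }
        out
    }
}

fn main() {
    let mut r = Reorder { next_id: 0, parked: HashMap::new() };
    assert!(r.accept(1, "b").is_empty()); // parked, still waiting for id 0
    assert_eq!(r.accept(0, "a"), vec!["a", "b"]); // id 0 arrives, both drain
}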
+ + // Tries to drain as many messages as possible from the buffer (in order), + // DOES NOT guarantee that the buffer will be empty after calling this function. + // Returns true if the receiver for this stream is dropped. + fn process_buffer(&mut self, data: &mut StreamData<StreamContent, StreamId>) -> bool { + while let Some(message) = data.message_buffer.remove(&data.next_message_id) { + if self.inbound_send(data, message) { + return true; + } + } + false + } +} diff --git a/crates/apollo_consensus/src/stream_handler_test.rs b/crates/apollo_consensus/src/stream_handler_test.rs new file mode 100644 index 00000000000..6cdca335f47 --- /dev/null +++ b/crates/apollo_consensus/src/stream_handler_test.rs @@ -0,0 +1,453 @@ +use std::collections::BTreeSet; +use std::fmt::Display; + +use apollo_network::network_manager::{BroadcastTopicClientTrait, ReceivedBroadcastedMessage}; +use apollo_network_types::network_types::BroadcastedMessageMetadata; +use apollo_protobuf::consensus::{ProposalInit, ProposalPart, StreamMessageBody}; +use apollo_protobuf::converters::ProtobufConversionError; +use apollo_test_utils::{get_rng, GetTestInstance}; +use futures::channel::mpsc::{self, Receiver, SendError, Sender}; +use futures::{FutureExt, SinkExt, StreamExt}; +use prost::DecodeError; + +use crate::config::StreamHandlerConfig; +use crate::stream_handler::StreamHandler; +const CHANNEL_CAPACITY: usize = 100; +const MAX_STREAMS: usize = 10; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +struct TestStreamId(u64); + +impl From<TestStreamId> for Vec<u8> { + fn from(value: TestStreamId) -> Self { + value.0.to_be_bytes().to_vec() + } +} + +impl TryFrom<Vec<u8>> for TestStreamId { + type Error = ProtobufConversionError; + fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> { + if bytes.len() != 8 { + return Err(ProtobufConversionError::DecodeError(DecodeError::new("Invalid length"))); + } + let mut array = [0; 8]; + array.copy_from_slice(&bytes); + Ok(TestStreamId(u64::from_be_bytes(array))) + } +} + +impl PartialOrd for TestStreamId { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for TestStreamId { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl Display for TestStreamId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "TestStreamId({})", self.0) + } +} + +type StreamMessage = apollo_protobuf::consensus::StreamMessage<ProposalPart, TestStreamId>; + +struct FakeBroadcastClient { + sender: Sender<StreamMessage>, +} + +#[async_trait::async_trait] +impl BroadcastTopicClientTrait<StreamMessage> for FakeBroadcastClient { + async fn broadcast_message(&mut self, message: StreamMessage) -> Result<(), SendError> { + self.sender.send(message).await + } + + async fn report_peer(&mut self, _: BroadcastedMessageMetadata) -> Result<(), SendError> { + todo!() + } + + async fn continue_propagation( + &mut self, + _: &BroadcastedMessageMetadata, + ) -> Result<(), SendError> { + todo!() + } +} + +#[allow(clippy::type_complexity)] +fn setup() -> ( + StreamHandler< + ProposalPart, + TestStreamId, + Receiver<ReceivedBroadcastedMessage<StreamMessage>>, + FakeBroadcastClient, + >, + Sender<ReceivedBroadcastedMessage<StreamMessage>>, + Receiver<Receiver<ProposalPart>>, + Sender<(TestStreamId, Receiver<ProposalPart>)>, + Receiver<StreamMessage>, +) { + let (inbound_internal_sender, streamhandler_to_client_receiver) = + mpsc::channel(CHANNEL_CAPACITY); + let (network_to_streamhandler_sender, inbound_network_receiver) = + mpsc::channel(CHANNEL_CAPACITY); + let (outbound_internal_sender, outbound_internal_receiver) = mpsc::channel(CHANNEL_CAPACITY); + let (outbound_network_sender, outbound_network_receiver) = mpsc::channel(CHANNEL_CAPACITY); + let outbound_network_sender = FakeBroadcastClient { sender: outbound_network_sender }; + let config = + StreamHandlerConfig { channel_buffer_capacity: CHANNEL_CAPACITY, max_streams: MAX_STREAMS }; + let stream_handler = StreamHandler::new( + config, + inbound_internal_sender, + inbound_network_receiver, + outbound_internal_receiver, + outbound_network_sender, + ); + + ( + stream_handler, + network_to_streamhandler_sender, + streamhandler_to_client_receiver, + outbound_internal_sender, + outbound_network_receiver, + ) +}
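To make the channel topology in `setup` concrete: the application holds one sender for registering outbound streams and one receiver that yields per-stream inbound Receivers, while the handler owns the network-facing endpoints. A minimal sketch of the client side of that contract, with `u64` and `String` standing in for the real stream ID and content types:

use futures::channel::mpsc;
use futures::SinkExt;

// Illustrative client-side usage of the stream-handler contract: open a
// stream by registering its receiver, push content, drop the sender for Fin.
#[tokio::main]
async fn main() {
    // Stands in for the handler's outbound registration channel.
    let (mut register_stream, _handler_side) =
        mpsc::channel::<(u64, mpsc::Receiver<String>)>(8);

    let (mut content_tx, content_rx) = mpsc::channel::<String>(8);
    register_stream.send((1, content_rx)).await.unwrap(); // start stream 1
    content_tx.send("part 0".to_string()).await.unwrap(); // message_id 0
    drop(content_tx); // closing the sender is what produces the Fin message
}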
+ +fn build_init_message(round: u32, stream_id: u64, message_id: u32) -> StreamMessage { + StreamMessage { + message: StreamMessageBody::Content(ProposalPart::Init(ProposalInit { + round, + ..Default::default() + })), + stream_id: TestStreamId(stream_id), + message_id: message_id.into(), + } +} + +fn build_fin_message(stream_id: u64, message_id: u32) -> StreamMessage { + StreamMessage { + message: StreamMessageBody::Fin, + stream_id: TestStreamId(stream_id), + message_id: message_id.into(), + } +} + +fn as_usize<T: TryInto<usize>>(t: T) -> usize +where + <T as TryInto<usize>>::Error: std::fmt::Debug, +{ + t.try_into().unwrap() +} + +#[tokio::test] +async fn outbound_single() { + let num_messages = 5; + let stream_id = 1; + let ( + mut stream_handler, + _network_to_streamhandler_sender, + _streamhandler_to_client_receiver, + mut client_to_streamhandler_sender, + mut streamhandler_to_network_receiver, + ) = setup(); + + // Create a new stream to send. + let (mut sender, stream_receiver) = mpsc::channel(CHANNEL_CAPACITY); + client_to_streamhandler_sender.send((TestStreamId(stream_id), stream_receiver)).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + + // Send the content of the stream. + for i in 0..num_messages { + let init = ProposalPart::Init(ProposalInit { round: i, ..Default::default() }); + sender.send(init).await.unwrap(); + } + + // Check the content is sent to the network in order. + for i in 0..num_messages { + stream_handler.handle_next_msg().await.unwrap(); + let actual = streamhandler_to_network_receiver.next().now_or_never().unwrap().unwrap(); + assert_eq!(actual, build_init_message(i, stream_id, i)); + } + + // Close the stream and check that a Fin is sent to the network. + sender.close_channel(); + stream_handler.handle_next_msg().await.unwrap(); + assert_eq!( + streamhandler_to_network_receiver.next().now_or_never().unwrap().unwrap(), + build_fin_message(stream_id, num_messages) + ); +} + +#[tokio::test] +async fn outbound_multiple() { + let num_messages = 5; + let num_streams = 3; + let ( + mut stream_handler, + _network_to_streamhandler_sender, + _streamhandler_to_client_receiver, + mut client_to_streamhandler_sender, + mut streamhandler_to_network_receiver, + ) = setup(); + + // Client opens up multiple outbound streams. + let mut stream_senders = Vec::new(); + for stream_id in 0..num_streams { + let (sender, stream_receiver) = mpsc::channel(CHANNEL_CAPACITY); + stream_senders.push(sender); + client_to_streamhandler_sender + .send((TestStreamId(stream_id), stream_receiver)) + .await + .unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + + // Send messages on all of the streams. + for stream_id in 0..num_streams { + let sender = stream_senders.get_mut(as_usize(stream_id)).unwrap(); + for i in 0..num_messages { + let init = ProposalPart::Init(ProposalInit { round: i, ..Default::default() }); + sender.send(init).await.unwrap(); + } + } + + // {StreamId : [Msgs]} - asserts order received matches expected order per stream. + let mut expected_msgs = (0..num_streams).map(|_| Vec::new()).collect::<Vec<_>>(); + let mut actual_msgs = expected_msgs.clone(); + for stream_id in 0..num_streams { + for i in 0..num_messages { + // The order the stream handler selects from among multiple streams is undefined.
+ stream_handler.handle_next_msg().await.unwrap(); + let msg = streamhandler_to_network_receiver.next().now_or_never().unwrap().unwrap(); + actual_msgs[as_usize(msg.stream_id.0)].push(msg); + expected_msgs[as_usize(stream_id)].push(build_init_message(i, stream_id, i)); + } + } + assert_eq!(actual_msgs, expected_msgs); + + // Drop all the senders and check Fins are sent. + stream_senders.clear(); + let mut stream_ids = (0..num_streams).collect::>(); + for _ in 0..num_streams { + stream_handler.handle_next_msg().await.unwrap(); + let fin = streamhandler_to_network_receiver.next().now_or_never().unwrap().unwrap(); + assert_eq!(fin.message, StreamMessageBody::Fin); + assert_eq!(fin.message_id, u64::from(num_messages)); + assert!(stream_ids.remove(&fin.stream_id.0)); + } +} + +#[tokio::test] +async fn inbound_in_order() { + let num_messages = 10; + let stream_id = 127; + let ( + mut stream_handler, + mut network_to_streamhandler_sender, + mut streamhandler_to_client_receiver, + _client_to_streamhandler_sender, + _streamhandler_to_network_receiver, + ) = setup(); + let metadata = BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + + // Send all messages in order. + for i in 0..num_messages { + let message = build_init_message(i, stream_id, i); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + let message = build_fin_message(stream_id, num_messages); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + // Fin is communicated by dropping the sender, hence `..num_message` not `..=num_messages` + let mut receiver = streamhandler_to_client_receiver.next().now_or_never().unwrap().unwrap(); + for i in 0..num_messages { + let message = receiver.next().await.unwrap(); + assert_eq!(message, ProposalPart::Init(ProposalInit { round: i, ..Default::default() })); + } + // Check that the receiver was closed: + assert!(matches!(receiver.try_next(), Ok(None))); +} + +#[tokio::test] +async fn lru_cache_for_inbound_streams() { + let num_streams = MAX_STREAMS + 1; + let ( + mut stream_handler, + mut network_to_streamhandler_sender, + mut streamhandler_to_client_receiver, + _client_to_streamhandler_sender, + _streamhandler_to_network_receiver, + ) = setup(); + + let metadata = BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + for i in 0..num_streams { + let message = build_fin_message(i.try_into().unwrap(), 1); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + + for i in (0..num_streams).rev() { + let message = build_init_message(i.try_into().unwrap(), i.try_into().unwrap(), 0); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + + for i in (0..num_streams).rev() { + let mut receiver = streamhandler_to_client_receiver.next().now_or_never().unwrap().unwrap(); + let message = receiver.next().await.unwrap(); + assert_eq!( + message, + ProposalPart::Init(ProposalInit { round: i.try_into().unwrap(), ..Default::default() }) + ); + if i == 0 { + // This stream was reopened, but it should only have one message, and left open. + assert!(receiver.try_next().is_err()); + } else { + // The rest of the channels should have successfully received all messages, + // and closed after receiving the Fin message. 
+ assert!(matches!(receiver.try_next(), Ok(None))); + } + } +} + +#[tokio::test] +async fn inbound_multiple() { + let num_messages = 5; + let num_streams = 3; + let ( + mut stream_handler, + mut network_to_streamhandler_sender, + mut streamhandler_to_client_receiver, + _client_to_streamhandler_sender, + _streamhandler_to_network_receiver, + ) = setup(); + let metadata = BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + + // Send all messages to all streams, each stream's messages in order. + for sid in 0..num_streams { + for i in 0..num_messages { + let message = build_init_message(i, sid, i); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + let message = build_fin_message(sid, num_messages); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + + let mut expected_msgs = (0..num_streams).map(|_| Vec::new()).collect::>(); + let mut actual_msgs = expected_msgs.clone(); + for sid in 0..num_streams { + let mut receiver = streamhandler_to_client_receiver.next().now_or_never().unwrap().unwrap(); + // Fin is communicated by dropping the sender, hence `..num_message` not `..=num_messages` + for i in 0..num_messages { + let message = receiver.next().await.unwrap(); + actual_msgs.get_mut(as_usize(sid)).unwrap().push(message); + expected_msgs + .get_mut(as_usize(sid)) + .unwrap() + .push(ProposalPart::Init(ProposalInit { round: i, ..Default::default() })); + } + // Check that the receiver was closed: + assert!(matches!(receiver.try_next(), Ok(None))); + } + assert_eq!(actual_msgs, expected_msgs); +} + +#[tokio::test] +async fn inbound_delayed_first() { + let num_messages = 10; + let stream_id = 127; + let ( + mut stream_handler, + mut network_to_streamhandler_sender, + mut streamhandler_to_client_receiver, + _client_to_streamhandler_sender, + _streamhandler_to_network_receiver, + ) = setup(); + let metadata = BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + + // Send all messages besides first one. + for i in 1..num_messages { + let message = build_init_message(i, stream_id, i); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + let message = build_fin_message(stream_id, num_messages); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + + // Check that no receiver was created yet. + assert!(streamhandler_to_client_receiver.try_next().is_err()); + + // Send first message now. + let first_message = build_init_message(0, stream_id, 0); + network_to_streamhandler_sender.send((Ok(first_message), metadata.clone())).await.unwrap(); + // Activate the stream handler to ingest this message. + stream_handler.handle_next_msg().await.unwrap(); + + // Now first message and all cached messages should be received. 
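The `lru_cache_for_inbound_streams` test above depends on the `lru` crate's documented eviction contract, which the handler uses to cap `inbound_stream_data`: `push` returns the evicted least-recently-used entry once capacity is exceeded, and `pop` removes an entry for processing. A small sketch of that API (assuming the `lru` crate as imported by this diff):

use std::num::NonZeroUsize;
use lru::LruCache;

fn main() {
    let mut cache: LruCache<u64, &str> = LruCache::new(NonZeroUsize::new(2).unwrap());
    assert_eq!(cache.push(1, "a"), None);
    assert_eq!(cache.push(2, "b"), None);
    // Capacity exceeded: the least recently used entry (key 1) is returned,
    // which is what feeds the CONSENSUS_INBOUND_STREAM_EVICTED metric.
    assert_eq!(cache.push(3, "c"), Some((1, "a")));
    // `pop` removes and returns an entry, as the handler does per message.
    assert_eq!(cache.pop(&2), Some("b"));
}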
+ let mut receiver = streamhandler_to_client_receiver.next().now_or_never().unwrap().unwrap(); + // Fin is communicated by dropping the sender, hence `..num_message` not `..=num_messages` + for i in 0..num_messages { + let message = receiver.next().await.unwrap(); + assert_eq!(message, ProposalPart::Init(ProposalInit { round: i, ..Default::default() })); + } + // Check that the receiver was closed: + assert!(matches!(receiver.try_next(), Ok(None))); +} + +#[tokio::test] +async fn inbound_delayed_middle() { + let num_messages = 10; + let missing_message_id = 3; + let stream_id = 127; + let ( + mut stream_handler, + mut network_to_streamhandler_sender, + mut streamhandler_to_client_receiver, + _client_to_streamhandler_sender, + _streamhandler_to_network_receiver, + ) = setup(); + let metadata = BroadcastedMessageMetadata::get_test_instance(&mut get_rng()); + + // Send all messages besides one in the middle of the stream. + for i in 0..num_messages { + if i == missing_message_id { + continue; + } + let message = build_init_message(i, stream_id, i); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + } + let message = build_fin_message(stream_id, num_messages); + network_to_streamhandler_sender.send((Ok(message), metadata.clone())).await.unwrap(); + stream_handler.handle_next_msg().await.unwrap(); + + // Should receive a few messages, until we reach the missing one. + let mut receiver = streamhandler_to_client_receiver.next().now_or_never().unwrap().unwrap(); + for i in 0..missing_message_id { + let message = receiver.next().await.unwrap(); + assert_eq!(message, ProposalPart::Init(ProposalInit { round: i, ..Default::default() })); + } + + // Send the missing message now. + let missing_msg = build_init_message(missing_message_id, stream_id, missing_message_id); + network_to_streamhandler_sender.send((Ok(missing_msg), metadata.clone())).await.unwrap(); + // Activate the stream handler to ingest this message. + stream_handler.handle_next_msg().await.unwrap(); + + // Should now get missing message and all the following ones. + // Fin is communicated by dropping the sender, hence `..num_message` not `..=num_messages` + for i in missing_message_id..num_messages { + let message = receiver.next().await.unwrap(); + assert_eq!(message, ProposalPart::Init(ProposalInit { round: i, ..Default::default() })); + } + // Check that the receiver was closed: + assert!(matches!(receiver.try_next(), Ok(None))); +} diff --git a/crates/apollo_consensus/src/test_utils.rs b/crates/apollo_consensus/src/test_utils.rs new file mode 100644 index 00000000000..6a5447cf3bd --- /dev/null +++ b/crates/apollo_consensus/src/test_utils.rs @@ -0,0 +1,110 @@ +use std::time::Duration; + +use apollo_protobuf::consensus::{ProposalInit, Vote, VoteType}; +use apollo_protobuf::converters::ProtobufConversionError; +use async_trait::async_trait; +use futures::channel::{mpsc, oneshot}; +use mockall::mock; +use starknet_api::block::{BlockHash, BlockNumber}; +use starknet_types_core::felt::Felt; + +use crate::types::{ConsensusContext, ConsensusError, ProposalCommitment, Round, ValidatorId}; + +/// Define a consensus block which can be used to enable auto mocking Context. 
+#[derive(Debug, PartialEq, Clone)] +pub struct TestBlock { + pub content: Vec<u32>, + pub id: BlockHash, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum TestProposalPart { + Init(ProposalInit), +} + +impl From<ProposalInit> for TestProposalPart { + fn from(init: ProposalInit) -> Self { + TestProposalPart::Init(init) + } +} + +impl TryFrom<TestProposalPart> for ProposalInit { + type Error = ProtobufConversionError; + fn try_from(part: TestProposalPart) -> Result<Self, Self::Error> { + let TestProposalPart::Init(init) = part; + Ok(init) + } +} + +impl From<TestProposalPart> for Vec<u8> { + fn from(part: TestProposalPart) -> Vec<u8> { + let TestProposalPart::Init(init) = part; + init.into() + } +} + +impl TryFrom<Vec<u8>> for TestProposalPart { + type Error = ProtobufConversionError; + + fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> { + Ok(TestProposalPart::Init(value.try_into()?)) + } +} + +// TODO(matan): When QSelf is supported, switch to automocking `ConsensusContext`. +mock! { + pub TestContext {} + + #[async_trait] + impl ConsensusContext for TestContext { + type ProposalPart = TestProposalPart; + + async fn build_proposal( + &mut self, + init: ProposalInit, + timeout: Duration, + ) -> oneshot::Receiver<ProposalCommitment>; + + async fn validate_proposal( + &mut self, + init: ProposalInit, + timeout: Duration, + content: mpsc::Receiver<TestProposalPart> + ) -> oneshot::Receiver<ProposalCommitment>; + + async fn repropose( + &mut self, + id: ProposalCommitment, + init: ProposalInit, + ); + + async fn validators(&self, height: BlockNumber) -> Vec<ValidatorId>; + + fn proposer(&self, height: BlockNumber, round: Round) -> ValidatorId; + + async fn broadcast(&mut self, message: Vote) -> Result<(), ConsensusError>; + + async fn decision_reached( + &mut self, + block: ProposalCommitment, + precommits: Vec<Vote>, + ) -> Result<(), ConsensusError>; + + async fn try_sync(&mut self, height: BlockNumber) -> bool; + + async fn set_height_and_round(&mut self, height: BlockNumber, round: Round); + } +} + +pub fn prevote(block_felt: Option<Felt>, height: u64, round: u32, voter: ValidatorId) -> Vote { + let block_hash = block_felt.map(BlockHash); + Vote { vote_type: VoteType::Prevote, height, round, block_hash, voter } +} + +pub fn precommit(block_felt: Option<Felt>, height: u64, round: u32, voter: ValidatorId) -> Vote { + let block_hash = block_felt.map(BlockHash); + Vote { vote_type: VoteType::Precommit, height, round, block_hash, voter } +} +pub fn proposal_init(height: u64, round: u32, proposer: ValidatorId) -> ProposalInit { + ProposalInit { height: BlockNumber(height), round, proposer, ..Default::default() } +} diff --git a/crates/apollo_consensus/src/types.rs b/crates/apollo_consensus/src/types.rs new file mode 100644 index 00000000000..afca6e34327 --- /dev/null +++ b/crates/apollo_consensus/src/types.rs @@ -0,0 +1,168 @@ +//! Types for interfacing between consensus and the node. +use std::fmt::Debug; +use std::time::Duration; + +use apollo_network::network_manager::{ + BroadcastTopicChannels, + BroadcastTopicClient, + GenericReceiver, +}; +use apollo_network_types::network_types::BroadcastedMessageMetadata; +use apollo_protobuf::consensus::{ProposalInit, Vote}; +use apollo_protobuf::converters::ProtobufConversionError; +use async_trait::async_trait; +use futures::channel::{mpsc, oneshot}; +use starknet_api::block::{BlockHash, BlockNumber}; +use starknet_api::core::ContractAddress; + +/// Used to identify the node by consensus. +/// 1. This ID is derived from the id registered with Starknet's L2 staking contract. +/// 2. We must be able to derive the public key associated with this ID for the sake of validating +/// signatures. +// TODO(matan): Determine the actual type of NodeId. +pub type ValidatorId = ContractAddress; +pub type Round = u32; +pub type ProposalCommitment = BlockHash;
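The mocked `build_proposal`/`validate_proposal` signatures above, and the `ConsensusContext` trait they mirror (defined next), encode a "return immediately, resolve later" contract: the context hands back a `oneshot::Receiver` and fulfills or drops the sender from a background task. A minimal sketch of that pattern, assuming a tokio runtime and illustrative names:

use futures::channel::oneshot;

// Illustrative only: kick off work and return the receiver immediately;
// the caller awaits the commitment (or sees a dropped sender) later.
async fn build_proposal_sketch() -> oneshot::Receiver<u64> {
    let (tx, rx) = oneshot::channel();
    tokio::spawn(async move {
        // ... stream out content and compute the proposal commitment ...
        let _ = tx.send(42); // drop `tx` without sending to signal failure
    });
    rx
}

#[tokio::main]
async fn main() {
    let rx = build_proposal_sketch().await;
    assert_eq!(rx.await, Ok(42));
}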
+ +/// Interface for consensus to call out to the node. +/// +/// Function calls should be assumed to not be cancel safe. +#[async_trait] +pub trait ConsensusContext { + /// The parts of the proposal that are streamed in. + /// Must contain at least the ProposalInit and ProposalFin. + type ProposalPart: TryFrom<Vec<u8>, Error = ProtobufConversionError> + + Into<Vec<u8>> + + TryInto<ProposalInit, Error = ProtobufConversionError> + + From<ProposalInit> + + Clone + + Send + + Debug; + + // TODO(matan): The oneshot for receiving the build block could be generalized to just be some + // future which returns a block. + + /// This function is called by consensus to request a block from the node. It expects that this + /// call will return immediately and that consensus can then stream in the block's content in + /// parallel to the block being built. + /// + /// Params: + /// - `init`: The `ProposalInit` that is broadcast to the network. + /// - `timeout`: The maximum time to wait for the block to be built. + /// + /// Returns: + /// - A receiver for the block id once ConsensusContext has finished streaming out the content + /// and building it. If the block fails to be built, the Sender will be dropped by + /// ConsensusContext. + async fn build_proposal( + &mut self, + init: ProposalInit, + timeout: Duration, + ) -> oneshot::Receiver<ProposalCommitment>; + + /// This function is called by consensus to validate a block. It expects that this call will + /// return immediately and that context can then stream in the block's content in parallel to + /// consensus continuing to handle other tasks. + /// + /// Params: + /// - `init`: The `ProposalInit` of the proposal; its height indicates the initial state of + /// the block, and its round the consensus round being validated. + /// - `timeout`: The maximum time to wait for the block to be built. + /// - `content`: A receiver for the stream of the block's content. + /// + /// Returns: + /// - A receiver for the block id. If a valid block cannot be built the Sender will be dropped + /// by ConsensusContext. + async fn validate_proposal( + &mut self, + init: ProposalInit, + timeout: Duration, + content: mpsc::Receiver<Self::ProposalPart>, + ) -> oneshot::Receiver<ProposalCommitment>; + + /// This function is called by consensus to retrieve the content of a previously built or + /// validated proposal. It broadcasts the proposal to the network. + /// + /// Params: + /// - `id`: The `ProposalCommitment` associated with the block's content. + /// - `init`: The `ProposalInit` that is broadcast to the network. + async fn repropose(&mut self, id: ProposalCommitment, init: ProposalInit); + + /// Get the set of validators for a given height. These are the nodes that can propose and vote + /// on blocks. + // TODO(matan): We expect this to change in the future to BTreeMap. Why? + // 1. Map - The nodes will have associated information (e.g. voting weight). + // 2. BTreeMap - We want a stable ordering of the nodes for deterministic leader selection. + async fn validators(&self, height: BlockNumber) -> Vec<ValidatorId>; + + /// Calculates the ID of the Proposer based on the inputs. + // TODO(matan): Consider passing the validator set in order to keep this sync. + fn proposer(&self, height: BlockNumber, round: Round) -> ValidatorId; + + async fn broadcast(&mut self, message: Vote) -> Result<(), ConsensusError>; + + /// Update the context that a decision has been reached for a given height. + /// - `block` identifies the decision.
+ /// - `precommits` - All precommits must be for the same `(block, height, round)` and form a + /// quorum (>2/3 of the voting power) for this height. + async fn decision_reached( + &mut self, + block: ProposalCommitment, + precommits: Vec<Vote>, + ) -> Result<(), ConsensusError>; + + /// Attempt to learn of a decision from the sync protocol. + /// Returns true if a decision was learned so consensus can proceed. + async fn try_sync(&mut self, height: BlockNumber) -> bool; + + /// Update the context with the current height and round. + /// Must be called at the beginning of each height. + async fn set_height_and_round(&mut self, height: BlockNumber, round: Round); +} + +#[derive(PartialEq, Debug)] +pub struct Decision { + pub precommits: Vec<Vote>, + pub block: ProposalCommitment, +} + +pub struct BroadcastVoteChannel { + pub broadcasted_messages_receiver: + GenericReceiver<(Result<Vote, ProtobufConversionError>, BroadcastedMessageMetadata)>, + pub broadcast_topic_client: BroadcastTopicClient<Vote>, +} + +impl From<BroadcastTopicChannels<Vote>> for BroadcastVoteChannel { + fn from(broadcast_topic_channels: BroadcastTopicChannels<Vote>) -> Self { + BroadcastVoteChannel { + broadcasted_messages_receiver: Box::new( + broadcast_topic_channels.broadcasted_messages_receiver, + ), + broadcast_topic_client: broadcast_topic_channels.broadcast_topic_client, + } + } +} + +#[derive(thiserror::Error, PartialEq, Debug)] +pub enum ConsensusError { + #[error(transparent)] + Canceled(#[from] oneshot::Canceled), + #[error(transparent)] + ProtobufConversionError(#[from] ProtobufConversionError), + #[error(transparent)] + SendError(#[from] mpsc::SendError), + // Indicates an error in communication between consensus and the node's networking component. + // As opposed to an error between this node and peer nodes. + #[error("{0}")] + InternalNetworkError(String), + #[error("{0}")] + SyncError(String), + // For example the state machine and SHC are out of sync. + #[error("{0}")] + InternalInconsistency(String), + #[error("Block info conversion error: {0}")] + BlockInfoConversion(#[from] starknet_api::StarknetApiError), + #[error("{0}")] + Other(String), +} diff --git a/crates/apollo_consensus/src/votes_threshold.rs b/crates/apollo_consensus/src/votes_threshold.rs new file mode 100644 index 00000000000..537589b8a5e --- /dev/null +++ b/crates/apollo_consensus/src/votes_threshold.rs @@ -0,0 +1,49 @@ +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +#[path = "votes_threshold_test.rs"] +mod votes_threshold_test; + +/// Represents a threshold for the number of votes (out of total votes) required to meet a quorum. +/// For example, a threshold of 2/3 means that more than 2/3 of the total votes must be in favor. +/// Note that if the number of votes is exactly equal to the denominator, the threshold is not met. +/// If the total number of votes is zero, the threshold is not met. +#[derive(Serialize, Deserialize)] +pub struct VotesThreshold { + numerator: u64, + denominator: u64, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub enum QuorumType { + #[default] + Byzantine, + Honest, +} + +// Standard Tendermint consensus threshold. +pub const BYZANTINE_QUORUM: VotesThreshold = VotesThreshold::new(2, 3); +pub const ROUND_SKIP_THRESHOLD: VotesThreshold = VotesThreshold::new(1, 3); + +// Assumes no malicious validators.
+pub const HONEST_QUORUM: VotesThreshold = VotesThreshold::new(1, 2); + +impl VotesThreshold { + const fn new(numerator: u64, denominator: u64) -> Self { + assert!(denominator > 0, "Denominator must be greater than zero"); + assert!(denominator >= numerator, "Denominator must be greater than or equal to numerator"); + Self { numerator, denominator } + } + + pub fn from_quorum_type(quorum_type: QuorumType) -> Self { + match quorum_type { + QuorumType::Byzantine => BYZANTINE_QUORUM, + QuorumType::Honest => HONEST_QUORUM, + } + } + + pub fn is_met(&self, amount: u64, total: u64) -> bool { + amount.checked_mul(self.denominator).expect("Numeric overflow") + > total.checked_mul(self.numerator).expect("Numeric overflow") + } +} diff --git a/crates/apollo_consensus/src/votes_threshold_test.rs b/crates/apollo_consensus/src/votes_threshold_test.rs new file mode 100644 index 00000000000..b994a263f37 --- /dev/null +++ b/crates/apollo_consensus/src/votes_threshold_test.rs @@ -0,0 +1,28 @@ +use crate::votes_threshold::VotesThreshold; + +#[test] +#[should_panic] +fn votes_threshold_denominator_zero() { + let _ = VotesThreshold::new(1, 0); +} + +#[test] +#[should_panic] +fn votes_threshold_numerator_greater() { + // Denominator must be greater than or equal to numerator + let _ = VotesThreshold::new(2, 1); +} + +#[test] +fn votes_threshold_is_met() { + let threshold = VotesThreshold::new(2, 3); + assert!(threshold.is_met(3, 4)); // 3 out of 4 votes + assert!(threshold.is_met(5, 6)); // 5 out of 6 votes + assert!(threshold.is_met(10, 10)); // All votes in favor + + // Test cases where the threshold is not met + let threshold = VotesThreshold::new(2, 3); + assert!(!threshold.is_met(1, 3)); // 1 out of 3 votes + assert!(!threshold.is_met(2, 3)); // 2 out of 3 votes (not enough, must be above threshold) + assert!(!threshold.is_met(2, 5)); // 2 out of 5 votes +} diff --git a/crates/apollo_consensus_manager/Cargo.toml b/crates/apollo_consensus_manager/Cargo.toml new file mode 100644 index 00000000000..9ec0732e1a3 --- /dev/null +++ b/crates/apollo_consensus_manager/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "apollo_consensus_manager" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[features] +testing = [] + +[lints] +workspace = true + +[dependencies] +apollo_batcher_types.workspace = true +apollo_class_manager_types.workspace = true +apollo_config.workspace = true +apollo_consensus.workspace = true +apollo_consensus_orchestrator.workspace = true +apollo_infra.workspace = true +apollo_infra_utils.workspace = true +apollo_l1_gas_price.workspace = true +apollo_l1_gas_price_types.workspace = true +apollo_metrics.workspace = true +apollo_network.workspace = true +apollo_protobuf.workspace = true +apollo_reverts.workspace = true +apollo_state_sync_types.workspace = true +apollo_time.workspace = true +async-trait.workspace = true +futures.workspace = true +serde.workspace = true +starknet_api.workspace = true +tokio.workspace = true +tracing.workspace = true +validator.workspace = true + +[dev-dependencies] +apollo_batcher_types = { workspace = true, features = ["testing"] } +apollo_l1_gas_price_types = { workspace = true, features = ["testing"] } +apollo_state_sync_types = { workspace = true, features = ["testing"] } +mockall.workspace = true +rstest.workspace = true diff --git a/crates/apollo_consensus_manager/src/communication.rs b/crates/apollo_consensus_manager/src/communication.rs new file mode 100644 index 00000000000..430ceb0392e --- /dev/null 
+++ b/crates/apollo_consensus_manager/src/communication.rs @@ -0,0 +1,5 @@ +use apollo_infra::component_server::WrapperServer; + +use crate::consensus_manager::ConsensusManager; + +pub type ConsensusManagerServer = WrapperServer; diff --git a/crates/apollo_consensus_manager/src/config.rs b/crates/apollo_consensus_manager/src/config.rs new file mode 100644 index 00000000000..0cb9a00ed2c --- /dev/null +++ b/crates/apollo_consensus_manager/src/config.rs @@ -0,0 +1,103 @@ +use std::collections::BTreeMap; + +use apollo_config::dumping::{prepend_sub_config_name, ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use apollo_consensus::config::{ConsensusConfig, StreamHandlerConfig}; +use apollo_consensus_orchestrator::cende::CendeConfig; +use apollo_consensus_orchestrator::config::ContextConfig; +use apollo_l1_gas_price::eth_to_strk_oracle::EthToStrkOracleConfig; +use apollo_network::NetworkConfig; +use apollo_reverts::RevertConfig; +use serde::{Deserialize, Serialize}; +use starknet_api::block::BlockNumber; +use validator::Validate; + +/// The consensus manager related configuration. +#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)] +pub struct ConsensusManagerConfig { + pub consensus_config: ConsensusConfig, + pub context_config: ContextConfig, + pub eth_to_strk_oracle_config: EthToStrkOracleConfig, + pub stream_handler_config: StreamHandlerConfig, + #[validate] + pub network_config: NetworkConfig, + pub cende_config: CendeConfig, + pub revert_config: RevertConfig, + pub votes_topic: String, + pub proposals_topic: String, + pub broadcast_buffer_size: usize, + pub immediate_active_height: BlockNumber, + // Assumes all validators are honest. If true, uses 1/2 votes to get quorum. Use with caution! + pub assume_no_malicious_validators: bool, +} + +impl SerializeConfig for ConsensusManagerConfig { + fn dump(&self) -> BTreeMap { + let mut config = BTreeMap::from_iter([ + ser_param( + "votes_topic", + &self.votes_topic, + "The topic for consensus votes.", + ParamPrivacyInput::Public, + ), + ser_param( + "proposals_topic", + &self.proposals_topic, + "The topic for consensus proposals.", + ParamPrivacyInput::Public, + ), + ser_param( + "broadcast_buffer_size", + &self.broadcast_buffer_size, + "The buffer size for the broadcast channel.", + ParamPrivacyInput::Public, + ), + ser_param( + "immediate_active_height", + &self.immediate_active_height, + "The height at which the node may actively participate in consensus.", + ParamPrivacyInput::Public, + ), + ser_param( + "assume_no_malicious_validators", + &self.assume_no_malicious_validators, + "Assumes all validators are honest. If true, uses 1/2 votes to get quorum. 
Use \ + with caution!", + ParamPrivacyInput::Public, + ), + ]); + config.extend(prepend_sub_config_name(self.consensus_config.dump(), "consensus_config")); + config.extend(prepend_sub_config_name(self.context_config.dump(), "context_config")); + config.extend(prepend_sub_config_name( + self.eth_to_strk_oracle_config.dump(), + "eth_to_strk_oracle_config", + )); + config.extend(prepend_sub_config_name( + self.stream_handler_config.dump(), + "stream_handler_config", + )); + config.extend(prepend_sub_config_name(self.cende_config.dump(), "cende_config")); + config.extend(prepend_sub_config_name(self.network_config.dump(), "network_config")); + config.extend(prepend_sub_config_name(self.revert_config.dump(), "revert_config")); + config + } +} + +impl Default for ConsensusManagerConfig { + fn default() -> Self { + ConsensusManagerConfig { + consensus_config: ConsensusConfig::default(), + context_config: ContextConfig::default(), + eth_to_strk_oracle_config: EthToStrkOracleConfig::default(), + stream_handler_config: StreamHandlerConfig::default(), + cende_config: CendeConfig::default(), + network_config: NetworkConfig::default(), + revert_config: RevertConfig::default(), + votes_topic: "consensus_votes".to_string(), + proposals_topic: "consensus_proposals".to_string(), + broadcast_buffer_size: 10000, + immediate_active_height: BlockNumber::default(), + assume_no_malicious_validators: false, + } + } +} diff --git a/crates/apollo_consensus_manager/src/consensus_manager.rs b/crates/apollo_consensus_manager/src/consensus_manager.rs new file mode 100644 index 00000000000..0af93c189ad --- /dev/null +++ b/crates/apollo_consensus_manager/src/consensus_manager.rs @@ -0,0 +1,263 @@ +#[cfg(test)] +#[path = "consensus_manager_test.rs"] +mod consensus_manager_test; + +use std::collections::HashMap; +use std::sync::Arc; + +use apollo_batcher_types::batcher_types::RevertBlockInput; +use apollo_batcher_types::communication::SharedBatcherClient; +use apollo_class_manager_types::transaction_converter::TransactionConverter; +use apollo_class_manager_types::SharedClassManagerClient; +use apollo_consensus::stream_handler::StreamHandler; +use apollo_consensus::types::ConsensusError; +use apollo_consensus::votes_threshold::QuorumType; +use apollo_consensus_orchestrator::cende::CendeAmbassador; +use apollo_consensus_orchestrator::sequencer_consensus_context::{ + SequencerConsensusContext, + SequencerConsensusContextDeps, +}; +use apollo_infra::component_definitions::ComponentStarter; +use apollo_infra_utils::type_name::short_type_name; +use apollo_l1_gas_price::eth_to_strk_oracle::EthToStrkOracleClient; +use apollo_l1_gas_price_types::L1GasPriceProviderClient; +use apollo_network::gossipsub_impl::Topic; +use apollo_network::network_manager::metrics::{BroadcastNetworkMetrics, NetworkMetrics}; +use apollo_network::network_manager::{BroadcastTopicChannels, NetworkManager}; +use apollo_protobuf::consensus::{HeightAndRound, ProposalPart, StreamMessage, Vote}; +use apollo_reverts::revert_blocks_and_eternal_pending; +use apollo_state_sync_types::communication::SharedStateSyncClient; +use apollo_time::time::DefaultClock; +use async_trait::async_trait; +use futures::channel::mpsc; +use starknet_api::block::BlockNumber; +use tracing::{info, info_span, Instrument}; + +use crate::config::ConsensusManagerConfig; +use crate::metrics::{ + CONSENSUS_NUM_BLACKLISTED_PEERS, + CONSENSUS_NUM_CONNECTED_PEERS, + CONSENSUS_PROPOSALS_NUM_RECEIVED_MESSAGES, + CONSENSUS_PROPOSALS_NUM_SENT_MESSAGES, + CONSENSUS_VOTES_NUM_RECEIVED_MESSAGES, + 
CONSENSUS_VOTES_NUM_SENT_MESSAGES, +}; + +#[derive(Clone)] +pub struct ConsensusManager { + pub config: ConsensusManagerConfig, + pub batcher_client: SharedBatcherClient, + pub state_sync_client: SharedStateSyncClient, + pub class_manager_client: SharedClassManagerClient, + l1_gas_price_provider: Arc<dyn L1GasPriceProviderClient>, +} + +impl ConsensusManager { + pub fn new( + config: ConsensusManagerConfig, + batcher_client: SharedBatcherClient, + state_sync_client: SharedStateSyncClient, + class_manager_client: SharedClassManagerClient, + l1_gas_price_provider: Arc<dyn L1GasPriceProviderClient>, + ) -> Self { + Self { + config, + batcher_client, + state_sync_client, + class_manager_client, + l1_gas_price_provider, + } + } + + pub async fn run(&self) -> Result<(), ConsensusError> { + if self.config.revert_config.should_revert { + self.revert_batcher_blocks(self.config.revert_config.revert_up_to_and_including).await; + } + + let mut broadcast_metrics_by_topic = HashMap::new(); + broadcast_metrics_by_topic.insert( + Topic::new(self.config.votes_topic.clone()).hash(), + BroadcastNetworkMetrics { + num_sent_broadcast_messages: CONSENSUS_VOTES_NUM_SENT_MESSAGES, + num_received_broadcast_messages: CONSENSUS_VOTES_NUM_RECEIVED_MESSAGES, + }, + ); + broadcast_metrics_by_topic.insert( + Topic::new(self.config.proposals_topic.clone()).hash(), + BroadcastNetworkMetrics { + num_sent_broadcast_messages: CONSENSUS_PROPOSALS_NUM_SENT_MESSAGES, + num_received_broadcast_messages: CONSENSUS_PROPOSALS_NUM_RECEIVED_MESSAGES, + }, + ); + let network_manager_metrics = Some(NetworkMetrics { + num_connected_peers: CONSENSUS_NUM_CONNECTED_PEERS, + num_blacklisted_peers: CONSENSUS_NUM_BLACKLISTED_PEERS, + broadcast_metrics_by_topic: Some(broadcast_metrics_by_topic), + sqmr_metrics: None, + }); + let mut network_manager = + NetworkManager::new(self.config.network_config.clone(), None, network_manager_metrics); + + let proposals_broadcast_channels = network_manager + .register_broadcast_topic::<StreamMessage<ProposalPart, HeightAndRound>>( + Topic::new(self.config.proposals_topic.clone()), + self.config.broadcast_buffer_size, + ) + .expect("Failed to register broadcast topic"); + + let votes_broadcast_channels = network_manager + .register_broadcast_topic::<Vote>( + Topic::new(self.config.votes_topic.clone()), + self.config.broadcast_buffer_size, + ) + .expect("Failed to register broadcast topic"); + + let BroadcastTopicChannels { + broadcasted_messages_receiver: inbound_network_receiver, + broadcast_topic_client: outbound_network_sender, + } = proposals_broadcast_channels; + + let (inbound_internal_sender, inbound_internal_receiver) = + mpsc::channel(self.config.stream_handler_config.channel_buffer_capacity); + let (outbound_internal_sender, outbound_internal_receiver) = + mpsc::channel(self.config.stream_handler_config.channel_buffer_capacity); + let stream_handler = StreamHandler::new( + self.config.stream_handler_config.clone(), + inbound_internal_sender, + inbound_network_receiver, + outbound_internal_receiver, + outbound_network_sender, + ); + + let observer_height = self + .batcher_client + .get_height() + .await + .expect("Failed to get observer_height from batcher") + .height; + let active_height = if self.config.immediate_active_height == observer_height { + // Setting `start_height` is only used to enable consensus starting immediately without + // observing the first height. This means consensus may return to a height + // it has already voted on, risking equivocation. This is only safe to do if we + // restart all nodes at this height.
+ observer_height + } else { + BlockNumber(observer_height.0 + 1) + }; + + let context = SequencerConsensusContext::new( + self.config.context_config.clone(), + SequencerConsensusContextDeps { + transaction_converter: Arc::new(TransactionConverter::new( + Arc::clone(&self.class_manager_client), + self.config.context_config.chain_id.clone(), + )), + state_sync_client: Arc::clone(&self.state_sync_client), + batcher: Arc::clone(&self.batcher_client), + cende_ambassador: Arc::new(CendeAmbassador::new( + self.config.cende_config.clone(), + Arc::clone(&self.class_manager_client), + )), + eth_to_strk_oracle_client: Arc::new(EthToStrkOracleClient::new( + self.config.eth_to_strk_oracle_config.clone(), + )), + l1_gas_price_provider: self.l1_gas_price_provider.clone(), + clock: Arc::new(DefaultClock), + outbound_proposal_sender: outbound_internal_sender, + vote_broadcast_client: votes_broadcast_channels.broadcast_topic_client.clone(), + }, + ); + + let network_task = + tokio::spawn(network_manager.run().instrument(info_span!("[Consensus network]"))); + let stream_handler_task = tokio::spawn(stream_handler.run()); + let quorum_type = if self.config.assume_no_malicious_validators { + QuorumType::Honest + } else { + QuorumType::Byzantine + }; + let run_consensus_args = apollo_consensus::RunConsensusArguments { + start_active_height: active_height, + start_observe_height: observer_height, + validator_id: self.config.consensus_config.validator_id, + consensus_delay: self.config.consensus_config.startup_delay, + timeouts: self.config.consensus_config.timeouts.clone(), + sync_retry_interval: self.config.consensus_config.sync_retry_interval, + quorum_type, + }; + let consensus_fut = apollo_consensus::run_consensus( + run_consensus_args, + context, + votes_broadcast_channels.into(), + inbound_internal_receiver, + ); + + tokio::select! { + consensus_result = consensus_fut => { + match consensus_result { + Ok(_) => panic!("Consensus task finished unexpectedly"), + Err(e) => Err(e), + } + }, + network_result = network_task => { + panic!("Consensus' network task finished unexpectedly: {:?}", network_result); + } + stream_handler_result = stream_handler_task => { + panic!("Consensus' stream handler task finished unexpectedly: {:?}", stream_handler_result); + } + } + } + + // Performs reverts to the batcher. + async fn revert_batcher_blocks(&self, revert_up_to_and_including: BlockNumber) { + // If we revert all blocks up to height X (including), the new height marker will be X. + let batcher_height_marker = self + .batcher_client + .get_height() + .await + .expect("Failed to get batcher_height_marker from batcher") + .height; + + // This function will panic if the revert fails. 
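+ // Worked example (matching the unit test in consensus_manager_test.rs): with a
+ // batcher height marker of 10 and `revert_up_to_and_including` = 7, blocks 9, 8
+ // and 7 are reverted in descending order, leaving the new height marker at 7.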
+ let revert_blocks_fn = move |height| async move { + self.batcher_client + .revert_block(RevertBlockInput { height }) + .await + .unwrap_or_else(|_| panic!("Failed to revert block at height {height} in the batcher")); + }; + + revert_blocks_and_eternal_pending( + batcher_height_marker, + revert_up_to_and_including, + revert_blocks_fn, + "Batcher", + ) + .await; + } +} + +pub fn create_consensus_manager( + config: ConsensusManagerConfig, + batcher_client: SharedBatcherClient, + state_sync_client: SharedStateSyncClient, + class_manager_client: SharedClassManagerClient, + l1_gas_price_provider: Arc<dyn L1GasPriceProviderClient>, +) -> ConsensusManager { + ConsensusManager::new( + config, + batcher_client, + state_sync_client, + class_manager_client, + l1_gas_price_provider, + ) +} + +#[async_trait] +impl ComponentStarter for ConsensusManager { + async fn start(&mut self) { + info!("Starting component {}.", short_type_name::<Self>()); + self.run() + .await + .unwrap_or_else(|e| panic!("Failed to start ConsensusManager component: {:?}", e)) + } +} diff --git a/crates/apollo_consensus_manager/src/consensus_manager_test.rs b/crates/apollo_consensus_manager/src/consensus_manager_test.rs new file mode 100644 index 00000000000..3aa382fdade --- /dev/null +++ b/crates/apollo_consensus_manager/src/consensus_manager_test.rs @@ -0,0 +1,73 @@ +use std::sync::Arc; + +use apollo_batcher_types::batcher_types::{GetHeightResponse, RevertBlockInput}; +use apollo_batcher_types::communication::MockBatcherClient; +use apollo_class_manager_types::EmptyClassManagerClient; +use apollo_l1_gas_price_types::MockL1GasPriceProviderClient; +use apollo_reverts::RevertConfig; +use apollo_state_sync_types::communication::MockStateSyncClient; +use mockall::predicate::eq; +use starknet_api::block::BlockNumber; +use tokio::time::{timeout, Duration}; + +use crate::config::ConsensusManagerConfig; +use crate::consensus_manager::ConsensusManager; + +const BATCHER_HEIGHT: BlockNumber = BlockNumber(10); + +#[tokio::test] +async fn revert_batcher_blocks() { + const REVERT_UP_TO_AND_INCLUDING_HEIGHT: BlockNumber = BlockNumber(7); + + let mut mock_batcher_client = MockBatcherClient::new(); + mock_batcher_client + .expect_get_height() + .returning(|| Ok(GetHeightResponse { height: BATCHER_HEIGHT })); + + let expected_revert_heights = + (REVERT_UP_TO_AND_INCLUDING_HEIGHT.0..BATCHER_HEIGHT.0).rev().map(BlockNumber); + for height in expected_revert_heights { + mock_batcher_client + .expect_revert_block() + .times(1) + .with(eq(RevertBlockInput { height })) + .returning(|_| Ok(())); + } + + let manager_config = ConsensusManagerConfig { + revert_config: RevertConfig { + revert_up_to_and_including: REVERT_UP_TO_AND_INCLUDING_HEIGHT, + should_revert: true, + }, + ..Default::default() + }; + + let consensus_manager = ConsensusManager::new( + manager_config, + Arc::new(mock_batcher_client), + Arc::new(MockStateSyncClient::new()), + Arc::new(EmptyClassManagerClient), + Arc::new(MockL1GasPriceProviderClient::new()), + ); + + // TODO(Shahak, dvir): try to solve this better (the test will take 100 milliseconds to run).
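+ // `run()` does not return on success: after reverting, it parks forever inside
+ // `revert_blocks_and_eternal_pending`. The timeout below therefore bounds the test,
+ // and `unwrap_err()` asserts that it is the timeout that fires, not the manager.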
+ timeout(Duration::from_millis(100), consensus_manager.run()).await.unwrap_err(); +} + +#[tokio::test] +async fn no_reverts_without_config() { + let mut mock_batcher = MockBatcherClient::new(); + mock_batcher.expect_revert_block().times(0).returning(|_| Ok(())); + mock_batcher.expect_get_height().returning(|| Ok(GetHeightResponse { height: BlockNumber(0) })); + + let consensus_manager = ConsensusManager::new( + ConsensusManagerConfig::default(), + Arc::new(mock_batcher), + Arc::new(MockStateSyncClient::new()), + Arc::new(EmptyClassManagerClient), + Arc::new(MockL1GasPriceProviderClient::new()), + ); + + // TODO(Shahak, dvir): try to solve this better (the test will take 100 milliseconds to run). + timeout(Duration::from_millis(100), consensus_manager.run()).await.unwrap_err(); +} diff --git a/crates/apollo_consensus_manager/src/lib.rs b/crates/apollo_consensus_manager/src/lib.rs new file mode 100644 index 00000000000..97eda850bbd --- /dev/null +++ b/crates/apollo_consensus_manager/src/lib.rs @@ -0,0 +1,4 @@ +pub mod communication; +pub mod config; +pub mod consensus_manager; +pub mod metrics; diff --git a/crates/apollo_consensus_manager/src/metrics.rs b/crates/apollo_consensus_manager/src/metrics.rs new file mode 100644 index 00000000000..32798901b0d --- /dev/null +++ b/crates/apollo_consensus_manager/src/metrics.rs @@ -0,0 +1,18 @@ +use apollo_metrics::define_metrics; + +define_metrics!( + ConsensusManager => { + // topic agnostic metrics + MetricGauge { CONSENSUS_NUM_CONNECTED_PEERS, "apollo_consensus_num_connected_peers", "The number of connected peers to the consensus p2p component" }, + MetricGauge { CONSENSUS_NUM_BLACKLISTED_PEERS, "apollo_consensus_num_blacklisted_peers", "The number of currently blacklisted peers by the consensus component" }, + + // Votes topic metrics + MetricCounter { CONSENSUS_VOTES_NUM_SENT_MESSAGES, "apollo_consensus_votes_num_sent_messages", "The number of messages sent by the consensus p2p component over the Votes topic", init = 0 }, + MetricCounter { CONSENSUS_VOTES_NUM_RECEIVED_MESSAGES, "apollo_consensus_votes_num_received_messages", "The number of messages received by the consensus p2p component over the Votes topic", init = 0 }, + + // Proposals topic metrics + MetricCounter { CONSENSUS_PROPOSALS_NUM_SENT_MESSAGES, "apollo_consensus_proposals_num_sent_messages", "The number of messages sent by the consensus p2p component over the Proposals topic", init = 0 }, + MetricCounter { CONSENSUS_PROPOSALS_NUM_RECEIVED_MESSAGES, "apollo_consensus_proposals_num_received_messages", "The number of messages received by the consensus p2p component over the Proposals topic", init = 0 }, + + }, +); diff --git a/crates/apollo_consensus_orchestrator/Cargo.toml b/crates/apollo_consensus_orchestrator/Cargo.toml new file mode 100644 index 00000000000..df6be2aedf6 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/Cargo.toml @@ -0,0 +1,80 @@ +[package] +name = "apollo_consensus_orchestrator" +version.workspace = true +edition.workspace = true +repository.workspace = true +license-file.workspace = true +description = "Implements the consensus context and orchestrates the node's components accordingly" + +[dependencies] +apollo_batcher_types.workspace = true +apollo_class_manager_types.workspace = true +apollo_config.workspace = true +apollo_consensus.workspace = true +apollo_infra_utils.workspace = true +apollo_l1_gas_price_types.workspace = true +apollo_metrics.workspace = true +apollo_network.workspace = true +apollo_proc_macros.workspace = true 
+apollo_protobuf.workspace = true +apollo_state_sync_types.workspace = true +apollo_time = { workspace = true, features = ["tokio"] } +async-trait.workspace = true +blockifier.workspace = true +cairo-lang-starknet-classes.workspace = true +chrono.workspace = true +ethnum.workspace = true +futures.workspace = true +indexmap.workspace = true +num-rational.workspace = true +paste.workspace = true +reqwest = { workspace = true, features = ["json"] } +serde.workspace = true +serde_json = { workspace = true, features = ["arbitrary_precision"] } +shared_execution_objects.workspace = true +starknet-types-core.workspace = true +starknet_api.workspace = true +strum.workspace = true +strum_macros.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true, features = ["rt"] } +tracing.workspace = true +url = { workspace = true, features = ["serde"] } +validator.workspace = true + +[dev-dependencies] +assert_matches.workspace = true +apollo_batcher.workspace = true +apollo_batcher_types = { workspace = true, features = ["testing"] } +apollo_class_manager_types = { workspace = true, features = ["testing"] } +apollo_infra = { workspace = true, features = ["testing"] } +apollo_infra_utils = { workspace = true, features = ["testing"] } +apollo_l1_gas_price_types = { workspace = true, features = ["testing"] } +apollo_metrics = { workspace = true, features = ["testing"] } +apollo_network = { workspace = true, features = ["testing"] } +apollo_starknet_client.workspace = true +apollo_state_sync_types = { workspace = true, features = ["testing"] } +apollo_storage = { workspace = true, features = ["testing"] } +apollo_test_utils.workspace = true +apollo_time = { workspace = true, features = ["testing", "tokio"] } +cairo-lang-casm.workspace = true +cairo-lang-utils.workspace = true +cairo-vm.workspace = true +metrics.workspace = true +metrics-exporter-prometheus.workspace = true +mockall.workspace = true +mockito.workspace = true +num-bigint.workspace = true +rstest.workspace = true +serde_json.workspace = true + +[lints] +workspace = true + +[package.metadata.cargo-machete] +# `paste`, `apollo_infra_utils` are used in the `define_versioned_constants!` macro but may be falsely detected as unused. 
+ignored = ["apollo_infra_utils", "paste"] + +[features] +testing = [] diff --git a/crates/apollo_consensus_orchestrator/resources/central_blob.json b/crates/apollo_consensus_orchestrator/resources/central_blob.json new file mode 100644 index 00000000000..bde57f8fa52 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_blob.json @@ -0,0 +1,1061 @@ +{ + "block_number": 5, + "state_diff": { + "address_to_class_hash": { + "0x1": "0x1", + "0x5": "0x5" + }, + "nonces": { + "L1": { + "0x2": "0x2" + } + }, + "storage_updates": { + "L1": { + "0x3": { + "0x3": "0x3" + } + } + }, + "declared_classes": { + "0x4": "0x4" + }, + "block_info": { + "block_number": 5, + "block_timestamp": 6, + "sequencer_address": "0x7", + "l1_gas_price": { + "price_in_wei": "0x8", + "price_in_fri": "0x9" + }, + "l1_data_gas_price": { + "price_in_wei": "0xa", + "price_in_fri": "0xb" + }, + "l2_gas_price": { + "price_in_wei": "0xc", + "price_in_fri": "0xd" + }, + "use_kzg_da": true, + "starknet_version": "0.14.0" + } + }, + "compressed_state_diff": { + "address_to_class_hash": { + "0x1": "0x1", + "0x5": "0x5" + }, + "nonces": { + "L1": { + "0x2": "0x2" + } + }, + "storage_updates": { + "L1": { + "0x3": { + "0x3": "0x3" + } + } + }, + "declared_classes": { + "0x4": "0x4" + }, + "block_info": { + "block_number": 5, + "block_timestamp": 6, + "sequencer_address": "0x7", + "l1_gas_price": { + "price_in_wei": "0x8", + "price_in_fri": "0x9" + }, + "l1_data_gas_price": { + "price_in_wei": "0xa", + "price_in_fri": "0xb" + }, + "l2_gas_price": { + "price_in_wei": "0xc", + "price_in_fri": "0xd" + }, + "use_kzg_da": true, + "starknet_version": "0.14.0" + } + }, + "bouncer_weights": { + "l1_gas": 8, + "message_segment_length": 9, + "n_events": 2, + "state_diff_size": 45, + "sierra_gas": 10, + "n_txs": 2, + "proving_gas": 11 + }, + "fee_market_info": { + "l2_gas_consumed": 150000, + "next_l2_gas_price": "0x186a0" + }, + "transactions": [ + { + "tx": { + "type": "DECLARE", + "version": "0x3", + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", + "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "signature": [ + "0x0", + "0x1", + "0x2" + ], + "nonce": "0x1", + "class_hash": "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056", + "compiled_class_hash": "0x1", + "sender_address": "0x12fd537", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "paymaster_data": [], + "account_deployment_data": [], + "sierra_program_size": 3, + "abi_size": 9, + "sierra_version": [ + "0x0", + "0x1", + "0x0" + ], + "hash_value": "0x1" + }, + "time_created": 6 + }, + { + "tx": { + "type": "INVOKE_FUNCTION", + "version": "0x3", + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", + "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "signature": [ + "0x0", + "0x1", + "0x2" + ], + "nonce": "0x1", + "sender_address": "0x14abfd58671a1a9b30de2fcd2a42e8bff2ce1096a7c70bc7995904965f277e", + "calldata": [ + "0x0", + "0x1" + ], + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "paymaster_data": [], + "account_deployment_data": [], + "hash_value": "0x2" + }, + "time_created": 6 + }, + { + "tx": { + "type": "DEPLOY_ACCOUNT", + "version": "0x3", + "resource_bounds": { + 
"L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", + "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "signature": [ + "0x0", + "0x1", + "0x2" + ], + "nonce": "0x1", + "class_hash": "0x1b5a0b09f23b091d5d1fa2f660ddfad6bcfce607deba23806cd7328ccfb8ee9", + "contract_address_salt": "0x2", + "sender_address": "0x4c2e031b0ddaa38e06fd9b1bf32bff739965f9d64833006204c67cbc879a57c", + "constructor_calldata": [ + "0x0", + "0x1", + "0x2" + ], + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "paymaster_data": [], + "hash_value": "0x3" + }, + "time_created": 6 + }, + { + "tx": { + "type": "L1_HANDLER", + "contract_address": "0x14abfd58671a1a9b30de2fcd2a42e8bff2ce1096a7c70bc7995904965f277e", + "entry_point_selector": "0x2a", + "calldata": [ + "0x0", + "0x1" + ], + "nonce": "0x1", + "paid_fee_on_l1": "0x1", + "hash_value": "0xc947753befd252ca08042000cd6d783162ee2f5df87b519ddf3081b9b4b997" + }, + "time_created": 6 + }, + { + "tx": { + "type": "DECLARE", + "version": "0x3", + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", + "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "signature": [ + "0x0", + "0x1", + "0x2" + ], + "nonce": "0x1", + "class_hash": "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056", + "compiled_class_hash": "0x1", + "sender_address": "0x12fd537", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "paymaster_data": [], + "account_deployment_data": [], + "sierra_program_size": 3, + "abi_size": 9, + "sierra_version": [ + "0x0", + "0x1", + "0x0" + ], + "hash_value": "0x4" + }, + "time_created": 6 + } + ], + "execution_infos": [ + { + "validate_call_info": { + "call": { + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_type": "EXTERNAL", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "storage_address": "0xc0020000", + "caller_address": "0x1", + "call_type": "Call", + "initial_gas": 100000000 + }, + "execution": { + "retdata": [ + "0x56414c4944" + ], + "events": [ + { + "order": 2, + "event": { + "keys": [ + "0x9" + ], + "data": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "l2_to_l1_messages": [ + { + "order": 1, + "message": { + "to_address": "0x1", + "payload": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "cairo_native": false, + "failed": false, + "gas_consumed": 11690 + }, + "inner_calls": [ + { + "call": { + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_type": "EXTERNAL", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "storage_address": "0xc0020000", + "caller_address": "0x1", + "call_type": "Call", + "initial_gas": 100000000 + }, + "execution": { + "retdata": [ + "0x56414c4944" + ], + "events": [ + { + "order": 2, + "event": { + "keys": [ + "0x9" + ], + "data": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "l2_to_l1_messages": [ + { + "order": 1, + "message": { + "to_address": "0x1", + "payload": [ + "0x0", + "0x1", + "0x2" 
+ ] + } + } + ], + "cairo_native": false, + "failed": false, + "gas_consumed": 11690 + }, + "inner_calls": [], + "resources": { + "n_steps": 2, + "n_memory_holes": 3, + "builtin_instance_counter": { + "range_check_builtin": 31, + "pedersen_builtin": 4 + } + }, + "tracked_resource": "SierraGas", + "storage_access_tracker": { + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "accessed_contract_addresses": [ + "0x1" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "builtin_counters": { + "range_check": 31, + "pedersen": 4 + } + } + ], + "resources": { + "n_steps": 2, + "n_memory_holes": 3, + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + } + }, + "tracked_resource": "SierraGas", + "storage_access_tracker": { + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "accessed_contract_addresses": [ + "0x1" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "builtin_counters": { + "range_check": 31, + "pedersen": 4 + } + }, + "execute_call_info": { + "call": { + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_type": "EXTERNAL", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "storage_address": "0xc0020000", + "caller_address": "0x1", + "call_type": "Call", + "initial_gas": 100000000 + }, + "execution": { + "retdata": [ + "0x56414c4944" + ], + "events": [ + { + "order": 2, + "event": { + "keys": [ + "0x9" + ], + "data": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "l2_to_l1_messages": [ + { + "order": 1, + "message": { + "to_address": "0x1", + "payload": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "cairo_native": false, + "failed": false, + "gas_consumed": 11690 + }, + "inner_calls": [ + { + "call": { + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_type": "EXTERNAL", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "storage_address": "0xc0020000", + "caller_address": "0x1", + "call_type": "Call", + "initial_gas": 100000000 + }, + "execution": { + "retdata": [ + "0x56414c4944" + ], + "events": [ + { + "order": 2, + "event": { + "keys": [ + "0x9" + ], + "data": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "l2_to_l1_messages": [ + { + "order": 1, + "message": { + "to_address": "0x1", + "payload": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "cairo_native": false, + "failed": false, + "gas_consumed": 11690 + }, + "inner_calls": [], + "resources": { + "n_steps": 2, + "n_memory_holes": 3, + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + } + }, + "tracked_resource": "SierraGas", + "storage_access_tracker": { + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "accessed_contract_addresses": [ + "0x1" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "builtin_counters": { + "range_check": 31, + "pedersen": 
4 + } + } + ], + "resources": { + "n_steps": 2, + "n_memory_holes": 3, + "builtin_instance_counter": { + "range_check_builtin": 31, + "pedersen_builtin": 4 + } + }, + "tracked_resource": "SierraGas", + "storage_access_tracker": { + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "accessed_contract_addresses": [ + "0x1" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "builtin_counters": { + "range_check": 31, + "pedersen": 4 + } + }, + "fee_transfer_call_info": { + "call": { + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_type": "EXTERNAL", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "storage_address": "0xc0020000", + "caller_address": "0x1", + "call_type": "Call", + "initial_gas": 100000000 + }, + "execution": { + "retdata": [ + "0x56414c4944" + ], + "events": [ + { + "order": 2, + "event": { + "keys": [ + "0x9" + ], + "data": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "l2_to_l1_messages": [ + { + "order": 1, + "message": { + "to_address": "0x1", + "payload": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "cairo_native": false, + "failed": false, + "gas_consumed": 11690 + }, + "inner_calls": [ + { + "call": { + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_type": "EXTERNAL", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "storage_address": "0xc0020000", + "caller_address": "0x1", + "call_type": "Call", + "initial_gas": 100000000 + }, + "execution": { + "retdata": [ + "0x56414c4944" + ], + "events": [ + { + "order": 2, + "event": { + "keys": [ + "0x9" + ], + "data": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "l2_to_l1_messages": [ + { + "order": 1, + "message": { + "to_address": "0x1", + "payload": [ + "0x0", + "0x1", + "0x2" + ] + } + } + ], + "cairo_native": false, + "failed": false, + "gas_consumed": 11690 + }, + "inner_calls": [], + "resources": { + "n_steps": 2, + "n_memory_holes": 3, + "builtin_instance_counter": { + "range_check_builtin": 31, + "pedersen_builtin": 4 + } + }, + "tracked_resource": "SierraGas", + "storage_access_tracker": { + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "accessed_contract_addresses": [ + "0x1" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "builtin_counters": { + "range_check": 31, + "pedersen": 4 + } + } + ], + "resources": { + "n_steps": 2, + "n_memory_holes": 3, + "builtin_instance_counter": { + "range_check_builtin": 31, + "pedersen_builtin": 4 + } + }, + "tracked_resource": "SierraGas", + "storage_access_tracker": { + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "accessed_contract_addresses": [ + "0x1" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "builtin_counters": { + "range_check": 31, + "pedersen": 4 + } + }, + "actual_fee": "0x26fe9d250e000", + "da_gas": { + "l1_gas": 1652, + 
"l1_data_gas": 1, + "l2_gas": 1 + }, + "actual_resources": { + "n_steps": 4, + "range_check_builtin": 31, + "pedersen_builtin": 4 + }, + "revert_error": null, + "total_gas": { + "l1_gas": 6860, + "l1_data_gas": 1, + "l2_gas": 1 + } + } + ], + "contract_classes": [ + [ + "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056", + { + "contract_class": { + "sierra_program": [ + "0x0", + "0x1", + "0x2" + ], + "contract_class_version": "0.1.0", + "entry_points_by_type": { + "CONSTRUCTOR": [ + { + "function_idx": 1, + "selector": "0x2" + } + ], + "EXTERNAL": [ + { + "function_idx": 3, + "selector": "0x4" + } + ], + "L1_HANDLER": [ + { + "function_idx": 5, + "selector": "0x6" + } + ] + }, + "abi": "dummy abi" + } + } + ], + [ + "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056", + { + "contract_class": { + "sierra_program": [ + "0x0", + "0x1", + "0x2" + ], + "contract_class_version": "0.1.0", + "entry_points_by_type": { + "CONSTRUCTOR": [ + { + "function_idx": 1, + "selector": "0x2" + } + ], + "EXTERNAL": [ + { + "function_idx": 3, + "selector": "0x4" + } + ], + "L1_HANDLER": [ + { + "function_idx": 5, + "selector": "0x6" + } + ] + }, + "abi": "dummy abi" + } + } + ] + ], + "compiled_classes": [ + [ + "0x1", + { + "compiled_class": { + "prime": "0x1", + "compiler_version": "dummy version", + "bytecode": [ + "0x1" + ], + "bytecode_segment_lengths": [ + 1, + 2 + ], + "hints": [ + [ + 4, + [ + { + "AllocSegment": { + "dst": { + "register": "AP", + "offset": 1 + } + } + } + ] + ] + ], + "pythonic_hints": [ + [ + 5, + [ + "dummy pythonic hint" + ] + ] + ], + "entry_points_by_type": { + "EXTERNAL": [ + { + "selector": "0x1", + "offset": 1, + "builtins": [ + "dummy builtin" + ] + } + ], + "L1_HANDLER": [ + { + "selector": "0x1", + "offset": 1, + "builtins": [ + "dummy builtin" + ] + } + ], + "CONSTRUCTOR": [ + { + "selector": "0x1", + "offset": 1, + "builtins": [ + "dummy builtin" + ] + } + ] + } + } + } + ], + [ + "0x1", + { + "compiled_class": { + "prime": "0x1", + "compiler_version": "dummy version", + "bytecode": [ + "0x1" + ], + "bytecode_segment_lengths": [ + 1, + 2 + ], + "hints": [ + [ + 4, + [ + { + "AllocSegment": { + "dst": { + "register": "AP", + "offset": 1 + } + } + } + ] + ] + ], + "pythonic_hints": [ + [ + 5, + [ + "dummy pythonic hint" + ] + ] + ], + "entry_points_by_type": { + "EXTERNAL": [ + { + "selector": "0x1", + "offset": 1, + "builtins": [ + "dummy builtin" + ] + } + ], + "L1_HANDLER": [ + { + "selector": "0x1", + "offset": 1, + "builtins": [ + "dummy builtin" + ] + } + ], + "CONSTRUCTOR": [ + { + "selector": "0x1", + "offset": 1, + "builtins": [ + "dummy builtin" + ] + } + ] + } + } + } + ] + ], + "casm_hash_computation_data_sierra_gas": { + "class_hash_to_casm_hash_computation_gas": { + "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056": 1 + }, + "gas_without_casm_hash_computation": 3 + }, + "casm_hash_computation_data_proving_gas": { + "class_hash_to_casm_hash_computation_gas": { + "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056": 1 + }, + "gas_without_casm_hash_computation": 3 + } +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_bouncer_weights.json b/crates/apollo_consensus_orchestrator/resources/central_bouncer_weights.json new file mode 100644 index 00000000000..a874a4da376 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_bouncer_weights.json @@ -0,0 +1,9 @@ +{ + "l1_gas": 8, + "message_segment_length": 9, + "n_events": 2, + "state_diff_size": 45, + 
"sierra_gas": 10, + "n_txs": 2, + "proving_gas": 11 +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_casm_hash_computation_data.json b/crates/apollo_consensus_orchestrator/resources/central_casm_hash_computation_data.json new file mode 100644 index 00000000000..c7cf29ad7fb --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_casm_hash_computation_data.json @@ -0,0 +1,6 @@ +{ + "class_hash_to_casm_hash_computation_gas": { + "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056": 1 + }, + "gas_without_casm_hash_computation": 3 +} \ No newline at end of file diff --git a/crates/apollo_consensus_orchestrator/resources/central_contract_class.casm.json b/crates/apollo_consensus_orchestrator/resources/central_contract_class.casm.json new file mode 100644 index 00000000000..d5bae0551b0 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_contract_class.casm.json @@ -0,0 +1,65 @@ +{ + "compiled_class":{ + "prime":"0x1", + "compiler_version":"dummy version", + "bytecode":[ + "0x1" + ], + "bytecode_segment_lengths":[ + 1, + 2 + ], + "hints":[ + [ + 4, + [ + { + "AllocSegment":{ + "dst":{ + "offset":1, + "register":"AP" + } + } + } + ] + ] + ], + "pythonic_hints":[ + [ + 5, + [ + "dummy pythonic hint" + ] + ] + ], + "entry_points_by_type":{ + "CONSTRUCTOR":[ + { + "selector":"0x1", + "offset":1, + "builtins":[ + "dummy builtin" + ] + } + ], + "EXTERNAL":[ + { + "selector":"0x1", + "offset":1, + "builtins":[ + "dummy builtin" + ] + } + ], + "L1_HANDLER":[ + { + "selector":"0x1", + "offset":1, + "builtins":[ + "dummy builtin" + ] + } + ] + } + } +} \ No newline at end of file diff --git a/crates/apollo_consensus_orchestrator/resources/central_contract_class.sierra.json b/crates/apollo_consensus_orchestrator/resources/central_contract_class.sierra.json new file mode 100644 index 00000000000..b8da460a2c4 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_contract_class.sierra.json @@ -0,0 +1,31 @@ +{ + "contract_class":{ + "sierra_program":[ + "0x0", + "0x1", + "0x2" + ], + "contract_class_version":"0.1.0", + "entry_points_by_type":{ + "CONSTRUCTOR":[ + { + "function_idx":1, + "selector":"0x2" + } + ], + "EXTERNAL":[ + { + "function_idx":3, + "selector":"0x4" + } + ], + "L1_HANDLER":[ + { + "function_idx":5, + "selector":"0x6" + } + ] + }, + "abi":"dummy abi" + } +} \ No newline at end of file diff --git a/crates/apollo_consensus_orchestrator/resources/central_contract_class_default_optionals.casm.json b/crates/apollo_consensus_orchestrator/resources/central_contract_class_default_optionals.casm.json new file mode 100644 index 00000000000..e759aa900f7 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_contract_class_default_optionals.casm.json @@ -0,0 +1,56 @@ +{ + "compiled_class":{ + "prime":"0x1", + "compiler_version":"dummy version", + "bytecode":[ + "0x1" + ], + "hints":[ + [ + 4, + [ + { + "AllocSegment":{ + "dst":{ + "offset":1, + "register":"AP" + } + } + } + ] + ] + ], + "pythonic_hints":[ + + ], + "entry_points_by_type":{ + "CONSTRUCTOR":[ + { + "selector":"0x1", + "offset":1, + "builtins":[ + "dummy builtin" + ] + } + ], + "EXTERNAL":[ + { + "selector":"0x1", + "offset":1, + "builtins":[ + "dummy builtin" + ] + } + ], + "L1_HANDLER":[ + { + "selector":"0x1", + "offset":1, + "builtins":[ + "dummy builtin" + ] + } + ] + } + } +} \ No newline at end of file diff --git a/crates/apollo_consensus_orchestrator/resources/central_declare_tx.json 
b/crates/apollo_consensus_orchestrator/resources/central_declare_tx.json new file mode 100644 index 00000000000..060101ae235 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_declare_tx.json @@ -0,0 +1,39 @@ +{ + "tx": { + "hash_value": "0x41e7d973115400a98a7775190c27d4e3b1fcd8cd40b7d27464f6c3f10b8b706", + "version": "0x3", + "signature": ["0x0", "0x1", "0x2"], + "nonce": "0x1", + "sender_address": "0x12fd537", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", + "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "paymaster_data": [], + "class_hash": "0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056", + "compiled_class_hash": "0x1", + "sierra_program_size": 3, + "sierra_version": [ + "0x0", + "0x1", + "0x0" + ], + "abi_size": 9, + "account_deployment_data": [], + "type": "DECLARE" + }, + "time_created": 1734601649 +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_deploy_account_tx.json b/crates/apollo_consensus_orchestrator/resources/central_deploy_account_tx.json new file mode 100644 index 00000000000..0e81d6fbf3f --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_deploy_account_tx.json @@ -0,0 +1,32 @@ +{ + "tx": { + "hash_value": "0x429cb4dc45610a80a96800ab350a11ff50e2d69e25c7723c002934e66b5a282", + "version": "0x3", + "signature": ["0x0", "0x1", "0x2"], + "nonce": "0x1", + "sender_address": "0x4c2e031b0ddaa38e06fd9b1bf32bff739965f9d64833006204c67cbc879a57c", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", + "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "paymaster_data": [], + "contract_address_salt": "0x2", + "class_hash": "0x1b5a0b09f23b091d5d1fa2f660ddfad6bcfce607deba23806cd7328ccfb8ee9", + "constructor_calldata": ["0x0", "0x1", "0x2"], + "type": "DEPLOY_ACCOUNT" + }, + "time_created": 1734601616 +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_fee_market_info.json b/crates/apollo_consensus_orchestrator/resources/central_fee_market_info.json new file mode 100644 index 00000000000..50e0b97a560 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_fee_market_info.json @@ -0,0 +1,4 @@ +{ + "l2_gas_consumed": 150000, + "next_l2_gas_price": "0x186a0" +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_invoke_tx.json b/crates/apollo_consensus_orchestrator/resources/central_invoke_tx.json new file mode 100644 index 00000000000..0c477a4e7ce --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_invoke_tx.json @@ -0,0 +1,34 @@ +{ + "tx": { + "hash_value": "0x6efd067c859e6469d0f6d158e9ae408a9552eb8cc11f618ab3aef3e52450666", + "version": "0x3", + "signature": ["0x0", "0x1", "0x2"], + "nonce": "0x1", + "sender_address": "0x14abfd58671a1a9b30de2fcd2a42e8bff2ce1096a7c70bc7995904965f277e", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x1", + "max_price_per_unit": "0x1" + }, + "L2_GAS": { + "max_amount": "0x2", + "max_price_per_unit": "0x2" + }, + "L1_DATA_GAS": { + "max_amount": "0x3", 
+ "max_price_per_unit": "0x3" + } + }, + "tip": "0x1", + "paymaster_data": [], + "calldata": [ + "0x0", + "0x1" + ], + "account_deployment_data": [], + "type": "INVOKE_FUNCTION" + }, + "time_created": 1734601615 +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_l1_handler_tx.json b/crates/apollo_consensus_orchestrator/resources/central_l1_handler_tx.json new file mode 100644 index 00000000000..308a21ecbf4 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_l1_handler_tx.json @@ -0,0 +1,15 @@ +{ + "tx": { + "hash_value": "0xc947753befd252ca08042000cd6d783162ee2f5df87b519ddf3081b9b4b997", + "contract_address": "0x14abfd58671a1a9b30de2fcd2a42e8bff2ce1096a7c70bc7995904965f277e", + "entry_point_selector": "0x2a", + "calldata": [ + "0x0", + "0x1" + ], + "nonce": "0x1", + "paid_fee_on_l1": "0x1", + "type": "L1_HANDLER" + }, + "time_created": 1734601657 +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_preconfirmed_block.json b/crates/apollo_consensus_orchestrator/resources/central_preconfirmed_block.json new file mode 100644 index 00000000000..e6f9366e2c1 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_preconfirmed_block.json @@ -0,0 +1,231 @@ +{ + "status":"PRE_CONFIRMED", + "starknet_version":"0.14.0", + "l1_da_mode":"BLOB", + "l1_gas_price":{ + "price_in_wei":"0x59a78b10", + "price_in_fri":"0x1a146bb0e3c5" + }, + "l1_data_gas_price":{ + "price_in_wei":"0x1", + "price_in_fri":"0xa0e" + }, + "l2_gas_price":{ + "price_in_wei":"0x92e4", + "price_in_fri":"0x2abaa5cb" + }, + "timestamp":1749388551, + "sequencer_address":"0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8", + "transactions":[ + { + "transaction_hash":"0xa07cd0a966655216edb9bf3982e8c3ee6321c7fb7a218c5c25e30c462f3f39", + "version":"0x3", + "signature":[ + "0xd54f34b32dfd64f10d45da9d86dc0c7a07f3b9424ba14bd05cbeaf375700a0", + "0x156827c4e6a6e89729a9cf1d8d5308aeba7b51bb84b07d41338ef6b566969d7" + ], + "nonce":"0x8874", + "nonce_data_availability_mode":0, + "fee_data_availability_mode":0, + "resource_bounds":{ + "L1_DATA_GAS":{ + "max_amount":"0x9c0", + "max_price_per_unit":"0xf15" + }, + "L1_GAS":{ + "max_amount":"0x0", + "max_price_per_unit":"0x271ea18955a7" + }, + "L2_GAS":{ + "max_amount":"0x5db4c0", + "max_price_per_unit":"0x4017f8b0" + } + }, + "tip":"0x0", + "paymaster_data":[ + + ], + "sender_address":"0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "calldata":[ + "0x1", + "0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + "0x243435488ed6761090a70745a2ef8b3e468b80802ab98aeb7a3099f101c2219", + "0x2", + "0x20000000000021d41", + "0x0" + ], + "account_deployment_data":[ + + ], + "type":"INVOKE_FUNCTION" + }, + { + "transaction_hash":"0x22b8c1f3b42ed236c70dafe3ff431d68f360a495140f2f810de6f1f5b8bc75a", + "version":"0x3", + "signature":[ + "0x50c93a5542911159e32cc32c3d7ff53ad974f287c298dcad1ce3510a93c90e7", + "0x406632a826cb9559043166c08050cb8f9a3bcd1da31141faed7677d8d01423c" + ], + "nonce":"0x5c00", + "nonce_data_availability_mode":0, + "fee_data_availability_mode":0, + "resource_bounds":{ + "L1_DATA_GAS":{ + "max_amount":"0xde0", + "max_price_per_unit":"0xf15" + }, + "L1_GAS":{ + "max_amount":"0x0", + "max_price_per_unit":"0x271ea18955a7" + }, + "L2_GAS":{ + "max_amount":"0x772c20", + "max_price_per_unit":"0x4017f8b0" + } + }, + "tip":"0x0", + "paymaster_data":[ + + ], + "sender_address":"0x13a11a9c420fdfd1edc5654f14f83e18fe39567e79fcc75ff71a10ee236a672", + "calldata":[ + "0x1", + 
"0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + "0x15d8c7e20459a3fe496afb46165f48dd1a7b11ab1f7d0c320d54994417875fb", + "0x8", + "0x1", + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "0x8828d6f2716ca20000", + "0x8a8e4b1a3d8000", + "0x0", + "0x1", + "0x2", + "0x0" + ], + "account_deployment_data":[ + + ], + "type":"INVOKE_FUNCTION" + } + ], + "transaction_receipts":[ + { + "execution_status":"SUCCEEDED", + "transaction_index":16, + "transaction_hash":"0xa07cd0a966655216edb9bf3982e8c3ee6321c7fb7a218c5c25e30c462f3f39", + "l2_to_l1_messages":[ + + ], + "events":[ + { + "from_address":"0x53c91253bc9682c04929ca02ed00b3e423f6710d2ee7e0d5ebb06f3ecf368a8", + "keys":[ + "0x99cd8bde557814842a3121e8ddfd433a539b8c9f14bf31ebf108d12e6196e9" + ], + "data":[ + "0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "0x5f3dbd0", + "0x0" + ] + }, + { + "from_address":"0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + "keys":[ + "0x6f34037cb7ac4cb3f26daa25459d07e0b4e0bee0945d2ef381ebb4df7385ed" + ], + "data":[ + "0x2", + "0x20000000000021d41", + "0x0", + "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "0x0", + "0x2", + "0x28fb9b8a8a53500000", + "0x0", + "0x28fb9b8a8a53500000", + "0x0", + "0x1d5504006d44000", + "0x0", + "0x68458cea", + "0x0", + "0x0", + "0x0", + "0x0", + "0x1" + ] + }, + { + "from_address":"0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d", + "keys":[ + "0x99cd8bde557814842a3121e8ddfd433a539b8c9f14bf31ebf108d12e6196e9" + ], + "data":[ + "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8", + "0xa6d5102756880", + "0x0" + ] + } + ], + "execution_resources":{ + "n_steps":38987, + "builtin_instance_counter":{ + "ec_op_builtin":3, + "range_check_builtin":1824, + "pedersen_builtin":42, + "poseidon_builtin":27 + }, + "n_memory_holes":0, + "data_availability":{ + "l1_gas":0, + "l1_data_gas":1664, + "l2_gas":0 + }, + "total_gas_consumed":{ + "l1_gas":0, + "l1_data_gas":1664, + "l2_gas":4094080 + } + }, + "actual_fee":"0xa6d5102756880" + } + ], + "transaction_state_diffs":[ + { + "storage_diffs":{ + "0x1":[ + { + "key":"0xc5c06", + "value":"0x175268db82ce4da6eeff90d2cbe6e4516fa4d2fac6b9b2ee25979be220a4b2f" + } + ], + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d":[ + { + "key":"0x3c204dd68b8e800b4f42e438d9ed4ccbba9f8e436518758cd36553715c1d6ab", + "value":"0x1c285a1cb8b7a174010" + }, + { + "key":"0x5496768776e3db30053404f18067d81a6e06f5a2b0de326e21298fd9d569a9a", + "value":"0x1c8d6754f3cb5adc5414" + } + ] + }, + "nonces":{ + "0x352057331d5ad77465315d30b98135ddb815b86aa485d659dfeef59a904f88d":"0x24ef47" + }, + "deployed_contracts":[ + + ], + "old_declared_contracts":[ + + ], + "declared_classes":[ + + ], + "replaced_classes":[ + + ] + } + ] +} diff --git a/crates/sequencing/papyrus_consensus_orchestrator/resources/central_state_diff.json b/crates/apollo_consensus_orchestrator/resources/central_state_diff.json similarity index 90% rename from crates/sequencing/papyrus_consensus_orchestrator/resources/central_state_diff.json rename to crates/apollo_consensus_orchestrator/resources/central_state_diff.json index 36421d86b70..65a92abddcd 100644 --- a/crates/sequencing/papyrus_consensus_orchestrator/resources/central_state_diff.json +++ b/crates/apollo_consensus_orchestrator/resources/central_state_diff.json @@ -1,6 
+1,7 @@ { "address_to_class_hash": { - "0x1": "0x1" + "0x1": "0x1", + "0x5": "0x5" }, "nonces": { "L1": { @@ -33,7 +34,7 @@ "price_in_fri": "0xd" }, "sequencer_address": "0x7", - "starknet_version": "0.13.5", + "starknet_version": "0.14.0", "use_kzg_da": true } } diff --git a/crates/apollo_consensus_orchestrator/resources/central_transaction_execution_info.json b/crates/apollo_consensus_orchestrator/resources/central_transaction_execution_info.json new file mode 100644 index 00000000000..c403b1e9cbe --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_transaction_execution_info.json @@ -0,0 +1,562 @@ +{ + "actual_fee": "0x26fe9d250e000", + "actual_resources": { + "n_steps": 4, + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "da_gas": { + "l1_data_gas": 1, + "l1_gas": 1652, + "l2_gas": 1 + }, + "execute_call_info": { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "inner_calls": [ + { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "inner_calls": [], + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } + ], + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + 
"read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + }, + "fee_transfer_call_info": { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "inner_calls": [ + { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "inner_calls": [], + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } + ], + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + }, + "revert_error": null, + "total_gas": { + "l1_data_gas": 1, + "l1_gas": 6860, + "l2_gas": 1 + }, + "validate_call_info": { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + 
"entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "inner_calls": [ + { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "inner_calls": [], + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } + ], + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } +} diff --git a/crates/apollo_consensus_orchestrator/resources/central_transaction_execution_info_reverted.json b/crates/apollo_consensus_orchestrator/resources/central_transaction_execution_info_reverted.json new file mode 100644 index 00000000000..df6d8e2a5aa --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/central_transaction_execution_info_reverted.json @@ -0,0 +1,382 @@ +{ + "actual_fee": "0x26fe9d250e000", + "actual_resources": { + "n_steps": 4, + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "da_gas": { + "l1_data_gas": 1, + "l1_gas": 1652, + "l2_gas": 1 + }, + "execute_call_info": null, + "fee_transfer_call_info": { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + 
"initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "inner_calls": [ + { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "inner_calls": [], + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } + ], + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + }, + "revert_error": "Insufficient fee token balance. 
Fee: 1, balance: low/high 2/3.", + "total_gas": { + "l1_data_gas": 1, + "l1_gas": 6860, + "l2_gas": 1 + }, + "validate_call_info": { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "inner_calls": [ + { + "call": { + "call_type": "Call", + "calldata": [ + "0x40070000", + "0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701", + "0x1", + "0x2" + ], + "caller_address": "0x1", + "class_hash": "0x80020000", + "code_address": "0x40070000", + "entry_point_selector": "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775", + "entry_point_type": "EXTERNAL", + "initial_gas": 100000000, + "storage_address": "0xc0020000" + }, + "execution": { + "events": [ + { + "event": { + "data": [ + "0x0", + "0x1", + "0x2" + ], + "keys": [ + "0x9" + ] + }, + "order": 2 + } + ], + "failed": false, + "gas_consumed": 11690, + "l2_to_l1_messages": [ + { + "message": { + "payload": [ + "0x0", + "0x1", + "0x2" + ], + "to_address": "0x1" + }, + "order": 1 + } + ], + "cairo_native": false, + "retdata": [ + "0x56414c4944" + ] + }, + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "inner_calls": [], + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } + ], + "resources": { + "builtin_instance_counter": { + "pedersen_builtin": 4, + "range_check_builtin": 31 + }, + "n_memory_holes": 3, + "n_steps": 2 + }, + "storage_access_tracker": { + "accessed_contract_addresses": [ + "0x1" + ], + "accessed_storage_keys": [ + "0x1" + ], + "read_class_hash_values": [ + "0x80020000" + ], + "storage_read_values": [ + "0x0", + "0x1", + "0x2" + ], + "read_block_hash_values": [ + "0xdeafbee" + ], + "accessed_blocks": [ + 100 + ] + }, + "tracked_resource": "SierraGas", + "builtin_counters": { + "pedersen": 4, + "range_check": 31 + } + } +} diff --git a/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json b/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json new file mode 100644 index 00000000000..e0d374fa192 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json @@ -0,0 +1,7 @@ +{ + "gas_price_max_change_denominator": 48, + "gas_target": 2000000000, + "max_block_size": 4000000000, + "min_gas_price": "0xb2d05e00", + "l1_gas_price_margin_percent": 10 +} 
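The versioned-constants file above parameterizes the L2 fee market: a per-block gas target (half of `max_block_size`, the usual EIP-1559 arrangement), a bound on the per-block price change, and a price floor; `l1_gas_price_margin_percent` is presumably applied to the oracle-provided L1 prices rather than to this rule. The exact update formula lives in the orchestrator's fee-market code, which is not part of this hunk; the following is only a minimal EIP-1559-style sketch of how such constants are typically combined, with `calculate_next_base_gas_price` a hypothetical name:

// Hypothetical sketch only; constants mirror orchestrator_versioned_constants_0_14_0.json.
const GAS_PRICE_MAX_CHANGE_DENOMINATOR: u128 = 48;
const GAS_TARGET: u128 = 2_000_000_000;
const MIN_GAS_PRICE: u128 = 0xb2d05e00; // = 3_000_000_000 (the JSON stores it as a hex string)

fn calculate_next_base_gas_price(price: u128, gas_used: u128) -> u128 {
    // The price moves toward the target by at most price / 48 per block
    // (reached when a block is empty or at the 2x-target maximum size).
    let price_change = (price * gas_used.abs_diff(GAS_TARGET))
        / (GAS_TARGET * GAS_PRICE_MAX_CHANGE_DENOMINATOR);
    let next = if gas_used > GAS_TARGET {
        price + price_change
    } else {
        price.saturating_sub(price_change)
    };
    // Never drop below the configured floor.
    next.max(MIN_GAS_PRICE)
}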
diff --git a/crates/apollo_consensus_orchestrator/src/build_proposal.rs b/crates/apollo_consensus_orchestrator/src/build_proposal.rs new file mode 100644 index 00000000000..f5609e80e40 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/build_proposal.rs @@ -0,0 +1,288 @@ +#[cfg(test)] +#[path = "build_proposal_test.rs"] +mod build_proposal_test; + +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use apollo_batcher_types::batcher_types::{ + GetProposalContent, + GetProposalContentInput, + ProposalId, + ProposeBlockInput, +}; +use apollo_batcher_types::communication::{BatcherClient, BatcherClientError}; +use apollo_class_manager_types::transaction_converter::{ + TransactionConverterError, + TransactionConverterTrait, +}; +use apollo_consensus::types::{ProposalCommitment, Round}; +use apollo_l1_gas_price_types::errors::{EthToStrkOracleClientError, L1GasPriceClientError}; +use apollo_protobuf::consensus::{ + ConsensusBlockInfo, + ProposalFin, + ProposalInit, + ProposalPart, + TransactionBatch, +}; +use apollo_state_sync_types::communication::StateSyncClientError; +use apollo_time::time::{Clock, DateTime}; +use starknet_api::block::{BlockHash, GasPrice}; +use starknet_api::consensus_transaction::InternalConsensusTransaction; +use starknet_api::core::ContractAddress; +use starknet_api::data_availability::L1DataAvailabilityMode; +use starknet_api::transaction::TransactionHash; +use starknet_api::StarknetApiError; +use tokio_util::sync::CancellationToken; +use tokio_util::task::AbortOnDropHandle; +use tracing::{debug, error, info, trace, warn}; + +use crate::sequencer_consensus_context::{BuiltProposals, SequencerConsensusContextDeps}; +use crate::utils::{ + convert_to_sn_api_block_info, + get_oracle_rate_and_prices, + retrospective_block_hash, + truncate_to_executed_txs, + GasPriceParams, + StreamSender, +}; + +pub(crate) struct ProposalBuildArguments { + pub deps: SequencerConsensusContextDeps, + pub batcher_timeout: Duration, + pub proposal_init: ProposalInit, + pub l1_da_mode: L1DataAvailabilityMode, + pub stream_sender: StreamSender, + pub gas_price_params: GasPriceParams, + pub valid_proposals: Arc<Mutex<BuiltProposals>>, + pub proposal_id: ProposalId, + pub cende_write_success: AbortOnDropHandle<bool>, + pub l2_gas_price: GasPrice, + pub builder_address: ContractAddress, + pub cancel_token: CancellationToken, + pub previous_block_info: Option<ConsensusBlockInfo>, + pub proposal_round: Round, +} + +type BuildProposalResult<T> = Result<T, BuildProposalError>; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum BuildProposalError { + #[error("Batcher error: {0}")] + Batcher(String, BatcherClientError), + #[error("State sync client error: {0}")] + StateSyncClientError(#[from] StateSyncClientError), + #[error("State sync is not ready: {0}")] + StateSyncNotReady(String), + // Consensus may exit early (e.g. sync). + #[error("Failed to send commitment to consensus: {0}")] + SendError(ProposalCommitment), + #[error("EthToStrkOracle error: {0}")] + EthToStrkOracle(#[from] EthToStrkOracleClientError), + #[error("L1GasPriceProvider error: {0}")] + L1GasPriceProvider(#[from] L1GasPriceClientError), + #[error("Proposal interrupted.")] + Interrupted, + #[error("Writing blob to Aerospike failed. {0}")]
{0}")] + CendeWriteError(String), + #[error("Failed to convert transactions: {0}")] + TransactionConverterError(#[from] TransactionConverterError), + #[error("Block info conversion error: {0}")] + BlockInfoConversion(#[from] StarknetApiError), +} + +// Handles building a new proposal without blocking consensus: +pub(crate) async fn build_proposal( + mut args: ProposalBuildArguments, +) -> BuildProposalResult { + let batcher_deadline = args.deps.clock.now() + args.batcher_timeout; + let block_info = initiate_build(&args).await?; + args.stream_sender + .send(ProposalPart::Init(args.proposal_init)) + .await + .expect("Failed to send proposal init"); + args.stream_sender + .send(ProposalPart::BlockInfo(block_info.clone())) + .await + .expect("Failed to send block info"); + + let (proposal_commitment, content) = get_proposal_content( + args.proposal_id, + args.deps.batcher.as_ref(), + args.stream_sender, + args.cende_write_success, + args.deps.transaction_converter, + args.cancel_token, + args.deps.clock, + batcher_deadline, + ) + .await?; + + // Update valid_proposals before sending fin to avoid a race condition + // with `repropose` being called before `valid_proposals` is updated. + let mut valid_proposals = args.valid_proposals.lock().expect("Lock was poisoned"); + valid_proposals.insert_proposal_for_height( + &args.proposal_init.height, + &proposal_commitment, + block_info, + content, + &args.proposal_id, + ); + Ok(proposal_commitment) +} + +async fn initiate_build(args: &ProposalBuildArguments) -> BuildProposalResult { + let batcher_timeout = chrono::Duration::from_std(args.batcher_timeout) + .expect("Can't convert timeout to chrono::Duration"); + let timestamp = args.deps.clock.unix_now(); + let (eth_to_fri_rate, l1_prices) = get_oracle_rate_and_prices( + args.deps.eth_to_strk_oracle_client.clone(), + args.deps.l1_gas_price_provider.clone(), + timestamp, + args.previous_block_info.as_ref(), + &args.gas_price_params, + ) + .await; + + let block_info = ConsensusBlockInfo { + height: args.proposal_init.height, + timestamp, + builder: args.builder_address, + l1_da_mode: args.l1_da_mode, + l2_gas_price_fri: args.l2_gas_price, + l1_gas_price_wei: l1_prices.base_fee_per_gas, + l1_data_gas_price_wei: l1_prices.blob_fee, + eth_to_fri_rate, + }; + + let retrospective_block_hash = + retrospective_block_hash(args.deps.state_sync_client.clone(), &block_info).await?; + let build_proposal_input = ProposeBlockInput { + proposal_id: args.proposal_id, + deadline: args.deps.clock.now() + batcher_timeout, + retrospective_block_hash, + block_info: convert_to_sn_api_block_info(&block_info)?, + proposal_round: args.proposal_round, + }; + debug!("Initiating build proposal: {build_proposal_input:?}"); + args.deps.batcher.propose_block(build_proposal_input.clone()).await.map_err(|err| { + BuildProposalError::Batcher( + format!("Failed to initiate build proposal {build_proposal_input:?}."), + err, + ) + })?; + Ok(block_info) +} +/// 1. Receive chunks of content from the batcher. +/// 2. Forward these to the stream handler to be streamed out to the network. +/// 3. Once finished, receive the commitment from the batcher. +// TODO(guyn): consider passing a ref to BuildProposalArguments instead of all the fields +// separately. 
+#[allow(clippy::too_many_arguments)] +async fn get_proposal_content( + proposal_id: ProposalId, + batcher: &dyn BatcherClient, + mut stream_sender: StreamSender, + cende_write_success: AbortOnDropHandle<bool>, + transaction_converter: Arc<dyn TransactionConverterTrait>, + cancel_token: CancellationToken, + clock: Arc<dyn Clock>, + batcher_deadline: DateTime, +) -> BuildProposalResult<(ProposalCommitment, Vec<Vec<InternalConsensusTransaction>>)> { + let mut content = Vec::new(); + loop { + if cancel_token.is_cancelled() { + return Err(BuildProposalError::Interrupted); + } + // We currently want a failure in one part of the node to cause all components to fail. If + // this changes, we can simply return None and treat this as a failed proposal, which + // consensus should support. + let response = batcher + .get_proposal_content(GetProposalContentInput { proposal_id }) + .await + .map_err(|err| { + BuildProposalError::Batcher( + format!("Failed to get proposal content for proposal_id {proposal_id}."), + err, + ) + })?; + + match response.content { + GetProposalContent::Txs(txs) => { + content.push(txs.clone()); + // TODO(matan): Make sure this isn't too large for a single proto message. + debug!( + hashes = ?txs.iter().map(|tx| tx.tx_hash()).collect::<Vec<_>>(), + "Sending transaction batch with {} txs.", + txs.len() + ); + let transactions = futures::future::join_all(txs.into_iter().map(|tx| { + transaction_converter.convert_internal_consensus_tx_to_consensus_tx(tx) + })) + .await + .into_iter() + .collect::<Result<Vec<_>, _>>()?; + + trace!(?transactions, "Sending transaction batch with {} txs.", transactions.len()); + stream_sender + .send(ProposalPart::Transactions(TransactionBatch { transactions })) + .await + .expect("Failed to broadcast proposal content"); + } + GetProposalContent::Finished { id, final_n_executed_txs } => { + let proposal_commitment = BlockHash(id.state_diff_commitment.0.0); + content = truncate_to_executed_txs(&mut content, final_n_executed_txs); + + info!( + ?proposal_commitment, + num_txs = final_n_executed_txs, + "Finished building proposal", + ); + if final_n_executed_txs == 0 { + warn!("Built an empty proposal."); + } + + // If the blob writing operation to Aerospike doesn't return a success status, we + // can't finish the proposal. Must wait for it at least until batcher_timeout is + // reached. + let remaining = (batcher_deadline - clock.now()) + .to_std() + .unwrap_or_default() + .max(Duration::from_millis(1)); // Ensure we wait at least 1 ms to avoid immediate timeout.
+ match tokio::time::timeout(remaining, cende_write_success).await { + Err(_) => { + return Err(BuildProposalError::CendeWriteError( + "Writing blob to Aerospike didn't return in time.".to_string(), + )); + } + Ok(Ok(true)) => { + info!("Writing blob to Aerospike completed successfully."); + } + Ok(Ok(false)) => { + return Err(BuildProposalError::CendeWriteError( + "Writing blob to Aerospike failed.".to_string(), + )); + } + Ok(Err(e)) => { + return Err(BuildProposalError::CendeWriteError(e.to_string())); + } + } + + let final_n_executed_txs_u64 = final_n_executed_txs + .try_into() + .expect("Number of executed transactions should fit in u64"); + stream_sender + .send(ProposalPart::ExecutedTransactionCount(final_n_executed_txs_u64)) + .await + .expect("Failed to broadcast executed transaction count"); + let fin = ProposalFin { proposal_commitment }; + info!("Sending fin={fin:?}"); + stream_sender + .send(ProposalPart::Fin(fin)) + .await + .expect("Failed to broadcast proposal fin"); + return Ok((proposal_commitment, content)); + } + } + } +} diff --git a/crates/apollo_consensus_orchestrator/src/build_proposal_test.rs b/crates/apollo_consensus_orchestrator/src/build_proposal_test.rs new file mode 100644 index 00000000000..568efe067ac --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/build_proposal_test.rs @@ -0,0 +1,260 @@ +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use apollo_batcher_types::batcher_types::{ + GetProposalContent, + GetProposalContentResponse, + ProposalCommitment, + ProposalId, +}; +use apollo_batcher_types::communication::BatcherClientError; +use apollo_class_manager_types::transaction_converter::{ + MockTransactionConverterTrait, + TransactionConverterError, +}; +use apollo_consensus::types::Round; +use apollo_infra::component_client::ClientError; +use apollo_protobuf::consensus::{ConsensusBlockInfo, ProposalInit, ProposalPart}; +use apollo_state_sync_types::communication::StateSyncClientError; +use assert_matches::assert_matches; +use blockifier::abi::constants::STORED_BLOCK_HASH_BUFFER; +use futures::channel::mpsc; +use num_rational::Ratio; +use starknet_api::block::{BlockHash, BlockNumber, GasPrice}; +use starknet_api::core::{ClassHash, ContractAddress}; +use starknet_api::data_availability::L1DataAvailabilityMode; +use tokio_util::sync::CancellationToken; +use tokio_util::task::AbortOnDropHandle; + +use crate::build_proposal::{build_proposal, BuildProposalError, ProposalBuildArguments}; +use crate::config::ContextConfig; +use crate::orchestrator_versioned_constants::VersionedConstants; +use crate::sequencer_consensus_context::BuiltProposals; +use crate::test_utils::{ + create_test_and_network_deps, + TestDeps, + CHANNEL_SIZE, + INTERNAL_TX_BATCH, + STATE_DIFF_COMMITMENT, + TIMEOUT, +}; +use crate::utils::{GasPriceParams, StreamSender}; + +struct TestProposalBuildArguments { + pub deps: TestDeps, + pub batcher_timeout: Duration, + pub proposal_init: ProposalInit, + pub l1_da_mode: L1DataAvailabilityMode, + pub stream_sender: StreamSender, + pub gas_price_params: GasPriceParams, + pub valid_proposals: Arc<Mutex<BuiltProposals>>, + pub proposal_id: ProposalId, + pub cende_write_success: AbortOnDropHandle<bool>, + pub l2_gas_price: GasPrice, + pub builder_address: ContractAddress, + pub cancel_token: CancellationToken, + pub previous_block_info: Option<ConsensusBlockInfo>, + pub proposal_round: Round, +} + +impl From<TestProposalBuildArguments> for ProposalBuildArguments { + fn from(args: TestProposalBuildArguments) -> Self { + ProposalBuildArguments { + deps: args.deps.into(), + batcher_timeout: args.batcher_timeout,
proposal_init: args.proposal_init, + l1_da_mode: args.l1_da_mode, + stream_sender: args.stream_sender, + gas_price_params: args.gas_price_params, + valid_proposals: args.valid_proposals, + proposal_id: args.proposal_id, + cende_write_success: args.cende_write_success, + l2_gas_price: args.l2_gas_price, + builder_address: args.builder_address, + cancel_token: args.cancel_token, + previous_block_info: args.previous_block_info, + proposal_round: args.proposal_round, + } + } +} + +fn create_proposal_build_arguments() -> (TestProposalBuildArguments, mpsc::Receiver<ProposalPart>) { + let (mut deps, _) = create_test_and_network_deps(); + deps.setup_default_expectations(); + let batcher_timeout = TIMEOUT; + let proposal_init = ProposalInit::default(); + let l1_da_mode = L1DataAvailabilityMode::Calldata; + let (proposal_sender, proposal_receiver) = mpsc::channel::<ProposalPart>(CHANNEL_SIZE); + let stream_sender = StreamSender { proposal_sender }; + let context_config = ContextConfig::default(); + + let gas_price_params = GasPriceParams { + min_l1_gas_price_wei: GasPrice(context_config.min_l1_gas_price_wei), + max_l1_gas_price_wei: GasPrice(context_config.max_l1_gas_price_wei), + min_l1_data_gas_price_wei: GasPrice(context_config.min_l1_data_gas_price_wei), + max_l1_data_gas_price_wei: GasPrice(context_config.max_l1_data_gas_price_wei), + l1_data_gas_price_multiplier: Ratio::new( + context_config.l1_data_gas_price_multiplier_ppt, + 1000, + ), + l1_gas_tip_wei: GasPrice(context_config.l1_gas_tip_wei), + }; + let valid_proposals = Arc::new(Mutex::new(BuiltProposals::new())); + let proposal_id = ProposalId(1); + let cende_write_success = AbortOnDropHandle::new(tokio::spawn(async { true })); + let l2_gas_price = VersionedConstants::latest_constants().min_gas_price; + let builder_address = ContractAddress::default(); + let cancel_token = CancellationToken::new(); + let previous_block_info = None; + let proposal_round = 0; + + ( + TestProposalBuildArguments { + deps, + batcher_timeout, + proposal_init, + l1_da_mode, + stream_sender, + gas_price_params, + valid_proposals, + proposal_id, + cende_write_success, + l2_gas_price, + builder_address, + cancel_token, + previous_block_info, + proposal_round, + }, + proposal_receiver, + ) +} + +#[tokio::test] +async fn build_proposal_succeed() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().returning(|_| { + Ok(GetProposalContentResponse { + content: GetProposalContent::Finished { + id: ProposalCommitment { state_diff_commitment: STATE_DIFF_COMMITMENT }, + final_n_executed_txs: 0, + }, + }) + }); + // Make sure cende returns on time. + tokio::time::sleep(Duration::from_millis(100)).await; + + let res = build_proposal(proposal_args.into()).await.unwrap(); + assert_eq!(res, BlockHash::default()); +} + +#[tokio::test] +async fn state_sync_client_error() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Make sure state_sync_client is called, by setting the height to >= STORED_BLOCK_HASH_BUFFER. + proposal_args.proposal_init.height = BlockNumber(STORED_BLOCK_HASH_BUFFER); + // Setup state sync client to return an error.
+ proposal_args.deps.state_sync_client.expect_get_block().returning(|_| { + Err(StateSyncClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::StateSyncClientError(_)))); +} + +#[tokio::test] +async fn state_sync_not_ready_error() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Make sure state_sync_client is called, by setting the height to >= STORED_BLOCK_HASH_BUFFER. + proposal_args.proposal_init.height = BlockNumber(STORED_BLOCK_HASH_BUFFER); + // Setup state sync client to return None, indicating that the state sync is not ready. + proposal_args.deps.state_sync_client.expect_get_block().returning(|_| Ok(None)); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::StateSyncNotReady(_)))); +} + +#[tokio::test] +async fn propose_block_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return an error on propose_block. + proposal_args.deps.batcher.expect_propose_block().returning(|_| { + Err(BatcherClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + + let res = build_proposal(proposal_args.into()).await; + assert_matches!( + res, + Err(BuildProposalError::Batcher(msg, _)) if msg.contains("Failed to initiate build proposal") + ); +} + +#[tokio::test] +async fn get_proposal_content_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return an error on get_proposal_content. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().returning(|_| { + Err(BatcherClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + + let res = build_proposal(proposal_args.into()).await; + assert_matches!( + res, + Err(BuildProposalError::Batcher(msg, _)) if msg.contains("Failed to get proposal content") + ); +} + +#[tokio::test] +async fn interrupt_proposal() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return Ok on propose_block. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + // Interrupt the proposal. + proposal_args.cancel_token.cancel(); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::Interrupted))); +} + +#[tokio::test] +async fn convert_internal_consensus_tx_to_consensus_tx_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return Ok on propose_block and TX from get_proposal_content. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().times(1).returning(|_| { + Ok(GetProposalContentResponse { + content: GetProposalContent::Txs(INTERNAL_TX_BATCH.clone()), + }) + }); + // Overwrite the transaction converter to return an error, since by default it returns Ok.
+ let mut transaction_converter = MockTransactionConverterTrait::new(); + transaction_converter.expect_convert_internal_consensus_tx_to_consensus_tx().returning(|_| { + Err(TransactionConverterError::ClassNotFound { class_hash: ClassHash::default() }) + }); + proposal_args.deps.transaction_converter = transaction_converter; + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::TransactionConverterError(_)))); +} + +#[tokio::test] +async fn cende_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return Ok on propose_block and Finished from get_proposal_content. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().times(1).returning(|_| { + Ok(GetProposalContentResponse { + content: GetProposalContent::Finished { + id: ProposalCommitment { state_diff_commitment: STATE_DIFF_COMMITMENT }, + final_n_executed_txs: 0, + }, + }) + }); + // Setup cende to return false, indicating a failure. + proposal_args.cende_write_success = AbortOnDropHandle::new(tokio::spawn(async { false })); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::CendeWriteError(_)))); +} diff --git a/crates/apollo_consensus_orchestrator/src/cende/cende_test.rs b/crates/apollo_consensus_orchestrator/src/cende/cende_test.rs new file mode 100644 index 00000000000..f1980ed68ba --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/cende/cende_test.rs @@ -0,0 +1,168 @@ +use std::sync::Arc; + +use apollo_class_manager_types::MockClassManagerClient; +use metrics_exporter_prometheus::PrometheusBuilder; +use rstest::rstest; +use starknet_api::block::{BlockInfo, BlockNumber}; + +use super::{CendeAmbassador, RECORDER_WRITE_BLOB_PATH}; +use crate::cende::{BlobParameters, CendeConfig, CendeContext}; +use crate::metrics::{ + register_metrics, + CendeWriteFailureReason, + CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER, + CENDE_WRITE_BLOB_FAILURE, + CENDE_WRITE_BLOB_SUCCESS, + LABEL_CENDE_FAILURE_REASON, +}; + +const HEIGHT_TO_WRITE: BlockNumber = BlockNumber(10); + +impl BlobParameters { + fn with_block_number(block_number: BlockNumber) -> Self { + Self { block_info: BlockInfo { block_number, ..Default::default() }, ..Default::default() } + } +} + +#[derive(Debug, Default)] +struct ExpectedMetrics { + success: usize, + failure_no_prev_blob: usize, + failure_block_height_mismatch: usize, + failure_recorder_error: usize, + failure_skip_write_height: usize, +} + +impl ExpectedMetrics { + fn success() -> Self { + Self { success: 1, ..Default::default() } + } + + fn no_prev_blob() -> Self { + Self { failure_no_prev_blob: 1, ..Default::default() } + } + + fn height_mismatch() -> Self { + Self { failure_block_height_mismatch: 1, ..Default::default() } + } + + fn recorder_error() -> Self { + Self { failure_recorder_error: 1, ..Default::default() } + } + + fn verify_metrics(&self, metrics: &str) { + CENDE_WRITE_BLOB_FAILURE.assert_eq( + metrics, + self.failure_skip_write_height, + &[(LABEL_CENDE_FAILURE_REASON, CendeWriteFailureReason::SkipWriteHeight.into())], + ); + CENDE_WRITE_BLOB_FAILURE.assert_eq( + metrics, + self.failure_no_prev_blob, + &[(LABEL_CENDE_FAILURE_REASON, CendeWriteFailureReason::BlobNotAvailable.into())], + ); + CENDE_WRITE_BLOB_FAILURE.assert_eq( + metrics, + self.failure_block_height_mismatch, + &[(LABEL_CENDE_FAILURE_REASON, CendeWriteFailureReason::HeightMismatch.into())], + ); + 
CENDE_WRITE_BLOB_FAILURE.assert_eq( + metrics, + self.failure_recorder_error, + &[(LABEL_CENDE_FAILURE_REASON, CendeWriteFailureReason::CendeRecorderError.into())], + ); + CENDE_WRITE_BLOB_SUCCESS.assert_eq(metrics, self.success); + } +} + +#[rstest] +#[case::success(200, Some(9), 1, true, ExpectedMetrics::success())] +#[case::no_prev_block(200, None, 0, false, ExpectedMetrics::no_prev_blob())] +#[case::prev_block_height_mismatch(200, Some(7), 0, false, ExpectedMetrics::height_mismatch())] +#[case::recorder_return_error(500, Some(9), 1, false, ExpectedMetrics::recorder_error())] +#[tokio::test] +async fn write_prev_height_blob( + #[case] mock_status_code: usize, + #[case] prev_block: Option<u64>, + #[case] expected_calls: usize, + #[case] expected_result: bool, + #[case] expected_metrics: ExpectedMetrics, +) { + let recorder = PrometheusBuilder::new().build_recorder(); + let _recorder_guard = metrics::set_default_local_recorder(&recorder); + register_metrics(); + + let mut server = mockito::Server::new_async().await; + let url = server.url(); + let mock = server.mock("POST", RECORDER_WRITE_BLOB_PATH).with_status(mock_status_code).create(); + + let cende_ambassador = CendeAmbassador::new( + CendeConfig { recorder_url: url.parse().unwrap(), ..Default::default() }, + Arc::new(MockClassManagerClient::new()), + ); + + if let Some(prev_block) = prev_block { + cende_ambassador + .prepare_blob_for_next_height(BlobParameters::with_block_number(BlockNumber( + prev_block, + ))) + .await + .unwrap(); + } + + let receiver = cende_ambassador.write_prev_height_blob(HEIGHT_TO_WRITE); + + assert_eq!(receiver.await.unwrap(), expected_result); + mock.expect(expected_calls).assert(); + + expected_metrics.verify_metrics(&recorder.handle().render()); +} + +#[tokio::test] +async fn prepare_blob_for_next_height() { + let recorder = PrometheusBuilder::new().build_recorder(); + let _recorder_guard = metrics::set_default_local_recorder(&recorder); + register_metrics(); + + let cende_ambassador = + CendeAmbassador::new(CendeConfig::default(), Arc::new(MockClassManagerClient::new())); + + cende_ambassador + .prepare_blob_for_next_height(BlobParameters::with_block_number(HEIGHT_TO_WRITE)) + .await + .unwrap(); + assert_eq!( + cende_ambassador.prev_height_blob.lock().await.as_ref().unwrap().block_number, + HEIGHT_TO_WRITE + ); + + CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER.assert_eq(&recorder.handle().render(), HEIGHT_TO_WRITE.0); +} + +#[tokio::test] +async fn no_write_at_skipped_height() { + let recorder = PrometheusBuilder::new().build_recorder(); + let _recorder_guard = metrics::set_default_local_recorder(&recorder); + register_metrics(); + + const SKIP_WRITE_HEIGHT: BlockNumber = HEIGHT_TO_WRITE; + let cende_ambassador = CendeAmbassador::new( + CendeConfig { skip_write_height: Some(SKIP_WRITE_HEIGHT), ..Default::default() }, + Arc::new(MockClassManagerClient::new()), + ); + + // Returns false since the blob is missing and the height is different from skip_write_height. + assert!( + !cende_ambassador.write_prev_height_blob(HEIGHT_TO_WRITE.unchecked_next()).await.unwrap() + ); + + assert!(cende_ambassador.write_prev_height_blob(HEIGHT_TO_WRITE).await.unwrap()); + + // Verify metrics.
+ let expected_metrics = ExpectedMetrics { + failure_no_prev_blob: 1, + failure_skip_write_height: 1, + ..Default::default() + }; + expected_metrics.verify_metrics(&recorder.handle().render()); +} diff --git a/crates/apollo_consensus_orchestrator/src/cende/central_objects.rs b/crates/apollo_consensus_orchestrator/src/cende/central_objects.rs new file mode 100644 index 00000000000..05baa781062 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/cende/central_objects.rs @@ -0,0 +1,508 @@ +use std::str::FromStr; + +use apollo_class_manager_types::SharedClassManagerClient; +use blockifier::bouncer::{BouncerWeights, CasmHashComputationData}; +use blockifier::state::cached_state::CommitmentStateDiff; +use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; +use indexmap::{indexmap, IndexMap}; +use serde::Serialize; +use starknet_api::block::{ + BlockInfo, + BlockNumber, + BlockTimestamp, + NonzeroGasPrice, + StarknetVersion, +}; +use starknet_api::consensus_transaction::InternalConsensusTransaction; +use starknet_api::contract_class::{ContractClass, SierraVersion}; +use starknet_api::core::{ + ClassHash, + CompiledClassHash, + ContractAddress, + EntryPointSelector, + Nonce, +}; +use starknet_api::data_availability::DataAvailabilityMode; +use starknet_api::executable_transaction::L1HandlerTransaction; +use starknet_api::rpc_transaction::{ + InternalRpcDeclareTransactionV3, + InternalRpcDeployAccountTransaction, + InternalRpcTransaction, + InternalRpcTransactionWithoutTxHash, + RpcDeployAccountTransaction, + RpcInvokeTransaction, +}; +use starknet_api::state::{SierraContractClass, StorageKey, ThinStateDiff}; +use starknet_api::transaction::fields::{ + AccountDeploymentData, + AllResourceBounds, + Calldata, + ContractAddressSalt, + Fee, + PaymasterData, + ResourceBounds, + Tip, + TransactionSignature, +}; +use starknet_api::transaction::TransactionHash; +use starknet_types_core::felt::Felt; + +use super::{CendeAmbassadorError, CendeAmbassadorResult}; +use crate::fee_market::FeeMarketInfo; + +/// Central objects are required so that the centralized Python pipeline can continue processing +/// the block. These objects are written to the Aerospike database and are used by Python +/// services. In the future, all services will be decentralized and this module will be removed.
+#[cfg(test)] +#[path = "central_objects_test.rs"] +mod central_objects_test; + +pub(crate) type CentralBouncerWeights = BouncerWeights; +pub(crate) type CentralFeeMarketInfo = FeeMarketInfo; +pub(crate) type CentralCompressedStateDiff = CentralStateDiff; +pub(crate) type CentralSierraContractClassEntry = (ClassHash, CentralSierraContractClass); +pub(crate) type CentralCasmContractClassEntry = (CompiledClassHash, CentralCasmContractClass); +pub(crate) type CentralCasmHashComputationData = CasmHashComputationData; + +#[derive(Clone, Debug, PartialEq, Serialize)] +struct CentralResourcePrice { + price_in_wei: NonzeroGasPrice, + price_in_fri: NonzeroGasPrice, +} + +#[derive(Clone, Debug, PartialEq, Serialize)] +pub(crate) struct CentralBlockInfo { + block_number: BlockNumber, + block_timestamp: BlockTimestamp, + sequencer_address: ContractAddress, + l1_gas_price: CentralResourcePrice, + l1_data_gas_price: CentralResourcePrice, + l2_gas_price: CentralResourcePrice, + use_kzg_da: bool, + starknet_version: Option<StarknetVersion>, +} + +impl From<(BlockInfo, StarknetVersion)> for CentralBlockInfo { + fn from((block_info, starknet_version): (BlockInfo, StarknetVersion)) -> CentralBlockInfo { + CentralBlockInfo { + block_number: block_info.block_number, + block_timestamp: block_info.block_timestamp, + sequencer_address: block_info.sequencer_address, + l1_gas_price: CentralResourcePrice { + price_in_wei: block_info.gas_prices.eth_gas_prices.l1_gas_price, + price_in_fri: block_info.gas_prices.strk_gas_prices.l1_gas_price, + }, + l1_data_gas_price: CentralResourcePrice { + price_in_wei: block_info.gas_prices.eth_gas_prices.l1_data_gas_price, + price_in_fri: block_info.gas_prices.strk_gas_prices.l1_data_gas_price, + }, + l2_gas_price: CentralResourcePrice { + price_in_wei: block_info.gas_prices.eth_gas_prices.l2_gas_price, + price_in_fri: block_info.gas_prices.strk_gas_prices.l2_gas_price, + }, + use_kzg_da: block_info.use_kzg_da, + starknet_version: Some(starknet_version), + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +pub(crate) struct CentralStateDiff { + address_to_class_hash: IndexMap<ContractAddress, ClassHash>, + nonces: IndexMap<DataAvailabilityMode, IndexMap<ContractAddress, Nonce>>, + storage_updates: + IndexMap<DataAvailabilityMode, IndexMap<ContractAddress, IndexMap<StorageKey, Felt>>>, + declared_classes: IndexMap<ClassHash, CompiledClassHash>, + block_info: CentralBlockInfo, +} + +// We convert to CentralStateDiff from ThinStateDiff since this object is already sent to consensus +// for the Sync service; otherwise, we could have used the CommitmentStateDiff as well.
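+// For example (a sketch; `state_diff` and `block_info` here stand for the batcher's output,
+// and V0_14_0 mirrors the version used in this crate's tests):
+//   let block_info: CentralBlockInfo = (block_info, StarknetVersion::V0_14_0).into();
+//   let central: CentralStateDiff = (state_diff, block_info).into();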
+impl From<(ThinStateDiff, CentralBlockInfo)> for CentralStateDiff { + fn from( + (state_diff, central_block_info): (ThinStateDiff, CentralBlockInfo), + ) -> CentralStateDiff { + assert!( + state_diff.deprecated_declared_classes.is_empty(), + "Deprecated classes are not supported" + ); + + CentralStateDiff { + address_to_class_hash: state_diff.deployed_contracts, + nonces: indexmap!(DataAvailabilityMode::L1=> state_diff.nonces), + storage_updates: indexmap!(DataAvailabilityMode::L1=> state_diff.storage_diffs), + declared_classes: state_diff.declared_classes, + block_info: central_block_info, + } + } +} + +impl From<(CommitmentStateDiff, CentralBlockInfo)> for CentralStateDiff { + fn from( + (state_diff, central_block_info): (CommitmentStateDiff, CentralBlockInfo), + ) -> CentralStateDiff { + CentralStateDiff { + address_to_class_hash: state_diff.address_to_class_hash, + nonces: indexmap!(DataAvailabilityMode::L1=> state_diff.address_to_nonce), + storage_updates: indexmap!(DataAvailabilityMode::L1=> state_diff.storage_updates), + declared_classes: state_diff.class_hash_to_compiled_class_hash, + block_info: central_block_info, + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +struct CentralResourceBounds { + #[serde(rename = "L1_GAS")] + l1_gas: ResourceBounds, + #[serde(rename = "L2_GAS")] + l2_gas: ResourceBounds, + #[serde(rename = "L1_DATA_GAS")] + l1_data_gas: ResourceBounds, +} + +impl From<AllResourceBounds> for CentralResourceBounds { + fn from(resource_bounds: AllResourceBounds) -> CentralResourceBounds { + CentralResourceBounds { + l1_gas: resource_bounds.l1_gas, + l2_gas: resource_bounds.l2_gas, + l1_data_gas: resource_bounds.l1_data_gas, + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +struct CentralInvokeTransactionV3 { + resource_bounds: CentralResourceBounds, + tip: Tip, + signature: TransactionSignature, + nonce: Nonce, + sender_address: ContractAddress, + calldata: Calldata, + nonce_data_availability_mode: u32, + fee_data_availability_mode: u32, + paymaster_data: PaymasterData, + account_deployment_data: AccountDeploymentData, + hash_value: TransactionHash, +} + +impl From<(RpcInvokeTransaction, TransactionHash)> for CentralInvokeTransactionV3 { + fn from( + (tx, hash_value): (RpcInvokeTransaction, TransactionHash), + ) -> CentralInvokeTransactionV3 { + let RpcInvokeTransaction::V3(tx) = tx; + CentralInvokeTransactionV3 { + sender_address: tx.sender_address, + calldata: tx.calldata, + signature: tx.signature, + nonce: tx.nonce, + resource_bounds: tx.resource_bounds.into(), + tip: tx.tip, + paymaster_data: tx.paymaster_data, + account_deployment_data: tx.account_deployment_data, + nonce_data_availability_mode: tx.nonce_data_availability_mode.into(), + fee_data_availability_mode: tx.fee_data_availability_mode.into(), + hash_value, + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +#[serde(tag = "version")] +enum CentralInvokeTransaction { + #[serde(rename = "0x3")] + V3(CentralInvokeTransactionV3), +} + +#[derive(Debug, PartialEq, Serialize)] +struct CentralDeployAccountTransactionV3 { + resource_bounds: CentralResourceBounds, + tip: Tip, + signature: TransactionSignature, + nonce: Nonce, + class_hash: ClassHash, + contract_address_salt: ContractAddressSalt, + sender_address: ContractAddress, + constructor_calldata: Calldata, + nonce_data_availability_mode: u32, + fee_data_availability_mode: u32, + paymaster_data: PaymasterData, + hash_value: TransactionHash, +} + +impl From<(InternalRpcDeployAccountTransaction, TransactionHash)> + for CentralDeployAccountTransactionV3 +{ + fn from(
+ (tx, hash_value): (InternalRpcDeployAccountTransaction, TransactionHash), + ) -> CentralDeployAccountTransactionV3 { + let sender_address = tx.contract_address; + let RpcDeployAccountTransaction::V3(tx) = tx.tx; + + CentralDeployAccountTransactionV3 { + resource_bounds: tx.resource_bounds.into(), + tip: tx.tip, + signature: tx.signature, + nonce: tx.nonce, + class_hash: tx.class_hash, + contract_address_salt: tx.contract_address_salt, + constructor_calldata: tx.constructor_calldata, + nonce_data_availability_mode: tx.nonce_data_availability_mode.into(), + fee_data_availability_mode: tx.fee_data_availability_mode.into(), + paymaster_data: tx.paymaster_data, + hash_value, + sender_address, + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +#[serde(tag = "version")] +enum CentralDeployAccountTransaction { + #[serde(rename = "0x3")] + V3(CentralDeployAccountTransactionV3), +} + +fn into_string_tuple(val: SierraVersion) -> (String, String, String) { + (format!("0x{:x}", val.major), format!("0x{:x}", val.minor), format!("0x{:x}", val.patch)) +} + +#[derive(Debug, PartialEq, Serialize)] +struct CentralDeclareTransactionV3 { + resource_bounds: CentralResourceBounds, + tip: Tip, + signature: TransactionSignature, + nonce: Nonce, + class_hash: ClassHash, + compiled_class_hash: CompiledClassHash, + sender_address: ContractAddress, + nonce_data_availability_mode: u32, + fee_data_availability_mode: u32, + paymaster_data: PaymasterData, + account_deployment_data: AccountDeploymentData, + sierra_program_size: usize, + abi_size: usize, + sierra_version: (String, String, String), + hash_value: TransactionHash, +} + +impl TryFrom<(InternalRpcDeclareTransactionV3, &SierraContractClass, TransactionHash)> + for CentralDeclareTransactionV3 +{ + type Error = CendeAmbassadorError; + + fn try_from( + (tx, sierra, hash_value): ( + InternalRpcDeclareTransactionV3, + &SierraContractClass, + TransactionHash, + ), + ) -> CendeAmbassadorResult<Self> { + Ok(CentralDeclareTransactionV3 { + resource_bounds: tx.resource_bounds.into(), + tip: tx.tip, + signature: tx.signature, + nonce: tx.nonce, + class_hash: tx.class_hash, + compiled_class_hash: tx.compiled_class_hash, + sender_address: tx.sender_address, + nonce_data_availability_mode: tx.nonce_data_availability_mode.into(), + fee_data_availability_mode: tx.fee_data_availability_mode.into(), + paymaster_data: tx.paymaster_data, + account_deployment_data: tx.account_deployment_data, + sierra_program_size: sierra.sierra_program.len(), + abi_size: sierra.abi.len(), + sierra_version: into_string_tuple(SierraVersion::from_str( + &sierra.contract_class_version, + )?), + hash_value, + }) + } +} + +#[derive(Debug, PartialEq, Serialize)] +#[serde(tag = "version")] +enum CentralDeclareTransaction { + #[serde(rename = "0x3")] + V3(CentralDeclareTransactionV3), +} + +#[derive(Debug, PartialEq, Serialize)] +struct CentralL1HandlerTransaction { + contract_address: ContractAddress, + entry_point_selector: EntryPointSelector, + calldata: Calldata, + nonce: Nonce, + paid_fee_on_l1: Fee, + hash_value: TransactionHash, +} + +impl From<L1HandlerTransaction> for CentralL1HandlerTransaction { + fn from(tx: L1HandlerTransaction) -> CentralL1HandlerTransaction { + CentralL1HandlerTransaction { + hash_value: tx.tx_hash, + contract_address: tx.tx.contract_address, + entry_point_selector: tx.tx.entry_point_selector, + calldata: tx.tx.calldata, + nonce: tx.tx.nonce, + paid_fee_on_l1: tx.paid_fee_on_l1, + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +#[serde(tag = "type")] +enum CentralTransaction { + #[serde(rename = "INVOKE_FUNCTION")]
"INVOKE_FUNCTION")] + Invoke(CentralInvokeTransaction), + #[serde(rename = "DEPLOY_ACCOUNT")] + DeployAccount(CentralDeployAccountTransaction), + #[serde(rename = "DECLARE")] + Declare(CentralDeclareTransaction), + #[serde(rename = "L1_HANDLER")] + L1Handler(CentralL1HandlerTransaction), +} + +impl TryFrom<(InternalConsensusTransaction, Option<&SierraContractClass>)> for CentralTransaction { + type Error = CendeAmbassadorError; + + fn try_from( + (tx, sierra): (InternalConsensusTransaction, Option<&SierraContractClass>), + ) -> CendeAmbassadorResult { + match tx { + InternalConsensusTransaction::RpcTransaction(rpc_transaction) => { + match rpc_transaction.tx { + InternalRpcTransactionWithoutTxHash::Invoke(invoke_tx) => { + Ok(CentralTransaction::Invoke(CentralInvokeTransaction::V3( + (invoke_tx, rpc_transaction.tx_hash).into(), + ))) + } + InternalRpcTransactionWithoutTxHash::DeployAccount(deploy_tx) => { + Ok(CentralTransaction::DeployAccount(CentralDeployAccountTransaction::V3( + (deploy_tx, rpc_transaction.tx_hash).into(), + ))) + } + InternalRpcTransactionWithoutTxHash::Declare(declare_tx) => { + let sierra = sierra + .expect("Sierra contract class is required for declare_tx conversion"); + Ok(CentralTransaction::Declare(CentralDeclareTransaction::V3( + (declare_tx, sierra, rpc_transaction.tx_hash).try_into()?, + ))) + } + } + } + InternalConsensusTransaction::L1Handler(l1_handler_tx) => { + Ok(CentralTransaction::L1Handler(l1_handler_tx.into())) + } + } + } +} + +#[derive(Debug, PartialEq, Serialize)] +pub(crate) struct CentralTransactionWritten { + tx: CentralTransaction, + // The timestamp is required for monitoring data, we use the block timestamp for this. + time_created: u64, +} + +// This function gets SierraContractClass only for declare_tx, otherwise use None. +impl TryFrom<(InternalConsensusTransaction, Option<&SierraContractClass>, u64)> + for CentralTransactionWritten +{ + type Error = CendeAmbassadorError; + + fn try_from( + (tx, sierra, timestamp): (InternalConsensusTransaction, Option<&SierraContractClass>, u64), + ) -> CendeAmbassadorResult { + Ok(CentralTransactionWritten { + tx: CentralTransaction::try_from((tx, sierra))?, + time_created: timestamp, + }) + } +} +#[derive(Clone, Debug, PartialEq, Serialize)] +pub(crate) struct CentralSierraContractClass { + contract_class: SierraContractClass, +} + +#[derive(Clone, Debug, PartialEq, Serialize)] +pub(crate) struct CentralCasmContractClass { + compiled_class: CasmContractClass, +} + +impl From for CentralCasmContractClass { + fn from(compiled_class: CasmContractClass) -> CentralCasmContractClass { + CentralCasmContractClass { + compiled_class: CasmContractClass { + // This field is mandatory in the python object. + pythonic_hints: Some(compiled_class.pythonic_hints.unwrap_or_default()), + ..compiled_class + }, + } + } +} + +async fn get_contract_classes_if_declare( + class_manager: SharedClassManagerClient, + tx: &InternalConsensusTransaction, +) -> CendeAmbassadorResult> +{ + // Check if the tx is declare, otherwise return None. + let InternalConsensusTransaction::RpcTransaction(InternalRpcTransaction { + tx: InternalRpcTransactionWithoutTxHash::Declare(declare_tx), + .. + }) = &tx + else { + return Ok(None); + }; + + let class_hash = declare_tx.class_hash; + + // TODO(yael, dvir): get the classes in parallel from the class manager. + let ContractClass::V1(casm) = class_manager + .get_executable(class_hash) + .await? + .ok_or(CendeAmbassadorError::ClassNotFound { class_hash })? 
+ else { + panic!("Only V1 contract classes are supported"); + }; + + let hashed_casm = (declare_tx.compiled_class_hash, CentralCasmContractClass::from(casm.0)); + let sierra = class_manager + .get_sierra(class_hash) + .await? + .ok_or(CendeAmbassadorError::ClassNotFound { class_hash })?; + let hashed_sierra = (class_hash, CentralSierraContractClass { contract_class: sierra }); + + Ok(Some((hashed_sierra, hashed_casm))) +} + +pub(crate) async fn process_transactions( + class_manager: SharedClassManagerClient, + txs: Vec<InternalConsensusTransaction>, + timestamp: u64, +) -> CendeAmbassadorResult<( + Vec<CentralTransactionWritten>, + Vec<CentralSierraContractClassEntry>, + Vec<CentralCasmContractClassEntry>, +)> { + let mut contract_classes = Vec::new(); + let mut compiled_classes = Vec::new(); + let mut central_transactions = Vec::new(); + for tx in txs { + if let Some((contract_class, compiled_class)) = + get_contract_classes_if_declare(class_manager.clone(), &tx).await? + { + central_transactions.push(CentralTransactionWritten::try_from(( + tx, + Some(&contract_class.1.contract_class), + timestamp, + ))?); + contract_classes.push(contract_class); + compiled_classes.push(compiled_class); + } else { + central_transactions.push(CentralTransactionWritten::try_from((tx, None, timestamp))?); + } + } + Ok((central_transactions, contract_classes, compiled_classes)) +} diff --git a/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs b/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs new file mode 100644 index 00000000000..bce1dca860f --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs @@ -0,0 +1,971 @@ +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use std::vec; + +use apollo_batcher::cende_client_types::{ + Builtin, + CendeBlockMetadata, + CendePreconfirmedBlock, + CendePreconfirmedTransaction, + ExecutionResources as CendeClientExecutionResources, + IntermediateInvokeTransaction, + StarknetClientTransactionReceipt, + TransactionExecutionStatus, +}; +use apollo_class_manager_types::MockClassManagerClient; +use apollo_infra_utils::test_utils::assert_json_eq; +use apollo_starknet_client::reader::objects::state::StateDiff; +use apollo_starknet_client::reader::objects::transaction::ReservedDataAvailabilityMode; +use apollo_starknet_client::reader::StorageEntry; +use blockifier::execution::call_info::{ + CallExecution, + CallInfo, + MessageToL1, + OrderedEvent, + OrderedL2ToL1Message, + Retdata, + StorageAccessTracker, +}; +use blockifier::execution::contract_class::TrackedResource; +use blockifier::execution::entry_point::{CallEntryPoint, CallType}; +use blockifier::fee::fee_checks::FeeCheckError; +use blockifier::fee::receipt::TransactionReceipt; +use blockifier::fee::resources::{ + ArchivalDataResources, + ComputationResources, + MessageResources, + StarknetResources, + StateResources, + TransactionResources, +}; +use blockifier::state::cached_state::{ + CommitmentStateDiff, + StateChangesCount, + StateChangesCountForFee, +}; +use blockifier::transaction::objects::{ + ExecutionResourcesTraits, + RevertError, + TransactionExecutionInfo, +}; +use cairo_lang_casm::hints::{CoreHint, CoreHintBase, Hint}; +use cairo_lang_casm::operand::{CellRef, Register}; +use cairo_lang_starknet_classes::casm_contract_class::{ + CasmContractClass, + CasmContractEntryPoint, + CasmContractEntryPoints, +}; +use cairo_lang_starknet_classes::NestedIntList; +use cairo_lang_utils::bigint::BigUintAsHex; +use cairo_vm::types::builtin_name::BuiltinName; +use cairo_vm::vm::runners::cairo_runner::ExecutionResources; +use indexmap::indexmap; +use
mockall::predicate::eq; +use num_bigint::BigUint; +use rstest::rstest; +use serde::Serialize; +use shared_execution_objects::central_objects::CentralTransactionExecutionInfo; +use starknet_api::block::{ + BlockHash, + BlockInfo, + BlockNumber, + BlockTimestamp, + GasPrice, + GasPricePerToken, + GasPriceVector, + GasPrices, + NonzeroGasPrice, + StarknetVersion, +}; +use starknet_api::consensus_transaction::InternalConsensusTransaction; +use starknet_api::contract_class::{ContractClass, EntryPointType, SierraVersion}; +use starknet_api::core::{ClassHash, CompiledClassHash, EntryPointSelector, EthAddress}; +use starknet_api::data_availability::{DataAvailabilityMode, L1DataAvailabilityMode}; +use starknet_api::executable_transaction::L1HandlerTransaction; +use starknet_api::execution_resources::{GasAmount, GasVector}; +use starknet_api::rpc_transaction::{ + EntryPointByType, + InternalRpcDeclareTransactionV3, + InternalRpcDeployAccountTransaction, + InternalRpcTransaction, + InternalRpcTransactionWithoutTxHash, + RpcDeployAccountTransaction, + RpcDeployAccountTransactionV3, + RpcInvokeTransaction, + RpcInvokeTransactionV3, +}; +use starknet_api::state::{ + EntryPoint, + FunctionIndex, + SierraContractClass, + StorageKey, + ThinStateDiff, +}; +use starknet_api::test_utils::read_json_file; +use starknet_api::transaction::fields::{ + AccountDeploymentData, + AllResourceBounds, + Calldata, + ContractAddressSalt, + Fee, + PaymasterData, + ResourceBounds, + Tip, + TransactionSignature, +}; +use starknet_api::transaction::{ + Event, + EventContent, + EventData, + EventKey, + L2ToL1Payload, + TransactionHash, + TransactionOffsetInBlock, + TransactionVersion, +}; +use starknet_api::{contract_address, felt, nonce, storage_key}; +use starknet_types_core::felt::Felt; + +use super::{ + CentralBouncerWeights, + CentralCasmHashComputationData, + CentralCompressedStateDiff, + CentralDeclareTransaction, + CentralDeployAccountTransaction, + CentralFeeMarketInfo, + CentralInvokeTransaction, + CentralSierraContractClass, + CentralStateDiff, + CentralTransaction, + CentralTransactionWritten, +}; +use crate::cende::central_objects::CentralCasmContractClass; +use crate::cende::{AerospikeBlob, BlobParameters}; + +// TODO(yael, dvir): add default object serialization tests. 
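The TODO above calls for default-object serialization tests; a minimal sketch of the regression pattern follows. It uses plain `serde_json` and `std::fs` rather than the `read_json_file`/`assert_json_eq` helpers imported above (whose exact signatures this patch does not show), and it assumes the fixture path resolves from the test's working directory:

```rust
// Sketch only: compare a fixture JSON file against the serialized central object.
#[test]
fn serialize_central_bouncer_weights_sketch() {
    let actual = serde_json::to_value(central_bouncer_weights()).unwrap();
    let expected: serde_json::Value = serde_json::from_str(
        &std::fs::read_to_string(CENTRAL_BOUNCER_WEIGHTS_JSON_PATH).unwrap(),
    )
    .unwrap();
    assert_eq!(actual, expected);
}
```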
+ +pub const CENTRAL_STATE_DIFF_JSON_PATH: &str = "central_state_diff.json"; +pub const CENTRAL_INVOKE_TX_JSON_PATH: &str = "central_invoke_tx.json"; +pub const CENTRAL_DEPLOY_ACCOUNT_TX_JSON_PATH: &str = "central_deploy_account_tx.json"; +pub const CENTRAL_DECLARE_TX_JSON_PATH: &str = "central_declare_tx.json"; +pub const CENTRAL_L1_HANDLER_TX_JSON_PATH: &str = "central_l1_handler_tx.json"; +pub const CENTRAL_BOUNCER_WEIGHTS_JSON_PATH: &str = "central_bouncer_weights.json"; +pub const CENTRAL_FEE_MARKET_INFO_JSON_PATH: &str = "central_fee_market_info.json"; +pub const CENTRAL_SIERRA_CONTRACT_CLASS_JSON_PATH: &str = "central_contract_class.sierra.json"; +pub const CENTRAL_CASM_CONTRACT_CLASS_JSON_PATH: &str = "central_contract_class.casm.json"; +pub const CENTRAL_CASM_CONTRACT_CLASS_DEFAULT_OPTIONALS_JSON_PATH: &str = + "central_contract_class_default_optionals.casm.json"; +pub const CENTRAL_TRANSACTION_EXECUTION_INFO_JSON_PATH: &str = + "central_transaction_execution_info.json"; +pub const CENTRAL_TRANSACTION_EXECUTION_INFO_REVERTED_JSON_PATH: &str = + "central_transaction_execution_info_reverted.json"; +pub const CENTRAL_BLOB_JSON_PATH: &str = "central_blob.json"; +pub const CENTRAL_CASM_HASH_COMPUTATION_DATA_JSON_PATH: &str = + "central_casm_hash_computation_data.json"; +pub const CENTRAL_PRECONFIRMED_BLOCK_JSON_PATH: &str = "central_preconfirmed_block.json"; + +fn resource_bounds() -> AllResourceBounds { + AllResourceBounds { + l1_gas: ResourceBounds { max_amount: GasAmount(1), max_price_per_unit: GasPrice(1) }, + l2_gas: ResourceBounds { max_amount: GasAmount(2), max_price_per_unit: GasPrice(2) }, + l1_data_gas: ResourceBounds { max_amount: GasAmount(3), max_price_per_unit: GasPrice(3) }, + } +} + +fn felt_vector() -> Vec<Felt> { + vec![felt!(0_u8), felt!(1_u8), felt!(2_u8)] +} + +fn declare_class_hash() -> ClassHash { + ClassHash(felt!("0x3a59046762823dc87385eb5ac8a21f3f5bfe4274151c6eb633737656c209056")) +} + +fn declare_compiled_class_hash() -> CompiledClassHash { + CompiledClassHash(felt!(1_u8)) +} + +fn thin_state_diff() -> ThinStateDiff { + ThinStateDiff { + deployed_contracts: indexmap! { + contract_address!(1_u8) => + ClassHash(felt!(1_u8)), + contract_address!(5_u8)=> ClassHash(felt!(5_u8)), + }, + storage_diffs: indexmap!(contract_address!(3_u8) => indexmap!(storage_key!(3_u8) => felt!(3_u8))), + declared_classes: indexmap!(ClassHash(felt!(4_u8))=> CompiledClassHash(felt!(4_u8))), + nonces: indexmap!(contract_address!(2_u8)=> nonce!(2)), + ..Default::default() + } +} + +fn block_info() -> BlockInfo { + BlockInfo { + block_number: BlockNumber(5), + block_timestamp: BlockTimestamp(6), + sequencer_address: contract_address!(7_u8), + gas_prices: GasPrices { + eth_gas_prices: GasPriceVector { + l1_gas_price: NonzeroGasPrice::new(GasPrice(8)).unwrap(), + l1_data_gas_price: NonzeroGasPrice::new(GasPrice(10)).unwrap(), + l2_gas_price: NonzeroGasPrice::new(GasPrice(12)).unwrap(), + }, + strk_gas_prices: GasPriceVector { + l1_gas_price: NonzeroGasPrice::new(GasPrice(9)).unwrap(), + l1_data_gas_price: NonzeroGasPrice::new(GasPrice(11)).unwrap(), + l2_gas_price: NonzeroGasPrice::new(GasPrice(13)).unwrap(), + }, + }, + use_kzg_da: true, + } +} + +fn central_state_diff() -> CentralStateDiff { + let state_diff = thin_state_diff(); + let block_info = block_info(); + let starknet_version = StarknetVersion::V0_14_0; + + (state_diff, (block_info, starknet_version).into()).into() +} + +fn commitment_state_diff() -> CommitmentStateDiff { + CommitmentStateDiff { + address_to_class_hash: indexmap!
{ + contract_address!(1_u8) => ClassHash(felt!(1_u8)), + contract_address!(5_u8)=> ClassHash(felt!(5_u8)), + }, + storage_updates: indexmap!(contract_address!(3_u8) => indexmap!(storage_key!(3_u8) => felt!(3_u8))), + class_hash_to_compiled_class_hash: indexmap!(ClassHash(felt!(4_u8))=> CompiledClassHash(felt!(4_u8))), + address_to_nonce: indexmap!(contract_address!(2_u8)=> nonce!(2)), + } +} + +fn central_compressed_state_diff() -> CentralCompressedStateDiff { + let state_diff = commitment_state_diff(); + let block_info = block_info(); + let starknet_version = StarknetVersion::V0_14_0; + + (state_diff, (block_info, starknet_version).into()).into() +} + +fn invoke_transaction() -> RpcInvokeTransaction { + RpcInvokeTransaction::V3(RpcInvokeTransactionV3 { + resource_bounds: resource_bounds(), + tip: Tip(1), + signature: TransactionSignature(felt_vector().into()), + nonce: nonce!(1), + sender_address: contract_address!( + "0x14abfd58671a1a9b30de2fcd2a42e8bff2ce1096a7c70bc7995904965f277e" + ), + calldata: Calldata(Arc::new(vec![felt!(0_u8), felt!(1_u8)])), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: PaymasterData(vec![]), + account_deployment_data: AccountDeploymentData(vec![]), + }) +} + +fn central_invoke_tx() -> CentralTransactionWritten { + let invoke_tx = invoke_transaction(); + let tx_hash = + TransactionHash(felt!("0x6efd067c859e6469d0f6d158e9ae408a9552eb8cc11f618ab3aef3e52450666")); + + CentralTransactionWritten { + tx: CentralTransaction::Invoke(CentralInvokeTransaction::V3((invoke_tx, tx_hash).into())), + time_created: 1734601615, + } +} + +fn deploy_account_tx() -> InternalRpcDeployAccountTransaction { + InternalRpcDeployAccountTransaction { + tx: RpcDeployAccountTransaction::V3(RpcDeployAccountTransactionV3 { + resource_bounds: resource_bounds(), + tip: Tip(1), + signature: TransactionSignature(felt_vector().into()), + nonce: nonce!(1), + class_hash: ClassHash(felt!( + "0x1b5a0b09f23b091d5d1fa2f660ddfad6bcfce607deba23806cd7328ccfb8ee9" + )), + contract_address_salt: ContractAddressSalt(felt!(2_u8)), + constructor_calldata: Calldata(Arc::new(felt_vector())), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: PaymasterData(vec![]), + }), + contract_address: contract_address!( + "0x4c2e031b0ddaa38e06fd9b1bf32bff739965f9d64833006204c67cbc879a57c" + ), + } +} + +fn central_deploy_account_tx() -> CentralTransactionWritten { + let deploy_account_tx = deploy_account_tx(); + + let tx_hash = + TransactionHash(felt!("0x429cb4dc45610a80a96800ab350a11ff50e2d69e25c7723c002934e66b5a282")); + + CentralTransactionWritten { + tx: CentralTransaction::DeployAccount(CentralDeployAccountTransaction::V3( + (deploy_account_tx, tx_hash).into(), + )), + time_created: 1734601616, + } +} + +fn declare_transaction() -> InternalRpcDeclareTransactionV3 { + InternalRpcDeclareTransactionV3 { + resource_bounds: resource_bounds(), + tip: Tip(1), + signature: TransactionSignature(felt_vector().into()), + nonce: nonce!(1), + class_hash: declare_class_hash(), + compiled_class_hash: declare_compiled_class_hash(), + sender_address: contract_address!("0x12fd537"), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: PaymasterData(vec![]), + account_deployment_data: AccountDeploymentData(vec![]), + } +} + +fn central_declare_tx() -> CentralTransactionWritten { + let tx_hash = + 
TransactionHash(felt!("0x41e7d973115400a98a7775190c27d4e3b1fcd8cd40b7d27464f6c3f10b8b706")); + let declare_tx = declare_transaction(); + + CentralTransactionWritten { + tx: CentralTransaction::Declare(CentralDeclareTransaction::V3( + (declare_tx, &sierra_contract_class(), tx_hash).try_into().unwrap(), + )), + time_created: 1734601649, + } +} + +fn l1_handler_tx() -> L1HandlerTransaction { + L1HandlerTransaction { + tx: starknet_api::transaction::L1HandlerTransaction { + version: TransactionVersion::ZERO, + nonce: nonce!(1), + contract_address: contract_address!( + "0x14abfd58671a1a9b30de2fcd2a42e8bff2ce1096a7c70bc7995904965f277e" + ), + entry_point_selector: EntryPointSelector(felt!("0x2a")), + calldata: Calldata(Arc::new(vec![felt!(0_u8), felt!(1_u8)])), + }, + tx_hash: TransactionHash(felt!( + "0xc947753befd252ca08042000cd6d783162ee2f5df87b519ddf3081b9b4b997" + )), + paid_fee_on_l1: Fee(1), + } +} + +fn central_l1_handler_tx() -> CentralTransactionWritten { + let l1_handler_tx = l1_handler_tx(); + + CentralTransactionWritten { + tx: CentralTransaction::L1Handler(l1_handler_tx.into()), + time_created: 1734601657, + } +} + +fn central_bouncer_weights() -> CentralBouncerWeights { + CentralBouncerWeights { + l1_gas: 8, + message_segment_length: 9, + n_events: 2, + state_diff_size: 45, + sierra_gas: GasAmount(10), + n_txs: 2, + proving_gas: GasAmount(11), + } +} + +fn central_fee_market_info() -> CentralFeeMarketInfo { + CentralFeeMarketInfo { l2_gas_consumed: GasAmount(150000), next_l2_gas_price: GasPrice(100000) } +} + +fn entry_point(idx: usize, selector: u8) -> EntryPoint { + EntryPoint { function_idx: FunctionIndex(idx), selector: EntryPointSelector(felt!(selector)) } +} + +fn sierra_contract_class() -> SierraContractClass { + SierraContractClass { + sierra_program: felt_vector(), + contract_class_version: "0.1.0".to_string(), + entry_points_by_type: EntryPointByType { + constructor: vec![entry_point(1, 2)], + external: vec![entry_point(3, 4)], + l1handler: vec![entry_point(5, 6)], + }, + abi: "dummy abi".to_string(), + } +} + +fn central_casm_hash_computation_data() -> CentralCasmHashComputationData { + CentralCasmHashComputationData { + class_hash_to_casm_hash_computation_gas: HashMap::from([( + declare_class_hash(), + GasAmount(1), + )]), + gas_without_casm_hash_computation: GasAmount(3), + } +} + +fn central_sierra_contract_class() -> CentralSierraContractClass { + CentralSierraContractClass { contract_class: sierra_contract_class() } +} + +fn casm_contract_entry_points() -> Vec { + vec![CasmContractEntryPoint { + selector: BigUint::from(1_u8), + offset: 1, + builtins: vec!["dummy builtin".to_string()], + }] +} + +fn casm_contract_class() -> CasmContractClass { + CasmContractClass { + prime: BigUint::from(1_u8), + compiler_version: "dummy version".to_string(), + bytecode: vec![BigUintAsHex { value: BigUint::from(1_u8) }], + bytecode_segment_lengths: Some(NestedIntList::Node(vec![ + NestedIntList::Leaf(1), + NestedIntList::Leaf(2), + ])), + hints: vec![( + 4, + vec![Hint::Core(CoreHintBase::Core(CoreHint::AllocSegment { + dst: CellRef { register: Register::AP, offset: 1 }, + }))], + )], + pythonic_hints: Some(vec![(5, vec!["dummy pythonic hint".to_string()])]), + entry_points_by_type: CasmContractEntryPoints { + external: casm_contract_entry_points(), + l1_handler: casm_contract_entry_points(), + constructor: casm_contract_entry_points(), + }, + } +} + +fn central_casm_contract_class() -> CentralCasmContractClass { + CentralCasmContractClass::from(casm_contract_class()) +} + +fn 
central_casm_contract_class_default_optional_fields() -> CentralCasmContractClass { + let casm_contract_class = CasmContractClass { + bytecode_segment_lengths: None, + pythonic_hints: None, + ..casm_contract_class() + }; + CentralCasmContractClass::from(casm_contract_class) +} + +fn execution_resources() -> ExecutionResources { + ExecutionResources { + n_steps: 2, + n_memory_holes: 3, + builtin_instance_counter: HashMap::from([ + (BuiltinName::range_check, 31), + (BuiltinName::pedersen, 4), + ]), + } +} + +fn call_info() -> CallInfo { + CallInfo { + call: CallEntryPoint { + class_hash: Some(ClassHash(felt!("0x80020000"))), + code_address: Some(contract_address!("0x40070000")), + entry_point_type: EntryPointType::External, + entry_point_selector: EntryPointSelector(felt!( + "0x162da33a4585851fe8d3af3c2a9c60b557814e221e0d4f30ff0b2189d9c7775" + )), + calldata: Calldata(Arc::new(vec![ + felt!("0x40070000"), + felt!("0x39a1491f76903a16feed0a6433bec78de4c73194944e1118e226820ad479701"), + felt!("0x1"), + felt!("0x2"), + ])), + storage_address: contract_address!("0xc0020000"), + caller_address: contract_address!("0x1"), + call_type: CallType::Call, + initial_gas: 100_000_000, + }, + execution: CallExecution { + retdata: Retdata(vec![felt!("0x56414c4944")]), + events: vec![OrderedEvent { + order: 2, + event: EventContent { + keys: vec![EventKey(felt!("0x9"))], + data: EventData(felt_vector()), + }, + }], + l2_to_l1_messages: vec![OrderedL2ToL1Message { + order: 1, + message: MessageToL1 { + to_address: EthAddress::try_from(felt!(1_u8)).unwrap(), + payload: L2ToL1Payload(felt_vector()), + }, + }], + failed: false, + gas_consumed: 11_690, + cairo_native: false, + }, + inner_calls: Vec::new(), + resources: execution_resources(), + tracked_resource: TrackedResource::SierraGas, + storage_access_tracker: StorageAccessTracker { + storage_read_values: felt_vector(), + accessed_storage_keys: HashSet::from([StorageKey::from(1_u128)]), + read_class_hash_values: vec![ClassHash(felt!("0x80020000"))], + accessed_contract_addresses: HashSet::from([contract_address!("0x1")]), + read_block_hash_values: vec![BlockHash(felt!("0xdeafbee"))], + accessed_blocks: HashSet::from([BlockNumber(100)]), + }, + // TODO(Meshi): insert relevant values. + builtin_counters: execution_resources().prover_builtins(), + ..Default::default() + } +} + +// This object is very long , so in order to test all types of sub-structs and refrain from filling +// the entire object, we fill only one CallInfo with non-default values and the other CallInfos are +// None. +fn transaction_execution_info() -> TransactionExecutionInfo { + TransactionExecutionInfo { + validate_call_info: Some(CallInfo { inner_calls: vec![call_info()], ..call_info() }), + execute_call_info: Some(CallInfo { inner_calls: vec![call_info()], ..call_info() }), + fee_transfer_call_info: Some(CallInfo { inner_calls: vec![call_info()], ..call_info() }), + revert_error: None, + receipt: TransactionReceipt { + fee: Fee(0x26fe9d250e000), + gas: GasVector { + l1_gas: GasAmount(6860), + l1_data_gas: GasAmount(1), + l2_gas: GasAmount(1), + }, + da_gas: GasVector { + l1_gas: GasAmount(1652), + l1_data_gas: GasAmount(1), + l2_gas: GasAmount(1), + }, + resources: TransactionResources { + starknet_resources: StarknetResources { + // The archival_data has private fields so it cannot be assigned, however, it is + // not being used in the central object anyway so it can be default. 
+                    archival_data: ArchivalDataResources::default(),
+                    messages: MessageResources {
+                        l2_to_l1_payload_lengths: vec![1, 2],
+                        message_segment_length: 1,
+                        l1_handler_payload_size: Some(1),
+                    },
+                    state: StateResources {
+                        state_changes_for_fee: StateChangesCountForFee {
+                            state_changes_count: StateChangesCount {
+                                n_storage_updates: 1,
+                                n_class_hash_updates: 2,
+                                n_compiled_class_hash_updates: 3,
+                                n_modified_contracts: 4,
+                            },
+                            n_allocated_keys: 5,
+                        },
+                    },
+                },
+                computation: ComputationResources {
+                    tx_vm_resources: execution_resources(),
+                    os_vm_resources: ExecutionResources::default(),
+                    n_reverted_steps: 2,
+                    sierra_gas: GasAmount(0x128140),
+                    reverted_sierra_gas: GasAmount(0x2),
+                },
+            },
+        },
+    }
+}
+
+fn central_transaction_execution_info() -> CentralTransactionExecutionInfo {
+    transaction_execution_info().into()
+}
+
+fn central_transaction_execution_info_reverted() -> CentralTransactionExecutionInfo {
+    let mut transaction_execution_info = transaction_execution_info();
+    // The Python side enforces that if the transaction is reverted, the execute_call_info is None.
+    // Since we are using the same JSON files for the Python tests, we apply the same rule here.
+    transaction_execution_info.execute_call_info = None;
+
+    transaction_execution_info.revert_error =
+        Some(RevertError::PostExecution(FeeCheckError::InsufficientFeeTokenBalance {
+            fee: Fee(1),
+            balance_low: felt!(2_u8),
+            balance_high: felt!(3_u8),
+        }));
+
+    transaction_execution_info.into()
+}
+
+fn declare_tx_with_hash(tx_hash: u64) -> InternalConsensusTransaction {
+    InternalConsensusTransaction::RpcTransaction(InternalRpcTransaction {
+        tx: InternalRpcTransactionWithoutTxHash::Declare(declare_transaction()),
+        tx_hash: TransactionHash(felt!(tx_hash)),
+    })
+}
+
+// Returns a vector of transactions and a mock class manager with the expectations needed to
+// convert the consensus transactions to central transactions.
+fn input_txs_and_mock_class_manager() -> (Vec<InternalConsensusTransaction>, MockClassManagerClient)
+{
+    let invoke = InternalConsensusTransaction::RpcTransaction(InternalRpcTransaction {
+        tx: InternalRpcTransactionWithoutTxHash::Invoke(invoke_transaction()),
+        tx_hash: TransactionHash(Felt::TWO),
+    });
+    let deploy_account = InternalConsensusTransaction::RpcTransaction(InternalRpcTransaction {
+        tx: InternalRpcTransactionWithoutTxHash::DeployAccount(deploy_account_tx()),
+        tx_hash: TransactionHash(Felt::THREE),
+    });
+    let l1_handler = InternalConsensusTransaction::L1Handler(l1_handler_tx());
+
+    let transactions =
+        vec![declare_tx_with_hash(1), invoke, deploy_account, l1_handler, declare_tx_with_hash(4)];
+
+    let mut mock_class_manager = MockClassManagerClient::new();
+    mock_class_manager
+        .expect_get_sierra()
+        .with(eq(declare_class_hash()))
+        .times(2)
+        .returning(|_| Ok(Some(sierra_contract_class())));
+    mock_class_manager.expect_get_executable().with(eq(declare_class_hash())).times(2).returning(
+        |_| Ok(Some(ContractClass::V1((casm_contract_class(), SierraVersion::new(0, 0, 0))))),
+    );
+
+    (transactions, mock_class_manager)
+}
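The helper above pins exact call counts on the mocked class manager: the two declare transactions in the list are what force `.times(2)` on both `get_sierra` and `get_executable`. A minimal, self-contained sketch of the same mockall pattern, using a toy trait rather than the real `ClassManagerClient` API:

```rust
use mockall::automock;
use mockall::predicate::eq;

#[automock]
trait ClassFetcher {
    fn get_sierra(&self, class_hash: u64) -> Option<String>;
}

fn main() {
    let mut mock = MockClassFetcher::new();
    // Expect exactly two lookups of class hash 7, mirroring the two declare txs above.
    mock.expect_get_sierra().with(eq(7_u64)).times(2).returning(|_| Some("sierra".into()));

    assert_eq!(mock.get_sierra(7), Some("sierra".to_string()));
    assert_eq!(mock.get_sierra(7), Some("sierra".to_string()));
    // Dropping `mock` here verifies the expectation; a third call would have panicked.
}
```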
+// TODO(dvir): use a real blob when possible.
+fn central_blob() -> AerospikeBlob {
+    let (input_txs, mock_class_manager) = input_txs_and_mock_class_manager();
+    let blob_parameters = BlobParameters {
+        block_info: block_info(),
+        state_diff: thin_state_diff(),
+        compressed_state_diff: Some(commitment_state_diff()),
+        transactions: input_txs,
+        bouncer_weights: central_bouncer_weights(),
+        fee_market_info: central_fee_market_info(),
+        execution_infos: vec![transaction_execution_info()],
+        casm_hash_computation_data_sierra_gas: central_casm_hash_computation_data(),
+        casm_hash_computation_data_proving_gas: central_casm_hash_computation_data(),
+    };
+
+    // This is to make the function sync (not async) so that it can be used as a case in the
+    // serialize_central_objects test.
+    let runtime = tokio::runtime::Runtime::new().unwrap();
+    runtime
+        .block_on(AerospikeBlob::from_blob_parameters_and_class_manager(
+            blob_parameters,
+            Arc::new(mock_class_manager),
+        ))
+        .unwrap()
+}
+
+fn event_from_serialized_fields(from_address: &str, keys: Vec<&str>, data: Vec<&str>) -> Event {
+    Event {
+        from_address: contract_address!(from_address),
+        content: EventContent {
+            keys: keys.into_iter().map(|s| EventKey(felt!(s))).collect(),
+            data: EventData(data.into_iter().map(|s| felt!(s)).collect::<Vec<_>>()),
+        },
+    }
+}
+
+fn starknet_preconfirmed_block() -> CendePreconfirmedBlock {
+    let metadata = CendeBlockMetadata {
+        status: "PRE_CONFIRMED",
+        starknet_version: StarknetVersion::V0_14_0,
+        l1_da_mode: L1DataAvailabilityMode::Blob,
+        l1_gas_price: GasPricePerToken {
+            price_in_fri: GasPrice(0x1a146bb0e3c5),
+            price_in_wei: GasPrice(0x59a78b10),
+        },
+        l1_data_gas_price: GasPricePerToken {
+            price_in_fri: GasPrice(0xa0e),
+            price_in_wei: GasPrice(0x1),
+        },
+        l2_gas_price: GasPricePerToken {
+            price_in_fri: GasPrice(0x2abaa5cb),
+            price_in_wei: GasPrice(0x92e4),
+        },
+        timestamp: BlockTimestamp(1749388551),
+        sequencer_address: contract_address!(
+            "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8"
+        ),
+    };
+
+    let transactions = vec![
+        CendePreconfirmedTransaction::Invoke(IntermediateInvokeTransaction {
+            resource_bounds: Some(
+                AllResourceBounds {
+                    l1_gas: ResourceBounds {
+                        max_amount: GasAmount(0x0),
+                        max_price_per_unit: GasPrice(0x271ea18955a7),
+                    },
+                    l2_gas: ResourceBounds {
+                        max_amount: GasAmount(0x5db4c0),
+                        max_price_per_unit: GasPrice(0x4017f8b0),
+                    },
+                    l1_data_gas: ResourceBounds {
+                        max_amount: GasAmount(0x9c0),
+                        max_price_per_unit: GasPrice(0xf15),
+                    },
+                }
+                .into(),
+            ),
+            tip: Some(Tip(0)),
+            calldata: Calldata(Arc::new(
+                [
+                    "0x1",
+                    "0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958",
+                    "0x243435488ed6761090a70745a2ef8b3e468b80802ab98aeb7a3099f101c2219",
+                    "0x2",
+                    "0x20000000000021d41",
+                    "0x0",
+                ]
+                .into_iter()
+                .map(|s| felt!(s))
+                .collect::<Vec<_>>(),
+            )),
+            sender_address: contract_address!(
+                "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11"
+            ),
+            nonce: Some(nonce!(0x8874)),
+            signature: TransactionSignature(Arc::new(
+                [
+                    "0xd54f34b32dfd64f10d45da9d86dc0c7a07f3b9424ba14bd05cbeaf375700a0",
+                    "0x156827c4e6a6e89729a9cf1d8d5308aeba7b51bb84b07d41338ef6b566969d7",
+                ]
+                .into_iter()
+                .map(|s| felt!(s))
+                .collect::<Vec<_>>(),
+            )),
+            nonce_data_availability_mode: Some(ReservedDataAvailabilityMode::Reserved),
+            fee_data_availability_mode: Some(ReservedDataAvailabilityMode::Reserved),
+            paymaster_data: Some(PaymasterData(vec![])),
+            account_deployment_data: Some(AccountDeploymentData(vec![])),
+            transaction_hash: TransactionHash(felt!(
"0xa07cd0a966655216edb9bf3982e8c3ee6321c7fb7a218c5c25e30c462f3f39" + )), + version: TransactionVersion::THREE, + // Irrelevant for V3 InvokeTransaction. + entry_point_selector: None, + max_fee: None, + }), + CendePreconfirmedTransaction::Invoke(IntermediateInvokeTransaction { + resource_bounds: Some( + AllResourceBounds { + l1_gas: ResourceBounds { + max_amount: GasAmount(0x0), + max_price_per_unit: GasPrice(0x271ea18955a7), + }, + l2_gas: ResourceBounds { + max_amount: GasAmount(0x772c20), + max_price_per_unit: GasPrice(0x4017f8b0), + }, + l1_data_gas: ResourceBounds { + max_amount: GasAmount(0xde0), + max_price_per_unit: GasPrice(0xf15), + }, + } + .into(), + ), + tip: Some(Tip(0)), + calldata: Calldata(Arc::new( + [ + "0x1", + "0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + "0x15d8c7e20459a3fe496afb46165f48dd1a7b11ab1f7d0c320d54994417875fb", + "0x8", + "0x1", + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "0x8828d6f2716ca20000", + "0x8a8e4b1a3d8000", + "0x0", + "0x1", + "0x2", + "0x0", + ] + .into_iter() + .map(|s| felt!(s)) + .collect::>(), + )), + sender_address: contract_address!( + "0x13a11a9c420fdfd1edc5654f14f83e18fe39567e79fcc75ff71a10ee236a672" + ), + nonce: Some(nonce!(0x5c00)), + signature: TransactionSignature(Arc::new( + [ + "0x50c93a5542911159e32cc32c3d7ff53ad974f287c298dcad1ce3510a93c90e7", + "0x406632a826cb9559043166c08050cb8f9a3bcd1da31141faed7677d8d01423c", + ] + .into_iter() + .map(|s| felt!(s)) + .collect::>(), + )), + nonce_data_availability_mode: Some(ReservedDataAvailabilityMode::Reserved), + fee_data_availability_mode: Some(ReservedDataAvailabilityMode::Reserved), + paymaster_data: Some(PaymasterData(vec![])), + account_deployment_data: Some(AccountDeploymentData(vec![])), + transaction_hash: TransactionHash(felt!( + "0x22b8c1f3b42ed236c70dafe3ff431d68f360a495140f2f810de6f1f5b8bc75a" + )), + version: TransactionVersion::THREE, + // Irrelevant for V3 InvokeTransaction. 
+ entry_point_selector: None, + max_fee: None, + }), + ]; + + let transaction_receipts = vec![Some(StarknetClientTransactionReceipt { + transaction_index: TransactionOffsetInBlock(16), + transaction_hash: TransactionHash(felt!( + "0xa07cd0a966655216edb9bf3982e8c3ee6321c7fb7a218c5c25e30c462f3f39" + )), + l1_to_l2_consumed_message: None, + l2_to_l1_messages: vec![], + events: vec![ + event_from_serialized_fields( + "0x53c91253bc9682c04929ca02ed00b3e423f6710d2ee7e0d5ebb06f3ecf368a8", + vec!["0x99cd8bde557814842a3121e8ddfd433a539b8c9f14bf31ebf108d12e6196e9"], + vec![ + "0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "0x5f3dbd0", + "0x0", + ], + ), + event_from_serialized_fields( + "0x67e7555f9ff00f5c4e9b353ad1f400e2274964ea0942483fae97363fd5d7958", + vec!["0x6f34037cb7ac4cb3f26daa25459d07e0b4e0bee0945d2ef381ebb4df7385ed"], + vec![ + "0x2", + "0x20000000000021d41", + "0x0", + "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "0x0", + "0x2", + "0x28fb9b8a8a53500000", + "0x0", + "0x28fb9b8a8a53500000", + "0x0", + "0x1d5504006d44000", + "0x0", + "0x68458cea", + "0x0", + "0x0", + "0x0", + "0x0", + "0x1", + ], + ), + event_from_serialized_fields( + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d", + vec!["0x99cd8bde557814842a3121e8ddfd433a539b8c9f14bf31ebf108d12e6196e9"], + vec![ + "0x109f2f48abcfcaec8c12efdc8ac9836283a556e9497315c53795db96ef6ed11", + "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8", + "0xa6d5102756880", + "0x0", + ], + ), + ], + execution_resources: CendeClientExecutionResources { + n_steps: 38987, + builtin_instance_counter: [ + (Builtin::EcOp, 3), + (Builtin::RangeCheck, 1824), + (Builtin::Pedersen, 42), + (Builtin::Poseidon, 27), + ] + .into_iter() + .collect(), + n_memory_holes: 0, + data_availability: Some(GasVector { + l1_gas: 0_u64.into(), + l1_data_gas: 1664_u64.into(), + l2_gas: 0_u64.into(), + }), + total_gas_consumed: Some(GasVector { + l1_gas: 0_u64.into(), + l1_data_gas: 1664_u64.into(), + l2_gas: 4094080_u64.into(), + }), + }, + actual_fee: Fee(0xa6d5102756880), + execution_status: TransactionExecutionStatus::Succeeded, + revert_error: None, + })]; + + let transaction_state_diffs = vec![Some(StateDiff { + storage_diffs: indexmap! { + 0x1u64.into() => vec![StorageEntry{ + key: 0xc5c06_u64.into(), + value: felt!("0x175268db82ce4da6eeff90d2cbe6e4516fa4d2fac6b9b2ee25979be220a4b2f"), + }], + contract_address!("0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d") => vec! [ + StorageEntry { + key: storage_key!("0x3c204dd68b8e800b4f42e438d9ed4ccbba9f8e436518758cd36553715c1d6ab"), + value: felt!("0x1c285a1cb8b7a174010"), + }, + StorageEntry{ + key: storage_key!("0x5496768776e3db30053404f18067d81a6e06f5a2b0de326e21298fd9d569a9a"), + value: felt!("0x1c8d6754f3cb5adc5414"), + }], + }, + nonces: indexmap! 
{
+            contract_address!("0x352057331d5ad77465315d30b98135ddb815b86aa485d659dfeef59a904f88d") => {
+                nonce!(0x24ef47)
+            }
+        },
+        ..Default::default()
+    })];
+
+    CendePreconfirmedBlock { metadata, transactions, transaction_receipts, transaction_state_diffs }
+}
+
+#[rstest]
+#[case::compressed_state_diff(central_compressed_state_diff(), CENTRAL_STATE_DIFF_JSON_PATH)]
+#[case::state_diff(central_state_diff(), CENTRAL_STATE_DIFF_JSON_PATH)]
+#[case::invoke_tx(central_invoke_tx(), CENTRAL_INVOKE_TX_JSON_PATH)]
+#[case::deploy_account_tx(central_deploy_account_tx(), CENTRAL_DEPLOY_ACCOUNT_TX_JSON_PATH)]
+#[case::declare_tx(central_declare_tx(), CENTRAL_DECLARE_TX_JSON_PATH)]
+#[case::l1_handler_tx(central_l1_handler_tx(), CENTRAL_L1_HANDLER_TX_JSON_PATH)]
+#[case::bouncer_weights(central_bouncer_weights(), CENTRAL_BOUNCER_WEIGHTS_JSON_PATH)]
+#[case::fee_market_info(central_fee_market_info(), CENTRAL_FEE_MARKET_INFO_JSON_PATH)]
+#[case::sierra_contract_class(
+    central_sierra_contract_class(),
+    CENTRAL_SIERRA_CONTRACT_CLASS_JSON_PATH
+)]
+#[case::optionals_are_some(central_casm_contract_class(), CENTRAL_CASM_CONTRACT_CLASS_JSON_PATH)]
+#[case::optionals_are_none(
+    central_casm_contract_class_default_optional_fields(),
+    CENTRAL_CASM_CONTRACT_CLASS_DEFAULT_OPTIONALS_JSON_PATH
+)]
+#[case::transaction_execution_info(
+    central_transaction_execution_info(),
+    CENTRAL_TRANSACTION_EXECUTION_INFO_JSON_PATH
+)]
+#[case::transaction_execution_info_reverted(
+    central_transaction_execution_info_reverted(),
+    CENTRAL_TRANSACTION_EXECUTION_INFO_REVERTED_JSON_PATH
+)]
+#[case::casm_hash_computation_data_sierra_gas(
+    central_casm_hash_computation_data(),
+    CENTRAL_CASM_HASH_COMPUTATION_DATA_JSON_PATH
+)]
+#[case::central_blob(central_blob(), CENTRAL_BLOB_JSON_PATH)]
+#[case::starknet_preconfirmed_block(
+    starknet_preconfirmed_block(),
+    CENTRAL_PRECONFIRMED_BLOCK_JSON_PATH
+)]
+fn serialize_central_objects(#[case] rust_obj: impl Serialize, #[case] python_json_path: &str) {
+    let python_json = read_json_file(python_json_path);
+    let rust_json = serde_json::to_value(rust_obj).unwrap();
+
+    assert_json_eq(&rust_json, &python_json, "Json Comparison failed".to_string());
+}
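The test above is a cross-language snapshot check: each Rust object is serialized with serde and compared, as a `serde_json::Value`, against a fixture produced by the Python side. A minimal sketch of the same idea (toy struct; the real test reads its expected value from the JSON files listed in the cases):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct Weights {
    l1_gas: u64,
    n_txs: u64,
}

fn main() {
    let rust_json = serde_json::to_value(Weights { l1_gas: 8, n_txs: 2 }).unwrap();
    // In the real test this value comes from a Python-generated fixture file.
    let python_json: serde_json::Value =
        serde_json::from_str(r#"{"l1_gas": 8, "n_txs": 2}"#).unwrap();
    // Comparing `Value`s ignores key order and whitespace, unlike comparing strings.
    assert_eq!(rust_json, python_json);
}
```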
diff --git a/crates/apollo_consensus_orchestrator/src/cende/mod.rs b/crates/apollo_consensus_orchestrator/src/cende/mod.rs
new file mode 100644
index 00000000000..ca89897578e
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/cende/mod.rs
@@ -0,0 +1,344 @@
+#[cfg(test)]
+mod cende_test;
+mod central_objects;
+
+use std::collections::BTreeMap;
+use std::future::ready;
+use std::sync::Arc;
+
+use apollo_class_manager_types::{ClassManagerClientError, SharedClassManagerClient};
+use apollo_config::dumping::{ser_optional_param, ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use apollo_proc_macros::sequencer_latency_histogram;
+use async_trait::async_trait;
+use blockifier::bouncer::{BouncerWeights, CasmHashComputationData};
+use blockifier::state::cached_state::CommitmentStateDiff;
+use blockifier::transaction::objects::TransactionExecutionInfo;
+use central_objects::{
+    process_transactions,
+    CentralBlockInfo,
+    CentralBouncerWeights,
+    CentralCasmContractClassEntry,
+    CentralCasmHashComputationData,
+    CentralCompressedStateDiff,
+    CentralFeeMarketInfo,
+    CentralSierraContractClassEntry,
+    CentralStateDiff,
+    CentralTransactionWritten,
+};
+#[cfg(test)]
+use mockall::automock;
+use reqwest::{Client, RequestBuilder, Response};
+use serde::{Deserialize, Serialize};
+use shared_execution_objects::central_objects::CentralTransactionExecutionInfo;
+use starknet_api::block::{BlockInfo, BlockNumber, StarknetVersion};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::core::ClassHash;
+use starknet_api::state::ThinStateDiff;
+use tokio::sync::Mutex;
+use tokio::task::{self, JoinHandle};
+use tracing::{error, info, warn, Instrument};
+use url::Url;
+
+use crate::fee_market::FeeMarketInfo;
+use crate::metrics::{
+    record_write_failure,
+    CendeWriteFailureReason,
+    CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER,
+    CENDE_PREPARE_BLOB_FOR_NEXT_HEIGHT_LATENCY,
+    CENDE_WRITE_BLOB_SUCCESS,
+    CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY,
+};
+
+#[derive(thiserror::Error, Debug)]
+pub enum CendeAmbassadorError {
+    #[error(transparent)]
+    ClassManagerError(#[from] ClassManagerClientError),
+    #[error("Class of hash: {class_hash} not found")]
+    ClassNotFound { class_hash: ClassHash },
+    #[error(transparent)]
+    StarknetApiError(#[from] starknet_api::StarknetApiError),
+}
+
+pub type CendeAmbassadorResult<T> = Result<T, CendeAmbassadorError>;
+
+/// A chunk of all the data to write to Aerospike.
+#[derive(Debug, Serialize)]
+pub(crate) struct AerospikeBlob {
+    block_number: BlockNumber,
+    state_diff: CentralStateDiff,
+    // The batcher may return a `None` compressed state diff if it is disabled in the
+    // configuration.
+    compressed_state_diff: Option<CentralCompressedStateDiff>,
+    bouncer_weights: CentralBouncerWeights,
+    fee_market_info: CentralFeeMarketInfo,
+    transactions: Vec<CentralTransactionWritten>,
+    execution_infos: Vec<CentralTransactionExecutionInfo>,
+    contract_classes: Vec<CentralSierraContractClassEntry>,
+    compiled_classes: Vec<CentralCasmContractClassEntry>,
+    casm_hash_computation_data_sierra_gas: CentralCasmHashComputationData,
+    casm_hash_computation_data_proving_gas: CentralCasmHashComputationData,
+}
+
+#[cfg_attr(test, automock)]
+#[async_trait]
+pub trait CendeContext: Send + Sync {
+    /// Writes the previous height blob to Aerospike. Returns a handle whose output is a boolean
+    /// indicating whether the write was successful.
+    /// `current_height` is the height of the block that is built when calling this function.
+    fn write_prev_height_blob(&self, current_height: BlockNumber) -> JoinHandle<bool>;
+
+    // Prepares the previous height blob that will be written in the next height.
+    async fn prepare_blob_for_next_height(
+        &self,
+        blob_parameters: BlobParameters,
+    ) -> CendeAmbassadorResult<()>;
+}
+
+#[derive(Clone)]
+pub struct CendeAmbassador {
+    // TODO(dvir): consider creating an enum variant instead of the `Option`.
+    // `None` indicates that there is no blob to write, and therefore, the node can't be the
+    // proposer.
+    prev_height_blob: Arc<Mutex<Option<AerospikeBlob>>>,
+    url: Url,
+    client: Client,
+    skip_write_height: Option<BlockNumber>,
+    class_manager: SharedClassManagerClient,
+}
+
+/// The path for writing a blob in the Recorder.
+pub const RECORDER_WRITE_BLOB_PATH: &str = "/cende_recorder/write_blob";
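For orientation, this is roughly how a caller is expected to drive the trait: prepare the blob when a height is decided, then, while building the next height, await the handle returned by `write_prev_height_blob` before committing to propose. A hedged sketch (the `gate_on_prev_write` wrapper is illustrative, not part of this crate):

```rust
// Illustrative only: `cende` is any `CendeContext` implementor from this module.
async fn gate_on_prev_write(
    cende: &dyn CendeContext,
    current_height: starknet_api::block::BlockNumber,
) -> bool {
    let handle = cende.write_prev_height_blob(current_height);
    // The spawned task resolves to `true` only if the previous blob reached Aerospike
    // (or the height is configured as `skip_write_height`).
    handle.await.unwrap_or(false)
}
```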
+
+impl CendeAmbassador {
+    pub fn new(cende_config: CendeConfig, class_manager: SharedClassManagerClient) -> Self {
+        CendeAmbassador {
+            prev_height_blob: Arc::new(Mutex::new(None)),
+            url: cende_config
+                .recorder_url
+                .join(RECORDER_WRITE_BLOB_PATH)
+                .expect("Failed to join `RECORDER_WRITE_BLOB_PATH` with the Recorder URL"),
+            client: Client::new(),
+            skip_write_height: cende_config.skip_write_height,
+            class_manager,
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+pub struct CendeConfig {
+    pub recorder_url: Url,
+    pub skip_write_height: Option<BlockNumber>,
+}
+
+impl Default for CendeConfig {
+    fn default() -> Self {
+        CendeConfig {
+            recorder_url: "https://recorder_url"
+                .parse()
+                .expect("recorder_url must be a valid Recorder URL"),
+            skip_write_height: None,
+        }
+    }
+}
+
+impl SerializeConfig for CendeConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        let mut config = BTreeMap::from_iter([ser_param(
+            "recorder_url",
+            &self.recorder_url,
+            "The URL of the Pythonic cende_recorder",
+            ParamPrivacyInput::Private,
+        )]);
+        config.extend(ser_optional_param(
+            &self.skip_write_height,
+            BlockNumber(0),
+            "skip_write_height",
+            "A height that the consensus can skip writing to Aerospike. Needed for booting up (no \
+             previous height blob to write) or to handle extreme cases (all the nodes failed).",
+            ParamPrivacyInput::Public,
+        ));
+
+        config
+    }
+}
+
+#[async_trait]
+impl CendeContext for CendeAmbassador {
+    fn write_prev_height_blob(&self, current_height: BlockNumber) -> JoinHandle<bool> {
+        info!("Start writing to Aerospike previous height blob for height {current_height}.");
+
+        // TODO(dvir): consider returning a future that will be spawned in the context instead.
+        if self.skip_write_height == Some(current_height) {
+            info!(
+                "Height {current_height} is configured as the `skip_write_height`, meaning \
+                 consensus can send a proposal without writing to Aerospike. The blob that should \
+                 have been written here in a normal flow should already be in Aerospike. Not \
+                 writing the previous height blob to Aerospike!",
+            );
+            record_write_failure(CendeWriteFailureReason::SkipWriteHeight);
+            return tokio::spawn(ready(true));
+        }
+
+        let prev_height_blob = self.prev_height_blob.clone();
+        let request_builder = self.client.post(self.url.clone());
+
+        task::spawn(
+            async move {
+                // TODO(dvir): consider extracting the "should write blob" logic to a function.
+                let Some(ref blob): Option<AerospikeBlob> = *prev_height_blob.lock().await else {
+                    // This case happens when restarting the node; `prev_height_blob`'s initial
+                    // value is `None`.
+                    warn!("No blob to write to Aerospike.");
+                    record_write_failure(CendeWriteFailureReason::BlobNotAvailable);
+                    return false;
+                };
+
+                if blob.block_number.0 >= current_height.0 {
+                    panic!(
+                        "Blob block number is greater than or equal to the current height. That \
+                         means cende has a blob for a height that hasn't reached consensus."
+                    )
+                }
+
+                // Can happen in case the consensus got a block from the state sync and due to that
+                // did not update the cende ambassador in the `decision_reached` function.
+                if blob.block_number.0 + 1 != current_height.0 {
+                    warn!(
+                        "Mismatch between blob block number and height; can't write the blob to \
Aerospike. Blob block number {}, height {current_height}",
+                        blob.block_number
+                    );
+                    record_write_failure(CendeWriteFailureReason::HeightMismatch);
+                    return false;
+                }
+
+                info!("Writing blob to Aerospike.");
+                return send_write_blob(request_builder, blob).await;
+            }
+            .instrument(tracing::debug_span!("cende write_prev_height_blob height")),
+        )
+    }
+
+    #[sequencer_latency_histogram(CENDE_PREPARE_BLOB_FOR_NEXT_HEIGHT_LATENCY, false)]
+    async fn prepare_blob_for_next_height(
+        &self,
+        blob_parameters: BlobParameters,
+    ) -> CendeAmbassadorResult<()> {
+        // TODO(dvir): as an optimization, do the `into` and other preparation when writing to AS.
+        let block_number = blob_parameters.block_info.block_number;
+        *self.prev_height_blob.lock().await = Some(
+            AerospikeBlob::from_blob_parameters_and_class_manager(
+                blob_parameters,
+                self.class_manager.clone(),
+            )
+            .await?,
+        );
+        info!("Blob for block number {block_number} is ready.");
+        CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER.set_lossy(block_number.0);
+        Ok(())
+    }
+}
+
+#[sequencer_latency_histogram(CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY, false)]
+async fn send_write_blob(request_builder: RequestBuilder, blob: &AerospikeBlob) -> bool {
+    // TODO(dvir): use compression to reduce the size of the blob in the network.
+    match request_builder.json(blob).send().await {
+        Ok(response) => {
+            if response.status().is_success() {
+                info!(
+                    "Blob with block number {} and {} transactions was written to Aerospike \
+                     successfully.",
+                    blob.block_number,
+                    blob.transactions.len(),
+                );
+                print_write_blob_response(response).await;
+                CENDE_WRITE_BLOB_SUCCESS.increment(1);
+                true
+            } else {
+                warn!(
+                    "The recorder failed to write blob with block number {}. Status code: {}",
+                    blob.block_number,
+                    response.status(),
+                );
+                print_write_blob_response(response).await;
+                record_write_failure(CendeWriteFailureReason::CendeRecorderError);
+                false
+            }
+        }
+        Err(err) => {
+            // TODO(dvir): try to test this case.
+            warn!("Failed to send a request to the recorder. Error: {err}");
+            record_write_failure(CendeWriteFailureReason::CommunicationError);
+            false
+        }
+    }
+}
+
+async fn print_write_blob_response(response: Response) {
+    info!("write blob response status code: {}", response.status());
+    if let Ok(text) = response.text().await {
+        info!("write blob response text: {text}");
+    } else {
+        info!("Failed to get response text.");
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct BlobParameters {
+    pub(crate) block_info: BlockInfo,
+    pub(crate) state_diff: ThinStateDiff,
+    pub(crate) compressed_state_diff: Option<CommitmentStateDiff>,
+    pub(crate) bouncer_weights: BouncerWeights,
+    pub(crate) fee_market_info: FeeMarketInfo,
+    pub(crate) transactions: Vec<InternalConsensusTransaction>,
+    pub(crate) casm_hash_computation_data_sierra_gas: CasmHashComputationData,
+    pub(crate) casm_hash_computation_data_proving_gas: CasmHashComputationData,
+    // TODO(dvir): consider passing the execution_infos from the batcher as a string already
+    // serialized in the correct format by the batcher.
+    pub(crate) execution_infos: Vec<TransactionExecutionInfo>,
+}
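Taken together with the trait above, `BlobParameters` is the hand-off point of a two-phase protocol: at decision time the orchestrator packs everything the recorder needs and caches it; one height later the cached blob is flushed. A hedged sketch of the call order (field values elided; `params` stands for a fully populated `BlobParameters`):

```rust
// Illustrative call order, assuming `ambassador: CendeAmbassador` and a populated `params`.
async fn decision_reached_flow(
    ambassador: &CendeAmbassador,
    params: BlobParameters,
    next_height: starknet_api::block::BlockNumber,
) {
    // Phase 1 (height N): convert and cache the blob for the height just decided.
    ambassador.prepare_blob_for_next_height(params).await.expect("blob preparation failed");
    // Phase 2 (height N + 1): flush the cached blob before proposing the next block.
    let wrote = ambassador.write_prev_height_blob(next_height).await.unwrap_or(false);
    assert!(wrote, "previous height blob must be written before proposing");
}
```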
+
+impl AerospikeBlob {
+    async fn from_blob_parameters_and_class_manager(
+        blob_parameters: BlobParameters,
+        class_manager: SharedClassManagerClient,
+    ) -> CendeAmbassadorResult<Self> {
+        let block_number = blob_parameters.block_info.block_number;
+        let block_timestamp = blob_parameters.block_info.block_timestamp.0;
+
+        let block_info =
+            CentralBlockInfo::from((blob_parameters.block_info, StarknetVersion::LATEST));
+        let state_diff = CentralStateDiff::from((blob_parameters.state_diff, block_info.clone()));
+        let compressed_state_diff =
+            blob_parameters.compressed_state_diff.map(|compressed_state_diff| {
+                CentralStateDiff::from((compressed_state_diff, block_info))
+            });
+
+        let (central_transactions, contract_classes, compiled_classes) =
+            process_transactions(class_manager, blob_parameters.transactions, block_timestamp)
+                .await?;
+
+        let execution_infos = blob_parameters
+            .execution_infos
+            .into_iter()
+            .map(CentralTransactionExecutionInfo::from)
+            .collect();
+
+        Ok(AerospikeBlob {
+            block_number,
+            state_diff,
+            compressed_state_diff,
+            bouncer_weights: blob_parameters.bouncer_weights,
+            fee_market_info: blob_parameters.fee_market_info,
+            transactions: central_transactions,
+            execution_infos,
+            contract_classes,
+            compiled_classes,
+            casm_hash_computation_data_sierra_gas: blob_parameters
+                .casm_hash_computation_data_sierra_gas,
+            casm_hash_computation_data_proving_gas: blob_parameters
+                .casm_hash_computation_data_proving_gas,
+        })
+    }
+}
diff --git a/crates/apollo_consensus_orchestrator/src/config.rs b/crates/apollo_consensus_orchestrator/src/config.rs
new file mode 100644
index 00000000000..6f6aecf93b9
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/config.rs
@@ -0,0 +1,180 @@
+use std::collections::BTreeMap;
+use std::fmt::Debug;
+use std::time::Duration;
+
+use apollo_config::converters::deserialize_milliseconds_to_duration;
+use apollo_config::dumping::{ser_param, SerializeConfig};
+use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam};
+use serde::{Deserialize, Serialize};
+use starknet_api::core::{ChainId, ContractAddress};
+use validator::Validate;
+
+const GWEI_FACTOR: u128 = u128::pow(10, 9);
+const ETH_FACTOR: u128 = u128::pow(10, 18);
+
+/// Configuration for the Context struct.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Validate)]
+pub struct ContextConfig {
+    /// Buffer size for streaming outbound proposals.
+    pub proposal_buffer_size: usize,
+    /// The number of validators.
+    pub num_validators: u64,
+    /// The chain id of the Starknet chain.
+    pub chain_id: ChainId,
+    /// Maximum allowed deviation (seconds) of a proposed block's timestamp from the current time.
+    pub block_timestamp_window_seconds: u64,
+    /// The data availability mode, true: Blob, false: Calldata.
+    pub l1_da_mode: bool,
+    /// The address of the contract that builds the block.
+    pub builder_address: ContractAddress,
+    /// Safety margin in milliseconds to make sure that the batcher completes building the proposal
+    /// with enough time for the Fin to be checked by validators.
+    #[serde(deserialize_with = "deserialize_milliseconds_to_duration")]
+    pub build_proposal_margin_millis: Duration,
+    // When validating a proposal, the Context is responsible for timeout handling. The Batcher
+    // though has a timeout as a defensive measure to make sure the proposal doesn't live
+    // forever if the Context crashes or has a bug.
+    /// Safety margin in milliseconds to allow the batcher to successfully validate a proposal.
+ #[serde(deserialize_with = "deserialize_milliseconds_to_duration")] + pub validate_proposal_margin_millis: Duration, + /// The minimum L1 gas price in wei. + pub min_l1_gas_price_wei: u128, + /// The maximum L1 gas price in wei. + pub max_l1_gas_price_wei: u128, + /// The minimum L1 data gas price in wei. + pub min_l1_data_gas_price_wei: u128, + /// The maximum L1 data gas price in wei. + pub max_l1_data_gas_price_wei: u128, + /// Part per thousand of multiplicative factor to apply to the data gas price, to enable + /// fine-tuning of the price charged to end users. Commonly used to apply a discount due to + /// the blob's data being compressed. Can be used to raise the prices in case of blob + /// under-utilization. + pub l1_data_gas_price_multiplier_ppt: u128, + /// This additional gas is added to the L1 gas price. + pub l1_gas_tip_wei: u128, + /// If true, sets STRK gas price to its minimum price from the versioned constants. + pub constant_l2_gas_price: bool, +} + +impl SerializeConfig for ContextConfig { + fn dump(&self) -> BTreeMap { + BTreeMap::from_iter([ + ser_param( + "proposal_buffer_size", + &self.proposal_buffer_size, + "The buffer size for streaming outbound proposals.", + ParamPrivacyInput::Public, + ), + ser_param( + "num_validators", + &self.num_validators, + "The number of validators.", + ParamPrivacyInput::Public, + ), + ser_param( + "chain_id", + &self.chain_id, + "The chain id of the Starknet chain.", + ParamPrivacyInput::Public, + ), + ser_param( + "block_timestamp_window_seconds", + &self.block_timestamp_window_seconds, + "Maximum allowed deviation (seconds) of a proposed block's timestamp from the \ + current time.", + ParamPrivacyInput::Public, + ), + ser_param( + "l1_da_mode", + &self.l1_da_mode, + "The data availability mode, true: Blob, false: Calldata.", + ParamPrivacyInput::Public, + ), + ser_param( + "builder_address", + &self.builder_address, + "The address of the contract that builds the block.", + ParamPrivacyInput::Public, + ), + ser_param( + "build_proposal_margin_millis", + &self.build_proposal_margin_millis.as_millis(), + "Safety margin (in ms) to make sure that the batcher completes building the \ + proposal with enough time for the Fin to be checked by validators.", + ParamPrivacyInput::Public, + ), + ser_param( + "validate_proposal_margin_millis", + &self.validate_proposal_margin_millis.as_millis(), + "Safety margin (in ms) to make sure that consensus determines when to timeout \ + validating a proposal.", + ParamPrivacyInput::Public, + ), + ser_param( + "min_l1_gas_price_wei", + &self.min_l1_gas_price_wei, + "The minimum L1 gas price in wei.", + ParamPrivacyInput::Public, + ), + ser_param( + "max_l1_gas_price_wei", + &self.max_l1_gas_price_wei, + "The maximum L1 gas price in wei.", + ParamPrivacyInput::Public, + ), + ser_param( + "min_l1_data_gas_price_wei", + &self.min_l1_data_gas_price_wei, + "The minimum L1 data gas price in wei.", + ParamPrivacyInput::Public, + ), + ser_param( + "max_l1_data_gas_price_wei", + &self.max_l1_data_gas_price_wei, + "The maximum L1 data gas price in wei.", + ParamPrivacyInput::Public, + ), + ser_param( + "l1_data_gas_price_multiplier_ppt", + &self.l1_data_gas_price_multiplier_ppt, + "Part per thousand of multiplicative factor to apply to the data gas price, to \ + enable fine-tuning of the price charged to end users.", + ParamPrivacyInput::Public, + ), + ser_param( + "l1_gas_tip_wei", + &self.l1_gas_tip_wei, + "This additional gas is added to the L1 gas price.", + ParamPrivacyInput::Public, + ), + ser_param( + 
"constant_l2_gas_price", + &self.constant_l2_gas_price, + "If true, sets STRK gas price to its minimum price from the versioned constants.", + ParamPrivacyInput::Public, + ), + ]) + } +} + +impl Default for ContextConfig { + fn default() -> Self { + Self { + proposal_buffer_size: 100, + num_validators: 1, + chain_id: ChainId::Mainnet, + block_timestamp_window_seconds: 1, + l1_da_mode: true, + builder_address: ContractAddress::default(), + build_proposal_margin_millis: Duration::from_millis(1000), + validate_proposal_margin_millis: Duration::from_millis(10_000), + min_l1_gas_price_wei: GWEI_FACTOR, + max_l1_gas_price_wei: 200 * GWEI_FACTOR, + min_l1_data_gas_price_wei: 1, + max_l1_data_gas_price_wei: ETH_FACTOR, + l1_data_gas_price_multiplier_ppt: 135, + l1_gas_tip_wei: GWEI_FACTOR, + constant_l2_gas_price: false, + } + } +} diff --git a/crates/apollo_consensus_orchestrator/src/fee_market/mod.rs b/crates/apollo_consensus_orchestrator/src/fee_market/mod.rs new file mode 100644 index 00000000000..14d73db8920 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/fee_market/mod.rs @@ -0,0 +1,70 @@ +use std::cmp::max; + +use ethnum::U256; +use serde::Serialize; +use starknet_api::block::GasPrice; +use starknet_api::execution_resources::GasAmount; + +use crate::orchestrator_versioned_constants; + +#[cfg(test)] +mod test; + +/// Fee market information for the next block. +#[derive(Debug, Default, Serialize)] +pub struct FeeMarketInfo { + /// Total gas consumed in the current block. + pub l2_gas_consumed: GasAmount, + /// Gas price for the next block. + pub next_l2_gas_price: GasPrice, +} + +/// Calculate the base gas price for the next block according to EIP-1559. +/// +/// # Parameters +/// - `price`: The base gas price per unit (in fri) of the current block. +/// - `gas_used`: The total gas used in the current block. +/// - `gas_target`: The target gas usage per block (usually half of a block's gas limit). +pub fn calculate_next_base_gas_price( + price: GasPrice, + gas_used: GasAmount, + gas_target: GasAmount, +) -> GasPrice { + let versioned_constants = + orchestrator_versioned_constants::VersionedConstants::latest_constants(); + // Setting target to 50% of max block size balances price changes and prevents spikes. + assert_eq!( + gas_target, + versioned_constants.max_block_size.checked_factor_div(2).expect("Failed to divide by 2"), + "Gas target must be 50% of max block size to balance price changes." + ); + // A minimum gas price prevents precision loss. Additionally, a minimum gas price helps avoid + // extended periods of low pricing. + assert!( + price >= versioned_constants.min_gas_price, + "The gas price must be at least the minimum to prevent precision loss." + ); + + // Use U256 to avoid overflow, as multiplying a u128 by a u64 remains within U256 bounds. + let gas_delta = U256::from(gas_used.0.abs_diff(gas_target.0)); + let gas_target_u256 = U256::from(gas_target.0); + let price_u256 = U256::from(price.0); + + // Calculate price change by multiplying first, then dividing. This avoids the precision loss + // that occurs when dividing before multiplying. 
+    let denominator =
+        gas_target_u256 * U256::from(versioned_constants.gas_price_max_change_denominator);
+    let price_change = (price_u256 * gas_delta) / denominator;
+
+    let adjusted_price_u256 =
+        if gas_used > gas_target { price_u256 + price_change } else { price_u256 - price_change };
+
+    // Sanity check: ensure the direction of the change is correct.
+    assert!(
+        gas_used > gas_target && adjusted_price_u256 >= price_u256
+            || gas_used <= gas_target && adjusted_price_u256 <= price_u256
+    );
+
+    let adjusted_price: u128 = adjusted_price_u256.try_into().expect("Failed to convert to u128");
+    GasPrice(max(adjusted_price, versioned_constants.min_gas_price.0))
+}
diff --git a/crates/apollo_consensus_orchestrator/src/fee_market/test.rs b/crates/apollo_consensus_orchestrator/src/fee_market/test.rs
new file mode 100644
index 00000000000..ac9a703c224
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/fee_market/test.rs
@@ -0,0 +1,75 @@
+use std::sync::LazyLock;
+
+use starknet_api::block::GasPrice;
+use starknet_api::execution_resources::GasAmount;
+
+use crate::fee_market::calculate_next_base_gas_price;
+use crate::orchestrator_versioned_constants::VersionedConstants;
+
+static VERSIONED_CONSTANTS: LazyLock<&VersionedConstants> =
+    LazyLock::new(VersionedConstants::latest_constants);
+
+#[test]
+fn test_price_calculation_snapshot() {
+    // Setup: using realistic arbitrary values.
+    let init_price = GasPrice(30_000_000_000);
+    let max_block_size = VERSIONED_CONSTANTS.max_block_size;
+    let change_denominator = VERSIONED_CONSTANTS.gas_price_max_change_denominator;
+    let gas_target = max_block_size / 2;
+    let high_congestion_gas_used = GasAmount(max_block_size.0 * 3 / 4);
+    let low_congestion_gas_used = max_block_size / 4;
+    let stable_congestion_gas_used = gas_target;
+    // (30000000000 * 1 / 4 * max_block_size) / (0.5 * max_block_size * change_denominator)
+    let price_change = init_price.0 / (change_denominator * 2);
+
+    // Fixed expected output values.
+    let increased_price = GasPrice(init_price.0 + price_change);
+    let decreased_price = GasPrice(init_price.0 - price_change);
+
+    // Assert.
+    assert_eq!(
+        calculate_next_base_gas_price(init_price, high_congestion_gas_used, gas_target),
+        increased_price
+    );
+    assert_eq!(
+        calculate_next_base_gas_price(init_price, low_congestion_gas_used, gas_target),
+        decreased_price
+    );
+    assert_eq!(
+        calculate_next_base_gas_price(init_price, stable_congestion_gas_used, gas_target),
+        init_price
+    );
+}
+
+#[test]
+// This test ensures that the gas price calculation does not overflow with extreme values.
+fn test_gas_price_with_extreme_values() {
+    let max_block_size = VERSIONED_CONSTANTS.max_block_size;
+    let min_gas_price = VERSIONED_CONSTANTS.min_gas_price;
+    let gas_price_max_change_denominator = VERSIONED_CONSTANTS.gas_price_max_change_denominator;
+
+    let price = min_gas_price;
+    let gas_target = max_block_size / 2;
+    let gas_used = GasAmount(0);
+    assert_eq!(calculate_next_base_gas_price(price, gas_used, gas_target), min_gas_price);
+
+    let price = min_gas_price;
+    let gas_target = max_block_size / 2;
+    let gas_used = max_block_size;
+    assert!(calculate_next_base_gas_price(price, gas_used, gas_target) > min_gas_price);
+
+    let price = GasPrice(u128::from(u64::MAX));
+    let gas_target = max_block_size / 2;
+    let gas_used = GasAmount(0);
+    calculate_next_base_gas_price(price, gas_used, gas_target); // Should not panic.
+
+    // To avoid overflow when updating the price, the value is set below a threshold so that the
+    // new price does not exceed u64::MAX.
+    let max_u128 = u128::from(u64::MAX);
+    let calculated_price = GasPrice(
+        max_u128 * gas_price_max_change_denominator / (gas_price_max_change_denominator + 1),
+    );
+    let gas_target = max_block_size / 2;
+    let gas_used = max_block_size;
+    calculate_next_base_gas_price(calculated_price, gas_used, gas_target); // Should not panic.
+}
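As a concrete instance of the update rule the snapshot test pins down: the price moves by `price * |gas_used - target| / (target * k)`, where `k` is `gas_price_max_change_denominator`. A standalone sketch with illustrative numbers (k = 48 here is arbitrary, not the real constant):

```rust
// price' = price ± price * |gas_used - target| / (target * k)
fn next_price(price: u128, gas_used: u128, target: u128, k: u128) -> u128 {
    let delta = price * gas_used.abs_diff(target) / (target * k);
    if gas_used > target { price + delta } else { price - delta }
}

fn main() {
    // A block at 75% of capacity (target is 50%) raises the price by price / (2 * k):
    // 30 gwei + 30e9 / 96 = 30_312_500_000.
    assert_eq!(next_price(30_000_000_000, 75, 50, 48), 30_312_500_000);
    // A block exactly on target leaves the price unchanged.
    assert_eq!(next_price(30_000_000_000, 50, 50, 48), 30_000_000_000);
}
```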
diff --git a/crates/apollo_consensus_orchestrator/src/lib.rs b/crates/apollo_consensus_orchestrator/src/lib.rs
new file mode 100644
index 00000000000..c7a57d533a8
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/lib.rs
@@ -0,0 +1,33 @@
+#![warn(missing_docs)]
+//! An orchestrator for a Starknet node.
+//! Implements the consensus context - the interface for consensus to call out to the node.
+
+#[allow(missing_docs)]
+pub mod sequencer_consensus_context;
+
+#[allow(missing_docs)]
+pub mod build_proposal;
+
+#[allow(missing_docs)]
+pub mod validate_proposal;
+
+/// Centralized and decentralized communication types and functionality.
+#[allow(missing_docs)]
+pub mod cende;
+
+/// Fee market logic.
+pub mod fee_market;
+
+/// Consensus' versioned constants.
+pub mod orchestrator_versioned_constants;
+
+/// The orchestrator's configuration.
+pub mod config;
+
+#[allow(missing_docs)]
+pub mod metrics;
+
+pub(crate) mod utils;
+
+#[cfg(test)]
+pub(crate) mod test_utils;
diff --git a/crates/apollo_consensus_orchestrator/src/metrics.rs b/crates/apollo_consensus_orchestrator/src/metrics.rs
new file mode 100644
index 00000000000..4a6a071c278
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/metrics.rs
@@ -0,0 +1,58 @@
+use apollo_metrics::{define_metrics, generate_permutation_labels};
+use strum::{EnumVariantNames, VariantNames};
+use strum_macros::{EnumIter, IntoStaticStr};
+
+define_metrics!(
+    ConsensusOrchestrator => {
+        MetricGauge { CONSENSUS_NUM_BATCHES_IN_PROPOSAL, "consensus_num_batches_in_proposal", "The number of transaction batches in a valid proposal received" },
+        MetricGauge { CONSENSUS_NUM_TXS_IN_PROPOSAL, "consensus_num_txs_in_proposal", "The total number of individual transactions in a valid proposal received" },
+        MetricCounter { CONSENSUS_L1_GAS_MISMATCH, "consensus_l1_gas_mismatch", "The number of times the L1 gas in a proposal does not match the value expected by this validator", init = 0 },
+        MetricCounter { CONSENSUS_L1_DATA_GAS_MISMATCH, "consensus_l1_data_gas_mismatch", "The number of times the L1 data gas in a proposal does not match the value expected by this validator", init = 0 },
+        MetricGauge { CONSENSUS_L2_GAS_PRICE, "consensus_l2_gas_price", "The L2 gas price calculated in an accepted proposal" },
+        MetricCounter { CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR, "consensus_l1_gas_price_provider_error", "Number of times the context got an error when querying the L1 gas price provider", init = 0 },
+
+        // Cende metrics
+        MetricGauge { CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER, "cende_last_prepared_blob_block_number", "The blob block number that cende knows. That means the sequencer can be the proposer only if the current height is greater by one than this value." },
+        MetricHistogram { CENDE_PREPARE_BLOB_FOR_NEXT_HEIGHT_LATENCY, "cende_prepare_blob_for_next_height_latency", "The time it takes to prepare the blob for the next height, i.e., create the blob object." },
+        // TODO(dvir): consider differentiating the case where the blob was already written; that will prevent using the `sequencer_latency_histogram` attribute.
+        // TODO(dvir): add a counter for successful blob writes and failed blob writes.
+        MetricHistogram { CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY, "cende_write_prev_height_blob_latency", "Be careful with this metric: if the blob was already written by another request, the latency is much lower since writing to Aerospike is not needed." },
+        MetricCounter { CENDE_WRITE_BLOB_SUCCESS, "cende_write_blob_success", "The number of successful blob writes to Aerospike", init = 0 },
+        LabeledMetricCounter { CENDE_WRITE_BLOB_FAILURE, "cende_write_blob_failure", "The number of failed blob writes to Aerospike", init = 0, labels = CENDE_WRITE_BLOB_FAILURE_REASON },
+    }
+);
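The `LabeledMetricCounter` above fans a single counter out by failure reason, with the label values generated from the enum's snake_case variant names. A toy sketch of the same labeling idea without the `apollo_metrics`/`strum` machinery (names here are illustrative):

```rust
#[derive(Clone, Copy)]
enum WriteFailure {
    SkipWriteHeight,
    CommunicationError,
}

impl WriteFailure {
    // strum's `serialize_all = "snake_case"` derives strings like these automatically.
    fn label(self) -> &'static str {
        match self {
            WriteFailure::SkipWriteHeight => "skip_write_height",
            WriteFailure::CommunicationError => "communication_error",
        }
    }
}

fn record(reason: WriteFailure) {
    // Stand-in for CENDE_WRITE_BLOB_FAILURE.increment(1, &[(label_key, label_value)]).
    println!("cende_write_blob_failure{{cende_write_failure_reason=\"{}\"}} += 1", reason.label());
}

fn main() {
    record(WriteFailure::CommunicationError);
}
```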
+
+pub const LABEL_CENDE_FAILURE_REASON: &str = "cende_write_failure_reason";
+
+#[derive(IntoStaticStr, EnumIter, EnumVariantNames)]
+#[strum(serialize_all = "snake_case")]
+pub(crate) enum CendeWriteFailureReason {
+    SkipWriteHeight,
+    CommunicationError,
+    CendeRecorderError,
+    BlobNotAvailable,
+    HeightMismatch,
+}
+
+generate_permutation_labels! {
+    CENDE_WRITE_BLOB_FAILURE_REASON,
+    (LABEL_CENDE_FAILURE_REASON, CendeWriteFailureReason),
+}
+
+pub(crate) fn record_write_failure(reason: CendeWriteFailureReason) {
+    CENDE_WRITE_BLOB_FAILURE.increment(1, &[(LABEL_CENDE_FAILURE_REASON, reason.into())]);
+}
+
+pub(crate) fn register_metrics() {
+    CONSENSUS_NUM_BATCHES_IN_PROPOSAL.register();
+    CONSENSUS_NUM_TXS_IN_PROPOSAL.register();
+    CONSENSUS_L1_GAS_MISMATCH.register();
+    CONSENSUS_L1_DATA_GAS_MISMATCH.register();
+    CONSENSUS_L2_GAS_PRICE.register();
+    CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR.register();
+    CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER.register();
+    CENDE_PREPARE_BLOB_FOR_NEXT_HEIGHT_LATENCY.register();
+    CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY.register();
+    CENDE_WRITE_BLOB_SUCCESS.register();
+    CENDE_WRITE_BLOB_FAILURE.register();
+}
diff --git a/crates/apollo_consensus_orchestrator/src/orchestrator_versioned_constants.rs b/crates/apollo_consensus_orchestrator/src/orchestrator_versioned_constants.rs
new file mode 100644
index 00000000000..637407c3da5
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/orchestrator_versioned_constants.rs
@@ -0,0 +1,37 @@
+use serde::Deserialize;
+use starknet_api::block::{GasPrice, StarknetVersion};
+use starknet_api::define_versioned_constants;
+use starknet_api::execution_resources::GasAmount;
+use thiserror::Error;
+
+/// Versioned constants for the Consensus.
+#[derive(Clone, Deserialize)]
+pub struct VersionedConstants {
+    /// This is used to calculate the base gas price for the next block according to EIP-1559 and
+    /// serves as a sensitivity parameter that limits the maximum rate of change of the gas price
+    /// between consecutive blocks.
+    pub gas_price_max_change_denominator: u128,
+    /// The minimum gas price in fri.
+    pub min_gas_price: GasPrice,
+    /// The maximum block size in gas units.
+    pub max_block_size: GasAmount,
+    /// The target gas usage per block (usually half of a block's gas limit).
+    pub gas_target: GasAmount,
+    /// The margin for the eth to fri rate disagreement, expressed as a percentage (parts per
+    /// hundred).
+    pub l1_gas_price_margin_percent: u32,
+}
+
+define_versioned_constants!(
+    VersionedConstants,
+    VersionedConstantsError,
+    (V0_14_0, "../resources/orchestrator_versioned_constants_0_14_0.json"),
+);
+
+/// Error type for the Consensus' versioned constants.
+#[derive(Debug, Error)]
+pub enum VersionedConstantsError {
+    /// Invalid Starknet version.
+    #[error("Invalid Starknet version: {0}")]
+    InvalidStarknetVersion(StarknetVersion),
+}
diff --git a/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs b/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs
new file mode 100644
index 00000000000..d211b9d3ad6
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs
@@ -0,0 +1,766 @@
+//! Implementation of the ConsensusContext interface for running the sequencer.
+//!
+//! It connects to the Batcher, which is responsible for building/validating blocks.
+#[cfg(test)]
+#[path = "sequencer_consensus_context_test.rs"]
+mod sequencer_consensus_context_test;
+
+use std::cmp::max;
+use std::collections::BTreeMap;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use apollo_batcher_types::batcher_types::{
+    DecisionReachedInput,
+    DecisionReachedResponse,
+    ProposalId,
+    StartHeightInput,
+};
+use apollo_batcher_types::communication::BatcherClient;
+use apollo_class_manager_types::transaction_converter::TransactionConverterTrait;
+use apollo_consensus::types::{
+    ConsensusContext,
+    ConsensusError,
+    ProposalCommitment,
+    Round,
+    ValidatorId,
+};
+use apollo_l1_gas_price_types::{EthToStrkOracleClientTrait, L1GasPriceProviderClient};
+use apollo_network::network_manager::{BroadcastTopicClient, BroadcastTopicClientTrait};
+use apollo_protobuf::consensus::{
+    ConsensusBlockInfo,
+    HeightAndRound,
+    ProposalFin,
+    ProposalInit,
+    ProposalPart,
+    TransactionBatch,
+    Vote,
+    DEFAULT_VALIDATOR_ID,
+};
+use apollo_state_sync_types::communication::StateSyncClient;
+use apollo_state_sync_types::state_sync_types::SyncBlock;
+use apollo_time::time::Clock;
+use async_trait::async_trait;
+use futures::channel::{mpsc, oneshot};
+use futures::SinkExt;
+use num_rational::Ratio;
+use starknet_api::block::{
+    BlockHeaderWithoutHash,
+    BlockNumber,
+    BlockTimestamp,
+    GasPrice,
+    GasPricePerToken,
+    WEI_PER_ETH,
+};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::core::SequencerContractAddress;
+use starknet_api::data_availability::L1DataAvailabilityMode;
+use starknet_api::transaction::TransactionHash;
+use tokio::task::JoinHandle;
+use tokio_util::sync::CancellationToken;
+use tokio_util::task::AbortOnDropHandle;
+use tracing::{error, error_span, info, instrument, trace, warn, Instrument};
+
+use crate::build_proposal::{build_proposal, BuildProposalError, ProposalBuildArguments};
+use crate::cende::{BlobParameters, CendeContext};
+use crate::config::ContextConfig;
+use crate::fee_market::{calculate_next_base_gas_price, FeeMarketInfo};
+use crate::metrics::{register_metrics, CONSENSUS_L2_GAS_PRICE};
+use crate::orchestrator_versioned_constants::VersionedConstants;
+use crate::utils::{convert_to_sn_api_block_info, GasPriceParams, StreamSender};
+use crate::validate_proposal::{
+    validate_proposal,
+    BlockInfoValidation,
+    ProposalValidateArguments,
+    ValidateProposalError,
+};
+
+type ValidationParams = (BlockNumber, ValidatorId, Duration, mpsc::Receiver<ProposalPart>);
+
+type HeightToIdToContent = BTreeMap<
+    BlockNumber,
+    BTreeMap<
+        ProposalCommitment,
+        (ConsensusBlockInfo, Vec<Vec<InternalConsensusTransaction>>, ProposalId),
+    >,
+>;
+
+pub(crate) struct BuiltProposals {
+    // {height: {proposal_commitment: (block_info, content, proposal_id)}}
+    // Note that multiple proposal IDs can be associated with the same content, but we only need
+    // to store one of them.
+    //
+    // The transactions are stored as a vector of batches (as returned from the batcher) and not
+    // flattened. This is since we might need to repropose, in which case we need to send the
+    // transactions in batches.
+    data: HeightToIdToContent,
+}
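Concretely, the nested map keyed by height and then by commitment supports the two operations the context needs: logarithmic insert/lookup per round, and pruning every height at or below a decision in one pass. A toy sketch with simplified key and value types:

```rust
use std::collections::BTreeMap;

fn main() {
    // height -> (proposal commitment -> content)
    let mut proposals: BTreeMap<u64, BTreeMap<&'static str, &'static str>> = BTreeMap::new();
    proposals.entry(10).or_default().insert("0xabc", "batched txs");
    proposals.entry(11).or_default().insert("0xdef", "batched txs");

    // Mirrors remove_proposals_below_or_at_height(&BlockNumber(10)).
    proposals.retain(|&height, _| height > 10);
    assert!(proposals.contains_key(&11) && !proposals.contains_key(&10));
}
```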
+
+impl BuiltProposals {
+    pub fn new() -> Self {
+        Self { data: HeightToIdToContent::default() }
+    }
+
+    fn get_proposal(
+        &self,
+        height: &BlockNumber,
+        commitment: &ProposalCommitment,
+    ) -> &(ConsensusBlockInfo, Vec<Vec<InternalConsensusTransaction>>, ProposalId) {
+        self.data
+            .get(height)
+            .unwrap_or_else(|| panic!("No proposals found for height {height}"))
+            .get(commitment)
+            .unwrap_or_else(|| panic!("No proposal found for height {height} and id {commitment}"))
+    }
+
+    fn remove_proposals_below_or_at_height(&mut self, height: &BlockNumber) {
+        self.data.retain(|&h, _| h > *height);
+    }
+
+    pub(crate) fn insert_proposal_for_height(
+        &mut self,
+        height: &BlockNumber,
+        proposal_commitment: &ProposalCommitment,
+        block_info: ConsensusBlockInfo,
+        transactions: Vec<Vec<InternalConsensusTransaction>>,
+        proposal_id: &ProposalId,
+    ) {
+        self.data
+            .entry(*height)
+            .or_default()
+            .insert(*proposal_commitment, (block_info, transactions, *proposal_id));
+    }
+}
+
+pub struct SequencerConsensusContext {
+    config: ContextConfig,
+    deps: SequencerConsensusContextDeps,
+    validators: Vec<ValidatorId>,
+    // Proposal building/validating returns immediately, leaving the actual processing to a spawned
+    // task. The spawned task processes the proposal asynchronously and updates the
+    // valid_proposals map upon completion, ensuring consistency across tasks.
+    valid_proposals: Arc<Mutex<BuiltProposals>>,
+    // Used to generate unique proposal IDs across the lifetime of the context.
+    // TODO(matan): Consider robustness in case consensus can restart without the Batcher
+    // restarting.
+    proposal_id: u64,
+    current_height: Option<BlockNumber>,
+    current_round: Round,
+    // The active proposal refers to the proposal being validated at the current height/round.
+    // Building proposals are not tracked as active, as consensus can't move on to the next
+    // height/round until building is done. Context only works on proposals for the
+    // current round.
+    active_proposal: Option<(CancellationToken, JoinHandle<()>)>,
+    // Stores proposals for future rounds until the round is reached.
+    queued_proposals: BTreeMap<Round, (ValidationParams, oneshot::Sender<ProposalCommitment>)>,
+    l2_gas_price: GasPrice,
+    l1_da_mode: L1DataAvailabilityMode,
+    previous_block_info: Option<ConsensusBlockInfo>,
+}
+
+#[derive(Clone)]
+pub struct SequencerConsensusContextDeps {
+    pub transaction_converter: Arc<dyn TransactionConverterTrait>,
+    pub state_sync_client: Arc<dyn StateSyncClient>,
+    pub batcher: Arc<dyn BatcherClient>,
+    pub cende_ambassador: Arc<dyn CendeContext>,
+    pub eth_to_strk_oracle_client: Arc<dyn EthToStrkOracleClientTrait>,
+    pub l1_gas_price_provider: Arc<dyn L1GasPriceProviderClient>,
+    /// Use DefaultClock if you don't want to inject timestamps.
+    pub clock: Arc<dyn Clock>,
+    // Used to initiate new outbound proposal streams.
+    pub outbound_proposal_sender: mpsc::Sender<(HeightAndRound, mpsc::Receiver<ProposalPart>)>,
+    // Used to broadcast votes to other consensus nodes.
+    pub vote_broadcast_client: BroadcastTopicClient<Vote>,
+}
+
+impl SequencerConsensusContext {
+    pub fn new(config: ContextConfig, deps: SequencerConsensusContextDeps) -> Self {
+        register_metrics();
+        let num_validators = config.num_validators;
+        let l1_da_mode = if config.l1_da_mode {
+            L1DataAvailabilityMode::Blob
+        } else {
+            L1DataAvailabilityMode::Calldata
+        };
+        Self {
+            config,
+            deps,
+            // TODO(Matan): Set the actual validator IDs (contract addresses).
+            validators: (0..num_validators)
+                .map(|i| ValidatorId::from(DEFAULT_VALIDATOR_ID + i))
+                .collect(),
+            valid_proposals: Arc::new(Mutex::new(BuiltProposals::new())),
+            proposal_id: 0,
+            current_height: None,
+            current_round: 0,
+            active_proposal: None,
+            queued_proposals: BTreeMap::new(),
+            l2_gas_price: VersionedConstants::latest_constants().min_gas_price,
+            l1_da_mode,
+            previous_block_info: None,
+        }
+    }
+
+    async fn start_stream(&mut self, stream_id: HeightAndRound) -> StreamSender {
+        let (proposal_sender, proposal_receiver) = mpsc::channel(self.config.proposal_buffer_size);
+        self.deps
+            .outbound_proposal_sender
+            .send((stream_id, proposal_receiver))
+            .await
+            .expect("Failed to send proposal receiver");
+        StreamSender { proposal_sender }
+    }
+}
+
+#[async_trait]
+impl ConsensusContext for SequencerConsensusContext {
+    type ProposalPart = ProposalPart;
+
+    #[instrument(skip_all)]
+    async fn build_proposal(
+        &mut self,
+        proposal_init: ProposalInit,
+        timeout: Duration,
+    ) -> oneshot::Receiver<ProposalCommitment> {
+        // TODO(dvir): consider starting to write the blob in `decision_reached`, to reduce
+        // transaction finality time. Use this option only for one special sequencer that is in
+        // the same cluster as the recorder.
+        let cende_write_success = AbortOnDropHandle::new(
+            self.deps.cende_ambassador.write_prev_height_blob(proposal_init.height),
+        );
+        // Handles interrupting an active proposal from a previous height/round.
+        self.set_height_and_round(proposal_init.height, proposal_init.round).await;
+        assert!(
+            self.active_proposal.is_none(),
+            "We should not have an existing active proposal for the (height, round) when \
+             build_proposal is called."
+        );
+
+        let (fin_sender, fin_receiver) = oneshot::channel();
+        let proposal_id = ProposalId(self.proposal_id);
+        self.proposal_id += 1;
+        assert!(timeout > self.config.build_proposal_margin_millis);
+        let stream_id = HeightAndRound(proposal_init.height.0, proposal_init.round);
+        let stream_sender = self.start_stream(stream_id).await;
+
+        info!(?proposal_init, ?timeout, %proposal_id, "Building proposal");
+        let cancel_token = CancellationToken::new();
+        let cancel_token_clone = cancel_token.clone();
+        let gas_price_params = GasPriceParams {
+            min_l1_gas_price_wei: GasPrice(self.config.min_l1_gas_price_wei),
+            max_l1_gas_price_wei: GasPrice(self.config.max_l1_gas_price_wei),
+            min_l1_data_gas_price_wei: GasPrice(self.config.min_l1_data_gas_price_wei),
+            max_l1_data_gas_price_wei: GasPrice(self.config.max_l1_data_gas_price_wei),
+            l1_data_gas_price_multiplier: Ratio::new(
+                self.config.l1_data_gas_price_multiplier_ppt,
+                1000,
+            ),
+            l1_gas_tip_wei: GasPrice(self.config.l1_gas_tip_wei),
+        };
+        let args = ProposalBuildArguments {
+            deps: self.deps.clone(),
+            batcher_timeout: timeout - self.config.build_proposal_margin_millis,
+            proposal_init,
+            l1_da_mode: self.l1_da_mode,
+            stream_sender,
+            gas_price_params,
+            valid_proposals: Arc::clone(&self.valid_proposals),
+            proposal_id,
+            cende_write_success,
+            l2_gas_price: self.l2_gas_price,
+            builder_address: self.config.builder_address,
+            cancel_token,
+            previous_block_info: self.previous_block_info.clone(),
+            proposal_round: self.current_round,
+        };
+        let handle = tokio::spawn(
+            async move {
+                let res = build_proposal(args).await.map(|proposal_commitment| {
+                    fin_sender
+                        .send(proposal_commitment)
+                        .map_err(|_| BuildProposalError::SendError(proposal_commitment))?;
+                    Ok::<_, BuildProposalError>(proposal_commitment)
+                });
+                match res {
+                    Ok(proposal_commitment) => {
+                        info!(?proposal_id, ?proposal_commitment, "Proposal succeeded.");
+                    }
+                    Err(e) => {
+                        warn!("Proposal failed. Error: {e:?}");
+                    }
+                }
+            }
+            .instrument(
+                error_span!("consensus_build_proposal", %proposal_id, round = proposal_init.round),
+            ),
+        );
+        assert!(self.active_proposal.is_none());
+        self.active_proposal = Some((cancel_token_clone, handle));
+
+        fin_receiver
+    }
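The build path relies on the `CancellationToken`/`JoinHandle` pair stored in `active_proposal` to abandon work when a newer height or round arrives. A self-contained sketch of that interrupt pattern using the same `tokio_util` primitive:

```rust
use std::time::Duration;

use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let cancel_token = CancellationToken::new();
    let child = cancel_token.clone();
    let handle = tokio::spawn(async move {
        tokio::select! {
            _ = child.cancelled() => "interrupted",
            _ = tokio::time::sleep(Duration::from_secs(60)) => "built",
        }
    });

    cancel_token.cancel(); // e.g. consensus moved on to a newer round
    assert_eq!(handle.await.unwrap(), "interrupted");
}
```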
+                    }
+                    Err(e) => {
+                        warn!("Proposal failed. Error: {e:?}");
+                    }
+                }
+            }
+            .instrument(
+                error_span!("consensus_build_proposal", %proposal_id, round=proposal_init.round),
+            ),
+        );
+        assert!(self.active_proposal.is_none());
+        self.active_proposal = Some((cancel_token_clone, handle));
+
+        fin_receiver
+    }
+
+    #[instrument(skip_all)]
+    async fn validate_proposal(
+        &mut self,
+        proposal_init: ProposalInit,
+        timeout: Duration,
+        content_receiver: mpsc::Receiver<ProposalPart>,
+    ) -> oneshot::Receiver<ProposalCommitment> {
+        assert_eq!(Some(proposal_init.height), self.current_height);
+        let (fin_sender, fin_receiver) = oneshot::channel();
+        match proposal_init.round.cmp(&self.current_round) {
+            std::cmp::Ordering::Less => {
+                trace!("Dropping proposal from past round");
+                fin_receiver
+            }
+            std::cmp::Ordering::Greater => {
+                trace!("Queueing proposal for future round.");
+                self.queued_proposals.insert(
+                    proposal_init.round,
+                    (
+                        (proposal_init.height, proposal_init.proposer, timeout, content_receiver),
+                        fin_sender,
+                    ),
+                );
+                fin_receiver
+            }
+            std::cmp::Ordering::Equal => {
+                let block_info_validation = BlockInfoValidation {
+                    height: proposal_init.height,
+                    block_timestamp_window_seconds: self.config.block_timestamp_window_seconds,
+                    previous_block_info: self.previous_block_info.clone(),
+                    l1_da_mode: self.l1_da_mode,
+                    l2_gas_price_fri: self.l2_gas_price,
+                };
+                self.validate_current_round_proposal(
+                    block_info_validation,
+                    proposal_init.proposer,
+                    timeout,
+                    self.config.validate_proposal_margin_millis,
+                    content_receiver,
+                    fin_sender,
+                )
+                .await;
+                fin_receiver
+            }
+        }
+    }
+
+    async fn repropose(&mut self, id: ProposalCommitment, init: ProposalInit) {
+        info!(?id, ?init, "Reproposing.");
+        let height = init.height;
+        let (block_info, txs, _) = self
+            .valid_proposals
+            .lock()
+            .expect("Lock on active proposals was poisoned due to a previous panic")
+            .get_proposal(&height, &id)
+            .clone();
+
+        let transaction_converter = self.deps.transaction_converter.clone();
+        let mut stream_sender = self.start_stream(HeightAndRound(height.0, init.round)).await;
+        tokio::spawn(
+            async move {
+                stream_sender
+                    .send(ProposalPart::Init(init))
+                    .await
+                    .expect("Failed to send proposal init");
+                stream_sender
+                    .send(ProposalPart::BlockInfo(block_info.clone()))
+                    .await
+                    .expect("Failed to send block info");
+                let mut n_executed_txs: usize = 0;
+                for batch in txs.iter() {
+                    let transactions = futures::future::join_all(batch.iter().map(|tx| {
+                        transaction_converter
+                            .convert_internal_consensus_tx_to_consensus_tx(tx.clone())
+                    }))
+                    .await
+                    .into_iter()
+                    .collect::<Result<Vec<_>, _>>()
+                    .expect("Failed converting transaction during repropose");
+
+                    stream_sender
+                        .send(ProposalPart::Transactions(TransactionBatch { transactions }))
+                        .await
+                        .expect("Failed to broadcast proposal content");
+                    n_executed_txs += batch.len();
+                }
+                stream_sender
+                    .send(ProposalPart::ExecutedTransactionCount(
+                        n_executed_txs
+                            .try_into()
+                            .expect("Number of executed transactions should fit in u64"),
+                    ))
+                    .await
+                    .expect("Failed to broadcast executed transaction count");
+                stream_sender
+                    .send(ProposalPart::Fin(ProposalFin { proposal_commitment: id }))
+                    .await
+                    .expect("Failed to broadcast proposal fin");
+            }
+            .instrument(error_span!("consensus_repropose", round = init.round)),
+        );
+    }
+
+    async fn validators(&self, _height: BlockNumber) -> Vec<ValidatorId> {
+        self.validators.clone()
+    }
+
+    fn proposer(&self, height: BlockNumber, round: Round) -> ValidatorId {
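+        // Round-robin selection over the validator list: index = (height + round) % len.
+        // For example (illustrative numbers): with 4 validators, height 10 and round 2 select
+        // validators[(10 + 2) % 4] = validators[0].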
+        let height: usize = height.0.try_into().expect("Cannot convert to usize");
+        let round: usize = round.try_into().expect("Cannot convert to usize");
+        *self
+            .validators
+            .get((height + round) % self.validators.len())
+            .expect("There should be at least one validator")
+    }
+
+    async fn broadcast(&mut self, message: Vote) -> Result<(), ConsensusError> {
+        trace!("Broadcasting message: {message:?}");
+        self.deps.vote_broadcast_client.broadcast_message(message).await?;
+        Ok(())
+    }
+
+    async fn decision_reached(
+        &mut self,
+        block: ProposalCommitment,
+        precommits: Vec<Vote>,
+    ) -> Result<(), ConsensusError> {
+        let height = precommits[0].height;
+        info!("Finished consensus for height: {height}. Agreed on block: {:#064x}", block.0);
+
+        self.interrupt_active_proposal().await;
+        let proposal_id;
+        let transactions;
+        let block_info;
+        {
+            let height = BlockNumber(height);
+            let mut proposals = self
+                .valid_proposals
+                .lock()
+                .expect("Lock on active proposals was poisoned due to a previous panic");
+            (block_info, transactions, proposal_id) =
+                proposals.get_proposal(&height, &block).clone();
+
+            proposals.remove_proposals_below_or_at_height(&height);
+        }
+
+        // TODO(dvir): return from the batcher's 'decision_reached' function the relevant data to
+        // build a blob.
+        let DecisionReachedResponse { state_diff, l2_gas_used, central_objects } = self
+            .deps
+            .batcher
+            .decision_reached(DecisionReachedInput { proposal_id })
+            .await
+            .expect("Failed to get state diff.");
+
+        // Remove transactions that were not accepted by the Batcher, so `transactions` and
+        // `central_objects.execution_infos` correspond to the same list of (only accepted)
+        // transactions.
+        let transactions: Vec<InternalConsensusTransaction> = transactions
+            .concat()
+            .into_iter()
+            .filter(|tx| central_objects.execution_infos.contains_key(&tx.tx_hash()))
+            .collect();
+
+        let gas_target = VersionedConstants::latest_constants().gas_target;
+        if self.config.constant_l2_gas_price {
+            self.l2_gas_price = VersionedConstants::latest_constants().min_gas_price;
+        } else {
+            self.l2_gas_price =
+                calculate_next_base_gas_price(self.l2_gas_price, l2_gas_used, gas_target);
+        }
+
+        let gas_price_u64 = u64::try_from(self.l2_gas_price.0).unwrap_or(u64::MAX);
+        CONSENSUS_L2_GAS_PRICE.set_lossy(gas_price_u64);
+
+        // The conversion should never fail if we already managed to reach a decision.
+        let cende_block_info = convert_to_sn_api_block_info(&block_info)?;
+        let l1_gas_price = GasPricePerToken {
+            price_in_fri: cende_block_info.gas_prices.strk_gas_prices.l1_gas_price.get(),
+            price_in_wei: cende_block_info.gas_prices.eth_gas_prices.l1_gas_price.get(),
+        };
+        let l1_data_gas_price = GasPricePerToken {
+            price_in_fri: cende_block_info.gas_prices.strk_gas_prices.l1_data_gas_price.get(),
+            price_in_wei: cende_block_info.gas_prices.eth_gas_prices.l1_data_gas_price.get(),
+        };
+        let l2_gas_price = GasPricePerToken {
+            price_in_fri: cende_block_info.gas_prices.strk_gas_prices.l2_gas_price.get(),
+            price_in_wei: cende_block_info.gas_prices.eth_gas_prices.l2_gas_price.get(),
+        };
+        let sequencer = SequencerContractAddress(block_info.builder);
+
+        let block_header_without_hash = BlockHeaderWithoutHash {
+            block_number: BlockNumber(height),
+            l1_gas_price,
+            l1_data_gas_price,
+            l2_gas_price,
+            l2_gas_consumed: l2_gas_used,
+            next_l2_gas_price: self.l2_gas_price,
+            sequencer,
+            timestamp: BlockTimestamp(block_info.timestamp),
+            l1_da_mode: block_info.l1_da_mode,
+            // TODO(guy.f): Figure out where/if to get the values below from and fill them.
+            ..Default::default()
+        };
+
+        // Divide the transaction hashes into L1Handler and RpcTransaction hashes.
+        let account_transaction_hashes = transactions
+            .iter()
+            .filter_map(|tx| match tx {
+                InternalConsensusTransaction::RpcTransaction(_) => Some(tx.tx_hash()),
+                _ => None,
+            })
+            .collect::<Vec<_>>();
+        let l1_transaction_hashes = transactions
+            .iter()
+            .filter_map(|tx| match tx {
+                InternalConsensusTransaction::L1Handler(_) => Some(tx.tx_hash()),
+                _ => None,
+            })
+            .collect::<Vec<_>>();
+
+        let sync_block = SyncBlock {
+            state_diff: state_diff.clone(),
+            account_transaction_hashes,
+            l1_transaction_hashes,
+            block_header_without_hash,
+        };
+        let state_sync_client = self.deps.state_sync_client.clone();
+        // `add_new_block` returns immediately; it doesn't wait for sync to fully process the
+        // block.
+        state_sync_client.add_new_block(sync_block).await.expect("Failed to add new block.");
+
+        // Strip the transaction hashes from `execution_infos`, since the blob version of
+        // `execution_infos` doesn't use them.
+        let stripped_execution_infos =
+            central_objects.execution_infos.into_iter().map(|(_, info)| info).collect();
+
+        // TODO(dvir): pass here real `BlobParameters` info.
+        // TODO(dvir): when passing here the correct `BlobParameters`, also test that
+        // `prepare_blob_for_next_height` is called with the correct parameters.
+        let _ = self
+            .deps
+            .cende_ambassador
+            .prepare_blob_for_next_height(BlobParameters {
+                block_info: cende_block_info,
+                state_diff,
+                compressed_state_diff: central_objects.compressed_state_diff,
+                transactions,
+                execution_infos: stripped_execution_infos,
+                bouncer_weights: central_objects.bouncer_weights,
+                casm_hash_computation_data_sierra_gas: central_objects
+                    .casm_hash_computation_data_sierra_gas,
+                casm_hash_computation_data_proving_gas: central_objects
+                    .casm_hash_computation_data_proving_gas,
+                fee_market_info: FeeMarketInfo {
+                    l2_gas_consumed: l2_gas_used,
+                    next_l2_gas_price: self.l2_gas_price,
+                },
+            })
+            .await
+            .inspect_err(|e| {
+                error!("Failed to prepare blob for next height: {e:?}");
+            });
+        self.previous_block_info = Some(block_info);
+        Ok(())
+    }
+
+    async fn try_sync(&mut self, height: BlockNumber) -> bool {
+        let sync_block = match self.deps.state_sync_client.get_block(height).await {
+            Err(e) => {
+                error!("Sync returned an error: {e:?}");
+                return false;
+            }
+            Ok(None) => return false,
+            Ok(Some(block)) => block,
+        };
+        // May be default for blocks older than 0.14.0; ensure the min gas price is met.
+        self.l2_gas_price = max(
+            sync_block.block_header_without_hash.next_l2_gas_price,
+            VersionedConstants::latest_constants().min_gas_price,
+        );
+        // TODO(Asmaa): validate starknet_version and parent_hash when they are stored.
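+        // Sanity-check the synced header before adopting it: the block number must match the
+        // requested height, and the timestamp must be monotonic w.r.t. the previous block and
+        // within the allowed future-drift window. Further below, the implied eth_to_fri_rate is
+        // rederived as price_in_fri * WEI_PER_ETH / price_in_wei; e.g. (illustrative numbers)
+        // 5e13 fri/gas * 1e18 wei/ETH / 2.5e10 wei/gas = 2e21 fri per ETH.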
+        let block_number = sync_block.block_header_without_hash.block_number;
+        let timestamp = sync_block.block_header_without_hash.timestamp;
+        let last_block_timestamp =
+            self.previous_block_info.as_ref().map_or(0, |info| info.timestamp);
+        let now: u64 = self.deps.clock.unix_now();
+        if !(block_number == height
+            && timestamp.0 >= last_block_timestamp
+            && timestamp.0 <= now + self.config.block_timestamp_window_seconds)
+        {
+            warn!(
+                "Invalid block info: expected block number {}, got {}, expected timestamp range \
+                 [{}, {}], got {}",
+                height,
+                block_number,
+                last_block_timestamp,
+                now + self.config.block_timestamp_window_seconds,
+                timestamp.0,
+            );
+            return false;
+        }
+        let eth_to_fri_rate = sync_block
+            .block_header_without_hash
+            .l1_gas_price
+            .price_in_fri
+            .checked_mul_u128(WEI_PER_ETH)
+            .expect("Gas price overflow")
+            .checked_div(sync_block.block_header_without_hash.l1_gas_price.price_in_wei.0)
+            .expect("Price in wei should be non-zero")
+            .0;
+        self.previous_block_info = Some(ConsensusBlockInfo {
+            height,
+            timestamp: timestamp.0,
+            builder: sync_block.block_header_without_hash.sequencer.0,
+            l1_da_mode: sync_block.block_header_without_hash.l1_da_mode,
+            l2_gas_price_fri: sync_block.block_header_without_hash.l2_gas_price.price_in_fri,
+            l1_gas_price_wei: sync_block.block_header_without_hash.l1_gas_price.price_in_wei,
+            l1_data_gas_price_wei: sync_block
+                .block_header_without_hash
+                .l1_data_gas_price
+                .price_in_wei,
+            eth_to_fri_rate,
+        });
+        self.interrupt_active_proposal().await;
+        self.deps.batcher.add_sync_block(sync_block).await.unwrap();
+        true
+    }
+
+    async fn set_height_and_round(&mut self, height: BlockNumber, round: Round) {
+        if self.current_height.map(|h| height > h).unwrap_or(true) {
+            self.current_height = Some(height);
+            assert_eq!(round, 0);
+            self.current_round = round;
+            self.queued_proposals.clear();
+            // The Batcher must be told when we begin to work on a new height. The implicit model is
+            // that consensus works on a given height until it is done (either a decision is reached
+            // or sync causes us to move on) and then moves on to a different height, never to
+            // return to the old height.
+            self.deps
+                .batcher
+                .start_height(StartHeightInput { height })
+                .await
+                .expect("Batcher should be ready to start the next height");
+            return;
+        }
+        assert_eq!(Some(height), self.current_height);
+        if round == self.current_round {
+            return;
+        }
+        assert!(round > self.current_round);
+        self.interrupt_active_proposal().await;
+        self.current_round = round;
+        let mut to_process = None;
+        while let Some(entry) = self.queued_proposals.first_entry() {
+            match self.current_round.cmp(entry.key()) {
+                std::cmp::Ordering::Less => {
+                    entry.remove();
+                }
+                std::cmp::Ordering::Equal => {
+                    to_process = Some(entry.remove());
+                    break;
+                }
+                std::cmp::Ordering::Greater => return,
+            }
+        }
+        // Validate the proposal for the current round, if one exists.
+        let Some(((height, validator, timeout, content), fin_sender)) = to_process else {
+            return;
+        };
+        let block_info_validation = BlockInfoValidation {
+            height,
+            block_timestamp_window_seconds: self.config.block_timestamp_window_seconds,
+            previous_block_info: self.previous_block_info.clone(),
+            l1_da_mode: self.l1_da_mode,
+            l2_gas_price_fri: self.l2_gas_price,
+        };
+        self.validate_current_round_proposal(
+            block_info_validation,
+            validator,
+            timeout,
+            self.config.validate_proposal_margin_millis,
+            content,
+            fin_sender,
+        )
+        .await;
+    }
+}
+
+impl SequencerConsensusContext {
+    async fn validate_current_round_proposal(
+        &mut self,
+        block_info_validation: BlockInfoValidation,
+        proposer: ValidatorId,
+        timeout: Duration,
+        batcher_timeout_margin: Duration,
+        content_receiver: mpsc::Receiver<ProposalPart>,
+        fin_sender: oneshot::Sender<ProposalCommitment>,
+    ) {
+        let proposal_id = ProposalId(self.proposal_id);
+        self.proposal_id += 1;
+        info!(?timeout, %proposal_id, %proposer, round=self.current_round, "Validating proposal.");
+
+        let cancel_token = CancellationToken::new();
+        let cancel_token_clone = cancel_token.clone();
+        let gas_price_params = GasPriceParams {
+            min_l1_gas_price_wei: GasPrice(self.config.min_l1_gas_price_wei),
+            max_l1_gas_price_wei: GasPrice(self.config.max_l1_gas_price_wei),
+            min_l1_data_gas_price_wei: GasPrice(self.config.min_l1_data_gas_price_wei),
+            max_l1_data_gas_price_wei: GasPrice(self.config.max_l1_data_gas_price_wei),
+            l1_data_gas_price_multiplier: Ratio::new(
+                self.config.l1_data_gas_price_multiplier_ppt,
+                1000,
+            ),
+            l1_gas_tip_wei: GasPrice(self.config.l1_gas_tip_wei),
+        };
+        let args = ProposalValidateArguments {
+            deps: self.deps.clone(),
+            block_info_validation,
+            proposal_id,
+            timeout,
+            batcher_timeout_margin,
+            valid_proposals: Arc::clone(&self.valid_proposals),
+            content_receiver,
+            gas_price_params,
+            cancel_token: cancel_token_clone,
+        };
+
+        let handle = tokio::spawn(
+            async move {
+                match validate_and_send(args, fin_sender).await {
+                    Ok(proposal_commitment) => {
+                        info!(?proposal_id, ?proposal_commitment, "Proposal succeeded.");
+                    }
+                    Err(e) => {
+                        warn!("Proposal failed. Error: {e:?}");
Error: {e:?}"); + } + } + } + .instrument( + error_span!("consensus_validate_proposal", %proposal_id, round=self.current_round), + ), + ); + self.active_proposal = Some((cancel_token, handle)); + } + + async fn interrupt_active_proposal(&mut self) { + if let Some((token, handle)) = self.active_proposal.take() { + token.cancel(); + handle.await.expect("Proposal task failed"); + } + } +} + +async fn validate_and_send( + args: ProposalValidateArguments, + fin_sender: oneshot::Sender, +) -> Result { + let proposal_commitment = validate_proposal(args).await?; + fin_sender + .send(proposal_commitment) + .map_err(|_| ValidateProposalError::SendError(proposal_commitment))?; + Ok(proposal_commitment) +} diff --git a/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context_test.rs b/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context_test.rs new file mode 100644 index 00000000000..556929317f2 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context_test.rs @@ -0,0 +1,826 @@ +use std::future::ready; +use std::sync::Arc; +use std::vec; + +use apollo_batcher_types::batcher_types::{CentralObjects, DecisionReachedResponse}; +use apollo_batcher_types::communication::BatcherClientError; +use apollo_batcher_types::errors::BatcherError; +use apollo_consensus::types::{ConsensusContext, Round}; +use apollo_l1_gas_price_types::errors::{ + EthToStrkOracleClientError, + L1GasPriceClientError, + L1GasPriceProviderError, +}; +use apollo_l1_gas_price_types::{ + MockEthToStrkOracleClientTrait, + MockL1GasPriceProviderClient, + PriceInfo, + DEFAULT_ETH_TO_FRI_RATE, +}; +use apollo_protobuf::consensus::{ProposalFin, ProposalInit, ProposalPart, TransactionBatch, Vote}; +use apollo_time::time::MockClock; +use chrono::{TimeZone, Utc}; +use futures::channel::mpsc; +use futures::channel::oneshot::Canceled; +use futures::future::pending; +use futures::{FutureExt, SinkExt, StreamExt}; +use metrics_exporter_prometheus::PrometheusBuilder; +use rstest::rstest; +use starknet_api::block::{ + BlockHash, + BlockNumber, + GasPrice, + TEMP_ETH_BLOB_GAS_FEE_IN_WEI, + TEMP_ETH_GAS_FEE_IN_WEI, +}; +use starknet_api::execution_resources::GasAmount; +use starknet_api::state::ThinStateDiff; + +use crate::cende::MockCendeContext; +use crate::config::ContextConfig; +use crate::metrics::CONSENSUS_L2_GAS_PRICE; +use crate::orchestrator_versioned_constants::VersionedConstants; +use crate::test_utils::{ + block_info, + create_test_and_network_deps, + ETH_TO_FRI_RATE, + INTERNAL_TX_BATCH, + STATE_DIFF_COMMITMENT, + TIMEOUT, + TX_BATCH, +}; + +#[tokio::test] +async fn cancelled_proposal_aborts() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_default_expectations(); + + deps.batcher.expect_propose_block().times(1).return_const(Ok(())); + deps.batcher.expect_start_height().times(1).return_const(Ok(())); + + let mut context = deps.build_context(); + let fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await; + + // Now we intrrupt the proposal and verify that the fin_receiever is dropped. + context.set_height_and_round(BlockNumber(0), 1).await; + + assert_eq!(fin_receiver.await, Err(Canceled)); +} + +#[tokio::test] +async fn validate_proposal_success() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_deps_for_validate(BlockNumber(0), INTERNAL_TX_BATCH.len()); + let mut context = deps.build_context(); + + // Initialize the context for a specific height, starting with round 0. 
+ context.set_height_and_round(BlockNumber(0), 0).await; + + let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + content_sender.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + content_sender + .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() })) + .await + .unwrap(); + content_sender + .send(ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap())) + .await + .unwrap(); + content_sender + .send(ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + })) + .await + .unwrap(); + let fin_receiver = + context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await; + content_sender.close_channel(); + assert_eq!(fin_receiver.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); +} + +#[tokio::test] +async fn dont_send_block_info() { + let (mut deps, _network) = create_test_and_network_deps(); + + deps.batcher + .expect_start_height() + .times(1) + .withf(|input| input.height == BlockNumber(0)) + .return_const(Ok(())); + let mut context = deps.build_context(); + + // Initialize the context for a specific height, starting with round 0. + context.set_height_and_round(BlockNumber(0), 0).await; + + let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + let fin_receiver = + context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await; + content_sender.close_channel(); + // No block info was sent, the proposal is invalid. + assert!(fin_receiver.await.is_err()); +} + +#[rstest] +#[case::execute_all_txs(true)] +#[case::dont_execute_last_tx(false)] +#[tokio::test] +async fn validate_then_repropose(#[case] execute_all_txs: bool) { + // Receive a proposal. Then re-retrieve it. + let (mut deps, mut network) = create_test_and_network_deps(); + let executed_transactions = match execute_all_txs { + true => TX_BATCH.to_vec(), + false => TX_BATCH.iter().take(TX_BATCH.len() - 1).cloned().collect(), + }; + let final_n_executed_txs = executed_transactions.len(); + deps.setup_deps_for_validate(BlockNumber(0), final_n_executed_txs); + let mut context = deps.build_context(); + + // Initialize the context for a specific height, starting with round 0. + context.set_height_and_round(BlockNumber(0), 0).await; + + // Receive a valid proposal. 
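+    // Proposal parts arrive in protocol order: BlockInfo, Transactions (possibly several
+    // batches), ExecutedTransactionCount, and finally Fin.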
+ let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + let block_info = ProposalPart::BlockInfo(block_info(BlockNumber(0))); + content_sender.send(block_info.clone()).await.unwrap(); + let transactions = + ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() }); + content_sender.send(transactions.clone()).await.unwrap(); + content_sender + .send(ProposalPart::ExecutedTransactionCount(final_n_executed_txs.try_into().unwrap())) + .await + .unwrap(); + let fin = ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + }); + content_sender.send(fin.clone()).await.unwrap(); + let fin_receiver = + context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await; + content_sender.close_channel(); + assert_eq!(fin_receiver.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); + + let init = ProposalInit { round: 1, ..Default::default() }; + context.repropose(BlockHash(STATE_DIFF_COMMITMENT.0.0), init).await; + let (_, mut receiver) = network.outbound_proposal_receiver.next().await.unwrap(); + assert_eq!(receiver.next().await.unwrap(), ProposalPart::Init(init)); + assert_eq!(receiver.next().await.unwrap(), block_info); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Transactions(TransactionBatch { transactions: executed_transactions }) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::ExecutedTransactionCount(final_n_executed_txs.try_into().unwrap()) + ); + assert_eq!(receiver.next().await.unwrap(), fin); + assert!(receiver.next().await.is_none()); +} + +#[tokio::test] +async fn proposals_from_different_rounds() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_deps_for_validate(BlockNumber(0), INTERNAL_TX_BATCH.len()); + let mut context = deps.build_context(); + // Initialize the context for a specific height, starting with round 0. + context.set_height_and_round(BlockNumber(0), 0).await; + context.set_height_and_round(BlockNumber(0), 1).await; + + // Proposal parts sent in the proposals. + let prop_part_txs = + ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() }); + let prop_part_executed_count = + ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap()); + let prop_part_fin = ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + }); + + // The proposal from the past round is ignored. + let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + content_sender.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + content_sender.send(prop_part_txs.clone()).await.unwrap(); + content_sender.send(prop_part_executed_count.clone()).await.unwrap(); + + let mut init = ProposalInit { round: 0, ..Default::default() }; + let fin_receiver_past_round = context.validate_proposal(init, TIMEOUT, content_receiver).await; + // No fin was sent, channel remains open. + assert!(fin_receiver_past_round.await.is_err()); + + // The proposal from the current round should be validated. 
+ let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + content_sender.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + content_sender.send(prop_part_txs.clone()).await.unwrap(); + content_sender.send(prop_part_executed_count.clone()).await.unwrap(); + content_sender.send(prop_part_fin.clone()).await.unwrap(); + init.round = 1; + let fin_receiver_curr_round = context.validate_proposal(init, TIMEOUT, content_receiver).await; + assert_eq!(fin_receiver_curr_round.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); + + // The proposal from the future round should not be processed. + let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + content_sender.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + content_sender.send(prop_part_txs.clone()).await.unwrap(); + content_sender.send(prop_part_executed_count.clone()).await.unwrap(); + content_sender.send(prop_part_fin.clone()).await.unwrap(); + let fin_receiver_future_round = context + .validate_proposal( + ProposalInit { round: 2, ..Default::default() }, + TIMEOUT, + content_receiver, + ) + .await; + content_sender.close_channel(); + // Even with sending fin and closing the channel. + assert!(fin_receiver_future_round.now_or_never().is_none()); +} + +#[tokio::test] +async fn interrupt_active_proposal() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_deps_for_validate(BlockNumber(0), INTERNAL_TX_BATCH.len()); + let mut context = deps.build_context(); + // Initialize the context for a specific height, starting with round 0. + context.set_height_and_round(BlockNumber(0), 0).await; + + // Keep the sender open, as closing it or sending Fin would cause the validate to complete + // without needing interrupt. + let (mut _content_sender_0, content_receiver) = + mpsc::channel(context.config.proposal_buffer_size); + let fin_receiver_0 = + context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await; + + let (mut content_sender_1, content_receiver) = + mpsc::channel(context.config.proposal_buffer_size); + content_sender_1.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + content_sender_1 + .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() })) + .await + .unwrap(); + content_sender_1 + .send(ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap())) + .await + .unwrap(); + content_sender_1 + .send(ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + })) + .await + .unwrap(); + let fin_receiver_1 = context + .validate_proposal( + ProposalInit { round: 1, ..Default::default() }, + TIMEOUT, + content_receiver, + ) + .await; + // Move the context to the next round. + context.set_height_and_round(BlockNumber(0), 1).await; + + // Interrupt active proposal. + assert!(fin_receiver_0.await.is_err()); + assert_eq!(fin_receiver_1.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); +} + +#[tokio::test] +async fn build_proposal() { + let before: u64 = + chrono::Utc::now().timestamp().try_into().expect("Timestamp conversion failed"); + let (mut deps, mut network) = create_test_and_network_deps(); + deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len()); + let mut context = deps.build_context(); + let fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await; + // Test proposal parts. 
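+    // The proposer streams, in order: Init, BlockInfo, Transactions, ExecutedTransactionCount,
+    // and Fin; the assertions below mirror that order.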
+ let (_, mut receiver) = network.outbound_proposal_receiver.next().await.unwrap(); + assert_eq!(receiver.next().await.unwrap(), ProposalPart::Init(ProposalInit::default())); + let block_info = receiver.next().await.unwrap(); + let after: u64 = + chrono::Utc::now().timestamp().try_into().expect("Timestamp conversion failed"); + let ProposalPart::BlockInfo(info) = block_info else { + panic!("Expected ProposalPart::BlockInfo"); + }; + assert!(info.timestamp >= before && info.timestamp <= after); + assert_eq!(info.eth_to_fri_rate, ETH_TO_FRI_RATE); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() }) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap()) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + }) + ); + assert!(receiver.next().await.is_none()); + assert_eq!(fin_receiver.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); +} +#[tokio::test] +async fn build_proposal_cende_failure() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len()); + let mut mock_cende_context = MockCendeContext::new(); + mock_cende_context + .expect_write_prev_height_blob() + .times(1) + .return_once(|_height| tokio::spawn(ready(false))); + deps.cende_ambassador = mock_cende_context; + let mut context = deps.build_context(); + + let fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await; + assert_eq!(fin_receiver.await, Err(Canceled)); +} + +#[tokio::test] +async fn build_proposal_cende_incomplete() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len()); + let mut mock_cende_context = MockCendeContext::new(); + mock_cende_context + .expect_write_prev_height_blob() + .times(1) + .return_once(|_height| tokio::spawn(pending())); + deps.cende_ambassador = mock_cende_context; + let mut context = deps.build_context(); + + let fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await; + assert_eq!(fin_receiver.await, Err(Canceled)); +} + +#[rstest] +#[case::proposer(true)] +#[case::validator(false)] +#[tokio::test] +async fn batcher_not_ready(#[case] proposer: bool) { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_default_expectations(); + deps.batcher.expect_start_height().times(1).return_const(Ok(())); + if proposer { + deps.batcher + .expect_propose_block() + .times(1) + .return_const(Err(BatcherClientError::BatcherError(BatcherError::NotReady))); + } else { + deps.batcher + .expect_validate_block() + .times(1) + .return_const(Err(BatcherClientError::BatcherError(BatcherError::NotReady))); + } + let mut context = deps.build_context(); + context.set_height_and_round(BlockNumber::default(), Round::default()).await; + + if proposer { + let fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await; + assert_eq!(fin_receiver.await, Err(Canceled)); + } else { + let (mut content_sender, content_receiver) = + mpsc::channel(context.config.proposal_buffer_size); + content_sender.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + + let fin_receiver = + context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await; + assert_eq!(fin_receiver.await, Err(Canceled)); + } +} + +#[rstest] 
+#[case::execute_all_txs(true)] +#[case::dont_execute_last_tx(false)] +#[tokio::test] +async fn propose_then_repropose(#[case] execute_all_txs: bool) { + let (mut deps, mut network) = create_test_and_network_deps(); + let transactions = match execute_all_txs { + true => TX_BATCH.to_vec(), + false => TX_BATCH.iter().take(TX_BATCH.len() - 1).cloned().collect(), + }; + deps.setup_deps_for_build(BlockNumber(0), transactions.len()); + let mut context = deps.build_context(); + // Build proposal. + let fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await; + let (_, mut receiver) = network.outbound_proposal_receiver.next().await.unwrap(); + // Receive the proposal parts. + let _init = receiver.next().await.unwrap(); + let block_info = receiver.next().await.unwrap(); + let _txs = receiver.next().await.unwrap(); + let final_n_executed_txs = receiver.next().await.unwrap(); + assert!(matches!(final_n_executed_txs, ProposalPart::ExecutedTransactionCount(_))); + let fin = receiver.next().await.unwrap(); + assert_eq!(fin_receiver.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); + + // Re-propose. + context + .repropose( + BlockHash(STATE_DIFF_COMMITMENT.0.0), + ProposalInit { round: 1, ..Default::default() }, + ) + .await; + // Re-propose sends the same proposal. + let (_, mut receiver) = network.outbound_proposal_receiver.next().await.unwrap(); + let _init = receiver.next().await.unwrap(); + assert_eq!(receiver.next().await.unwrap(), block_info); + + let reproposed_txs = ProposalPart::Transactions(TransactionBatch { transactions }); + assert_eq!(receiver.next().await.unwrap(), reproposed_txs); + + assert_eq!(receiver.next().await.unwrap(), final_n_executed_txs); + assert_eq!(receiver.next().await.unwrap(), fin); + assert!(receiver.next().await.is_none()); +} + +#[tokio::test] +async fn eth_to_fri_rate_out_of_range() { + let (mut deps, _network) = create_test_and_network_deps(); + deps.setup_default_expectations(); + + deps.batcher + .expect_start_height() + .times(1) + .withf(|input| input.height == BlockNumber(0)) + .return_const(Ok(())); + let mut context = deps.build_context(); + context.set_height_and_round(BlockNumber(0), 0).await; + let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + // Send a block info with an eth_to_fri_rate that is outside the margin of error. + let mut block_info = block_info(BlockNumber(0)); + block_info.eth_to_fri_rate *= 2; + content_sender.send(ProposalPart::BlockInfo(block_info).clone()).await.unwrap(); + // Use a large enough timeout to ensure fin_receiver was canceled due to invalid block_info, + // not due to a timeout. + let fin_receiver = + context.validate_proposal(ProposalInit::default(), TIMEOUT * 100, content_receiver).await; + assert_eq!(fin_receiver.await, Err(Canceled)); + // TODO(guyn): How to check that the rejection is due to the eth_to_fri_rate? 
+}
+
+#[rstest]
+#[case::maximum(true)]
+#[case::minimum(false)]
+#[tokio::test]
+async fn gas_price_limits(#[case] maximum: bool) {
+    let (mut deps, _network) = create_test_and_network_deps();
+    deps.setup_deps_for_validate(BlockNumber(0), INTERNAL_TX_BATCH.len());
+    let context_config = ContextConfig::default();
+    let min_gas_price = context_config.min_l1_gas_price_wei;
+    let min_data_price = context_config.min_l1_data_gas_price_wei;
+    let max_gas_price = context_config.max_l1_gas_price_wei;
+    let max_data_price = context_config.max_l1_data_gas_price_wei;
+
+    let price = if maximum {
+        // Take the higher maximum price and go much higher than that.
+        // If we don't go much higher, the l1_data_gas_price_multiplier will
+        // lower the data gas price below the clamp limit.
+        std::cmp::max(max_gas_price, max_data_price) * 100
+    } else {
+        0
+    };
+    let mut l1_gas_price_provider = MockL1GasPriceProviderClient::new();
+    l1_gas_price_provider.expect_get_price_info().returning(move |_| {
+        Ok(PriceInfo { base_fee_per_gas: GasPrice(price), blob_fee: GasPrice(price) })
+    });
+
+    deps.l1_gas_price_provider = l1_gas_price_provider;
+    let mut context = deps.build_context();
+
+    context.set_height_and_round(BlockNumber(0), 0).await;
+    let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size);
+
+    let mut block_info = block_info(BlockNumber(0));
+
+    if maximum {
+        // Set the gas price to the maximum value.
+        block_info.l1_gas_price_wei = GasPrice(max_gas_price);
+        block_info.l1_data_gas_price_wei = GasPrice(max_data_price);
+    } else {
+        // Set the gas price to the minimum value.
+        block_info.l1_gas_price_wei = GasPrice(min_gas_price);
+        block_info.l1_data_gas_price_wei = GasPrice(min_data_price);
+    }
+
+    // Send the block info, some transactions and then fin.
+    content_sender.send(ProposalPart::BlockInfo(block_info).clone()).await.unwrap();
+    content_sender
+        .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() }))
+        .await
+        .unwrap();
+    content_sender
+        .send(ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap()))
+        .await
+        .unwrap();
+    content_sender
+        .send(ProposalPart::Fin(ProposalFin {
+            proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0),
+        }))
+        .await
+        .unwrap();
+
+    // Even though we used the minimum/maximum gas prices, not the values we gave the provider,
+    // the proposal should still be valid due to the clamping of limit prices.
+    let fin_receiver =
+        context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await;
+    assert_eq!(fin_receiver.await, Ok(BlockHash(STATE_DIFF_COMMITMENT.0.0)));
+}
+
+#[tokio::test]
+async fn decision_reached_sends_correct_values() {
+    let (mut deps, _network) = create_test_and_network_deps();
+
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    // We need to create a valid proposal to call decision_reached on.
+    //
+    // 1. Build proposal setup starts.
+    deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len());
+
+    const BLOCK_TIME_STAMP_SECONDS: u64 = 123456;
+    let mut clock = MockClock::new();
+    clock.expect_unix_now().return_const(BLOCK_TIME_STAMP_SECONDS);
+    clock
+        .expect_now()
+        .return_const(Utc.timestamp_opt(BLOCK_TIME_STAMP_SECONDS.try_into().unwrap(), 0).unwrap());
+    deps.clock = Arc::new(clock);
+
+    // 2. Decision reached setup starts.
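+    // decision_reached is expected to pull the decision from the batcher, push a SyncBlock to
+    // state sync, and hand the blob data to the cende ambassador; the mocks below cover each of
+    // these steps.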
+ deps.batcher + .expect_decision_reached() + .times(1) + .return_once(move |_| Ok(DecisionReachedResponse::default())); + + // This is the actual part of the test that checks the values are correct. + // TODO(guy.f): Add expectations and validations for all the other values being written. + deps.state_sync_client.expect_add_new_block().times(1).return_once(|block_info| { + assert_eq!(block_info.block_header_without_hash.timestamp.0, BLOCK_TIME_STAMP_SECONDS); + Ok(()) + }); + + deps.cende_ambassador + .expect_prepare_blob_for_next_height() + // TODO(guy.f): Verify the values sent here are correct. + .return_once(|_height| Ok(())); + + let mut context = deps.build_context(); + + // This sets up the required state for the test, prior to running the code being tested. + let _fin = context.build_proposal(ProposalInit::default(), TIMEOUT).await.await; + // At this point we should have a valid proposal in the context which contains the timestamp. + + let vote = Vote { + // Currently this is the only field used by decision_reached. + height: 0, + ..Default::default() + }; + + context.decision_reached(BlockHash(STATE_DIFF_COMMITMENT.0.0), vec![vote]).await.unwrap(); + + let metrics = recorder.handle().render(); + CONSENSUS_L2_GAS_PRICE + .assert_eq(&metrics, VersionedConstants::latest_constants().min_gas_price.0); +} + +#[rstest] +#[case::l1_price_oracle_failure(true)] +#[case::eth_to_strk_rate_oracle_failure(false)] +#[tokio::test] +async fn oracle_fails_on_startup(#[case] l1_oracle_failure: bool) { + let (mut deps, mut network) = create_test_and_network_deps(); + deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len()); + + if l1_oracle_failure { + let mut l1_prices_oracle_client = MockL1GasPriceProviderClient::new(); + l1_prices_oracle_client.expect_get_price_info().times(1).return_const(Err( + L1GasPriceClientError::L1GasPriceProviderError( + // random error, these parameters don't mean anything + L1GasPriceProviderError::UnexpectedBlockNumberError { expected: 0, found: 1 }, + ), + )); + deps.l1_gas_price_provider = l1_prices_oracle_client; + } else { + let mut eth_to_strk_oracle_client = MockEthToStrkOracleClientTrait::new(); + eth_to_strk_oracle_client.expect_eth_to_fri_rate().times(1).return_once(|_| { + Err(EthToStrkOracleClientError::MissingFieldError("", "".to_string())) + }); + deps.eth_to_strk_oracle_client = eth_to_strk_oracle_client; + } + + let mut context = deps.build_context(); + + let init = ProposalInit::default(); + + let fin_receiver = context.build_proposal(init, TIMEOUT).await; + + let (_, mut receiver) = network.outbound_proposal_receiver.next().await.unwrap(); + + assert_eq!(receiver.next().await.unwrap(), ProposalPart::Init(ProposalInit::default())); + let block_info = receiver.next().await.unwrap(); + let ProposalPart::BlockInfo(info) = block_info else { + panic!("Expected ProposalPart::BlockInfo"); + }; + + let default_context_config = ContextConfig::default(); + assert_eq!(info.eth_to_fri_rate, DEFAULT_ETH_TO_FRI_RATE); + // Despite the l1_gas_price_provider being set up not to fail, we still expect the default + // values because eth_to_strk_rate_oracle_client failed. 
+ assert_eq!(info.l1_gas_price_wei.0, default_context_config.min_l1_gas_price_wei); + assert_eq!(info.l1_data_gas_price_wei.0, default_context_config.min_l1_data_gas_price_wei); + + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() }) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap()) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + }) + ); + assert!(receiver.next().await.is_none()); + assert_eq!(fin_receiver.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); +} + +#[rstest] +#[case::l1_price_oracle_failure(true)] +#[case::eth_to_strk_rate_oracle_failure(false)] +#[tokio::test] +async fn oracle_fails_on_second_block(#[case] l1_oracle_failure: bool) { + let (mut deps, mut network) = create_test_and_network_deps(); + // Validate block number 0, call decision_reached to save the previous block info (block 0), and + // attempt to build_proposal on block number 1. + deps.setup_deps_for_validate(BlockNumber(0), INTERNAL_TX_BATCH.len()); + deps.setup_deps_for_build(BlockNumber(1), INTERNAL_TX_BATCH.len()); + + // set up batcher decision_reached + deps.batcher.expect_decision_reached().times(1).return_once(|_| { + Ok(DecisionReachedResponse { + state_diff: ThinStateDiff::default(), + l2_gas_used: GasAmount::default(), + central_objects: CentralObjects::default(), + }) + }); + + // required for decision reached flow + deps.state_sync_client.expect_add_new_block().times(1).return_once(|_| Ok(())); + deps.cende_ambassador.expect_prepare_blob_for_next_height().times(1).return_once(|_| Ok(())); + + // set the oracle to succeed on first block and fail on second + if l1_oracle_failure { + let mut l1_prices_oracle_client = MockL1GasPriceProviderClient::new(); + l1_prices_oracle_client.expect_get_price_info().times(1).return_const(Ok(PriceInfo { + base_fee_per_gas: GasPrice(TEMP_ETH_GAS_FEE_IN_WEI), + blob_fee: GasPrice(TEMP_ETH_BLOB_GAS_FEE_IN_WEI), + })); + l1_prices_oracle_client.expect_get_price_info().times(1).return_const(Err( + L1GasPriceClientError::L1GasPriceProviderError( + // random error, these parameters don't mean anything + L1GasPriceProviderError::UnexpectedBlockNumberError { expected: 0, found: 1 }, + ), + )); + deps.l1_gas_price_provider = l1_prices_oracle_client; + } else { + let mut eth_to_strk_oracle_client = MockEthToStrkOracleClientTrait::new(); + eth_to_strk_oracle_client + .expect_eth_to_fri_rate() + .times(1) + .return_once(|_| Ok(ETH_TO_FRI_RATE)); + eth_to_strk_oracle_client.expect_eth_to_fri_rate().times(1).return_once(|_| { + Err(EthToStrkOracleClientError::MissingFieldError("", "".to_string())) + }); + deps.eth_to_strk_oracle_client = eth_to_strk_oracle_client; + } + + let mut context = deps.build_context(); + + // Validate block number 0. + + // Initialize the context for a specific height, starting with round 0. 
+ context.set_height_and_round(BlockNumber(0), 0).await; + + let (mut content_sender, content_receiver) = mpsc::channel(context.config.proposal_buffer_size); + content_sender.send(ProposalPart::BlockInfo(block_info(BlockNumber(0)))).await.unwrap(); + content_sender + .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() })) + .await + .unwrap(); + content_sender + .send(ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap())) + .await + .unwrap(); + content_sender + .send(ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + })) + .await + .unwrap(); + let fin_receiver = + context.validate_proposal(ProposalInit::default(), TIMEOUT, content_receiver).await; + content_sender.close_channel(); + let block_hash = fin_receiver.await.unwrap().0; + assert_eq!(block_hash, STATE_DIFF_COMMITMENT.0.0); + + // Decision reached + + context + .decision_reached( + BlockHash(block_hash), + vec![Vote { block_hash: Some(BlockHash(block_hash)), ..Default::default() }], + ) + .await + .unwrap(); + + // Build proposal for block number 1. + let init = ProposalInit { height: BlockNumber(1), ..Default::default() }; + + let fin_receiver = context.build_proposal(init, TIMEOUT).await; + + let (_, mut receiver) = network.outbound_proposal_receiver.next().await.unwrap(); + + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Init(ProposalInit { height: BlockNumber(1), ..Default::default() }) + ); + let info = receiver.next().await.unwrap(); + let ProposalPart::BlockInfo(info) = info else { + panic!("Expected ProposalPart::BlockInfo"); + }; + + let previous_block_info = block_info(BlockNumber(0)); + + assert_eq!(info.eth_to_fri_rate, previous_block_info.eth_to_fri_rate); + assert_eq!(info.l1_gas_price_wei, previous_block_info.l1_gas_price_wei); + assert_eq!(info.l1_data_gas_price_wei, previous_block_info.l1_data_gas_price_wei); + + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.to_vec() }) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::ExecutedTransactionCount(INTERNAL_TX_BATCH.len().try_into().unwrap()) + ); + assert_eq!( + receiver.next().await.unwrap(), + ProposalPart::Fin(ProposalFin { + proposal_commitment: BlockHash(STATE_DIFF_COMMITMENT.0.0), + }) + ); + assert!(receiver.next().await.is_none()); + assert_eq!(fin_receiver.await.unwrap().0, STATE_DIFF_COMMITMENT.0.0); +} + +#[rstest] +#[case::constant_l2_gas_price_true(true, GasAmount::default())] +#[case::constant_l2_gas_price_false(false, VersionedConstants::latest_constants().max_block_size)] +#[tokio::test] +async fn constant_l2_gas_price_behavior( + #[case] constant_l2_gas_price: bool, + #[case] mock_l2_gas_used: GasAmount, +) { + let (mut deps, _network) = create_test_and_network_deps(); + + let recorder = PrometheusBuilder::new().build_recorder(); + let _recorder_guard = metrics::set_default_local_recorder(&recorder); + + // Setup dependencies and mocks. 
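+    // Expected fee-market behavior: with constant_l2_gas_price the context pins l2_gas_price to
+    // min_gas_price; otherwise decision_reached applies an EIP-1559-style update that raises the
+    // price when l2_gas_used exceeds gas_target (max_block_size usage in the second case).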
+ deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len()); + + deps.batcher.expect_decision_reached().times(1).return_once(move |_| { + Ok(DecisionReachedResponse { + state_diff: ThinStateDiff::default(), + l2_gas_used: mock_l2_gas_used, + central_objects: CentralObjects::default(), + }) + }); + + deps.state_sync_client.expect_add_new_block().times(1).return_once(|_| Ok(())); + deps.cende_ambassador.expect_prepare_blob_for_next_height().times(1).return_once(|_| Ok(())); + + let context_config = ContextConfig { constant_l2_gas_price, ..Default::default() }; + let mut context = deps.build_context(); + context.config = context_config; + + // Run proposal and decision logic. + let _fin_receiver = context.build_proposal(ProposalInit::default(), TIMEOUT).await.await; + context + .decision_reached(BlockHash(STATE_DIFF_COMMITMENT.0.0), vec![Vote::default()]) + .await + .unwrap(); + + let min_gas_price = VersionedConstants::latest_constants().min_gas_price.0; + let actual_l2_gas_price = context.l2_gas_price.0; + + if constant_l2_gas_price { + assert_eq!( + actual_l2_gas_price, min_gas_price, + "Expected L2 gas price to match constant min_gas_price" + ); + } else { + assert!( + actual_l2_gas_price > min_gas_price, + "Expected L2 gas price > min ({}) due to high usage (EIP-1559), but got {}", + min_gas_price, + actual_l2_gas_price + ); + } +} diff --git a/crates/apollo_consensus_orchestrator/src/test_utils.rs b/crates/apollo_consensus_orchestrator/src/test_utils.rs new file mode 100644 index 00000000000..16ce088ad61 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/test_utils.rs @@ -0,0 +1,312 @@ +use std::future::ready; +use std::sync::{Arc, LazyLock, OnceLock}; +use std::time::Duration; + +use apollo_batcher_types::batcher_types::{ + GetProposalContent, + GetProposalContentResponse, + ProposalCommitment, + ProposalStatus, + ProposeBlockInput, + SendProposalContent, + SendProposalContentInput, + SendProposalContentResponse, + ValidateBlockInput, +}; +use apollo_batcher_types::communication::MockBatcherClient; +use apollo_class_manager_types::transaction_converter::{ + MockTransactionConverterTrait, + TransactionConverter, + TransactionConverterTrait, +}; +use apollo_class_manager_types::EmptyClassManagerClient; +use apollo_l1_gas_price_types::{ + MockEthToStrkOracleClientTrait, + MockL1GasPriceProviderClient, + PriceInfo, +}; +use apollo_network::network_manager::test_utils::{ + mock_register_broadcast_topic, + BroadcastNetworkMock, + TestSubscriberChannels, +}; +use apollo_network::network_manager::{BroadcastTopicChannels, BroadcastTopicClient}; +use apollo_protobuf::consensus::{ConsensusBlockInfo, HeightAndRound, ProposalPart, Vote}; +use apollo_state_sync_types::communication::MockStateSyncClient; +use apollo_time::time::{Clock, DefaultClock}; +use futures::channel::mpsc; +use futures::executor::block_on; +use starknet_api::block::{ + BlockNumber, + GasPrice, + TEMP_ETH_BLOB_GAS_FEE_IN_WEI, + TEMP_ETH_GAS_FEE_IN_WEI, +}; +use starknet_api::consensus_transaction::{ConsensusTransaction, InternalConsensusTransaction}; +use starknet_api::core::{ChainId, Nonce, StateDiffCommitment}; +use starknet_api::data_availability::L1DataAvailabilityMode; +use starknet_api::felt; +use starknet_api::hash::PoseidonHash; +use starknet_api::test_utils::invoke::{rpc_invoke_tx, InvokeTxArgs}; +use starknet_types_core::felt::Felt; + +use crate::cende::MockCendeContext; +use crate::config::ContextConfig; +use crate::orchestrator_versioned_constants::VersionedConstants; +use 
crate::sequencer_consensus_context::{
+    SequencerConsensusContext,
+    SequencerConsensusContextDeps,
+};
+
+pub(crate) const TIMEOUT: Duration = Duration::from_millis(1200);
+pub(crate) const CHANNEL_SIZE: usize = 5000;
+pub(crate) const NUM_VALIDATORS: u64 = 4;
+pub(crate) const STATE_DIFF_COMMITMENT: StateDiffCommitment =
+    StateDiffCommitment(PoseidonHash(Felt::ZERO));
+pub(crate) const CHAIN_ID: ChainId = ChainId::Mainnet;
+
+// In order for the gas price in ETH to be greater than 0 (required), we must use large enough
+// values here.
+pub(crate) const ETH_TO_FRI_RATE: u128 = u128::pow(10, 18);
+
+pub(crate) static TX_BATCH: LazyLock<Vec<ConsensusTransaction>> =
+    LazyLock::new(|| (0..3).map(generate_invoke_tx).collect());
+
+pub(crate) static INTERNAL_TX_BATCH: LazyLock<Vec<InternalConsensusTransaction>> =
+    LazyLock::new(|| {
+        // TODO(shahak): Use MockTransactionConverter instead.
+        static TRANSACTION_CONVERTER: LazyLock<TransactionConverter> = LazyLock::new(|| {
+            TransactionConverter::new(Arc::new(EmptyClassManagerClient), CHAIN_ID)
+        });
+        TX_BATCH
+            .iter()
+            .cloned()
+            .map(|tx| {
+                block_on(TRANSACTION_CONVERTER.convert_consensus_tx_to_internal_consensus_tx(tx))
+                    .unwrap()
+            })
+            .collect()
+    });
+
+pub(crate) struct TestDeps {
+    pub transaction_converter: MockTransactionConverterTrait,
+    pub state_sync_client: MockStateSyncClient,
+    pub batcher: MockBatcherClient,
+    pub cende_ambassador: MockCendeContext,
+    pub eth_to_strk_oracle_client: MockEthToStrkOracleClientTrait,
+    pub l1_gas_price_provider: MockL1GasPriceProviderClient,
+    pub clock: Arc<dyn Clock>,
+    pub outbound_proposal_sender: mpsc::Sender<(HeightAndRound, mpsc::Receiver<ProposalPart>)>,
+    pub vote_broadcast_client: BroadcastTopicClient<Vote>,
+}
+
+impl From<TestDeps> for SequencerConsensusContextDeps {
+    fn from(deps: TestDeps) -> Self {
+        SequencerConsensusContextDeps {
+            transaction_converter: Arc::new(deps.transaction_converter),
+            state_sync_client: Arc::new(deps.state_sync_client),
+            batcher: Arc::new(deps.batcher),
+            cende_ambassador: Arc::new(deps.cende_ambassador),
+            eth_to_strk_oracle_client: Arc::new(deps.eth_to_strk_oracle_client),
+            l1_gas_price_provider: Arc::new(deps.l1_gas_price_provider),
+            clock: deps.clock,
+            outbound_proposal_sender: deps.outbound_proposal_sender,
+            vote_broadcast_client: deps.vote_broadcast_client,
+        }
+    }
+}
+
+impl TestDeps {
+    pub(crate) fn setup_default_expectations(&mut self) {
+        self.setup_default_transaction_converter();
+        self.setup_default_cende_ambassador();
+        self.setup_default_gas_price_provider();
+        self.setup_default_eth_to_strk_oracle_client();
+    }
+
+    pub(crate) fn setup_deps_for_build(
+        &mut self,
+        block_number: BlockNumber,
+        final_n_executed_txs: usize,
+    ) {
+        assert!(final_n_executed_txs <= INTERNAL_TX_BATCH.len());
+        self.setup_default_expectations();
+        let proposal_id = Arc::new(OnceLock::new());
+        let proposal_id_clone = Arc::clone(&proposal_id);
+        self.batcher.expect_propose_block().times(1).returning(move |input: ProposeBlockInput| {
+            proposal_id_clone.set(input.proposal_id).unwrap();
+            Ok(())
+        });
+        self.batcher
+            .expect_start_height()
+            .times(1)
+            .withf(move |input| input.height == block_number)
+            .return_const(Ok(()));
+        let proposal_id_clone = Arc::clone(&proposal_id);
+        self.batcher.expect_get_proposal_content().times(1).returning(move |input| {
+            assert_eq!(input.proposal_id, *proposal_id_clone.get().unwrap());
+            Ok(GetProposalContentResponse {
+                content: GetProposalContent::Txs(INTERNAL_TX_BATCH.clone()),
+            })
+        });
+        let proposal_id_clone = Arc::clone(&proposal_id);
+        self.batcher.expect_get_proposal_content().times(1).returning(move |input| {
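+            // Both content fetches must refer to the proposal_id captured when propose_block was
+            // called.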
+            assert_eq!(input.proposal_id, *proposal_id_clone.get().unwrap());
+            Ok(GetProposalContentResponse {
+                content: GetProposalContent::Finished {
+                    id: ProposalCommitment { state_diff_commitment: STATE_DIFF_COMMITMENT },
+                    final_n_executed_txs,
+                },
+            })
+        });
+    }
+
+    pub(crate) fn setup_deps_for_validate(
+        &mut self,
+        block_number: BlockNumber,
+        final_n_executed_txs: usize,
+    ) {
+        assert!(final_n_executed_txs <= INTERNAL_TX_BATCH.len());
+        self.setup_default_expectations();
+        let proposal_id = Arc::new(OnceLock::new());
+        let proposal_id_clone = Arc::clone(&proposal_id);
+        self.batcher.expect_validate_block().times(1).returning(
+            move |input: ValidateBlockInput| {
+                proposal_id_clone.set(input.proposal_id).unwrap();
+                Ok(())
+            },
+        );
+        self.batcher
+            .expect_start_height()
+            .withf(move |input| input.height == block_number)
+            .return_const(Ok(()));
+        let proposal_id_clone = Arc::clone(&proposal_id);
+        self.batcher.expect_send_proposal_content().times(1).returning(
+            move |input: SendProposalContentInput| {
+                assert_eq!(input.proposal_id, *proposal_id_clone.get().unwrap());
+                let SendProposalContent::Txs(txs) = input.content else {
+                    panic!("Expected SendProposalContent::Txs, got {:?}", input.content);
+                };
+                assert_eq!(txs, *INTERNAL_TX_BATCH);
+                Ok(SendProposalContentResponse { response: ProposalStatus::Processing })
+            },
+        );
+        let proposal_id_clone = Arc::clone(&proposal_id);
+        self.batcher.expect_send_proposal_content().times(1).returning(
+            move |input: SendProposalContentInput| {
+                assert_eq!(input.proposal_id, *proposal_id_clone.get().unwrap());
+                assert_eq!(input.content, SendProposalContent::Finish(final_n_executed_txs));
+                Ok(SendProposalContentResponse {
+                    response: ProposalStatus::Finished(ProposalCommitment {
+                        state_diff_commitment: STATE_DIFF_COMMITMENT,
+                    }),
+                })
+            },
+        );
+    }
+
+    pub(crate) fn setup_default_transaction_converter(&mut self) {
+        for (tx, internal_tx) in TX_BATCH.iter().zip(INTERNAL_TX_BATCH.iter()) {
+            self.transaction_converter
+                .expect_convert_internal_consensus_tx_to_consensus_tx()
+                .withf(move |tx| tx == internal_tx)
+                .returning(|_| Ok(tx.clone()));
+            self.transaction_converter
+                .expect_convert_consensus_tx_to_internal_consensus_tx()
+                .withf(move |internal_tx| internal_tx == tx)
+                .returning(|_| Ok(internal_tx.clone()));
+        }
+    }
+
+    pub(crate) fn setup_default_cende_ambassador(&mut self) {
+        self.cende_ambassador
+            .expect_write_prev_height_blob()
+            .return_once(|_height| tokio::spawn(ready(true)));
+    }
+
+    pub(crate) fn setup_default_gas_price_provider(&mut self) {
+        self.l1_gas_price_provider.expect_get_price_info().return_const(Ok(PriceInfo {
+            base_fee_per_gas: GasPrice(TEMP_ETH_GAS_FEE_IN_WEI),
+            blob_fee: GasPrice(TEMP_ETH_BLOB_GAS_FEE_IN_WEI),
+        }));
+    }
+
+    pub(crate) fn setup_default_eth_to_strk_oracle_client(&mut self) {
+        self.eth_to_strk_oracle_client.expect_eth_to_fri_rate().returning(|_| Ok(ETH_TO_FRI_RATE));
+    }
+
+    pub(crate) fn build_context(self) -> SequencerConsensusContext {
+        SequencerConsensusContext::new(
+            ContextConfig {
+                proposal_buffer_size: CHANNEL_SIZE,
+                num_validators: NUM_VALIDATORS,
+                chain_id: CHAIN_ID,
+                ..Default::default()
+            },
+            self.into(),
+        )
+    }
+}
+
+pub(crate) fn create_test_and_network_deps() -> (TestDeps, NetworkDependencies) {
+    let (outbound_proposal_sender, outbound_proposal_receiver) =
+        mpsc::channel::<(HeightAndRound, mpsc::Receiver<ProposalPart>)>(CHANNEL_SIZE);
+
+    let TestSubscriberChannels { mock_network: mock_vote_network, subscriber_channels } =
+        mock_register_broadcast_topic().expect("Failed to create mock network");
create mock network"); + let BroadcastTopicChannels { broadcast_topic_client: votes_topic_client, .. } = + subscriber_channels; + + let transaction_converter = MockTransactionConverterTrait::new(); + let state_sync_client = MockStateSyncClient::new(); + let batcher = MockBatcherClient::new(); + let cende_ambassador = MockCendeContext::new(); + let eth_to_strk_oracle_client = MockEthToStrkOracleClientTrait::new(); + let l1_gas_price_provider = MockL1GasPriceProviderClient::new(); + let clock = Arc::new(DefaultClock); + + let test_deps = TestDeps { + transaction_converter, + state_sync_client, + batcher, + cende_ambassador, + eth_to_strk_oracle_client, + l1_gas_price_provider, + clock, + outbound_proposal_sender, + vote_broadcast_client: votes_topic_client, + }; + + let network_deps = + NetworkDependencies { _vote_network: mock_vote_network, outbound_proposal_receiver }; + + (test_deps, network_deps) +} + +pub(crate) fn generate_invoke_tx(nonce: u8) -> ConsensusTransaction { + ConsensusTransaction::RpcTransaction(rpc_invoke_tx(InvokeTxArgs { + nonce: Nonce(felt!(nonce)), + ..Default::default() + })) +} + +pub(crate) fn block_info(height: BlockNumber) -> ConsensusBlockInfo { + let context_config = ContextConfig::default(); + ConsensusBlockInfo { + height, + timestamp: chrono::Utc::now().timestamp().try_into().expect("Timestamp conversion failed"), + builder: Default::default(), + l1_da_mode: L1DataAvailabilityMode::Blob, + l2_gas_price_fri: VersionedConstants::latest_constants().min_gas_price, + l1_gas_price_wei: GasPrice(TEMP_ETH_GAS_FEE_IN_WEI + context_config.l1_gas_tip_wei), + l1_data_gas_price_wei: GasPrice( + TEMP_ETH_BLOB_GAS_FEE_IN_WEI * context_config.l1_data_gas_price_multiplier_ppt / 1000, + ), + eth_to_fri_rate: ETH_TO_FRI_RATE, + } +} +// Structs which aren't utilized but should not be dropped. +pub(crate) struct NetworkDependencies { + _vote_network: BroadcastNetworkMock, + pub outbound_proposal_receiver: mpsc::Receiver<(HeightAndRound, mpsc::Receiver)>, +} diff --git a/crates/apollo_consensus_orchestrator/src/utils.rs b/crates/apollo_consensus_orchestrator/src/utils.rs new file mode 100644 index 00000000000..7c1d687bfc4 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/utils.rs @@ -0,0 +1,238 @@ +use std::sync::Arc; + +use apollo_l1_gas_price_types::{ + EthToStrkOracleClientTrait, + L1GasPriceProviderClient, + PriceInfo, + DEFAULT_ETH_TO_FRI_RATE, +}; +use apollo_protobuf::consensus::{ConsensusBlockInfo, ProposalPart}; +use apollo_state_sync_types::communication::{StateSyncClient, StateSyncClientError}; +// TODO(Gilad): Define in consensus, either pass to blockifier as config or keep the dup. 
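The helpers above are meant to compose: a test builds `TestDeps`, arms the mock expectations for the flow under test, and converts the result into a context. A hypothetical sketch of that wiring (the test name and assertions are illustrative; `create_test_and_network_deps`, `setup_deps_for_build`, and `build_context` are the items defined above):

```rust
// Hypothetical test, illustrative only: wire the mocks for a build flow at
// height 0 in which every transaction in the batch is executed.
#[tokio::test]
async fn build_context_smoke() {
    let (mut deps, mut network) = create_test_and_network_deps();
    deps.setup_deps_for_build(BlockNumber(0), INTERNAL_TX_BATCH.len());
    let _context = deps.build_context();
    // A real test would now drive the context (e.g. through build_proposal) and
    // assert on the parts arriving at `network.outbound_proposal_receiver`.
    let _ = &mut network.outbound_proposal_receiver;
}
```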
diff --git a/crates/apollo_consensus_orchestrator/src/utils.rs b/crates/apollo_consensus_orchestrator/src/utils.rs
new file mode 100644
index 00000000000..7c1d687bfc4
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/utils.rs
@@ -0,0 +1,238 @@
+use std::sync::Arc;
+
+use apollo_l1_gas_price_types::{
+    EthToStrkOracleClientTrait,
+    L1GasPriceProviderClient,
+    PriceInfo,
+    DEFAULT_ETH_TO_FRI_RATE,
+};
+use apollo_protobuf::consensus::{ConsensusBlockInfo, ProposalPart};
+use apollo_state_sync_types::communication::{StateSyncClient, StateSyncClientError};
+// TODO(Gilad): Define in consensus, either pass to blockifier as config or keep the dup.
+use blockifier::abi::constants::STORED_BLOCK_HASH_BUFFER;
+use futures::channel::mpsc;
+use futures::SinkExt;
+use num_rational::Ratio;
+use starknet_api::block::{
+    BlockHashAndNumber,
+    BlockNumber,
+    BlockTimestamp,
+    GasPrice,
+    GasPriceVector,
+    GasPrices,
+    NonzeroGasPrice,
+};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::data_availability::L1DataAvailabilityMode;
+use starknet_api::StarknetApiError;
+use tracing::{info, warn};
+
+use crate::build_proposal::BuildProposalError;
+use crate::metrics::CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR;
+use crate::validate_proposal::ValidateProposalError;
+
+pub(crate) struct StreamSender {
+    pub proposal_sender: mpsc::Sender<ProposalPart>,
+}
+
+impl StreamSender {
+    pub async fn send(&mut self, proposal_part: ProposalPart) -> Result<(), mpsc::SendError> {
+        self.proposal_sender.send(proposal_part).await
+    }
+}
+
+pub(crate) struct GasPriceParams {
+    pub min_l1_gas_price_wei: GasPrice,
+    pub max_l1_gas_price_wei: GasPrice,
+    pub max_l1_data_gas_price_wei: GasPrice,
+    pub min_l1_data_gas_price_wei: GasPrice,
+    pub l1_data_gas_price_multiplier: Ratio<u128>,
+    pub l1_gas_tip_wei: GasPrice,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum StateSyncError {
+    #[error("State sync is not ready: {0}")]
+    NotReady(String),
+    #[error("State sync client error: {0}")]
+    ClientError(#[from] StateSyncClientError),
+}
+
+impl From<StateSyncError> for BuildProposalError {
+    fn from(e: StateSyncError) -> Self {
+        match e {
+            StateSyncError::NotReady(e) => BuildProposalError::StateSyncNotReady(e),
+            StateSyncError::ClientError(e) => BuildProposalError::StateSyncClientError(e),
+        }
+    }
+}
+
+impl From<StateSyncError> for ValidateProposalError {
+    fn from(e: StateSyncError) -> Self {
+        match e {
+            StateSyncError::NotReady(e) => ValidateProposalError::StateSyncNotReady(e),
+            StateSyncError::ClientError(e) => ValidateProposalError::StateSyncClientError(e),
+        }
+    }
+}
+
+pub(crate) async fn get_oracle_rate_and_prices(
+    eth_to_strk_oracle_client: Arc<dyn EthToStrkOracleClientTrait>,
+    l1_gas_price_provider_client: Arc<dyn L1GasPriceProviderClient>,
+    timestamp: u64,
+    previous_block_info: Option<&ConsensusBlockInfo>,
+    gas_price_params: &GasPriceParams,
+) -> (u128, PriceInfo) {
+    let (eth_to_strk_rate, price_info) = tokio::join!(
+        eth_to_strk_oracle_client.eth_to_fri_rate(timestamp),
+        l1_gas_price_provider_client.get_price_info(BlockTimestamp(timestamp))
+    );
+    if price_info.is_err() {
+        warn!("Failed to get l1 gas price from provider: {:?}", price_info);
+        CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR.increment(1);
+    }
+    if eth_to_strk_rate.is_err() {
+        warn!("Failed to get eth to strk rate from oracle: {:?}", eth_to_strk_rate);
+    }
+
+    match (eth_to_strk_rate, price_info) {
+        (Ok(eth_to_strk_rate), Ok(mut price_info)) => {
+            info!("eth_to_strk_rate: {eth_to_strk_rate}, l1 gas price: {price_info:?}");
+            apply_fee_transformations(&mut price_info, gas_price_params);
+            return (eth_to_strk_rate, price_info);
+        }
+        _ => {
+            warn!("Using values from previous block info.")
+        }
+    }
+
+    if let Some(previous_block_info) = previous_block_info {
+        let (prev_eth_to_strk_rate, prev_l1_price) = (
+            previous_block_info.eth_to_fri_rate,
+            PriceInfo {
+                base_fee_per_gas: previous_block_info.l1_gas_price_wei,
+                blob_fee: previous_block_info.l1_data_gas_price_wei,
+            },
+        );
+        warn!(
+            "previous eth_to_strk_rate: {prev_eth_to_strk_rate}, previous l1 gas price: \
+             {prev_l1_price:?}"
+        );
+        return (prev_eth_to_strk_rate, prev_l1_price);
+    }
+    warn!("No previous block info available, using default values");
+    warn!(
+        "default eth_to_strk_rate: {DEFAULT_ETH_TO_FRI_RATE}, default (min) l1 gas price: {:?}, \
+         default (min) l1 data gas price: {:?}",
+        gas_price_params.min_l1_gas_price_wei, gas_price_params.min_l1_data_gas_price_wei
+    );
+
+    (
+        DEFAULT_ETH_TO_FRI_RATE,
+        PriceInfo {
+            base_fee_per_gas: gas_price_params.min_l1_gas_price_wei,
+            blob_fee: gas_price_params.min_l1_data_gas_price_wei,
+        },
+    )
+}
+
+fn apply_fee_transformations(price_info: &mut PriceInfo, gas_price_params: &GasPriceParams) {
+    price_info.base_fee_per_gas = price_info
+        .base_fee_per_gas
+        .saturating_add(gas_price_params.l1_gas_tip_wei)
+        .clamp(gas_price_params.min_l1_gas_price_wei, gas_price_params.max_l1_gas_price_wei);
+
+    price_info.blob_fee = GasPrice(
+        (gas_price_params.l1_data_gas_price_multiplier * price_info.blob_fee.0).to_integer(),
+    )
+    .clamp(gas_price_params.min_l1_data_gas_price_wei, gas_price_params.max_l1_data_gas_price_wei);
+}
+
+pub(crate) fn convert_to_sn_api_block_info(
+    block_info: &ConsensusBlockInfo,
+) -> Result<starknet_api::block::BlockInfo, StarknetApiError> {
+    let l1_gas_price_fri =
+        NonzeroGasPrice::new(block_info.l1_gas_price_wei.wei_to_fri(block_info.eth_to_fri_rate)?)?;
+    let l1_data_gas_price_fri = NonzeroGasPrice::new(
+        block_info.l1_data_gas_price_wei.wei_to_fri(block_info.eth_to_fri_rate)?,
+    )?;
+    let l2_gas_price_fri = NonzeroGasPrice::new(block_info.l2_gas_price_fri)?;
+    let l2_gas_price_wei =
+        NonzeroGasPrice::new(block_info.l2_gas_price_fri.fri_to_wei(block_info.eth_to_fri_rate)?)?;
+    let l1_gas_price_wei = NonzeroGasPrice::new(block_info.l1_gas_price_wei)?;
+    let l1_data_gas_price_wei = NonzeroGasPrice::new(block_info.l1_data_gas_price_wei)?;
+
+    Ok(starknet_api::block::BlockInfo {
+        block_number: block_info.height,
+        block_timestamp: BlockTimestamp(block_info.timestamp),
+        sequencer_address: block_info.builder,
+        gas_prices: GasPrices {
+            strk_gas_prices: GasPriceVector {
+                l1_gas_price: l1_gas_price_fri,
+                l1_data_gas_price: l1_data_gas_price_fri,
+                l2_gas_price: l2_gas_price_fri,
+            },
+            eth_gas_prices: GasPriceVector {
+                l1_gas_price: l1_gas_price_wei,
+                l1_data_gas_price: l1_data_gas_price_wei,
+                l2_gas_price: l2_gas_price_wei,
+            },
+        },
+        use_kzg_da: block_info.l1_da_mode == L1DataAvailabilityMode::Blob,
+    })
+}
+
+pub(crate) async fn retrospective_block_hash(
+    state_sync_client: Arc<dyn StateSyncClient>,
+    block_info: &ConsensusBlockInfo,
+) -> Result<Option<BlockHashAndNumber>, StateSyncError> {
+    let retrospective_block_number = block_info.height.0.checked_sub(STORED_BLOCK_HASH_BUFFER);
+    let retrospective_block_hash = match retrospective_block_number {
+        Some(block_number) => {
+            let block_number = BlockNumber(block_number);
+            let block = state_sync_client
+                // Getting the next block hash because the Sync block only contains parent hash.
+                .get_block(block_number.unchecked_next())
+                .await
+                .map_err(StateSyncError::ClientError)?
+                .ok_or(StateSyncError::NotReady(format!(
+                    "Failed to get retrospective block number {block_number}"
+                )))?;
+            Some(BlockHashAndNumber {
+                number: block_number,
+                hash: block.block_header_without_hash.parent_hash,
+            })
+        }
+        None => {
+            info!(
+                "Retrospective block number is less than {STORED_BLOCK_HASH_BUFFER}, setting None \
+                 as expected."
+            );
+            None
+        }
+    };
+    Ok(retrospective_block_hash)
+}
+
+pub(crate) fn truncate_to_executed_txs(
+    content: &mut Vec<Vec<InternalConsensusTransaction>>,
+    final_n_executed_txs: usize,
+) -> Vec<Vec<InternalConsensusTransaction>> {
+    let content = std::mem::take(content);
+    // Truncate `content` to keep only the first `final_n_executed_txs`, preserving batch
+    // structure.
+    let mut executed_content: Vec<Vec<InternalConsensusTransaction>> = Vec::new();
+    let mut remaining = final_n_executed_txs;
+
+    for batch in content {
+        if remaining < batch.len() {
+            executed_content.push(batch.into_iter().take(remaining).collect());
+            break;
+        } else {
+            remaining -= batch.len();
+            executed_content.push(batch);
+        }
+    }
+
+    executed_content
+}
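The truncation helper above is the subtle piece: it keeps the first `final_n_executed_txs` transactions while preserving batch boundaries. A self-contained sketch of the same logic over plain integers, for illustration only:

```rust
// Illustrative only: the batch-preserving truncation of `truncate_to_executed_txs`,
// rewritten generically so it can run standalone.
fn truncate_batches<T>(batches: Vec<Vec<T>>, keep: usize) -> Vec<Vec<T>> {
    let mut out = Vec::new();
    let mut remaining = keep;
    for batch in batches {
        if remaining < batch.len() {
            // Partial batch: keep only the first `remaining` items, then stop.
            out.push(batch.into_iter().take(remaining).collect());
            break;
        }
        remaining -= batch.len();
        out.push(batch);
    }
    out
}

fn main() {
    let batches = vec![vec![1, 2], vec![3, 4, 5]];
    // Keeping 3 items preserves the first batch whole and truncates the second.
    assert_eq!(truncate_batches(batches, 3), vec![vec![1, 2], vec![3]]);
}
```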
diff --git a/crates/apollo_consensus_orchestrator/src/validate_proposal.rs b/crates/apollo_consensus_orchestrator/src/validate_proposal.rs
new file mode 100644
index 00000000000..e07e146895f
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/validate_proposal.rs
@@ -0,0 +1,519 @@
+#[cfg(test)]
+#[path = "validate_proposal_test.rs"]
+mod validate_proposal_test;
+
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use apollo_batcher_types::batcher_types::{
+    ProposalId,
+    ProposalStatus,
+    SendProposalContent,
+    SendProposalContentInput,
+    ValidateBlockInput,
+};
+use apollo_batcher_types::communication::{BatcherClient, BatcherClientError};
+use apollo_class_manager_types::transaction_converter::TransactionConverterTrait;
+use apollo_consensus::types::ProposalCommitment;
+use apollo_l1_gas_price_types::errors::{EthToStrkOracleClientError, L1GasPriceClientError};
+use apollo_l1_gas_price_types::{EthToStrkOracleClientTrait, L1GasPriceProviderClient};
+use apollo_protobuf::consensus::{ConsensusBlockInfo, ProposalFin, ProposalPart, TransactionBatch};
+use apollo_state_sync_types::communication::{StateSyncClient, StateSyncClientError};
+use apollo_time::time::{sleep_until, Clock, DateTime};
+use futures::channel::mpsc;
+use futures::StreamExt;
+use starknet_api::block::{BlockHash, BlockNumber, GasPrice};
+use starknet_api::consensus_transaction::InternalConsensusTransaction;
+use starknet_api::data_availability::L1DataAvailabilityMode;
+use starknet_api::transaction::TransactionHash;
+use starknet_api::StarknetApiError;
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info, instrument, warn};
+
+use crate::metrics::{
+    CONSENSUS_L1_DATA_GAS_MISMATCH,
+    CONSENSUS_L1_GAS_MISMATCH,
+    CONSENSUS_NUM_BATCHES_IN_PROPOSAL,
+    CONSENSUS_NUM_TXS_IN_PROPOSAL,
+};
+use crate::orchestrator_versioned_constants::VersionedConstants;
+use crate::sequencer_consensus_context::{BuiltProposals, SequencerConsensusContextDeps};
+use crate::utils::{
+    convert_to_sn_api_block_info,
+    get_oracle_rate_and_prices,
+    retrospective_block_hash,
+    truncate_to_executed_txs,
+    GasPriceParams,
+};
+
+const GAS_PRICE_ABS_DIFF_MARGIN: u128 = 1;
+
+pub(crate) struct ProposalValidateArguments {
+    pub deps: SequencerConsensusContextDeps,
+    pub block_info_validation: BlockInfoValidation,
+    pub proposal_id: ProposalId,
+    pub timeout: Duration,
+    pub batcher_timeout_margin: Duration,
+    pub valid_proposals: Arc<Mutex<BuiltProposals>>,
+    pub content_receiver: mpsc::Receiver<ProposalPart>,
+    pub gas_price_params: GasPriceParams,
+    pub cancel_token: CancellationToken,
+}
+
+// Contains parameters required for validating block info.
+#[derive(Clone, Debug)]
+pub(crate) struct BlockInfoValidation {
+    pub height: BlockNumber,
+    pub block_timestamp_window_seconds: u64,
+    pub previous_block_info: Option<ConsensusBlockInfo>,
+    pub l1_da_mode: L1DataAvailabilityMode,
+    pub l2_gas_price_fri: GasPrice,
+}
+
+enum HandledProposalPart {
+    Continue,
+    Invalid(String),
+    Finished(ProposalCommitment, ProposalFin),
+    Failed(String),
+}
+
+enum SecondProposalPart {
+    BlockInfo(ConsensusBlockInfo),
+    Fin(ProposalFin),
+}
+
+type ValidateProposalResult<T> = Result<T, ValidateProposalError>;
+
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum ValidateProposalError {
+    #[error("Batcher error: {0}")]
+    Batcher(String, BatcherClientError),
+    #[error("State sync client error: {0}")]
+    StateSyncClientError(#[from] StateSyncClientError),
+    #[error("State sync is not ready: {0}")]
+    StateSyncNotReady(String),
+    // Consensus may exit early (e.g. sync).
+    #[error("Failed to send commitment to consensus: {0}")]
+    SendError(ProposalCommitment),
+    #[error("EthToStrkOracle error: {0}")]
+    EthToStrkOracle(#[from] EthToStrkOracleClientError),
+    #[error("L1GasPriceProvider error: {0}")]
+    L1GasPriceProvider(#[from] L1GasPriceClientError),
+    #[error("Block info conversion error: {0}")]
+    BlockInfoConversion(#[from] StarknetApiError),
+    #[error("Invalid BlockInfo: {2}. received:{0:?}, validation criteria {1:?}.")]
+    InvalidBlockInfo(ConsensusBlockInfo, BlockInfoValidation, String),
+    #[error("Validation timed out while {0}")]
+    ValidationTimeout(String),
+    #[error("Proposal interrupted while {0}")]
+    ProposalInterrupted(String),
+    #[error("Got an invalid second proposal part: {0:?}.")]
+    InvalidSecondProposalPart(Option<ProposalPart>),
+    #[error("Batcher returned Invalid status: {0}.")]
+    InvalidProposal(String),
+    #[error("Proposal part {1:?} failed validation: {0}.")]
+    ProposalPartFailed(String, Option<ProposalPart>),
+    #[error("proposal_commitment built by the batcher does not match the proposal fin.")]
+    ProposalFinMismatch,
+    #[error("Cannot calculate deadline. timeout: {timeout:?}, now: {now:?}")]
+    CannotCalculateDeadline { timeout: Duration, now: DateTime },
+}
+
+pub(crate) async fn validate_proposal(
+    mut args: ProposalValidateArguments,
+) -> ValidateProposalResult<ProposalCommitment> {
+    let mut content = Vec::new();
+    let mut final_n_executed_txs: Option<usize> = None;
+    let now = args.deps.clock.now();
+
+    let Some(deadline) = now.checked_add_signed(chrono::TimeDelta::from_std(args.timeout).unwrap())
+    else {
+        return Err(ValidateProposalError::CannotCalculateDeadline { timeout: args.timeout, now });
+    };
+
+    let block_info = match await_second_proposal_part(
+        &args.cancel_token,
+        deadline,
+        &mut args.content_receiver,
+        args.deps.clock.as_ref(),
+    )
+    .await?
+    {
+        SecondProposalPart::BlockInfo(block_info) => block_info,
+        SecondProposalPart::Fin(ProposalFin { proposal_commitment }) => {
+            return Ok(proposal_commitment);
+        }
+    };
+    is_block_info_valid(
+        args.block_info_validation.clone(),
+        block_info.clone(),
+        args.deps.eth_to_strk_oracle_client,
+        args.deps.clock.as_ref(),
+        args.deps.l1_gas_price_provider,
+        &args.gas_price_params,
+    )
+    .await?;
+
+    initiate_validation(
+        args.deps.batcher.as_ref(),
+        args.deps.state_sync_client,
+        block_info.clone(),
+        args.proposal_id,
+        args.timeout + args.batcher_timeout_margin,
+        args.deps.clock.as_ref(),
+    )
+    .await?;
+
+    // Validating the rest of the proposal parts.
+    let (built_block, received_fin) = loop {
+        tokio::select! {
+            _ = args.cancel_token.cancelled() => {
+                batcher_abort_proposal(args.deps.batcher.as_ref(), args.proposal_id).await;
+                return Err(ValidateProposalError::ProposalInterrupted(
+                    "validating proposal parts".to_string(),
+                ));
+            }
+            _ = sleep_until(deadline, args.deps.clock.as_ref()) => {
+                batcher_abort_proposal(args.deps.batcher.as_ref(), args.proposal_id).await;
+                return Err(ValidateProposalError::ValidationTimeout(
+                    "validating proposal parts".to_string(),
+                ));
+            }
+            proposal_part = args.content_receiver.next() => {
+                match handle_proposal_part(
+                    args.proposal_id,
+                    args.deps.batcher.as_ref(),
+                    proposal_part.clone(),
+                    &mut content,
+                    &mut final_n_executed_txs,
+                    args.deps.transaction_converter.clone(),
+                ).await {
+                    HandledProposalPart::Finished(built_block, received_fin) => {
+                        break (built_block, received_fin);
+                    }
+                    HandledProposalPart::Continue => {continue;}
+                    HandledProposalPart::Invalid(err) => {
+                        // No need to abort since the Batcher is the source of this info.
+                        return Err(ValidateProposalError::InvalidProposal(err));
+                    }
+                    HandledProposalPart::Failed(fail_reason) => {
+                        batcher_abort_proposal(args.deps.batcher.as_ref(), args.proposal_id).await;
+                        return Err(ValidateProposalError::ProposalPartFailed(fail_reason, proposal_part));
+                    }
+                }
+            }
+        }
+    };
+
+    let n_executed_txs = content.iter().map(|batch| batch.len()).sum::<usize>();
+    CONSENSUS_NUM_BATCHES_IN_PROPOSAL.set_lossy(content.len());
+    CONSENSUS_NUM_TXS_IN_PROPOSAL.set_lossy(n_executed_txs);
+
+    // Update valid_proposals before sending fin to avoid a race condition
+    // with `repropose` being called before `valid_proposals` is updated.
+    let mut valid_proposals = args.valid_proposals.lock().unwrap();
+    valid_proposals.insert_proposal_for_height(
+        &args.block_info_validation.height,
+        &built_block,
+        block_info,
+        content,
+        &args.proposal_id,
+    );
+
+    // TODO(matan): Switch to signature validation.
+    if built_block != received_fin.proposal_commitment {
+        return Err(ValidateProposalError::ProposalFinMismatch);
+    }
+
+    Ok(built_block)
+}
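`validate_proposal` above multiplexes three event sources in one `tokio::select!` loop: cancellation, a deadline, and the incoming part stream. A minimal, self-contained sketch of that pattern, assuming only `tokio`, `tokio-util`, and `futures` (all names are illustrative):

```rust
use std::time::Duration;

use futures::channel::mpsc;
use futures::StreamExt;
use tokio_util::sync::CancellationToken;

// Illustrative only: the same three-way select used by `validate_proposal`,
// reduced to strings. Whichever branch fires first decides the outcome.
async fn drain_or_abort(
    cancel: CancellationToken,
    mut parts: mpsc::Receiver<String>,
    timeout: Duration,
) -> Result<Vec<String>, &'static str> {
    let deadline = tokio::time::sleep(timeout);
    tokio::pin!(deadline);
    let mut out = Vec::new();
    loop {
        tokio::select! {
            _ = cancel.cancelled() => return Err("interrupted"),
            _ = &mut deadline => return Err("timeout"),
            part = parts.next() => match part {
                Some(p) if p == "Fin" => return Ok(out),
                Some(p) => out.push(p),
                None => return Err("channel closed"),
            },
        }
    }
}
```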
+#[instrument(level = "warn", skip_all, fields(?block_info_validation, ?block_info_proposed))]
+async fn is_block_info_valid(
+    block_info_validation: BlockInfoValidation,
+    block_info_proposed: ConsensusBlockInfo,
+    eth_to_strk_oracle_client: Arc<dyn EthToStrkOracleClientTrait>,
+    clock: &dyn Clock,
+    l1_gas_price_provider: Arc<dyn L1GasPriceProviderClient>,
+    gas_price_params: &GasPriceParams,
+) -> ValidateProposalResult<()> {
+    let now: u64 = clock.unix_now();
+    let last_block_timestamp =
+        block_info_validation.previous_block_info.as_ref().map_or(0, |info| info.timestamp);
+    if block_info_proposed.timestamp < last_block_timestamp {
+        return Err(ValidateProposalError::InvalidBlockInfo(
+            block_info_proposed.clone(),
+            block_info_validation.clone(),
+            format!(
+                "Timestamp is too old: last_block_timestamp={}, proposed={}",
+                last_block_timestamp, block_info_proposed.timestamp
+            ),
+        ));
+    }
+    if block_info_proposed.timestamp > now + block_info_validation.block_timestamp_window_seconds {
+        return Err(ValidateProposalError::InvalidBlockInfo(
+            block_info_proposed.clone(),
+            block_info_validation.clone(),
+            format!(
+                "Timestamp is in the future: now={}, block_timestamp_window_seconds={}, \
+                 proposed={}",
+                now,
+                block_info_validation.block_timestamp_window_seconds,
+                block_info_proposed.timestamp
+            ),
+        ));
+    }
+    if !(block_info_proposed.height == block_info_validation.height
+        && block_info_proposed.l1_da_mode == block_info_validation.l1_da_mode
+        && block_info_proposed.l2_gas_price_fri == block_info_validation.l2_gas_price_fri)
+    {
+        return Err(ValidateProposalError::InvalidBlockInfo(
+            block_info_proposed.clone(),
+            block_info_validation.clone(),
+            "Block info validation failed".to_string(),
+        ));
+    }
+    let (eth_to_fri_rate, l1_gas_prices) = get_oracle_rate_and_prices(
+        eth_to_strk_oracle_client,
+        l1_gas_price_provider,
+        block_info_proposed.timestamp,
+        block_info_validation.previous_block_info.as_ref(),
+        gas_price_params,
+    )
+    .await;
+    let l1_gas_price_margin_percent =
+        VersionedConstants::latest_constants().l1_gas_price_margin_percent.into();
+    debug!("L1 price info: {l1_gas_prices:?}");
+
+    let l1_gas_price_fri = l1_gas_prices.base_fee_per_gas.wei_to_fri(eth_to_fri_rate)?;
+    let l1_data_gas_price_fri = l1_gas_prices.blob_fee.wei_to_fri(eth_to_fri_rate)?;
+    let l1_gas_price_fri_proposed =
+        block_info_proposed.l1_gas_price_wei.wei_to_fri(block_info_proposed.eth_to_fri_rate)?;
+    let l1_data_gas_price_fri_proposed = block_info_proposed
+        .l1_data_gas_price_wei
+        .wei_to_fri(block_info_proposed.eth_to_fri_rate)?;
+
+    if !(within_margin(l1_gas_price_fri_proposed, l1_gas_price_fri, l1_gas_price_margin_percent)
+        && within_margin(
+            l1_data_gas_price_fri_proposed,
+            l1_data_gas_price_fri,
+            l1_gas_price_margin_percent,
+        ))
+    {
+        return Err(ValidateProposalError::InvalidBlockInfo(
+            block_info_proposed,
+            block_info_validation,
+            format!(
+                "L1 gas price mismatch: expected L1 gas price FRI={}, proposed={}, expected L1 \
+                 data gas price FRI={}, proposed={}, l1_gas_price_margin_percent={}",
+                l1_gas_price_fri,
+                l1_gas_price_fri_proposed,
+                l1_data_gas_price_fri,
+                l1_data_gas_price_fri_proposed,
+                l1_gas_price_margin_percent
+            ),
+        ));
+    }
+    // TODO(Asmaa): consider removing after 0.14 as other validators may use other sources.
+    if l1_gas_price_fri_proposed != l1_gas_price_fri {
+        CONSENSUS_L1_GAS_MISMATCH.increment(1);
+    }
+    if l1_data_gas_price_fri_proposed != l1_data_gas_price_fri {
+        CONSENSUS_L1_DATA_GAS_MISMATCH.increment(1);
+    }
+    Ok(())
+}
+
+fn within_margin(number1: GasPrice, number2: GasPrice, margin_percent: u128) -> bool {
+    // For small numbers (e.g., less than 10 wei, if margin is 10%), even an off-by-one
+    // error might be bigger than the margin, even if it is just a rounding error.
+    // We make an exception for such mismatch, and don't bother checking percentages
+    // if the difference in price is only one wei.
+    if number1.0.abs_diff(number2.0) <= GAS_PRICE_ABS_DIFF_MARGIN {
+        return true;
+    }
+    let margin = (number1.0 * margin_percent) / 100;
+    number1.0.abs_diff(number2.0) <= margin
+}
+
+// The second proposal part when validating a proposal must be:
+// 1. Fin - empty proposal.
+// 2. BlockInfo - required to begin executing TX batches.
+async fn await_second_proposal_part(
+    cancel_token: &CancellationToken,
+    deadline: DateTime,
+    content_receiver: &mut mpsc::Receiver<ProposalPart>,
+    clock: &dyn Clock,
+) -> ValidateProposalResult<SecondProposalPart> {
+    tokio::select! {
+        _ = cancel_token.cancelled() => {
+            Err(ValidateProposalError::ProposalInterrupted(
+                "waiting for second proposal part".to_string(),
+            ))
+        }
+        _ = sleep_until(deadline, clock) => {
+            Err(ValidateProposalError::ValidationTimeout(
+                "waiting for second proposal part".to_string(),
+            ))
+        }
+        proposal_part = content_receiver.next() => {
+            match proposal_part {
+                Some(ProposalPart::BlockInfo(block_info)) => {
+                    Ok(SecondProposalPart::BlockInfo(block_info))
+                }
+                Some(ProposalPart::Fin(ProposalFin { proposal_commitment })) => {
+                    warn!("Received an empty proposal.");
+                    Ok(SecondProposalPart::Fin(ProposalFin { proposal_commitment }))
+                }
+                x => {
+                    Err(ValidateProposalError::InvalidSecondProposalPart(x))
+                }
+            }
+        }
+    }
+}
+
+async fn initiate_validation(
+    batcher: &dyn BatcherClient,
+    state_sync_client: Arc<dyn StateSyncClient>,
+    block_info: ConsensusBlockInfo,
+    proposal_id: ProposalId,
+    timeout_plus_margin: Duration,
+    clock: &dyn Clock,
+) -> ValidateProposalResult<()> {
+    let chrono_timeout = chrono::Duration::from_std(timeout_plus_margin)
+        .expect("Can't convert timeout to chrono::Duration");
+
+    let input = ValidateBlockInput {
+        proposal_id,
+        deadline: clock.now() + chrono_timeout,
+        retrospective_block_hash: retrospective_block_hash(state_sync_client, &block_info).await?,
+        block_info: convert_to_sn_api_block_info(&block_info)?,
+    };
+    debug!("Initiating validate proposal: input={input:?}");
+    batcher.validate_block(input.clone()).await.map_err(|err| {
+        ValidateProposalError::Batcher(
+            format!("Failed to initiate validate proposal {input:?}."),
+            err,
+        )
+    })?;
+    Ok(())
+}
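Taken together, `await_second_proposal_part` above and `handle_proposal_part` below accept exactly two stream shapes: a lone `Fin` for an empty proposal, or `BlockInfo`, any number of `Txs` batches, one `ExecutedTransactionCount`, then `Fin`. A toy recognizer for that ordering (illustrative types, not the PR's):

```rust
#[derive(Debug, PartialEq)]
enum Part {
    BlockInfo,
    Txs,
    ExecutedTransactionCount,
    Fin,
}

// Returns true iff `parts` is a stream the validation flow accepts.
fn is_valid_order(parts: &[Part]) -> bool {
    match parts {
        // Empty proposal: Fin arrives as the second proposal part.
        [Part::Fin] => true,
        // Full proposal: everything between BlockInfo and the count must be Txs.
        [Part::BlockInfo, middle @ .., Part::ExecutedTransactionCount, Part::Fin] => {
            middle.iter().all(|p| *p == Part::Txs)
        }
        _ => false,
    }
}

fn main() {
    assert!(is_valid_order(&[Part::Fin]));
    assert!(is_valid_order(&[
        Part::BlockInfo, Part::Txs, Part::Txs, Part::ExecutedTransactionCount, Part::Fin
    ]));
    // Txs after the executed-transaction count is rejected, as in the tests below.
    assert!(!is_valid_order(&[
        Part::BlockInfo, Part::ExecutedTransactionCount, Part::Txs, Part::Fin
    ]));
}
```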
+/// Handles receiving a proposal from another node without blocking consensus:
+/// 1. Receive the proposal part from the network.
+/// 2. Pass it to the batcher.
+/// 3. Once finished, receive the commitment from the batcher.
+async fn handle_proposal_part(
+    proposal_id: ProposalId,
+    batcher: &dyn BatcherClient,
+    proposal_part: Option<ProposalPart>,
+    content: &mut Vec<Vec<InternalConsensusTransaction>>,
+    final_n_executed_txs: &mut Option<usize>,
+    transaction_converter: Arc<dyn TransactionConverterTrait>,
+) -> HandledProposalPart {
+    match proposal_part {
+        None => HandledProposalPart::Failed("Failed to receive proposal content".to_string()),
+        Some(ProposalPart::Fin(fin)) => {
+            info!("Received fin={fin:?}");
+            let Some(final_n_executed_txs_nonopt) = *final_n_executed_txs else {
+                return HandledProposalPart::Failed(
+                    "Received Fin without executed transaction count".to_string(),
+                );
+            };
+            // Output this along with the ID from batcher, to compare them.
+            let input = SendProposalContentInput {
+                proposal_id,
+                content: SendProposalContent::Finish(final_n_executed_txs_nonopt),
+            };
+            let response = batcher.send_proposal_content(input).await.unwrap_or_else(|e| {
+                panic!("Failed to send Fin to batcher: {proposal_id:?}. {e:?}")
+            });
+            let response_id = match response.response {
+                ProposalStatus::Finished(id) => id,
+                ProposalStatus::InvalidProposal(err) => return HandledProposalPart::Invalid(err),
+                status => panic!("Unexpected status: for {proposal_id:?}, {status:?}"),
+            };
+            let batcher_block_id = BlockHash(response_id.state_diff_commitment.0.0);
+
+            info!(
+                network_block_id = ?fin.proposal_commitment,
+                ?batcher_block_id,
+                final_n_executed_txs_nonopt,
+                "Finished validating proposal."
+            );
+            if final_n_executed_txs_nonopt == 0 {
+                warn!("Validated an empty proposal.");
+            }
+            HandledProposalPart::Finished(batcher_block_id, fin)
+        }
+        Some(ProposalPart::Transactions(TransactionBatch { transactions: txs })) => {
+            debug!("Received transaction batch with {} txs", txs.len());
+            if final_n_executed_txs.is_some() {
+                return HandledProposalPart::Failed(
+                    "Received transactions after executed transaction count".to_string(),
+                );
+            }
+            let txs =
+                futures::future::join_all(txs.into_iter().map(|tx| {
+                    transaction_converter.convert_consensus_tx_to_internal_consensus_tx(tx)
+                }))
+                .await
+                .into_iter()
+                .collect::<Result<Vec<_>, _>>();
+            let txs = match txs {
+                Ok(txs) => txs,
+                Err(e) => {
+                    return HandledProposalPart::Failed(format!(
+                        "Failed to convert transactions. Stopping the build of the current \
+                         proposal. {e:?}"
+                    ));
+                }
+            };
+            debug!(
+                "Converted transactions to internal representation. hashes={:?}",
+                txs.iter().map(|tx| tx.tx_hash()).collect::<Vec<_>>()
+            );
+
+            content.push(txs.clone());
+            let input =
+                SendProposalContentInput { proposal_id, content: SendProposalContent::Txs(txs) };
+            let response = batcher.send_proposal_content(input).await.unwrap_or_else(|e| {
+                panic!("Failed to send proposal content to batcher: {proposal_id:?}. {e:?}")
+            });
+            match response.response {
+                ProposalStatus::Processing => HandledProposalPart::Continue,
+                ProposalStatus::InvalidProposal(err) => HandledProposalPart::Invalid(err),
+                status => panic!("Unexpected status: for {proposal_id:?}, {status:?}"),
+            }
+        }
+        Some(ProposalPart::ExecutedTransactionCount(executed_txs_count)) => {
+            debug!("Received executed transaction count: {executed_txs_count}");
+            if final_n_executed_txs.is_some() {
+                return HandledProposalPart::Failed(
+                    "Received executed transaction count more than once".to_string(),
+                );
+            }
+            let executed_txs_count_usize_res: Result<usize, _> = executed_txs_count.try_into();
+            let Ok(executed_txs_count_usize) = executed_txs_count_usize_res else {
+                return HandledProposalPart::Failed(
+                    "Number of executed transactions should fit in usize".to_string(),
+                );
+            };
+            *final_n_executed_txs = Some(executed_txs_count_usize);
+            *content = truncate_to_executed_txs(content, executed_txs_count_usize);
+
+            HandledProposalPart::Continue
+        }
+        _ => HandledProposalPart::Failed("Invalid proposal part".to_string()),
+    }
+}
+
+async fn batcher_abort_proposal(batcher: &dyn BatcherClient, proposal_id: ProposalId) {
+    let input = SendProposalContentInput { proposal_id, content: SendProposalContent::Abort };
+    batcher
+        .send_proposal_content(input)
+        .await
+        .unwrap_or_else(|e| panic!("Failed to send Abort to batcher: {proposal_id:?}. {e:?}"));
+}
diff --git a/crates/apollo_consensus_orchestrator/src/validate_proposal_test.rs b/crates/apollo_consensus_orchestrator/src/validate_proposal_test.rs
new file mode 100644
index 00000000000..d05a88e3a88
--- /dev/null
+++ b/crates/apollo_consensus_orchestrator/src/validate_proposal_test.rs
@@ -0,0 +1,396 @@
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use apollo_batcher_types::batcher_types::{
+    ProposalCommitment,
+    ProposalId,
+    ProposalStatus,
+    SendProposalContent,
+    SendProposalContentInput,
+    SendProposalContentResponse,
+};
+use apollo_batcher_types::communication::BatcherClientError;
+use apollo_infra::component_client::ClientError;
+use apollo_protobuf::consensus::{ProposalFin, ProposalPart, TransactionBatch};
+use assert_matches::assert_matches;
+use futures::channel::mpsc;
+use futures::SinkExt;
+use num_rational::Ratio;
+use rstest::rstest;
+use starknet_api::block::{BlockHash, BlockNumber, GasPrice};
+use starknet_api::core::StateDiffCommitment;
+use starknet_api::data_availability::L1DataAvailabilityMode;
+use starknet_api::hash::PoseidonHash;
+use starknet_types_core::felt::Felt;
+use tokio_util::sync::CancellationToken;
+
+use crate::config::ContextConfig;
+use crate::orchestrator_versioned_constants::VersionedConstants;
+use crate::sequencer_consensus_context::BuiltProposals;
+use crate::test_utils::{
+    block_info,
+    create_test_and_network_deps,
+    TestDeps,
+    CHANNEL_SIZE,
+    TIMEOUT,
+    TX_BATCH,
+};
+use crate::utils::GasPriceParams;
+use crate::validate_proposal::{
+    validate_proposal,
+    within_margin,
+    BlockInfoValidation,
+    ProposalValidateArguments,
+    ValidateProposalError,
+};
+
+struct TestProposalValidateArguments {
+    pub deps: TestDeps,
+    pub block_info_validation: BlockInfoValidation,
+    pub proposal_id: ProposalId,
+    pub timeout: Duration,
+    pub batcher_timeout_margin: Duration,
+    pub valid_proposals: Arc<Mutex<BuiltProposals>>,
+    pub content_receiver: mpsc::Receiver<ProposalPart>,
+    pub gas_price_params: GasPriceParams,
+    pub cancel_token: CancellationToken,
+}
+
+impl From<TestProposalValidateArguments> for ProposalValidateArguments {
+    fn from(args: TestProposalValidateArguments) -> Self {
+        ProposalValidateArguments {
+            deps: args.deps.into(),
+            block_info_validation: args.block_info_validation,
+            proposal_id: args.proposal_id,
+            timeout: args.timeout,
+            batcher_timeout_margin: args.batcher_timeout_margin,
+            valid_proposals: args.valid_proposals,
+            content_receiver: args.content_receiver,
+            gas_price_params: args.gas_price_params,
+            cancel_token: args.cancel_token,
+        }
+    }
+}
+
+fn create_proposal_validate_arguments()
+-> (TestProposalValidateArguments, mpsc::Sender<ProposalPart>) {
+    let (mut deps, _) = create_test_and_network_deps();
+    deps.setup_default_expectations();
+    let block_info_validation = BlockInfoValidation {
+        height: BlockNumber(0),
+        block_timestamp_window_seconds: 60,
+        previous_block_info: None,
+        l1_da_mode: L1DataAvailabilityMode::Blob,
+        l2_gas_price_fri: VersionedConstants::latest_constants().min_gas_price,
+    };
+    let proposal_id = ProposalId(1);
+    let timeout = TIMEOUT;
+    let batcher_timeout_margin = TIMEOUT;
+    let valid_proposals = Arc::new(Mutex::new(BuiltProposals::new()));
+    let (content_sender, content_receiver) = mpsc::channel(CHANNEL_SIZE);
+    let context_config = ContextConfig::default();
+    let gas_price_params = GasPriceParams {
+        min_l1_gas_price_wei: GasPrice(context_config.min_l1_gas_price_wei),
+        max_l1_gas_price_wei: GasPrice(context_config.max_l1_gas_price_wei),
+        min_l1_data_gas_price_wei: GasPrice(context_config.min_l1_data_gas_price_wei),
+        max_l1_data_gas_price_wei: GasPrice(context_config.max_l1_data_gas_price_wei),
+        l1_data_gas_price_multiplier: Ratio::new(
+            context_config.l1_data_gas_price_multiplier_ppt,
+            1000,
+        ),
+        l1_gas_tip_wei: GasPrice(context_config.l1_gas_tip_wei),
+    };
+    let cancel_token = CancellationToken::new();
+
+    (
+        TestProposalValidateArguments {
+            deps,
+            block_info_validation,
+            proposal_id,
+            timeout,
+            batcher_timeout_margin,
+            valid_proposals,
+            content_receiver,
+            gas_price_params,
+            cancel_token,
+        },
+        content_sender,
+    )
+}
+
+#[tokio::test]
+async fn validate_empty_proposal() {
+    let (proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    // Send an empty proposal.
+    content_sender
+        .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() }))
+        .await
+        .unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert_matches!(res, Ok(val) if val == BlockHash::default());
+}
+
+#[tokio::test]
+async fn validate_proposal_success() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    let n_executed = 1;
+    // Setup deps to validate the block.
+    proposal_args.deps.setup_deps_for_validate(BlockNumber(0), n_executed);
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+    // Send transactions, then executed transaction count, and finally Fin part.
+    content_sender
+        .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.clone() }))
+        .await
+        .unwrap();
+    content_sender
+        .send(ProposalPart::ExecutedTransactionCount(n_executed.try_into().unwrap()))
+        .await
+        .unwrap();
+    content_sender
+        .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() }))
+        .await
+        .unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert_matches!(res, Ok(val) if val == BlockHash::default());
+}
+
+#[tokio::test]
+async fn interrupt_proposal() {
+    let (proposal_args, _content_sender) = create_proposal_validate_arguments();
+    // Interrupt the proposal.
+    proposal_args.cancel_token.cancel();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert!(matches!(res, Err(ValidateProposalError::ProposalInterrupted(_))));
+}
+
+#[tokio::test]
+async fn validation_timeout() {
+    let (mut proposal_args, _content_sender) = create_proposal_validate_arguments();
+    // Set a very short timeout to trigger a timeout error.
+    proposal_args.timeout = Duration::from_micros(1);
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert!(matches!(res, Err(ValidateProposalError::ValidationTimeout(_))));
+}
+
+#[tokio::test]
+async fn invalid_second_proposal_part() {
+    let (proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    // Send an invalid proposal part (not BlockInfo or Fin).
+    content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert!(matches!(res, Err(ValidateProposalError::InvalidSecondProposalPart(_))));
+}
+
+#[tokio::test]
+async fn invalid_block_info() {
+    let (proposal_args, mut content_sender) = create_proposal_validate_arguments();
+
+    let mut block_info = block_info(BlockNumber(0));
+    block_info.l2_gas_price_fri =
+        GasPrice(proposal_args.block_info_validation.l2_gas_price_fri.0 + 1);
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert!(matches!(res, Err(ValidateProposalError::InvalidBlockInfo(_, _, _))));
+}
+
+#[tokio::test]
+async fn validate_block_fail() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    // Setup batcher to return an error when validating the block.
+    proposal_args.deps.batcher.expect_validate_block().returning(|_| {
+        Err(BatcherClientError::ClientError(ClientError::CommunicationFailure("".to_string())))
+    });
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert_matches!(res, Err(ValidateProposalError::Batcher(msg, _))
+        if msg.contains("Failed to initiate validate proposal"));
+}
+
+#[tokio::test]
+async fn send_executed_transaction_count_more_than_once() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    // Setup batcher to validate the block.
+    proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(()));
+    // Batcher aborts the proposal.
+    proposal_args
+        .deps
+        .batcher
+        .expect_send_proposal_content()
+        .withf(move |input: &SendProposalContentInput| {
+            input.proposal_id == proposal_args.proposal_id
+                && input.content == SendProposalContent::Abort
+        })
+        .returning(|_| Ok(SendProposalContentResponse { response: ProposalStatus::Aborted }));
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+    // Send executed transaction count more than once.
+    content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap();
+    content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert_matches!(res, Err(ValidateProposalError::ProposalPartFailed(err, _))
+        if err.contains("Received executed transaction count more than once"));
+}
+
+#[tokio::test]
+async fn receive_fin_without_executed_transaction_count() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    // Setup batcher to validate the block.
+    proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(()));
+    // Batcher aborts the proposal.
+    proposal_args
+        .deps
+        .batcher
+        .expect_send_proposal_content()
+        .withf(move |input: &SendProposalContentInput| {
+            input.proposal_id == proposal_args.proposal_id
+                && input.content == SendProposalContent::Abort
+        })
+        .returning(|_| Ok(SendProposalContentResponse { response: ProposalStatus::Aborted }));
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+    // Send Fin part without sending executed transaction count.
+    content_sender
+        .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() }))
+        .await
+        .unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert_matches!(res, Err(ValidateProposalError::ProposalPartFailed(err, _))
+        if err.contains("Received Fin without executed transaction count"));
+}
+
+#[tokio::test]
+async fn receive_txs_after_executed_transaction_count() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    // Setup batcher to validate the block.
+    proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(()));
+    // Batcher aborts the proposal.
+    proposal_args
+        .deps
+        .batcher
+        .expect_send_proposal_content()
+        .withf(move |input: &SendProposalContentInput| {
+            input.proposal_id == proposal_args.proposal_id
+                && input.content == SendProposalContent::Abort
+        })
+        .returning(|_| Ok(SendProposalContentResponse { response: ProposalStatus::Aborted }));
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+    content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap();
+    // Send transactions after executed transaction count.
+    content_sender
+        .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.clone() }))
+        .await
+        .unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert_matches!(res, Err(ValidateProposalError::ProposalPartFailed(err, _))
+        if err.contains("Received transactions after executed transaction count"));
+}
+
+#[tokio::test]
+async fn proposal_fin_mismatch() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    let n_executed = 0;
+    // Setup batcher to validate the block.
+    proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(()));
+    // Batcher returns a different block hash than the one received in Fin.
+    let built_block = StateDiffCommitment(PoseidonHash(Felt::ONE));
+    proposal_args
+        .deps
+        .batcher
+        .expect_send_proposal_content()
+        .withf(move |input: &SendProposalContentInput| {
+            input.proposal_id == proposal_args.proposal_id
+                && input.content == SendProposalContent::Finish(n_executed)
+        })
+        .returning(move |_| {
+            Ok(SendProposalContentResponse {
+                response: ProposalStatus::Finished(ProposalCommitment {
+                    state_diff_commitment: built_block,
+                }),
+            })
+        });
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+    content_sender
+        .send(ProposalPart::ExecutedTransactionCount(n_executed.try_into().unwrap()))
+        .await
+        .unwrap();
+    // Send Fin part.
+    let received_fin = BlockHash::default();
+    content_sender
+        .send(ProposalPart::Fin(ProposalFin { proposal_commitment: received_fin }))
+        .await
+        .unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert!(matches!(res, Err(ValidateProposalError::ProposalFinMismatch)));
+}
+
+#[tokio::test]
+async fn batcher_returns_invalid_proposal() {
+    let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments();
+    let n_executed = 0;
+    // Setup batcher to validate the block.
+    proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(()));
+    // Batcher returns an invalid proposal status.
+    proposal_args
+        .deps
+        .batcher
+        .expect_send_proposal_content()
+        .withf(move |input: &SendProposalContentInput| {
+            input.proposal_id == proposal_args.proposal_id
+                && input.content == SendProposalContent::Finish(n_executed)
+        })
+        .returning(|_| {
+            Ok(SendProposalContentResponse {
+                response: ProposalStatus::InvalidProposal("test error".to_string()),
+            })
+        });
+    // Send a valid block info.
+    let block_info = block_info(BlockNumber(0));
+    content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap();
+    content_sender
+        .send(ProposalPart::ExecutedTransactionCount(n_executed.try_into().unwrap()))
+        .await
+        .unwrap();
+    content_sender
+        .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() }))
+        .await
+        .unwrap();
+
+    let res = validate_proposal(proposal_args.into()).await;
+    assert!(matches!(res, Err(ValidateProposalError::InvalidProposal(_))));
+}
+
+#[rstest]
+#[case::big_number_in_margin(1000, 1050, 10, true)]
+#[case::big_number_out_of_margin(1000, 1150, 10, false)]
+#[case::small_number_in_margin(9, 10, 10, true)]
+#[case::small_number_out_of_margin(9, 11, 10, false)]
+#[case::identical_numbers(12345, 12345, 1, true)]
+fn test_within_margin(
+    #[case] a: u128,
+    #[case] b: u128,
+    #[case] margin: u128,
+    #[case] expected: bool,
+) {
+    assert_eq!(within_margin(GasPrice(a), GasPrice(b), margin), expected);
+}
diff --git a/crates/apollo_dashboard/Cargo.toml b/crates/apollo_dashboard/Cargo.toml
new file mode 100644
index 00000000000..1109989775a
--- /dev/null
+++ b/crates/apollo_dashboard/Cargo.toml
@@ -0,0 +1,51 @@
+[package]
+name = "apollo_dashboard"
+version.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
+
+[features]
+testing = []
+
+[lints]
+workspace = true
+
+[dependencies]
+apollo_batcher.workspace = true
+apollo_compile_to_casm.workspace = true
+apollo_consensus.workspace = true
+apollo_consensus_manager.workspace = true
+apollo_consensus_orchestrator.workspace = true
+apollo_gateway.workspace = true
+apollo_http_server.workspace = true
+apollo_infra.workspace = true
+apollo_infra_utils.workspace = true
+apollo_l1_gas_price.workspace = true
+apollo_l1_provider.workspace = true
+apollo_mempool.workspace = true
+apollo_mempool_p2p.workspace = true
+apollo_metrics.workspace = true
+apollo_state_sync_metrics.workspace = true
+blockifier.workspace = true
+indexmap = { workspace = true, features = ["serde"] }
+serde = { workspace = true, features = ["derive"] }
+serde_json = { workspace = true, features = ["arbitrary_precision"] }
+
+[dev-dependencies]
+apollo_batcher = { workspace = true, features = ["testing"] }
+apollo_class_manager = { workspace = true, features = ["testing"] }
+apollo_compile_to_casm = { workspace = true, features = ["testing"] }
+apollo_consensus = { workspace = true, features = ["testing"] }
+apollo_consensus_manager = { workspace = true, features = ["testing"] }
+apollo_consensus_orchestrator = { workspace = true, features = ["testing"] }
+apollo_gateway = { workspace = true, features = ["testing"] }
+apollo_http_server = { workspace = true, features = ["testing"] }
+apollo_infra = { workspace = true, features = ["testing"] }
+apollo_infra_utils = { workspace = true, features = ["testing"] }
+apollo_l1_gas_price = { workspace = true, features = ["testing"] }
+apollo_l1_provider = { workspace = true, features = ["testing"] }
+apollo_mempool = { workspace = true, features = ["testing"] }
+apollo_mempool_p2p = { workspace = true, features = ["testing"] }
+apollo_state_sync_metrics = { workspace = true, features = ["testing"] }
diff --git a/crates/apollo_dashboard/resources/dev_grafana.json b/crates/apollo_dashboard/resources/dev_grafana.json
new file mode 100644
index 00000000000..31e35d7c29b
--- /dev/null
+++ b/crates/apollo_dashboard/resources/dev_grafana.json
@@ -0,0 +1,1559 @@
+{
+  "Sequencer Node Dashboard": {
+    "Batcher": [
+      {
+        "title":
"batcher_proposal_started", + "description": "Counter of proposals started", + "type": "stat", + "exprs": [ + "batcher_proposal_started{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_proposal_succeeded", + "description": "Counter of successful proposals", + "type": "stat", + "exprs": [ + "batcher_proposal_succeeded{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_proposal_failed", + "description": "Counter of failed proposals", + "type": "stat", + "exprs": [ + "batcher_proposal_failed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_batched_transactions", + "description": "Counter of batched transactions across all forks", + "type": "stat", + "exprs": [ + "batcher_batched_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_last_batched_block", + "description": "The last block received by batching", + "type": "stat", + "exprs": [ + "batcher_last_batched_block{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "rejection_ratio", + "description": "Ratio of rejected transactions out of all processed, over the last 5 minutes", + "type": "timeseries", + "exprs": [ + "100 * (increase(batcher_rejected_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) / (increase(batcher_rejected_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) + increase(batcher_batched_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])))" + ], + "extra_params": {} + } + ], + "Consensus": [ + { + "title": "consensus_block_number", + "description": "The block number consensus is working to decide", + "type": "timeseries", + "exprs": [ + "consensus_block_number{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_round", + "description": "The round of the state machine", + "type": "timeseries", + "exprs": [ + "consensus_round{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "Average consensus round", + "description": "Average consensus round (10m)", + "type": "timeseries", + "exprs": [ + "avg_over_time(consensus_round{cluster=~\"$cluster\", namespace=~\"$namespace\"}[10m])" + ], + "extra_params": {} + }, + { + "title": "consensus_round_above_zero", + "description": "The number of times the consensus round has increased above zero", + "type": "timeseries", + "exprs": [ + "consensus_round_above_zero{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_max_cached_block_number", + "description": "How many blocks after current are cached", + "type": "timeseries", + "exprs": [ + "consensus_max_cached_block_number{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_cached_votes", + "description": "How many votes are cached when starting to work on a new block number", + "type": "timeseries", + "exprs": [ + "consensus_cached_votes{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_decisions_reached_by_consensus", + "description": "The total number of decisions reached by way of consensus", + "type": "timeseries", + "exprs": [ + "consensus_decisions_reached_by_consensus{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": 
{} + }, + { + "title": "consensus_decisions_reached_by_sync", + "description": "The total number of decisions reached by way of sync", + "type": "timeseries", + "exprs": [ + "consensus_decisions_reached_by_sync{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_proposals_received", + "description": "The total number of proposals received", + "type": "timeseries", + "exprs": [ + "consensus_proposals_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_proposals_valid_init", + "description": "The total number of proposals received with a valid init", + "type": "timeseries", + "exprs": [ + "consensus_proposals_valid_init{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_proposals_validated", + "description": "The total number of complete, valid proposals received", + "type": "timeseries", + "exprs": [ + "consensus_proposals_validated{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_proposals_invalid", + "description": "The total number of proposals that failed validation", + "type": "timeseries", + "exprs": [ + "consensus_proposals_invalid{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_build_proposal_total", + "description": "The total number of proposals built", + "type": "timeseries", + "exprs": [ + "consensus_build_proposal_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_build_proposal_failed", + "description": "The number of proposals that failed to be built", + "type": "timeseries", + "exprs": [ + "consensus_build_proposal_failed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_reproposals", + "description": "The number of reproposals sent", + "type": "timeseries", + "exprs": [ + "consensus_reproposals{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_new_value_locks", + "description": "The number of times consensus has attained a lock on a new value", + "type": "timeseries", + "exprs": [ + "consensus_new_value_locks{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_held_locks", + "description": "The number of times consensus progressed to a new round while holding a lock", + "type": "timeseries", + "exprs": [ + "consensus_held_locks{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_timeouts", + "description": "The number of times consensus has timed out", + "type": "timeseries", + "exprs": [ + "sum by (timeout_reason) (consensus_timeouts{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "consensus_num_batches_in_proposal", + "description": "The number of transaction batches in a valid proposal received", + "type": "timeseries", + "exprs": [ + "consensus_num_batches_in_proposal{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_num_txs_in_proposal", + "description": "The total number of individual transactions in a valid proposal received", + "type": "timeseries", + "exprs": [ + "consensus_num_txs_in_proposal{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + 
"title": "consensus_inbound_stream_started", + "description": "The total number of inbound streams started", + "type": "timeseries", + "exprs": [ + "consensus_inbound_stream_started{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_inbound_stream_evicted", + "description": "The total number of inbound streams evicted due to cache capacity", + "type": "timeseries", + "exprs": [ + "consensus_inbound_stream_evicted{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_inbound_stream_finished", + "description": "The total number of inbound streams finished", + "type": "timeseries", + "exprs": [ + "consensus_inbound_stream_finished{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_outbound_stream_started", + "description": "The total number of outbound streams started", + "type": "timeseries", + "exprs": [ + "consensus_outbound_stream_started{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_outbound_stream_finished", + "description": "The total number of outbound streams finished", + "type": "timeseries", + "exprs": [ + "consensus_outbound_stream_finished{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_l2_gas_price", + "description": "The L2 gas price calculated in an accepted proposal", + "type": "timeseries", + "exprs": [ + "consensus_l2_gas_price{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "cende_last_prepared_blob_block_number", + "description": "The blob block number that cende knows. That means the sequencer can be the proposer only if the current height is greater by one than this value.", + "type": "timeseries", + "exprs": [ + "cende_last_prepared_blob_block_number{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "cende_prepare_blob_for_next_height_latency", + "description": "The time it takes to prepare the blob for the next height, i.e create the blob object.", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(cende_prepare_blob_for_next_height_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(cende_prepare_blob_for_next_height_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + }, + { + "title": "cende_write_prev_height_blob_latency", + "description": "Be careful with this metric, if the blob was already written by another request, the latency is much lower since writing to Aerospike is not needed.", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(cende_write_prev_height_blob_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(cende_write_prev_height_blob_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + }, + { + "title": "cende_write_blob_success", + "description": "The number of successful blob writes to Aerospike", + "type": "timeseries", + "exprs": [ + "cende_write_blob_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "cende_write_blob_failure", + "description": "The number of failed blob writes to Aerospike", + "type": "timeseries", + "exprs": [ 
+ "sum by (cende_write_failure_reason) (cende_write_blob_failure{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "consensus_l1_data_gas_mismatch", + "description": "The number of times the L1 data gas in a proposal does not match the value expected by this validator", + "type": "timeseries", + "exprs": [ + "consensus_l1_data_gas_mismatch{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_l1_gas_mismatch", + "description": "The number of times the L1 gas in a proposal does not match the value expected by this validator", + "type": "timeseries", + "exprs": [ + "consensus_l1_gas_mismatch{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + } + ], + "Http Server": [ + { + "title": "http_server_added_transactions_total", + "description": "Total number of transactions added", + "type": "timeseries", + "exprs": [ + "http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "http_server_transactions_received_rate (TPS)", + "description": "The rate of transactions received by the HTTP Server during the last 20 minutes", + "type": "timeseries", + "exprs": [ + "sum(rate(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])) or vector(0)" + ], + "extra_params": {} + }, + { + "title": "http_server_added_transactions_success", + "description": "Number of successfully added transactions", + "type": "timeseries", + "exprs": [ + "http_server_added_transactions_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "http_server_added_transactions_failure", + "description": "Number of faulty added transactions", + "type": "timeseries", + "exprs": [ + "http_server_added_transactions_failure{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "http_server_added_transactions_internal_error", + "description": "Number of faulty added transactions failing on internal error", + "type": "timeseries", + "exprs": [ + "http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "http_server_added_transactions_deprecated_error", + "description": "Number of faulty added transactions failing on deprecated error", + "type": "timeseries", + "exprs": [ + "http_server_added_transactions_deprecated_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "http_server_add_tx_latency", + "description": "Latency of HTTP add_tx endpoint in secs", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(http_server_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(http_server_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "State Sync": [ + { + "title": "apollo_state_sync_processed_transactions", + "description": "The number of transactions processed by the state sync component", + "type": "stat", + "exprs": [ + "apollo_state_sync_processed_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_state_sync_reverted_transactions", + "description": "The number of transactions reverted by the state sync component", + "type": "stat", + "exprs": 
[ + "apollo_state_sync_reverted_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_central_sync_central_block_marker", + "description": "The first block number that doesn't exist yet", + "type": "stat", + "exprs": [ + "apollo_central_sync_central_block_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_state_sync_body_marker", + "description": "The first block number for which the state sync component does not have a body", + "type": "stat", + "exprs": [ + "apollo_state_sync_body_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_state_sync_class_manager_marker", + "description": "The first block number for which the state sync component does not guarantee all of the corresponding classes are stored in the class manager component", + "type": "stat", + "exprs": [ + "apollo_state_sync_class_manager_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_state_sync_header_marker", + "description": "The first block number for which the state sync component does not have a header", + "type": "stat", + "exprs": [ + "apollo_state_sync_header_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_state_sync_state_marker", + "description": "The first block number for which the state sync component does not have a state body", + "type": "stat", + "exprs": [ + "apollo_state_sync_state_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + } + ], + "MempoolP2p": [ + { + "title": "apollo_mempool_p2p_num_connected_peers", + "description": "The number of connected peers to the mempool p2p component", + "type": "stat", + "exprs": [ + "apollo_mempool_p2p_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_mempool_p2p_num_sent_messages", + "description": "The number of messages sent by the mempool p2p component", + "type": "stat", + "exprs": [ + "apollo_mempool_p2p_num_sent_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_mempool_p2p_num_received_messages", + "description": "The number of messages received by the mempool p2p component", + "type": "stat", + "exprs": [ + "apollo_mempool_p2p_num_received_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_mempool_p2p_broadcasted_transaction_batch_size", + "description": "The number of transactions in batches broadcast by the mempool p2p component", + "type": "stat", + "exprs": [ + "histogram_quantile(0.50, sum(rate(apollo_mempool_p2p_broadcasted_transaction_batch_size_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(apollo_mempool_p2p_broadcasted_transaction_batch_size_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "ConsensusP2p": [ + { + "title": "apollo_consensus_num_connected_peers", + "description": "The number of connected peers to the consensus p2p component", + "type": "timeseries", + "exprs": [ + "apollo_consensus_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_consensus_votes_num_sent_messages", + "description": "The number of 
messages sent by the consensus p2p component over the Votes topic", + "type": "timeseries", + "exprs": [ + "apollo_consensus_votes_num_sent_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_consensus_votes_num_received_messages", + "description": "The number of messages received by the consensus p2p component over the Votes topic", + "type": "timeseries", + "exprs": [ + "apollo_consensus_votes_num_received_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_consensus_proposals_num_sent_messages", + "description": "The number of messages sent by the consensus p2p component over the Proposals topic", + "type": "timeseries", + "exprs": [ + "apollo_consensus_proposals_num_sent_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_consensus_proposals_num_received_messages", + "description": "The number of messages received by the consensus p2p component over the Proposals topic", + "type": "timeseries", + "exprs": [ + "apollo_consensus_proposals_num_received_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "consensus_conflicting_votes", + "description": "The number of times consensus has received conflicting votes", + "type": "timeseries", + "exprs": [ + "consensus_conflicting_votes{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + } + ], + "StateSyncP2p": [ + { + "title": "apollo_p2p_sync_num_connected_peers", + "description": "The number of connected peers to the p2p sync component", + "type": "stat", + "exprs": [ + "apollo_p2p_sync_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_p2p_sync_num_active_inbound_sessions", + "description": "The number of inbound sessions to the p2p sync component", + "type": "stat", + "exprs": [ + "apollo_p2p_sync_num_active_inbound_sessions{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "apollo_p2p_sync_num_active_outbound_sessions", + "description": "The number of outbound sessions to the p2p sync component", + "type": "stat", + "exprs": [ + "apollo_p2p_sync_num_active_outbound_sessions{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + } + ], + "Gateway": [ + { + "title": "gateway_transactions_received (by tx_type)", + "description": "Counter of transactions received, grouped by transaction type", + "type": "stat", + "exprs": [ + "sum by (tx_type) (gateway_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "gateway_transactions_received (by source)", + "description": "Counter of transactions received, grouped by source", + "type": "stat", + "exprs": [ + "sum by (source) (gateway_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "gateway_transactions_received_rate (TPS)", + "description": "The rate of transactions received by the gateway during the last 20 minutes", + "type": "timeseries", + "exprs": [ + "sum(rate(gateway_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])) or vector(0)" + ], + "extra_params": {} + }, + { + "title": "gateway_add_tx_latency", + "description": "Latency of gateway add_tx function in secs", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(gateway_add_tx_latency_bucket{cluster=~\"$cluster\", 
namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(gateway_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + }, + { + "title": "gateway_validate_tx_latency", + "description": "Latency of gateway validate function in secs", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(gateway_validate_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(gateway_validate_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + }, + { + "title": "gateway_transactions_failed", + "description": "Counter of failed transactions", + "type": "stat", + "exprs": [ + "sum by (tx_type) (gateway_transactions_failed{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "gateway_transactions_sent_to_mempool", + "description": "Counter of transactions sent to the mempool", + "type": "stat", + "exprs": [ + "sum by (tx_type) (gateway_transactions_sent_to_mempool{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + } + ], + "Mempool": [ + { + "title": "mempool_transactions_received", + "description": "Counter of transactions received by the mempool", + "type": "stat", + "exprs": [ + "sum by (tx_type) (mempool_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "mempool_transactions_received_rate (TPS)", + "description": "The rate of transactions received by the mempool during the last 20 minutes", + "type": "timeseries", + "exprs": [ + "sum(rate(mempool_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])) or vector(0)" + ], + "extra_params": {} + }, + { + "title": "mempool_transactions_dropped", + "description": "Counter of transactions dropped from the mempool", + "type": "stat", + "exprs": [ + "sum by (drop_reason) (mempool_transactions_dropped{cluster=~\"$cluster\", namespace=~\"$namespace\"})" + ], + "extra_params": {} + }, + { + "title": "mempool_txs_committed", + "description": "The number of transactions that were committed to block", + "type": "stat", + "exprs": [ + "mempool_txs_committed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_pool_size", + "description": "The average size of the pool", + "type": "timeseries", + "exprs": [ + "avg_over_time(mempool_pool_size{cluster=~\"$cluster\", namespace=~\"$namespace\"}[2m])" + ], + "extra_params": {} + }, + { + "title": "mempool_priority_queue_size", + "description": "The average size of the priority queue", + "type": "timeseries", + "exprs": [ + "avg_over_time(mempool_priority_queue_size{cluster=~\"$cluster\", namespace=~\"$namespace\"}[2m])" + ], + "extra_params": {} + }, + { + "title": "mempool_pending_queue_size", + "description": "The average size of the pending queue", + "type": "timeseries", + "exprs": [ + "avg_over_time(mempool_pending_queue_size{cluster=~\"$cluster\", namespace=~\"$namespace\"}[2m])" + ], + "extra_params": {} + }, + { + "title": "mempool_total_size_bytes", + "description": "The average total transaction size in bytes over time in the mempool", + "type": "timeseries", + "exprs": [ + "avg_over_time(mempool_total_size_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}[2m])" + ], + "extra_params": {} + }, + { + "title": "mempool_get_txs_size", + "description": "The average size of 
the get_txs", + "type": "timeseries", + "exprs": [ + "avg_over_time(mempool_get_txs_size{cluster=~\"$cluster\", namespace=~\"$namespace\"}[2m])" + ], + "extra_params": {} + }, + { + "title": "mempool_delayed_declare_size", + "description": "The average number of delayed declare transactions", + "type": "timeseries", + "exprs": [ + "avg_over_time(mempool_delayed_declare_size{cluster=~\"$cluster\", namespace=~\"$namespace\"}[2m])" + ], + "extra_params": {} + }, + { + "title": "mempool_transaction_time_spent", + "description": "The time (secs) that a transaction spent in the mempool", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(mempool_transaction_time_spent_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(mempool_transaction_time_spent_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "Blockifier": [ + { + "title": "class_cache_miss_ratio", + "description": "The ratio of cache misses when requesting compiled classes from the Blockifier State Reader", + "type": "timeseries", + "exprs": [ + "100 * (increase(class_cache_misses{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) / (increase(class_cache_misses{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) + increase(class_cache_hits{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])))" + ], + "extra_params": {} + }, + { + "title": "native_class_returned_ratio", + "description": "The ratio of Native classes returned by the Blockifier", + "type": "timeseries", + "exprs": [ + "100 * (increase(native_class_returned{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) / (increase(class_cache_hits{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) + increase(class_cache_misses{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])))" + ], + "extra_params": {} + }, + { + "title": "native_compilation_error", + "description": "Counter of Native compilation failures in the blockifier", + "type": "stat", + "exprs": [ + "native_compilation_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "native_execution_ratio", + "description": "The ratio of calls running Cairo Native in the Blockifier", + "type": "timeseries", + "exprs": [ + "100 * (increase(calls_running_native{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) / (increase(number_of_total_calls{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])))" + ], + "extra_params": {} + } + ], + "Batcher Infra": [ + { + "title": "batcher_local_msgs_received", + "description": "Counter of messages received by batcher local server", + "type": "timeseries", + "exprs": [ + "batcher_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_local_msgs_processed", + "description": "Counter of messages processed by batcher local server", + "type": "timeseries", + "exprs": [ + "batcher_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_local_queue_depth", + "description": "The depth of the batcher's local message queue", + "type": "timeseries", + "exprs": [ + "batcher_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_remote_msgs_received", + "description": "Counter of messages received by batcher remote server", + "type": "timeseries", + "exprs": [ + 
"batcher_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_remote_valid_msgs_received", + "description": "Counter of valid messages received by batcher remote server", + "type": "timeseries", + "exprs": [ + "batcher_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_remote_msgs_processed", + "description": "Counter of messages processed by batcher remote server", + "type": "timeseries", + "exprs": [ + "batcher_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "batcher_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a batcher remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(batcher_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(batcher_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "Gateway Infra": [ + { + "title": "gateway_local_msgs_received", + "description": "Counter of messages received by gateway local server", + "type": "timeseries", + "exprs": [ + "gateway_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "gateway_local_msgs_processed", + "description": "Counter of messages processed by gateway local server", + "type": "timeseries", + "exprs": [ + "gateway_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "gateway_local_queue_depth", + "description": "The depth of the gateway's local message queue", + "type": "timeseries", + "exprs": [ + "gateway_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "gateway_remote_msgs_received", + "description": "Counter of messages received by gateway remote server", + "type": "timeseries", + "exprs": [ + "gateway_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "gateway_remote_valid_msgs_received", + "description": "Counter of valid messages received by gateway remote server", + "type": "timeseries", + "exprs": [ + "gateway_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "gateway_remote_msgs_processed", + "description": "Counter of messages processed by gateway remote server", + "type": "timeseries", + "exprs": [ + "gateway_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "gateway_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a gateway remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(gateway_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(gateway_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "Class Manager Infra": [ + { + "title": "class_manager_local_msgs_received", + "description": "Counter of messages received by class manager local server", + "type": "timeseries", + 
"exprs": [ + "class_manager_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "class_manager_local_msgs_processed", + "description": "Counter of messages processed by class manager local server", + "type": "timeseries", + "exprs": [ + "class_manager_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "class_manager_local_queue_depth", + "description": "The depth of the class manager's local message queue", + "type": "timeseries", + "exprs": [ + "class_manager_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "class_manager_remote_msgs_received", + "description": "Counter of messages received by class manager remote server", + "type": "timeseries", + "exprs": [ + "class_manager_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "class_manager_remote_valid_msgs_received", + "description": "Counter of valid messages received by class manager remote server", + "type": "timeseries", + "exprs": [ + "class_manager_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "class_manager_remote_msgs_processed", + "description": "Counter of messages processed by class manager remote server", + "type": "timeseries", + "exprs": [ + "class_manager_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "class_manager_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a class manager remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(class_manager_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(class_manager_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "L1 Provider Infra": [ + { + "title": "l1_provider_local_msgs_received", + "description": "Counter of messages received by L1 provider local server", + "type": "timeseries", + "exprs": [ + "l1_provider_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_provider_local_msgs_processed", + "description": "Counter of messages processed by L1 provider local server", + "type": "timeseries", + "exprs": [ + "l1_provider_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_provider_local_queue_depth", + "description": "The depth of the L1 provider's local message queue", + "type": "timeseries", + "exprs": [ + "l1_provider_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_provider_remote_msgs_received", + "description": "Counter of messages received by L1 provider remote server", + "type": "timeseries", + "exprs": [ + "l1_provider_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_provider_remote_valid_msgs_received", + "description": "Counter of valid messages received by L1 provider remote server", + "type": "timeseries", + "exprs": [ + "l1_provider_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + 
"extra_params": {} + }, + { + "title": "l1_provider_remote_msgs_processed", + "description": "Counter of messages processed by L1 provider remote server", + "type": "timeseries", + "exprs": [ + "l1_provider_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_provider_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a L1 provider remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(l1_provider_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(l1_provider_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "L1 Provider": [ + { + "title": "l1_message_scraper_success_count", + "description": "Number of times the L1 message scraper successfully scraped messages and updated the provider", + "type": "timeseries", + "exprs": [ + "l1_message_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_message_scraper_baselayer_error_count", + "description": "Number of times the L1 message scraper encountered an error while scraping the base layer", + "type": "timeseries", + "exprs": [ + "l1_message_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_message_scraper_reorg_detected", + "description": "Number of times the L1 message scraper detected a reorganization in the base layer", + "type": "timeseries", + "exprs": [ + "l1_message_scraper_reorg_detected{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + } + ], + "L1 Gas Price Infra": [ + { + "title": "l1_gas_price_provider_local_msgs_received", + "description": "Counter of messages received by L1 gas price provider local server", + "type": "timeseries", + "exprs": [ + "l1_gas_price_provider_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_local_msgs_processed", + "description": "Counter of messages processed by L1 gas price provider local server", + "type": "timeseries", + "exprs": [ + "l1_gas_price_provider_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_local_queue_depth", + "description": "The depth of the L1 gas price provider's local message queue", + "type": "timeseries", + "exprs": [ + "l1_gas_price_provider_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_remote_msgs_received", + "description": "Counter of messages received by L1 gas price provider remote server", + "type": "timeseries", + "exprs": [ + "l1_gas_price_provider_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_remote_valid_msgs_received", + "description": "Counter of valid messages received by L1 gas price provider remote server", + "type": "timeseries", + "exprs": [ + "l1_gas_price_provider_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_remote_msgs_processed", + "description": "Counter of messages processed by L1 gas price 
provider remote server", + "type": "timeseries", + "exprs": [ + "l1_gas_price_provider_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a L1 gas price provider remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(l1_gas_price_provider_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(l1_gas_price_provider_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "L1 Gas Price": [ + { + "title": "eth_to_strk_error_count", + "description": "Number of times the query to the Eth to Strk oracle failed due to an error or timeout", + "type": "stat", + "exprs": [ + "eth_to_strk_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "eth_to_strk_success_count", + "description": "Number of times the query to the Eth to Strk oracle succeeded", + "type": "stat", + "exprs": [ + "eth_to_strk_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "eth_to_strk_rate", + "description": "The current rate of ETH to STRK conversion", + "type": "timeseries", + "exprs": [ + "eth_to_strk_rate{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_provider_insufficient_history", + "description": "Number of times the L1 gas price provider calculated an average with too few blocks", + "type": "stat", + "exprs": [ + "l1_gas_price_provider_insufficient_history{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_scraper_success_count", + "description": "Number of times the L1 gas price scraper successfully scraped and updated gas prices", + "type": "stat", + "exprs": [ + "l1_gas_price_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_scraper_baselayer_error_count", + "description": "Number of times the L1 gas price scraper encountered an error while scraping the base layer", + "type": "stat", + "exprs": [ + "l1_gas_price_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_scraper_reorg_detected", + "description": "Number of times the L1 gas price scraper detected a reorganization in the base layer", + "type": "stat", + "exprs": [ + "l1_gas_price_scraper_reorg_detected{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_scraper_latest_scraped_block", + "description": "The latest block number that the L1 gas price scraper has scraped", + "type": "timeseries", + "exprs": [ + "l1_gas_price_scraper_latest_scraped_block{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_gas_price_latest_mean_value", + "description": "The latest L1 gas price, calculated as an average by the provider client", + "type": 
"timeseries", + "exprs": [ + "l1_gas_price_latest_mean_value{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "l1_data_gas_price_latest_mean_value", + "description": "The latest L1 data gas price, calculated as an average by the provider client", + "type": "timeseries", + "exprs": [ + "l1_data_gas_price_latest_mean_value{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + } + ], + "Mempool Infra": [ + { + "title": "mempool_local_msgs_received", + "description": "Counter of messages received by mempool local server", + "type": "timeseries", + "exprs": [ + "mempool_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_local_msgs_processed", + "description": "Counter of messages processed by mempool local server", + "type": "timeseries", + "exprs": [ + "mempool_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_local_queue_depth", + "description": "The depth of the mempool's local message queue", + "type": "timeseries", + "exprs": [ + "mempool_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_remote_msgs_received", + "description": "Counter of messages received by mempool remote server", + "type": "timeseries", + "exprs": [ + "mempool_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_remote_valid_msgs_received", + "description": "Counter of valid messages received by mempool remote server", + "type": "timeseries", + "exprs": [ + "mempool_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_remote_msgs_processed", + "description": "Counter of messages processed by mempool remote server", + "type": "timeseries", + "exprs": [ + "mempool_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a mempool remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(mempool_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(mempool_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "MempoolP2pInfra": [ + { + "title": "mempool_p2p_propagator_local_msgs_received", + "description": "Counter of messages received by mempool p2p local server", + "type": "timeseries", + "exprs": [ + "mempool_p2p_propagator_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_p2p_propagator_local_msgs_processed", + "description": "Counter of messages processed by mempool p2p local server", + "type": "timeseries", + "exprs": [ + "mempool_p2p_propagator_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_p2p_propagator_local_queue_depth", + "description": "The depth of the mempool p2p's local message queue", + "type": "timeseries", + "exprs": [ + "mempool_p2p_propagator_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + 
}, + { + "title": "mempool_p2p_propagator_remote_msgs_received", + "description": "Counter of messages received by mempool p2p remote server", + "type": "timeseries", + "exprs": [ + "mempool_p2p_propagator_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_p2p_propagator_remote_valid_msgs_received", + "description": "Counter of valid messages received by mempool p2p remote server", + "type": "timeseries", + "exprs": [ + "mempool_p2p_propagator_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_p2p_propagator_remote_msgs_processed", + "description": "Counter of messages processed by mempool p2p remote server", + "type": "timeseries", + "exprs": [ + "mempool_p2p_propagator_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "mempool_p2p_propagator_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a mempool p2p remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(mempool_p2p_propagator_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(mempool_p2p_propagator_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "SierraCompilerInfra": [ + { + "title": "sierra_compiler_local_msgs_received", + "description": "Counter of messages received by sierra compiler local server", + "type": "timeseries", + "exprs": [ + "sierra_compiler_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "sierra_compiler_local_msgs_processed", + "description": "Counter of messages processed by sierra compiler local server", + "type": "timeseries", + "exprs": [ + "sierra_compiler_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "sierra_compiler_local_queue_depth", + "description": "The depth of the sierra compiler's local message queue", + "type": "timeseries", + "exprs": [ + "sierra_compiler_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "sierra_compiler_remote_msgs_received", + "description": "Counter of messages received by sierra compiler remote server", + "type": "timeseries", + "exprs": [ + "sierra_compiler_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "sierra_compiler_remote_valid_msgs_received", + "description": "Counter of valid messages received by sierra compiler remote server", + "type": "timeseries", + "exprs": [ + "sierra_compiler_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "sierra_compiler_remote_msgs_processed", + "description": "Counter of messages processed by sierra compiler remote server", + "type": "timeseries", + "exprs": [ + "sierra_compiler_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "sierra_compiler_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a sierra compiler remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, 
sum(rate(sierra_compiler_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(sierra_compiler_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "Compile sierra to casm": [ + { + "title": "compile_to_casm_compilation_duration", + "description": "Server-side compilation to casm duration in seconds", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(compile_to_casm_compilation_duration_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(compile_to_casm_compilation_duration_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ], + "StateSyncInfra": [ + { + "title": "state_sync_local_msgs_received", + "description": "Counter of messages received by state sync local server", + "type": "timeseries", + "exprs": [ + "state_sync_local_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "state_sync_local_msgs_processed", + "description": "Counter of messages processed by state sync local server", + "type": "timeseries", + "exprs": [ + "state_sync_local_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "state_sync_local_queue_depth", + "description": "The depth of the state sync's local message queue", + "type": "timeseries", + "exprs": [ + "state_sync_local_queue_depth{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "state_sync_remote_msgs_received", + "description": "Counter of messages received by state sync remote server", + "type": "timeseries", + "exprs": [ + "state_sync_remote_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "state_sync_remote_valid_msgs_received", + "description": "Counter of valid messages received by state sync remote server", + "type": "timeseries", + "exprs": [ + "state_sync_remote_valid_msgs_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "state_sync_remote_msgs_processed", + "description": "Counter of messages processed by state sync remote server", + "type": "timeseries", + "exprs": [ + "state_sync_remote_msgs_processed{cluster=~\"$cluster\", namespace=~\"$namespace\"}" + ], + "extra_params": {} + }, + { + "title": "state_sync_remote_client_send_attempts", + "description": "Required number of remote connection attempts made by a state sync remote client", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(state_sync_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(state_sync_remote_client_send_attempts_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} + } + ] + } +} diff --git a/crates/apollo_dashboard/resources/dev_grafana_alerts.json b/crates/apollo_dashboard/resources/dev_grafana_alerts.json new file mode 100644 index 00000000000..cec9320b51a --- /dev/null +++ b/crates/apollo_dashboard/resources/dev_grafana_alerts.json @@ -0,0 +1,1300 @@ +{ + "alerts": [ + { + "name": "batched_transactions_stuck", + "title": "Batched transactions stuck", + "ruleGroup": "batcher", + "expr": 
"changes(batcher_batched_transactions{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "cende_write_blob_failure", + "title": "Cende write blob failure", + "ruleGroup": "consensus", + "expr": "increase(cende_write_blob_failure{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "cende_write_blob_failure_once", + "title": "Cende write blob failure once", + "ruleGroup": "consensus", + "expr": "increase(cende_write_blob_failure{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "cende_write_prev_height_blob_latency_too_high", + "title": "Cende write prev height blob latency too high", + "ruleGroup": "consensus", + "expr": "rate(cende_write_prev_height_blob_latency_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) / clamp_min(rate(cende_write_prev_height_blob_latency_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]), 0.0000001)", + "conditions": [ + { + "evaluator": { + "params": [ + 1.5 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "consensus_block_number_stuck", + "title": "Consensus block number stuck", + "ruleGroup": "consensus", + "expr": "sum(increase(consensus_block_number{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "consensus_build_proposal_failed", + "title": "Consensus build proposal failed", + "ruleGroup": "consensus", + "expr": "increase(consensus_build_proposal_failed{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "consensus_build_proposal_failed_once", + "title": "Consensus build proposal failed once", + "ruleGroup": "consensus", + "expr": "increase(consensus_build_proposal_failed{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "consensus_conflicting_votes", + "title": "Consensus conflicting votes", + 
"ruleGroup": "consensus", + "expr": "increase(consensus_conflicting_votes{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "consensus_decisions_reached_by_consensus_ratio", + "title": "Consensus decisions reached by consensus ratio", + "ruleGroup": "consensus", + "expr": "increase(consensus_decisions_reached_by_consensus{cluster=~\"$cluster\", namespace=~\"$namespace\"}[10m]) / clamp_min(increase(consensus_decisions_reached_by_sync{cluster=~\"$cluster\", namespace=~\"$namespace\"}[10m]) + increase(consensus_decisions_reached_by_consensus{cluster=~\"$cluster\", namespace=~\"$namespace\"}[10m]), 1)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.5 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "consensus_inbound_stream_evicted", + "title": "Consensus inbound stream evicted", + "ruleGroup": "consensus", + "expr": "increase(consensus_inbound_stream_evicted{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 5.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "consensus_l1_gas_price_provider_failure", + "title": "Consensus L1 gas price provider failure", + "ruleGroup": "consensus", + "expr": "increase(consensus_l1_gas_price_provider_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 5.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "consensus_l1_gas_price_provider_failure_once", + "title": "Consensus L1 gas price provider failure once", + "ruleGroup": "consensus", + "expr": "increase(consensus_l1_gas_price_provider_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "consensus_p2p_disconnections", + "title": "Consensus p2p disconnections", + "ruleGroup": "consensus", + "expr": "changes(apollo_consensus_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) / 2", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "consensus_p2p_not_enough_peers_for_quorum", + "title": "Consensus p2p not enough peers for quorum", + "ruleGroup": "consensus", + "expr": "max_over_time(apollo_consensus_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": 
"and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "consensus_p2p_peer_down", + "title": "Consensus p2p peer down", + "ruleGroup": "consensus", + "expr": "max_over_time(apollo_consensus_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 2.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "consensus_round_above_zero", + "title": "Consensus round above zero", + "ruleGroup": "consensus", + "expr": "max_over_time(consensus_round{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "consensus_round_above_zero_ratio", + "title": "Consensus round above zero ratio", + "ruleGroup": "consensus", + "expr": "increase(consensus_round_above_zero{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) / clamp_min(increase(consensus_block_number{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]), 1)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.05 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 10, + "severity": "p3" + }, + { + "name": "consensus_round_high", + "title": "Consensus round high", + "ruleGroup": "consensus", + "expr": "max_over_time(consensus_round{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", + "conditions": [ + { + "evaluator": { + "params": [ + 20.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "consensus_validate_proposal_failed", + "title": "Consensus validate proposal failed", + "ruleGroup": "consensus", + "expr": "increase(consensus_proposals_invalid{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "consensus_votes_num_sent_messages", + "title": "Consensus votes num sent messages", + "ruleGroup": "consensus", + "expr": "increase(apollo_consensus_votes_num_sent_messages{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])", + "conditions": [ + { + "evaluator": { + "params": [ + 20.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "eth_to_strk_error_count", + "title": "Eth to Strk error count", + "ruleGroup": "l1_gas_price", + "expr": "increase(eth_to_strk_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, 
+ "type": "query" + } + ], + "for": "1m", + "intervalSec": 20, + "severity": "p5" + }, + { + "name": "eth_to_strk_success_count", + "title": "Eth to Strk success count", + "ruleGroup": "l1_gas_price", + "expr": "increase(eth_to_strk_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "gateway_add_tx_idle", + "title": "Gateway add_tx idle", + "ruleGroup": "gateway", + "expr": "sum(increase(gateway_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "http_server_add_tx_idle", + "title": "HTTP Server add_tx idle", + "ruleGroup": "http_server", + "expr": "sum(increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "http_server_avg_add_tx_latency", + "title": "High HTTP server average add_tx latency", + "ruleGroup": "http_server", + "expr": "rate(http_server_add_tx_latency_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) / rate(http_server_add_tx_latency_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", + "conditions": [ + { + "evaluator": { + "params": [ + 2.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "http_server_high_transaction_failure_ratio", + "title": "http server high transaction failure ratio", + "ruleGroup": "http_server", + "expr": "(increase(http_server_added_transactions_failure{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) - increase(http_server_added_transactions_deprecated_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])) / clamp_min(increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]), 1)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.5 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "http_server_internal_error_ratio", + "title": "http server internal error ratio", + "ruleGroup": "http_server", + "expr": "increase(http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) / clamp_min(increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]), 1)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.2 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": 
"http_server_internal_error_once", + "title": "http server internal error once", + "ruleGroup": "http_server", + "expr": "increase(http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "http_server_low_successful_transaction_rate", + "title": "http server low successful transaction rate", + "ruleGroup": "http_server", + "expr": "rate(http_server_added_transactions_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.01 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "http_server_no_successful_transactions", + "title": "http server no successful transactions", + "ruleGroup": "http_server", + "expr": "sum(increase(http_server_added_transactions_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "http_server_p95_add_tx_latency", + "title": "High HTTP server P95 add_tx latency", + "ruleGroup": "http_server", + "expr": "histogram_quantile(0.95, sum(rate(http_server_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "conditions": [ + { + "evaluator": { + "params": [ + 2.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p4" + }, + { + "name": "l1_gas_price_provider_insufficient_history", + "title": "L1 gas price provider insufficient history", + "ruleGroup": "l1_gas_price", + "expr": "increase(l1_gas_price_provider_insufficient_history{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "l1_gas_price_scraper_reorg_detected", + "title": "L1 gas price scraper reorg detected", + "ruleGroup": "l1_gas_price", + "expr": "increase(l1_gas_price_scraper_reorg_detected{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "l1_gas_price_scraper_success_count", + "title": "L1 gas price scraper success count", + "ruleGroup": "l1_gas_price", + "expr": "increase(l1_gas_price_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + 
"type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "l1_gas_price_scraper_baselayer_error_count", + "title": "L1 gas price scraper baselayer error count", + "ruleGroup": "l1_gas_price", + "expr": "increase(l1_gas_price_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "l1_message_scraper_baselayer_error_count", + "title": "L1 message scraper baselayer error count", + "ruleGroup": "l1_messages", + "expr": "increase(l1_message_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 5.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "l1_message_no_successes", + "title": "L1 message no successes", + "ruleGroup": "l1_gas_price", + "expr": "increase(l1_message_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "l1_message_scraper_reorg_detected", + "title": "L1 message scraper reorg detected", + "ruleGroup": "l1_messages", + "expr": "increase(l1_message_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "mempool_add_tx_idle", + "title": "Mempool add_tx idle", + "ruleGroup": "mempool", + "expr": "sum(increase(mempool_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])) or vector(0)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "mempool_evictions_count", + "title": "Mempool evictions count", + "ruleGroup": "mempool", + "expr": "mempool_evictions_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "mempool_p2p_disconnections", + "title": "Mempool p2p disconnections", + "ruleGroup": "mempool", + "expr": "changes(apollo_mempool_p2p_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) / 2", + "conditions": [ + { + "evaluator": { + "params": [ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + 
"severity": "p4" + }, + { + "name": "mempool_p2p_peer_down", + "title": "Mempool p2p peer down", + "ruleGroup": "mempool", + "expr": "max_over_time(apollo_mempool_p2p_num_connected_peers{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 2.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "mempool_pool_size_increase", + "title": "Mempool pool size increase", + "ruleGroup": "mempool", + "expr": "mempool_pool_size{cluster=~\"$cluster\", namespace=~\"$namespace\"}", + "conditions": [ + { + "evaluator": { + "params": [ + 2000.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "mempool_transaction_drop_ratio", + "title": "Mempool transaction drop ratio", + "ruleGroup": "mempool", + "expr": "increase(mempool_transactions_dropped{cluster=~\"$cluster\", namespace=~\"$namespace\"}[10m]) / clamp_min(increase(mempool_transactions_received{cluster=~\"$cluster\", namespace=~\"$namespace\"}[10m]), 1)", + "conditions": [ + { + "evaluator": { + "params": [ + 0.5 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "native_compilation_error", + "title": "Native compilation alert", + "ruleGroup": "batcher", + "expr": "increase(native_compilation_error[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 0.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p5" + }, + { + "name": "preconfirmed_block_not_written", + "title": "Preconfirmed block not written", + "ruleGroup": "batcher", + "expr": "increase(batcher_preconfirmed_block_written{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, + { + "name": "state_sync_lag", + "title": "State sync lag", + "ruleGroup": "state_sync", + "expr": "apollo_central_sync_central_block_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"} - apollo_state_sync_class_manager_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}", + "conditions": [ + { + "evaluator": { + "params": [ + 5.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, + { + "name": "state_sync_stuck", + "title": "State sync stuck", + "ruleGroup": "state_sync", + "expr": "increase(apollo_state_sync_class_manager_marker{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + } + ] +} diff --git 
a/crates/apollo_dashboard/src/alert_definitions.rs b/crates/apollo_dashboard/src/alert_definitions.rs new file mode 100644 index 00000000000..d3d54ec6935 --- /dev/null +++ b/crates/apollo_dashboard/src/alert_definitions.rs @@ -0,0 +1,1070 @@ +use std::collections::HashSet; + +use apollo_batcher::metrics::{BATCHED_TRANSACTIONS, PRECONFIRMED_BLOCK_WRITTEN}; +use apollo_consensus::metrics::{ + CONSENSUS_BLOCK_NUMBER, + CONSENSUS_BUILD_PROPOSAL_FAILED, + CONSENSUS_CONFLICTING_VOTES, + CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS, + CONSENSUS_DECISIONS_REACHED_BY_SYNC, + CONSENSUS_INBOUND_STREAM_EVICTED, + CONSENSUS_PROPOSALS_INVALID, + CONSENSUS_ROUND, + CONSENSUS_ROUND_ABOVE_ZERO, +}; +use apollo_consensus_manager::metrics::{ + CONSENSUS_NUM_CONNECTED_PEERS, + CONSENSUS_VOTES_NUM_SENT_MESSAGES, +}; +use apollo_consensus_orchestrator::metrics::{ + CENDE_WRITE_BLOB_FAILURE, + CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY, + CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR, +}; +use apollo_gateway::metrics::GATEWAY_TRANSACTIONS_RECEIVED; +use apollo_http_server::metrics::{ + ADDED_TRANSACTIONS_DEPRECATED_ERROR, + ADDED_TRANSACTIONS_FAILURE, + ADDED_TRANSACTIONS_INTERNAL_ERROR, + ADDED_TRANSACTIONS_SUCCESS, + ADDED_TRANSACTIONS_TOTAL, + HTTP_SERVER_ADD_TX_LATENCY, +}; +use apollo_l1_gas_price::metrics::{ + ETH_TO_STRK_ERROR_COUNT, + ETH_TO_STRK_SUCCESS_COUNT, + L1_GAS_PRICE_PROVIDER_INSUFFICIENT_HISTORY, + L1_GAS_PRICE_SCRAPER_BASELAYER_ERROR_COUNT, + L1_GAS_PRICE_SCRAPER_REORG_DETECTED, + L1_GAS_PRICE_SCRAPER_SUCCESS_COUNT, +}; +use apollo_l1_provider::metrics::{ + L1_MESSAGE_SCRAPER_BASELAYER_ERROR_COUNT, + L1_MESSAGE_SCRAPER_SUCCESS_COUNT, +}; +use apollo_mempool::metrics::{ + MEMPOOL_EVICTIONS_COUNT, + MEMPOOL_POOL_SIZE, + MEMPOOL_TRANSACTIONS_DROPPED, + MEMPOOL_TRANSACTIONS_RECEIVED, +}; +use apollo_mempool_p2p::metrics::MEMPOOL_P2P_NUM_CONNECTED_PEERS; +use apollo_state_sync_metrics::metrics::{ + CENTRAL_SYNC_CENTRAL_BLOCK_MARKER, + STATE_SYNC_CLASS_MANAGER_MARKER, +}; +use blockifier::metrics::NATIVE_COMPILATION_ERROR; + +use crate::alerts::{ + Alert, + AlertComparisonOp, + AlertCondition, + AlertGroup, + AlertLogicalOp, + AlertSeverity, + Alerts, +}; + +pub const DEV_ALERTS_JSON_PATH: &str = "crates/apollo_dashboard/resources/dev_grafana_alerts.json"; + +const PENDING_DURATION_DEFAULT: &str = "30s"; +const EVALUATION_INTERVAL_SEC_DEFAULT: u64 = 30; + +fn get_consensus_block_number_stuck() -> Alert { + Alert { + name: "consensus_block_number_stuck", + title: "Consensus block number stuck", + alert_group: AlertGroup::Consensus, + expr: format!( + "sum(increase({}[5m])) or vector(0)", + CONSENSUS_BLOCK_NUMBER.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +// If this happens, we expect to also see other nodes alert on `consensus_validate_proposal_failed`. 
+fn get_consensus_build_proposal_failed_alert() -> Alert { + Alert { + name: "consensus_build_proposal_failed", + title: "Consensus build proposal failed", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[1h])", CONSENSUS_BUILD_PROPOSAL_FAILED.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_consensus_build_proposal_failed_once_alert() -> Alert { + Alert { + name: "consensus_build_proposal_failed_once", + title: "Consensus build proposal failed once", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[1h])", CONSENSUS_BUILD_PROPOSAL_FAILED.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_consensus_validate_proposal_failed_alert() -> Alert { + Alert { + name: "consensus_validate_proposal_failed", + title: "Consensus validate proposal failed", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[1h])", CONSENSUS_PROPOSALS_INVALID.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_consensus_decisions_reached_by_consensus_ratio() -> Alert { + Alert { + name: "consensus_decisions_reached_by_consensus_ratio", + title: "Consensus decisions reached by consensus ratio", + alert_group: AlertGroup::Consensus, + // Clamp to avoid divide by 0. 
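+        // The expression below computes decided-by-consensus / max(total decisions, 1), so the
+        // ratio stays well defined over windows in which no decisions were reached at all.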
+ expr: format!( + "increase({consensus}[10m]) / clamp_min(increase({sync}[10m]) + \ + increase({consensus}[10m]), 1)", + consensus = CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS.get_name_with_filter(), + sync = CONSENSUS_DECISIONS_REACHED_BY_SYNC.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 0.5, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn get_consensus_inbound_stream_evicted_alert() -> Alert { + Alert { + name: "consensus_inbound_stream_evicted", + title: "Consensus inbound stream evicted", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[1h])", CONSENSUS_INBOUND_STREAM_EVICTED.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 5.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_consensus_votes_num_sent_messages_alert() -> Alert { + Alert { + name: "consensus_votes_num_sent_messages", + title: "Consensus votes num sent messages", + alert_group: AlertGroup::Consensus, + expr: format!( + "increase({}[20m])", + CONSENSUS_VOTES_NUM_SENT_MESSAGES.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 20.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_cende_write_prev_height_blob_latency_too_high() -> Alert { + Alert { + name: "cende_write_prev_height_blob_latency_too_high", + title: "Cende write prev height blob latency too high", + alert_group: AlertGroup::Consensus, + expr: format!( + "rate({}[20m]) / clamp_min(rate({}[20m]), 0.0000001)", + CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY.get_name_sum_with_filter(), + CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY.get_name_count_with_filter(), + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + // This is 50% of the proposal timeout. 
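+            // (The expression above is the average latency, rate(sum)/rate(count), over a 20m
+            // window; 1.5s being 50% of the timeout implies an assumed 3s proposal timeout.)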
+ comparison_value: 1.5, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn get_cende_write_blob_failure_alert() -> Alert { + Alert { + name: "cende_write_blob_failure", + title: "Cende write blob failure", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[1h])", CENDE_WRITE_BLOB_FAILURE.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_cende_write_blob_failure_once_alert() -> Alert { + Alert { + name: "cende_write_blob_failure_once", + title: "Cende write blob failure once", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[1h])", CENDE_WRITE_BLOB_FAILURE.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_consensus_l1_gas_price_provider_failure() -> Alert { + Alert { + name: "consensus_l1_gas_price_provider_failure", + title: "Consensus L1 gas price provider failure", + alert_group: AlertGroup::Consensus, + expr: format!( + "increase({}[1h])", + CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 5.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn get_consensus_l1_gas_price_provider_failure_once() -> Alert { + Alert { + name: "consensus_l1_gas_price_provider_failure_once", + title: "Consensus L1 gas price provider failure once", + alert_group: AlertGroup::Consensus, + expr: format!( + "increase({}[1h])", + CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_consensus_round_above_zero() -> Alert { + Alert { + name: "consensus_round_above_zero", + title: "Consensus round above zero", + alert_group: AlertGroup::Consensus, + expr: format!("max_over_time({}[1h])", CONSENSUS_ROUND.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_consensus_conflicting_votes() -> Alert { + Alert { + name: "consensus_conflicting_votes", + title: "Consensus conflicting votes", + alert_group: AlertGroup::Consensus, + expr: format!("increase({}[20m])", CONSENSUS_CONFLICTING_VOTES.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, 
+ logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + // TODO(matan): Increase severity once slashing is supported. + severity: AlertSeverity::WorkingHours, + } +} + +fn get_gateway_add_tx_idle() -> Alert { + Alert { + name: "gateway_add_tx_idle", + title: "Gateway add_tx idle", + alert_group: AlertGroup::Gateway, + expr: format!( + "sum(increase({}[20m])) or vector(0)", + GATEWAY_TRANSACTIONS_RECEIVED.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 0.1, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +// TODO(shahak): add gateway latency alert + +fn get_mempool_add_tx_idle() -> Alert { + Alert { + name: "mempool_add_tx_idle", + title: "Mempool add_tx idle", + alert_group: AlertGroup::Mempool, + expr: format!( + "sum(increase({}[20m])) or vector(0)", + MEMPOOL_TRANSACTIONS_RECEIVED.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 0.1, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_http_server_add_tx_idle() -> Alert { + Alert { + name: "http_server_add_tx_idle", + title: "HTTP Server add_tx idle", + alert_group: AlertGroup::HttpServer, + expr: format!( + "sum(increase({}[20m])) or vector(0)", + ADDED_TRANSACTIONS_TOTAL.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 0.1, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_http_server_internal_error_ratio() -> Alert { + Alert { + name: "http_server_internal_error_ratio", + title: "http server internal error ratio", + alert_group: AlertGroup::HttpServer, + expr: format!( + "increase({}[1h]) / clamp_min(increase({}[1h]), 1)", + ADDED_TRANSACTIONS_INTERNAL_ERROR.get_name_with_filter(), + ADDED_TRANSACTIONS_TOTAL.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.2, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_http_server_internal_error_once() -> Alert { + Alert { + name: "http_server_internal_error_once", + title: "http server internal error once", + alert_group: AlertGroup::HttpServer, + expr: format!( + "increase({}[20m]) or vector(0)", + ADDED_TRANSACTIONS_INTERNAL_ERROR.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn get_eth_to_strk_error_count_alert() -> Alert { + Alert { + name: "eth_to_strk_error_count", + title: "Eth to Strk error count", + alert_group: AlertGroup::L1GasPrice, + expr: format!("increase({}[1h])", ETH_TO_STRK_ERROR_COUNT.get_name_with_filter()), + 
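+        // With the condition below (> 10), this fires once more than ten ETH-to-STRK errors
+        // are recorded within a single hour.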
conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: "1m", + evaluation_interval_sec: 20, + severity: AlertSeverity::Informational, + } +} + +fn get_eth_to_strk_success_count_alert() -> Alert { + Alert { + name: "eth_to_strk_success_count", + title: "Eth to Strk success count", + alert_group: AlertGroup::L1GasPrice, + expr: format!("increase({}[1h])", ETH_TO_STRK_SUCCESS_COUNT.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_l1_gas_price_scraper_success_count_alert() -> Alert { + Alert { + name: "l1_gas_price_scraper_success_count", + title: "L1 gas price scraper success count", + alert_group: AlertGroup::L1GasPrice, + expr: format!( + "increase({}[1h])", + L1_GAS_PRICE_SCRAPER_SUCCESS_COUNT.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_http_server_no_successful_transactions() -> Alert { + Alert { + name: "http_server_no_successful_transactions", + title: "http server no successful transactions", + alert_group: AlertGroup::HttpServer, + expr: format!( + "sum(increase({}[1h])) or vector(0)", + ADDED_TRANSACTIONS_SUCCESS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_http_server_low_successful_transaction_rate() -> Alert { + Alert { + name: "http_server_low_successful_transaction_rate", + title: "http server low successful transaction rate", + alert_group: AlertGroup::HttpServer, + expr: format!( + "rate({}[5m]) or vector(0)", + ADDED_TRANSACTIONS_SUCCESS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 0.01, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_http_server_high_transaction_failure_ratio() -> Alert { + Alert { + name: "http_server_high_transaction_failure_ratio", + title: "http server high transaction failure ratio", + alert_group: AlertGroup::HttpServer, + expr: format!( + "(increase({}[1h]) - increase({}[1h])) / clamp_min(increase({}[1h]), 1)", + ADDED_TRANSACTIONS_FAILURE.get_name_with_filter(), + ADDED_TRANSACTIONS_DEPRECATED_ERROR.get_name_with_filter(), + ADDED_TRANSACTIONS_TOTAL.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.5, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +/// Triggers if the average latency of `add_tx` calls, across all HTTP servers, exceeds 2 seconds +/// 
over a 5-minute window. +fn get_http_server_avg_add_tx_latency_alert() -> Alert { + let sum_metric = HTTP_SERVER_ADD_TX_LATENCY.get_name_sum_with_filter(); + let count_metric = HTTP_SERVER_ADD_TX_LATENCY.get_name_count_with_filter(); + + Alert { + name: "http_server_avg_add_tx_latency", + title: "High HTTP server average add_tx latency", + alert_group: AlertGroup::HttpServer, + expr: format!("rate({sum_metric}[5m]) / rate({count_metric}[5m])"), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 2.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +/// Triggers when the slowest 5% of transactions for a specific HTTP server are taking longer than 2 +/// seconds over a 5-minute window. +fn get_http_server_p95_add_tx_latency_alert() -> Alert { + Alert { + name: "http_server_p95_add_tx_latency", + title: "High HTTP server P95 add_tx latency", + alert_group: AlertGroup::HttpServer, + expr: format!( + "histogram_quantile(0.95, sum(rate({}[5m])) by (le))", + HTTP_SERVER_ADD_TX_LATENCY.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 2.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn get_l1_gas_price_scraper_baselayer_error_count_alert() -> Alert { + Alert { + name: "l1_gas_price_scraper_baselayer_error_count", + title: "L1 gas price scraper baselayer error count", + alert_group: AlertGroup::L1GasPrice, + expr: format!( + "increase({}[5m])", + L1_GAS_PRICE_SCRAPER_BASELAYER_ERROR_COUNT.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_l1_gas_price_provider_insufficient_history_alert() -> Alert { + Alert { + name: "l1_gas_price_provider_insufficient_history", + title: "L1 gas price provider insufficient history", + alert_group: AlertGroup::L1GasPrice, + expr: format!( + "increase({}[1m])", + L1_GAS_PRICE_PROVIDER_INSUFFICIENT_HISTORY.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_l1_gas_price_reorg_detected_alert() -> Alert { + Alert { + name: "l1_gas_price_scraper_reorg_detected", + title: "L1 gas price scraper reorg detected", + alert_group: AlertGroup::L1GasPrice, + expr: format!( + "increase({}[1m])", + L1_GAS_PRICE_SCRAPER_REORG_DETECTED.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_l1_message_scraper_no_successes_alert() -> Alert { + Alert { + name: "l1_message_no_successes", + title: "L1 message no 
successes", + alert_group: AlertGroup::L1GasPrice, + expr: format!("increase({}[20m])", L1_MESSAGE_SCRAPER_SUCCESS_COUNT.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_l1_message_scraper_baselayer_error_count_alert() -> Alert { + Alert { + name: "l1_message_scraper_baselayer_error_count", + title: "L1 message scraper baselayer error count", + alert_group: AlertGroup::L1Messages, + expr: format!( + "increase({}[1h])", + L1_MESSAGE_SCRAPER_BASELAYER_ERROR_COUNT.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 5.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_l1_message_scraper_reorg_detected_alert() -> Alert { + Alert { + name: "l1_message_scraper_reorg_detected", + title: "L1 message scraper reorg detected", + alert_group: AlertGroup::L1Messages, + expr: format!( + "increase({}[1m])", + L1_MESSAGE_SCRAPER_BASELAYER_ERROR_COUNT.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_mempool_pool_size_increase() -> Alert { + Alert { + name: "mempool_pool_size_increase", + title: "Mempool pool size increase", + alert_group: AlertGroup::Mempool, + expr: MEMPOOL_POOL_SIZE.get_name_with_filter().to_string(), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 2000.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_mempool_transaction_drop_ratio() -> Alert { + Alert { + name: "mempool_transaction_drop_ratio", + title: "Mempool transaction drop ratio", + alert_group: AlertGroup::Mempool, + expr: format!( + "increase({}[10m]) / clamp_min(increase({}[10m]), 1)", + MEMPOOL_TRANSACTIONS_DROPPED.get_name_with_filter(), + MEMPOOL_TRANSACTIONS_RECEIVED.get_name_with_filter(), + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.5, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_consensus_round_high() -> Alert { + Alert { + name: "consensus_round_high", + title: "Consensus round high", + alert_group: AlertGroup::Consensus, + expr: format!("max_over_time({}[1m])", CONSENSUS_ROUND.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 20.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_consensus_round_above_zero_ratio() -> Alert { + Alert { + name: "consensus_round_above_zero_ratio", + 
title: "Consensus round above zero ratio", + alert_group: AlertGroup::Consensus, + expr: format!( + "increase({}[1h]) / clamp_min(increase({}[1h]), 1)", + CONSENSUS_ROUND_ABOVE_ZERO.get_name_with_filter(), + CONSENSUS_BLOCK_NUMBER.get_name_with_filter(), + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.05, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: 10, + severity: AlertSeverity::DayOnly, + } +} + +fn get_native_compilation_error_increase() -> Alert { + Alert { + name: "native_compilation_error", + title: "Native compilation alert", + alert_group: AlertGroup::Batcher, + expr: format!("increase({}[1h])", NATIVE_COMPILATION_ERROR.get_name()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 0.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Informational, + } +} + +fn get_state_sync_lag() -> Alert { + Alert { + name: "state_sync_lag", + title: "State sync lag", + alert_group: AlertGroup::StateSync, + expr: format!( + "{} - {}", + CENTRAL_SYNC_CENTRAL_BLOCK_MARKER.get_name_with_filter(), + STATE_SYNC_CLASS_MANAGER_MARKER.get_name_with_filter() + ), // Alert when the central sync is ahead of the class manager by more than 5 blocks + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 5.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_state_sync_stuck() -> Alert { + Alert { + name: "state_sync_stuck", + title: "State sync stuck", + alert_group: AlertGroup::StateSync, + expr: format!("increase({}[5m])", STATE_SYNC_CLASS_MANAGER_MARKER.get_name_with_filter()), /* Alert is triggered when the class manager marker is not updated for 5m */ + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_batched_transactions_stuck() -> Alert { + Alert { + name: "batched_transactions_stuck", + title: "Batched transactions stuck", + alert_group: AlertGroup::Batcher, + expr: format!("changes({}[5m])", BATCHED_TRANSACTIONS.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +fn get_preconfirmed_block_not_written() -> Alert { + Alert { + name: "preconfirmed_block_not_written", + title: "Preconfirmed block not written", + alert_group: AlertGroup::Batcher, + expr: format!("increase({}[1h])", PRECONFIRMED_BLOCK_WRITTEN.get_name_with_filter()), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_consensus_p2p_peer_down() -> Alert { + Alert { + name: 
"consensus_p2p_peer_down", + title: "Consensus p2p peer down", + alert_group: AlertGroup::Consensus, + expr: format!( + "max_over_time({}[1h])", + CONSENSUS_NUM_CONNECTED_PEERS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + // TODO(shahak): find a way to make this depend on num_validators + comparison_value: 2.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +fn get_consensus_p2p_not_enough_peers_for_quorum() -> Alert { + Alert { + name: "consensus_p2p_not_enough_peers_for_quorum", + title: "Consensus p2p not enough peers for quorum", + alert_group: AlertGroup::Consensus, + expr: format!( + "max_over_time({}[5m])", + CONSENSUS_NUM_CONNECTED_PEERS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + // TODO(shahak): find a way to make this depend on num_validators and + // assume_no_malicious_validators + comparison_value: 1.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +/// Alert if there were too many disconnections in the given timespan +fn get_consensus_p2p_disconnections() -> Alert { + Alert { + name: "consensus_p2p_disconnections", + title: "Consensus p2p disconnections", + alert_group: AlertGroup::Consensus, + expr: format!( + // TODO(shahak): find a way to make this depend on num_validators + // Dividing by two since this counts both disconnections and reconnections + "changes({}[1h]) / 2", + CONSENSUS_NUM_CONNECTED_PEERS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn get_mempool_p2p_peer_down() -> Alert { + Alert { + name: "mempool_p2p_peer_down", + title: "Mempool p2p peer down", + alert_group: AlertGroup::Mempool, + expr: format!( + "max_over_time({}[1h])", + MEMPOOL_P2P_NUM_CONNECTED_PEERS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + // TODO(shahak): find a way to make this depend on num_validators + comparison_value: 2.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + +/// Alert if there were too many disconnections in the given timespan +fn get_mempool_p2p_disconnections() -> Alert { + Alert { + name: "mempool_p2p_disconnections", + title: "Mempool p2p disconnections", + alert_group: AlertGroup::Mempool, + expr: format!( + // TODO(shahak): find a way to make this depend on num_validators + // Dividing by two since this counts both disconnections and reconnections + "changes({}[1h]) / 2", + MEMPOOL_P2P_NUM_CONNECTED_PEERS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + +fn verify_unique_names(alerts: 
&[Alert]) {
+    let mut names = HashSet::new();
+    for alert in alerts.iter() {
+        if !names.insert(&alert.name) {
+            panic!("Duplicate alert name found: {}", alert.name);
+        }
+    }
+}
+
+fn get_mempool_evictions_count_alert() -> Alert {
+    Alert {
+        name: "mempool_evictions_count",
+        title: "Mempool evictions count",
+        alert_group: AlertGroup::Mempool,
+        expr: MEMPOOL_EVICTIONS_COUNT.get_name_with_filter().to_string(),
+        conditions: &[AlertCondition {
+            comparison_op: AlertComparisonOp::GreaterThan,
+            comparison_value: 0.0,
+            logical_op: AlertLogicalOp::And,
+        }],
+        pending_duration: PENDING_DURATION_DEFAULT,
+        evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT,
+        severity: AlertSeverity::Regular,
+    }
+}
+
+pub fn get_apollo_alerts() -> Alerts {
+    let alerts = vec![
+        get_batched_transactions_stuck(),
+        get_cende_write_blob_failure_alert(),
+        get_cende_write_blob_failure_once_alert(),
+        get_cende_write_prev_height_blob_latency_too_high(),
+        get_consensus_block_number_stuck(),
+        get_consensus_build_proposal_failed_alert(),
+        get_consensus_build_proposal_failed_once_alert(),
+        get_consensus_conflicting_votes(),
+        get_consensus_decisions_reached_by_consensus_ratio(),
+        get_consensus_inbound_stream_evicted_alert(),
+        get_consensus_l1_gas_price_provider_failure(),
+        get_consensus_l1_gas_price_provider_failure_once(),
+        get_consensus_p2p_disconnections(),
+        get_consensus_p2p_not_enough_peers_for_quorum(),
+        get_consensus_p2p_peer_down(),
+        get_consensus_round_above_zero(),
+        get_consensus_round_above_zero_ratio(),
+        get_consensus_round_high(),
+        get_consensus_validate_proposal_failed_alert(),
+        get_consensus_votes_num_sent_messages_alert(),
+        get_eth_to_strk_error_count_alert(),
+        get_eth_to_strk_success_count_alert(),
+        get_gateway_add_tx_idle(),
+        get_http_server_add_tx_idle(),
+        get_http_server_avg_add_tx_latency_alert(),
+        get_http_server_high_transaction_failure_ratio(),
+        get_http_server_internal_error_ratio(),
+        get_http_server_internal_error_once(),
+        get_http_server_low_successful_transaction_rate(),
+        get_http_server_no_successful_transactions(),
+        get_http_server_p95_add_tx_latency_alert(),
+        get_l1_gas_price_provider_insufficient_history_alert(),
+        get_l1_gas_price_reorg_detected_alert(),
+        get_l1_gas_price_scraper_success_count_alert(),
+        get_l1_gas_price_scraper_baselayer_error_count_alert(),
+        get_l1_message_scraper_baselayer_error_count_alert(),
+        get_l1_message_scraper_no_successes_alert(),
+        get_l1_message_scraper_reorg_detected_alert(),
+        get_mempool_add_tx_idle(),
+        get_mempool_evictions_count_alert(),
+        get_mempool_p2p_disconnections(),
+        get_mempool_p2p_peer_down(),
+        get_mempool_pool_size_increase(),
+        get_mempool_transaction_drop_ratio(),
+        get_native_compilation_error_increase(),
+        get_preconfirmed_block_not_written(),
+        get_state_sync_lag(),
+        get_state_sync_stuck(),
+    ];
+    verify_unique_names(&alerts);
+    Alerts::new(alerts)
+}
diff --git a/crates/apollo_dashboard/src/alerts.rs b/crates/apollo_dashboard/src/alerts.rs
new file mode 100644
index 00000000000..27cf302a107
--- /dev/null
+++ b/crates/apollo_dashboard/src/alerts.rs
@@ -0,0 +1,145 @@
+use serde::ser::SerializeStruct;
+use serde::{Serialize, Serializer};
+
+/// Alerts to be configured in the dashboard.
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct Alerts {
+    alerts: Vec<Alert>,
+}
+
+impl Alerts {
+    pub(crate) const fn new(alerts: Vec<Alert>) -> Self {
+        Self { alerts }
+    }
+}
+
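+// Serialized via `serde`, this produces the `dev_grafana_alerts.json` resource shown earlier
+// in this diff: a top-level `{ "alerts": [ ... ] }` object whose entries carry the
+// `name`/`title`/`ruleGroup`/`expr`/`conditions`/`for`/`intervalSec`/`severity` fields
+// defined below.
+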
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub(crate) enum AlertSeverity {
+    // Critical issues that demand immediate attention. These are high-impact incidents that
+    // affect the system's availability.
+    #[serde(rename = "p1")]
+    // TODO(Tsabary): currently the `Sos` variant is used only in tests, and removing the
+    // `#[cfg(test)]` attribute results in a compilation error. When needed in non-test setup,
+    // remove the attribute.
+    #[cfg(test)]
+    Sos,
+    // Standard alerts for production issues that require attention around the clock but are not
+    // as time-sensitive as SOS alerts.
+    #[serde(rename = "p2")]
+    Regular,
+    // Important alerts that do not require overnight attention. These are delayed during night
+    // hours to reduce unnecessary off-hours noise.
+    #[serde(rename = "p3")]
+    DayOnly,
+    // Alerts that are only triggered during official business hours. These do not trigger during
+    // holidays.
+    #[serde(rename = "p4")]
+    WorkingHours,
+    // Non-critical alerts, meant purely for information. These are not intended to wake anyone up
+    // and are monitored only by the development team.
+    #[serde(rename = "p5")]
+    Informational,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub(crate) enum AlertComparisonOp {
+    #[serde(rename = "gt")]
+    GreaterThan,
+    #[serde(rename = "lt")]
+    LessThan,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub(crate) enum AlertLogicalOp {
+    And,
+    // TODO(Tsabary): remove the `allow(dead_code)` once this variant is used.
+    #[allow(dead_code)]
+    Or,
+}
+
+/// Defines the condition to trigger the alert.
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct AlertCondition {
+    // The comparison operator to use when comparing the expression to the value.
+    pub(crate) comparison_op: AlertComparisonOp,
+    // The value to compare the expression to.
+    pub(crate) comparison_value: f64,
+    // The logical operator between this condition and other conditions.
+    // TODO(Yael): Consider moving this field to be one per alert to avoid ambiguity when
+    // trying to use a combination of `and` and `or` operators.
+    pub(crate) logical_op: AlertLogicalOp,
+}
+
+impl Serialize for AlertCondition {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut state = serializer.serialize_struct("AlertCondition", 4)?;
+
+        state.serialize_field(
+            "evaluator",
+            &serde_json::json!({
+                "params": [self.comparison_value],
+                "type": self.comparison_op
+            }),
+        )?;
+
+        state.serialize_field(
+            "operator",
+            &serde_json::json!({
+                "type": self.logical_op
+            }),
+        )?;
+
+        state.serialize_field(
+            "reducer",
+            &serde_json::json!({
+                "params": [],
+                "type": "avg"
+            }),
+        )?;
+
+        state.serialize_field("type", "query")?;
+
+        state.end()
+    }
+}
+
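+// For reference, a `GreaterThan`/`0.0`/`And` condition serializes into the Grafana condition
+// object seen in the JSON resource above:
+// { "evaluator": { "params": [0.0], "type": "gt" }, "operator": { "type": "and" },
+//   "reducer": { "params": [], "type": "avg" }, "type": "query" }
+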
+#[derive(Clone, Debug, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub(crate) enum AlertGroup {
+    Batcher,
+    Consensus,
+    Gateway,
+    HttpServer,
+    L1GasPrice,
+    L1Messages,
+    Mempool,
+    StateSync,
+}
+
+/// Describes the properties of an alert defined in grafana.
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub(crate) struct Alert {
+    // The name of the alert.
+    pub(crate) name: &'static str,
+    // The title that will be displayed.
+    pub(crate) title: &'static str,
+    // The group that the alert will be displayed under.
+    #[serde(rename = "ruleGroup")]
+    pub(crate) alert_group: AlertGroup,
+    // The expression to evaluate for the alert.
+    pub(crate) expr: String,
+    // The conditions that must be met for the alert to be triggered.
+    pub(crate) conditions: &'static [AlertCondition],
+    // The time duration for which the alert conditions must be true before an alert is triggered.
+    #[serde(rename = "for")]
+    pub(crate) pending_duration: &'static str,
+    // The interval in sec between evaluations of the alert.
+    #[serde(rename = "intervalSec")]
+    pub(crate) evaluation_interval_sec: u64,
+    // The severity level of the alert.
+    pub(crate) severity: AlertSeverity,
+}
diff --git a/crates/apollo_dashboard/src/bin/sequencer_dashboard_generator.rs b/crates/apollo_dashboard/src/bin/sequencer_dashboard_generator.rs
new file mode 100644
index 00000000000..0726f9a3309
--- /dev/null
+++ b/crates/apollo_dashboard/src/bin/sequencer_dashboard_generator.rs
@@ -0,0 +1,9 @@
+use apollo_dashboard::alert_definitions::{get_apollo_alerts, DEV_ALERTS_JSON_PATH};
+use apollo_dashboard::dashboard_definitions::{get_apollo_dashboard, DEV_JSON_PATH};
+use apollo_infra_utils::dumping::serialize_to_file;
+
+/// Creates the dashboard and alerts json files.
+fn main() {
+    serialize_to_file(get_apollo_dashboard(), DEV_JSON_PATH);
+    serialize_to_file(get_apollo_alerts(), DEV_ALERTS_JSON_PATH);
+}
diff --git a/crates/apollo_dashboard/src/dashboard.rs b/crates/apollo_dashboard/src/dashboard.rs
new file mode 100644
index 00000000000..7f8265906d4
--- /dev/null
+++ b/crates/apollo_dashboard/src/dashboard.rs
@@ -0,0 +1,181 @@
+use std::collections::HashMap;
+
+use apollo_metrics::metrics::{MetricCounter, MetricGauge, MetricHistogram};
+use indexmap::IndexMap;
+use serde::ser::{SerializeMap, SerializeStruct};
+use serde::{Serialize, Serializer};
+
+#[cfg(test)]
+#[path = "dashboard_test.rs"]
+mod dashboard_test;
+
+const HISTOGRAM_QUANTILES: &[f64] = &[0.50, 0.95];
+const HISTOGRAM_TIME_RANGE: &str = "5m";
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Dashboard {
+    name: &'static str,
+    rows: Vec<Row>,
+}
+
+impl Dashboard {
+    pub(crate) fn new(name: &'static str, rows: Vec<Row>) -> Self {
+        Self { name, rows }
+    }
+}
+
+// Custom Serialize implementation for Dashboard.
+impl Serialize for Dashboard {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut map = serializer.serialize_map(Some(1))?;
+        let mut row_map = IndexMap::new();
+        for row in &self.rows {
+            row_map.insert(row.name, &row.panels);
+        }
+
+        map.serialize_entry(self.name, &row_map)?;
+        map.end()
+    }
+}
+
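+// Note: a dashboard therefore renders as nested maps keyed by name rather than as a struct:
+// { "<dashboard name>": { "<row name>": [ <panels> ], ... } }
+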
+/// Grafana panel types.
+#[derive(Clone, Debug, Serialize, PartialEq)]
+#[serde(rename_all = "lowercase")]
+pub(crate) enum PanelType {
+    Stat,
+    TimeSeries,
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct Panel {
+    name: &'static str,
+    description: &'static str,
+    exprs: Vec<String>,
+    panel_type: PanelType,
+}
+
+impl Panel {
+    pub(crate) fn new(
+        name: &'static str,
+        description: &'static str,
+        exprs: Vec<String>,
+        panel_type: PanelType,
+    ) -> Self {
+        // A panel assigns a unique id to each of its expressions. Conventionally, we use letters
+        // A–Z, and for simplicity, we limit the number of expressions to this range.
+        const NUM_LETTERS: u8 = b'Z' - b'A' + 1;
+        assert!(
+            exprs.len() <= NUM_LETTERS.into(),
+            "Too many expressions ({} > {}) in panel '{}'.",
+            exprs.len(),
+            NUM_LETTERS,
+            name
+        );
+        Self { name, description, exprs, panel_type }
+    }
+
+    pub(crate) fn from_counter(metric: MetricCounter, panel_type: PanelType) -> Self {
+        Self::new(
+            metric.get_name(),
+            metric.get_description(),
+            vec![metric.get_name_with_filter().to_string()],
+            panel_type,
+        )
+    }
+
+    pub(crate) fn from_gauge(metric: MetricGauge, panel_type: PanelType) -> Self {
+        Self::new(
+            metric.get_name(),
+            metric.get_description(),
+            vec![metric.get_name_with_filter().to_string()],
+            panel_type,
+        )
+    }
+
+    pub(crate) fn from_hist(metric: MetricHistogram, panel_type: PanelType) -> Self {
+        Self::new(
+            metric.get_name(),
+            metric.get_description(),
+            HISTOGRAM_QUANTILES
+                .iter()
+                .map(|q| {
+                    format!(
+                        "histogram_quantile({:.2}, sum(rate({}[{}])) by (le))",
+                        q,
+                        metric.get_name_with_filter(),
+                        HISTOGRAM_TIME_RANGE
+                    )
+                })
+                .collect(),
+            panel_type,
+        )
+    }
+
+    pub(crate) fn ratio_time_series(
+        name: &'static str,
+        description: &'static str,
+        numerator: &MetricCounter,
+        denominator_parts: &[&MetricCounter],
+        duration: &str,
+    ) -> Self {
+        let numerator_expr =
+            format!("increase({}[{}])", numerator.get_name_with_filter(), duration);
+
+        let denominator_expr = denominator_parts
+            .iter()
+            .map(|m| format!("increase({}[{}])", m.get_name_with_filter(), duration))
+            .collect::<Vec<_>>()
+            .join(" + ");
+
+        let expr = format!("100 * ({} / ({}))", numerator_expr, denominator_expr);
+
+        Self::new(name, description, vec![expr], PanelType::TimeSeries)
+    }
+}
+
+// Custom Serialize implementation for Panel.
+impl Serialize for Panel {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut state = serializer.serialize_struct("Panel", 5)?; // 5 fields (including extra dict)
+        state.serialize_field("title", &self.name)?;
+        state.serialize_field("description", &self.description)?;
+        state.serialize_field("type", &self.panel_type)?;
+        state.serialize_field("exprs", &self.exprs)?;
+
+        // Append an empty dictionary `{}` at the end
+        let empty_map: HashMap<String, String> = HashMap::new();
+        state.serialize_field("extra_params", &empty_map)?;
+
+        state.end()
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct Row {
+    name: &'static str,
+    panels: Vec<Panel>,
+}
+
+impl Row {
+    pub(crate) const fn new(name: &'static str, panels: Vec<Panel>) -> Self {
+        Self { name, panels }
+    }
+}
+
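+// Like `Dashboard`, a row serializes as a single-entry map keyed by its name, e.g.
+// { "Batcher": [ <panels> ] }, which nests each row's panels under the row name in the
+// generated JSON.
+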
+// Custom Serialize implementation for Row.
+impl Serialize for Row {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut map = serializer.serialize_map(Some(1))?;
+        map.serialize_entry(self.name, &self.panels)?;
+        map.end()
+    }
+}
diff --git a/crates/apollo_dashboard/src/dashboard_definitions.rs b/crates/apollo_dashboard/src/dashboard_definitions.rs
new file mode 100644
index 00000000000..08d9c7ba625
--- /dev/null
+++ b/crates/apollo_dashboard/src/dashboard_definitions.rs
@@ -0,0 +1,53 @@
+use crate::dashboard::Dashboard;
+use crate::panels::batcher::{get_batcher_infra_row, get_batcher_row};
+use crate::panels::blockifier::get_blockifier_row;
+use crate::panels::class_manager::get_class_manager_infra_row;
+use crate::panels::consensus::{get_consensus_p2p_row, get_consensus_row};
+use crate::panels::gateway::{get_gateway_infra_row, get_gateway_row};
+use crate::panels::http_server::get_http_server_row;
+use crate::panels::l1_gas_price::{get_l1_gas_price_infra_row, get_l1_gas_price_row};
+use crate::panels::l1_provider::{get_l1_provider_infra_row, get_l1_provider_row};
+use crate::panels::mempool::{get_mempool_infra_row, get_mempool_row};
+use crate::panels::mempool_p2p::{get_mempool_p2p_infra_row, get_mempool_p2p_row};
+use crate::panels::sierra_compiler::{get_compile_to_casm_row, get_sierra_compiler_infra_row};
+use crate::panels::state_sync::{
+    get_state_sync_infra_row,
+    get_state_sync_p2p_row,
+    get_state_sync_row,
+};
+
+#[cfg(test)]
+#[path = "dashboard_definitions_test.rs"]
+mod dashboard_definitions_test;
+
+pub const DEV_JSON_PATH: &str = "crates/apollo_dashboard/resources/dev_grafana.json";
+
+pub fn get_apollo_dashboard() -> Dashboard {
+    Dashboard::new(
+        "Sequencer Node Dashboard",
+        vec![
+            get_batcher_row(),
+            get_consensus_row(),
+            get_http_server_row(),
+            get_state_sync_row(),
+            get_mempool_p2p_row(),
+            get_consensus_p2p_row(),
+            get_state_sync_p2p_row(),
+            get_gateway_row(),
+            get_mempool_row(),
+            get_blockifier_row(),
+            get_batcher_infra_row(),
+            get_gateway_infra_row(),
+            get_class_manager_infra_row(),
+            get_l1_provider_infra_row(),
+            get_l1_provider_row(),
+            get_l1_gas_price_infra_row(),
+            get_l1_gas_price_row(),
+            get_mempool_infra_row(),
+            get_mempool_p2p_infra_row(),
+            get_sierra_compiler_infra_row(),
+            get_compile_to_casm_row(),
+            get_state_sync_infra_row(),
+        ],
+    )
+}
diff --git a/crates/apollo_dashboard/src/dashboard_definitions_test.rs b/crates/apollo_dashboard/src/dashboard_definitions_test.rs
new file mode 100644
index 00000000000..5976f656e91
--- /dev/null
+++ b/crates/apollo_dashboard/src/dashboard_definitions_test.rs
@@ -0,0 +1,14 @@
+use apollo_infra_utils::dumping::serialize_to_file_test;
+
+use crate::alert_definitions::{get_apollo_alerts, DEV_ALERTS_JSON_PATH};
+use crate::dashboard_definitions::{get_apollo_dashboard, DEV_JSON_PATH};
+
+const FIX_BINARY_NAME: &str = "sequencer_dashboard_generator";
+
+// Test that the grafana dev dashboard and alert files are up to date.
To update the default config +// file, run: cargo run --bin sequencer_dashboard_generator -q +#[test] +fn default_dev_grafana_dashboard() { + serialize_to_file_test(get_apollo_dashboard(), DEV_JSON_PATH, FIX_BINARY_NAME); + serialize_to_file_test(get_apollo_alerts(), DEV_ALERTS_JSON_PATH, FIX_BINARY_NAME); +} diff --git a/crates/apollo_dashboard/src/dashboard_test.rs b/crates/apollo_dashboard/src/dashboard_test.rs new file mode 100644 index 00000000000..e649f3ea5d6 --- /dev/null +++ b/crates/apollo_dashboard/src/dashboard_test.rs @@ -0,0 +1,85 @@ +use apollo_infra_utils::test_utils::assert_json_eq; +use apollo_metrics::metrics::{MetricCounter, MetricScope}; + +use crate::alerts::{ + Alert, + AlertComparisonOp, + AlertCondition, + AlertGroup, + AlertLogicalOp, + AlertSeverity, +}; +use crate::dashboard::Panel; + +#[test] +fn serialize_alert() { + let alert = Alert { + name: "Name", + title: "Message", + alert_group: AlertGroup::Batcher, + expr: "max".to_string(), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 10.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: "5m", + evaluation_interval_sec: 20, + severity: AlertSeverity::Sos, + }; + + let serialized = serde_json::to_value(&alert).unwrap(); + let expected = serde_json::json!({ + "name": "Name", + "title": "Message", + "ruleGroup": "batcher", + "expr": "max", + "conditions": [ + { + "evaluator": { "params": [10.0], "type": "gt" }, + "operator": { "type": "and" }, + "reducer": {"params": [], "type": "avg"}, + "type": "query" + } + ], + "for": "5m", + "intervalSec": 20, + "severity": "p1" + }); + assert_json_eq(&serialized, &expected, "Json Comparison failed".to_string()); +} + +#[test] +fn test_ratio_time_series() { + let duration = "5m"; + let metric_1 = MetricCounter::new(MetricScope::Batcher, "r", "r_f", "desc", 0); + let metric_2 = MetricCounter::new(MetricScope::Batcher, "p", "p_f", "desc", 0); + let metric_3 = MetricCounter::new(MetricScope::Batcher, "a", "a_f", "desc", 0); + + let panel = + Panel::ratio_time_series("x", "x", &metric_1, &[&metric_1, &metric_2, &metric_3], duration); + + let expected = format!( + "100 * (increase({}[{}]) / (increase({}[{}]) + increase({}[{}]) + increase({}[{}])))", + metric_1.get_name_with_filter(), + duration, + metric_1.get_name_with_filter(), + duration, + metric_2.get_name_with_filter(), + duration, + metric_3.get_name_with_filter(), + duration, + ); + + assert_eq!(panel.exprs, vec![expected]); + + let expected = format!( + "100 * (increase({}[{}]) / (increase({}[{}])))", + metric_1.get_name_with_filter(), + duration, + metric_2.get_name_with_filter(), + duration, + ); + let panel = Panel::ratio_time_series("y", "y", &metric_1, &[&metric_2], duration); + assert_eq!(panel.exprs, vec![expected]); +} diff --git a/crates/apollo_dashboard/src/lib.rs b/crates/apollo_dashboard/src/lib.rs new file mode 100644 index 00000000000..898dded2e1f --- /dev/null +++ b/crates/apollo_dashboard/src/lib.rs @@ -0,0 +1,7 @@ +pub mod alert_definitions; +mod alerts; +mod dashboard; +pub mod dashboard_definitions; +#[cfg(test)] +mod metric_definitions_test; +mod panels; diff --git a/crates/apollo_dashboard/src/metric_definitions_test.rs b/crates/apollo_dashboard/src/metric_definitions_test.rs new file mode 100644 index 00000000000..3a760714d82 --- /dev/null +++ b/crates/apollo_dashboard/src/metric_definitions_test.rs @@ -0,0 +1,43 @@ +use std::collections::HashSet; + +use apollo_batcher::metrics::BATCHER_ALL_METRICS; +use 
apollo_class_manager::metrics::CLASS_MANAGER_ALL_METRICS;
+use apollo_compile_to_casm::metrics::COMPILE_TO_CASM_ALL_METRICS;
+use apollo_consensus::metrics::CONSENSUS_ALL_METRICS;
+use apollo_consensus_manager::metrics::CONSENSUS_MANAGER_ALL_METRICS;
+use apollo_consensus_orchestrator::metrics::CONSENSUS_ORCHESTRATOR_ALL_METRICS;
+use apollo_gateway::metrics::GATEWAY_ALL_METRICS;
+use apollo_http_server::metrics::HTTP_SERVER_ALL_METRICS;
+use apollo_infra::metrics::INFRA_ALL_METRICS;
+use apollo_l1_gas_price::metrics::L1_GAS_PRICE_ALL_METRICS;
+use apollo_l1_provider::metrics::L1_PROVIDER_ALL_METRICS;
+use apollo_mempool::metrics::MEMPOOL_ALL_METRICS;
+use apollo_mempool_p2p::metrics::MEMPOOL_P2P_ALL_METRICS;
+use apollo_state_sync_metrics::metrics::STATE_SYNC_ALL_METRICS;
+use blockifier::metrics::BLOCKIFIER_ALL_METRICS;
+
+#[test]
+fn metric_names_no_duplications() {
+    let all_metric_names = BATCHER_ALL_METRICS
+        .iter()
+        .chain(CLASS_MANAGER_ALL_METRICS.iter())
+        .chain(COMPILE_TO_CASM_ALL_METRICS.iter())
+        .chain(CONSENSUS_ALL_METRICS.iter())
+        .chain(CONSENSUS_MANAGER_ALL_METRICS.iter())
+        .chain(CONSENSUS_ORCHESTRATOR_ALL_METRICS.iter())
+        .chain(GATEWAY_ALL_METRICS.iter())
+        .chain(HTTP_SERVER_ALL_METRICS.iter())
+        .chain(INFRA_ALL_METRICS.iter())
+        .chain(L1_GAS_PRICE_ALL_METRICS.iter())
+        .chain(L1_PROVIDER_ALL_METRICS.iter())
+        .chain(MEMPOOL_ALL_METRICS.iter())
+        .chain(MEMPOOL_P2P_ALL_METRICS.iter())
+        .chain(STATE_SYNC_ALL_METRICS.iter())
+        .chain(BLOCKIFIER_ALL_METRICS.iter())
+        .collect::<Vec<_>>();
+
+    let mut unique_metric_names: HashSet<&&'static str> = HashSet::new();
+    for metric_name in all_metric_names {
+        assert!(unique_metric_names.insert(metric_name), "Duplicated metric name: {}", metric_name);
+    }
+}
diff --git a/crates/apollo_dashboard/src/panels.rs b/crates/apollo_dashboard/src/panels.rs
new file mode 100644
index 00000000000..2940395b011
--- /dev/null
+++ b/crates/apollo_dashboard/src/panels.rs
@@ -0,0 +1,12 @@
+pub(crate) mod batcher;
+pub(crate) mod blockifier;
+pub(crate) mod class_manager;
+pub(crate) mod consensus;
+pub(crate) mod gateway;
+pub(crate) mod http_server;
+pub(crate) mod l1_gas_price;
+pub(crate) mod l1_provider;
+pub(crate) mod mempool;
+pub(crate) mod mempool_p2p;
+pub(crate) mod sierra_compiler;
+pub(crate) mod state_sync;
diff --git a/crates/apollo_dashboard/src/panels/batcher.rs b/crates/apollo_dashboard/src/panels/batcher.rs
new file mode 100644
index 00000000000..a192bf31398
--- /dev/null
+++ b/crates/apollo_dashboard/src/panels/batcher.rs
@@ -0,0 +1,94 @@
+use apollo_batcher::metrics::{
+    BATCHED_TRANSACTIONS,
+    LAST_BATCHED_BLOCK,
+    PROPOSAL_FAILED,
+    PROPOSAL_STARTED,
+    PROPOSAL_SUCCEEDED,
+    REJECTED_TRANSACTIONS,
+};
+use apollo_infra::metrics::{
+    BATCHER_LOCAL_MSGS_PROCESSED,
+    BATCHER_LOCAL_MSGS_RECEIVED,
+    BATCHER_LOCAL_QUEUE_DEPTH,
+    BATCHER_REMOTE_CLIENT_SEND_ATTEMPTS,
+    BATCHER_REMOTE_MSGS_PROCESSED,
+    BATCHER_REMOTE_MSGS_RECEIVED,
+    BATCHER_REMOTE_VALID_MSGS_RECEIVED,
+};
+
+use crate::dashboard::{Panel, PanelType, Row};
+
+fn get_panel_proposal_started() -> Panel {
+    Panel::from_counter(PROPOSAL_STARTED, PanelType::Stat)
+}
+fn get_panel_proposal_succeeded() -> Panel {
+    Panel::from_counter(PROPOSAL_SUCCEEDED, PanelType::Stat)
+}
+fn get_panel_proposal_failed() -> Panel {
+    Panel::from_counter(PROPOSAL_FAILED, PanelType::Stat)
+}
+fn get_panel_batched_transactions() -> Panel {
+    Panel::from_counter(BATCHED_TRANSACTIONS, PanelType::Stat)
+}
+fn get_panel_last_batched_block() -> Panel {
+    Panel::from_gauge(LAST_BATCHED_BLOCK,
+fn get_panel_batcher_local_msgs_received() -> Panel {
+    Panel::from_counter(BATCHER_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_batcher_local_msgs_processed() -> Panel {
+    Panel::from_counter(BATCHER_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries)
+}
+fn get_panel_batcher_remote_msgs_received() -> Panel {
+    Panel::from_counter(BATCHER_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_batcher_remote_valid_msgs_received() -> Panel {
+    Panel::from_counter(BATCHER_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_batcher_remote_msgs_processed() -> Panel {
+    Panel::from_counter(BATCHER_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries)
+}
+fn get_panel_batcher_local_queue_depth() -> Panel {
+    Panel::from_gauge(BATCHER_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries)
+}
+fn get_panel_batcher_remote_client_send_attempts() -> Panel {
+    Panel::from_hist(BATCHER_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries)
+}
+fn get_panel_rejection_ratio() -> Panel {
+    Panel::ratio_time_series(
+        "rejection_ratio",
+        "Ratio of rejected transactions out of all processed, over the last 5 minutes",
+        &REJECTED_TRANSACTIONS,
+        &[&REJECTED_TRANSACTIONS, &BATCHED_TRANSACTIONS],
+        "5m",
+    )
+}
+
+pub(crate) fn get_batcher_row() -> Row {
+    Row::new(
+        "Batcher",
+        vec![
+            get_panel_proposal_started(),
+            get_panel_proposal_succeeded(),
+            get_panel_proposal_failed(),
+            get_panel_batched_transactions(),
+            get_panel_last_batched_block(),
+            get_panel_rejection_ratio(),
+        ],
+    )
+}
+
+pub(crate) fn get_batcher_infra_row() -> Row {
+    Row::new(
+        "Batcher Infra",
+        vec![
+            get_panel_batcher_local_msgs_received(),
+            get_panel_batcher_local_msgs_processed(),
+            get_panel_batcher_local_queue_depth(),
+            get_panel_batcher_remote_msgs_received(),
+            get_panel_batcher_remote_valid_msgs_received(),
+            get_panel_batcher_remote_msgs_processed(),
+            get_panel_batcher_remote_client_send_attempts(),
+        ],
+    )
+}
diff --git a/crates/apollo_dashboard/src/panels/blockifier.rs b/crates/apollo_dashboard/src/panels/blockifier.rs
new file mode 100644
index 00000000000..70c5e3aeafa
--- /dev/null
+++ b/crates/apollo_dashboard/src/panels/blockifier.rs
@@ -0,0 +1,60 @@
+use blockifier::metrics::{
+    BLOCKIFIER_METRIC_RATE_DURATION,
+    CALLS_RUNNING_NATIVE,
+    CLASS_CACHE_HITS,
+    CLASS_CACHE_MISSES,
+    NATIVE_CLASS_RETURNED,
+    NATIVE_COMPILATION_ERROR,
+    TOTAL_CALLS,
+};
+
+use crate::dashboard::{Panel, PanelType, Row};
+
+// TODO(MatanL/Shahak): use clamp_min(X, 1) on denom to avoid division by zero.
+fn get_panel_blockifier_state_reader_class_cache_miss_ratio() -> Panel {
+    Panel::ratio_time_series(
+        "class_cache_miss_ratio",
+        "The ratio of cache misses when requesting compiled classes from the Blockifier State \
+         Reader",
+        &CLASS_CACHE_MISSES,
+        &[&CLASS_CACHE_MISSES, &CLASS_CACHE_HITS],
+        BLOCKIFIER_METRIC_RATE_DURATION,
+    )
+}
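+// NOTE: illustrative, standalone sketch only, not part of this change. It shows
+// the clamp_min guard that the TODOs in this file describe; the helper is
+// hypothetical and only mirrors the PromQL shape that Panel::ratio_time_series
+// produces (see test_ratio_time_series in dashboard_test.rs).
+#[allow(dead_code)]
+fn ratio_expr_with_clamp(numerator: &str, denominators: &[&str], duration: &str) -> String {
+    let num = format!("increase({numerator}[{duration}])");
+    let denom = denominators
+        .iter()
+        .map(|d| format!("increase({d}[{duration}])"))
+        .collect::<Vec<_>>()
+        .join(" + ");
+    // clamp_min pins the denominator at >= 1, so the ratio stays defined even
+    // when no events were recorded in the window.
+    format!("100 * ({num} / clamp_min({denom}, 1))")
+}
+
+// TODO(MatanL/Shahak): use clamp_min(X, 1) on denom to avoid division by zero.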
+fn get_panel_blockifier_state_reader_native_class_returned_ratio() -> Panel { + Panel::ratio_time_series( + "native_class_returned_ratio", + "The ratio of Native classes returned by the Blockifier", + &NATIVE_CLASS_RETURNED, + &[&CLASS_CACHE_HITS, &CLASS_CACHE_MISSES], + BLOCKIFIER_METRIC_RATE_DURATION, + ) +} + +fn get_panel_native_compilation_error() -> Panel { + Panel::from_counter(NATIVE_COMPILATION_ERROR, PanelType::Stat) +} + +fn get_panel_native_execution_ratio() -> Panel { + Panel::ratio_time_series( + "native_execution_ratio", + "The ratio of calls running Cairo Native in the Blockifier", + &CALLS_RUNNING_NATIVE, + &[&TOTAL_CALLS], + BLOCKIFIER_METRIC_RATE_DURATION, + ) +} + +pub(crate) fn get_blockifier_row() -> Row { + Row::new( + "Blockifier", + vec![ + get_panel_blockifier_state_reader_class_cache_miss_ratio(), + get_panel_blockifier_state_reader_native_class_returned_ratio(), + get_panel_native_compilation_error(), + get_panel_native_execution_ratio(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/class_manager.rs b/crates/apollo_dashboard/src/panels/class_manager.rs new file mode 100644 index 00000000000..604c43e0e29 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/class_manager.rs @@ -0,0 +1,48 @@ +use apollo_infra::metrics::{ + CLASS_MANAGER_LOCAL_MSGS_PROCESSED, + CLASS_MANAGER_LOCAL_MSGS_RECEIVED, + CLASS_MANAGER_LOCAL_QUEUE_DEPTH, + CLASS_MANAGER_REMOTE_CLIENT_SEND_ATTEMPTS, + CLASS_MANAGER_REMOTE_MSGS_PROCESSED, + CLASS_MANAGER_REMOTE_MSGS_RECEIVED, + CLASS_MANAGER_REMOTE_VALID_MSGS_RECEIVED, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_class_manager_local_msgs_received() -> Panel { + Panel::from_counter(CLASS_MANAGER_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_class_manager_local_msgs_processed() -> Panel { + Panel::from_counter(CLASS_MANAGER_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_class_manager_remote_msgs_received() -> Panel { + Panel::from_counter(CLASS_MANAGER_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_class_manager_remote_valid_msgs_received() -> Panel { + Panel::from_counter(CLASS_MANAGER_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_class_manager_remote_msgs_processed() -> Panel { + Panel::from_counter(CLASS_MANAGER_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_class_manager_local_queue_depth() -> Panel { + Panel::from_gauge(CLASS_MANAGER_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries) +} +fn get_panel_class_manager_remote_client_send_attempts() -> Panel { + Panel::from_hist(CLASS_MANAGER_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries) +} + +pub(crate) fn get_class_manager_infra_row() -> Row { + Row::new( + "Class Manager Infra", + vec![ + get_panel_class_manager_local_msgs_received(), + get_panel_class_manager_local_msgs_processed(), + get_panel_class_manager_local_queue_depth(), + get_panel_class_manager_remote_msgs_received(), + get_panel_class_manager_remote_valid_msgs_received(), + get_panel_class_manager_remote_msgs_processed(), + get_panel_class_manager_remote_client_send_attempts(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/consensus.rs b/crates/apollo_dashboard/src/panels/consensus.rs new file mode 100644 index 00000000000..8d9ac03c989 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/consensus.rs @@ -0,0 +1,244 @@ +use apollo_consensus::metrics::{ + CONSENSUS_BLOCK_NUMBER, + CONSENSUS_BUILD_PROPOSAL_FAILED, + CONSENSUS_BUILD_PROPOSAL_TOTAL, + CONSENSUS_CACHED_VOTES, + 
CONSENSUS_CONFLICTING_VOTES, + CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS, + CONSENSUS_DECISIONS_REACHED_BY_SYNC, + CONSENSUS_HELD_LOCKS, + CONSENSUS_INBOUND_STREAM_EVICTED, + CONSENSUS_INBOUND_STREAM_FINISHED, + CONSENSUS_INBOUND_STREAM_STARTED, + CONSENSUS_MAX_CACHED_BLOCK_NUMBER, + CONSENSUS_NEW_VALUE_LOCKS, + CONSENSUS_OUTBOUND_STREAM_FINISHED, + CONSENSUS_OUTBOUND_STREAM_STARTED, + CONSENSUS_PROPOSALS_INVALID, + CONSENSUS_PROPOSALS_RECEIVED, + CONSENSUS_PROPOSALS_VALIDATED, + CONSENSUS_PROPOSALS_VALID_INIT, + CONSENSUS_REPROPOSALS, + CONSENSUS_ROUND, + CONSENSUS_ROUND_ABOVE_ZERO, + CONSENSUS_TIMEOUTS, + LABEL_NAME_TIMEOUT_REASON, +}; +use apollo_consensus_manager::metrics::{ + CONSENSUS_NUM_CONNECTED_PEERS, + CONSENSUS_PROPOSALS_NUM_RECEIVED_MESSAGES, + CONSENSUS_PROPOSALS_NUM_SENT_MESSAGES, + CONSENSUS_VOTES_NUM_RECEIVED_MESSAGES, + CONSENSUS_VOTES_NUM_SENT_MESSAGES, +}; +use apollo_consensus_orchestrator::metrics::{ + CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER, + CENDE_PREPARE_BLOB_FOR_NEXT_HEIGHT_LATENCY, + CENDE_WRITE_BLOB_FAILURE, + CENDE_WRITE_BLOB_SUCCESS, + CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY, + CONSENSUS_L1_DATA_GAS_MISMATCH, + CONSENSUS_L1_GAS_MISMATCH, + CONSENSUS_L2_GAS_PRICE, + CONSENSUS_NUM_BATCHES_IN_PROPOSAL, + CONSENSUS_NUM_TXS_IN_PROPOSAL, + LABEL_CENDE_FAILURE_REASON, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_consensus_block_number() -> Panel { + Panel::from_gauge(CONSENSUS_BLOCK_NUMBER, PanelType::TimeSeries) +} +fn get_panel_consensus_round() -> Panel { + Panel::from_gauge(CONSENSUS_ROUND, PanelType::TimeSeries) +} +fn get_panel_consensus_round_avg() -> Panel { + Panel::new( + "Average consensus round", + "Average consensus round (10m)", + vec![format!("avg_over_time({}[10m])", CONSENSUS_ROUND.get_name_with_filter())], + PanelType::TimeSeries, + ) +} +fn get_panel_consensus_round_above_zero() -> Panel { + Panel::from_counter(CONSENSUS_ROUND_ABOVE_ZERO, PanelType::TimeSeries) +} +fn get_panel_consensus_max_cached_block_number() -> Panel { + Panel::from_gauge(CONSENSUS_MAX_CACHED_BLOCK_NUMBER, PanelType::TimeSeries) +} +fn get_panel_consensus_cached_votes() -> Panel { + Panel::from_gauge(CONSENSUS_CACHED_VOTES, PanelType::TimeSeries) +} +fn get_panel_consensus_decisions_reached_by_consensus() -> Panel { + Panel::from_counter(CONSENSUS_DECISIONS_REACHED_BY_CONSENSUS, PanelType::TimeSeries) +} +fn get_panel_consensus_decisions_reached_by_sync() -> Panel { + Panel::from_counter(CONSENSUS_DECISIONS_REACHED_BY_SYNC, PanelType::TimeSeries) +} +fn get_panel_consensus_inbound_stream_started() -> Panel { + Panel::from_counter(CONSENSUS_INBOUND_STREAM_STARTED, PanelType::TimeSeries) +} +fn get_panel_consensus_inbound_stream_evicted() -> Panel { + Panel::from_counter(CONSENSUS_INBOUND_STREAM_EVICTED, PanelType::TimeSeries) +} +fn get_panel_consensus_inbound_stream_finished() -> Panel { + Panel::from_counter(CONSENSUS_INBOUND_STREAM_FINISHED, PanelType::TimeSeries) +} +fn get_panel_consensus_outbound_stream_started() -> Panel { + Panel::from_counter(CONSENSUS_OUTBOUND_STREAM_STARTED, PanelType::TimeSeries) +} +fn get_panel_consensus_outbound_stream_finished() -> Panel { + Panel::from_counter(CONSENSUS_OUTBOUND_STREAM_FINISHED, PanelType::TimeSeries) +} +fn get_panel_consensus_proposals_received() -> Panel { + Panel::from_counter(CONSENSUS_PROPOSALS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_consensus_proposals_valid_init() -> Panel { + Panel::from_counter(CONSENSUS_PROPOSALS_VALID_INIT, PanelType::TimeSeries) +} +fn 
get_panel_consensus_proposals_validated() -> Panel { + Panel::from_counter(CONSENSUS_PROPOSALS_VALIDATED, PanelType::TimeSeries) +} +fn get_panel_consensus_proposals_invalid() -> Panel { + Panel::from_counter(CONSENSUS_PROPOSALS_INVALID, PanelType::TimeSeries) +} +fn get_panel_consensus_build_proposal_total() -> Panel { + Panel::from_counter(CONSENSUS_BUILD_PROPOSAL_TOTAL, PanelType::TimeSeries) +} +fn get_panel_consensus_build_proposal_failed() -> Panel { + Panel::from_counter(CONSENSUS_BUILD_PROPOSAL_FAILED, PanelType::TimeSeries) +} +fn get_panel_consensus_reproposals() -> Panel { + Panel::from_counter(CONSENSUS_REPROPOSALS, PanelType::TimeSeries) +} +fn get_panel_consensus_new_value_locks() -> Panel { + Panel::from_counter(CONSENSUS_NEW_VALUE_LOCKS, PanelType::TimeSeries) +} +fn get_panel_consensus_held_locks() -> Panel { + Panel::from_counter(CONSENSUS_HELD_LOCKS, PanelType::TimeSeries) +} +fn get_panel_consensus_timeouts_by_type() -> Panel { + Panel::new( + CONSENSUS_TIMEOUTS.get_name(), + CONSENSUS_TIMEOUTS.get_description(), + vec![format!( + "sum by ({}) ({})", + LABEL_NAME_TIMEOUT_REASON, + CONSENSUS_TIMEOUTS.get_name_with_filter() + )], + PanelType::TimeSeries, + ) +} +fn get_panel_consensus_num_batches_in_proposal() -> Panel { + Panel::from_gauge(CONSENSUS_NUM_BATCHES_IN_PROPOSAL, PanelType::TimeSeries) +} +fn get_panel_consensus_num_txs_in_proposal() -> Panel { + Panel::from_gauge(CONSENSUS_NUM_TXS_IN_PROPOSAL, PanelType::TimeSeries) +} +fn get_panel_consensus_l2_gas_price() -> Panel { + Panel::from_gauge(CONSENSUS_L2_GAS_PRICE, PanelType::TimeSeries) +} +fn get_panel_consensus_num_connected_peers() -> Panel { + Panel::from_gauge(CONSENSUS_NUM_CONNECTED_PEERS, PanelType::TimeSeries) +} +fn get_panel_consensus_votes_num_sent_messages() -> Panel { + Panel::from_counter(CONSENSUS_VOTES_NUM_SENT_MESSAGES, PanelType::TimeSeries) +} +fn get_panel_consensus_votes_num_received_messages() -> Panel { + Panel::from_counter(CONSENSUS_VOTES_NUM_RECEIVED_MESSAGES, PanelType::TimeSeries) +} +fn get_panel_consensus_proposals_num_sent_messages() -> Panel { + Panel::from_counter(CONSENSUS_PROPOSALS_NUM_SENT_MESSAGES, PanelType::TimeSeries) +} +fn get_panel_consensus_proposals_num_received_messages() -> Panel { + Panel::from_counter(CONSENSUS_PROPOSALS_NUM_RECEIVED_MESSAGES, PanelType::TimeSeries) +} +fn get_panel_consensus_conflicting_votes() -> Panel { + Panel::from_counter(CONSENSUS_CONFLICTING_VOTES, PanelType::TimeSeries) +} +fn get_panel_cende_last_prepared_blob_block_number() -> Panel { + Panel::from_gauge(CENDE_LAST_PREPARED_BLOB_BLOCK_NUMBER, PanelType::TimeSeries) +} +fn get_panel_cende_prepare_blob_for_next_height_latency() -> Panel { + Panel::from_hist(CENDE_PREPARE_BLOB_FOR_NEXT_HEIGHT_LATENCY, PanelType::TimeSeries) +} +fn get_panel_cende_write_prev_height_blob_latency() -> Panel { + Panel::from_hist(CENDE_WRITE_PREV_HEIGHT_BLOB_LATENCY, PanelType::TimeSeries) +} +fn get_panel_cende_write_blob_success() -> Panel { + Panel::from_counter(CENDE_WRITE_BLOB_SUCCESS, PanelType::TimeSeries) +} +fn get_panel_cende_write_blob_failure() -> Panel { + Panel::new( + CENDE_WRITE_BLOB_FAILURE.get_name(), + CENDE_WRITE_BLOB_FAILURE.get_description(), + vec![format!( + "sum by ({}) ({})", + LABEL_CENDE_FAILURE_REASON, + CENDE_WRITE_BLOB_FAILURE.get_name_with_filter() + )], + PanelType::TimeSeries, + ) +} +fn get_panel_consensus_l1_data_gas_mismatch() -> Panel { + Panel::from_counter(CONSENSUS_L1_DATA_GAS_MISMATCH, PanelType::TimeSeries) +} +fn get_panel_consensus_l1_gas_mismatch() -> Panel { + 
Panel::from_counter(CONSENSUS_L1_GAS_MISMATCH, PanelType::TimeSeries) +} + +pub(crate) fn get_consensus_row() -> Row { + Row::new( + "Consensus", + vec![ + get_panel_consensus_block_number(), + get_panel_consensus_round(), + get_panel_consensus_round_avg(), + get_panel_consensus_round_above_zero(), + get_panel_consensus_max_cached_block_number(), + get_panel_consensus_cached_votes(), + get_panel_consensus_decisions_reached_by_consensus(), + get_panel_consensus_decisions_reached_by_sync(), + get_panel_consensus_proposals_received(), + get_panel_consensus_proposals_valid_init(), + get_panel_consensus_proposals_validated(), + get_panel_consensus_proposals_invalid(), + get_panel_consensus_build_proposal_total(), + get_panel_consensus_build_proposal_failed(), + get_panel_consensus_reproposals(), + get_panel_consensus_new_value_locks(), + get_panel_consensus_held_locks(), + get_panel_consensus_timeouts_by_type(), + get_panel_consensus_num_batches_in_proposal(), + get_panel_consensus_num_txs_in_proposal(), + get_panel_consensus_inbound_stream_started(), + get_panel_consensus_inbound_stream_evicted(), + get_panel_consensus_inbound_stream_finished(), + get_panel_consensus_outbound_stream_started(), + get_panel_consensus_outbound_stream_finished(), + get_panel_consensus_l2_gas_price(), + get_panel_cende_last_prepared_blob_block_number(), + get_panel_cende_prepare_blob_for_next_height_latency(), + get_panel_cende_write_prev_height_blob_latency(), + get_panel_cende_write_blob_success(), + get_panel_cende_write_blob_failure(), + get_panel_consensus_l1_data_gas_mismatch(), + get_panel_consensus_l1_gas_mismatch(), + ], + ) +} + +pub(crate) fn get_consensus_p2p_row() -> Row { + Row::new( + "ConsensusP2p", + vec![ + get_panel_consensus_num_connected_peers(), + get_panel_consensus_votes_num_sent_messages(), + get_panel_consensus_votes_num_received_messages(), + get_panel_consensus_proposals_num_sent_messages(), + get_panel_consensus_proposals_num_received_messages(), + get_panel_consensus_conflicting_votes(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/gateway.rs b/crates/apollo_dashboard/src/panels/gateway.rs new file mode 100644 index 00000000000..37960276e38 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/gateway.rs @@ -0,0 +1,144 @@ +use apollo_gateway::metrics::{ + GATEWAY_ADD_TX_LATENCY, + GATEWAY_TRANSACTIONS_FAILED, + GATEWAY_TRANSACTIONS_RECEIVED, + GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL, + GATEWAY_VALIDATE_TX_LATENCY, + LABEL_NAME_SOURCE, + LABEL_NAME_TX_TYPE as GATEWAY_LABEL_NAME_TX_TYPE, +}; +use apollo_infra::metrics::{ + GATEWAY_LOCAL_MSGS_PROCESSED, + GATEWAY_LOCAL_MSGS_RECEIVED, + GATEWAY_LOCAL_QUEUE_DEPTH, + GATEWAY_REMOTE_CLIENT_SEND_ATTEMPTS, + GATEWAY_REMOTE_MSGS_PROCESSED, + GATEWAY_REMOTE_MSGS_RECEIVED, + GATEWAY_REMOTE_VALID_MSGS_RECEIVED, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_gateway_transactions_received_by_type() -> Panel { + Panel::new( + GATEWAY_TRANSACTIONS_RECEIVED.get_name(), + GATEWAY_TRANSACTIONS_RECEIVED.get_description(), + vec![format!( + "sum by ({}) ({}) ", + GATEWAY_LABEL_NAME_TX_TYPE, + GATEWAY_TRANSACTIONS_RECEIVED.get_name_with_filter() + )], + PanelType::Stat, + ) +} + +fn get_panel_gateway_local_msgs_received() -> Panel { + Panel::from_counter(GATEWAY_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_gateway_local_msgs_processed() -> Panel { + Panel::from_counter(GATEWAY_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_gateway_remote_msgs_received() -> Panel { + 
Panel::from_counter(GATEWAY_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_gateway_remote_valid_msgs_received() -> Panel { + Panel::from_counter(GATEWAY_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_gateway_remote_msgs_processed() -> Panel { + Panel::from_counter(GATEWAY_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_gateway_local_queue_depth() -> Panel { + Panel::from_gauge(GATEWAY_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries) +} +fn get_panel_gateway_remote_client_send_attempts() -> Panel { + Panel::from_hist(GATEWAY_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries) +} + +fn get_panel_gateway_transactions_received_by_source() -> Panel { + Panel::new( + GATEWAY_TRANSACTIONS_RECEIVED.get_name(), + GATEWAY_TRANSACTIONS_RECEIVED.get_description(), + vec![format!( + "sum by ({}) ({}) ", + LABEL_NAME_SOURCE, + GATEWAY_TRANSACTIONS_RECEIVED.get_name_with_filter() + )], + PanelType::Stat, + ) +} + +fn get_panel_gateway_transactions_received_rate() -> Panel { + Panel::new( + "gateway_transactions_received_rate (TPS)", + "The rate of transactions received by the gateway during the last 20 minutes", + vec![format!( + "sum(rate({}[20m])) or vector(0)", + GATEWAY_TRANSACTIONS_RECEIVED.get_name_with_filter() + )], + PanelType::TimeSeries, + ) +} + +fn get_panel_gateway_add_tx_latency() -> Panel { + Panel::from_hist(GATEWAY_ADD_TX_LATENCY, PanelType::TimeSeries) +} + +fn get_panel_gateway_validate_tx_latency() -> Panel { + Panel::from_hist(GATEWAY_VALIDATE_TX_LATENCY, PanelType::TimeSeries) +} + +fn get_panel_gateway_transactions_failed() -> Panel { + Panel::new( + GATEWAY_TRANSACTIONS_FAILED.get_name(), + GATEWAY_TRANSACTIONS_FAILED.get_description(), + vec![format!( + "sum by ({}) ({})", + GATEWAY_LABEL_NAME_TX_TYPE, + GATEWAY_TRANSACTIONS_FAILED.get_name_with_filter() + )], + PanelType::Stat, + ) +} + +fn get_panel_gateway_transactions_sent_to_mempool() -> Panel { + Panel::new( + GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL.get_name(), + GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL.get_description(), + vec![format!( + "sum by ({}) ({})", + GATEWAY_LABEL_NAME_TX_TYPE, + GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL.get_name_with_filter() + )], + PanelType::Stat, + ) +} + +pub(crate) fn get_gateway_row() -> Row { + Row::new( + "Gateway", + vec![ + get_panel_gateway_transactions_received_by_type(), + get_panel_gateway_transactions_received_by_source(), + get_panel_gateway_transactions_received_rate(), + get_panel_gateway_add_tx_latency(), + get_panel_gateway_validate_tx_latency(), + get_panel_gateway_transactions_failed(), + get_panel_gateway_transactions_sent_to_mempool(), + ], + ) +} + +pub(crate) fn get_gateway_infra_row() -> Row { + Row::new( + "Gateway Infra", + vec![ + get_panel_gateway_local_msgs_received(), + get_panel_gateway_local_msgs_processed(), + get_panel_gateway_local_queue_depth(), + get_panel_gateway_remote_msgs_received(), + get_panel_gateway_remote_valid_msgs_received(), + get_panel_gateway_remote_msgs_processed(), + get_panel_gateway_remote_client_send_attempts(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/http_server.rs b/crates/apollo_dashboard/src/panels/http_server.rs new file mode 100644 index 00000000000..587bd86ea7c --- /dev/null +++ b/crates/apollo_dashboard/src/panels/http_server.rs @@ -0,0 +1,61 @@ +use apollo_http_server::metrics::{ + ADDED_TRANSACTIONS_DEPRECATED_ERROR, + ADDED_TRANSACTIONS_FAILURE, + ADDED_TRANSACTIONS_INTERNAL_ERROR, + ADDED_TRANSACTIONS_SUCCESS, + ADDED_TRANSACTIONS_TOTAL, + HTTP_SERVER_ADD_TX_LATENCY, +}; + 
+use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_added_transactions_total() -> Panel { + Panel::from_counter(ADDED_TRANSACTIONS_TOTAL, PanelType::TimeSeries) +} + +fn get_panel_added_transactions_success() -> Panel { + Panel::from_counter(ADDED_TRANSACTIONS_SUCCESS, PanelType::TimeSeries) +} + +fn get_panel_added_transactions_failure() -> Panel { + Panel::from_counter(ADDED_TRANSACTIONS_FAILURE, PanelType::TimeSeries) +} + +fn get_panel_added_transactions_internal_error() -> Panel { + Panel::from_counter(ADDED_TRANSACTIONS_INTERNAL_ERROR, PanelType::TimeSeries) +} + +fn get_panel_added_transactions_deprecated_error() -> Panel { + Panel::from_counter(ADDED_TRANSACTIONS_DEPRECATED_ERROR, PanelType::TimeSeries) +} + +fn get_panel_http_server_transactions_received_rate() -> Panel { + Panel::new( + "http_server_transactions_received_rate (TPS)", + "The rate of transactions received by the HTTP Server during the last 20 minutes", + vec![format!( + "sum(rate({}[20m])) or vector(0)", + ADDED_TRANSACTIONS_TOTAL.get_name_with_filter() + )], + PanelType::TimeSeries, + ) +} + +fn get_panel_http_add_tx_latency() -> Panel { + Panel::from_hist(HTTP_SERVER_ADD_TX_LATENCY, PanelType::TimeSeries) +} + +pub(crate) fn get_http_server_row() -> Row { + Row::new( + "Http Server", + vec![ + get_panel_added_transactions_total(), + get_panel_http_server_transactions_received_rate(), + get_panel_added_transactions_success(), + get_panel_added_transactions_failure(), + get_panel_added_transactions_internal_error(), + get_panel_added_transactions_deprecated_error(), + get_panel_http_add_tx_latency(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/l1_gas_price.rs b/crates/apollo_dashboard/src/panels/l1_gas_price.rs new file mode 100644 index 00000000000..a817da407b4 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/l1_gas_price.rs @@ -0,0 +1,120 @@ +use apollo_infra::metrics::{ + L1_GAS_PRICE_PROVIDER_LOCAL_MSGS_PROCESSED, + L1_GAS_PRICE_PROVIDER_LOCAL_MSGS_RECEIVED, + L1_GAS_PRICE_PROVIDER_LOCAL_QUEUE_DEPTH, + L1_GAS_PRICE_PROVIDER_REMOTE_CLIENT_SEND_ATTEMPTS, + L1_GAS_PRICE_PROVIDER_REMOTE_MSGS_PROCESSED, + L1_GAS_PRICE_PROVIDER_REMOTE_MSGS_RECEIVED, + L1_GAS_PRICE_PROVIDER_REMOTE_VALID_MSGS_RECEIVED, +}; +use apollo_l1_gas_price::metrics::{ + ETH_TO_STRK_ERROR_COUNT, + ETH_TO_STRK_RATE, + ETH_TO_STRK_SUCCESS_COUNT, + L1_GAS_PRICE_PROVIDER_INSUFFICIENT_HISTORY, + L1_GAS_PRICE_SCRAPER_BASELAYER_ERROR_COUNT, + L1_GAS_PRICE_SCRAPER_REORG_DETECTED, + L1_GAS_PRICE_SCRAPER_SUCCESS_COUNT, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_l1_gas_price_provider_local_msgs_received() -> Panel { + Panel::from_counter(L1_GAS_PRICE_PROVIDER_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_l1_gas_price_provider_local_msgs_processed() -> Panel { + Panel::from_counter(L1_GAS_PRICE_PROVIDER_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_l1_gas_price_provider_remote_msgs_received() -> Panel { + Panel::from_counter(L1_GAS_PRICE_PROVIDER_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_l1_gas_price_provider_remote_valid_msgs_received() -> Panel { + Panel::from_counter(L1_GAS_PRICE_PROVIDER_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_l1_gas_price_provider_remote_msgs_processed() -> Panel { + Panel::from_counter(L1_GAS_PRICE_PROVIDER_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_l1_gas_price_provider_local_queue_depth() -> Panel { + Panel::from_gauge(L1_GAS_PRICE_PROVIDER_LOCAL_QUEUE_DEPTH, 
PanelType::TimeSeries)
+}
+fn get_panel_l1_gas_price_provider_remote_client_send_attempts() -> Panel {
+    Panel::from_hist(L1_GAS_PRICE_PROVIDER_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries)
+}
+
+fn get_panel_l1_gas_price_provider_insufficient_history() -> Panel {
+    Panel::from_counter(L1_GAS_PRICE_PROVIDER_INSUFFICIENT_HISTORY, PanelType::Stat)
+}
+fn get_panel_l1_gas_price_scraper_success_count() -> Panel {
+    Panel::from_counter(L1_GAS_PRICE_SCRAPER_SUCCESS_COUNT, PanelType::Stat)
+}
+fn get_panel_l1_gas_price_scraper_baselayer_error_count() -> Panel {
+    Panel::from_counter(L1_GAS_PRICE_SCRAPER_BASELAYER_ERROR_COUNT, PanelType::Stat)
+}
+fn get_panel_l1_gas_price_scraper_reorg_detected() -> Panel {
+    Panel::from_counter(L1_GAS_PRICE_SCRAPER_REORG_DETECTED, PanelType::Stat)
+}
+fn get_panel_eth_to_strk_error_count() -> Panel {
+    Panel::from_counter(ETH_TO_STRK_ERROR_COUNT, PanelType::Stat)
+}
+fn get_panel_eth_to_strk_success_count() -> Panel {
+    Panel::from_counter(ETH_TO_STRK_SUCCESS_COUNT, PanelType::Stat)
+}
+
+fn get_panel_l1_gas_price_scraper_latest_scraped_block() -> Panel {
+    Panel::from_gauge(
+        apollo_l1_gas_price::metrics::L1_GAS_PRICE_SCRAPER_LATEST_SCRAPED_BLOCK,
+        PanelType::TimeSeries,
+    )
+}
+
+fn get_panel_eth_to_strk_rate() -> Panel {
+    Panel::from_gauge(ETH_TO_STRK_RATE, PanelType::TimeSeries)
+}
+
+fn get_panel_l1_gas_price_latest_mean_value() -> Panel {
+    Panel::from_gauge(
+        apollo_l1_gas_price::metrics::L1_GAS_PRICE_LATEST_MEAN_VALUE,
+        PanelType::TimeSeries,
+    )
+}
+
+fn get_panel_l1_data_gas_price_latest_mean_value() -> Panel {
+    Panel::from_gauge(
+        apollo_l1_gas_price::metrics::L1_DATA_GAS_PRICE_LATEST_MEAN_VALUE,
+        PanelType::TimeSeries,
+    )
+}
+
+pub(crate) fn get_l1_gas_price_row() -> Row {
+    Row::new(
+        "L1 Gas Price",
+        vec![
+            get_panel_eth_to_strk_error_count(),
+            get_panel_eth_to_strk_success_count(),
+            get_panel_eth_to_strk_rate(),
+            get_panel_l1_gas_price_provider_insufficient_history(),
+            get_panel_l1_gas_price_scraper_success_count(),
+            get_panel_l1_gas_price_scraper_baselayer_error_count(),
+            get_panel_l1_gas_price_scraper_reorg_detected(),
+            get_panel_l1_gas_price_scraper_latest_scraped_block(),
+            get_panel_l1_gas_price_latest_mean_value(),
+            get_panel_l1_data_gas_price_latest_mean_value(),
+        ],
+    )
+}
+
+pub(crate) fn get_l1_gas_price_infra_row() -> Row {
+    Row::new(
+        "L1 Gas Price Infra",
+        vec![
+            get_panel_l1_gas_price_provider_local_msgs_received(),
+            get_panel_l1_gas_price_provider_local_msgs_processed(),
+            get_panel_l1_gas_price_provider_local_queue_depth(),
+            get_panel_l1_gas_price_provider_remote_msgs_received(),
+            get_panel_l1_gas_price_provider_remote_valid_msgs_received(),
+            get_panel_l1_gas_price_provider_remote_msgs_processed(),
+            get_panel_l1_gas_price_provider_remote_client_send_attempts(),
+        ],
+    )
+}
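+// NOTE: illustrative, standalone sketch only, not part of this change. In the
+// spirit of the metric_names_no_duplications test, a guard like this could
+// catch a panel being listed twice in a row; the helper and the hand-collected
+// name list below are hypothetical.
+#[cfg(test)]
+mod row_uniqueness_sketch {
+    use std::collections::HashSet;
+
+    fn assert_unique(row: &str, panel_names: &[&str]) {
+        let mut seen = HashSet::new();
+        for name in panel_names {
+            assert!(seen.insert(name), "Row '{row}' lists panel '{name}' twice");
+        }
+    }
+
+    #[test]
+    fn l1_gas_price_row_panels_are_unique() {
+        assert_unique("L1 Gas Price", &["eth_to_strk_rate", "l1_gas_price_latest_mean_value"]);
+    }
+}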
diff --git a/crates/apollo_dashboard/src/panels/l1_provider.rs b/crates/apollo_dashboard/src/panels/l1_provider.rs
new file mode 100644
index 00000000000..eb8082996fc
--- /dev/null
+++ b/crates/apollo_dashboard/src/panels/l1_provider.rs
@@ -0,0 +1,73 @@
+use apollo_infra::metrics::{
+    L1_PROVIDER_LOCAL_MSGS_PROCESSED,
+    L1_PROVIDER_LOCAL_MSGS_RECEIVED,
+    L1_PROVIDER_LOCAL_QUEUE_DEPTH,
+    L1_PROVIDER_REMOTE_CLIENT_SEND_ATTEMPTS,
+    L1_PROVIDER_REMOTE_MSGS_PROCESSED,
+    L1_PROVIDER_REMOTE_MSGS_RECEIVED,
+    L1_PROVIDER_REMOTE_VALID_MSGS_RECEIVED,
+};
+use apollo_l1_provider::metrics::{
+    L1_MESSAGE_SCRAPER_BASELAYER_ERROR_COUNT,
+    L1_MESSAGE_SCRAPER_REORG_DETECTED,
+    L1_MESSAGE_SCRAPER_SUCCESS_COUNT,
+};
+
+use crate::dashboard::{Panel, PanelType, Row};
+
+fn get_panel_l1_provider_local_msgs_received() -> Panel {
+    Panel::from_counter(L1_PROVIDER_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_l1_provider_local_msgs_processed() -> Panel {
+    Panel::from_counter(L1_PROVIDER_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries)
+}
+fn get_panel_l1_provider_remote_msgs_received() -> Panel {
+    Panel::from_counter(L1_PROVIDER_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_l1_provider_remote_valid_msgs_received() -> Panel {
+    Panel::from_counter(L1_PROVIDER_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_l1_provider_remote_msgs_processed() -> Panel {
+    Panel::from_counter(L1_PROVIDER_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries)
+}
+fn get_panel_l1_provider_local_queue_depth() -> Panel {
+    Panel::from_gauge(L1_PROVIDER_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries)
+}
+fn get_panel_l1_provider_remote_client_send_attempts() -> Panel {
+    Panel::from_hist(L1_PROVIDER_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries)
+}
+fn get_panel_l1_message_scraper_success_count() -> Panel {
+    Panel::from_counter(L1_MESSAGE_SCRAPER_SUCCESS_COUNT, PanelType::TimeSeries)
+}
+fn get_panel_l1_message_scraper_baselayer_error_count() -> Panel {
+    Panel::from_counter(L1_MESSAGE_SCRAPER_BASELAYER_ERROR_COUNT, PanelType::TimeSeries)
+}
+fn get_panel_l1_message_scraper_reorg_detected() -> Panel {
+    Panel::from_counter(L1_MESSAGE_SCRAPER_REORG_DETECTED, PanelType::TimeSeries)
+}
+
+pub(crate) fn get_l1_provider_row() -> Row {
+    Row::new(
+        "L1 Provider",
+        vec![
+            get_panel_l1_message_scraper_success_count(),
+            get_panel_l1_message_scraper_baselayer_error_count(),
+            get_panel_l1_message_scraper_reorg_detected(),
+        ],
+    )
+}
+
+pub(crate) fn get_l1_provider_infra_row() -> Row {
+    Row::new(
+        "L1 Provider Infra",
+        vec![
+            get_panel_l1_provider_local_msgs_received(),
+            get_panel_l1_provider_local_msgs_processed(),
+            get_panel_l1_provider_local_queue_depth(),
+            get_panel_l1_provider_remote_msgs_received(),
+            get_panel_l1_provider_remote_valid_msgs_received(),
+            get_panel_l1_provider_remote_msgs_processed(),
+            get_panel_l1_provider_remote_client_send_attempts(),
+        ],
+    )
+}
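+// NOTE: illustrative sketch only, not part of this change. It assembles an
+// alert over the scraper error counter using the Alert types exercised in
+// dashboard_test.rs. The AlertGroup variant is taken as a parameter because the
+// enum's L1-related variant name is not shown here; the threshold and windows
+// are placeholders.
+#[allow(dead_code)]
+fn scraper_baselayer_error_alert(alert_group: crate::alerts::AlertGroup) -> crate::alerts::Alert {
+    use crate::alerts::{Alert, AlertComparisonOp, AlertCondition, AlertLogicalOp, AlertSeverity};
+    Alert {
+        name: "l1_message_scraper_baselayer_errors",
+        title: "L1 message scraper base-layer errors",
+        alert_group,
+        expr: format!(
+            "increase({}[5m])",
+            L1_MESSAGE_SCRAPER_BASELAYER_ERROR_COUNT.get_name_with_filter()
+        ),
+        conditions: &[AlertCondition {
+            comparison_op: AlertComparisonOp::GreaterThan,
+            comparison_value: 0.0,
+            logical_op: AlertLogicalOp::And,
+        }],
+        pending_duration: "5m",
+        evaluation_interval_sec: 20,
+        severity: AlertSeverity::Sos,
+    }
+}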
diff --git a/crates/apollo_dashboard/src/panels/mempool.rs b/crates/apollo_dashboard/src/panels/mempool.rs
new file mode 100644
index 00000000000..8ada0f23c49
--- /dev/null
+++ b/crates/apollo_dashboard/src/panels/mempool.rs
@@ -0,0 +1,173 @@
+use apollo_infra::metrics::{
+    MEMPOOL_LOCAL_MSGS_PROCESSED,
+    MEMPOOL_LOCAL_MSGS_RECEIVED,
+    MEMPOOL_LOCAL_QUEUE_DEPTH,
+    MEMPOOL_REMOTE_CLIENT_SEND_ATTEMPTS,
+    MEMPOOL_REMOTE_MSGS_PROCESSED,
+    MEMPOOL_REMOTE_MSGS_RECEIVED,
+    MEMPOOL_REMOTE_VALID_MSGS_RECEIVED,
+};
+use apollo_mempool::metrics::{
+    LABEL_NAME_DROP_REASON,
+    LABEL_NAME_TX_TYPE as MEMPOOL_LABEL_NAME_TX_TYPE,
+    MEMPOOL_DELAYED_DECLARES_SIZE,
+    MEMPOOL_GET_TXS_SIZE,
+    MEMPOOL_PENDING_QUEUE_SIZE,
+    MEMPOOL_POOL_SIZE,
+    MEMPOOL_PRIORITY_QUEUE_SIZE,
+    MEMPOOL_TOTAL_SIZE_BYTES,
+    MEMPOOL_TRANSACTIONS_COMMITTED,
+    MEMPOOL_TRANSACTIONS_DROPPED,
+    MEMPOOL_TRANSACTIONS_RECEIVED,
+    TRANSACTION_TIME_SPENT_IN_MEMPOOL,
+};
+
+use crate::dashboard::{Panel, PanelType, Row};
+
+fn get_panel_mempool_local_msgs_received() -> Panel {
+    Panel::from_counter(MEMPOOL_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_mempool_local_msgs_processed() -> Panel {
+    Panel::from_counter(MEMPOOL_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries)
+}
+fn get_panel_mempool_remote_msgs_received() -> Panel {
+    Panel::from_counter(MEMPOOL_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_mempool_remote_valid_msgs_received() -> Panel {
+    Panel::from_counter(MEMPOOL_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries)
+}
+fn get_panel_mempool_remote_msgs_processed() -> Panel {
+    Panel::from_counter(MEMPOOL_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries)
+}
+fn get_panel_mempool_local_queue_depth() -> Panel {
+    Panel::from_gauge(MEMPOOL_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries)
+}
+fn get_panel_mempool_remote_client_send_attempts() -> Panel {
+    Panel::from_hist(MEMPOOL_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries)
+}
+fn get_panel_mempool_transactions_received() -> Panel {
+    Panel::new(
+        MEMPOOL_TRANSACTIONS_RECEIVED.get_name(),
+        MEMPOOL_TRANSACTIONS_RECEIVED.get_description(),
+        vec![format!(
+            "sum by ({}) ({})",
+            MEMPOOL_LABEL_NAME_TX_TYPE,
+            MEMPOOL_TRANSACTIONS_RECEIVED.get_name_with_filter()
+        )],
+        PanelType::Stat,
+    )
+}
+fn get_panel_mempool_transactions_received_rate() -> Panel {
+    Panel::new(
+        "mempool_transactions_received_rate (TPS)",
+        "The rate of transactions received by the mempool during the last 20 minutes",
+        vec![format!(
+            "sum(rate({}[20m])) or vector(0)",
+            MEMPOOL_TRANSACTIONS_RECEIVED.get_name_with_filter()
+        )],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_transactions_committed() -> Panel {
+    Panel::from_counter(MEMPOOL_TRANSACTIONS_COMMITTED, PanelType::Stat)
+}
+fn get_panel_mempool_transactions_dropped() -> Panel {
+    Panel::new(
+        MEMPOOL_TRANSACTIONS_DROPPED.get_name(),
+        MEMPOOL_TRANSACTIONS_DROPPED.get_description(),
+        vec![format!(
+            "sum by ({}) ({})",
+            LABEL_NAME_DROP_REASON,
+            MEMPOOL_TRANSACTIONS_DROPPED.get_name_with_filter()
+        )],
+        PanelType::Stat,
+    )
+}
+fn get_panel_mempool_pool_size() -> Panel {
+    Panel::new(
+        MEMPOOL_POOL_SIZE.get_name(),
+        "The average number of transactions in the pool",
+        vec![format!("avg_over_time({}[2m])", MEMPOOL_POOL_SIZE.get_name_with_filter())],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_priority_queue_size() -> Panel {
+    Panel::new(
+        MEMPOOL_PRIORITY_QUEUE_SIZE.get_name(),
+        "The average number of transactions in the priority queue",
+        vec![format!("avg_over_time({}[2m])", MEMPOOL_PRIORITY_QUEUE_SIZE.get_name_with_filter())],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_pending_queue_size() -> Panel {
+    Panel::new(
+        MEMPOOL_PENDING_QUEUE_SIZE.get_name(),
+        "The average number of transactions in the pending queue",
+        vec![format!("avg_over_time({}[2m])", MEMPOOL_PENDING_QUEUE_SIZE.get_name_with_filter())],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_total_size_in_bytes() -> Panel {
+    Panel::new(
+        MEMPOOL_TOTAL_SIZE_BYTES.get_name(),
+        "The average total size, in bytes, of all transactions in the mempool",
+        vec![format!("avg_over_time({}[2m])", MEMPOOL_TOTAL_SIZE_BYTES.get_name_with_filter())],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_get_txs_size() -> Panel {
+    Panel::new(
+        MEMPOOL_GET_TXS_SIZE.get_name(),
+        "The average number of transactions returned per get_txs call",
+        vec![format!("avg_over_time({}[2m])", MEMPOOL_GET_TXS_SIZE.get_name_with_filter())],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_delayed_declares_size() -> Panel {
+    Panel::new(
+        MEMPOOL_DELAYED_DECLARES_SIZE.get_name(),
+        "The average number of delayed declare transactions",
+        vec![format!(
+            "avg_over_time({}[2m])",
+            MEMPOOL_DELAYED_DECLARES_SIZE.get_name_with_filter()
+        )],
+        PanelType::TimeSeries,
+    )
+}
+fn get_panel_mempool_transaction_time_spent() -> Panel {
+    Panel::from_hist(TRANSACTION_TIME_SPENT_IN_MEMPOOL, PanelType::TimeSeries)
+}
+
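+// NOTE: illustrative sketch only, not part of this change. The avg_over_time
+// panels above share one shape, so a hypothetical helper like this could cut
+// the repetition. The first two parameters stand in for get_name() and
+// get_name_with_filter() of the gauges used above; Panel::new is called exactly
+// as elsewhere in this file.
+#[allow(dead_code)]
+fn avg_over_time_panel(name: &'static str, filtered_name: &str, description: &'static str) -> Panel {
+    Panel::new(
+        name,
+        description,
+        vec![format!("avg_over_time({filtered_name}[2m])")],
+        PanelType::TimeSeries,
+    )
+}
+// Usage sketch:
+// avg_over_time_panel(
+//     MEMPOOL_POOL_SIZE.get_name(),
+//     &MEMPOOL_POOL_SIZE.get_name_with_filter(),
+//     "The average number of transactions in the pool",
+// )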
+pub(crate) fn get_mempool_row() -> Row { + Row::new( + "Mempool", + vec![ + get_panel_mempool_transactions_received(), + get_panel_mempool_transactions_received_rate(), + get_panel_mempool_transactions_dropped(), + get_panel_mempool_transactions_committed(), + get_panel_mempool_pool_size(), + get_panel_mempool_priority_queue_size(), + get_panel_mempool_pending_queue_size(), + get_panel_mempool_total_size_in_bytes(), + get_panel_mempool_get_txs_size(), + get_panel_mempool_delayed_declares_size(), + get_panel_mempool_transaction_time_spent(), + ], + ) +} + +pub(crate) fn get_mempool_infra_row() -> Row { + Row::new( + "Mempool Infra", + vec![ + get_panel_mempool_local_msgs_received(), + get_panel_mempool_local_msgs_processed(), + get_panel_mempool_local_queue_depth(), + get_panel_mempool_remote_msgs_received(), + get_panel_mempool_remote_valid_msgs_received(), + get_panel_mempool_remote_msgs_processed(), + get_panel_mempool_remote_client_send_attempts(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/mempool_p2p.rs b/crates/apollo_dashboard/src/panels/mempool_p2p.rs new file mode 100644 index 00000000000..b4d2da676e8 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/mempool_p2p.rs @@ -0,0 +1,88 @@ +use apollo_infra::metrics::{ + MEMPOOL_P2P_LOCAL_MSGS_PROCESSED, + MEMPOOL_P2P_LOCAL_MSGS_RECEIVED, + MEMPOOL_P2P_LOCAL_QUEUE_DEPTH, + MEMPOOL_P2P_REMOTE_CLIENT_SEND_ATTEMPTS, + MEMPOOL_P2P_REMOTE_MSGS_PROCESSED, + MEMPOOL_P2P_REMOTE_MSGS_RECEIVED, + MEMPOOL_P2P_REMOTE_VALID_MSGS_RECEIVED, +}; +use apollo_mempool_p2p::metrics::{ + MEMPOOL_P2P_BROADCASTED_BATCH_SIZE, + MEMPOOL_P2P_NUM_CONNECTED_PEERS, + MEMPOOL_P2P_NUM_RECEIVED_MESSAGES, + MEMPOOL_P2P_NUM_SENT_MESSAGES, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_mempool_p2p_num_connected_peers() -> Panel { + Panel::from_gauge(MEMPOOL_P2P_NUM_CONNECTED_PEERS, PanelType::Stat) +} + +fn get_panel_mempool_p2p_num_sent_messages() -> Panel { + Panel::from_counter(MEMPOOL_P2P_NUM_SENT_MESSAGES, PanelType::Stat) +} + +fn get_panel_mempool_p2p_num_received_messages() -> Panel { + Panel::from_counter(MEMPOOL_P2P_NUM_RECEIVED_MESSAGES, PanelType::Stat) +} + +fn get_panel_mempool_p2p_broadcasted_batch_size() -> Panel { + Panel::from_hist(MEMPOOL_P2P_BROADCASTED_BATCH_SIZE, PanelType::Stat) +} + +fn get_panel_mempool_p2p_local_msgs_received() -> Panel { + Panel::from_counter(MEMPOOL_P2P_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries) +} + +fn get_panel_mempool_p2p_local_msgs_processed() -> Panel { + Panel::from_counter(MEMPOOL_P2P_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries) +} + +fn get_panel_mempool_p2p_remote_msgs_received() -> Panel { + Panel::from_counter(MEMPOOL_P2P_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries) +} + +fn get_panel_mempool_p2p_remote_valid_msgs_received() -> Panel { + Panel::from_counter(MEMPOOL_P2P_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries) +} + +fn get_panel_mempool_p2p_remote_msgs_processed() -> Panel { + Panel::from_counter(MEMPOOL_P2P_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries) +} + +fn get_panel_mempool_p2p_local_queue_depth() -> Panel { + Panel::from_gauge(MEMPOOL_P2P_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries) +} + +fn get_panel_mempool_p2p_remote_client_send_attempts() -> Panel { + Panel::from_hist(MEMPOOL_P2P_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries) +} + +pub(crate) fn get_mempool_p2p_row() -> Row { + Row::new( + "MempoolP2p", + vec![ + get_panel_mempool_p2p_num_connected_peers(), + get_panel_mempool_p2p_num_sent_messages(), + get_panel_mempool_p2p_num_received_messages(), 
+ get_panel_mempool_p2p_broadcasted_batch_size(), + ], + ) +} + +pub(crate) fn get_mempool_p2p_infra_row() -> Row { + Row::new( + "MempoolP2pInfra", + vec![ + get_panel_mempool_p2p_local_msgs_received(), + get_panel_mempool_p2p_local_msgs_processed(), + get_panel_mempool_p2p_local_queue_depth(), + get_panel_mempool_p2p_remote_msgs_received(), + get_panel_mempool_p2p_remote_valid_msgs_received(), + get_panel_mempool_p2p_remote_msgs_processed(), + get_panel_mempool_p2p_remote_client_send_attempts(), + ], + ) +} diff --git a/crates/apollo_dashboard/src/panels/sierra_compiler.rs b/crates/apollo_dashboard/src/panels/sierra_compiler.rs new file mode 100644 index 00000000000..ecace5821b0 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/sierra_compiler.rs @@ -0,0 +1,57 @@ +use apollo_compile_to_casm::metrics::COMPILATION_DURATION; +use apollo_infra::metrics::{ + SIERRA_COMPILER_LOCAL_MSGS_PROCESSED, + SIERRA_COMPILER_LOCAL_MSGS_RECEIVED, + SIERRA_COMPILER_LOCAL_QUEUE_DEPTH, + SIERRA_COMPILER_REMOTE_CLIENT_SEND_ATTEMPTS, + SIERRA_COMPILER_REMOTE_MSGS_PROCESSED, + SIERRA_COMPILER_REMOTE_MSGS_RECEIVED, + SIERRA_COMPILER_REMOTE_VALID_MSGS_RECEIVED, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_sierra_compiler_local_msgs_received() -> Panel { + Panel::from_counter(SIERRA_COMPILER_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_sierra_compiler_local_msgs_processed() -> Panel { + Panel::from_counter(SIERRA_COMPILER_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_sierra_compiler_remote_msgs_received() -> Panel { + Panel::from_counter(SIERRA_COMPILER_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_sierra_compiler_remote_valid_msgs_received() -> Panel { + Panel::from_counter(SIERRA_COMPILER_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_sierra_compiler_remote_msgs_processed() -> Panel { + Panel::from_counter(SIERRA_COMPILER_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_sierra_compiler_local_queue_depth() -> Panel { + Panel::from_gauge(SIERRA_COMPILER_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries) +} +fn get_panel_sierra_compiler_remote_client_send_attempts() -> Panel { + Panel::from_hist(SIERRA_COMPILER_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries) +} + +fn get_panel_compilation_duration() -> Panel { + Panel::from_hist(COMPILATION_DURATION, PanelType::TimeSeries) +} + +pub(crate) fn get_sierra_compiler_infra_row() -> Row { + Row::new( + "SierraCompilerInfra", + vec![ + get_panel_sierra_compiler_local_msgs_received(), + get_panel_sierra_compiler_local_msgs_processed(), + get_panel_sierra_compiler_local_queue_depth(), + get_panel_sierra_compiler_remote_msgs_received(), + get_panel_sierra_compiler_remote_valid_msgs_received(), + get_panel_sierra_compiler_remote_msgs_processed(), + get_panel_sierra_compiler_remote_client_send_attempts(), + ], + ) +} + +pub(crate) fn get_compile_to_casm_row() -> Row { + Row::new("Compile sierra to casm", vec![get_panel_compilation_duration()]) +} diff --git a/crates/apollo_dashboard/src/panels/state_sync.rs b/crates/apollo_dashboard/src/panels/state_sync.rs new file mode 100644 index 00000000000..fb0c7593eb2 --- /dev/null +++ b/crates/apollo_dashboard/src/panels/state_sync.rs @@ -0,0 +1,117 @@ +use apollo_infra::metrics::{ + STATE_SYNC_LOCAL_MSGS_PROCESSED, + STATE_SYNC_LOCAL_MSGS_RECEIVED, + STATE_SYNC_LOCAL_QUEUE_DEPTH, + STATE_SYNC_REMOTE_CLIENT_SEND_ATTEMPTS, + STATE_SYNC_REMOTE_MSGS_PROCESSED, + STATE_SYNC_REMOTE_MSGS_RECEIVED, + 
STATE_SYNC_REMOTE_VALID_MSGS_RECEIVED, +}; +use apollo_state_sync_metrics::metrics::{ + CENTRAL_SYNC_CENTRAL_BLOCK_MARKER, + P2P_SYNC_NUM_ACTIVE_INBOUND_SESSIONS, + P2P_SYNC_NUM_ACTIVE_OUTBOUND_SESSIONS, + P2P_SYNC_NUM_CONNECTED_PEERS, + STATE_SYNC_BODY_MARKER, + STATE_SYNC_CLASS_MANAGER_MARKER, + STATE_SYNC_HEADER_MARKER, + STATE_SYNC_PROCESSED_TRANSACTIONS, + STATE_SYNC_REVERTED_TRANSACTIONS, + STATE_SYNC_STATE_MARKER, +}; + +use crate::dashboard::{Panel, PanelType, Row}; + +fn get_panel_state_sync_local_msgs_received() -> Panel { + Panel::from_counter(STATE_SYNC_LOCAL_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_state_sync_local_msgs_processed() -> Panel { + Panel::from_counter(STATE_SYNC_LOCAL_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_state_sync_remote_msgs_received() -> Panel { + Panel::from_counter(STATE_SYNC_REMOTE_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_state_sync_remote_valid_msgs_received() -> Panel { + Panel::from_counter(STATE_SYNC_REMOTE_VALID_MSGS_RECEIVED, PanelType::TimeSeries) +} +fn get_panel_state_sync_remote_msgs_processed() -> Panel { + Panel::from_counter(STATE_SYNC_REMOTE_MSGS_PROCESSED, PanelType::TimeSeries) +} +fn get_panel_state_sync_local_queue_depth() -> Panel { + Panel::from_gauge(STATE_SYNC_LOCAL_QUEUE_DEPTH, PanelType::TimeSeries) +} +fn get_panel_state_sync_remote_client_send_attempts() -> Panel { + Panel::from_hist(STATE_SYNC_REMOTE_CLIENT_SEND_ATTEMPTS, PanelType::TimeSeries) +} + +fn get_panel_p2p_sync_num_connected_peers() -> Panel { + Panel::from_gauge(P2P_SYNC_NUM_CONNECTED_PEERS, PanelType::Stat) +} +fn get_panel_p2p_sync_num_active_inbound_sessions() -> Panel { + Panel::from_gauge(P2P_SYNC_NUM_ACTIVE_INBOUND_SESSIONS, PanelType::Stat) +} +fn get_panel_p2p_sync_num_active_outbound_sessions() -> Panel { + Panel::from_gauge(P2P_SYNC_NUM_ACTIVE_OUTBOUND_SESSIONS, PanelType::Stat) +} +fn get_panel_state_sync_processed_transactions() -> Panel { + Panel::from_counter(STATE_SYNC_PROCESSED_TRANSACTIONS, PanelType::Stat) +} +fn get_panel_state_sync_reverted_transactions() -> Panel { + Panel::from_counter(STATE_SYNC_REVERTED_TRANSACTIONS, PanelType::Stat) +} +fn get_panel_central_sync_central_block_marker() -> Panel { + Panel::from_gauge(CENTRAL_SYNC_CENTRAL_BLOCK_MARKER, PanelType::Stat) +} +fn get_panel_state_sync_body_marker() -> Panel { + Panel::from_gauge(STATE_SYNC_BODY_MARKER, PanelType::Stat) +} +fn get_panel_state_sync_class_manager_marker() -> Panel { + Panel::from_gauge(STATE_SYNC_CLASS_MANAGER_MARKER, PanelType::Stat) +} +fn get_panel_state_sync_header_marker() -> Panel { + Panel::from_gauge(STATE_SYNC_HEADER_MARKER, PanelType::Stat) +} +fn get_panel_state_sync_state_marker() -> Panel { + Panel::from_gauge(STATE_SYNC_STATE_MARKER, PanelType::Stat) +} + +pub(crate) fn get_state_sync_row() -> Row { + Row::new( + "State Sync", + vec![ + get_panel_state_sync_processed_transactions(), + get_panel_state_sync_reverted_transactions(), + get_panel_central_sync_central_block_marker(), + get_panel_state_sync_body_marker(), + get_panel_state_sync_class_manager_marker(), + get_panel_state_sync_header_marker(), + get_panel_state_sync_state_marker(), + ], + ) +} + +pub(crate) fn get_state_sync_infra_row() -> Row { + Row::new( + "StateSyncInfra", + vec![ + get_panel_state_sync_local_msgs_received(), + get_panel_state_sync_local_msgs_processed(), + get_panel_state_sync_local_queue_depth(), + get_panel_state_sync_remote_msgs_received(), + get_panel_state_sync_remote_valid_msgs_received(), + 
get_panel_state_sync_remote_msgs_processed(), + get_panel_state_sync_remote_client_send_attempts(), + ], + ) +} + +pub(crate) fn get_state_sync_p2p_row() -> Row { + Row::new( + "StateSyncP2p", + vec![ + get_panel_p2p_sync_num_connected_peers(), + get_panel_p2p_sync_num_active_inbound_sessions(), + get_panel_p2p_sync_num_active_outbound_sessions(), + ], + ) +} diff --git a/crates/apollo_deployments/Cargo.toml b/crates/apollo_deployments/Cargo.toml new file mode 100644 index 00000000000..583bd3a8ea5 --- /dev/null +++ b/crates/apollo_deployments/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "apollo_deployments" +version.workspace = true +edition.workspace = true +repository.workspace = true +license.workspace = true + +[lints] +workspace = true + +[dependencies] +apollo_config.workspace = true +apollo_infra_utils.workspace = true +apollo_node.workspace = true +apollo_protobuf.workspace = true +indexmap.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true +starknet_api.workspace = true +strum.workspace = true +strum_macros.workspace = true +url.workspace = true + +[dev-dependencies] +apollo_infra_utils = { workspace = true, features = ["testing"] } +apollo_node = { workspace = true, features = ["testing"] } +tempfile.workspace = true +url = { workspace = true, features = ["serde"] } diff --git a/crates/apollo_deployments/resources/base_app_config.json b/crates/apollo_deployments/resources/base_app_config.json new file mode 100644 index 00000000000..9c6c2ee42a7 --- /dev/null +++ b/crates/apollo_deployments/resources/base_app_config.json @@ -0,0 +1,243 @@ +{ + "base_layer_config.prague_blob_gas_calc": true, + "base_layer_config.timeout_millis": 1000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.l1_gas": 4400000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.message_segment_length": 3700, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.n_events": 5000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.n_txs": 2000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.sierra_gas": 4000000000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.proving_gas": 5000000000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.state_diff_size": 4000, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.pedersen": 10125, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.range_check": 70, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.ecdsa": 1666666, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.ec_op": 714875, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.bitwise": 583, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.keccak": 510707, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.poseidon": 6250, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.add_mod": 312, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.mul_mod": 604, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.range_check96": 56, + "batcher_config.block_builder_config.execute_config.n_workers": 28, + "batcher_config.block_builder_config.execute_config.stack_size": 62914560, + "batcher_config.block_builder_config.n_concurrent_txs": 100, + "batcher_config.block_builder_config.tx_polling_interval_millis": 1, + 
"batcher_config.contract_class_manager_config.cairo_native_run_config.channel_size": 2000, + "batcher_config.contract_class_manager_config.cairo_native_run_config.native_classes_whitelist": "All", + "batcher_config.contract_class_manager_config.cairo_native_run_config.panic_on_compilation_failure": false, + "batcher_config.contract_class_manager_config.cairo_native_run_config.run_cairo_native": false, + "batcher_config.contract_class_manager_config.cairo_native_run_config.wait_on_native_compilation": false, + "batcher_config.contract_class_manager_config.contract_cache_size": 2000, + "batcher_config.contract_class_manager_config.native_compiler_config.compiler_binary_path": "", + "batcher_config.contract_class_manager_config.native_compiler_config.compiler_binary_path.#is_none": true, + "batcher_config.contract_class_manager_config.native_compiler_config.max_cpu_time": 20, + "batcher_config.contract_class_manager_config.native_compiler_config.max_cpu_time.#is_none": false, + "batcher_config.contract_class_manager_config.native_compiler_config.max_file_size": 15728640, + "batcher_config.contract_class_manager_config.native_compiler_config.max_file_size.#is_none": false, + "batcher_config.contract_class_manager_config.native_compiler_config.max_memory_usage": 5368709120, + "batcher_config.contract_class_manager_config.native_compiler_config.max_memory_usage.#is_none": false, + "batcher_config.contract_class_manager_config.native_compiler_config.optimization_level": 2, + "batcher_config.input_stream_content_buffer_size": 4000, + "batcher_config.max_l1_handler_txs_per_block_proposal": 200, + "batcher_config.outstream_content_buffer_size": 64, + "batcher_config.pre_confirmed_block_writer_config.channel_buffer_capacity": 1000, + "batcher_config.pre_confirmed_block_writer_config.write_block_interval_millis": 50, + "batcher_config.storage.db_config.enforce_file_exists": false, + "batcher_config.storage.db_config.growth_step": 67108864, + "batcher_config.storage.db_config.max_size": 1099511627776, + "batcher_config.storage.db_config.min_size": 1048576, + "batcher_config.storage.db_config.path_prefix": "/data/batcher", + "batcher_config.storage.mmap_file_config.growth_step": 2147483648, + "batcher_config.storage.mmap_file_config.max_object_size": 1073741824, + "batcher_config.storage.mmap_file_config.max_size": 1099511627776, + "batcher_config.storage.scope": "StateOnly", + "class_manager_config.class_manager_config.cached_class_storage_config.class_cache_size": 128, + "class_manager_config.class_manager_config.cached_class_storage_config.deprecated_class_cache_size": 128, + "class_manager_config.class_manager_config.max_compiled_contract_class_object_size": 4089446, + "class_manager_config.class_storage_config.class_hash_storage_config.class_hash_db_config.enforce_file_exists": false, + "class_manager_config.class_storage_config.class_hash_storage_config.class_hash_db_config.growth_step": 67108864, + "class_manager_config.class_storage_config.class_hash_storage_config.class_hash_db_config.max_size": 1099511627776, + "class_manager_config.class_storage_config.class_hash_storage_config.class_hash_db_config.min_size": 1048576, + "class_manager_config.class_storage_config.class_hash_storage_config.class_hash_db_config.path_prefix": "/data/class_manager/class_hash_storage", + "class_manager_config.class_storage_config.class_hash_storage_config.mmap_file_config.growth_step": 2147483648, + "class_manager_config.class_storage_config.class_hash_storage_config.mmap_file_config.max_object_size": 1073741824, + 
"class_manager_config.class_storage_config.class_hash_storage_config.mmap_file_config.max_size": 1099511627776, + "class_manager_config.class_storage_config.class_hash_storage_config.scope": "StateOnly", + "class_manager_config.class_storage_config.persistent_root": "/data/class_manager/classes", + "compiler_config.max_bytecode_size": 81920, + "compiler_config.max_memory_usage": 5368709120, + "compiler_config.max_memory_usage.#is_none": false, + "consensus_manager_config.broadcast_buffer_size": 10000, + "consensus_manager_config.cende_config.skip_write_height": 1, + "consensus_manager_config.cende_config.skip_write_height.#is_none": false, + "consensus_manager_config.consensus_config.future_height_limit": 20, + "consensus_manager_config.consensus_config.future_height_round_limit": 5, + "consensus_manager_config.consensus_config.future_round_limit": 20, + "consensus_manager_config.consensus_config.startup_delay": 15, + "consensus_manager_config.consensus_config.sync_retry_interval": 1.0, + "consensus_manager_config.consensus_config.timeouts.precommit_timeout": 0.3, + "consensus_manager_config.consensus_config.timeouts.prevote_timeout": 0.3, + "consensus_manager_config.consensus_config.timeouts.proposal_timeout": 6.1, + "consensus_manager_config.context_config.block_timestamp_window_seconds": 1, + "consensus_manager_config.context_config.build_proposal_margin_millis": 1000, + "consensus_manager_config.context_config.builder_address": "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8", + "consensus_manager_config.context_config.constant_l2_gas_price": false, + "consensus_manager_config.context_config.l1_da_mode": true, + "consensus_manager_config.context_config.l1_data_gas_price_multiplier_ppt": 135, + "consensus_manager_config.context_config.l1_gas_tip_wei": 1000000000, + "consensus_manager_config.context_config.proposal_buffer_size": 512, + "consensus_manager_config.context_config.validate_proposal_margin_millis": 10000, + "consensus_manager_config.context_config.min_l1_gas_price_wei": 1000000000, + "consensus_manager_config.context_config.max_l1_gas_price_wei": 1000000000000, + "consensus_manager_config.context_config.min_l1_data_gas_price_wei": 1, + "consensus_manager_config.context_config.max_l1_data_gas_price_wei": 1000000000000, + "consensus_manager_config.eth_to_strk_oracle_config.lag_interval_seconds": 900, + "consensus_manager_config.eth_to_strk_oracle_config.max_cache_size": 100, + "consensus_manager_config.eth_to_strk_oracle_config.query_timeout_sec": 3, + "consensus_manager_config.immediate_active_height": 1, + "consensus_manager_config.assume_no_malicious_validators": true, + "consensus_manager_config.network_config.broadcasted_message_metadata_buffer_size": 100000, + "consensus_manager_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": 2, + "consensus_manager_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": 5, + "consensus_manager_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": 5, + "consensus_manager_config.network_config.discovery_config.heartbeat_interval": 100, + "consensus_manager_config.network_config.idle_connection_timeout": 120, + "consensus_manager_config.network_config.peer_manager_config.malicious_timeout_seconds": 0, + "consensus_manager_config.network_config.peer_manager_config.unstable_timeout_millis": 0, + "consensus_manager_config.network_config.port": 53080, + "consensus_manager_config.network_config.reported_peer_ids_buffer_size": 100000, + 
"consensus_manager_config.network_config.session_timeout": 120, + "consensus_manager_config.proposals_topic": "consensus_proposals", + "consensus_manager_config.stream_handler_config.channel_buffer_capacity": 1000, + "consensus_manager_config.stream_handler_config.max_streams": 100, + "consensus_manager_config.votes_topic": "consensus_votes", + "gateway_config.authorized_declarer_accounts": "", + "gateway_config.authorized_declarer_accounts.#is_none": true, + "gateway_config.block_declare": false, + "gateway_config.stateful_tx_validator_config.max_allowed_nonce_gap": 50, + "gateway_config.stateful_tx_validator_config.max_nonce_for_validation_skip": "0x1", + "gateway_config.stateful_tx_validator_config.min_gas_price_percentage": 100, + "gateway_config.stateful_tx_validator_config.reject_future_declare_txs": true, + "gateway_config.stateful_tx_validator_config.validate_resource_bounds_above_threshold": true, + "gateway_config.stateless_tx_validator_config.max_calldata_length": 5000, + "gateway_config.stateless_tx_validator_config.max_contract_bytecode_size": 81920, + "gateway_config.stateless_tx_validator_config.max_contract_class_object_size": 4089446, + "gateway_config.stateless_tx_validator_config.max_sierra_version.major": 1, + "gateway_config.stateless_tx_validator_config.max_sierra_version.minor": 7, + "gateway_config.stateless_tx_validator_config.max_sierra_version.patch": 0, + "gateway_config.stateless_tx_validator_config.max_signature_length": 4000, + "gateway_config.stateless_tx_validator_config.min_gas_price": 3000000000, + "gateway_config.stateless_tx_validator_config.min_sierra_version.major": 1, + "gateway_config.stateless_tx_validator_config.min_sierra_version.minor": 1, + "gateway_config.stateless_tx_validator_config.min_sierra_version.patch": 0, + "gateway_config.stateless_tx_validator_config.validate_non_zero_resource_bounds": true, + "http_server_config.ip": "0.0.0.0", + "http_server_config.port": 8080, + "l1_gas_price_provider_config.lag_margin_seconds": 300, + "l1_gas_price_provider_config.number_of_blocks_for_mean": 300, + "l1_gas_price_provider_config.storage_limit": 3000, + "l1_gas_price_provider_config.max_time_gap_seconds": 900, + "l1_gas_price_scraper_config.finality": 10, + "l1_gas_price_scraper_config.number_of_blocks_for_mean": 300, + "l1_gas_price_scraper_config.polling_interval": 120, + "l1_gas_price_scraper_config.starting_block": 0, + "l1_gas_price_scraper_config.starting_block.#is_none": true, + "l1_gas_price_scraper_config.startup_num_blocks_multiplier": 2, + "l1_provider_config.bootstrap_catch_up_height_override": 0, + "l1_provider_config.bootstrap_catch_up_height_override.#is_none": true, + "l1_provider_config.startup_sync_sleep_retry_interval_seconds": 2, + "l1_provider_config.l1_handler_cancellation_timelock_seconds": 300, + "l1_provider_config.new_l1_handler_cooldown_seconds": 245, + "l1_scraper_config.finality": 10, + "l1_scraper_config.polling_interval_seconds": 120, + "l1_scraper_config.startup_rewind_time_seconds": 3600, + "mempool_config.capacity_in_bytes": 1073741824, + "mempool_config.committed_nonce_retention_block_count": 100, + "mempool_config.declare_delay": 20, + "mempool_config.enable_fee_escalation": true, + "mempool_config.fee_escalation_percentage": 10, + "mempool_config.override_gas_price_threshold_check": false, + "mempool_config.transaction_ttl": 300, + "mempool_p2p_config.max_transaction_batch_size": 75, + "mempool_p2p_config.network_buffer_size": 10000, + 
"mempool_p2p_config.network_config.broadcasted_message_metadata_buffer_size": 100000, + "mempool_p2p_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": 2, + "mempool_p2p_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": 5, + "mempool_p2p_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": 5, + "mempool_p2p_config.network_config.discovery_config.heartbeat_interval": 100, + "mempool_p2p_config.network_config.idle_connection_timeout": 120, + "mempool_p2p_config.network_config.peer_manager_config.malicious_timeout_seconds": 0, + "mempool_p2p_config.network_config.peer_manager_config.unstable_timeout_millis": 0, + "mempool_p2p_config.network_config.port": 53200, + "mempool_p2p_config.network_config.reported_peer_ids_buffer_size": 100000, + "mempool_p2p_config.network_config.session_timeout": 120, + "mempool_p2p_config.transaction_batch_rate_millis": 100, + "monitoring_config.collect_metrics": true, + "monitoring_config.collect_profiling_metrics": true, + "monitoring_endpoint_config.collect_metrics": true, + "monitoring_endpoint_config.collect_profiling_metrics": true, + "monitoring_endpoint_config.ip": "0.0.0.0", + "monitoring_endpoint_config.port": 8082, + "revert_config.revert_up_to_and_including": 18446744073709551615, + "revert_config.should_revert": false, + "state_sync_config.central_sync_client_config.central_source_config.class_cache_size": 128, + "state_sync_config.central_sync_client_config.central_source_config.concurrent_requests": 10, + "state_sync_config.central_sync_client_config.central_source_config.max_classes_to_download": 20, + "state_sync_config.central_sync_client_config.central_source_config.max_state_updates_to_download": 20, + "state_sync_config.central_sync_client_config.central_source_config.max_state_updates_to_store_in_memory": 20, + "state_sync_config.central_sync_client_config.central_source_config.retry_config.max_retries": 10, + "state_sync_config.central_sync_client_config.central_source_config.retry_config.retry_base_millis": 30, + "state_sync_config.central_sync_client_config.central_source_config.retry_config.retry_max_delay_millis": 30000, + "state_sync_config.central_sync_client_config.sync_config.base_layer_propagation_sleep_duration": 10, + "state_sync_config.central_sync_client_config.sync_config.block_propagation_sleep_duration": 2, + "state_sync_config.central_sync_client_config.sync_config.blocks_max_stream_size": 1000, + "state_sync_config.central_sync_client_config.sync_config.collect_pending_data": false, + "state_sync_config.central_sync_client_config.sync_config.recoverable_error_sleep_duration": 3, + "state_sync_config.central_sync_client_config.sync_config.state_updates_max_stream_size": 1000, + "state_sync_config.central_sync_client_config.sync_config.store_sierras_and_casms": false, + "state_sync_config.central_sync_client_config.sync_config.verify_blocks": false, + "state_sync_config.network_config.advertised_multiaddr": "", + "state_sync_config.network_config.advertised_multiaddr.#is_none": true, + "state_sync_config.network_config.bootstrap_peer_multiaddr": "", + "state_sync_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "state_sync_config.network_config.broadcasted_message_metadata_buffer_size": 100000, + "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": 2, + "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": 5, + 
"state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": 5, + "state_sync_config.network_config.discovery_config.heartbeat_interval": 100, + "state_sync_config.network_config.idle_connection_timeout": 120, + "state_sync_config.network_config.peer_manager_config.malicious_timeout_seconds": 1, + "state_sync_config.network_config.peer_manager_config.unstable_timeout_millis": 1000, + "state_sync_config.network_config.port": 53140, + "state_sync_config.network_config.reported_peer_ids_buffer_size": 100000, + "state_sync_config.network_config.session_timeout": 120, + "state_sync_config.p2p_sync_client_config.buffer_size": 100000, + "state_sync_config.p2p_sync_client_config.num_block_classes_per_query": 100, + "state_sync_config.p2p_sync_client_config.num_block_state_diffs_per_query": 100, + "state_sync_config.p2p_sync_client_config.num_block_transactions_per_query": 100, + "state_sync_config.p2p_sync_client_config.num_headers_per_query": 10000, + "state_sync_config.p2p_sync_client_config.wait_period_for_new_data": 50, + "state_sync_config.p2p_sync_client_config.wait_period_for_other_protocol": 50, + "state_sync_config.rpc_config.apollo_gateway_retry_config.max_retries": 10, + "state_sync_config.rpc_config.apollo_gateway_retry_config.retry_base_millis": 30, + "state_sync_config.rpc_config.apollo_gateway_retry_config.retry_max_delay_millis": 1000, + "state_sync_config.rpc_config.collect_metrics": false, + "state_sync_config.rpc_config.execution_config.default_initial_gas_cost": 10000000000, + "state_sync_config.rpc_config.max_events_chunk_size": 1000, + "state_sync_config.rpc_config.max_events_keys": 100, + "state_sync_config.rpc_config.ip": "0.0.0.0", + "state_sync_config.rpc_config.port": 8090, + "state_sync_config.storage_config.db_config.enforce_file_exists": false, + "state_sync_config.storage_config.db_config.growth_step": 67108864, + "state_sync_config.storage_config.db_config.max_size": 1099511627776, + "state_sync_config.storage_config.db_config.min_size": 1048576, + "state_sync_config.storage_config.db_config.path_prefix": "/data/state_sync", + "state_sync_config.storage_config.mmap_file_config.growth_step": 2147483648, + "state_sync_config.storage_config.mmap_file_config.max_object_size": 1073741824, + "state_sync_config.storage_config.mmap_file_config.max_size": 1099511627776, + "state_sync_config.storage_config.scope": "FullArchive", + "versioned_constants_overrides.invoke_tx_max_n_steps": 10000000, + "versioned_constants_overrides.max_n_events": 1000, + "versioned_constants_overrides.max_recursion_depth": 50, + "versioned_constants_overrides.validate_max_n_steps": 1000000 +} \ No newline at end of file diff --git a/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_0.json b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..0925fd894f7 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_0.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-mainnet-0.starknet.io", + "internal": true 
+ }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-mainnet.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_0.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-mainnet-0.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 
4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-0" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..d8f79fe87eb --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_1.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-mainnet-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-mainnet.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_1.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_1.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_1.json", + "services/hybrid/mempool.json" + ], + 
"ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-mainnet-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-1" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..e065b3c4d30 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_hybrid_2.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-mainnet-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-mainnet.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + 
"limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_2.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-mainnet-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/mainnet/deployment_config_override.json", + "deployments/mainnet/hybrid_2.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-mainnet-2" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_override.json new file mode 100644 index 00000000000..a4c4fdcd424 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4", + "chain_id": "SN_MAIN", + "consensus_manager_config.context_config.num_validators": 3, + "eth_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "https://feeder.alpha-mainnet.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" +} diff --git a/crates/apollo_deployments/resources/deployments/mainnet/hybrid_0.json b/crates/apollo_deployments/resources/deployments/mainnet/hybrid_0.json new file mode 100644 index 00000000000..1fc480691c0 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/hybrid_0.json @@ -0,0 +1,11 @@ +{ + 
"consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-mainnet-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-mainnet-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/mainnet/hybrid_1.json b/crates/apollo_deployments/resources/deployments/mainnet/hybrid_1.json new file mode 100644 index 00000000000..61634a5d501 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-mainnet-1.starknet.io/tcp/53080/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-mainnet-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-mainnet-1.starknet.io/tcp/53200/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-mainnet-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/mainnet/hybrid_2.json b/crates/apollo_deployments/resources/deployments/mainnet/hybrid_2.json new file mode 100644 index 00000000000..1556646b95b --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/mainnet/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-mainnet-2.starknet.io/tcp/53080/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-mainnet-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-mainnet-2.starknet.io/tcp/53200/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": 
"/dns/sequencer-mempool-service.apollo-mainnet-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git a/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_0.json b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..db08b876e5d --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_0.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-potc-2-sepolia-mock-sharp-0.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "batcher-8-64", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "potc-mock-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_0.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": 
[ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-potc-2-sepolia-mock-sharp-0.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-0" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..1edd5a1d11b --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_1.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-potc-2-sepolia-mock-sharp-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "batcher-8-64", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "potc-mock-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_1.json", + 
"services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_1.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_1.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-potc-2-sepolia-mock-sharp-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-1" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..b789c5a8f0b --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_hybrid_2.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-potc-2-sepolia-mock-sharp-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "batcher-8-64", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": 
"apollo-potc-2-sepolia-mock-sharp-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "potc-mock-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_2.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-potc-2-sepolia-mock-sharp-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/potc2/deployment_config_override.json", + "deployments/potc2/hybrid_2.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-potc-2-sepolia-mock-sharp-2" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git 
a/crates/apollo_deployments/resources/deployments/potc2/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_override.json new file mode 100644 index 00000000000..6093d8b265a --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/potc2/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0xd8A5518cf4AC3ECD3b4cec772478109679a73E78", + "chain_id": "PRIVATE_SN_POTC_MOCK_SEPOLIA", + "consensus_manager_config.context_config.num_validators": 3, + "eth_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "https://feeder.potc-mock-sepolia-fgw.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7" +} diff --git a/crates/apollo_deployments/resources/deployments/potc2/hybrid_0.json b/crates/apollo_deployments/resources/deployments/potc2/hybrid_0.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/potc2/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/potc2/hybrid_1.json b/crates/apollo_deployments/resources/deployments/potc2/hybrid_1.json new file mode 100644 index 00000000000..9d1870d6d5b --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/potc2/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-potc-2-sepolia-mock-sharp-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-potc-2-sepolia-mock-sharp-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/potc2/hybrid_2.json b/crates/apollo_deployments/resources/deployments/potc2/hybrid_2.json new file mode 100644 index 00000000000..c26c7a04417 --- /dev/null +++ 
b/crates/apollo_deployments/resources/deployments/potc2/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-potc-2-sepolia-mock-sharp-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-potc-2-sepolia-mock-sharp-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_0.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..dc513b03bc3 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_0.json @@ -0,0 +1,204 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 2, + "memory": 4 + }, + "limits": { + "cpu": 7, + "memory": 14 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "integration-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": 
"apollo-sepolia-integration-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-0" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..1cc6f656584 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_1.json @@ -0,0 +1,204 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 2, + "memory": 4 + }, + "limits": { + "cpu": 7, + "memory": 14 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "integration-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": 
"/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-1" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..93942bf6062 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_2.json @@ -0,0 +1,204 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + 
"base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 2, + "memory": 4 + }, + "limits": { + "cpu": 7, + "memory": 14 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "integration-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", + 
"services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-integration-2" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_override.json new file mode 100644 index 00000000000..1d0cf7163a1 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0x4737c0c1B4D5b1A687B42610DdabEE781152359c", + "chain_id": "SN_INTEGRATION_SEPOLIA", + "consensus_manager_config.context_config.num_validators": 3, + "eth_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "https://feeder.integration-sepolia.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_0.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_0.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_1.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_1.json new file mode 100644 index 00000000000..80708430f9d --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + 
"mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_2.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_2.json new file mode 100644 index 00000000000..2f1bbfaa8ea --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_0.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..752cf1eb73c --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_0.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-sepolia-alpha-0.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": 
{ + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..1ee7857b358 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_1.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": 
"sequencer-core-service.apollo-sepolia-alpha-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-sepolia-alpha-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + 
"autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..994cdf1975e --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_2.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-sepolia-alpha-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + 
"gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-sepolia-alpha-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_override.json new file mode 100644 index 00000000000..14e77f5aa15 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057", + "chain_id": "SN_SEPOLIA", + "consensus_manager_config.context_config.num_validators": 3, + "eth_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "https://feeder.alpha-sepolia.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_0.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_0.json new file mode 100644 index 00000000000..7eeca042f06 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": 
"/dns/sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_1.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_1.json new file mode 100644 index 00000000000..6aa420d5344 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-1.starknet.io/tcp/53080/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-1.starknet.io/tcp/53200/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_2.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_2.json new file mode 100644 index 00000000000..c0583c68e16 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-2.starknet.io/tcp/53080/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-2.starknet.io/tcp/53200/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_0.json 
b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..ebe1340cccd --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_0.json @@ -0,0 +1,204 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [ + "apollo-stresstest-dev.sw-dev.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": 
{ + "gcsm_key": "apollo-stresstest-dev-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-0" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..441532cce57 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_1.json @@ -0,0 +1,204 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [ + "apollo-stresstest-dev.sw-dev.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": 
false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-1" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..4a05e0763b3 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_2.json @@ -0,0 +1,204 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2d-56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [ + "apollo-stresstest-dev.sw-dev.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + 
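The `resources` blocks carry bare numbers. Assuming (this is not stated anywhere in the diff) that `cpu` means whole cores and `memory` means GiB, they would map onto Kubernetes quantity strings as in the sketch below.

/// Assumption (not documented in these files): `cpu` is whole cores and
/// `memory` is GiB. Renders one requests-or-limits pair as the quantity
/// strings a Kubernetes manifest would carry.
fn k8s_quantities(cpu: u64, memory: u64) -> (String, String) {
    (cpu.to_string(), format!("{memory}Gi"))
}

fn main() {
    // e.g. a Gateway replica: requests { cpu: 1, memory: 2 }, limits { cpu: 2, memory: 4 }.
    assert_eq!(k8s_quantities(1, 2), ("1".to_string(), "2Gi".to_string()));
    assert_eq!(k8s_quantities(2, 4), ("2".to_string(), "4Gi".to_string()));
}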
"base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-stresstest-dev-2" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_override.json new file mode 100644 index 00000000000..b646503472e --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f", + "chain_id": "INTERNAL_STRESS_TEST", + "consensus_manager_config.context_config.num_validators": 3, + "eth_fee_token_address": "0x7e813ecf3e7b3e14f07bd2f68cb4a3d12110e3c75ec5a63de3d2dacf1852904", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "http://feeder-gateway.starknet-0-14-0-stress-test-03:9713/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": 
"0x2208cce4221df1f35943958340abc812aa79a8f6a533bff4ee00416d3d06cd6" +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/hybrid_0.json b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_0.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/hybrid_1.json b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_1.json new file mode 100644 index 00000000000..c86a3528944 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/stress_test/hybrid_2.json b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_2.json new file mode 100644 index 00000000000..7958055b92b --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git 
a/crates/apollo_deployments/resources/deployments/testing/consolidated.json b/crates/apollo_deployments/resources/deployments/testing/consolidated.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/consolidated.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json new file mode 100644 index 00000000000..03d91d9cb57 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json @@ -0,0 +1,34 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Node", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/consolidated.json", + "services/consolidated/node.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": 1, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/testing/deployment_config_distributed.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_distributed.json new file mode 100644 index 00000000000..7604a8d69f3 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_distributed.json @@ -0,0 +1,277 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Batcher", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/batcher.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "ClassManager", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/class_manager.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + 
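Each service's `config_paths` lists flattened-JSON layers applied in order, base first: the shared app config, then the environment-wide deployment override, then the per-instance file, then the per-service file, with later files overriding keys from earlier ones. A sketch of that merge with in-memory maps standing in for the files; the crate's actual loader may differ in detail.

use std::collections::BTreeMap;

/// `config_paths` layering: apply layers in order, later keys win.
fn merge(layers: &[BTreeMap<String, String>]) -> BTreeMap<String, String> {
    let mut merged = BTreeMap::new();
    for layer in layers {
        for (key, value) in layer {
            merged.insert(key.clone(), value.clone()); // later layers override
        }
    }
    merged
}

fn main() {
    // "UNSET" is a hypothetical base value, used only to show the override.
    let base = BTreeMap::from([("chain_id".to_string(), "UNSET".to_string())]);
    let env_override = BTreeMap::from([("chain_id".to_string(), "CHAIN_ID_SUBDIR".to_string())]);
    let merged = merge(&[base, env_override]);
    assert_eq!(merged["chain_id"], "CHAIN_ID_SUBDIR");
}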
"ports": {} + }, + { + "name": "ConsensusManager", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/consensus_manager.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [], + "internal": true, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + 
"name": "StateSync", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", + "services/distributed/state_sync.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/testing/deployment_config_hybrid.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_hybrid.json new file mode 100644 index 00000000000..5900f658855 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_hybrid.json @@ -0,0 +1,190 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [], + "internal": true, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", + 
"services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 1, + "storage": null, + "toleration": null, + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": null, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/testing/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_override.json new file mode 100644 index 00000000000..f3cee9c9d91 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "chain_id": "CHAIN_ID_SUBDIR", + "consensus_manager_config.context_config.num_validators": 1, + "eth_fee_token_address": "0x1001", + "l1_provider_config.provider_startup_height_override": 1, + "l1_provider_config.provider_startup_height_override.#is_none": false, + "starknet_url": "https://integration-sepolia.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": true, + "state_sync_config.network_config.#is_none": false, + "state_sync_config.p2p_sync_client_config.#is_none": false, + "strk_fee_token_address": "0x1002" +} diff --git a/crates/apollo_deployments/resources/deployments/testing/distributed.json b/crates/apollo_deployments/resources/deployments/testing/distributed.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/distributed.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/testing/hybrid.json b/crates/apollo_deployments/resources/deployments/testing/hybrid.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/hybrid.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + 
"mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_0.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..4869eb16c90 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_0.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-alpha-test-0.sw-dev.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 2, + "memory": 4 + }, + "limits": { + "cpu": 7, + "memory": 14 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [ + "sn-alpha-test-upgrade.gateway-proxy.sw-dev.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-0" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-0" + }, + "anti_affinity": false, + 
"ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-0" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-0" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..29685d01d50 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_1.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-alpha-test-1.sw-dev.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 2, + "memory": 4 + }, + "limits": { + "cpu": 7, + "memory": 14 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [ + "sn-alpha-test-upgrade.gateway-proxy.sw-dev.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + 
"deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-1" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-alpha-test-1.sw-dev.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-1" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-1" + }, + "anti_affinity": false, + "ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..dd0260b4695 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_2.json @@ -0,0 +1,212 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-alpha-test-2.sw-dev.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 2, + "memory": 4 + }, + "limits": 
{ + "cpu": 7, + "memory": 14 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "sw-dev.io", + "alternative_names": [ + "sn-alpha-test-upgrade.gateway-proxy.sw-dev.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "L1", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", + "services/hybrid/l1.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-2" + }, + "anti_affinity": false, + "ports": {} + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-alpha-test-2.sw-dev.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-2" + }, + "anti_affinity": true, + "ports": {} + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-alpha-test-2" + }, + "anti_affinity": false, + 
"ports": {} + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_override.json new file mode 100644 index 00000000000..30f81067a6a --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_override.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.starknet_contract_address": "0x9b8A6361d204a0C1F93d5194763538057444d958", + "chain_id": "SN_GOERLI", + "consensus_manager_config.context_config.num_validators": 3, + "eth_fee_token_address": "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "https://feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_0.json b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_0.json new file mode 100644 index 00000000000..cff8cf4a87d --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-0.sw-dev.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_1.json b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_1.json new file mode 100644 index 00000000000..36f7cadc228 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-1.sw-dev.io/tcp/53080/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-0.sw-dev.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-1.sw-dev.io/tcp/53200/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + 
"mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_2.json b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_2.json new file mode 100644 index 00000000000..8ee85a63ad6 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-2.sw-dev.io/tcp/53080/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-0.sw-dev.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-2.sw-dev.io/tcp/53200/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git a/crates/apollo_deployments/resources/services/consolidated/node.json b/crates/apollo_deployments/resources/services/consolidated/node.json new file mode 100644 index 00000000000..e5f2acdfd6b --- /dev/null +++ b/crates/apollo_deployments/resources/services/consolidated/node.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Enabled", + "components.gateway.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + 
"components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Enabled", + "components.l1_endpoint_monitor.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Enabled", + "components.l1_provider.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Enabled", + "components.mempool.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + 
"components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/distributed/batcher.json b/crates/apollo_deployments/resources/services/distributed/batcher.json new file mode 100644 index 00000000000..e69aca58b53 --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/batcher.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 15000, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "sequencer-batcher-service", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 15001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-classmanager-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + 
"components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Remote", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 15004, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "sequencer-l1-service", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Remote", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 15006, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "sequencer-mempool-service", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + 
"components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/distributed/class_manager.json b/crates/apollo_deployments/resources/services/distributed/class_manager.json new file mode 100644 index 00000000000..6fcd3290b94 --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/class_manager.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 15001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-classmanager-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + 
"components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Remote", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 15007, + "components.sierra_compiler.remote_client_config.idle_connections": 0, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "sequencer-sierracompiler-service", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git 
a/crates/apollo_deployments/resources/services/distributed/consensus_manager.json b/crates/apollo_deployments/resources/services/distributed/consensus_manager.json new file mode 100644 index 00000000000..4b5b3078682 --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/consensus_manager.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Remote", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 15000, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "sequencer-batcher-service", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 15001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-classmanager-service", + "components.consensus_manager.execution_mode": "Enabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Remote", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 15003, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "sequencer-l1-service", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + 
"components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Remote", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 15008, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-statesync-service" +} diff --git a/crates/apollo_deployments/resources/services/distributed/gateway.json b/crates/apollo_deployments/resources/services/distributed/gateway.json new file mode 100644 index 00000000000..ac5d76b74bd --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/gateway.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + 
"components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 15001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-classmanager-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 15002, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "sequencer-gateway-service", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Remote", + "components.mempool.ip": "0.0.0.0", + 
"components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 15006, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "sequencer-mempool-service", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Remote", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 15008, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-statesync-service" +} diff --git a/crates/apollo_deployments/resources/services/distributed/http_server.json b/crates/apollo_deployments/resources/services/distributed/http_server.json new file mode 100644 index 00000000000..28ce63cb17d --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/http_server.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Disabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 
150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Remote", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 15002, + "components.gateway.remote_client_config.idle_connections": 0, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "sequencer-gateway-service", + "components.http_server.execution_mode": "Enabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + 
"components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/distributed/l1.json b/crates/apollo_deployments/resources/services/distributed/l1.json new file mode 100644 index 00000000000..d51f3241bd6 --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/l1.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Remote", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 15000, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "sequencer-batcher-service", + "components.class_manager.execution_mode": "Disabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + 
"components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 15005, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "sequencer-l1-service", + "components.l1_gas_price_provider.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 15003, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "sequencer-l1-service", + "components.l1_gas_price_scraper.execution_mode": "Enabled", + "components.l1_provider.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 15004, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "sequencer-l1-service", + "components.l1_scraper.execution_mode": "Enabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, 
+ "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Remote", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 15008, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-statesync-service" +} diff --git a/crates/apollo_deployments/resources/services/distributed/mempool.json b/crates/apollo_deployments/resources/services/distributed/mempool.json new file mode 100644 index 00000000000..d33334be39f --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/mempool.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 15001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-classmanager-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Remote", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 15002, + "components.gateway.remote_client_config.idle_connections": 0, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "sequencer-gateway-service", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + 
"components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 15006, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "sequencer-mempool-service", + "components.mempool_p2p.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + 
"components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/distributed/sierra_compiler.json b/crates/apollo_deployments/resources/services/distributed/sierra_compiler.json new file mode 100644 index 00000000000..6f34c7f33ee --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/sierra_compiler.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Disabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + 
"components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 15007, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "sequencer-sierracompiler-service", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/distributed/state_sync.json b/crates/apollo_deployments/resources/services/distributed/state_sync.json new file mode 100644 index 00000000000..b82346fc612 --- /dev/null +++ b/crates/apollo_deployments/resources/services/distributed/state_sync.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + 
"components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 15001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-classmanager-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + 
"components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 15008, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-statesync-service" +} diff --git a/crates/apollo_deployments/resources/services/hybrid/core.json b/crates/apollo_deployments/resources/services/hybrid/core.json new file mode 100644 index 00000000000..62fa426cd07 --- /dev/null +++ b/crates/apollo_deployments/resources/services/hybrid/core.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 55000, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "sequencer-core-service", + "components.class_manager.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + 
"components.class_manager.max_concurrency": 8, + "components.class_manager.port": 55001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-core-service", + "components.consensus_manager.execution_mode": "Enabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Remote", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 55003, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "sequencer-core-service", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Remote", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 55004, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "sequencer-core-service", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Remote", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 55005, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + 
"components.mempool.url": "sequencer-mempool-service", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Remote", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 55006, + "components.sierra_compiler.remote_client_config.idle_connections": 0, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "sequencer-sierracompiler-service", + "components.state_sync.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 55007, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-core-service" +} diff --git a/crates/apollo_deployments/resources/services/hybrid/gateway.json b/crates/apollo_deployments/resources/services/hybrid/gateway.json new file mode 100644 index 00000000000..7b83873ec38 --- /dev/null +++ b/crates/apollo_deployments/resources/services/hybrid/gateway.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 55001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-core-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + 
"components.gateway.max_concurrency": 8, + "components.gateway.port": 55002, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "sequencer-gateway-service", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Remote", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 55005, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "sequencer-mempool-service", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + 
"components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Remote", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 55007, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-core-service" +} diff --git a/crates/apollo_deployments/resources/services/hybrid/http_server.json b/crates/apollo_deployments/resources/services/hybrid/http_server.json new file mode 100644 index 00000000000..6087517dfff --- /dev/null +++ b/crates/apollo_deployments/resources/services/hybrid/http_server.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Disabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Remote", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 55002, + "components.gateway.remote_client_config.idle_connections": 0, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "sequencer-gateway-service", + "components.http_server.execution_mode": "Enabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + 
"components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Disabled", + 
"components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/hybrid/l1.json b/crates/apollo_deployments/resources/services/hybrid/l1.json new file mode 100644 index 00000000000..aca272c8e3d --- /dev/null +++ b/crates/apollo_deployments/resources/services/hybrid/l1.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Remote", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 55000, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "sequencer-core-service", + "components.class_manager.execution_mode": "Disabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 55003, + 
"components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "sequencer-core-service", + "components.l1_gas_price_scraper.execution_mode": "Enabled", + "components.l1_provider.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 55004, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "sequencer-core-service", + "components.l1_scraper.execution_mode": "Enabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Remote", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 55007, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "sequencer-core-service" +} diff --git a/crates/apollo_deployments/resources/services/hybrid/mempool.json b/crates/apollo_deployments/resources/services/hybrid/mempool.json new file mode 
100644 index 00000000000..144c03ced33 --- /dev/null +++ b/crates/apollo_deployments/resources/services/hybrid/mempool.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + "components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Remote", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 55001, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "sequencer-core-service", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Remote", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 55002, + "components.gateway.remote_client_config.idle_connections": 0, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "sequencer-gateway-service", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + 
"components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 55005, + "components.mempool.remote_client_config.idle_connections": 10, + "components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "sequencer-mempool-service", + "components.mempool_p2p.execution_mode": "LocalExecutionWithRemoteDisabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "Disabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 0, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "localhost", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/services/hybrid/sierra_compiler.json b/crates/apollo_deployments/resources/services/hybrid/sierra_compiler.json new file mode 100644 index 00000000000..89ed0991839 --- /dev/null +++ b/crates/apollo_deployments/resources/services/hybrid/sierra_compiler.json @@ -0,0 +1,107 @@ +{ + "components.batcher.execution_mode": "Disabled", + "components.batcher.ip": "0.0.0.0", + "components.batcher.local_server_config.channel_capacity": 128, + "components.batcher.max_concurrency": 8, + "components.batcher.port": 0, + "components.batcher.remote_client_config.idle_connections": 10, + "components.batcher.remote_client_config.idle_timeout": 30, + "components.batcher.remote_client_config.retries": 150, + "components.batcher.remote_client_config.retry_interval": 1, + 
"components.batcher.url": "localhost", + "components.class_manager.execution_mode": "Disabled", + "components.class_manager.ip": "0.0.0.0", + "components.class_manager.local_server_config.channel_capacity": 128, + "components.class_manager.max_concurrency": 8, + "components.class_manager.port": 0, + "components.class_manager.remote_client_config.idle_connections": 10, + "components.class_manager.remote_client_config.idle_timeout": 30, + "components.class_manager.remote_client_config.retries": 150, + "components.class_manager.remote_client_config.retry_interval": 1, + "components.class_manager.url": "localhost", + "components.consensus_manager.execution_mode": "Disabled", + "components.gateway.execution_mode": "Disabled", + "components.gateway.ip": "0.0.0.0", + "components.gateway.local_server_config.channel_capacity": 128, + "components.gateway.max_concurrency": 8, + "components.gateway.port": 0, + "components.gateway.remote_client_config.idle_connections": 10, + "components.gateway.remote_client_config.idle_timeout": 30, + "components.gateway.remote_client_config.retries": 150, + "components.gateway.remote_client_config.retry_interval": 1, + "components.gateway.url": "localhost", + "components.http_server.execution_mode": "Disabled", + "components.l1_endpoint_monitor.execution_mode": "Disabled", + "components.l1_endpoint_monitor.ip": "0.0.0.0", + "components.l1_endpoint_monitor.local_server_config.channel_capacity": 128, + "components.l1_endpoint_monitor.max_concurrency": 8, + "components.l1_endpoint_monitor.port": 0, + "components.l1_endpoint_monitor.remote_client_config.idle_connections": 10, + "components.l1_endpoint_monitor.remote_client_config.idle_timeout": 30, + "components.l1_endpoint_monitor.remote_client_config.retries": 150, + "components.l1_endpoint_monitor.remote_client_config.retry_interval": 1, + "components.l1_endpoint_monitor.url": "localhost", + "components.l1_gas_price_provider.execution_mode": "Disabled", + "components.l1_gas_price_provider.ip": "0.0.0.0", + "components.l1_gas_price_provider.local_server_config.channel_capacity": 128, + "components.l1_gas_price_provider.max_concurrency": 8, + "components.l1_gas_price_provider.port": 0, + "components.l1_gas_price_provider.remote_client_config.idle_connections": 10, + "components.l1_gas_price_provider.remote_client_config.idle_timeout": 30, + "components.l1_gas_price_provider.remote_client_config.retries": 150, + "components.l1_gas_price_provider.remote_client_config.retry_interval": 1, + "components.l1_gas_price_provider.url": "localhost", + "components.l1_gas_price_scraper.execution_mode": "Disabled", + "components.l1_provider.execution_mode": "Disabled", + "components.l1_provider.ip": "0.0.0.0", + "components.l1_provider.local_server_config.channel_capacity": 128, + "components.l1_provider.max_concurrency": 8, + "components.l1_provider.port": 0, + "components.l1_provider.remote_client_config.idle_connections": 10, + "components.l1_provider.remote_client_config.idle_timeout": 30, + "components.l1_provider.remote_client_config.retries": 150, + "components.l1_provider.remote_client_config.retry_interval": 1, + "components.l1_provider.url": "localhost", + "components.l1_scraper.execution_mode": "Disabled", + "components.mempool.execution_mode": "Disabled", + "components.mempool.ip": "0.0.0.0", + "components.mempool.local_server_config.channel_capacity": 128, + "components.mempool.max_concurrency": 8, + "components.mempool.port": 0, + "components.mempool.remote_client_config.idle_connections": 10, + 
"components.mempool.remote_client_config.idle_timeout": 30, + "components.mempool.remote_client_config.retries": 150, + "components.mempool.remote_client_config.retry_interval": 1, + "components.mempool.url": "localhost", + "components.mempool_p2p.execution_mode": "Disabled", + "components.mempool_p2p.ip": "0.0.0.0", + "components.mempool_p2p.local_server_config.channel_capacity": 128, + "components.mempool_p2p.max_concurrency": 8, + "components.mempool_p2p.port": 0, + "components.mempool_p2p.remote_client_config.idle_connections": 10, + "components.mempool_p2p.remote_client_config.idle_timeout": 30, + "components.mempool_p2p.remote_client_config.retries": 150, + "components.mempool_p2p.remote_client_config.retry_interval": 1, + "components.mempool_p2p.url": "localhost", + "components.monitoring_endpoint.execution_mode": "Enabled", + "components.sierra_compiler.execution_mode": "LocalExecutionWithRemoteEnabled", + "components.sierra_compiler.ip": "0.0.0.0", + "components.sierra_compiler.local_server_config.channel_capacity": 128, + "components.sierra_compiler.max_concurrency": 8, + "components.sierra_compiler.port": 55006, + "components.sierra_compiler.remote_client_config.idle_connections": 10, + "components.sierra_compiler.remote_client_config.idle_timeout": 30, + "components.sierra_compiler.remote_client_config.retries": 150, + "components.sierra_compiler.remote_client_config.retry_interval": 1, + "components.sierra_compiler.url": "sequencer-sierracompiler-service", + "components.state_sync.execution_mode": "Disabled", + "components.state_sync.ip": "0.0.0.0", + "components.state_sync.local_server_config.channel_capacity": 128, + "components.state_sync.max_concurrency": 8, + "components.state_sync.port": 0, + "components.state_sync.remote_client_config.idle_connections": 10, + "components.state_sync.remote_client_config.idle_timeout": 30, + "components.state_sync.remote_client_config.retries": 150, + "components.state_sync.remote_client_config.retry_interval": 1, + "components.state_sync.url": "localhost" +} diff --git a/crates/apollo_deployments/resources/testing_secrets.json b/crates/apollo_deployments/resources/testing_secrets.json new file mode 100644 index 00000000000..e46eddf91bf --- /dev/null +++ b/crates/apollo_deployments/resources/testing_secrets.json @@ -0,0 +1,13 @@ +{ + "base_layer_config.node_url": "http://anvil-service.anvil.svc.cluster.local:8545", + "consensus_manager_config.eth_to_strk_oracle_config.url_header_list": "http://dummy-eth2strk-oracle-service.dummy-eth2strk-oracle.svc.cluster.local/eth_to_strk_oracle?timestamp=:9000", + "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", + "l1_endpoint_monitor_config.ordered_l1_endpoint_urls": "http://anvil-service.anvil.svc.cluster.local:8545", + "mempool_p2p_config.network_config.secret_key" : "0x0101010101010101010101010101010101010101010101010101010101010101", + "recorder_url": "http://dummy-recorder-service.dummy-recorder.svc.cluster.local:8080", + "state_sync_config.central_sync_client_config.central_source_config.http_headers": "", + "state_sync_config.network_config.secret_key" : "0x0101010101010101010101010101010101010101010101010101010101010101" +} + + + diff --git a/crates/apollo_deployments/src/addresses.rs b/crates/apollo_deployments/src/addresses.rs new file mode 100644 index 00000000000..12a50fd8b3d --- /dev/null +++ b/crates/apollo_deployments/src/addresses.rs @@ -0,0 +1,20 @@ +// Derived using `get_peer_id_from_secret_key` binary, where 
the secret key of node with index `id`
+// is format!("0x010101010101010101010101010101010101010101010101010101010101010{}", id + 1)
+
+const PEER_IDS: [&str; 6] = [
+    "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5",
+    "12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe",
+    "12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp",
+    "12D3KooWFdTjV6DXVJfQFisTXadCsqGzCbEnJJWzc6mXSPwy9g54",
+    "12D3KooWJMukrrip9sUyto28eiofqxyXiw9sfTJuZeQfHUujWPX8",
+    "12D3KooWMqkzSDGNQg9WDDPdu7nQgAPpqTY3YqZ2XUYqJzmUhmVu",
+];
+
+pub(crate) fn get_peer_id(node_id: usize) -> String {
+    assert!(node_id < PEER_IDS.len(), "Node index out of bounds: {}", node_id);
+    PEER_IDS[node_id].to_string()
+}
+
+// Builds a libp2p multiaddr of the form "/dns/<dns>/tcp/<port>/p2p/<peer_id>".
+pub(crate) fn get_p2p_address(dns: &str, port: u16, peer_id: &str) -> String {
+    format!("/dns/{}/tcp/{}/p2p/{}", dns, port, peer_id)
+}
diff --git a/crates/apollo_deployments/src/bin/deployment_generator.rs b/crates/apollo_deployments/src/bin/deployment_generator.rs
new file mode 100644
index 00000000000..f099378c3ac
--- /dev/null
+++ b/crates/apollo_deployments/src/bin/deployment_generator.rs
@@ -0,0 +1,15 @@
+use apollo_deployments::deployment_definitions::DEPLOYMENTS;
+use apollo_deployments::service::NodeType;
+use apollo_infra_utils::dumping::serialize_to_file;
+use strum::IntoEnumIterator;
+
+/// Creates the deployment json files.
+fn main() {
+    for node_type in NodeType::iter() {
+        node_type.dump_service_component_configs(None);
+    }
+    for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) {
+        serialize_to_file(&deployment, deployment.deployment_file_path().to_str().unwrap());
+        deployment.dump_config_override_files();
+    }
+}
diff --git a/crates/apollo_deployments/src/config_override.rs b/crates/apollo_deployments/src/config_override.rs
new file mode 100644
index 00000000000..418bee532d7
--- /dev/null
+++ b/crates/apollo_deployments/src/config_override.rs
@@ -0,0 +1,230 @@
+use std::path::Path;
+
+use apollo_infra_utils::dumping::serialize_to_file;
+#[cfg(test)]
+use apollo_infra_utils::dumping::serialize_to_file_test;
+use serde::Serialize;
+use serde_json::to_value;
+use serde_with::with_prefix;
+use starknet_api::block::BlockNumber;
+use url::Url;
+
+use crate::deployment_definitions::{StateSyncConfig, StateSyncType};
+#[cfg(test)]
+use crate::test_utils::FIX_BINARY_NAME;
+
+const DEPLOYMENT_FILE_NAME: &str = "deployment_config_override.json";
+
+#[derive(Clone, Debug, Serialize, PartialEq)]
+pub struct ConfigOverride {
+    deployment_config_override: DeploymentConfigOverride,
+    instance_config_override: InstanceConfigOverride,
+}
+
+impl ConfigOverride {
+    pub const fn new(
+        deployment_config_override: DeploymentConfigOverride,
+        instance_config_override: InstanceConfigOverride,
+    ) -> Self {
+        Self { deployment_config_override, instance_config_override }
+    }
+
+    fn config_files(
+        &self,
+        deployment_config_override_dir: &Path,
+        instance_name: &str,
+        create: bool,
+    ) -> ConfigOverrideWithPaths {
+        let deployment_path = deployment_config_override_dir.join(DEPLOYMENT_FILE_NAME);
+        let instance_path = deployment_config_override_dir.join(format!("{}.json", instance_name));
+
+        if create {
+            serialize_to_file(
+                to_value(&self.deployment_config_override).unwrap(),
+                deployment_path.to_str().unwrap(),
+            );
+
+            serialize_to_file(
+                to_value(&self.instance_config_override).unwrap(),
+                instance_path.to_str().unwrap(),
+            );
+        }
+
+        ConfigOverrideWithPaths {
+            #[cfg(test)]
+            deployment_config_override: self.deployment_config_override.clone(),
+            deployment_path: deployment_path.to_string_lossy().into_owned(),
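+            // Test-only copies of the override structs; `test_dump_config_files` below
+            // re-serializes them to the same paths.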
#[cfg(test)] + instance_config_override: self.instance_config_override.clone(), + instance_path: instance_path.to_string_lossy().into_owned(), + } + } + + pub fn get_config_file_paths( + &self, + deployment_config_override_dir: &Path, + instance_name: &str, + ) -> Vec<String> { + let config_override_with_paths = + self.config_files(deployment_config_override_dir, instance_name, false); + vec![config_override_with_paths.deployment_path, config_override_with_paths.instance_path] + } + + pub fn dump_config_files( + &self, + deployment_config_override_dir: &Path, + instance_name: &str, + ) -> Vec<String> { + let config_override_with_paths = + self.config_files(deployment_config_override_dir, instance_name, true); + vec![config_override_with_paths.deployment_path, config_override_with_paths.instance_path] + } + + #[cfg(test)] + pub fn test_dump_config_files( + &self, + deployment_config_override_dir: &Path, + instance_name: &str, + ) { + let config_override_with_paths = + self.config_files(deployment_config_override_dir, instance_name, false); + + serialize_to_file_test( + to_value(config_override_with_paths.deployment_config_override).unwrap(), + &config_override_with_paths.deployment_path, + FIX_BINARY_NAME, + ); + + serialize_to_file_test( + to_value(config_override_with_paths.instance_config_override).unwrap(), + &config_override_with_paths.instance_path, + FIX_BINARY_NAME, + ); + } +} + +struct ConfigOverrideWithPaths { + #[cfg(test)] + deployment_config_override: DeploymentConfigOverride, + deployment_path: String, + #[cfg(test)] + instance_config_override: InstanceConfigOverride, + instance_path: String, +} + +#[derive(Clone, Debug, Serialize, PartialEq)] +pub struct DeploymentConfigOverride { + #[serde(rename = "base_layer_config.starknet_contract_address")] + starknet_contract_address: String, + chain_id: String, + eth_fee_token_address: String, + starknet_url: Url, + strk_fee_token_address: String, + #[serde(rename = "l1_provider_config.provider_startup_height_override")] + l1_provider_config_provider_startup_height_override: u64, + #[serde(rename = "l1_provider_config.provider_startup_height_override.#is_none")] + l1_provider_config_provider_startup_height_override_is_none: bool, + #[serde(rename = "consensus_manager_config.context_config.num_validators")] + consensus_manager_config_context_config_num_validators: usize, + #[serde(flatten)] + state_sync_config: StateSyncConfig, +} + +impl DeploymentConfigOverride { + #[allow(clippy::too_many_arguments)] + pub fn new( + starknet_contract_address: impl ToString, + chain_id: impl ToString, + eth_fee_token_address: impl ToString, + starknet_url: Url, + strk_fee_token_address: impl ToString, + l1_startup_height_override: Option<BlockNumber>, + consensus_manager_config_context_config_num_validators: usize, + state_sync_type: StateSyncType, + ) -> Self { + let ( + l1_provider_config_provider_startup_height_override, + l1_provider_config_provider_startup_height_override_is_none, + ) = match l1_startup_height_override { + Some(block_number) => (block_number.0, false), + None => (0, true), + }; + + Self { + starknet_contract_address: starknet_contract_address.to_string(), + chain_id: chain_id.to_string(), + eth_fee_token_address: eth_fee_token_address.to_string(), + starknet_url, + strk_fee_token_address: strk_fee_token_address.to_string(), + l1_provider_config_provider_startup_height_override, + l1_provider_config_provider_startup_height_override_is_none, + consensus_manager_config_context_config_num_validators, + state_sync_config: state_sync_type.get_state_sync_config(), + } + } +}
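Aside (not part of the patch): the paired `value` / `value.#is_none` keys above are how an `Option`-typed node parameter is expressed through flat JSON overrides. A minimal, self-contained sketch of the expected output, assuming only serde/serde_json; the struct below is an illustrative stand-in, not a type from this patch:

use serde::Serialize;

#[derive(Serialize)]
struct StartupHeightOverride {
    #[serde(rename = "l1_provider_config.provider_startup_height_override")]
    height: u64,
    #[serde(rename = "l1_provider_config.provider_startup_height_override.#is_none")]
    is_none: bool,
}

fn main() {
    // Mirrors the match in DeploymentConfigOverride::new: Some(h) -> (h, false), None -> (0, true).
    let override_value: Option<u64> = Some(42);
    let (height, is_none) = match override_value {
        Some(h) => (h, false),
        None => (0, true),
    };
    // Prints: {"l1_provider_config.provider_startup_height_override":42,
    //          "l1_provider_config.provider_startup_height_override.#is_none":false}
    println!("{}", serde_json::to_string(&StartupHeightOverride { height, is_none }).unwrap());
}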
+#[derive(Clone, Debug, Serialize, PartialEq)] +pub struct NetworkConfigOverride { + // Bootstrap peer address. + #[serde(rename = "bootstrap_peer_multiaddr")] + bootstrap_peer_multiaddr: String, + #[serde(rename = "bootstrap_peer_multiaddr.#is_none")] + bootstrap_peer_multiaddr_is_none: bool, + + // Advertised self address. + #[serde(rename = "advertised_multiaddr")] + advertised_multiaddr: String, + #[serde(rename = "advertised_multiaddr.#is_none")] + advertised_multiaddr_is_none: bool, +} + +impl NetworkConfigOverride { + pub fn new( + bootstrap_peer_multiaddr: Option<String>, + advertised_multiaddr: Option<String>, + ) -> Self { + let (bootstrap_peer_multiaddr, bootstrap_peer_multiaddr_is_none) = + match bootstrap_peer_multiaddr { + Some(addr) => (addr, false), + None => ("".to_string(), true), + }; + let (advertised_multiaddr, advertised_multiaddr_is_none) = match advertised_multiaddr { + Some(addr) => (addr, false), + None => ("".to_string(), true), + }; + Self { + bootstrap_peer_multiaddr, + bootstrap_peer_multiaddr_is_none, + advertised_multiaddr, + advertised_multiaddr_is_none, + } + } +} + +// Serialization prefixes for the network config overrides +with_prefix!(consensus_prefix "consensus_manager_config.network_config."); +with_prefix!(mempool_prefix "mempool_p2p_config.network_config."); + +#[derive(Clone, Debug, Serialize, PartialEq)] +pub struct InstanceConfigOverride { + #[serde(flatten, with = "consensus_prefix")] + consensus_network_config_override: NetworkConfigOverride, + #[serde(flatten, with = "mempool_prefix")] + mempool_network_config_override: NetworkConfigOverride, + validator_id: String, +} + +impl InstanceConfigOverride { + pub fn new( + consensus_network_config_override: NetworkConfigOverride, + mempool_network_config_override: NetworkConfigOverride, + validator_id: impl ToString, + ) -> Self { + Self { + consensus_network_config_override, + mempool_network_config_override, + validator_id: validator_id.to_string(), + } + } +}
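For readers unfamiliar with `serde_with::with_prefix!`: combined with `#[serde(flatten)]`, it turns the two `NetworkConfigOverride` instances above into fully-qualified dotted config keys. A runnable sketch under the same assumptions (serde, serde_json, serde_with as dependencies; the types here are simplified stand-ins):

use serde::Serialize;
use serde_with::with_prefix;

with_prefix!(consensus_prefix "consensus_manager_config.network_config.");

#[derive(Serialize)]
struct Net {
    bootstrap_peer_multiaddr: String,
}

#[derive(Serialize)]
struct Instance {
    #[serde(flatten, with = "consensus_prefix")]
    consensus: Net,
}

fn main() {
    let instance = Instance {
        consensus: Net { bootstrap_peer_multiaddr: "/dns/example/tcp/1/p2p/12D3KooW...".into() },
    };
    // Prints: {"consensus_manager_config.network_config.bootstrap_peer_multiaddr":"/dns/example/tcp/1/p2p/12D3KooW..."}
    println!("{}", serde_json::to_string(&instance).unwrap());
}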
diff --git a/crates/apollo_deployments/src/deployment.rs b/crates/apollo_deployments/src/deployment.rs new file mode 100644 index 00000000000..d6dfe58d7f2 --- /dev/null +++ b/crates/apollo_deployments/src/deployment.rs @@ -0,0 +1,129 @@ +use std::iter::once; +use std::path::PathBuf; + +use serde::Serialize; + +use crate::config_override::ConfigOverride; +use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH, CONFIG_BASE_DIR}; +use crate::k8s::{ExternalSecret, IngressParams, K8SServiceType, K8sServiceConfigParams}; +use crate::service::{NodeType, Service}; + +#[derive(Clone, Debug, Serialize)] +pub struct Deployment { + application_config_subdir: PathBuf, + services: Vec<Service>, + #[serde(skip_serializing)] + deployment_aux_data: DeploymentAuxData, +} + +impl Deployment { + #[allow(clippy::too_many_arguments)] + pub fn new( + node_type: NodeType, + environment: Environment, + instance_name: &str, + external_secret: Option<ExternalSecret>, + config_override: ConfigOverride, + ingress_params: IngressParams, + k8s_service_config_params: Option<K8sServiceConfigParams>, + ) -> Self { + let node_services = node_type.all_service_names(); + + let config_override_files = + config_override.get_config_file_paths(&environment.env_dir_path(), instance_name); + let config_filenames: Vec<String> = + once(BASE_APP_CONFIG_PATH.to_string()).chain(config_override_files).collect(); + + let services = node_services + .iter() + .map(|node_service| { + node_service.create_service( + &environment, + &external_secret, + config_filenames.clone(), + ingress_params.clone(), + k8s_service_config_params.clone(), + ) + }) + .collect(); + Self { + application_config_subdir: CONFIG_BASE_DIR.into(), + services, + deployment_aux_data: DeploymentAuxData { + node_type, + environment, + instance_name: instance_name.to_string(), + config_override, + }, + } + } + + pub fn get_node_type(&self) -> &NodeType { + &self.deployment_aux_data.node_type + } + + pub fn get_all_services_config_paths(&self) -> Vec<Vec<String>> { + self.services.iter().map(|service| service.get_service_config_paths()).collect() + } + + pub fn deployment_file_path(&self) -> PathBuf { + self.deployment_aux_data + .environment + .env_dir_path() + .join(format!("deployment_config_{}.json", self.deployment_aux_data.instance_name)) + } + + pub fn dump_config_override_files(&self) { + self.deployment_aux_data.config_override.dump_config_files( + &self.deployment_aux_data.environment.env_dir_path(), + &self.deployment_aux_data.instance_name, + ); + } + + #[cfg(test)] + pub fn test_dump_config_override_files(&self) { + self.deployment_aux_data.config_override.test_dump_config_files( + &self.deployment_aux_data.environment.env_dir_path(), + &self.deployment_aux_data.instance_name, + ); + } +} + +#[derive(Clone, Debug)] +struct DeploymentAuxData { + node_type: NodeType, + environment: Environment, + instance_name: String, + config_override: ConfigOverride, +} + +// Creates the service name in the format: <node_service>.<namespace>.<domain> +pub(crate) fn build_service_namespace_domain_address( + node_service: &str, + namespace: &str, + domain: &str, +) -> String { + format!("{}.{}.{}", node_service, namespace, domain) +} + +// TODO(Tsabary): when transitioning to running nodes in different clusters, this enum should be +// removed, and the p2p address should always be `External`. +#[derive(Clone, Copy)] +pub enum P2PCommunicationType { + Internal, + External, +} + +impl P2PCommunicationType { + pub(crate) fn get_p2p_domain(&self, domain: &str) -> String { + match self { + P2PCommunicationType::Internal => "svc.cluster.local", + P2PCommunicationType::External => domain, + } + .to_string() + } + + pub(crate) fn get_k8s_service_type(&self) -> K8SServiceType { + K8SServiceType::LoadBalancer + } +}
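To make the address plumbing concrete: `build_service_namespace_domain_address` and `get_p2p_domain` compose the DNS name that `get_p2p_address` (in addresses.rs above) wraps into a multiaddr. A sketch with hypothetical inputs; the port is a placeholder, the peer id is PEER_IDS[0] from this patch:

fn main() {
    // Internal p2p communication resolves inside the cluster.
    let domain = "svc.cluster.local"; // P2PCommunicationType::Internal
    let dns = format!("{}.{}.{}", "mempool", "apollo-mainnet-0", domain);
    assert_eq!(dns, "mempool.apollo-mainnet-0.svc.cluster.local");
    // addresses.rs then wraps it as /dns/<dns>/tcp/<port>/p2p/<peer_id>:
    let multiaddr =
        format!("/dns/{}/tcp/{}/p2p/{}", dns, 10000, "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5");
    println!("{multiaddr}");
}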
diff --git a/crates/apollo_deployments/src/deployment_definitions.rs b/crates/apollo_deployments/src/deployment_definitions.rs new file mode 100644 index 00000000000..3492a1c97ae --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions.rs @@ -0,0 +1,129 @@ +use std::fmt::{Display, Formatter, Result}; +use std::path::PathBuf; + +use serde::Serialize; +use strum_macros::{Display, EnumString}; + +use crate::deployment::Deployment; +use crate::deployment_definitions::mainnet::mainnet_hybrid_deployments; +use crate::deployment_definitions::potc2_sepolia::potc2_sepolia_hybrid_deployments; +use crate::deployment_definitions::sepolia_integration::sepolia_integration_hybrid_deployments; +use crate::deployment_definitions::sepolia_testnet::sepolia_testnet_hybrid_deployments; +use crate::deployment_definitions::stress_test::stress_test_hybrid_deployments; +use crate::deployment_definitions::testing::system_test_deployments; +use crate::deployment_definitions::upgrade_test::upgrade_test_hybrid_deployments; + +#[cfg(test)] +#[path = "deployment_definitions_test.rs"] +mod deployment_definitions_test; + +mod mainnet; +mod potc2_sepolia; +mod sepolia_integration; +mod sepolia_testnet; +mod stress_test; +mod testing; +mod upgrade_test; + +pub(crate) const CONFIG_BASE_DIR: &str = "crates/apollo_deployments/resources/"; +pub(crate) const DEPLOYMENT_CONFIG_DIR_NAME: &str = "deployments/"; +pub(crate) const BASE_APP_CONFIG_PATH: &str = + "crates/apollo_deployments/resources/base_app_config.json"; + +type DeploymentFn = fn() -> Vec<Deployment>; + +pub const DEPLOYMENTS: &[DeploymentFn] = &[ + potc2_sepolia_hybrid_deployments, + mainnet_hybrid_deployments, + sepolia_integration_hybrid_deployments, + sepolia_testnet_hybrid_deployments, + stress_test_hybrid_deployments, + system_test_deployments, + upgrade_test_hybrid_deployments, +]; + +#[derive(Clone, Debug, PartialEq)] +pub enum Environment { + CloudK8s(CloudK8sEnvironment), + LocalK8s, +} + +impl Display for Environment { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + match self { + Environment::CloudK8s(e) => write!(f, "{e}"), + Environment::LocalK8s => write!(f, "testing"), + } + } +} + +#[derive(EnumString, Clone, Display, PartialEq, Debug)] +#[strum(serialize_all = "snake_case")] +pub enum CloudK8sEnvironment { + Potc2, + Mainnet, + SepoliaIntegration, + SepoliaTestnet, + #[strum(serialize = "stress_test")] + StressTest, + #[strum(serialize = "upgrade_test")] + UpgradeTest, +} + +impl Environment { + pub fn env_dir_path(&self) -> PathBuf { + let env_str = match self { + Environment::CloudK8s(env) => env.to_string(), + Environment::LocalK8s => "testing".to_string(), + }; + PathBuf::from(CONFIG_BASE_DIR).join(DEPLOYMENT_CONFIG_DIR_NAME).join(env_str) + } +} + +#[derive(Clone, Debug, Serialize, PartialEq)] +pub struct StateSyncConfig { + #[serde(rename = "state_sync_config.central_sync_client_config.#is_none")] + state_sync_config_central_sync_client_config_is_none: bool, + #[serde(rename = "state_sync_config.p2p_sync_client_config.#is_none")] + state_sync_config_p2p_sync_client_config_is_none: bool, + #[serde(rename = "state_sync_config.network_config.#is_none")] + state_sync_config_network_config_is_none: bool, +} + +pub enum StateSyncType { + Central, + P2P, +} + +impl StateSyncType { + pub fn get_state_sync_config(&self) -> StateSyncConfig { + match self { + StateSyncType::Central => StateSyncConfig { + state_sync_config_central_sync_client_config_is_none: false, + state_sync_config_p2p_sync_client_config_is_none: true, + state_sync_config_network_config_is_none: true, + }, + StateSyncType::P2P => StateSyncConfig { + state_sync_config_central_sync_client_config_is_none: true, + state_sync_config_p2p_sync_client_config_is_none: false, + state_sync_config_network_config_is_none: false, + }, + } + } +} + +#[derive(Clone, Debug, Display, Serialize, PartialEq)] +pub enum ServicePort { + Batcher, + ClassManager, + Gateway, + L1EndpointMonitor, + L1GasPriceProvider, + L1Provider, + Mempool, + MempoolP2p, + SierraCompiler, + StateSync, + HttpServer, + MonitoringEndpoint, +}
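The directory layout implied by `env_dir_path` is worth spelling out, since both the generator binary and the fixture tests below rely on it. A sketch using the constants defined above:

use std::path::PathBuf;

fn main() {
    // Environment::CloudK8s(CloudK8sEnvironment::Mainnet) stringifies to "mainnet"
    // (snake_case via strum), so its deployment files live under:
    let path = PathBuf::from("crates/apollo_deployments/resources/")
        .join("deployments/")
        .join("mainnet");
    assert_eq!(path.to_str().unwrap(), "crates/apollo_deployments/resources/deployments/mainnet");
    // Environment::LocalK8s maps to the "testing" subdirectory instead.
}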
"alpha-mainnet.starknet.io"; +const INGRESS_DOMAIN: &str = "starknet.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-mainnet-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-mainnet-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4"; +const CHAIN_ID: &str = "SN_MAIN"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const STARKNET_GATEWAY_URL: &str = "https://feeder.alpha-mainnet.starknet.io"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option = None; +const STATE_SYNC_TYPE: StateSyncType = StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::External; +const DEPLOYMENT_ENVIRONMENT: Environment = Environment::CloudK8s(CloudK8sEnvironment::Mainnet); + +pub(crate) fn mainnet_hybrid_deployments() -> Vec { + NODE_IDS + .map(|i| { + hybrid_deployment( + i, + P2P_COMMUNICATION_TYPE, + DEPLOYMENT_ENVIRONMENT, + &INSTANCE_NAME_FORMAT, + &SECRET_NAME_FORMAT, + DeploymentConfigOverride::new( + STARKNET_CONTRACT_ADDRESS, + CHAIN_ID, + ETH_FEE_TOKEN_ADDRESS, + Url::parse(STARKNET_GATEWAY_URL).expect("Invalid URL"), + STRK_FEE_TOKEN_ADDRESS, + L1_STARTUP_HEIGHT_OVERRIDE, + NODE_IDS.len(), + STATE_SYNC_TYPE, + ), + &NODE_NAMESPACE_FORMAT, + INGRESS_DOMAIN, + HTTP_SERVER_INGRESS_ALTERNATIVE_NAME, + Some(K8sServiceConfigParams::new( + NODE_NAMESPACE_FORMAT.format(&[&i]), + INGRESS_DOMAIN.to_string(), + P2P_COMMUNICATION_TYPE, + )), + ) + }) + .to_vec() +} diff --git a/crates/apollo_deployments/src/deployment_definitions/potc2_sepolia.rs b/crates/apollo_deployments/src/deployment_definitions/potc2_sepolia.rs new file mode 100644 index 00000000000..61fd0a57d40 --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/potc2_sepolia.rs @@ -0,0 +1,60 @@ +use apollo_infra_utils::template::Template; +use starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::DeploymentConfigOverride; +use crate::deployment::{Deployment, P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, StateSyncType}; +use crate::deployments::hybrid::{hybrid_deployment, INSTANCE_NAME_FORMAT}; +use crate::k8s::K8sServiceConfigParams; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "potc-mock-sepolia.starknet.io"; +const INGRESS_DOMAIN: &str = "starknet.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-potc-2-sepolia-mock-sharp-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-potc-2-sepolia-mock-sharp-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0xd8A5518cf4AC3ECD3b4cec772478109679a73E78"; +const CHAIN_ID: &str = "PRIVATE_SN_POTC_MOCK_SEPOLIA"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const STARKNET_GATEWAY_URL: &str = "https://feeder.potc-mock-sepolia-fgw.starknet.io/"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option = None; +const STATE_SYNC_TYPE: StateSyncType = StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::Internal; +const DEPLOYMENT_ENVIRONMENT: Environment = Environment::CloudK8s(CloudK8sEnvironment::Potc2); + +pub(crate) fn potc2_sepolia_hybrid_deployments() -> Vec { + NODE_IDS + 
diff --git a/crates/apollo_deployments/src/deployment_definitions/potc2_sepolia.rs b/crates/apollo_deployments/src/deployment_definitions/potc2_sepolia.rs new file mode 100644 index 00000000000..61fd0a57d40 --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/potc2_sepolia.rs @@ -0,0 +1,60 @@ +use apollo_infra_utils::template::Template; +use starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::DeploymentConfigOverride; +use crate::deployment::{Deployment, P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, StateSyncType}; +use crate::deployments::hybrid::{hybrid_deployment, INSTANCE_NAME_FORMAT}; +use crate::k8s::K8sServiceConfigParams; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "potc-mock-sepolia.starknet.io"; +const INGRESS_DOMAIN: &str = "starknet.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-potc-2-sepolia-mock-sharp-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-potc-2-sepolia-mock-sharp-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0xd8A5518cf4AC3ECD3b4cec772478109679a73E78"; +const CHAIN_ID: &str = "PRIVATE_SN_POTC_MOCK_SEPOLIA"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const STARKNET_GATEWAY_URL: &str = "https://feeder.potc-mock-sepolia-fgw.starknet.io/"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option<BlockNumber> = None; +const STATE_SYNC_TYPE: StateSyncType = StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::Internal; +const DEPLOYMENT_ENVIRONMENT: Environment = Environment::CloudK8s(CloudK8sEnvironment::Potc2); + +pub(crate) fn potc2_sepolia_hybrid_deployments() -> Vec<Deployment> { + NODE_IDS + .map(|i| { + hybrid_deployment( + i, + P2P_COMMUNICATION_TYPE, + DEPLOYMENT_ENVIRONMENT, + &INSTANCE_NAME_FORMAT, + &SECRET_NAME_FORMAT, + DeploymentConfigOverride::new( + STARKNET_CONTRACT_ADDRESS, + CHAIN_ID, + ETH_FEE_TOKEN_ADDRESS, + Url::parse(STARKNET_GATEWAY_URL).expect("Invalid URL"), + STRK_FEE_TOKEN_ADDRESS, + L1_STARTUP_HEIGHT_OVERRIDE, + NODE_IDS.len(), + STATE_SYNC_TYPE, + ), + &NODE_NAMESPACE_FORMAT, + INGRESS_DOMAIN, + HTTP_SERVER_INGRESS_ALTERNATIVE_NAME, + Some(K8sServiceConfigParams::new( + NODE_NAMESPACE_FORMAT.format(&[&i]), + INGRESS_DOMAIN.to_string(), + P2P_COMMUNICATION_TYPE, + )), + ) + }) + .to_vec() +} diff --git a/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs b/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs new file mode 100644 index 00000000000..997122d6e01 --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs @@ -0,0 +1,56 @@ +use apollo_infra_utils::template::Template; +use starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::DeploymentConfigOverride; +use crate::deployment::{Deployment, P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, StateSyncType}; +use crate::deployments::hybrid::{hybrid_deployment, INSTANCE_NAME_FORMAT}; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "integration-sepolia.starknet.io"; +const INGRESS_DOMAIN: &str = "starknet.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-sepolia-integration-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-sepolia-integration-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0x4737c0c1B4D5b1A687B42610DdabEE781152359c"; +const CHAIN_ID: &str = "SN_INTEGRATION_SEPOLIA"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const STARKNET_GATEWAY_URL: &str = "https://feeder.integration-sepolia.starknet.io"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option<BlockNumber> = None; +const STATE_SYNC_TYPE: StateSyncType = StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::Internal; +const DEPLOYMENT_ENVIRONMENT: Environment = + Environment::CloudK8s(CloudK8sEnvironment::SepoliaIntegration); + +pub(crate) fn sepolia_integration_hybrid_deployments() -> Vec<Deployment> { + NODE_IDS + .map(|i| { + hybrid_deployment( + i, + P2P_COMMUNICATION_TYPE, + DEPLOYMENT_ENVIRONMENT, + &INSTANCE_NAME_FORMAT, + &SECRET_NAME_FORMAT, + DeploymentConfigOverride::new( + STARKNET_CONTRACT_ADDRESS, + CHAIN_ID, + ETH_FEE_TOKEN_ADDRESS, + Url::parse(STARKNET_GATEWAY_URL).expect("Invalid URL"), + STRK_FEE_TOKEN_ADDRESS, + L1_STARTUP_HEIGHT_OVERRIDE, + NODE_IDS.len(), + STATE_SYNC_TYPE, + ), + &NODE_NAMESPACE_FORMAT, + INGRESS_DOMAIN, + HTTP_SERVER_INGRESS_ALTERNATIVE_NAME, + None, + ) + }) + .to_vec() +} diff --git a/crates/apollo_deployments/src/deployment_definitions/sepolia_testnet.rs b/crates/apollo_deployments/src/deployment_definitions/sepolia_testnet.rs new file mode 100644 index 00000000000..b4e9f78565c --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/sepolia_testnet.rs @@ -0,0 +1,61 @@ +use apollo_infra_utils::template::Template; +use starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::DeploymentConfigOverride; +use crate::deployment::{Deployment,
P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, StateSyncType}; +use crate::deployments::hybrid::{hybrid_deployment, INSTANCE_NAME_FORMAT}; +use crate::k8s::K8sServiceConfigParams; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "alpha-sepolia.starknet.io"; +const INGRESS_DOMAIN: &str = "starknet.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-sepolia-alpha-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-sepolia-alpha-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057"; +const CHAIN_ID: &str = "SN_SEPOLIA"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"; +const STARKNET_GATEWAY_URL: &str = "https://feeder.alpha-sepolia.starknet.io"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option<BlockNumber> = None; +const STATE_SYNC_TYPE: StateSyncType = StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::External; +const DEPLOYMENT_ENVIRONMENT: Environment = + Environment::CloudK8s(CloudK8sEnvironment::SepoliaTestnet); + +pub(crate) fn sepolia_testnet_hybrid_deployments() -> Vec<Deployment> { + NODE_IDS + .map(|i| { + hybrid_deployment( + i, + P2P_COMMUNICATION_TYPE, + DEPLOYMENT_ENVIRONMENT, + &INSTANCE_NAME_FORMAT, + &SECRET_NAME_FORMAT, + DeploymentConfigOverride::new( + STARKNET_CONTRACT_ADDRESS, + CHAIN_ID, + ETH_FEE_TOKEN_ADDRESS, + Url::parse(STARKNET_GATEWAY_URL).expect("Invalid URL"), + STRK_FEE_TOKEN_ADDRESS, + L1_STARTUP_HEIGHT_OVERRIDE, + NODE_IDS.len(), + STATE_SYNC_TYPE, + ), + &NODE_NAMESPACE_FORMAT, + INGRESS_DOMAIN, + HTTP_SERVER_INGRESS_ALTERNATIVE_NAME, + Some(K8sServiceConfigParams::new( + NODE_NAMESPACE_FORMAT.format(&[&i]), + INGRESS_DOMAIN.to_string(), + P2P_COMMUNICATION_TYPE, + )), + ) + }) + .to_vec() +} diff --git a/crates/apollo_deployments/src/deployment_definitions/stress_test.rs b/crates/apollo_deployments/src/deployment_definitions/stress_test.rs new file mode 100644 index 00000000000..76b93eaf906 --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/stress_test.rs @@ -0,0 +1,55 @@ +use apollo_infra_utils::template::Template; +use starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::DeploymentConfigOverride; +use crate::deployment::{Deployment, P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, StateSyncType}; +use crate::deployments::hybrid::{hybrid_deployment, INSTANCE_NAME_FORMAT}; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "apollo-stresstest-dev.sw-dev.io"; +const INGRESS_DOMAIN: &str = "sw-dev.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-stresstest-dev-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-stresstest-dev-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f"; +const CHAIN_ID: &str = "INTERNAL_STRESS_TEST"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x7e813ecf3e7b3e14f07bd2f68cb4a3d12110e3c75ec5a63de3d2dacf1852904"; +const STARKNET_GATEWAY_URL: &str = "http://feeder-gateway.starknet-0-14-0-stress-test-03:9713/"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x2208cce4221df1f35943958340abc812aa79a8f6a533bff4ee00416d3d06cd6"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option<BlockNumber> = None; +const STATE_SYNC_TYPE: StateSyncType =
StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::Internal; +const DEPLOYMENT_ENVIRONMENT: Environment = Environment::CloudK8s(CloudK8sEnvironment::StressTest); + +pub(crate) fn stress_test_hybrid_deployments() -> Vec<Deployment> { + NODE_IDS + .map(|i| { + hybrid_deployment( + i, + P2P_COMMUNICATION_TYPE, + DEPLOYMENT_ENVIRONMENT, + &INSTANCE_NAME_FORMAT, + &SECRET_NAME_FORMAT, + DeploymentConfigOverride::new( + STARKNET_CONTRACT_ADDRESS, + CHAIN_ID, + ETH_FEE_TOKEN_ADDRESS, + Url::parse(STARKNET_GATEWAY_URL).expect("Invalid URL"), + STRK_FEE_TOKEN_ADDRESS, + L1_STARTUP_HEIGHT_OVERRIDE, + NODE_IDS.len(), + STATE_SYNC_TYPE, + ), + &NODE_NAMESPACE_FORMAT, + INGRESS_DOMAIN, + HTTP_SERVER_INGRESS_ALTERNATIVE_NAME, + None, + ) + }) + .to_vec() +} diff --git a/crates/apollo_deployments/src/deployment_definitions/testing.rs b/crates/apollo_deployments/src/deployment_definitions/testing.rs new file mode 100644 index 00000000000..0e9e12fe03b --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/testing.rs @@ -0,0 +1,89 @@ +use starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::{ + ConfigOverride, + DeploymentConfigOverride, + InstanceConfigOverride, + NetworkConfigOverride, +}; +use crate::deployment::Deployment; +use crate::deployment_definitions::{Environment, StateSyncType}; +use crate::k8s::IngressParams; +use crate::service::NodeType; + +const TESTING_INGRESS_DOMAIN: &str = "sw-dev.io"; +const TESTING_NODE_IDS: [usize; 1] = [0]; + +pub(crate) fn system_test_deployments() -> Vec<Deployment> { + vec![ + system_test_distributed_deployment(), + system_test_hybrid_deployment(), + system_test_consolidated_deployment(), + ] +} + +fn testing_deployment_config_override() -> DeploymentConfigOverride { + DeploymentConfigOverride::new( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "CHAIN_ID_SUBDIR", + "0x1001", + Url::parse("https://integration-sepolia.starknet.io/").expect("Invalid URL"), + "0x1002", + Some(BlockNumber(1)), + TESTING_NODE_IDS.len(), + StateSyncType::P2P, + ) +} + +fn testing_instance_config_override() -> InstanceConfigOverride { + InstanceConfigOverride::new( + NetworkConfigOverride::new(None, None), + NetworkConfigOverride::new(None, None), + "0x64", + ) +} + +fn testing_config_override() -> ConfigOverride { + ConfigOverride::new(testing_deployment_config_override(), testing_instance_config_override()) +} + +fn get_ingress_params() -> IngressParams { + IngressParams::new(TESTING_INGRESS_DOMAIN.to_string(), None) +} + +fn system_test_distributed_deployment() -> Deployment { + Deployment::new( + NodeType::Distributed, + Environment::LocalK8s, + "distributed", + None, + testing_config_override(), + get_ingress_params(), + None, + ) +} + +fn system_test_hybrid_deployment() -> Deployment { + Deployment::new( + NodeType::Hybrid, + Environment::LocalK8s, + "hybrid", + None, + testing_config_override(), + get_ingress_params(), + None, + ) +} + +fn system_test_consolidated_deployment() -> Deployment { + Deployment::new( + NodeType::Consolidated, + Environment::LocalK8s, + "consolidated", + None, + testing_config_override(), + get_ingress_params(), + None, + ) +} diff --git a/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs b/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs new file mode 100644 index 00000000000..aa07532acb9 --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs @@ -0,0 +1,60 @@ +use apollo_infra_utils::template::Template; +use
starknet_api::block::BlockNumber; +use url::Url; + +use crate::config_override::DeploymentConfigOverride; +use crate::deployment::{Deployment, P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, StateSyncType}; +use crate::deployments::hybrid::{hybrid_deployment, INSTANCE_NAME_FORMAT}; +use crate::k8s::K8sServiceConfigParams; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "sn-alpha-test-upgrade.gateway-proxy.sw-dev.io"; +const INGRESS_DOMAIN: &str = "sw-dev.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-alpha-test-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-alpha-test-{}"); + +const STARKNET_CONTRACT_ADDRESS: &str = "0x9b8A6361d204a0C1F93d5194763538057444d958"; +const CHAIN_ID: &str = "SN_GOERLI"; +const ETH_FEE_TOKEN_ADDRESS: &str = + "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16"; +const STARKNET_GATEWAY_URL: &str = "https://feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io"; +const STRK_FEE_TOKEN_ADDRESS: &str = + "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4"; +const L1_STARTUP_HEIGHT_OVERRIDE: Option<BlockNumber> = None; +const STATE_SYNC_TYPE: StateSyncType = StateSyncType::Central; + +const P2P_COMMUNICATION_TYPE: P2PCommunicationType = P2PCommunicationType::External; +const DEPLOYMENT_ENVIRONMENT: Environment = Environment::CloudK8s(CloudK8sEnvironment::UpgradeTest); + +pub(crate) fn upgrade_test_hybrid_deployments() -> Vec<Deployment> { + NODE_IDS + .map(|i| { + hybrid_deployment( + i, + P2P_COMMUNICATION_TYPE, + DEPLOYMENT_ENVIRONMENT, + &INSTANCE_NAME_FORMAT, + &SECRET_NAME_FORMAT, + DeploymentConfigOverride::new( + STARKNET_CONTRACT_ADDRESS, + CHAIN_ID, + ETH_FEE_TOKEN_ADDRESS, + Url::parse(STARKNET_GATEWAY_URL).expect("Invalid URL"), + STRK_FEE_TOKEN_ADDRESS, + L1_STARTUP_HEIGHT_OVERRIDE, + NODE_IDS.len(), + STATE_SYNC_TYPE, + ), + &NODE_NAMESPACE_FORMAT, + INGRESS_DOMAIN, + HTTP_SERVER_INGRESS_ALTERNATIVE_NAME, + Some(K8sServiceConfigParams::new( + NODE_NAMESPACE_FORMAT.format(&[&i]), + INGRESS_DOMAIN.to_string(), + P2P_COMMUNICATION_TYPE, + )), + ) + }) + .to_vec() +} diff --git a/crates/apollo_deployments/src/deployment_definitions_test.rs b/crates/apollo_deployments/src/deployment_definitions_test.rs new file mode 100644 index 00000000000..064d59e68b0 --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions_test.rs @@ -0,0 +1,203 @@ +use std::collections::{BTreeMap, BTreeSet}; +use std::env; +use std::fs::File; + +use apollo_config::CONFIG_FILE_ARG; +use apollo_infra_utils::dumping::{serialize_to_file, serialize_to_file_test}; +use apollo_infra_utils::path::resolve_project_relative_path; +use apollo_node::config::component_execution_config::{ + ActiveComponentExecutionMode, + ReactiveComponentExecutionMode, +}; +use apollo_node::config::config_utils::private_parameters; +use apollo_node::config::node_config::SequencerNodeConfig; +use serde_json::{to_value, Map, Value}; +use strum::IntoEnumIterator; +use tempfile::NamedTempFile; + +use crate::deployment_definitions::DEPLOYMENTS; +use crate::service::NodeType; +use crate::test_utils::{SecretsConfigOverride, FIX_BINARY_NAME}; + +const SECRETS_FOR_TESTING_ENV_PATH: &str = + "crates/apollo_deployments/resources/testing_secrets.json"; + +/// Test that the deployment file is up to date.
+#[test] +fn deployment_files_are_up_to_date() { + env::set_current_dir(resolve_project_relative_path("").unwrap()) + .expect("Couldn't set working dir."); + + for node_type in NodeType::iter() { + node_type.test_dump_service_component_configs(None); + } + for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { + serialize_to_file_test( + &deployment, + deployment.deployment_file_path().to_str().unwrap(), + FIX_BINARY_NAME, + ); + deployment.test_dump_config_override_files(); + } +} + +// Test that each service's config files constitute a valid config. +#[test] +fn load_and_process_service_config_files() { + env::set_current_dir(resolve_project_relative_path("").unwrap()) + .expect("Couldn't set working dir."); + + // Create a dummy secrets file and append its path to the config file paths. + let temp_file = NamedTempFile::new().unwrap(); + let temp_file_path = temp_file.path().to_str().unwrap(); + let secrets_config_override = SecretsConfigOverride::default(); + serialize_to_file(to_value(&secrets_config_override).unwrap(), temp_file_path); + + for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { + for mut service_config_paths in deployment.get_all_services_config_paths().into_iter() { + println!( + "Loading deployment {} in path {:?} with application files {:?} ... ", + deployment.get_node_type(), + deployment.deployment_file_path(), + service_config_paths + ); + + // Add the secrets config file path to the config load command. + service_config_paths.push(temp_file_path.to_string()); + + // Check that there are no duplicate entries in the config files. Although the node can + // override such values, we keep the deployment files clean by avoiding these. + + let mut key_to_files: BTreeMap<String, BTreeSet<String>> = BTreeMap::new(); + + for path in &service_config_paths { + let file = File::open(path).unwrap(); + let json_map: Map<String, Value> = serde_json::from_reader(file) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) + .unwrap(); + + for key in json_map.keys() { + key_to_files.entry(key.clone()).or_default().insert(path.to_string()); + } + } + + // Report duplicated keys. + let mut has_duplicates = false; + for (key, files) in &key_to_files { + if files.len() > 1 { + has_duplicates = true; + println!("Key '{}' found in files: {:?}", key, files); + } + } + assert!(!has_duplicates, "Found duplicate keys in service config files."); + + // Load the config files into a command line argument format. + let config_file_args: Vec<String> = service_config_paths + .clone() + .into_iter() + .flat_map(|path| vec![CONFIG_FILE_ARG.to_string(), path]) + .collect(); + + let mut config_load_command: Vec<String> = vec!["command_name_placeholder".to_string()]; + config_load_command.extend(config_file_args); + let load_result = SequencerNodeConfig::load_and_process(config_load_command); + + load_result.unwrap_or_else(|err| { + panic!( + "Loading deployment in path {:?} with application config files {:?}\nResulted \ in error: {}", + deployment.deployment_file_path(), + service_config_paths, + err + ); + }); + } + } +}
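A sketch of the argument vector this test hands to `SequencerNodeConfig::load_and_process`: the flattened list of flag/path pairs after a placeholder program name. Note the flag string below is an assumption about `CONFIG_FILE_ARG`'s value; the diff imports the constant but does not show it:

fn main() {
    let service_config_paths = ["base_app_config.json", "deployment_config_override.json", "hybrid_0.json"];
    let mut config_load_command = vec!["command_name_placeholder".to_string()];
    config_load_command.extend(
        service_config_paths.iter().flat_map(|path| ["--config_file".to_string(), path.to_string()]),
    );
    // command_name_placeholder --config_file base_app_config.json --config_file ...
    println!("{}", config_load_command.join(" "));
}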
+/// Test that the private values in the apollo node config schema match the secrets config override +/// schema. +#[test] +fn secrets_config_and_private_parameters_config_schema_compatibility() { + let secrets_config_override = SecretsConfigOverride::default(); + let secrets_provided_by_config = to_value(&secrets_config_override) + .unwrap() + .as_object() + .unwrap() + .keys() + .cloned() + .collect::<BTreeSet<_>>(); + let secrets_required_by_schema = private_parameters(); + + let only_in_config: BTreeSet<_> = + secrets_provided_by_config.difference(&secrets_required_by_schema).collect(); + let only_in_schema: BTreeSet<_> = + secrets_required_by_schema.difference(&secrets_provided_by_config).collect(); + + if !(only_in_config.is_empty() && only_in_schema.is_empty()) { + panic!( + "Secrets config override schema mismatch:\nSecrets provided by config: {:?}\nSecrets \ required by schema: {:?}\nOnly in config: {:?}\nOnly in schema: {:?}", + secrets_provided_by_config, secrets_required_by_schema, only_in_config, only_in_schema + ); + } + + let secrets_for_testing_file_path = + &resolve_project_relative_path(SECRETS_FOR_TESTING_ENV_PATH).unwrap(); + let secrets_for_testing: BTreeSet<_> = (serde_json::from_reader::<_, Map<String, Value>>( + File::open(secrets_for_testing_file_path).unwrap(), + ) + .unwrap()) + .keys() + .cloned() + .collect(); + + let only_in_secrets_for_testing: BTreeSet<_> = + secrets_for_testing.difference(&secrets_required_by_schema).collect(); + let only_in_schema: BTreeSet<_> = + secrets_required_by_schema.difference(&secrets_for_testing).collect(); + + if !(only_in_secrets_for_testing.is_empty() && only_in_schema.is_empty()) { + panic!( + "Secrets for testing and schema mismatch:\nSecrets for testing: {:?}\nSecrets \ required by schema: {:?}\nOnly in testing: {:?}\nOnly in schema: {:?}", + secrets_for_testing, + secrets_required_by_schema, + only_in_secrets_for_testing, + only_in_schema + ); + } +} + +#[test] +fn l1_components_state_consistency() { + for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { + let node_type = deployment.get_node_type(); + let component_configs = node_type.get_component_configs(None); + + let l1_gas_price_provider_indicator = component_configs.values().any(|component_config| { + component_config.l1_gas_price_provider.execution_mode + != ReactiveComponentExecutionMode::Disabled + }); + let l1_provider_indicator = component_configs.values().any(|component_config| { + component_config.l1_provider.execution_mode != ReactiveComponentExecutionMode::Disabled + }); + + let l1_gas_price_scraper_indicator = component_configs.values().any(|component_config| { + component_config.l1_gas_price_scraper.execution_mode + != ActiveComponentExecutionMode::Disabled + }); + let l1_scraper_indicator = component_configs.values().any(|component_config| { + component_config.l1_scraper.execution_mode != ActiveComponentExecutionMode::Disabled + }); + + assert_eq!( + l1_gas_price_provider_indicator, l1_gas_price_scraper_indicator, + "L1 gas price provider and scraper should either be both enabled or both disabled." + ); + assert_eq!( + l1_provider_indicator, l1_scraper_indicator, + "L1 provider and scraper should either be both enabled or both disabled."
+ ); + } +} diff --git a/crates/apollo_deployments/src/deployments.rs b/crates/apollo_deployments/src/deployments.rs new file mode 100644 index 00000000000..04068511e38 --- /dev/null +++ b/crates/apollo_deployments/src/deployments.rs @@ -0,0 +1,5 @@ +pub mod consolidated; +pub mod distributed; +pub mod hybrid; + +pub(crate) const IDLE_CONNECTIONS_FOR_AUTOSCALED_SERVICES: usize = 0; diff --git a/crates/apollo_deployments/src/deployments/consolidated.rs b/crates/apollo_deployments/src/deployments/consolidated.rs new file mode 100644 index 00000000000..e7b3f78c60d --- /dev/null +++ b/crates/apollo_deployments/src/deployments/consolidated.rs @@ -0,0 +1,137 @@ +use std::collections::BTreeMap; + +use apollo_node::config::component_config::ComponentConfig; +use apollo_node::config::component_execution_config::{ + ActiveComponentExecutionConfig, + ReactiveComponentExecutionConfig, +}; +use indexmap::IndexMap; +use serde::Serialize; +use strum::Display; +use strum_macros::{AsRefStr, EnumIter}; + +use crate::deployment_definitions::{Environment, ServicePort}; +use crate::k8s::{ + get_ingress, + Controller, + Ingress, + IngressParams, + Resource, + Resources, + Toleration, +}; +use crate::service::{GetComponentConfigs, NodeService, ServiceNameInner}; + +const NODE_STORAGE: usize = 1000; +const TESTING_NODE_STORAGE: usize = 1; + +#[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, Serialize, AsRefStr, EnumIter)] +#[strum(serialize_all = "snake_case")] +pub enum ConsolidatedNodeServiceName { + Node, +} + +impl From<ConsolidatedNodeServiceName> for NodeService { + fn from(service: ConsolidatedNodeServiceName) -> Self { + NodeService::Consolidated(service) + } +} + +impl GetComponentConfigs for ConsolidatedNodeServiceName { + fn get_component_configs(_ports: Option<Vec<u16>>) -> IndexMap<NodeService, ComponentConfig> { + let mut component_config_map = IndexMap::new(); + component_config_map.insert( + NodeService::Consolidated(ConsolidatedNodeServiceName::Node), + get_consolidated_config(), + ); + component_config_map + } +} + +impl ServiceNameInner for ConsolidatedNodeServiceName { + fn get_controller(&self) -> Controller { + match self { + ConsolidatedNodeServiceName::Node => Controller::StatefulSet, + } + } + + fn get_autoscale(&self) -> bool { + match self { + ConsolidatedNodeServiceName::Node => false, + } + } + + fn get_toleration(&self, environment: &Environment) -> Option<Toleration> { + match environment { + Environment::CloudK8s(_) => Some(Toleration::ApolloCoreService), + Environment::LocalK8s => None, + } + } + + fn get_ingress( + &self, + environment: &Environment, + ingress_params: IngressParams, + ) -> Option<Ingress> { + match environment { + Environment::CloudK8s(_) => get_ingress(ingress_params, false), + Environment::LocalK8s => None, + } + } + + fn has_p2p_interface(&self) -> bool { + true + } + + fn get_storage(&self, environment: &Environment) -> Option<usize> { + match environment { + Environment::CloudK8s(_) => Some(NODE_STORAGE), + Environment::LocalK8s => Some(TESTING_NODE_STORAGE), + } + } + + fn get_resources(&self, environment: &Environment) -> Resources { + match environment { + Environment::CloudK8s(_) => Resources::new(Resource::new(2, 4), Resource::new(4, 8)), + Environment::LocalK8s => Resources::new(Resource::new(1, 2), Resource::new(4, 8)), + } + } + + fn get_replicas(&self, _environment: &Environment) -> usize { + 1 + } + + fn get_anti_affinity(&self, environment: &Environment) -> bool { + match environment { + Environment::CloudK8s(_) => true, + Environment::LocalK8s => false, + } + } + + // TODO(Nadin): Implement this method to return the actual ports
used by the service. + fn get_ports(&self) -> BTreeMap<ServicePort, u16> { + BTreeMap::new() + } +} + +fn get_consolidated_config() -> ComponentConfig { + let base = ReactiveComponentExecutionConfig::local_with_remote_disabled(); + + ComponentConfig { + batcher: base.clone(), + class_manager: base.clone(), + gateway: base.clone(), + mempool: base.clone(), + mempool_p2p: base.clone(), + sierra_compiler: base.clone(), + state_sync: base.clone(), + l1_endpoint_monitor: base.clone(), + l1_provider: base.clone(), + l1_gas_price_provider: base.clone(), + consensus_manager: ActiveComponentExecutionConfig::enabled(), + http_server: ActiveComponentExecutionConfig::enabled(), + l1_scraper: ActiveComponentExecutionConfig::enabled(), + l1_gas_price_scraper: ActiveComponentExecutionConfig::enabled(), + monitoring_endpoint: ActiveComponentExecutionConfig::enabled(), + } +} diff --git a/crates/apollo_deployments/src/deployments/distributed.rs b/crates/apollo_deployments/src/deployments/distributed.rs new file mode 100644 index 00000000000..5a04a40e1b8 --- /dev/null +++ b/crates/apollo_deployments/src/deployments/distributed.rs @@ -0,0 +1,444 @@ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr}; + +use apollo_node::config::component_config::ComponentConfig; +use apollo_node::config::component_execution_config::{ + ActiveComponentExecutionConfig, + ReactiveComponentExecutionConfig, +}; +use indexmap::IndexMap; +use serde::Serialize; +use strum::{Display, IntoEnumIterator}; +use strum_macros::{AsRefStr, EnumIter}; + +use crate::deployment_definitions::{Environment, ServicePort}; +use crate::deployments::IDLE_CONNECTIONS_FOR_AUTOSCALED_SERVICES; +use crate::k8s::{ + get_environment_ingress_internal, + get_ingress, + Controller, + Ingress, + IngressParams, + Resource, + Resources, + Toleration, +}; +use crate::service::{GetComponentConfigs, NodeService, ServiceNameInner}; +use crate::utils::determine_port_numbers; + +pub const DISTRIBUTED_NODE_REQUIRED_PORTS_NUM: usize = 9; + +const BASE_PORT: u16 = 15000; // TODO(Tsabary): arbitrary port, need to resolve. +const BATCHER_STORAGE: usize = 500; +const CLASS_MANAGER_STORAGE: usize = 500; +const STATE_SYNC_STORAGE: usize = 500; + +// TODO(Tsabary): define consts and functions wherever relevant.
+ +#[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, Serialize, AsRefStr, EnumIter)] +#[strum(serialize_all = "snake_case")] +pub enum DistributedNodeServiceName { + Batcher, + ClassManager, + ConsensusManager, + HttpServer, + Gateway, + L1, + Mempool, + SierraCompiler, + StateSync, +} + +// Implement conversion from `DistributedNodeServiceName` to `NodeService` +impl From<DistributedNodeServiceName> for NodeService { + fn from(service: DistributedNodeServiceName) -> Self { + NodeService::Distributed(service) + } +} + +impl GetComponentConfigs for DistributedNodeServiceName { + fn get_component_configs(ports: Option<Vec<u16>>) -> IndexMap<NodeService, ComponentConfig> { + let ports = determine_port_numbers(ports, DISTRIBUTED_NODE_REQUIRED_PORTS_NUM, BASE_PORT); + + let batcher = DistributedNodeServiceName::Batcher.component_config_pair(ports[0]); + let class_manager = + DistributedNodeServiceName::ClassManager.component_config_pair(ports[1]); + let gateway = DistributedNodeServiceName::Gateway.component_config_pair(ports[2]); + let l1_gas_price_provider = DistributedNodeServiceName::L1.component_config_pair(ports[3]); + let l1_provider = DistributedNodeServiceName::L1.component_config_pair(ports[4]); + let l1_endpoint_monitor = DistributedNodeServiceName::L1.component_config_pair(ports[5]); + let mempool = DistributedNodeServiceName::Mempool.component_config_pair(ports[6]); + let sierra_compiler = + DistributedNodeServiceName::SierraCompiler.component_config_pair(ports[7]); + let state_sync = DistributedNodeServiceName::StateSync.component_config_pair(ports[8]); + + let mut component_config_map = IndexMap::<NodeService, ComponentConfig>::new(); + for inner_service_name in DistributedNodeServiceName::iter() { + let component_config = match inner_service_name { + DistributedNodeServiceName::Batcher => get_batcher_component_config( + batcher.local(), + class_manager.remote(), + l1_provider.remote(), + mempool.remote(), + ), + DistributedNodeServiceName::ClassManager => get_class_manager_component_config( + class_manager.local(), + sierra_compiler.remote(), + ), + DistributedNodeServiceName::ConsensusManager => { + get_consensus_manager_component_config( + batcher.remote(), + class_manager.remote(), + l1_gas_price_provider.remote(), + state_sync.remote(), + ) + } + DistributedNodeServiceName::HttpServer => { + get_http_server_component_config(gateway.remote()) + } + DistributedNodeServiceName::Gateway => get_gateway_component_config( + gateway.local(), + class_manager.remote(), + mempool.remote(), + state_sync.remote(), + ), + DistributedNodeServiceName::L1 => get_l1_component_config( + l1_gas_price_provider.local(), + l1_provider.local(), + l1_endpoint_monitor.local(), + state_sync.remote(), + batcher.remote(), + ), + DistributedNodeServiceName::Mempool => get_mempool_component_config( + mempool.local(), + class_manager.remote(), + gateway.remote(), + ), + DistributedNodeServiceName::SierraCompiler => { + get_sierra_compiler_component_config(sierra_compiler.local()) + } + DistributedNodeServiceName::StateSync => { + get_state_sync_component_config(state_sync.local(), class_manager.remote()) + } + }; + let node_service = inner_service_name.into(); + component_config_map.insert(node_service, component_config); + } + component_config_map + } +}
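Port assignment in this layout: `determine_port_numbers` (imported from crate::utils, not shown in this diff) appears to yield `DISTRIBUTED_NODE_REQUIRED_PORTS_NUM` port numbers, defaulting to consecutive values from `BASE_PORT` when none are supplied; the index-to-service mapping follows the bindings above. A sketch under that assumption:

fn main() {
    const BASE_PORT: u16 = 15000;
    let ports: Vec<u16> = (0u16..9).map(|i| BASE_PORT + i).collect();
    // ports[0] batcher, [1] class manager, [2] gateway, [3..=5] the L1 trio
    // (gas price provider, provider, endpoint monitor), [6] mempool,
    // [7] sierra compiler, [8] state sync.
    assert_eq!((ports[0], ports[8]), (15000, 15008));
}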
+// TODO(Tsabary): for each service, update all values. +impl ServiceNameInner for DistributedNodeServiceName { + fn get_controller(&self) -> Controller { + match self { + DistributedNodeServiceName::Batcher => Controller::StatefulSet, + DistributedNodeServiceName::ClassManager => Controller::StatefulSet, + DistributedNodeServiceName::ConsensusManager => Controller::StatefulSet, + DistributedNodeServiceName::HttpServer => Controller::Deployment, + DistributedNodeServiceName::Gateway => Controller::Deployment, + DistributedNodeServiceName::L1 => Controller::Deployment, + DistributedNodeServiceName::Mempool => Controller::Deployment, + DistributedNodeServiceName::SierraCompiler => Controller::Deployment, + DistributedNodeServiceName::StateSync => Controller::StatefulSet, + } + } + + fn get_autoscale(&self) -> bool { + match self { + DistributedNodeServiceName::Batcher => false, + DistributedNodeServiceName::ClassManager => false, + DistributedNodeServiceName::ConsensusManager => false, + DistributedNodeServiceName::HttpServer => false, + DistributedNodeServiceName::Gateway => true, + DistributedNodeServiceName::L1 => false, + DistributedNodeServiceName::Mempool => false, + DistributedNodeServiceName::SierraCompiler => true, + DistributedNodeServiceName::StateSync => false, + } + } + + fn get_toleration(&self, environment: &Environment) -> Option<Toleration> { + match environment { + Environment::CloudK8s(_) => match self { + DistributedNodeServiceName::Batcher => Some(Toleration::ApolloCoreService), + DistributedNodeServiceName::ClassManager => Some(Toleration::ApolloGeneralService), + DistributedNodeServiceName::ConsensusManager => Some(Toleration::ApolloCoreService), + DistributedNodeServiceName::HttpServer => Some(Toleration::ApolloGeneralService), + DistributedNodeServiceName::Gateway => Some(Toleration::ApolloGeneralService), + DistributedNodeServiceName::L1 => Some(Toleration::ApolloGeneralService), + DistributedNodeServiceName::Mempool => Some(Toleration::ApolloCoreService), + DistributedNodeServiceName::SierraCompiler => { + Some(Toleration::ApolloGeneralService) + } + DistributedNodeServiceName::StateSync => Some(Toleration::ApolloGeneralService), + }, + Environment::LocalK8s => None, + } + } + + fn get_ingress( + &self, + environment: &Environment, + ingress_params: IngressParams, + ) -> Option<Ingress> { + match self { + DistributedNodeServiceName::Batcher => None, + DistributedNodeServiceName::ClassManager => None, + DistributedNodeServiceName::ConsensusManager => None, + DistributedNodeServiceName::HttpServer => { + get_ingress(ingress_params, get_environment_ingress_internal(environment)) + } + DistributedNodeServiceName::Gateway => None, + DistributedNodeServiceName::L1 => None, + DistributedNodeServiceName::Mempool => None, + DistributedNodeServiceName::SierraCompiler => None, + DistributedNodeServiceName::StateSync => None, + } + } + + fn has_p2p_interface(&self) -> bool { + match self { + DistributedNodeServiceName::ConsensusManager + | DistributedNodeServiceName::Mempool + | DistributedNodeServiceName::StateSync => true, + DistributedNodeServiceName::Batcher + | DistributedNodeServiceName::ClassManager + | DistributedNodeServiceName::HttpServer + | DistributedNodeServiceName::Gateway + | DistributedNodeServiceName::L1 + | DistributedNodeServiceName::SierraCompiler => false, + } + } + + fn get_storage(&self, environment: &Environment) -> Option<usize> { + match environment { + Environment::CloudK8s(_) => match self { + DistributedNodeServiceName::Batcher => Some(BATCHER_STORAGE), + DistributedNodeServiceName::ClassManager => Some(CLASS_MANAGER_STORAGE), +
DistributedNodeServiceName::ConsensusManager => None, + DistributedNodeServiceName::HttpServer => None, + DistributedNodeServiceName::Gateway => None, + DistributedNodeServiceName::L1 => None, + DistributedNodeServiceName::Mempool => None, + DistributedNodeServiceName::SierraCompiler => None, + DistributedNodeServiceName::StateSync => Some(STATE_SYNC_STORAGE), + }, + Environment::LocalK8s => None, + } + } + + fn get_resources(&self, _environment: &Environment) -> Resources { + Resources::new(Resource::new(1, 2), Resource::new(4, 8)) + } + + fn get_replicas(&self, _environment: &Environment) -> usize { + 1 + } + + fn get_anti_affinity(&self, environment: &Environment) -> bool { + match environment { + Environment::CloudK8s(_) => match self { + DistributedNodeServiceName::Batcher => true, + DistributedNodeServiceName::ClassManager => false, + DistributedNodeServiceName::ConsensusManager => false, + DistributedNodeServiceName::HttpServer => false, + DistributedNodeServiceName::Gateway => false, + DistributedNodeServiceName::L1 => false, + DistributedNodeServiceName::Mempool => true, + DistributedNodeServiceName::SierraCompiler => false, + DistributedNodeServiceName::StateSync => false, + }, + Environment::LocalK8s => false, + } + } + + // TODO(Nadin): Implement this method to return the actual ports used by the service. + fn get_ports(&self) -> BTreeMap<ServicePort, u16> { + BTreeMap::new() + } +} + +impl DistributedNodeServiceName { + // TODO(Tsabary): there's code duplication here that needs to be removed, especially with + // respect to the hybrid node. + + /// Returns a component execution config for a component that runs locally, and accepts inbound + /// connections from remote components. + fn component_config_for_local_service(&self, port: u16) -> ReactiveComponentExecutionConfig { + ReactiveComponentExecutionConfig::local_with_remote_enabled( + self.k8s_service_name(), + IpAddr::from(Ipv4Addr::UNSPECIFIED), + port, + ) + } + + /// Returns a component execution config for a component that is accessed remotely. + fn component_config_for_remote_service(&self, port: u16) -> ReactiveComponentExecutionConfig { + let mut base = ReactiveComponentExecutionConfig::remote( + self.k8s_service_name(), + IpAddr::from(Ipv4Addr::UNSPECIFIED), + port, + ); + match self { + DistributedNodeServiceName::Gateway | DistributedNodeServiceName::SierraCompiler => { + base.remote_client_config.idle_connections = + IDLE_CONNECTIONS_FOR_AUTOSCALED_SERVICES + } + DistributedNodeServiceName::Batcher + | DistributedNodeServiceName::ClassManager + | DistributedNodeServiceName::ConsensusManager + | DistributedNodeServiceName::HttpServer + | DistributedNodeServiceName::L1 + | DistributedNodeServiceName::Mempool + | DistributedNodeServiceName::StateSync => {} + }; + base + } + + fn component_config_pair(&self, port: u16) -> DistributedNodeServiceConfigPair { + DistributedNodeServiceConfigPair { + local: self.component_config_for_local_service(port), + remote: self.component_config_for_remote_service(port), + } + } +} + +/// Component config bundling for services of a distributed node: a config to run a component +/// locally while being accessible to other services, and a matching config that enables those +/// services to access it.
+struct DistributedNodeServiceConfigPair { + local: ReactiveComponentExecutionConfig, + remote: ReactiveComponentExecutionConfig, +} + +impl DistributedNodeServiceConfigPair { + fn local(&self) -> ReactiveComponentExecutionConfig { + self.local.clone() + } + + fn remote(&self) -> ReactiveComponentExecutionConfig { + self.remote.clone() + } +} + +fn get_batcher_component_config( + batcher_local_config: ReactiveComponentExecutionConfig, + class_manager_remote_config: ReactiveComponentExecutionConfig, + l1_provider_remote_config: ReactiveComponentExecutionConfig, + mempool_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.batcher = batcher_local_config; + config.class_manager = class_manager_remote_config; + config.l1_provider = l1_provider_remote_config; + config.mempool = mempool_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_class_manager_component_config( + class_manager_local_config: ReactiveComponentExecutionConfig, + sierra_compiler_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.class_manager = class_manager_local_config; + config.sierra_compiler = sierra_compiler_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_gateway_component_config( + gateway_local_config: ReactiveComponentExecutionConfig, + class_manager_remote_config: ReactiveComponentExecutionConfig, + mempool_remote_config: ReactiveComponentExecutionConfig, + state_sync_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.gateway = gateway_local_config; + config.class_manager = class_manager_remote_config; + config.mempool = mempool_remote_config; + config.state_sync = state_sync_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_mempool_component_config( + mempool_local_config: ReactiveComponentExecutionConfig, + class_manager_remote_config: ReactiveComponentExecutionConfig, + gateway_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.mempool = mempool_local_config; + config.mempool_p2p = ReactiveComponentExecutionConfig::local_with_remote_disabled(); + config.class_manager = class_manager_remote_config; + config.gateway = gateway_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_sierra_compiler_component_config( + sierra_compiler_local_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.sierra_compiler = sierra_compiler_local_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_state_sync_component_config( + state_sync_local_config: ReactiveComponentExecutionConfig, + class_manager_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.state_sync = state_sync_local_config; + config.class_manager = class_manager_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_consensus_manager_component_config( + batcher_remote_config: ReactiveComponentExecutionConfig, + class_manager_remote_config: ReactiveComponentExecutionConfig, 
+ l1_gas_price_provider_remote_config: ReactiveComponentExecutionConfig, + state_sync_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.consensus_manager = ActiveComponentExecutionConfig::enabled(); + config.batcher = batcher_remote_config; + config.class_manager = class_manager_remote_config; + config.l1_gas_price_provider = l1_gas_price_provider_remote_config; + config.state_sync = state_sync_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_http_server_component_config( + gateway_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.http_server = ActiveComponentExecutionConfig::enabled(); + config.gateway = gateway_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_l1_component_config( + l1_gas_price_provider_local_config: ReactiveComponentExecutionConfig, + l1_provider_local_config: ReactiveComponentExecutionConfig, + l1_endpoint_monitor_local_config: ReactiveComponentExecutionConfig, + state_sync_remote_config: ReactiveComponentExecutionConfig, + batcher_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + + config.l1_gas_price_provider = l1_gas_price_provider_local_config; + config.l1_gas_price_scraper = ActiveComponentExecutionConfig::enabled(); + config.l1_provider = l1_provider_local_config; + config.l1_scraper = ActiveComponentExecutionConfig::enabled(); + config.l1_endpoint_monitor = l1_endpoint_monitor_local_config; + config.state_sync = state_sync_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config.batcher = batcher_remote_config; + config +} diff --git a/crates/apollo_deployments/src/deployments/hybrid.rs b/crates/apollo_deployments/src/deployments/hybrid.rs new file mode 100644 index 00000000000..85554ad7c85 --- /dev/null +++ b/crates/apollo_deployments/src/deployments/hybrid.rs @@ -0,0 +1,592 @@ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr}; + +use apollo_infra_utils::template::Template; +use apollo_node::config::component_config::ComponentConfig; +use apollo_node::config::component_execution_config::{ + ActiveComponentExecutionConfig, + ReactiveComponentExecutionConfig, +}; +use indexmap::IndexMap; +use serde::Serialize; +use strum::{Display, IntoEnumIterator}; +use strum_macros::{AsRefStr, EnumIter}; + +use crate::addresses::{get_p2p_address, get_peer_id}; +use crate::config_override::{ + ConfigOverride, + DeploymentConfigOverride, + InstanceConfigOverride, + NetworkConfigOverride, +}; +use crate::deployment::{build_service_namespace_domain_address, Deployment, P2PCommunicationType}; +use crate::deployment_definitions::{CloudK8sEnvironment, Environment, ServicePort}; +use crate::deployments::IDLE_CONNECTIONS_FOR_AUTOSCALED_SERVICES; +use crate::k8s::{ + get_environment_ingress_internal, + get_ingress, + Controller, + ExternalSecret, + Ingress, + IngressParams, + K8sServiceConfigParams, + Resource, + Resources, + Toleration, +}; +use crate::service::{GetComponentConfigs, NodeService, NodeType, ServiceNameInner}; +use crate::utils::{determine_port_numbers, get_validator_id}; + +pub const HYBRID_NODE_REQUIRED_PORTS_NUM: usize = 8; +pub(crate) const INSTANCE_NAME_FORMAT: Template = Template("hybrid_{}"); + +const BASE_PORT: u16 = 55000; // TODO(Tsabary): arbitrary port, 
need to resolve.
+const CORE_STORAGE: usize = 1000;
+const MAX_NODE_ID: usize = 9; // Currently supporting up to 9 nodes, to avoid more complicated string manipulations.
+
+#[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, Serialize, AsRefStr, EnumIter)]
+#[strum(serialize_all = "snake_case")]
+pub enum HybridNodeServiceName {
+    Core, // Comprises the batcher, class manager, consensus manager, and state sync.
+    HttpServer,
+    Gateway,
+    L1, // Comprises the various l1 components.
+    Mempool,
+    SierraCompiler,
+}
+
+// Implement conversion from `HybridNodeServiceName` to `NodeService`.
+impl From<HybridNodeServiceName> for NodeService {
+    fn from(service: HybridNodeServiceName) -> Self {
+        NodeService::Hybrid(service)
+    }
+}
+
+impl GetComponentConfigs for HybridNodeServiceName {
+    fn get_component_configs(
+        ports: Option<Vec<u16>>,
+    ) -> IndexMap<NodeService, ComponentConfig> {
+        let mut component_config_map = IndexMap::<NodeService, ComponentConfig>::new();
+
+        let ports = determine_port_numbers(ports, HYBRID_NODE_REQUIRED_PORTS_NUM, BASE_PORT);
+
+        let batcher = HybridNodeServiceName::Core.component_config_pair(ports[0]);
+        let class_manager = HybridNodeServiceName::Core.component_config_pair(ports[1]);
+        let gateway = HybridNodeServiceName::Gateway.component_config_pair(ports[2]);
+        let l1_gas_price_provider = HybridNodeServiceName::Core.component_config_pair(ports[3]);
+        let l1_provider = HybridNodeServiceName::Core.component_config_pair(ports[4]);
+        let mempool = HybridNodeServiceName::Mempool.component_config_pair(ports[5]);
+        let sierra_compiler =
+            HybridNodeServiceName::SierraCompiler.component_config_pair(ports[6]);
+        let state_sync = HybridNodeServiceName::Core.component_config_pair(ports[7]);
+
+        for inner_service_name in HybridNodeServiceName::iter() {
+            let component_config = match inner_service_name {
+                HybridNodeServiceName::Core => get_core_component_config(
+                    batcher.local(),
+                    class_manager.local(),
+                    l1_gas_price_provider.remote(),
+                    l1_provider.remote(),
+                    state_sync.local(),
+                    mempool.remote(),
+                    sierra_compiler.remote(),
+                ),
+                HybridNodeServiceName::HttpServer => {
+                    get_http_server_component_config(gateway.remote())
+                }
+                HybridNodeServiceName::Gateway => get_gateway_component_config(
+                    gateway.local(),
+                    class_manager.remote(),
+                    mempool.remote(),
+                    state_sync.remote(),
+                ),
+                HybridNodeServiceName::L1 => get_l1_component_config(
+                    l1_gas_price_provider.local(),
+                    l1_provider.local(),
+                    batcher.remote(),
+                    state_sync.remote(),
+                ),
+                HybridNodeServiceName::Mempool => get_mempool_component_config(
+                    mempool.local(),
+                    class_manager.remote(),
+                    gateway.remote(),
+                ),
+                HybridNodeServiceName::SierraCompiler => {
+                    get_sierra_compiler_component_config(sierra_compiler.local())
+                }
+            };
+            let node_service = inner_service_name.into();
+            component_config_map.insert(node_service, component_config);
+        }
+        component_config_map
+    }
+}
+
+// TODO(Tsabary): per each service, update all values.
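+// In short: `Core` is the only stateful service (StatefulSet, with storage on cloud
+// environments), and only `Gateway` and `SierraCompiler` autoscale; see the per-method
+// matches below.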
+impl ServiceNameInner for HybridNodeServiceName {
+    fn get_controller(&self) -> Controller {
+        match self {
+            HybridNodeServiceName::Core => Controller::StatefulSet,
+            HybridNodeServiceName::HttpServer => Controller::Deployment,
+            HybridNodeServiceName::Gateway => Controller::Deployment,
+            HybridNodeServiceName::L1 => Controller::Deployment,
+            HybridNodeServiceName::Mempool => Controller::Deployment,
+            HybridNodeServiceName::SierraCompiler => Controller::Deployment,
+        }
+    }
+
+    fn get_autoscale(&self) -> bool {
+        match self {
+            HybridNodeServiceName::Core => false,
+            HybridNodeServiceName::HttpServer => false,
+            HybridNodeServiceName::Gateway => true,
+            HybridNodeServiceName::L1 => false,
+            HybridNodeServiceName::Mempool => false,
+            HybridNodeServiceName::SierraCompiler => true,
+        }
+    }
+
+    fn get_toleration(&self, environment: &Environment) -> Option<Toleration> {
+        match environment {
+            Environment::CloudK8s(cloud_env) => match cloud_env {
+                CloudK8sEnvironment::SepoliaIntegration | CloudK8sEnvironment::UpgradeTest => {
+                    match self {
+                        HybridNodeServiceName::Core | HybridNodeServiceName::Mempool => {
+                            Some(Toleration::ApolloCoreService)
+                        }
+                        HybridNodeServiceName::HttpServer
+                        | HybridNodeServiceName::Gateway
+                        | HybridNodeServiceName::L1
+                        | HybridNodeServiceName::SierraCompiler => {
+                            Some(Toleration::ApolloGeneralService)
+                        }
+                    }
+                }
+                CloudK8sEnvironment::Mainnet
+                | CloudK8sEnvironment::SepoliaTestnet
+                | CloudK8sEnvironment::StressTest => match self {
+                    HybridNodeServiceName::Core => Some(Toleration::ApolloCoreServiceC2D56),
+                    HybridNodeServiceName::HttpServer
+                    | HybridNodeServiceName::Gateway
+                    | HybridNodeServiceName::L1
+                    | HybridNodeServiceName::SierraCompiler => {
+                        Some(Toleration::ApolloGeneralService)
+                    }
+                    HybridNodeServiceName::Mempool => Some(Toleration::ApolloCoreService),
+                },
+                CloudK8sEnvironment::Potc2 => match self {
+                    HybridNodeServiceName::Core => Some(Toleration::Batcher864),
+                    HybridNodeServiceName::HttpServer
+                    | HybridNodeServiceName::Gateway
+                    | HybridNodeServiceName::L1
+                    | HybridNodeServiceName::SierraCompiler => {
+                        Some(Toleration::ApolloGeneralService)
+                    }
+                    HybridNodeServiceName::Mempool => Some(Toleration::ApolloCoreService),
+                },
+            },
+            Environment::LocalK8s => None,
+        }
+    }
+
+    fn get_ingress(
+        &self,
+        environment: &Environment,
+        ingress_params: IngressParams,
+    ) -> Option<Ingress> {
+        match self {
+            HybridNodeServiceName::Core => None,
+            HybridNodeServiceName::HttpServer => {
+                get_ingress(ingress_params, get_environment_ingress_internal(environment))
+            }
+            HybridNodeServiceName::Gateway => None,
+            HybridNodeServiceName::L1 => None,
+            HybridNodeServiceName::Mempool => None,
+            HybridNodeServiceName::SierraCompiler => None,
+        }
+    }
+
+    fn has_p2p_interface(&self) -> bool {
+        match self {
+            HybridNodeServiceName::Core | HybridNodeServiceName::Mempool => true,
+            HybridNodeServiceName::HttpServer
+            | HybridNodeServiceName::Gateway
+            | HybridNodeServiceName::L1
+            | HybridNodeServiceName::SierraCompiler => false,
+        }
+    }
+
+    fn get_storage(&self, environment: &Environment) -> Option<usize> {
+        match environment {
+            Environment::CloudK8s(_) => match self {
+                HybridNodeServiceName::Core => Some(CORE_STORAGE),
+                HybridNodeServiceName::HttpServer
+                | HybridNodeServiceName::Gateway
+                | HybridNodeServiceName::L1
+                | HybridNodeServiceName::Mempool
+                | HybridNodeServiceName::SierraCompiler => None,
+            },
+            Environment::LocalK8s => None,
+        }
+    }
+
+    fn get_resources(&self, environment: &Environment) -> Resources {
+        match environment {
+            Environment::CloudK8s(cloud_env) => match
cloud_env { + CloudK8sEnvironment::SepoliaIntegration | CloudK8sEnvironment::UpgradeTest => { + match self { + HybridNodeServiceName::Core => { + Resources::new(Resource::new(2, 4), Resource::new(7, 14)) + } + HybridNodeServiceName::HttpServer => { + Resources::new(Resource::new(1, 2), Resource::new(4, 8)) + } + HybridNodeServiceName::Gateway => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + HybridNodeServiceName::L1 => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + HybridNodeServiceName::Mempool => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + HybridNodeServiceName::SierraCompiler => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + } + } + CloudK8sEnvironment::Potc2 + | CloudK8sEnvironment::Mainnet + | CloudK8sEnvironment::SepoliaTestnet + | CloudK8sEnvironment::StressTest => match self { + HybridNodeServiceName::Core => { + Resources::new(Resource::new(50, 200), Resource::new(50, 220)) + } + HybridNodeServiceName::HttpServer => { + Resources::new(Resource::new(1, 2), Resource::new(4, 8)) + } + HybridNodeServiceName::Gateway => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + HybridNodeServiceName::L1 => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + HybridNodeServiceName::Mempool => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + HybridNodeServiceName::SierraCompiler => { + Resources::new(Resource::new(1, 2), Resource::new(2, 4)) + } + }, + }, + Environment::LocalK8s => Resources::new(Resource::new(1, 2), Resource::new(4, 8)), + } + } + + fn get_replicas(&self, environment: &Environment) -> usize { + match environment { + Environment::CloudK8s(_) => match self { + HybridNodeServiceName::Core => 1, + HybridNodeServiceName::HttpServer => 1, + HybridNodeServiceName::Gateway => 2, + HybridNodeServiceName::L1 => 1, + HybridNodeServiceName::Mempool => 1, + HybridNodeServiceName::SierraCompiler => 2, + }, + Environment::LocalK8s => 1, + } + } + + fn get_anti_affinity(&self, environment: &Environment) -> bool { + match environment { + Environment::CloudK8s(_) => match self { + HybridNodeServiceName::Core => true, + HybridNodeServiceName::HttpServer => false, + HybridNodeServiceName::Gateway => false, + HybridNodeServiceName::L1 => false, + HybridNodeServiceName::Mempool => true, + HybridNodeServiceName::SierraCompiler => false, + }, + Environment::LocalK8s => false, + } + } + + // TODO(Nadin): Implement this method to return the actual ports used by the service. + fn get_ports(&self) -> BTreeMap { + BTreeMap::new() + } +} + +impl HybridNodeServiceName { + /// Returns a component execution config for a component that runs locally, and accepts inbound + /// connections from remote components. + fn component_config_for_local_service(&self, port: u16) -> ReactiveComponentExecutionConfig { + ReactiveComponentExecutionConfig::local_with_remote_enabled( + self.k8s_service_name(), + IpAddr::from(Ipv4Addr::UNSPECIFIED), + port, + ) + } + + /// Returns a component execution config for a component that is accessed remotely. 
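+    /// For the autoscaled services (gateway and sierra compiler), the remote client is
+    /// configured with a dedicated idle-connections limit; see the match below.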
+ fn component_config_for_remote_service(&self, port: u16) -> ReactiveComponentExecutionConfig { + let mut base = ReactiveComponentExecutionConfig::remote( + self.k8s_service_name(), + IpAddr::from(Ipv4Addr::UNSPECIFIED), + port, + ); + match self { + HybridNodeServiceName::Gateway | HybridNodeServiceName::SierraCompiler => { + base.remote_client_config.idle_connections = + IDLE_CONNECTIONS_FOR_AUTOSCALED_SERVICES; + } + HybridNodeServiceName::Core + | HybridNodeServiceName::HttpServer + | HybridNodeServiceName::L1 + | HybridNodeServiceName::Mempool => {} + }; + base + } + + fn component_config_pair(&self, port: u16) -> HybridNodeServiceConfigPair { + HybridNodeServiceConfigPair { + local: self.component_config_for_local_service(port), + remote: self.component_config_for_remote_service(port), + } + } +} + +/// Component config bundling for services of a hybrid node: a config to run a component +/// locally while being accessible to other services, and a suitable config enabling such services +/// the access. +struct HybridNodeServiceConfigPair { + local: ReactiveComponentExecutionConfig, + remote: ReactiveComponentExecutionConfig, +} + +impl HybridNodeServiceConfigPair { + fn local(&self) -> ReactiveComponentExecutionConfig { + self.local.clone() + } + + fn remote(&self) -> ReactiveComponentExecutionConfig { + self.remote.clone() + } +} + +#[allow(clippy::too_many_arguments)] +fn get_core_component_config( + batcher_local_config: ReactiveComponentExecutionConfig, + class_manager_local_config: ReactiveComponentExecutionConfig, + l1_gas_price_provider_remote_config: ReactiveComponentExecutionConfig, + l1_provider_remote_config: ReactiveComponentExecutionConfig, + state_sync_local_config: ReactiveComponentExecutionConfig, + mempool_remote_config: ReactiveComponentExecutionConfig, + sierra_compiler_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.batcher = batcher_local_config; + config.class_manager = class_manager_local_config; + config.consensus_manager = ActiveComponentExecutionConfig::enabled(); + config.l1_gas_price_provider = l1_gas_price_provider_remote_config; + config.l1_provider = l1_provider_remote_config; + config.sierra_compiler = sierra_compiler_remote_config; + config.state_sync = state_sync_local_config; + config.mempool = mempool_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_gateway_component_config( + gateway_local_config: ReactiveComponentExecutionConfig, + class_manager_remote_config: ReactiveComponentExecutionConfig, + mempool_remote_config: ReactiveComponentExecutionConfig, + state_sync_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.gateway = gateway_local_config; + config.class_manager = class_manager_remote_config; + config.mempool = mempool_remote_config; + config.state_sync = state_sync_remote_config; + config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled(); + config +} + +fn get_l1_component_config( + l1_gas_price_provider_local_config: ReactiveComponentExecutionConfig, + l1_provider_local_config: ReactiveComponentExecutionConfig, + batcher_remote_config: ReactiveComponentExecutionConfig, + state_sync_remote_config: ReactiveComponentExecutionConfig, +) -> ComponentConfig { + let mut config = ComponentConfig::disabled(); + config.batcher = batcher_remote_config; + config.l1_gas_price_provider = 
l1_gas_price_provider_local_config;
+    config.l1_gas_price_scraper = ActiveComponentExecutionConfig::enabled();
+    config.l1_provider = l1_provider_local_config;
+    config.l1_scraper = ActiveComponentExecutionConfig::enabled();
+    config.l1_endpoint_monitor = ReactiveComponentExecutionConfig::local_with_remote_disabled();
+    config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled();
+    config.state_sync = state_sync_remote_config;
+    config
+}
+
+fn get_mempool_component_config(
+    mempool_local_config: ReactiveComponentExecutionConfig,
+    class_manager_remote_config: ReactiveComponentExecutionConfig,
+    gateway_remote_config: ReactiveComponentExecutionConfig,
+) -> ComponentConfig {
+    let mut config = ComponentConfig::disabled();
+    config.mempool = mempool_local_config;
+    config.mempool_p2p = ReactiveComponentExecutionConfig::local_with_remote_disabled();
+    config.class_manager = class_manager_remote_config;
+    config.gateway = gateway_remote_config;
+    config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled();
+    config
+}
+
+fn get_sierra_compiler_component_config(
+    sierra_compiler_local_config: ReactiveComponentExecutionConfig,
+) -> ComponentConfig {
+    let mut config = ComponentConfig::disabled();
+    config.sierra_compiler = sierra_compiler_local_config;
+    config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled();
+    config
+}
+
+fn get_http_server_component_config(
+    gateway_remote_config: ReactiveComponentExecutionConfig,
+) -> ComponentConfig {
+    let mut config = ComponentConfig::disabled();
+    config.http_server = ActiveComponentExecutionConfig::enabled();
+    config.gateway = gateway_remote_config;
+    config.monitoring_endpoint = ActiveComponentExecutionConfig::enabled();
+    config
+}
+
+// TODO(Tsabary): unify these into inner structs.
+#[allow(clippy::too_many_arguments)]
+pub(crate) fn hybrid_deployment(
+    id: usize,
+    p2p_communication_type: P2PCommunicationType,
+    environment: Environment,
+    instance_name_format: &Template,
+    secret_name_format: &Template,
+    deployment_config_override: DeploymentConfigOverride,
+    node_namespace_format: &Template,
+    ingress_domain: &str,
+    http_server_ingress_alternative_name: &str,
+    k8s_service_config_params: Option<K8sServiceConfigParams>,
+) -> Deployment {
+    Deployment::new(
+        NodeType::Hybrid,
+        environment,
+        &instance_name_format.format(&[&id]),
+        Some(ExternalSecret::new(secret_name_format.format(&[&id]))),
+        ConfigOverride::new(
+            deployment_config_override,
+            create_hybrid_instance_config_override(
+                id,
+                node_namespace_format,
+                p2p_communication_type,
+                ingress_domain,
+            ),
+        ),
+        IngressParams::new(
+            ingress_domain.to_string(),
+            Some(vec![http_server_ingress_alternative_name.into()]),
+        ),
+        k8s_service_config_params,
+    )
+}
+
+pub(crate) fn create_hybrid_instance_config_override(
+    node_id: usize,
+    node_namespace_format: &Template,
+    p2p_communication_type: P2PCommunicationType,
+    domain: &str,
+) -> InstanceConfigOverride {
+    assert!(
+        node_id < MAX_NODE_ID,
+        "Node id {} is out of range; must be less than {}",
+        node_id,
+        MAX_NODE_ID
+    );
+
+    // TODO(Tsabary): these ports should be derived from the hybrid deployment module, and used
+    // consistently throughout the code.
+ const CORE_SERVICE_PORT: u16 = 53080; + const MEMPOOL_SERVICE_PORT: u16 = 53200; + + let bootstrap_node_id = 0; + let bootstrap_peer_id = get_peer_id(bootstrap_node_id); + let node_peer_id = get_peer_id(node_id); + + let sanitized_domain = p2p_communication_type.get_p2p_domain(domain); + + let build_peer_address = + |node_service: HybridNodeServiceName, port: u16, node_id: usize, peer_id: &str| { + let domain = build_service_namespace_domain_address( + &node_service.k8s_service_name(), + &node_namespace_format.format(&[&node_id]), + &sanitized_domain, + ); + Some(get_p2p_address(&domain, port, peer_id)) + }; + + let (consensus_bootstrap_peer_multiaddr, mempool_bootstrap_peer_multiaddr) = match node_id { + 0 => { + // First node does not have a bootstrap peer. + (None, None) + } + _ => { + // Other nodes have the first node as a bootstrap peer. + ( + build_peer_address( + HybridNodeServiceName::Core, + CORE_SERVICE_PORT, + bootstrap_node_id, + &bootstrap_peer_id, + ), + build_peer_address( + HybridNodeServiceName::Mempool, + MEMPOOL_SERVICE_PORT, + bootstrap_node_id, + &bootstrap_peer_id, + ), + ) + } + }; + + let (consensus_advertised_multiaddr, mempool_advertised_multiaddr) = + match p2p_communication_type { + P2PCommunicationType::Internal => + // No advertised addresses for internal communication. + { + (None, None) + } + P2PCommunicationType::External => + // Advertised addresses for external communication. + { + ( + build_peer_address( + HybridNodeServiceName::Core, + CORE_SERVICE_PORT, + node_id, + &node_peer_id, + ), + build_peer_address( + HybridNodeServiceName::Mempool, + MEMPOOL_SERVICE_PORT, + node_id, + &node_peer_id, + ), + ) + } + }; + + InstanceConfigOverride::new( + NetworkConfigOverride::new( + consensus_bootstrap_peer_multiaddr, + consensus_advertised_multiaddr, + ), + NetworkConfigOverride::new(mempool_bootstrap_peer_multiaddr, mempool_advertised_multiaddr), + get_validator_id(node_id), + ) +} diff --git a/crates/apollo_deployments/src/k8s.rs b/crates/apollo_deployments/src/k8s.rs new file mode 100644 index 00000000000..4bb61ceff43 --- /dev/null +++ b/crates/apollo_deployments/src/k8s.rs @@ -0,0 +1,183 @@ +use serde::{Serialize, Serializer}; + +use crate::deployment::P2PCommunicationType; +use crate::deployment_definitions::Environment; + +// Controls whether external P2P communication is enabled. +const INTERNAL_ONLY_P2P_COMMUNICATION: bool = true; + +const INGRESS_ROUTE: &str = "/gateway"; +const INGRESS_PORT: u16 = 8080; + +#[derive(Clone, Copy, Debug, PartialEq, Serialize)] +pub enum Controller { + Deployment, + StatefulSet, +} + +#[derive(Clone, Copy, Debug, PartialEq, Serialize)] +pub enum K8SServiceType { + // TODO(Tsabary): remove dead_code annotations when instances require these variants. 
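+// Note: the variant is selected via `P2PCommunicationType::get_k8s_service_type` (not
+// shown here); currently only `LoadBalancer` is constructed, hence the `dead_code`
+// allowances on the remaining variants.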
+    #[allow(dead_code)]
+    ClusterIp,
+    LoadBalancer,
+    #[allow(dead_code)]
+    NodePort,
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct K8sServiceConfig {
+    #[serde(rename = "type")]
+    k8s_service_type: K8SServiceType,
+    external_dns_name: Option<String>,
+    internal: bool,
+}
+
+impl K8sServiceConfig {
+    pub fn new(
+        external_dns_name: Option<String>,
+        p2p_communication_type: P2PCommunicationType,
+    ) -> Self {
+        Self {
+            k8s_service_type: p2p_communication_type.get_k8s_service_type(),
+            external_dns_name,
+            internal: INTERNAL_ONLY_P2P_COMMUNICATION,
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct K8sServiceConfigParams {
+    pub namespace: String,
+    pub domain: String,
+    pub p2p_communication_type: P2PCommunicationType,
+}
+
+impl K8sServiceConfigParams {
+    pub fn new(
+        namespace: String,
+        domain: String,
+        p2p_communication_type: P2PCommunicationType,
+    ) -> Self {
+        Self { namespace, domain, p2p_communication_type }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct Ingress {
+    #[serde(flatten)]
+    ingress_params: IngressParams,
+    internal: bool,
+    rules: Vec<IngressRule>,
+}
+
+impl Ingress {
+    pub fn new(ingress_params: IngressParams, internal: bool, rules: Vec<IngressRule>) -> Self {
+        Self { ingress_params, internal, rules }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct IngressParams {
+    domain: String,
+    #[serde(serialize_with = "serialize_none_as_empty_vec")]
+    alternative_names: Option<Vec<String>>,
+}
+
+fn serialize_none_as_empty_vec<S, T>(
+    value: &Option<Vec<T>>,
+    serializer: S,
+) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+    T: Serialize,
+{
+    match value {
+        Some(v) => serializer.serialize_some(v),
+        None => serializer.serialize_some(&Vec::<T>::new()),
+    }
+}
+
+impl IngressParams {
+    pub fn new(domain: String, alternative_names: Option<Vec<String>>) -> Self {
+        Self { domain, alternative_names }
+    }
+}
+
+pub(crate) fn get_ingress(ingress_params: IngressParams, internal: bool) -> Option<Ingress> {
+    Some(Ingress::new(
+        ingress_params,
+        internal,
+        vec![IngressRule::new(String::from(INGRESS_ROUTE), INGRESS_PORT, None)],
+    ))
+}
+
+pub(crate) fn get_environment_ingress_internal(environment: &Environment) -> bool {
+    match environment {
+        Environment::CloudK8s(_) => false,
+        Environment::LocalK8s => true,
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct IngressRule {
+    path: String,
+    port: u16,
+    backend: Option<String>,
+}
+
+impl IngressRule {
+    pub fn new(path: String, port: u16, backend: Option<String>) -> Self {
+        Self { path, port, backend }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct ExternalSecret {
+    gcsm_key: String,
+}
+
+impl ExternalSecret {
+    pub fn new(gcsm_key: impl ToString) -> Self {
+        Self { gcsm_key: gcsm_key.to_string() }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct Resource {
+    cpu: usize,
+    memory: usize,
+}
+
+impl Resource {
+    pub fn new(cpu: usize, memory: usize) -> Self {
+        Self { cpu, memory }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct Resources {
+    requests: Resource,
+    limits: Resource,
+}
+
+impl Resources {
+    pub fn new(requests: Resource, limits: Resource) -> Self {
+        Self { requests, limits }
+    }
+}
+
+#[derive(Serialize, Debug, Clone, PartialEq)]
+#[serde(rename_all = "kebab-case")]
+pub enum Toleration {
+    ApolloCoreService,
+    #[serde(rename = "apollo-core-service-c2d-16")]
+    ApolloCoreServiceC2D16,
+    #[serde(rename = "apollo-core-service-c2d-32")]
+    ApolloCoreServiceC2D32,
+    #[serde(rename = "apollo-core-service-c2d-56")]
+    ApolloCoreServiceC2D56,
+    ApolloGeneralService,
+    #[serde(rename = "batcher-8-64")]
Batcher864,
+}
diff --git a/crates/apollo_deployments/src/lib.rs b/crates/apollo_deployments/src/lib.rs
new file mode 100644
index 00000000000..2069a5ce50f
--- /dev/null
+++ b/crates/apollo_deployments/src/lib.rs
@@ -0,0 +1,11 @@
+// TODO(Tsabary): reduce visibility when possible.
+pub(crate) mod addresses;
+pub(crate) mod config_override;
+pub(crate) mod deployment;
+pub mod deployment_definitions;
+pub mod deployments;
+pub(crate) mod k8s;
+pub mod service;
+#[cfg(test)]
+pub mod test_utils;
+pub(crate) mod utils;
diff --git a/crates/apollo_deployments/src/service.rs b/crates/apollo_deployments/src/service.rs
new file mode 100644
index 00000000000..8d76e365639
--- /dev/null
+++ b/crates/apollo_deployments/src/service.rs
@@ -0,0 +1,387 @@
+use std::collections::BTreeMap;
+use std::fmt::Display;
+use std::iter::once;
+use std::path::PathBuf;
+
+use apollo_config::dumping::{prepend_sub_config_name, SerializeConfig};
+use apollo_config::{ParamPath, SerializedParam};
+use apollo_infra_utils::dumping::serialize_to_file;
+#[cfg(test)]
+use apollo_infra_utils::dumping::serialize_to_file_test;
+use apollo_node::config::component_config::ComponentConfig;
+use apollo_node::config::config_utils::config_to_preset;
+use indexmap::IndexMap;
+use serde::ser::SerializeSeq;
+use serde::{Serialize, Serializer};
+use serde_json::json;
+use strum::{Display, EnumVariantNames, IntoEnumIterator};
+use strum_macros::{EnumDiscriminants, EnumIter, IntoStaticStr};
+
+use crate::deployment::build_service_namespace_domain_address;
+use crate::deployment_definitions::{Environment, ServicePort, CONFIG_BASE_DIR};
+use crate::deployments::consolidated::ConsolidatedNodeServiceName;
+use crate::deployments::distributed::DistributedNodeServiceName;
+use crate::deployments::hybrid::HybridNodeServiceName;
+use crate::k8s::{
+    Controller,
+    ExternalSecret,
+    Ingress,
+    IngressParams,
+    K8sServiceConfig,
+    K8sServiceConfigParams,
+    Resources,
+    Toleration,
+};
+#[cfg(test)]
+use crate::test_utils::FIX_BINARY_NAME;
+
+const SERVICES_DIR_NAME: &str = "services/";
+
+#[derive(Clone, Debug, PartialEq, Serialize)]
+pub struct Service {
+    #[serde(rename = "name")]
+    node_service: NodeService,
+    controller: Controller,
+    #[serde(serialize_with = "serialize_vec_strip_prefix")]
+    config_paths: Vec<String>,
+    ingress: Option<Ingress>,
+    k8s_service_config: Option<K8sServiceConfig>,
+    autoscale: bool,
+    replicas: usize,
+    storage: Option<usize>,
+    toleration: Option<Toleration>,
+    resources: Resources,
+    external_secret: Option<ExternalSecret>,
+    anti_affinity: bool,
+    ports: BTreeMap<ServicePort, u16>,
+}
+
+impl Service {
+    pub fn new(
+        node_service: NodeService,
+        external_secret: Option<ExternalSecret>,
+        config_filenames: Vec<String>,
+        ingress_params: IngressParams,
+        k8s_service_config_params: Option<K8sServiceConfigParams>,
+        environment: Environment,
+    ) -> Self {
+        // Configs are loaded by order such that a config may override previous ones.
+        // We first list the base config, and then follow with the overrides, and finally, the
+        // service config file.
+
+        // TODO(Tsabary): reduce visibility of relevant functions and consts.
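+        // E.g., if an override file and the service file both set the same key, the
+        // service file's value wins, as it is appended last (illustration).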
+ + let service_file_path = node_service.get_service_file_path(); + + let config_paths = + config_filenames.iter().cloned().chain(once(service_file_path)).collect(); + + let controller = node_service.get_controller(); + let autoscale = node_service.get_autoscale(); + let toleration = node_service.get_toleration(&environment); + let ingress = node_service.get_ingress(&environment, ingress_params); + let k8s_service_config = node_service.get_k8s_service_config(k8s_service_config_params); + let storage = node_service.get_storage(&environment); + let resources = node_service.get_resources(&environment); + let replicas = node_service.get_replicas(&environment); + let anti_affinity = node_service.get_anti_affinity(&environment); + let ports = node_service.get_ports(); + Self { + node_service, + config_paths, + controller, + ingress, + k8s_service_config, + autoscale, + replicas, + storage, + toleration, + resources, + external_secret, + anti_affinity, + ports, + } + } + + pub fn get_service_config_paths(&self) -> Vec { + self.config_paths.clone() + } +} + +fn serialize_vec_strip_prefix(vec: &Vec, serializer: S) -> Result +where + S: Serializer, +{ + let mut seq = serializer.serialize_seq(Some(vec.len()))?; + + for s in vec { + if let Some(stripped) = s.strip_prefix(CONFIG_BASE_DIR) { + seq.serialize_element(stripped)?; + } else { + return Err(serde::ser::Error::custom(format!( + "Expected all items to start with '{}', got '{}'", + CONFIG_BASE_DIR, s + ))); + } + } + + seq.end() +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, EnumDiscriminants)] +#[strum_discriminants( + name(NodeType), + derive(IntoStaticStr, EnumIter, EnumVariantNames, Serialize, Display), + strum(serialize_all = "snake_case") +)] +pub enum NodeService { + Consolidated(ConsolidatedNodeServiceName), + Hybrid(HybridNodeServiceName), + Distributed(DistributedNodeServiceName), +} + +impl NodeService { + fn get_config_file_path(&self) -> String { + let mut name = self.as_inner().to_string(); + name.push_str(".json"); + name + } + + pub fn create_service( + &self, + environment: &Environment, + external_secret: &Option, + config_filenames: Vec, + ingress_params: IngressParams, + k8s_service_config_params: Option, + ) -> Service { + Service::new( + Into::::into(*self), + external_secret.clone(), + config_filenames, + ingress_params.clone(), + k8s_service_config_params, + environment.clone(), + ) + } + + fn as_inner(&self) -> &dyn ServiceNameInner { + match self { + NodeService::Consolidated(inner) => inner, + NodeService::Hybrid(inner) => inner, + NodeService::Distributed(inner) => inner, + } + } + + pub fn get_controller(&self) -> Controller { + self.as_inner().get_controller() + } + + pub fn get_autoscale(&self) -> bool { + self.as_inner().get_autoscale() + } + + pub fn get_toleration(&self, environment: &Environment) -> Option { + self.as_inner().get_toleration(environment) + } + + pub fn get_ingress( + &self, + environment: &Environment, + ingress_params: IngressParams, + ) -> Option { + self.as_inner().get_ingress(environment, ingress_params) + } + + pub fn get_k8s_service_config( + &self, + k8s_service_config_params: Option, + ) -> Option { + self.as_inner().get_k8s_service_config(k8s_service_config_params) + } + + pub fn get_storage(&self, environment: &Environment) -> Option { + self.as_inner().get_storage(environment) + } + + pub fn get_resources(&self, environment: &Environment) -> Resources { + self.as_inner().get_resources(environment) + } + + pub fn get_replicas(&self, environment: &Environment) -> usize { + 
self.as_inner().get_replicas(environment) + } + + pub fn get_anti_affinity(&self, environment: &Environment) -> bool { + // TODO(Tsabary): implement anti-affinity logic. + self.as_inner().get_anti_affinity(environment) + } + + // Kubernetes service name as defined by CDK8s. + pub fn k8s_service_name(&self) -> String { + self.as_inner().k8s_service_name() + } + + pub fn get_service_file_path(&self) -> String { + PathBuf::from(CONFIG_BASE_DIR) + .join(SERVICES_DIR_NAME) + .join(NodeType::from(self).get_folder_name()) + .join(self.get_config_file_path()) + .to_string_lossy() + .to_string() + } + + pub fn get_ports(&self) -> BTreeMap { + self.as_inner().get_ports() + } +} + +pub(crate) trait ServiceNameInner: Display { + fn get_controller(&self) -> Controller; + + fn get_autoscale(&self) -> bool; + + fn get_toleration(&self, environment: &Environment) -> Option; + + fn get_ingress( + &self, + environment: &Environment, + ingress_params: IngressParams, + ) -> Option; + + fn get_k8s_service_config( + &self, + k8s_service_config_params: Option, + ) -> Option { + if self.has_p2p_interface() { + if let Some(K8sServiceConfigParams { namespace, domain, p2p_communication_type }) = + k8s_service_config_params + { + let service_namespace_domain = build_service_namespace_domain_address( + &self.k8s_service_name(), + &namespace, + &domain, + ); + return Some(K8sServiceConfig::new( + Some(service_namespace_domain), + p2p_communication_type, + )); + } + } + None + } + + fn has_p2p_interface(&self) -> bool; + + fn get_storage(&self, environment: &Environment) -> Option; + + fn get_resources(&self, environment: &Environment) -> Resources; + + fn get_replicas(&self, environment: &Environment) -> usize; + + fn get_anti_affinity(&self, environment: &Environment) -> bool; + + fn get_ports(&self) -> BTreeMap; + + // Kubernetes service name as defined by CDK8s. + fn k8s_service_name(&self) -> String { + let formatted_service_name = self.to_string().replace('_', ""); + format!("sequencer-{}-service", formatted_service_name) + } +} + +impl NodeType { + fn get_folder_name(&self) -> String { + self.to_string() + } + + pub fn all_service_names(&self) -> Vec { + match self { + // TODO(Tsabary): find a way to avoid this code duplication. + Self::Consolidated => { + ConsolidatedNodeServiceName::iter().map(NodeService::Consolidated).collect() + } + Self::Hybrid => HybridNodeServiceName::iter().map(NodeService::Hybrid).collect(), + Self::Distributed => { + DistributedNodeServiceName::iter().map(NodeService::Distributed).collect() + } + } + } + + pub fn get_component_configs( + &self, + ports: Option>, + ) -> IndexMap { + match self { + // TODO(Tsabary): avoid this code duplication. 
+            Self::Consolidated => ConsolidatedNodeServiceName::get_component_configs(ports),
+            Self::Hybrid => HybridNodeServiceName::get_component_configs(ports),
+            Self::Distributed => DistributedNodeServiceName::get_component_configs(ports),
+        }
+    }
+
+    fn dump_component_configs_with<SerdeFn>(&self, ports: Option<Vec<u16>>, writer: SerdeFn)
+    where
+        SerdeFn: Fn(&serde_json::Value, &str),
+    {
+        let component_configs = self.get_component_configs(ports);
+        for (node_service, config) in component_configs {
+            let wrapper = ComponentConfigsSerializationWrapper::from(config);
+            let flattened = config_to_preset(&json!(wrapper.dump()));
+            let file_path = node_service.get_service_file_path();
+            writer(&flattened, &file_path);
+        }
+    }
+
+    pub fn dump_service_component_configs(&self, ports: Option<Vec<u16>>) {
+        self.dump_component_configs_with(ports, |map, path| {
+            serialize_to_file(map, path);
+        });
+    }
+
+    #[cfg(test)]
+    pub fn test_dump_service_component_configs(&self, ports: Option<Vec<u16>>) {
+        self.dump_component_configs_with(ports, |map, path| {
+            serialize_to_file_test(map, path, FIX_BINARY_NAME);
+        });
+    }
+}
+
+pub trait GetComponentConfigs {
+    // TODO(Tsabary): replace IndexMap with regular HashMap. Currently using IndexMap as the
+    // integration test relies on indices rather than service names.
+    fn get_component_configs(ports: Option<Vec<u16>>) -> IndexMap<NodeService, ComponentConfig>;
+}
+
+impl Serialize for NodeService {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        // Serialize only the inner value.
+        match self {
+            NodeService::Consolidated(inner) => inner.serialize(serializer),
+            NodeService::Hybrid(inner) => inner.serialize(serializer),
+            NodeService::Distributed(inner) => inner.serialize(serializer),
+        }
+    }
+}
+
+// A helper struct for serializing the components config in the same hierarchy as its
+// serialization as part of the entire config, i.e., by prepending "components.".
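+// E.g., a parameter dumped by `ComponentConfig` as "batcher.<param>" is re-rooted here
+// as "components.batcher.<param>" (illustration of the prepended hierarchy).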
+#[derive(Clone, Debug, Default, Serialize)]
+struct ComponentConfigsSerializationWrapper {
+    components: ComponentConfig,
+}
+
+impl From<ComponentConfig> for ComponentConfigsSerializationWrapper {
+    fn from(value: ComponentConfig) -> Self {
+        ComponentConfigsSerializationWrapper { components: value }
+    }
+}
+
+impl SerializeConfig for ComponentConfigsSerializationWrapper {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        prepend_sub_config_name(self.components.dump(), "components")
+    }
+}
diff --git a/crates/apollo_deployments/src/test_utils.rs b/crates/apollo_deployments/src/test_utils.rs
new file mode 100644
index 00000000000..10a162e97e1
--- /dev/null
+++ b/crates/apollo_deployments/src/test_utils.rs
@@ -0,0 +1,116 @@
+use apollo_config::converters::{
+    serialize_optional_list_with_url_and_headers,
+    serialize_optional_vec_u8,
+    serialize_slice,
+    UrlAndHeaders,
+};
+use serde::{Serialize, Serializer};
+use url::Url;
+
+pub(crate) const FIX_BINARY_NAME: &str = "deployment_generator";
+
+#[derive(Serialize)]
+pub struct SecretsConfigOverride {
+    #[serde(rename = "base_layer_config.node_url")]
+    base_layer_config_node_url: Url,
+    #[serde(
+        rename = "consensus_manager_config.eth_to_strk_oracle_config.url_header_list",
+        serialize_with = "serialize_optional_list_with_url_and_headers_wrapper"
+    )]
+    consensus_manager_config_eth_to_strk_oracle_config_url_header_list: Option<Vec<UrlAndHeaders>>,
+    #[serde(
+        rename = "consensus_manager_config.network_config.secret_key",
+        serialize_with = "serialize_optional_vec_u8_wrapper"
+    )]
+    consensus_manager_config_network_config_secret_key: Option<Vec<u8>>,
+    #[serde(
+        rename = "l1_endpoint_monitor_config.ordered_l1_endpoint_urls",
+        serialize_with = "serialize_slice_wrapper"
+    )]
+    l1_endpoint_monitor_config_ordered_l1_endpoint_urls: Vec<Url>,
+    #[serde(
+        rename = "mempool_p2p_config.network_config.secret_key",
+        serialize_with = "serialize_optional_vec_u8_wrapper"
+    )]
+    mempool_p2p_config_network_config_secret_key: Option<Vec<u8>>,
+    recorder_url: Url,
+    #[serde(
+        rename = "state_sync_config.central_sync_client_config.central_source_config.http_headers"
+    )]
+    state_sync_config_central_sync_client_config_central_source_config_http_headers: String,
+    #[serde(
+        rename = "state_sync_config.network_config.secret_key",
+        serialize_with = "serialize_optional_vec_u8_wrapper"
+    )]
+    state_sync_config_network_config_secret_key: Option<Vec<u8>>,
+}
+
+impl Default for SecretsConfigOverride {
+    fn default() -> Self {
+        Self {
+            base_layer_config_node_url: Url::parse("https://arbitrary.url.com").unwrap(),
+            consensus_manager_config_eth_to_strk_oracle_config_url_header_list: Some(vec![
+                UrlAndHeaders {
+                    url: Url::parse("https://arbitrary.eth_to_strk_oracle.url").unwrap(),
+                    headers: Default::default(),
+                },
+            ]),
+            consensus_manager_config_network_config_secret_key: None,
+            l1_endpoint_monitor_config_ordered_l1_endpoint_urls: vec![
+                Url::parse("https://arbitrary.ordered_l1_endpoint_1.url").unwrap(),
+                Url::parse("https://arbitrary.ordered_l1_endpoint_2.url").unwrap(),
+            ],
+            mempool_p2p_config_network_config_secret_key: None,
+            recorder_url: Url::parse("https://arbitrary.recorder.url").unwrap(),
+            state_sync_config_central_sync_client_config_central_source_config_http_headers: ""
+                .to_string(),
+            state_sync_config_network_config_secret_key: None,
+        }
+    }
+}
+
+// Wrapper function for the custom `serialize_optional_list_with_url_and_headers` function, to be
+// compatible with serde's `serialize_with` attribute.
It first applies the custom serialization
+// logic to convert the optional list of `UrlAndHeaders` into a `String`, and then serializes that
+// string.
+fn serialize_optional_list_with_url_and_headers_wrapper<S>(
+    list: &Option<Vec<UrlAndHeaders>>,
+    serializer: S,
+) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    // Call the implemented custom serialization function
+    let s = serialize_optional_list_with_url_and_headers(list);
+    // Serialize the returned String
+    serializer.serialize_str(&s)
+}
+
+// Wrapper function for the custom `serialize_slice` function, to be compatible with serde's
+// `serialize_with` attribute. It first applies the custom serialization logic to convert the slice
+// of `Url` into a `String`, and then serializes that string.
+fn serialize_slice_wrapper<S>(urls: &[Url], serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    // Call the implemented custom serialization function
+    let s = serialize_slice(urls);
+    // Serialize the returned String
+    serializer.serialize_str(&s)
+}
+
+// Wrapper function for the custom `serialize_optional_vec_u8` function, to be compatible with
+// serde's `serialize_with` attribute. It first applies the custom serialization logic to convert
+// the optional u8 vector into a `String`, and then serializes that string.
+pub fn serialize_optional_vec_u8_wrapper<S>(
+    value: &Option<Vec<u8>>,
+    serializer: S,
+) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    // Call the implemented custom serialization function
+    let s = serialize_optional_vec_u8(value);
+    // Serialize the returned String
+    serializer.serialize_str(&s)
+}
diff --git a/crates/apollo_deployments/src/utils.rs b/crates/apollo_deployments/src/utils.rs
new file mode 100644
index 00000000000..989f225455a
--- /dev/null
+++ b/crates/apollo_deployments/src/utils.rs
@@ -0,0 +1,33 @@
+use std::collections::HashSet;
+
+use apollo_protobuf::consensus::DEFAULT_VALIDATOR_ID;
+
+pub(crate) fn get_validator_id(id: usize) -> String {
+    format!("0x{:x}", id + usize::try_from(DEFAULT_VALIDATOR_ID).unwrap())
+}
+
+/// Returns a validated or generated vector of port numbers of length `required_ports_num`.
+/// If `ports` is `Some`, asserts it has that length and that all values are unique.
+/// If `None`, generates a consecutive sequence starting from `base_port_for_generation`.
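+/// E.g., `determine_port_numbers(None, 3, 55000)` yields `[55000, 55001, 55002]`.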
+pub(crate) fn determine_port_numbers(
+    ports: Option<Vec<u16>>,
+    required_ports_num: usize,
+    base_port_for_generation: u16,
+) -> Vec<u16> {
+    match ports {
+        Some(v) => {
+            assert!(
+                v.len() == required_ports_num,
+                "Expected vector of length {}, got {}",
+                required_ports_num,
+                v.len()
+            );
+
+            let unique: HashSet<_> = v.iter().cloned().collect();
+            assert!(unique.len() == v.len(), "Vector contains duplicate values: {:?}", v);
+
+            v
+        }
+        None => (base_port_for_generation..).take(required_ports_num).collect(),
+    }
+}
diff --git a/crates/apollo_gateway/Cargo.toml b/crates/apollo_gateway/Cargo.toml
new file mode 100644
index 00000000000..af4873b807c
--- /dev/null
+++ b/crates/apollo_gateway/Cargo.toml
@@ -0,0 +1,75 @@
+[package]
+edition.workspace = true
+license.workspace = true
+name = "apollo_gateway"
+repository.workspace = true
+version.workspace = true
+
+[lints]
+workspace = true
+
+[features]
+testing = ["blockifier/testing", "blockifier_test_utils"]
+
+[dependencies]
+apollo_class_manager_types.workspace = true
+apollo_compilation_utils.workspace = true
+apollo_config.workspace = true
+apollo_gateway_types.workspace = true
+apollo_infra.workspace = true
+apollo_mempool_types.workspace = true
+apollo_metrics.workspace = true
+apollo_network_types.workspace = true
+apollo_proc_macros.workspace = true
+apollo_rpc.workspace = true
+apollo_state_sync_types.workspace = true
+async-trait.workspace = true
+axum.workspace = true
+blockifier.workspace = true
+blockifier_test_utils = { workspace = true, optional = true }
+cairo-lang-starknet-classes.workspace = true
+clap.workspace = true
+futures.workspace = true
+lazy_static.workspace = true
+mempool_test_utils.workspace = true
+num-rational.workspace = true
+reqwest.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+starknet-types-core.workspace = true
+starknet_api.workspace = true
+strum.workspace = true
+strum_macros.workspace = true
+tempfile.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tracing.workspace = true
+validator.workspace = true
+
+[dev-dependencies]
+apollo_class_manager_types = { workspace = true, features = ["testing"] }
+apollo_mempool.workspace = true
+apollo_mempool_types = { workspace = true, features = ["testing"] }
+apollo_metrics = { workspace = true, features = ["testing"] }
+apollo_network_types = { workspace = true, features = ["testing"] }
+apollo_state_sync_types = { workspace = true, features = ["testing"] }
+apollo_test_utils.workspace = true
+assert_matches.workspace = true
+blockifier = { workspace = true, features = ["testing"] }
+blockifier_test_utils.workspace = true
+cairo-lang-sierra-to-casm.workspace = true
+criterion = { workspace = true, features = ["async_tokio"] }
+metrics.workspace = true
+metrics-exporter-prometheus.workspace = true
+mockall.workspace = true
+mockito.workspace = true
+num-bigint.workspace = true
+pretty_assertions.workspace = true
+rstest.workspace = true
+tracing-test.workspace = true
+
+[[bench]]
+harness = false
+name = "apollo_gateway"
+path = "benches/main.rs"
+required-features = ["testing"]
diff --git a/crates/apollo_gateway/benches/main.rs b/crates/apollo_gateway/benches/main.rs
new file mode 100644
index 00000000000..85b86d580ac
--- /dev/null
+++ b/crates/apollo_gateway/benches/main.rs
@@ -0,0 +1,35 @@
+//! Benchmark module for the starknet gateway crate. It provides functionalities to benchmark
+//! the performance of the gateway service, including declare, deploy account and invoke
+//! transactions.
+//!
+//!
There are four benchmark functions in this flow: `declare_benchmark`, +//! `deploy_account_benchmark`, `invoke_benchmark` and `gateway_benchmark` which combines all of the +//! types. Each of the functions measure the performance of the gateway handling randomly created +//! txs of the respective type. +//! +//! Run the benchmarks using `cargo bench --bench apollo_gateway`. + +// import the Gateway test utilities. +mod utils; + +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use utils::{BenchTestSetup, BenchTestSetupConfig}; + +fn invoke_benchmark(criterion: &mut Criterion) { + let tx_generator_config = BenchTestSetupConfig::default(); + let n_txs = tx_generator_config.n_txs; + + let test_setup = BenchTestSetup::new(tx_generator_config); + criterion.bench_with_input( + BenchmarkId::new("invoke", n_txs), + &test_setup, + |bencher, test_setup| { + bencher + .to_async(tokio::runtime::Runtime::new().unwrap()) + .iter(|| test_setup.send_txs_to_gateway()); + }, + ); +} + +criterion_group!(benches, invoke_benchmark); +criterion_main!(benches); diff --git a/crates/apollo_gateway/benches/utils.rs b/crates/apollo_gateway/benches/utils.rs new file mode 100644 index 00000000000..a566534fb79 --- /dev/null +++ b/crates/apollo_gateway/benches/utils.rs @@ -0,0 +1,112 @@ +use std::sync::Arc; + +use apollo_class_manager_types::transaction_converter::TransactionConverter; +use apollo_class_manager_types::EmptyClassManagerClient; +use apollo_gateway::config::GatewayConfig; +use apollo_gateway::gateway::Gateway; +use apollo_gateway::state_reader_test_utils::local_test_state_reader_factory; +use apollo_mempool_types::communication::MockMempoolClient; +use blockifier::context::ChainInfo; +use blockifier_test_utils::cairo_versions::{CairoVersion, RunnableCairo1}; +use blockifier_test_utils::calldata::create_trivial_calldata; +use blockifier_test_utils::contracts::FeatureContract; +use mempool_test_utils::starknet_api_test_utils::test_valid_resource_bounds; +use starknet_api::core::ContractAddress; +use starknet_api::invoke_tx_args; +use starknet_api::rpc_transaction::RpcTransaction; +use starknet_api::test_utils::invoke::rpc_invoke_tx; +use starknet_api::test_utils::NonceManager; + +const N_TXS: usize = 100; + +// TODO(Arni): Use `AccountTransactionGenerator` from `starknet_api_test_utils`. 
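+// Generates invoke txs from a single account to a single test contract, advancing the
+// account nonce for each generated tx (see `generate_invoke` below).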
+struct TransactionGenerator { + nonce_manager: NonceManager, + sender_address: ContractAddress, + test_contract_address: ContractAddress, +} + +impl TransactionGenerator { + fn new(cairo_version: CairoVersion) -> Self { + let account_contract = FeatureContract::AccountWithoutValidations(cairo_version); + let test_contract = FeatureContract::TestContract(cairo_version); + let sender_address = account_contract.get_instance_address(0); + let test_contract_address = test_contract.get_instance_address(0); + Self { nonce_manager: NonceManager::default(), sender_address, test_contract_address } + } + + fn generate_invoke(&mut self) -> RpcTransaction { + let invoke_args = invoke_tx_args!( + nonce: self.nonce_manager.next(self.sender_address), + sender_address: self.sender_address, + resource_bounds: test_valid_resource_bounds(), + calldata: create_trivial_calldata(self.test_contract_address), + ); + rpc_invoke_tx(invoke_args) + } +} + +pub struct BenchTestSetupConfig { + pub n_txs: usize, + pub gateway_config: GatewayConfig, +} + +impl Default for BenchTestSetupConfig { + fn default() -> Self { + Self { + n_txs: N_TXS, + gateway_config: GatewayConfig { + chain_info: ChainInfo::create_for_testing(), + ..Default::default() + }, + } + } +} + +pub struct BenchTestSetup { + gateway: Gateway, + txs: Vec, +} + +impl BenchTestSetup { + pub fn new(config: BenchTestSetupConfig) -> Self { + let cairo_version = CairoVersion::Cairo1(RunnableCairo1::Casm); + let mut tx_generator = TransactionGenerator::new(cairo_version); + + let mut txs: Vec = Vec::with_capacity(config.n_txs); + for _ in 0..config.n_txs { + txs.push(tx_generator. + // TODO(Arni): Do something smarter than generate raw invoke. + generate_invoke()); + } + + let state_reader_factory = local_test_state_reader_factory(cairo_version, false); + let mut mempool_client = MockMempoolClient::new(); + // TODO(noamsp): use MockTransactionConverter + let class_manager_client = Arc::new(EmptyClassManagerClient); + let transaction_converter = TransactionConverter::new( + class_manager_client.clone(), + config.gateway_config.chain_info.chain_id.clone(), + ); + mempool_client.expect_add_tx().returning(|_| Ok(())); + + let gateway_business_logic = Gateway::new( + config.gateway_config, + Arc::new(state_reader_factory), + Arc::new(mempool_client), + transaction_converter, + ); + + Self { gateway: gateway_business_logic, txs } + } + + pub async fn send_txs_to_gateway(&self) { + for tx in &self.txs { + let _tx_hash = self + .gateway + .add_tx(tx.clone(), None) + .await + .expect("Some txs has failed in the gateway."); + } + } +} diff --git a/crates/apollo_gateway/src/communication.rs b/crates/apollo_gateway/src/communication.rs new file mode 100644 index 00000000000..b562bd70e2c --- /dev/null +++ b/crates/apollo_gateway/src/communication.rs @@ -0,0 +1,30 @@ +use apollo_gateway_types::communication::{GatewayRequest, GatewayResponse}; +use apollo_gateway_types::errors::GatewayError; +use apollo_infra::component_definitions::ComponentRequestHandler; +use apollo_infra::component_server::{ConcurrentLocalComponentServer, RemoteComponentServer}; +use async_trait::async_trait; + +use crate::gateway::Gateway; + +pub type LocalGatewayServer = + ConcurrentLocalComponentServer; +pub type RemoteGatewayServer = RemoteComponentServer; + +#[async_trait] +impl ComponentRequestHandler for Gateway { + async fn handle_request(&mut self, request: GatewayRequest) -> GatewayResponse { + match request { + GatewayRequest::AddTransaction(gateway_input) => { + let p2p_message_metadata = 
gateway_input.message_metadata.clone(); + GatewayResponse::AddTransaction( + self.add_tx(gateway_input.rpc_tx, gateway_input.message_metadata) + .await + .map_err(|source| GatewayError::DeprecatedGatewayError { + source, + p2p_message_metadata, + }), + ) + } + } + } +} diff --git a/crates/starknet_gateway/src/compiler_version.rs b/crates/apollo_gateway/src/compiler_version.rs similarity index 94% rename from crates/starknet_gateway/src/compiler_version.rs rename to crates/apollo_gateway/src/compiler_version.rs index 2b3dbcc6c40..e791c71fb2f 100644 --- a/crates/starknet_gateway/src/compiler_version.rs +++ b/crates/apollo_gateway/src/compiler_version.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; +use apollo_compilation_utils::class_utils::sierra_program_as_felts_to_big_uint_as_hex; +use apollo_config::dumping::{ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; use cairo_lang_starknet_classes::compiler_version::VersionId as CairoLangVersionId; use cairo_lang_starknet_classes::contract_class::version_id_from_serialized_sierra_program; -use papyrus_config::dumping::{ser_param, SerializeConfig}; -use papyrus_config::{ParamPath, ParamPrivacyInput, SerializedParam}; use serde::{Deserialize, Serialize}; -use starknet_sierra_multicompile::utils::sierra_program_as_felts_to_big_uint_as_hex; use starknet_types_core::felt::Felt; use thiserror::Error; diff --git a/crates/apollo_gateway/src/config.rs b/crates/apollo_gateway/src/config.rs new file mode 100644 index 00000000000..8933e8f71a6 --- /dev/null +++ b/crates/apollo_gateway/src/config.rs @@ -0,0 +1,300 @@ +use std::collections::BTreeMap; +use std::str::FromStr; + +use apollo_config::dumping::{ + prepend_sub_config_name, + ser_optional_param, + ser_param, + SerializeConfig, +}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use blockifier::blockifier_versioned_constants::VersionedConstantsOverrides; +use blockifier::context::ChainInfo; +use serde::{de, Deserialize, Deserializer, Serialize}; +use starknet_api::core::{ContractAddress, Nonce}; +use starknet_types_core::felt::Felt; +use validator::Validate; + +use crate::compiler_version::VersionId; + +const JSON_RPC_VERSION: &str = "2.0"; + +#[derive(Clone, Debug, Default, Serialize, Deserialize, Validate, PartialEq)] +pub struct GatewayConfig { + pub stateless_tx_validator_config: StatelessTransactionValidatorConfig, + pub stateful_tx_validator_config: StatefulTransactionValidatorConfig, + pub chain_info: ChainInfo, + pub block_declare: bool, + #[serde(default, deserialize_with = "deserialize_optional_contract_addresses")] + pub authorized_declarer_accounts: Option>, +} + +impl SerializeConfig for GatewayConfig { + fn dump(&self) -> BTreeMap { + let mut dump = BTreeMap::from_iter([ser_param( + "block_declare", + &self.block_declare, + "If true, the gateway will block declare transactions.", + ParamPrivacyInput::Public, + )]); + dump.extend(prepend_sub_config_name( + self.stateless_tx_validator_config.dump(), + "stateless_tx_validator_config", + )); + dump.extend(prepend_sub_config_name( + self.stateful_tx_validator_config.dump(), + "stateful_tx_validator_config", + )); + dump.extend(prepend_sub_config_name(self.chain_info.dump(), "chain_info")); + dump.extend(ser_optional_param( + &self.authorized_declarer_accounts.as_ref().map(|accounts| { + accounts.iter().map(|addr| addr.0.to_string()).collect::>().join(",") + }), + "".to_string(), + "authorized_declarer_accounts", + "Authorized declarer accounts. 
If set, only these accounts can declare new contracts. \ + Addresses are in hex format and separated by a comma with no space.", + ParamPrivacyInput::Public, + )); + dump + } +} + +impl GatewayConfig { + pub fn is_authorized_declarer(&self, declarer_address: &ContractAddress) -> bool { + match &self.authorized_declarer_accounts { + Some(allowed_accounts) => allowed_accounts.contains(declarer_address), + None => true, + } + } +} + +fn deserialize_optional_contract_addresses<'de, D>( + de: D, +) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + let raw: String = match Option::deserialize(de)? { + Some(addresses) => addresses, + None => return Ok(None), + }; + + if raw.is_empty() { + return Err(de::Error::custom( + "Empty string is not a valid input for contract addresses. The config field \ + `gateway_config.authorized_declarer_accounts.#is_none` is false and should be true \ + if you don't want to use this feature.", + )); + } + + let mut result = Vec::new(); + for addresses_str in raw.split(',') { + let felt = Felt::from_str(addresses_str).map_err(|err| { + de::Error::custom(format!("Failed to parse Felt from '{}': {}", addresses_str, err)) + })?; + + let addr = ContractAddress::try_from(felt).map_err(|err| { + de::Error::custom(format!("Invalid contract address '{}': {}", addresses_str, err)) + })?; + + result.push(addr); + } + + Ok(Some(result)) +} + +#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)] +pub struct StatelessTransactionValidatorConfig { + // TODO(Arni): Align the name of this field with the mempool config, and all other places where + // validation is skipped during the systems bootstrap phase. + // If true, validates that the resource bounds are not zero. + pub validate_non_zero_resource_bounds: bool, + // TODO(AlonH): Remove this field and use the one from the versioned constants. + pub min_gas_price: u128, + pub max_calldata_length: usize, + pub max_signature_length: usize, + + // Declare txs specific config. 
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)]
+pub struct StatelessTransactionValidatorConfig {
+    // TODO(Arni): Align the name of this field with the mempool config, and all other places
+    // where validation is skipped during the systems bootstrap phase.
+    // If true, validates that the resource bounds are not zero.
+    pub validate_non_zero_resource_bounds: bool,
+    // TODO(AlonH): Remove this field and use the one from the versioned constants.
+    pub min_gas_price: u128,
+    pub max_calldata_length: usize,
+    pub max_signature_length: usize,
+
+    // Declare txs specific config.
+    pub max_contract_bytecode_size: usize,
+    pub max_contract_class_object_size: usize,
+    pub min_sierra_version: VersionId,
+    pub max_sierra_version: VersionId,
+}
+
+impl Default for StatelessTransactionValidatorConfig {
+    fn default() -> Self {
+        StatelessTransactionValidatorConfig {
+            validate_non_zero_resource_bounds: true,
+            min_gas_price: 3_000_000_000,
+            max_calldata_length: 4000,
+            max_signature_length: 4000,
+            max_contract_bytecode_size: 81920,
+            max_contract_class_object_size: 4089446,
+            min_sierra_version: VersionId::new(1, 1, 0),
+            max_sierra_version: VersionId::new(1, 5, usize::MAX),
+        }
+    }
+}
+
+impl SerializeConfig for StatelessTransactionValidatorConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        let members = BTreeMap::from_iter([
+            ser_param(
+                "validate_non_zero_resource_bounds",
+                &self.validate_non_zero_resource_bounds,
+                "If true, validates that at least one resource bound (L1, L2, or L1 Data) is \
+                 non-zero.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_signature_length",
+                &self.max_signature_length,
+                "Limitation of signature length.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_calldata_length",
+                &self.max_calldata_length,
+                "Limitation of calldata length.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_contract_bytecode_size",
+                &self.max_contract_bytecode_size,
+                "Limitation of contract class bytecode size.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_contract_class_object_size",
+                &self.max_contract_class_object_size,
+                "Limitation of contract class object size.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "min_gas_price",
+                &self.min_gas_price,
+                "Minimum gas price for transactions.",
+                ParamPrivacyInput::Public,
+            ),
+        ]);
+        vec![
+            members,
+            prepend_sub_config_name(self.min_sierra_version.dump(), "min_sierra_version"),
+            prepend_sub_config_name(self.max_sierra_version.dump(), "max_sierra_version"),
+        ]
+        .into_iter()
+        .flatten()
+        .collect()
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)]
+pub struct RpcStateReaderConfig {
+    pub url: String,
+    pub json_rpc_version: String,
+}
+
+impl RpcStateReaderConfig {
+    pub fn from_url(url: String) -> Self {
+        Self { url, ..Default::default() }
+    }
+}
+
+impl Default for RpcStateReaderConfig {
+    fn default() -> Self {
+        Self { url: Default::default(), json_rpc_version: JSON_RPC_VERSION.to_string() }
+    }
+}
+
+#[cfg(any(feature = "testing", test))]
+impl RpcStateReaderConfig {
+    pub fn create_for_testing() -> Self {
+        Self::from_url("http://localhost:8080".to_string())
+    }
+}
+
+impl SerializeConfig for RpcStateReaderConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        BTreeMap::from_iter([
+            ser_param("url", &self.url, "The url of the rpc server.", ParamPrivacyInput::Public),
+            ser_param(
+                "json_rpc_version",
+                &self.json_rpc_version,
+                "The json rpc version.",
+                ParamPrivacyInput::Public,
+            ),
+        ])
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)]
+pub struct StatefulTransactionValidatorConfig {
+    // TODO(Arni): Align the name of this field with the mempool config, and all other places
+    // where validation is skipped during the systems bootstrap phase.
+    // If true, ensures the L2 gas price exceeds a dynamically calculated threshold based on
+    // EIP-1559 network usage.
+    pub validate_resource_bounds_above_threshold: bool,
+    pub max_allowed_nonce_gap: u32,
+    pub reject_future_declare_txs: bool,
+    pub max_nonce_for_validation_skip: Nonce,
+    pub versioned_constants_overrides: VersionedConstantsOverrides,
+    // Minimum gas price as percentage of threshold to accept transactions.
+    pub min_gas_price_percentage: u8, // E.g., 80 to require 80% of threshold.
+}
+
+impl Default for StatefulTransactionValidatorConfig {
+    fn default() -> Self {
+        StatefulTransactionValidatorConfig {
+            validate_resource_bounds_above_threshold: true,
+            max_allowed_nonce_gap: 50,
+            reject_future_declare_txs: true,
+            max_nonce_for_validation_skip: Nonce(Felt::ONE),
+            min_gas_price_percentage: 100,
+            versioned_constants_overrides: VersionedConstantsOverrides::default(),
+        }
+    }
+}
+
+impl SerializeConfig for StatefulTransactionValidatorConfig {
+    fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> {
+        let mut dump = BTreeMap::from_iter([
+            ser_param(
+                "validate_resource_bounds_above_threshold",
+                &self.validate_resource_bounds_above_threshold,
+                "If true, ensures the L2 gas price exceeds a dynamically calculated threshold \
+                 based on EIP-1559 network usage.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_nonce_for_validation_skip",
+                &self.max_nonce_for_validation_skip,
+                "Maximum nonce for which the validation is skipped.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "max_allowed_nonce_gap",
+                &self.max_allowed_nonce_gap,
+                "The maximum allowed gap between the account nonce and the transaction nonce.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "reject_future_declare_txs",
+                &self.reject_future_declare_txs,
+                "If true, rejects declare transactions with future nonces.",
+                ParamPrivacyInput::Public,
+            ),
+            ser_param(
+                "min_gas_price_percentage",
+                &self.min_gas_price_percentage,
+                "Minimum gas price as percentage of threshold to accept transactions.",
+                ParamPrivacyInput::Public,
+            ),
+        ]);
+        dump.append(&mut prepend_sub_config_name(
+            self.versioned_constants_overrides.dump(),
+            "versioned_constants_overrides",
+        ));
+        dump
+    }
+}
diff --git a/crates/apollo_gateway/src/errors.rs b/crates/apollo_gateway/src/errors.rs
new file mode 100644
index 00000000000..c6858c8e2aa
--- /dev/null
+++ b/crates/apollo_gateway/src/errors.rs
@@ -0,0 +1,314 @@
+use apollo_gateway_types::deprecated_gateway_error::{
+    KnownStarknetErrorCode,
+    StarknetError,
+    StarknetErrorCode,
+};
+use apollo_gateway_types::errors::GatewaySpecError;
+use apollo_mempool_types::communication::{MempoolClientError, MempoolClientResult};
+use apollo_mempool_types::errors::MempoolError;
+use axum::http::StatusCode;
+use blockifier::state::errors::StateError;
+use serde_json::{Error as SerdeError, Value};
+use starknet_api::block::GasPrice;
+use starknet_api::transaction::fields::AllResourceBounds;
+use starknet_api::StarknetApiError;
+use thiserror::Error;
+use tracing::{debug, error, warn};
+
+use crate::compiler_version::{VersionId, VersionIdError};
+use crate::rpc_objects::{RpcErrorCode, RpcErrorResponse};
+
+pub type GatewayResult<T> = Result<T, StarknetError>;
+
+#[derive(Debug, Error)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum StatelessTransactionValidatorError {
+    #[error(
+        "Calldata length exceeded maximum: length {calldata_length} \
+         (allowed length: {max_calldata_length})."
+    )]
+    CalldataTooLong { calldata_length: usize, max_calldata_length: usize },
+    #[error(
+        "Cannot declare contract class with bytecode size of {contract_bytecode_size}; max \
+         allowed size: {max_contract_bytecode_size}."
+    )]
+    ContractBytecodeSizeTooLarge {
+        contract_bytecode_size: usize,
+        max_contract_bytecode_size: usize,
+    },
+    #[error(
+        "Cannot declare contract class with size of {contract_class_object_size}; max allowed \
+         size: {max_contract_class_object_size}."
+    )]
+    ContractClassObjectSizeTooLarge {
+        contract_class_object_size: usize,
+        max_contract_class_object_size: usize,
+    },
+    #[error("Entry points must be unique and sorted.")]
+    EntryPointsNotUniquelySorted,
+    #[error("Invalid {field_name} data availability mode.")]
+    InvalidDataAvailabilityMode { field_name: String },
+    #[error(transparent)]
+    InvalidSierraVersion(#[from] VersionIdError),
+    #[error(
+        "Signature length exceeded maximum: length {signature_length} \
+         (allowed length: {max_signature_length})."
+    )]
+    SignatureTooLong { signature_length: usize, max_signature_length: usize },
+    #[error(transparent)]
+    StarknetApiError(#[from] StarknetApiError),
+    #[error(
+        "Sierra versions older than {min_version} or newer than {max_version} are not supported. \
+         The Sierra version of the declared contract is {version}."
+    )]
+    UnsupportedSierraVersion { version: VersionId, min_version: VersionId, max_version: VersionId },
+    #[error("The field {field_name} should be empty.")]
+    NonEmptyField { field_name: String },
+    #[error(
+        "At least one resource bound (L1, L2, or L1 Data) must be non-zero. Got: \
+         {resource_bounds:?}."
+    )]
+    ZeroResourceBounds { resource_bounds: AllResourceBounds },
+    #[error(
+        "Max gas price is too low: {gas_price:?}, minimum required gas price: {min_gas_price:?}."
+    )]
+    MaxGasPriceTooLow { gas_price: GasPrice, min_gas_price: u128 },
+}
+
+impl From<StatelessTransactionValidatorError> for GatewaySpecError {
+    fn from(e: StatelessTransactionValidatorError) -> Self {
+        match e {
+            StatelessTransactionValidatorError::ContractClassObjectSizeTooLarge { .. }
+            | StatelessTransactionValidatorError::ContractBytecodeSizeTooLarge { .. } => {
+                GatewaySpecError::ContractClassSizeIsTooLarge
+            }
+            StatelessTransactionValidatorError::UnsupportedSierraVersion { .. } => {
+                GatewaySpecError::UnsupportedContractClassVersion
+            }
+            StatelessTransactionValidatorError::CalldataTooLong { .. }
+            | StatelessTransactionValidatorError::EntryPointsNotUniquelySorted
+            | StatelessTransactionValidatorError::InvalidDataAvailabilityMode { .. }
+            | StatelessTransactionValidatorError::InvalidSierraVersion(..)
+            | StatelessTransactionValidatorError::NonEmptyField { .. }
+            | StatelessTransactionValidatorError::SignatureTooLong { .. }
+            | StatelessTransactionValidatorError::StarknetApiError(..)
+            | StatelessTransactionValidatorError::ZeroResourceBounds { .. }
+            | StatelessTransactionValidatorError::MaxGasPriceTooLow { .. } => {
+                GatewaySpecError::ValidationFailure { data: e.to_string() }
+            }
+        }
+    }
+}
+
+impl From<StatelessTransactionValidatorError> for StarknetError {
+    fn from(e: StatelessTransactionValidatorError) -> Self {
+        let message = format!("{}", e);
+        let code = match e {
+            StatelessTransactionValidatorError::ContractBytecodeSizeTooLarge { .. } => {
+                StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::ContractBytecodeSizeTooLarge,
+                )
+            }
+            StatelessTransactionValidatorError::ContractClassObjectSizeTooLarge { .. } => {
+                StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::ContractClassObjectSizeTooLarge,
+                )
+            }
+            StatelessTransactionValidatorError::UnsupportedSierraVersion { .. } => {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.INVALID_CONTRACT_CLASS".to_string(),
+                )
+            }
+            StatelessTransactionValidatorError::CalldataTooLong { .. } => {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.CALLDATA_TOO_LONG".to_string(),
+                )
+            }
+            StatelessTransactionValidatorError::EntryPointsNotUniquelySorted =>
+            // Error does not exist in deprecated GW.
+            {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.ENTRY_POINTS_NOT_UNIQUELY_SORTED".to_string(),
+                )
+            }
+
+            StatelessTransactionValidatorError::InvalidDataAvailabilityMode { .. } =>
+            // Error does not exist in deprecated GW.
+            {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.INVALID_DATA_AVAILABILITY_MODE".to_string(),
+                )
+            }
+
+            StatelessTransactionValidatorError::InvalidSierraVersion(..) =>
+            // Error does not exist in deprecated GW.
+            {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.INVALID_SIERRA_VERSION".to_string(),
+                )
+            }
+            StatelessTransactionValidatorError::NonEmptyField { .. } =>
+            // Error does not exist in deprecated GW.
+            {
+                StarknetErrorCode::UnknownErrorCode("StarknetErrorCode.NON_EMPTY_FIELD".to_string())
+            }
+
+            StatelessTransactionValidatorError::SignatureTooLong { .. } => {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.SIGNATURE_TOO_LONG".to_string(),
+                )
+            }
+            StatelessTransactionValidatorError::StarknetApiError(..) =>
+            // TODO(yair): map SN_API errors to the correct error codes.
+            {
+                StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.STARKNET_API_ERROR".to_string(),
+                )
+            }
+            StatelessTransactionValidatorError::ZeroResourceBounds { .. }
+            | StatelessTransactionValidatorError::MaxGasPriceTooLow { .. } => {
+                StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InsufficientMaxFee)
+            }
+        };
+        StarknetError { code, message }
+    }
+}
+
+/// Converts a mempool client result to a gateway result. Some error variants are unreachable in
+/// the gateway context, and some are not considered errors from the gateway's perspective.
+pub fn mempool_client_result_to_gw_spec_result(
+    value: MempoolClientResult<()>,
+) -> Result<(), GatewaySpecError> {
+    let err = match value {
+        Ok(()) => return Ok(()),
+        Err(err) => err,
+    };
+    match err {
+        MempoolClientError::ClientError(client_error) => {
+            error!("Mempool client error: {}", client_error);
+            Err(GatewaySpecError::UnexpectedError { data: "Internal error".to_owned() })
+        }
+        MempoolClientError::MempoolError(mempool_error) => {
+            debug!("Mempool error: {}", mempool_error);
+            match mempool_error {
+                MempoolError::DuplicateNonce { .. }
+                | MempoolError::NonceTooLarge { .. }
+                | MempoolError::NonceTooOld { .. } => {
+                    Err(GatewaySpecError::InvalidTransactionNonce)
+                }
+                MempoolError::DuplicateTransaction { .. } => Err(GatewaySpecError::DuplicateTx),
+                // TODO(Dafna): change to a more appropriate error, once we have it.
+                MempoolError::MempoolFull => {
+                    Err(GatewaySpecError::UnexpectedError { data: "Mempool full".to_owned() })
+                }
+                MempoolError::P2pPropagatorClientError { .. } => {
+                    // Not an error from the gateway's perspective.
+                    warn!("P2p propagator client error: {}", mempool_error);
+                    Ok(())
+                }
+                MempoolError::TransactionNotFound { .. } => {
+                    // This error is not expected to happen within the gateway, only from other
+                    // mempool clients.
+                    unreachable!("Unexpected mempool error in gateway context: {}", mempool_error);
+                }
+            }
+        }
+    }
+}
+
+pub fn mempool_client_err_to_deprecated_gw_err(err: MempoolClientError) -> StarknetError {
+    let message = format!("{}", err);
+    let code = match err {
+        MempoolClientError::ClientError(client_error) => {
+            error!("Mempool client error: {}", client_error);
+            return StarknetError::internal(&message);
+        }
+        MempoolClientError::MempoolError(mempool_error) => {
+            debug!("Mempool error: {}", mempool_error);
+            match mempool_error {
+                MempoolError::DuplicateNonce { .. } => StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::InvalidTransactionNonce,
+                ),
+                MempoolError::NonceTooLarge(..) =>
+                // We didn't have this kind of an error.
+                {
+                    StarknetErrorCode::UnknownErrorCode(
+                        "StarknetErrorCode.NONCE_TOO_LARGE".to_string(),
+                    )
+                }
+                MempoolError::NonceTooOld { .. } => StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::InvalidTransactionNonce,
+                ),
+                MempoolError::DuplicateTransaction { .. } => {
+                    StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::DuplicatedTransaction)
+                }
+                MempoolError::MempoolFull => StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::TransactionLimitExceeded,
+                ),
+                MempoolError::P2pPropagatorClientError { .. } => {
+                    // Not an error from the gateway's perspective.
+                    return StarknetError::internal(&message);
+                }
+                MempoolError::TransactionNotFound { .. } => {
+                    // This error is not expected to happen within the gateway, only from other
+                    // mempool clients.
+                    unreachable!("Unexpected mempool error in gateway context: {}", mempool_error);
+                }
+            }
+        }
+    };
+    StarknetError { code, message }
+}
+
+/// Converts a mempool client result to a gateway result. Some error variants are unreachable in
+/// the gateway context, and some are not considered errors from the gateway's perspective.
+pub fn mempool_client_result_to_deprecated_gw_result(
+    value: MempoolClientResult<()>,
+) -> GatewayResult<()> {
+    value.map_err(mempool_client_err_to_deprecated_gw_err)
+}
+
+pub type StatelessTransactionValidatorResult<T> = Result<T, StatelessTransactionValidatorError>;
+
+pub type StatefulTransactionValidatorResult<T> = Result<T, StarknetError>;
+
+#[derive(Debug, Error)]
+pub enum RPCStateReaderError {
+    #[error("Block not found for request {0}")]
+    BlockNotFound(Value),
+    #[error("Class hash not found for request {0}")]
+    ClassHashNotFound(Value),
+    #[error("Contract address not found for request {0}")]
+    ContractAddressNotFound(Value),
+    #[error("Failed to parse gas price {0:?}")]
+    GasPriceParsingFailure(GasPrice),
+    #[error("Invalid params: {0:?}")]
+    InvalidParams(RpcErrorResponse),
+    #[error("RPC error: {0}")]
+    RPCError(StatusCode),
+    #[error(transparent)]
+    ReqwestError(#[from] reqwest::Error),
+    #[error("Unexpected error code: {0}")]
+    UnexpectedErrorCode(RpcErrorCode),
+}
+
+pub type RPCStateReaderResult<T> = Result<T, RPCStateReaderError>;
+
+impl From<RPCStateReaderError> for StateError {
+    fn from(err: RPCStateReaderError) -> Self {
+        match err {
+            RPCStateReaderError::ClassHashNotFound(request) => {
+                match serde_json::from_value(request["params"]["class_hash"].clone()) {
+                    Ok(class_hash) => StateError::UndeclaredClassHash(class_hash),
+                    Err(e) => serde_err_to_state_err(e),
+                }
+            }
+            _ => StateError::StateReadError(err.to_string()),
+        }
+    }
+}
+
+// Converts a serde error to the error type of the state reader.
+pub fn serde_err_to_state_err(err: SerdeError) -> StateError {
+    StateError::StateReadError(format!("Failed to parse rpc result {:?}", err.to_string()))
+}
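// Illustrative sketch of the conversion above (not part of the patch; the
// request value is made up, and it assumes ClassHash deserializes from a hex
// string as it does elsewhere in starknet_api): a ClassHashNotFound RPC error
// surfaces to the blockifier as UndeclaredClassHash, so callers can tell a
// missing class apart from a generic read failure.
//
//     let request = serde_json::json!({ "params": { "class_hash": "0x123" } });
//     let err: StateError = RPCStateReaderError::ClassHashNotFound(request).into();
//     assert!(matches!(err, StateError::UndeclaredClassHash(_)));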
diff --git a/crates/apollo_gateway/src/gateway.rs b/crates/apollo_gateway/src/gateway.rs
new file mode 100644
index 00000000000..00b50e0bdc2
--- /dev/null
+++ b/crates/apollo_gateway/src/gateway.rs
@@ -0,0 +1,353 @@
+use std::clone::Clone;
+use std::sync::Arc;
+
+use apollo_class_manager_types::transaction_converter::{
+    TransactionConverter,
+    TransactionConverterError,
+    TransactionConverterTrait,
+};
+use apollo_class_manager_types::SharedClassManagerClient;
+use apollo_gateway_types::deprecated_gateway_error::{
+    KnownStarknetErrorCode,
+    StarknetError,
+    StarknetErrorCode,
+};
+use apollo_gateway_types::gateway_types::{
+    DeclareGatewayOutput,
+    DeployAccountGatewayOutput,
+    GatewayOutput,
+    InvokeGatewayOutput,
+};
+use apollo_infra::component_definitions::ComponentStarter;
+use apollo_mempool_types::communication::{AddTransactionArgsWrapper, SharedMempoolClient};
+use apollo_mempool_types::mempool_types::{AccountState, AddTransactionArgs};
+use apollo_network_types::network_types::BroadcastedMessageMetadata;
+use apollo_proc_macros::sequencer_latency_histogram;
+use apollo_state_sync_types::communication::SharedStateSyncClient;
+use axum::async_trait;
+use blockifier::context::ChainInfo;
+use num_rational::Ratio;
+use starknet_api::block::NonzeroGasPrice;
+use starknet_api::executable_transaction::ValidateCompiledClassHashError;
+use starknet_api::rpc_transaction::{
+    InternalRpcTransaction,
+    InternalRpcTransactionWithoutTxHash,
+    RpcDeclareTransaction,
+    RpcTransaction,
+};
+use starknet_api::transaction::fields::ValidResourceBounds;
+use tracing::{debug, error, info, instrument, warn, Span};
+
+use crate::config::GatewayConfig;
+use crate::errors::{mempool_client_result_to_deprecated_gw_result, GatewayResult};
+use crate::metrics::{register_metrics, GatewayMetricHandle, GATEWAY_ADD_TX_LATENCY};
+use crate::state_reader::StateReaderFactory;
+use crate::stateful_transaction_validator::StatefulTransactionValidator;
+use crate::stateless_transaction_validator::StatelessTransactionValidator;
+use crate::sync_state_reader::SyncStateReaderFactory;
+
+#[cfg(test)]
+#[path = "gateway_test.rs"]
+pub mod gateway_test;
+
+#[derive(Clone)]
+pub struct Gateway {
+    pub config: Arc<GatewayConfig>,
+    pub stateless_tx_validator: Arc<StatelessTransactionValidator>,
+    pub stateful_tx_validator: Arc<StatefulTransactionValidator>,
+    pub state_reader_factory: Arc<dyn StateReaderFactory>,
+    pub mempool_client: SharedMempoolClient,
+    pub transaction_converter: Arc<TransactionConverter>,
+    pub chain_info: Arc<ChainInfo>,
+}
+
+impl Gateway {
+    pub fn new(
+        config: GatewayConfig,
+        state_reader_factory: Arc<dyn StateReaderFactory>,
+        mempool_client: SharedMempoolClient,
+        transaction_converter: TransactionConverter,
+    ) -> Self {
+        Self {
+            config: Arc::new(config.clone()),
+            stateless_tx_validator: Arc::new(StatelessTransactionValidator {
+                config: config.stateless_tx_validator_config.clone(),
+            }),
+            stateful_tx_validator: Arc::new(StatefulTransactionValidator {
+                config: config.stateful_tx_validator_config.clone(),
+            }),
+            state_reader_factory,
+            mempool_client,
+            chain_info: Arc::new(config.chain_info.clone()),
+            transaction_converter: Arc::new(transaction_converter),
+        }
+    }
+
+    #[instrument(skip_all, fields(is_p2p = p2p_message_metadata.is_some()), ret)]
+    #[sequencer_latency_histogram(GATEWAY_ADD_TX_LATENCY, true)]
+    pub async fn add_tx(
+        &self,
+        tx: RpcTransaction,
+        p2p_message_metadata: Option<BroadcastedMessageMetadata>,
+    ) -> GatewayResult<GatewayOutput> {
+        debug!("Processing tx: {:?}", tx);
+
+        if let RpcTransaction::Declare(ref declare_tx) = tx {
+            self.check_declare_permissions(declare_tx)?;
+        }
+
+        let mut metric_counters = GatewayMetricHandle::new(&tx, &p2p_message_metadata);
+        metric_counters.count_transaction_received();
+
+        let blocking_task =
+            ProcessTxBlockingTask::new(self, tx.clone(), tokio::runtime::Handle::current());
+        // Run the blocking task in the current span.
+        let curr_span = Span::current();
+        let add_tx_args =
+            tokio::task::spawn_blocking(move || curr_span.in_scope(|| blocking_task.process_tx()))
+                .await
+                .map_err(|join_err| {
+                    error!("Failed to process tx: {}", join_err);
+                    StarknetError::internal(&join_err.to_string())
+                })?
+                .inspect_err(|starknet_error| {
+                    info!(
+                        "Gateway validation failed for tx: {:?} with error: {}",
+                        tx, starknet_error
+                    );
+                })?;
+
+        let gateway_output = create_gateway_output(&add_tx_args.tx);
+
+        let add_tx_args = AddTransactionArgsWrapper { args: add_tx_args, p2p_message_metadata };
+        mempool_client_result_to_deprecated_gw_result(
+            self.mempool_client.add_tx(add_tx_args).await,
+        )?;
+
+        metric_counters.transaction_sent_to_mempool();
+
+        Ok(gateway_output)
+    }
+
+    fn check_declare_permissions(
+        &self,
+        declare_tx: &RpcDeclareTransaction,
+    ) -> Result<(), StarknetError> {
+        // TODO(noamsp): Return same error as in Python gateway.
+        if self.config.block_declare {
+            return Err(StarknetError {
+                code: StarknetErrorCode::UnknownErrorCode(
+                    "StarknetErrorCode.BLOCKED_TRANSACTION_TYPE".to_string(),
+                ),
+                message: "Transaction type is temporarily blocked.".to_string(),
+            });
+        }
+        let RpcDeclareTransaction::V3(declare_v3_tx) = declare_tx;
+        if !self.config.is_authorized_declarer(&declare_v3_tx.sender_address) {
+            return Err(StarknetError {
+                code: StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::UnauthorizedDeclare,
+                ),
+                message: format!(
+                    "Account address {} is not allowed to declare contracts.",
+                    &declare_v3_tx.sender_address
+                ),
+            });
+        }
+        Ok(())
+    }
+}
+
+/// CPU-intensive transaction processing, spawned in a blocking thread to avoid blocking other
+/// tasks from running.
+struct ProcessTxBlockingTask {
+    stateless_tx_validator: Arc<StatelessTransactionValidator>,
+    stateful_tx_validator: Arc<StatefulTransactionValidator>,
+    state_reader_factory: Arc<dyn StateReaderFactory>,
+    mempool_client: SharedMempoolClient,
+    chain_info: Arc<ChainInfo>,
+    tx: RpcTransaction,
+    transaction_converter: Arc<TransactionConverter>,
+    runtime: tokio::runtime::Handle,
+}
+
+impl ProcessTxBlockingTask {
+    pub fn new(gateway: &Gateway, tx: RpcTransaction, runtime: tokio::runtime::Handle) -> Self {
+        Self {
+            stateless_tx_validator: gateway.stateless_tx_validator.clone(),
+            stateful_tx_validator: gateway.stateful_tx_validator.clone(),
+            state_reader_factory: gateway.state_reader_factory.clone(),
+            mempool_client: gateway.mempool_client.clone(),
+            chain_info: gateway.chain_info.clone(),
+            tx,
+            transaction_converter: gateway.transaction_converter.clone(),
+            runtime,
+        }
+    }
+
+    // TODO(Arni): Make into async function and remove all block_on calls once we manage removing
+    // the spawn_blocking call.
+    fn process_tx(self) -> GatewayResult<AddTransactionArgs> {
+        // TODO(Arni, 1/5/2024): Perform congestion control.
+
+        // Perform stateless validations.
+        self.stateless_tx_validator.validate(&self.tx)?;
+
+        let internal_tx = self
+            .runtime
+            .block_on(self.transaction_converter.convert_rpc_tx_to_internal_rpc_tx(self.tx))
+            .map_err(|e| {
+                warn!("Failed to convert RPC transaction to internal RPC transaction: {}", e);
+                match e {
+                    TransactionConverterError::ValidateCompiledClassHashError(err) => {
+                        convert_compiled_class_hash_error(err)
+                    }
+                    other => {
+                        // TODO(yair): Fix this. Need to map the errors better.
+                        StarknetError::internal(&other.to_string())
+                    }
+                }
+            })?;
+
+        let executable_tx = self
+            .runtime
+            .block_on(
+                self.transaction_converter
+                    .convert_internal_rpc_tx_to_executable_tx(internal_tx.clone()),
+            )
+            .map_err(|e| {
+                warn!(
+                    "Failed to convert internal RPC transaction to executable transaction: {}",
+                    e
+                );
+                // TODO(yair): Fix this.
+                StarknetError::internal(&e.to_string())
+            })?;
+
+        let mut validator = self
+            .stateful_tx_validator
+            .instantiate_validator(self.state_reader_factory.as_ref(), &self.chain_info)?;
+
+        // Skip this validation during the systems bootstrap phase.
+        if self.stateless_tx_validator.config.validate_non_zero_resource_bounds {
+            // TODO(Arni): get next_l2_gas_price from the block header.
+            let previous_block_l2_gas_price =
+                validator.block_context().block_info().gas_prices.strk_gas_prices.l2_gas_price;
+            validate_tx_l2_gas_price_within_threshold(
+                executable_tx.resource_bounds(),
+                previous_block_l2_gas_price,
+                self.stateful_tx_validator.config.min_gas_price_percentage,
+            )?;
+        }
+
+        let address = executable_tx.contract_address();
+        let nonce = validator.get_nonce(address).map_err(|e| {
+            error!("Failed to get nonce for sender address {}: {}", address, e);
+            // TODO(yair): Fix this. Need to map the errors better.
+            StarknetError::internal(&e.to_string())
+        })?;
+
+        self.stateful_tx_validator
+            .run_validate(&executable_tx, nonce, self.mempool_client, validator, self.runtime)
+            .map_err(|e| StarknetError {
+                code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::ValidateFailure),
+                message: e.to_string(),
+            })?;
+
+        // TODO(Arni): Add the Sierra and the Casm to the mempool input.
+        Ok(AddTransactionArgs { tx: internal_tx, account_state: AccountState { address, nonce } })
+    }
+}
+
+// TODO(Arni): Consider running this validation for all gas prices.
+fn validate_tx_l2_gas_price_within_threshold(
+    tx_resource_bounds: ValidResourceBounds,
+    previous_block_l2_gas_price: NonzeroGasPrice,
+    min_gas_price_percentage: u8,
+) -> GatewayResult<()> {
+    match tx_resource_bounds {
+        ValidResourceBounds::AllResources(tx_resource_bounds) => {
+            let tx_l2_gas_price = tx_resource_bounds.l2_gas.max_price_per_unit;
+            let gas_price_threshold_multiplier =
+                Ratio::new(min_gas_price_percentage.into(), 100_u128);
+            let threshold =
+                (gas_price_threshold_multiplier * previous_block_l2_gas_price.get().0).to_integer();
+            if tx_l2_gas_price.0 < threshold {
+                return Err(StarknetError {
+                    // We didn't have this kind of an error.
+                    code: StarknetErrorCode::UnknownErrorCode(
+                        "StarknetErrorCode.GAS_PRICE_TOO_LOW".to_string(),
+                    ),
+                    message: format!(
+                        "Transaction L2 gas price {} is below the required threshold {}.",
+                        tx_l2_gas_price, threshold
+                    ),
+                });
+            }
+        }
+        ValidResourceBounds::L1Gas(_) => {
+            // No validation required for legacy transactions.
+        }
+    }
+
+    Ok(())
+}
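// Worked example of the threshold check above (illustrative numbers, not part
// of the patch): with min_gas_price_percentage = 80 and a previous block L2
// gas price of 100 fri, the threshold is Ratio::new(80, 100) * 100 = 80 fri.
// A transaction offering max_price_per_unit = 79 is rejected with
// GAS_PRICE_TOO_LOW, while 80 or more passes. Legacy L1-gas-only resource
// bounds skip the check entirely.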
+
+fn convert_compiled_class_hash_error(error: ValidateCompiledClassHashError) -> StarknetError {
+    let ValidateCompiledClassHashError::CompiledClassHashMismatch {
+        computed_class_hash,
+        supplied_class_hash,
+    } = error;
+    StarknetError {
+        code: StarknetErrorCode::UnknownErrorCode(
+            "StarknetErrorCode.INVALID_COMPILED_CLASS_HASH".to_string(),
+        ),
+        message: format!(
+            "Computed compiled class hash: {computed_class_hash} does not match the given value: \
+             {supplied_class_hash}.",
+        ),
+    }
+}
+
+pub fn create_gateway(
+    config: GatewayConfig,
+    shared_state_sync_client: SharedStateSyncClient,
+    mempool_client: SharedMempoolClient,
+    class_manager_client: SharedClassManagerClient,
+    runtime: tokio::runtime::Handle,
+) -> Gateway {
+    let state_reader_factory = Arc::new(SyncStateReaderFactory {
+        shared_state_sync_client,
+        class_manager_client: class_manager_client.clone(),
+        runtime,
+    });
+    let transaction_converter =
+        TransactionConverter::new(class_manager_client, config.chain_info.chain_id.clone());
+
+    Gateway::new(config, state_reader_factory, mempool_client, transaction_converter)
+}
+
+#[async_trait]
+impl ComponentStarter for Gateway {
+    async fn start(&mut self) {
+        register_metrics();
+    }
+}
+
+fn create_gateway_output(internal_rpc_tx: &InternalRpcTransaction) -> GatewayOutput {
+    let transaction_hash = internal_rpc_tx.tx_hash;
+    match &internal_rpc_tx.tx {
+        InternalRpcTransactionWithoutTxHash::Declare(declare_tx) => GatewayOutput::Declare(
+            DeclareGatewayOutput::new(transaction_hash, declare_tx.class_hash),
+        ),
+        InternalRpcTransactionWithoutTxHash::DeployAccount(deploy_account_tx) => {
+            GatewayOutput::DeployAccount(DeployAccountGatewayOutput::new(
+                transaction_hash,
+                deploy_account_tx.contract_address,
+            ))
+        }
+        InternalRpcTransactionWithoutTxHash::Invoke(_) => {
+            GatewayOutput::Invoke(InvokeGatewayOutput::new(transaction_hash))
+        }
+    }
+}
diff --git a/crates/apollo_gateway/src/gateway_test.rs b/crates/apollo_gateway/src/gateway_test.rs
new file mode 100644
index 00000000000..9181210e91e
--- /dev/null
+++ b/crates/apollo_gateway/src/gateway_test.rs
@@ -0,0 +1,530 @@
+use std::collections::HashSet;
+use std::fs::File;
+use std::sync::{Arc, LazyLock};
+
+use apollo_class_manager_types::transaction_converter::TransactionConverter;
+use apollo_class_manager_types::{ClassHashes, EmptyClassManagerClient, MockClassManagerClient};
+use apollo_config::dumping::SerializeConfig;
+use apollo_config::loading::load_and_process_config;
+use apollo_gateway_types::deprecated_gateway_error::{KnownStarknetErrorCode, StarknetErrorCode};
+use apollo_gateway_types::gateway_types::{
+    DeclareGatewayOutput,
+    DeployAccountGatewayOutput,
+    GatewayOutput,
+    InvokeGatewayOutput,
+};
+use apollo_mempool_types::communication::{
+    AddTransactionArgsWrapper,
+    MempoolClientError,
+    MempoolClientResult,
+    MockMempoolClient,
+};
+use apollo_mempool_types::errors::MempoolError;
+use apollo_mempool_types::mempool_types::{AccountState, AddTransactionArgs};
+use apollo_network_types::network_types::BroadcastedMessageMetadata;
+use apollo_test_utils::{get_rng, GetTestInstance};
+use assert_matches::assert_matches;
+use blockifier::context::ChainInfo;
+use blockifier::test_utils::initial_test_state::fund_account;
+use blockifier_test_utils::cairo_versions::{CairoVersion, RunnableCairo1};
+use blockifier_test_utils::calldata::create_trivial_calldata;
+use blockifier_test_utils::contracts::FeatureContract;
+use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass;
+use clap::Command;
+use mempool_test_utils::starknet_api_test_utils::{
+    contract_class,
+    declare_tx,
+    test_valid_resource_bounds,
+    VALID_ACCOUNT_BALANCE,
+};
+use metrics_exporter_prometheus::PrometheusBuilder;
+use mockall::predicate::eq;
+use rstest::{fixture, rstest};
+use starknet_api::contract_class::{ContractClass, SierraVersion};
+use starknet_api::core::{CompiledClassHash, ContractAddress, Nonce};
+use starknet_api::rpc_transaction::{
+    RpcDeclareTransaction,
+    RpcTransaction,
+    RpcTransactionLabelValue,
+};
+use starknet_api::test_utils::declare::DeclareTxArgsWithContractClass;
+use starknet_api::test_utils::deploy_account::DeployAccountTxArgs;
+use starknet_api::test_utils::invoke::InvokeTxArgs;
+use starknet_api::test_utils::{TestingTxArgs, CHAIN_ID_FOR_TESTS};
+use starknet_api::transaction::fields::TransactionSignature;
+use starknet_api::transaction::TransactionHash;
+use starknet_api::{
+    contract_address,
+    declare_tx_args,
+    deploy_account_tx_args,
+    invoke_tx_args,
+    nonce,
+};
+use starknet_types_core::felt::Felt;
+use strum::VariantNames;
+use tempfile::TempDir;
+
+use crate::config::{
+    GatewayConfig,
+    StatefulTransactionValidatorConfig,
+    StatelessTransactionValidatorConfig,
+};
+use crate::errors::GatewayResult;
+use crate::gateway::Gateway;
+use crate::metrics::{
+    register_metrics,
+    GatewayMetricHandle,
+    SourceLabelValue,
+    GATEWAY_ADD_TX_LATENCY,
+    GATEWAY_TRANSACTIONS_FAILED,
+    GATEWAY_TRANSACTIONS_RECEIVED,
+    GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL,
+    LABEL_NAME_SOURCE,
+    LABEL_NAME_TX_TYPE,
+};
+use crate::state_reader_test_utils::{local_test_state_reader_factory, TestStateReaderFactory};
+
+#[fixture]
+fn config() -> GatewayConfig {
+    GatewayConfig {
+        stateless_tx_validator_config: StatelessTransactionValidatorConfig::default(),
+        stateful_tx_validator_config: StatefulTransactionValidatorConfig::default(),
+        chain_info: ChainInfo::create_for_testing(),
+        block_declare: false,
+        authorized_declarer_accounts: None,
+    }
+}
+
+#[fixture]
+fn state_reader_factory() -> TestStateReaderFactory {
+    local_test_state_reader_factory(CairoVersion::Cairo1(RunnableCairo1::Casm), true)
+}
+
+#[fixture]
+fn mock_dependencies(
+    config: GatewayConfig,
+    state_reader_factory: TestStateReaderFactory,
+) -> MockDependencies {
+    let mock_mempool_client = MockMempoolClient::new();
+    // TODO(noamsp): use MockTransactionConverter
+    let mock_class_manager_client = MockClassManagerClient::new();
+    MockDependencies {
+        config,
+        state_reader_factory,
+        mock_mempool_client,
+        mock_class_manager_client,
+    }
+}
+
+struct MockDependencies {
+    config: GatewayConfig,
+    state_reader_factory: TestStateReaderFactory,
+    mock_mempool_client: MockMempoolClient,
+    mock_class_manager_client: MockClassManagerClient,
+}
+
+impl MockDependencies {
+    fn gateway(self) -> Gateway {
+        register_metrics();
+        let chain_id = self.config.chain_info.chain_id.clone();
+        Gateway::new(
+            self.config,
+            Arc::new(self.state_reader_factory),
+            Arc::new(self.mock_mempool_client),
+            TransactionConverter::new(Arc::new(self.mock_class_manager_client), chain_id),
+        )
+    }
+
+    fn expect_add_tx(&mut self, args: AddTransactionArgsWrapper, result: MempoolClientResult<()>) {
+        self.mock_mempool_client.expect_add_tx().once().with(eq(args)).return_once(|_| result);
+    }
+}
+
+fn account_contract() -> FeatureContract {
+    FeatureContract::AccountWithoutValidations(CairoVersion::Cairo1(RunnableCairo1::Casm))
+}
+
+fn invoke_args() -> InvokeTxArgs {
+    let cairo_version = CairoVersion::Cairo1(RunnableCairo1::Casm);
+    let test_contract = FeatureContract::TestContract(cairo_version);
+    let mut args = invoke_tx_args!(
+        resource_bounds: test_valid_resource_bounds(),
+        sender_address: account_contract().get_instance_address(0),
+        calldata: create_trivial_calldata(test_contract.get_instance_address(0))
+    );
+    let internal_tx = args.get_internal_tx();
+    args.tx_hash = internal_tx.tx.calculate_transaction_hash(&CHAIN_ID_FOR_TESTS).unwrap();
+    args
+}
+
+/// Make a deploy account transaction with a default salt.
+fn deploy_account_args() -> DeployAccountTxArgs {
+    let mut args = deploy_account_tx_args!(
+        class_hash: account_contract().get_class_hash(),
+        resource_bounds: test_valid_resource_bounds(),
+    );
+    let internal_tx = args.get_internal_tx();
+    args.tx_hash = internal_tx.tx.calculate_transaction_hash(&CHAIN_ID_FOR_TESTS).unwrap();
+    args
+}
+
+fn declare_args() -> DeclareTxArgsWithContractClass {
+    let contract_class = contract_class();
+    let mut args = DeclareTxArgsWithContractClass {
+        args: declare_tx_args!(
+            signature: TransactionSignature(vec![Felt::ZERO].into()),
+            sender_address: account_contract().get_instance_address(0),
+            resource_bounds: test_valid_resource_bounds(),
+            class_hash: contract_class.calculate_class_hash(),
+            compiled_class_hash: default_compiled_contract_class().compiled_class_hash(),
+        ),
+        contract_class,
+    };
+    let internal_tx = args.get_internal_tx();
+    args.args.tx_hash = internal_tx.tx.calculate_transaction_hash(&CHAIN_ID_FOR_TESTS).unwrap();
+    args
+}
+
+fn default_compiled_contract_class() -> ContractClass {
+    let casm = CasmContractClass {
+        prime: Default::default(),
+        compiler_version: Default::default(),
+        bytecode: Default::default(),
+        bytecode_segment_lengths: Default::default(),
+        hints: Default::default(),
+        pythonic_hints: Default::default(),
+        entry_points_by_type: Default::default(),
+    };
+    let sierra_version = SierraVersion::default();
+    ContractClass::V1((casm, sierra_version))
+}
+
+/// Sets up the MockClassManagerClient to expect the addition and retrieval of the test contract
+/// class; the mock returns the class hash and the default compiled class hash for it.
+fn setup_class_manager_client_mock(
+    mock_class_manager_client: &mut MockClassManagerClient,
+    rpc_tx: RpcTransaction,
+) {
+    let RpcTransaction::Declare(RpcDeclareTransaction::V3(declare_tx)) = rpc_tx else {
+        return;
+    };
+
+    let contract_class = declare_tx.contract_class;
+    let class_hash = contract_class.calculate_class_hash();
+    let casm = default_compiled_contract_class();
+    let executable_class_hash = casm.compiled_class_hash();
+
+    mock_class_manager_client
+        .expect_add_class()
+        .once()
+        .with(eq(contract_class.clone()))
+        .return_once(move |_| Ok(ClassHashes { class_hash, executable_class_hash }));
+    mock_class_manager_client
+        .expect_get_sierra()
+        .once()
+        .with(eq(class_hash))
+        .return_once(move |_| Ok(Some(contract_class)));
+    mock_class_manager_client
+        .expect_get_executable()
+        .once()
+        .with(eq(class_hash))
+        .return_once(move |_| Ok(Some(casm)));
+}
+
+fn check_positive_add_tx_result(tx_args: impl TestingTxArgs, result: GatewayOutput) {
+    let rpc_tx = tx_args.get_rpc_tx();
+    let expected_internal_tx = tx_args.get_internal_tx();
+    let tx_hash = expected_internal_tx.tx_hash();
+    assert_eq!(
+        result,
+        match rpc_tx {
+            RpcTransaction::Declare(RpcDeclareTransaction::V3(tx)) => {
+                GatewayOutput::Declare(DeclareGatewayOutput::new(
+                    tx_hash,
+                    tx.contract_class.calculate_class_hash(),
+                ))
+            }
+            RpcTransaction::DeployAccount(_) => {
+                let address = expected_internal_tx.contract_address();
+                GatewayOutput::DeployAccount(DeployAccountGatewayOutput::new(tx_hash, address))
+            }
+            RpcTransaction::Invoke(_) => GatewayOutput::Invoke(InvokeGatewayOutput::new(tx_hash)),
+        }
+    );
+}
+
+static P2P_MESSAGE_METADATA: LazyLock<Option<BroadcastedMessageMetadata>> =
+    LazyLock::new(|| Some(BroadcastedMessageMetadata::get_test_instance(&mut get_rng())));
+fn p2p_message_metadata() -> Option<BroadcastedMessageMetadata> {
+    P2P_MESSAGE_METADATA.clone()
+}
+
+async fn setup_mock_state(
+    mock_dependencies: &mut MockDependencies,
+    tx_args: &impl TestingTxArgs,
+    expected_mempool_result: Result<(), MempoolClientError>,
+) {
+    let input_tx = tx_args.get_rpc_tx();
+    let expected_internal_tx = tx_args.get_internal_tx();
+
+    setup_class_manager_client_mock(
+        &mut mock_dependencies.mock_class_manager_client,
+        input_tx.clone(),
+    );
+
+    let address = expected_internal_tx.contract_address();
+    fund_account(
+        &mock_dependencies.config.chain_info,
+        address,
+        VALID_ACCOUNT_BALANCE,
+        &mut mock_dependencies.state_reader_factory.state_reader.blockifier_state_reader,
+    );
+
+    let mempool_add_tx_args = AddTransactionArgs {
+        tx: expected_internal_tx.clone(),
+        account_state: AccountState { address, nonce: *input_tx.nonce() },
+    };
+    mock_dependencies.expect_add_tx(
+        AddTransactionArgsWrapper {
+            args: mempool_add_tx_args,
+            p2p_message_metadata: p2p_message_metadata(),
+        },
+        expected_mempool_result,
+    );
+}
+
+struct AddTxResults {
+    result: GatewayResult<GatewayOutput>,
+    metric_handle_for_queries: GatewayMetricHandle,
+    metrics: String,
+}
+
+async fn run_add_tx_and_extract_metrics(
+    mock_dependencies: MockDependencies,
+    tx_args: &impl TestingTxArgs,
+) -> AddTxResults {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+
+    let input_tx = tx_args.get_rpc_tx();
+    let gateway = mock_dependencies.gateway();
+    let result = gateway.add_tx(input_tx.clone(), p2p_message_metadata()).await;
+
+    let metric_handle_for_queries = GatewayMetricHandle::new(&input_tx, &p2p_message_metadata());
+    let metrics = recorder.handle().render();
+
+    AddTxResults { result, metric_handle_for_queries, metrics }
+}
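// The local-recorder pattern above, in isolation (a sketch assuming the
// `metrics` and `metrics-exporter-prometheus` crates as imported; the counter
// name is made up):
//
//     let recorder = PrometheusBuilder::new().build_recorder();
//     let _guard = metrics::set_default_local_recorder(&recorder);
//     metrics::counter!("example_counter").increment(1);
//     assert!(recorder.handle().render().contains("example_counter"));
//
// Each test builds its own recorder, so metric assertions do not leak between
// tests running in parallel.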
+
+// TODO(AlonH): add test with Some broadcasted message metadata
+#[rstest]
+#[case::tx_with_duplicate_tx_hash(
+    Err(MempoolClientError::MempoolError(MempoolError::DuplicateTransaction { tx_hash: TransactionHash::default() })),
+    StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::DuplicatedTransaction)
+)]
+#[case::tx_with_duplicate_nonce(
+    Err(MempoolClientError::MempoolError(MempoolError::DuplicateNonce { address: ContractAddress::default(), nonce: Nonce::default() })),
+    StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InvalidTransactionNonce)
+)]
+#[case::tx_with_nonce_too_old(
+    Err(MempoolClientError::MempoolError(MempoolError::NonceTooOld { address: ContractAddress::default(), tx_nonce: Nonce::default(), account_nonce: nonce!(1) })),
+    StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InvalidTransactionNonce)
+)]
+#[case::tx_with_nonce_too_large(
+    Err(MempoolClientError::MempoolError(MempoolError::NonceTooLarge(Nonce::default()))),
+    StarknetErrorCode::UnknownErrorCode("StarknetErrorCode.NONCE_TOO_LARGE".to_string())
+)]
+#[tokio::test]
+async fn test_add_tx_negative(
+    mut mock_dependencies: MockDependencies,
+    #[values(invoke_args(), deploy_account_args(), declare_args())] tx_args: impl TestingTxArgs,
+    #[case] expected_mempool_result: Result<(), MempoolClientError>,
+    #[case] expected_error_code: StarknetErrorCode,
+) {
+    setup_mock_state(&mut mock_dependencies, &tx_args, expected_mempool_result).await;
+
+    let AddTxResults { result, metric_handle_for_queries, metrics } =
+        run_add_tx_and_extract_metrics(mock_dependencies, &tx_args).await;
+
+    assert_eq!(
+        metric_handle_for_queries.get_metric_value(GATEWAY_TRANSACTIONS_RECEIVED, &metrics),
+        1
+    );
+    assert_eq!(
+        metric_handle_for_queries.get_metric_value(GATEWAY_TRANSACTIONS_FAILED, &metrics),
+        1
+    );
+    assert_eq!(result.unwrap_err().code, expected_error_code);
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_add_tx_positive(
+    mut mock_dependencies: MockDependencies,
+    #[values(invoke_args(), deploy_account_args(), declare_args())] tx_args: impl TestingTxArgs,
+) {
+    setup_mock_state(&mut mock_dependencies, &tx_args, Ok(())).await;
+
+    let AddTxResults { result, metric_handle_for_queries, metrics } =
+        run_add_tx_and_extract_metrics(mock_dependencies, &tx_args).await;
+
+    assert_eq!(
+        metric_handle_for_queries.get_metric_value(GATEWAY_TRANSACTIONS_RECEIVED, &metrics),
+        1
+    );
+    assert_eq!(
+        metric_handle_for_queries.get_metric_value(GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL, &metrics),
+        1
+    );
+    check_positive_add_tx_result(tx_args, result.unwrap());
+}
+// Gateway spec errors tests.
+// TODO(Arni): Add tests for all the error cases. Check the response (use `into_response` on the
+// result of `add_tx`).
+// TODO(shahak): Test that when an error occurs in handle_request, then it returns the given p2p
+// metadata.
+// TODO(noamsp): Remove ignore from compiled_class_hash_mismatch once class manager component is
+// implemented.
+#[rstest]
+#[tokio::test]
+#[ignore]
+async fn test_compiled_class_hash_mismatch(mock_dependencies: MockDependencies) {
+    let mut declare_tx =
+        assert_matches!(declare_tx(), RpcTransaction::Declare(RpcDeclareTransaction::V3(tx)) => tx);
+    declare_tx.compiled_class_hash = CompiledClassHash::default();
+    let tx = RpcTransaction::Declare(RpcDeclareTransaction::V3(declare_tx));
+
+    let gateway = mock_dependencies.gateway();
+
+    let err = gateway.add_tx(tx, None).await.unwrap_err();
+    let expected_code = StarknetErrorCode::UnknownErrorCode(
+        "StarknetErrorCode.INVALID_COMPILED_CLASS_HASH".to_string(),
+    );
+    assert_eq!(err.code, expected_code);
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_block_declare_config(
+    mut config: GatewayConfig,
+    state_reader_factory: TestStateReaderFactory,
+) {
+    config.block_declare = true;
+    let gateway = Gateway::new(
+        config,
+        Arc::new(state_reader_factory),
+        Arc::new(MockMempoolClient::new()),
+        TransactionConverter::new(
+            Arc::new(EmptyClassManagerClient),
+            ChainInfo::create_for_testing().chain_id,
+        ),
+    );
+
+    let result = gateway.add_tx(declare_tx(), None).await;
+    let expected_code = StarknetErrorCode::UnknownErrorCode(
+        "StarknetErrorCode.BLOCKED_TRANSACTION_TYPE".to_string(),
+    );
+    assert_eq!(result.unwrap_err().code, expected_code);
+}
+
+#[test]
+fn test_register_metrics() {
+    let recorder = PrometheusBuilder::new().build_recorder();
+    let _recorder_guard = metrics::set_default_local_recorder(&recorder);
+    register_metrics();
+    let metrics = recorder.handle().render();
+    for tx_type in RpcTransactionLabelValue::VARIANTS {
+        for source in SourceLabelValue::VARIANTS {
+            let labels: &[(&str, &str); 2] =
+                &[(LABEL_NAME_TX_TYPE, tx_type), (LABEL_NAME_SOURCE, source)];
+
+            assert_eq!(
+                GATEWAY_TRANSACTIONS_RECEIVED
+                    .parse_numeric_metric::<u64>(&metrics, labels)
+                    .unwrap(),
+                0
+            );
+            assert_eq!(
+                GATEWAY_TRANSACTIONS_FAILED.parse_numeric_metric::<u64>(&metrics, labels).unwrap(),
+                0
+            );
+            assert_eq!(
+                GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL
+                    .parse_numeric_metric::<u64>(&metrics, labels)
+                    .unwrap(),
+                0
+            );
+            assert_eq!(GATEWAY_ADD_TX_LATENCY.parse_histogram_metric(&metrics).unwrap().sum, 0.0);
+            assert_eq!(GATEWAY_ADD_TX_LATENCY.parse_histogram_metric(&metrics).unwrap().count, 0);
+        }
+    }
+}
+
+#[rstest]
+#[tokio::test]
+async fn test_unauthorized_declare_config(
+    mut config: GatewayConfig,
+    state_reader_factory: TestStateReaderFactory,
+) {
+    let authorized_address = contract_address!("0x1");
+    config.authorized_declarer_accounts = Some(vec![authorized_address]);
+
+    let gateway = Gateway::new(
+        config,
+        Arc::new(state_reader_factory),
+        Arc::new(MockMempoolClient::new()),
+        TransactionConverter::new(
+            Arc::new(EmptyClassManagerClient),
+            ChainInfo::create_for_testing().chain_id,
+        ),
+    );
+
+    let rpc_declare_tx = declare_tx();
+
+    // Ensure the sender address is different from the authorized address.
+    assert_ne!(
+        rpc_declare_tx.calculate_sender_address().unwrap(),
+        authorized_address,
+        "Sender address should not be authorized"
+    );
+
+    let gateway_output_code_error = gateway.add_tx(rpc_declare_tx, None).await.unwrap_err().code;
+    let expected_code_error =
+        StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::UnauthorizedDeclare);
+
+    assert_eq!(gateway_output_code_error, expected_code_error);
+}
+
+#[rstest]
+#[case::two_addresses(
+    Some(vec![
+        contract_address!("0x1"),
+        contract_address!("0x2"),
+    ])
+)]
+#[case::one_address(
+    Some(vec![
+        contract_address!("0x1"),
+    ])
+)]
+#[case::none(None)]
+fn test_full_cycle_dump_deserialize_authorized_declarer_accounts(
+    #[case] authorized_declarer_accounts: Option<Vec<ContractAddress>>,
+) {
+    let original_config = GatewayConfig { authorized_declarer_accounts, ..Default::default() };
+
+    // Create a temporary file to dump the config; keep the TempDir handle alive so the
+    // directory is not deleted before the file is read back.
+    let temp_dir = TempDir::new().unwrap();
+    let file_path = temp_dir.path().join("config.json");
+    original_config.dump_to_file(&vec![], &HashSet::new(), file_path.to_str().unwrap()).unwrap();
+
+    // Load the config from the dumped config file.
+    let loaded_config = load_and_process_config::<GatewayConfig>(
+        File::open(file_path).unwrap(), // Config file to load.
+        Command::new(""),               // Unused CLI context.
+        vec![],                         // No override CLI args.
+        false,                          // Use schema defaults.
+    )
+    .unwrap();
+
+    assert_eq!(loaded_config, original_config);
+}
diff --git a/crates/apollo_gateway/src/lib.rs b/crates/apollo_gateway/src/lib.rs
new file mode 100644
index 00000000000..83359e64877
--- /dev/null
+++ b/crates/apollo_gateway/src/lib.rs
@@ -0,0 +1,20 @@
+pub mod communication;
+mod compiler_version;
+pub mod config;
+pub mod errors;
+pub mod gateway;
+pub mod metrics;
+pub mod rpc_objects;
+pub mod rpc_state_reader;
+#[cfg(test)]
+mod rpc_state_reader_test;
+pub mod state_reader;
+#[cfg(any(feature = "testing", test))]
+pub mod state_reader_test_utils;
+mod stateful_transaction_validator;
+mod stateless_transaction_validator;
+mod sync_state_reader;
+#[cfg(test)]
+mod sync_state_reader_test;
+#[cfg(test)]
+mod test_utils;
diff --git a/crates/apollo_gateway/src/metrics.rs b/crates/apollo_gateway/src/metrics.rs
new file mode 100644
index 00000000000..51d9791cc88
--- /dev/null
+++ b/crates/apollo_gateway/src/metrics.rs
@@ -0,0 +1,94 @@
+#[cfg(test)]
+use apollo_metrics::metrics::LabeledMetricCounter;
+use apollo_metrics::{define_metrics, generate_permutation_labels};
+use apollo_network_types::network_types::BroadcastedMessageMetadata;
+use starknet_api::rpc_transaction::{RpcTransaction, RpcTransactionLabelValue};
+use strum::{EnumVariantNames, VariantNames};
+use strum_macros::IntoStaticStr;
+
+pub const LABEL_NAME_TX_TYPE: &str = "tx_type";
+pub const LABEL_NAME_SOURCE: &str = "source";
+
+generate_permutation_labels! {
+    TRANSACTION_TYPE_AND_SOURCE_LABELS,
+    (LABEL_NAME_TX_TYPE, RpcTransactionLabelValue),
+    (LABEL_NAME_SOURCE, SourceLabelValue),
+}
+
+define_metrics!(
+    Gateway => {
+        LabeledMetricCounter { GATEWAY_TRANSACTIONS_RECEIVED, "gateway_transactions_received", "Counter of transactions received", init = 0, labels = TRANSACTION_TYPE_AND_SOURCE_LABELS },
+        LabeledMetricCounter { GATEWAY_TRANSACTIONS_FAILED, "gateway_transactions_failed", "Counter of failed transactions", init = 0, labels = TRANSACTION_TYPE_AND_SOURCE_LABELS },
+        LabeledMetricCounter { GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL, "gateway_transactions_sent_to_mempool", "Counter of transactions sent to the mempool", init = 0, labels = TRANSACTION_TYPE_AND_SOURCE_LABELS },
+        MetricHistogram { GATEWAY_ADD_TX_LATENCY, "gateway_add_tx_latency", "Latency of gateway add_tx function in secs" },
+        MetricHistogram { GATEWAY_VALIDATE_TX_LATENCY, "gateway_validate_tx_latency", "Latency of gateway validate function in secs" },
+    },
+);
+
+#[derive(Clone, Copy, Debug, IntoStaticStr, EnumVariantNames)]
+#[strum(serialize_all = "snake_case")]
+pub enum SourceLabelValue {
+    Http,
+    P2p,
+}
+
+enum TransactionStatus {
+    SentToMempool,
+    Failed,
+}
+
+pub(crate) struct GatewayMetricHandle {
+    tx_type: RpcTransactionLabelValue,
+    source: SourceLabelValue,
+    tx_status: TransactionStatus,
+}
+
+impl GatewayMetricHandle {
+    pub fn new(
+        tx: &RpcTransaction,
+        p2p_message_metadata: &Option<BroadcastedMessageMetadata>,
+    ) -> Self {
+        let tx_type = RpcTransactionLabelValue::from(tx);
+        let source = match p2p_message_metadata {
+            Some(_) => SourceLabelValue::P2p,
+            None => SourceLabelValue::Http,
+        };
+        Self { tx_type, source, tx_status: TransactionStatus::Failed }
+    }
+
+    fn label(&self) -> Vec<(&'static str, &'static str)> {
+        vec![(LABEL_NAME_TX_TYPE, self.tx_type.into()), (LABEL_NAME_SOURCE, self.source.into())]
+    }
+
+    pub fn count_transaction_received(&self) {
+        GATEWAY_TRANSACTIONS_RECEIVED.increment(1, &self.label());
+    }
+
+    pub fn transaction_sent_to_mempool(&mut self) {
+        self.tx_status = TransactionStatus::SentToMempool;
+    }
+
+    #[cfg(test)]
+    pub fn get_metric_value(&self, metric_counter: LabeledMetricCounter, metrics: &str) -> u64 {
+        metric_counter.parse_numeric_metric::<u64>(metrics, &self.label()).unwrap()
+    }
+}
+
+impl Drop for GatewayMetricHandle {
+    fn drop(&mut self) {
+        match self.tx_status {
+            TransactionStatus::SentToMempool => {
+                GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL.increment(1, &self.label())
+            }
+            TransactionStatus::Failed => GATEWAY_TRANSACTIONS_FAILED.increment(1, &self.label()),
+        }
+    }
+}
+
+pub(crate) fn register_metrics() {
+    GATEWAY_TRANSACTIONS_RECEIVED.register();
+    GATEWAY_TRANSACTIONS_FAILED.register();
+    GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL.register();
+    GATEWAY_ADD_TX_LATENCY.register();
+    GATEWAY_VALIDATE_TX_LATENCY.register();
+}
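// Usage sketch for the handle above (illustrative; `tx` stands for any
// RpcTransaction value): the handle starts in the Failed state and flips to
// SentToMempool only on success, so the Drop impl records exactly one terminal
// counter per transaction, even on early returns.
//
//     let mut handle = GatewayMetricHandle::new(&tx, &None);
//     handle.count_transaction_received();
//     // ... processing; on success:
//     handle.transaction_sent_to_mempool();
//     // on drop: GATEWAY_TRANSACTIONS_SENT_TO_MEMPOOL (or _FAILED) increments.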
diff --git a/crates/starknet_gateway/src/rpc_objects.rs b/crates/apollo_gateway/src/rpc_objects.rs
similarity index 93%
rename from crates/starknet_gateway/src/rpc_objects.rs
rename to crates/apollo_gateway/src/rpc_objects.rs
index 78ec71f82a0..81b4be46d4a 100644
--- a/crates/starknet_gateway/src/rpc_objects.rs
+++ b/crates/apollo_gateway/src/rpc_objects.rs
@@ -7,6 +7,7 @@ use starknet_api::block::{
     BlockNumber,
     BlockTimestamp,
     GasPrice,
+    GasPricePerToken,
     NonzeroGasPrice,
 };
 use starknet_api::core::{ClassHash, ContractAddress, GlobalRoot};
@@ -65,12 +66,6 @@ pub struct GetBlockWithTxHashesParams {
     pub block_id: BlockId,
 }
 
-#[derive(Debug, Default, Deserialize, Serialize)]
-pub struct ResourcePrice {
-    pub price_in_wei: GasPrice,
-    pub price_in_fri: GasPrice,
-}
-
 #[derive(Debug, Default, Deserialize, Serialize)]
 pub struct BlockHeader {
     pub block_hash: BlockHash,
@@ -79,9 +74,9 @@ pub struct BlockHeader {
     pub sequencer_address: ContractAddress,
     pub new_root: GlobalRoot,
     pub timestamp: BlockTimestamp,
-    pub l1_gas_price: ResourcePrice,
-    pub l1_data_gas_price: ResourcePrice,
-    pub l2_gas_price: ResourcePrice,
+    pub l1_gas_price: GasPricePerToken,
+    pub l1_data_gas_price: GasPricePerToken,
+    pub l2_gas_price: GasPricePerToken,
     pub l1_da_mode: L1DataAvailabilityMode,
     pub starknet_version: String,
 }
diff --git a/crates/starknet_gateway/src/rpc_state_reader.rs b/crates/apollo_gateway/src/rpc_state_reader.rs
similarity index 96%
rename from crates/starknet_gateway/src/rpc_state_reader.rs
rename to crates/apollo_gateway/src/rpc_state_reader.rs
index e6abf8cd0a1..80b7e739d3d 100644
--- a/crates/starknet_gateway/src/rpc_state_reader.rs
+++ b/crates/apollo_gateway/src/rpc_state_reader.rs
@@ -1,3 +1,5 @@
+use apollo_rpc::CompiledContractClass;
+use apollo_state_sync_types::communication::StateSyncClientResult;
 use blockifier::execution::contract_class::{
     CompiledClassV0,
     CompiledClassV1,
@@ -5,7 +7,6 @@ use blockifier::execution::contract_class::{
 };
 use blockifier::state::errors::StateError;
 use blockifier::state::state_api::{StateReader as BlockifierStateReader, StateResult};
-use papyrus_rpc::CompiledContractClass;
 use reqwest::blocking::Client as BlockingClient;
 use serde::Serialize;
 use serde_json::{json, Value};
@@ -185,8 +186,10 @@ pub struct RpcStateReaderFactory {
 }
 
 impl StateReaderFactory for RpcStateReaderFactory {
-    fn get_state_reader_from_latest_block(&self) -> Box<dyn MempoolStateReader> {
-        Box::new(RpcStateReader::from_latest(&self.config))
+    fn get_state_reader_from_latest_block(
+        &self,
+    ) -> StateSyncClientResult<Box<dyn MempoolStateReader>> {
+        Ok(Box::new(RpcStateReader::from_latest(&self.config)))
     }
 
     fn get_state_reader(&self, block_number: BlockNumber) -> Box<dyn MempoolStateReader> {
diff --git a/crates/starknet_gateway/src/rpc_state_reader_test.rs b/crates/apollo_gateway/src/rpc_state_reader_test.rs
similarity index 94%
rename from crates/starknet_gateway/src/rpc_state_reader_test.rs
rename to crates/apollo_gateway/src/rpc_state_reader_test.rs
index cc0a915554c..285b3ac9c4a 100644
--- a/crates/starknet_gateway/src/rpc_state_reader_test.rs
+++ b/crates/apollo_gateway/src/rpc_state_reader_test.rs
@@ -1,11 +1,11 @@
+use apollo_rpc::CompiledContractClass;
 use blockifier::blockifier::block::validated_gas_prices;
 use blockifier::execution::contract_class::RunnableCompiledClass;
 use blockifier::state::state_api::StateReader;
 use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass;
-use papyrus_rpc::CompiledContractClass;
 use serde::Serialize;
 use serde_json::json;
-use starknet_api::block::{BlockInfo, BlockNumber};
+use starknet_api::block::{BlockInfo, BlockNumber, GasPricePerToken};
 use starknet_api::contract_class::SierraVersion;
 use starknet_api::{class_hash, contract_address, felt, nonce};
@@ -18,7 +18,6 @@ use crate::rpc_objects::{
     GetCompiledClassParams,
     GetNonceParams,
     GetStorageAtParams,
-    ResourcePrice,
     RpcResponse,
     RpcSuccessResponse,
 };
@@ -57,9 +56,10 @@ async fn test_get_block_info() {
     let config = RpcStateReaderConfig { url: server.url(), ..Default::default() };
 
     // GasPrice must be non-zero.
-    let l1_gas_price = ResourcePrice { price_in_wei: 1_u8.into(), price_in_fri: 1_u8.into() };
-    let l1_data_gas_price = ResourcePrice { price_in_wei: 1_u8.into(), price_in_fri: 1_u8.into() };
-    let l2_gas_price = ResourcePrice { price_in_wei: 1_u8.into(), price_in_fri: 1_u8.into() };
+    let l1_gas_price = GasPricePerToken { price_in_wei: 1_u8.into(), price_in_fri: 1_u8.into() };
+    let l1_data_gas_price =
+        GasPricePerToken { price_in_wei: 1_u8.into(), price_in_fri: 1_u8.into() };
+    let l2_gas_price = GasPricePerToken { price_in_wei: 1_u8.into(), price_in_fri: 1_u8.into() };
     let gas_prices = validated_gas_prices(
         l1_gas_price.price_in_wei.try_into().unwrap(),
         l1_gas_price.price_in_fri.try_into().unwrap(),
diff --git a/crates/apollo_gateway/src/state_reader.rs b/crates/apollo_gateway/src/state_reader.rs
new file mode 100644
index 00000000000..89fdebff828
--- /dev/null
+++ b/crates/apollo_gateway/src/state_reader.rs
@@ -0,0 +1,57 @@
+use apollo_state_sync_types::communication::StateSyncClientResult;
+use blockifier::execution::contract_class::RunnableCompiledClass;
+use blockifier::state::errors::StateError;
+use blockifier::state::state_api::{StateReader as BlockifierStateReader, StateResult};
+#[cfg(test)]
+use mockall::automock;
+use starknet_api::block::{BlockInfo, BlockNumber};
+use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce};
+use starknet_api::state::StorageKey;
+use starknet_types_core::felt::Felt;
+
+pub trait MempoolStateReader: BlockifierStateReader + Send + Sync {
+    fn get_block_info(&self) -> Result<BlockInfo, StateError>;
+}
+
+#[cfg_attr(test, automock)]
+pub trait StateReaderFactory: Send + Sync {
+    fn get_state_reader_from_latest_block(
+        &self,
+    ) -> StateSyncClientResult<Box<dyn MempoolStateReader>>;
+    fn get_state_reader(&self, block_number: BlockNumber) -> Box<dyn MempoolStateReader>;
+}
+
+// By default, a Box<dyn Trait> does not implement the traits of the object it contains.
+// Therefore, to use the Box<dyn MempoolStateReader> that the StateReaderFactory creates,
+// we need to implement the MempoolStateReader trait for Box<dyn MempoolStateReader>.
+impl MempoolStateReader for Box<dyn MempoolStateReader> {
+    fn get_block_info(&self) -> Result<BlockInfo, StateError> {
+        self.as_ref().get_block_info()
+    }
+}
+
+impl BlockifierStateReader for Box<dyn MempoolStateReader> {
+    fn get_storage_at(
+        &self,
+        contract_address: ContractAddress,
+        key: StorageKey,
+    ) -> StateResult<Felt> {
+        self.as_ref().get_storage_at(contract_address, key)
+    }
+
+    fn get_nonce_at(&self, contract_address: ContractAddress) -> StateResult<Nonce> {
+        self.as_ref().get_nonce_at(contract_address)
+    }
+
+    fn get_class_hash_at(&self, contract_address: ContractAddress) -> StateResult<ClassHash> {
+        self.as_ref().get_class_hash_at(contract_address)
+    }
+
+    fn get_compiled_class(&self, class_hash: ClassHash) -> StateResult<RunnableCompiledClass> {
+        self.as_ref().get_compiled_class(class_hash)
+    }
+
+    fn get_compiled_class_hash(&self, class_hash: ClassHash) -> StateResult<CompiledClassHash> {
+        self.as_ref().get_compiled_class_hash(class_hash)
+    }
+}
diff --git a/crates/starknet_gateway/src/state_reader_test_utils.rs b/crates/apollo_gateway/src/state_reader_test_utils.rs
similarity index 85%
rename from crates/starknet_gateway/src/state_reader_test_utils.rs
rename to crates/apollo_gateway/src/state_reader_test_utils.rs
index 80c8fc8b516..932a7412cb9 100644
--- a/crates/starknet_gateway/src/state_reader_test_utils.rs
+++ b/crates/apollo_gateway/src/state_reader_test_utils.rs
@@ -1,11 +1,13 @@
+use apollo_state_sync_types::communication::StateSyncClientResult;
 use blockifier::context::BlockContext;
 use blockifier::execution::contract_class::RunnableCompiledClass;
 use blockifier::state::errors::StateError;
 use blockifier::state::state_api::{StateReader as BlockifierStateReader, StateResult};
-use blockifier::test_utils::contracts::FeatureContract;
 use blockifier::test_utils::dict_state_reader::DictStateReader;
 use blockifier::test_utils::initial_test_state::test_state;
-use blockifier::test_utils::{CairoVersion, BALANCE};
+use blockifier_test_utils::cairo_versions::CairoVersion;
+use blockifier_test_utils::contracts::FeatureContract;
+use mempool_test_utils::starknet_api_test_utils::VALID_ACCOUNT_BALANCE;
 use starknet_api::block::{BlockInfo, BlockNumber};
 use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce};
 use starknet_api::state::StorageKey;
@@ -57,8 +59,10 @@ pub struct TestStateReaderFactory {
 }
 
 impl StateReaderFactory for TestStateReaderFactory {
-    fn get_state_reader_from_latest_block(&self) -> Box<dyn MempoolStateReader> {
-        Box::new(self.state_reader.clone())
+    fn get_state_reader_from_latest_block(
+        &self,
+    ) -> StateSyncClientResult<Box<dyn MempoolStateReader>> {
+        Ok(Box::new(self.state_reader.clone()))
     }
 
     fn get_state_reader(&self, _block_number: BlockNumber) -> Box<dyn MempoolStateReader> {
@@ -71,7 +75,7 @@ pub fn local_test_state_reader_factory(
     zero_balance: bool,
 ) -> TestStateReaderFactory {
     let block_context = BlockContext::create_for_testing();
-    let account_balance = if zero_balance { Fee(0) } else { BALANCE };
+    let account_balance = if zero_balance { Fee(0) } else { VALID_ACCOUNT_BALANCE };
     let account_contract = FeatureContract::AccountWithoutValidations(cairo_version);
     let test_contract = FeatureContract::TestContract(cairo_version);
diff --git a/crates/apollo_gateway/src/stateful_transaction_validator.rs b/crates/apollo_gateway/src/stateful_transaction_validator.rs
new file mode 100644
index 00000000000..684be012d2a
--- /dev/null
+++ b/crates/apollo_gateway/src/stateful_transaction_validator.rs
@@ -0,0 +1,188 @@
+use apollo_gateway_types::deprecated_gateway_error::{
+    KnownStarknetErrorCode,
+    StarknetError,
+    StarknetErrorCode,
+};
+use apollo_gateway_types::errors::GatewaySpecError;
+use apollo_mempool_types::communication::SharedMempoolClient;
+use apollo_proc_macros::sequencer_latency_histogram;
+use blockifier::blockifier::stateful_validator::{
+    StatefulValidator,
+    StatefulValidatorResult as BlockifierStatefulValidatorResult,
+};
+use blockifier::blockifier_versioned_constants::VersionedConstants;
+use blockifier::bouncer::BouncerConfig;
+use blockifier::context::{BlockContext, ChainInfo};
+use blockifier::state::cached_state::CachedState;
+use blockifier::transaction::account_transaction::{AccountTransaction, ExecutionFlags};
+use blockifier::transaction::transactions::enforce_fee;
+#[cfg(test)]
+use mockall::automock;
+use starknet_api::block::BlockInfo;
+use starknet_api::core::Nonce;
+use starknet_api::executable_transaction::{
+    AccountTransaction as ExecutableTransaction,
+    InvokeTransaction as ExecutableInvokeTransaction,
+};
+use starknet_types_core::felt::Felt;
+use tracing::{debug, error};
+
+use crate::config::StatefulTransactionValidatorConfig;
+use crate::errors::{mempool_client_err_to_deprecated_gw_err, StatefulTransactionValidatorResult};
+use crate::metrics::GATEWAY_VALIDATE_TX_LATENCY;
+use crate::state_reader::{MempoolStateReader, StateReaderFactory};
+
+#[cfg(test)]
+#[path = "stateful_transaction_validator_test.rs"]
+mod stateful_transaction_validator_test;
+
+pub struct StatefulTransactionValidator {
+    pub config: StatefulTransactionValidatorConfig,
+}
+
+type BlockifierStatefulValidator = StatefulValidator<Box<dyn MempoolStateReader>>;
+
+// TODO(yair): move the trait to Blockifier.
+#[cfg_attr(test, automock)]
+pub trait StatefulTransactionValidatorTrait {
+    #[allow(clippy::result_large_err)]
+    fn validate(&mut self, account_tx: AccountTransaction)
+    -> BlockifierStatefulValidatorResult<()>;
+}
+
+impl StatefulTransactionValidatorTrait for BlockifierStatefulValidator {
+    #[sequencer_latency_histogram(GATEWAY_VALIDATE_TX_LATENCY, true)]
+    #[allow(clippy::result_large_err)]
+    fn validate(
+        &mut self,
+        account_tx: AccountTransaction,
+    ) -> BlockifierStatefulValidatorResult<()> {
+        self.perform_validations(account_tx)
+    }
+}
+
+impl StatefulTransactionValidator {
+    pub fn run_validate<V: StatefulTransactionValidatorTrait>(
+        &self,
+        executable_tx: &ExecutableTransaction,
+        account_nonce: Nonce,
+        mempool_client: SharedMempoolClient,
+        mut validator: V,
+        runtime: tokio::runtime::Handle,
+    ) -> StatefulTransactionValidatorResult<()> {
+        if !self.is_valid_nonce(executable_tx, account_nonce) {
+            let tx_nonce = executable_tx.nonce();
+            debug!(
+                "Transaction nonce is invalid. Transaction nonce: {tx_nonce}, account_nonce: \
+                 {account_nonce}",
+            );
+            return Err(StarknetError {
+                code: StarknetErrorCode::KnownErrorCode(
+                    KnownStarknetErrorCode::InvalidTransactionNonce,
+                ),
+                message: format!(
+                    "Invalid transaction nonce. Expected: {account_nonce}, got: {tx_nonce}."
+ ), + }); + } + + let skip_validate = + skip_stateful_validations(executable_tx, account_nonce, mempool_client, runtime)?; + let only_query = false; + let charge_fee = enforce_fee(executable_tx, only_query); + let strict_nonce_check = false; + let execution_flags = + ExecutionFlags { only_query, charge_fee, validate: !skip_validate, strict_nonce_check }; + + let account_tx = AccountTransaction { tx: executable_tx.clone(), execution_flags }; + validator.validate(account_tx).map_err(|e| StarknetError { + code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::ValidateFailure), + message: e.to_string(), + })?; + Ok(()) + } + + pub fn instantiate_validator( + &self, + state_reader_factory: &dyn StateReaderFactory, + chain_info: &ChainInfo, + ) -> StatefulTransactionValidatorResult { + // TODO(yael 6/5/2024): consider storing the block_info as part of the + // StatefulTransactionValidator and update it only once a new block is created. + let latest_block_info = get_latest_block_info(state_reader_factory)?; + let state_reader = state_reader_factory.get_state_reader(latest_block_info.block_number); + let state = CachedState::new(state_reader); + let versioned_constants = VersionedConstants::get_versioned_constants( + self.config.versioned_constants_overrides.clone(), + ); + let mut block_info = latest_block_info; + block_info.block_number = block_info.block_number.unchecked_next(); + // TODO(yael 21/4/24): create the block context using pre_process_block once we will be + // able to read the block_hash of 10 blocks ago from papyrus. + let block_context = BlockContext::new( + block_info, + chain_info.clone(), + versioned_constants, + BouncerConfig::max(), + ); + + Ok(BlockifierStatefulValidator::create(state, block_context)) + } + + fn is_valid_nonce(&self, executable_tx: &ExecutableTransaction, account_nonce: Nonce) -> bool { + let incoming_tx_nonce = executable_tx.nonce(); + + // Declare transactions must have the same nonce as the account nonce. + if self.config.reject_future_declare_txs + && matches!(executable_tx, ExecutableTransaction::Declare(_)) + { + return incoming_tx_nonce == account_nonce; + } + + let max_allowed_nonce = + Nonce(account_nonce.0 + Felt::from(self.config.max_allowed_nonce_gap)); + account_nonce <= incoming_tx_nonce && incoming_tx_nonce <= max_allowed_nonce + } +} + +/// Check if validation of an invoke transaction should be skipped due to deploy_account not being +/// processed yet. This feature is used to improve UX for users sending deploy_account + invoke at +/// once. +fn skip_stateful_validations( + tx: &ExecutableTransaction, + account_nonce: Nonce, + mempool_client: SharedMempoolClient, + runtime: tokio::runtime::Handle, +) -> StatefulTransactionValidatorResult { + if let ExecutableTransaction::Invoke(ExecutableInvokeTransaction { tx, .. }) = tx { + // check if the transaction nonce is 1, meaning it is post deploy_account, and the + // account nonce is zero, meaning the account was not deployed yet. + if tx.nonce() == Nonce(Felt::ONE) && account_nonce == Nonce(Felt::ZERO) { + // We verify that a deploy_account transaction exists for this account. It is sufficient + // to check if the account exists in the mempool since it means that either it has a + // deploy_account transaction or transactions with future nonces that passed + // validations. 
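Editor's aside before the hunk resumes: the nonce window enforced by `is_valid_nonce` above can be distilled into a small predicate. This is a sketch of the assumed semantics, not part of the patch:

```rust
// A tx nonce is accepted iff it lies in
//   account_nonce <= tx_nonce <= account_nonce + max_allowed_nonce_gap,
// except declares, which (under reject_future_declare_txs) must match exactly.
fn nonce_window_ok(tx_nonce: u64, account_nonce: u64, max_gap: u64, strict_declare: bool) -> bool {
    if strict_declare {
        return tx_nonce == account_nonce;
    }
    account_nonce <= tx_nonce && tx_nonce <= account_nonce + max_gap
}

fn main() {
    assert!(nonce_window_ok(11, 1, 10, false)); // at the edge of the gap
    assert!(!nonce_window_ok(12, 1, 10, false)); // beyond the gap
    assert!(!nonce_window_ok(0, 1, 10, false)); // stale nonce
    assert!(!nonce_window_ok(11, 10, 10, true)); // future declare rejected
}
```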
+ return runtime + .block_on(mempool_client.account_tx_in_pool_or_recent_block(tx.sender_address())) + .map_err(mempool_client_err_to_deprecated_gw_err); + } + } + + Ok(false) +} + +pub fn get_latest_block_info( + state_reader_factory: &dyn StateReaderFactory, +) -> StatefulTransactionValidatorResult { + let state_reader = state_reader_factory + .get_state_reader_from_latest_block() + .map_err(|e| { + error!("Failed to get state reader from latest block: {}", e); + GatewaySpecError::UnexpectedError { data: "Internal server error.".to_owned() } + }) + .map_err(|e| StarknetError::internal(&e.to_string()))?; + state_reader.get_block_info().map_err(|e| { + error!("Failed to get latest block info: {}", e); + StarknetError::internal(&e.to_string()) + }) +} diff --git a/crates/apollo_gateway/src/stateful_transaction_validator_test.rs b/crates/apollo_gateway/src/stateful_transaction_validator_test.rs new file mode 100644 index 00000000000..e556175a476 --- /dev/null +++ b/crates/apollo_gateway/src/stateful_transaction_validator_test.rs @@ -0,0 +1,295 @@ +use std::sync::Arc; + +use apollo_gateway_types::deprecated_gateway_error::{ + KnownStarknetErrorCode, + StarknetError, + StarknetErrorCode, +}; +use apollo_mempool_types::communication::MockMempoolClient; +use blockifier::blockifier::stateful_validator::{ + StatefulValidatorError as BlockifierStatefulValidatorError, + StatefulValidatorResult as BlockifierStatefulValidatorResult, +}; +use blockifier::context::ChainInfo; +use blockifier::test_utils::contracts::FeatureContractTrait; +use blockifier::transaction::errors::{TransactionFeeError, TransactionPreValidationError}; +use blockifier::transaction::test_utils::calculate_class_info_for_testing; +use blockifier_test_utils::cairo_versions::{CairoVersion, RunnableCairo1}; +use blockifier_test_utils::contracts::FeatureContract; +use mempool_test_utils::starknet_api_test_utils::{ + executable_invoke_tx as create_executable_invoke_tx, + VALID_L1_GAS_MAX_AMOUNT, + VALID_L1_GAS_MAX_PRICE_PER_UNIT, +}; +use mockall::predicate::eq; +use num_bigint::BigUint; +use pretty_assertions::assert_eq; +use rstest::{fixture, rstest}; +use starknet_api::block::GasPrice; +use starknet_api::core::Nonce; +use starknet_api::executable_transaction::AccountTransaction; +use starknet_api::execution_resources::GasAmount; +use starknet_api::test_utils::declare::executable_declare_tx; +use starknet_api::test_utils::deploy_account::executable_deploy_account_tx; +use starknet_api::test_utils::invoke::executable_invoke_tx; +use starknet_api::transaction::fields::Resource; +use starknet_api::{declare_tx_args, deploy_account_tx_args, invoke_tx_args, nonce}; + +use crate::config::StatefulTransactionValidatorConfig; +use crate::state_reader::{MockStateReaderFactory, StateReaderFactory}; +use crate::state_reader_test_utils::local_test_state_reader_factory; +use crate::stateful_transaction_validator::{ + MockStatefulTransactionValidatorTrait, + StatefulTransactionValidator, +}; + +pub const STATEFUL_VALIDATOR_FEE_ERROR: BlockifierStatefulValidatorError = + BlockifierStatefulValidatorError::TransactionPreValidationError( + TransactionPreValidationError::TransactionFeeError( + TransactionFeeError::GasBoundsExceedBalance { + resource: Resource::L1DataGas, + max_amount: GasAmount(VALID_L1_GAS_MAX_AMOUNT), + max_price: GasPrice(VALID_L1_GAS_MAX_PRICE_PER_UNIT), + balance: BigUint::ZERO, + }, + ), + ); + +#[fixture] +fn stateful_validator() -> StatefulTransactionValidator { + StatefulTransactionValidator { config: 
StatefulTransactionValidatorConfig::default() } +} + +// TODO(Arni): consider testing declare and deploy account. +#[rstest] +#[case::valid_tx( + create_executable_invoke_tx(CairoVersion::Cairo1(RunnableCairo1::Casm)), + Ok(()) +)] +#[case::invalid_tx( + create_executable_invoke_tx(CairoVersion::Cairo1(RunnableCairo1::Casm)), + Err(STATEFUL_VALIDATOR_FEE_ERROR) +)] +#[tokio::test] +async fn test_stateful_tx_validator( + #[case] executable_tx: AccountTransaction, + #[case] expected_result: BlockifierStatefulValidatorResult<()>, + stateful_validator: StatefulTransactionValidator, +) { + let expected_result_as_stateful_transaction_result = expected_result + .as_ref() + .map(|validate_result| *validate_result) + .map_err(|blockifier_error| StarknetError { + code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::ValidateFailure), + message: format!("{}", blockifier_error), + }); + + let mut mock_validator = MockStatefulTransactionValidatorTrait::new(); + mock_validator.expect_validate().return_once(|_| expected_result.map(|_| ())); + + let account_nonce = nonce!(0); + let mut mock_mempool_client = MockMempoolClient::new(); + mock_mempool_client.expect_account_tx_in_pool_or_recent_block().returning(|_| { + // The mempool does not have any transactions from the sender. + Ok(false) + }); + let mempool_client = Arc::new(mock_mempool_client); + let runtime = tokio::runtime::Handle::current(); + + tokio::task::spawn_blocking(move || { + let result = stateful_validator.run_validate( + &executable_tx, + account_nonce, + mempool_client, + mock_validator, + runtime, + ); + assert_eq!(result, expected_result_as_stateful_transaction_result); + }) + .await + .unwrap(); +} + +#[rstest] +fn test_instantiate_validator(stateful_validator: StatefulTransactionValidator) { + let state_reader_factory = + local_test_state_reader_factory(CairoVersion::Cairo1(RunnableCairo1::Casm), false); + + let mut mock_state_reader_factory = MockStateReaderFactory::new(); + + // Make sure stateful_validator uses the latest block in the initial call. + let latest_state_reader = state_reader_factory.get_state_reader_from_latest_block(); + mock_state_reader_factory + .expect_get_state_reader_from_latest_block() + .return_once(|| latest_state_reader); + + // Make sure stateful_validator uses the latest block in the following calls to the + // state_reader. + let latest_block = state_reader_factory.state_reader.block_info.block_number; + let state_reader = state_reader_factory.get_state_reader(latest_block); + mock_state_reader_factory + .expect_get_state_reader() + .with(eq(latest_block)) + .return_once(move |_| state_reader); + + let blockifier_validator = stateful_validator + .instantiate_validator(&mock_state_reader_factory, &ChainInfo::create_for_testing()); + assert!(blockifier_validator.is_ok()); +} + +#[rstest] +#[case::should_skip_validation( + executable_invoke_tx(invoke_tx_args!(nonce: nonce!(1))), + nonce!(0), + true, + false +)] +#[case::should_not_skip_validation_nonce_zero( + executable_invoke_tx(invoke_tx_args!(nonce: nonce!(0))), + nonce!(0), + true, + true +)] +#[case::should_not_skip_validation_nonce_over_one( + executable_invoke_tx(invoke_tx_args!(nonce: nonce!(2))), + nonce!(0), + true, + true +)] +// TODO(Arni): Fix this test case. Ideally, we would have a non-invoke transaction with tx_nonce 1 +// and account_nonce 0. For deploy account the tx_nonce is always 0. Replace with a declare tx. 
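Editor's aside: the `#[case]`s surrounding this note encode a small decision table for `skip_stateful_validations`. Distilled as a standalone predicate (illustrative only; the real check consults the mempool client asynchronously):

```rust
// Validation is skipped only for an invoke with nonce 1 sent from an undeployed
// account (nonce 0) whose deploy_account the mempool already knows about.
fn should_skip_validate(is_invoke: bool, tx_nonce: u64, account_nonce: u64, known_to_mempool: bool) -> bool {
    is_invoke && tx_nonce == 1 && account_nonce == 0 && known_to_mempool
}

fn main() {
    assert!(should_skip_validate(true, 1, 0, true)); // post-deploy_account invoke
    assert!(!should_skip_validate(true, 0, 0, true)); // nonce zero
    assert!(!should_skip_validate(true, 2, 0, true)); // nonce over one
    assert!(!should_skip_validate(false, 0, 0, true)); // non-invoke (deploy_account)
    assert!(!should_skip_validate(true, 1, 1, true)); // account already deployed
    assert!(!should_skip_validate(true, 1, 0, false)); // no tx in mempool
}
```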
+#[case::should_not_skip_validation_non_invoke( + executable_deploy_account_tx(deploy_account_tx_args!()), + nonce!(0), + true, + true + +)] +#[case::should_not_skip_validation_account_nonce_1( + executable_invoke_tx(invoke_tx_args!(nonce: nonce!(1))), + nonce!(1), + true, + true +)] +#[case::should_not_skip_validation_no_tx_in_mempool( + executable_invoke_tx(invoke_tx_args!(nonce: nonce!(1))), + nonce!(0), + false, + true +)] +#[tokio::test] +async fn test_skip_stateful_validation( + #[case] executable_tx: AccountTransaction, + #[case] sender_nonce: Nonce, + #[case] contains_tx: bool, + #[case] should_validate: bool, + stateful_validator: StatefulTransactionValidator, +) { + let mut mock_validator = MockStatefulTransactionValidatorTrait::new(); + mock_validator + .expect_validate() + .withf(move |tx| tx.execution_flags.validate == should_validate) + .returning(|_| Ok(())); + let mut mock_mempool_client = MockMempoolClient::new(); + mock_mempool_client + .expect_account_tx_in_pool_or_recent_block() + .returning(move |_| Ok(contains_tx)); + let mempool_client = Arc::new(mock_mempool_client); + let runtime = tokio::runtime::Handle::current(); + + tokio::task::spawn_blocking(move || { + let _ = stateful_validator.run_validate( + &executable_tx, + sender_nonce, + mempool_client, + mock_validator, + runtime, + ); + }) + .await + .unwrap(); +} + +#[rstest] +#[case::nonce_equal_to_account_nonce(0, 1, 1, Ok(()))] +#[case::nonce_in_allowed_range(10, 1, 11, Ok(()))] +#[case::nonce_beyond_allowed_gap( + 10, + 1, + 12, + Err(StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InvalidTransactionNonce)) +)] +#[case::nonce_less_then_account_nonce( + 0, + 1, + 0, + Err(StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InvalidTransactionNonce)) +)] +#[tokio::test] +async fn test_is_valid_nonce( + #[case] max_allowed_nonce_gap: u32, + #[case] account_nonce: u32, + #[case] tx_nonce: u32, + #[case] expected_result_code: Result<(), StarknetErrorCode>, +) { + let stateful_validator = StatefulTransactionValidator { + config: StatefulTransactionValidatorConfig { max_allowed_nonce_gap, ..Default::default() }, + }; + + let mut mock_validator = MockStatefulTransactionValidatorTrait::new(); + mock_validator.expect_validate().return_once(|_| Ok(())); + + let executable_tx = executable_invoke_tx(invoke_tx_args!(nonce: nonce!(tx_nonce))); + let result = tokio::task::spawn_blocking(move || { + stateful_validator.run_validate( + &executable_tx, + nonce!(account_nonce), + Arc::new(MockMempoolClient::new()), + mock_validator, + tokio::runtime::Handle::current(), + ) + }) + .await + .unwrap() + .map_err(|err| err.code); + assert_eq!(result, expected_result_code); +} + +#[rstest] +#[case::nonce_equal_to_account_nonce(0, Ok(()))] +#[case::nonce_greater_then_account_nonce( + 1, + Err(StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InvalidTransactionNonce)) +)] +#[case::nonce_less_then_account_nonce(-1, Err(StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::InvalidTransactionNonce)))] +#[tokio::test] +async fn test_reject_future_declares( + stateful_validator: StatefulTransactionValidator, + #[case] account_nonce_diff: i32, + #[case] expected_result_code: Result<(), StarknetErrorCode>, +) { + let mut mock_validator = MockStatefulTransactionValidatorTrait::new(); + mock_validator.expect_validate().return_once(|_| Ok(())); + + let account_nonce = 10; + let executable_tx = executable_declare_tx( + declare_tx_args!(nonce: nonce!(account_nonce + account_nonce_diff)), + calculate_class_info_for_testing( 
+ FeatureContract::Empty(CairoVersion::Cairo1(RunnableCairo1::Casm)).get_class(), + ), + ); + + let result = tokio::task::spawn_blocking(move || { + stateful_validator.run_validate( + &executable_tx, + nonce!(account_nonce), + Arc::new(MockMempoolClient::new()), + mock_validator, + tokio::runtime::Handle::current(), + ) + }) + .await + .unwrap() + .map_err(|err| err.code); + assert_eq!(result, expected_result_code); +} diff --git a/crates/starknet_gateway/src/stateless_transaction_validator.rs b/crates/apollo_gateway/src/stateless_transaction_validator.rs similarity index 89% rename from crates/starknet_gateway/src/stateless_transaction_validator.rs rename to crates/apollo_gateway/src/stateless_transaction_validator.rs index 871e2a3d3ce..c17f94902ee 100644 --- a/crates/starknet_gateway/src/stateless_transaction_validator.rs +++ b/crates/apollo_gateway/src/stateless_transaction_validator.rs @@ -1,6 +1,4 @@ -use starknet_api::block::GasPrice; use starknet_api::data_availability::DataAvailabilityMode; -use starknet_api::execution_resources::GasAmount; use starknet_api::rpc_transaction::{ RpcDeclareTransaction, RpcDeployAccountTransaction, @@ -8,7 +6,7 @@ use starknet_api::rpc_transaction::{ RpcTransaction, }; use starknet_api::state::EntryPoint; -use starknet_api::transaction::fields::{AllResourceBounds, Resource}; +use starknet_api::transaction::fields::{Fee, Tip, ValidResourceBounds}; use starknet_types_core::felt::Felt; use tracing::{instrument, Level}; @@ -49,16 +47,22 @@ impl StatelessTransactionValidator { &self, tx: &RpcTransaction, ) -> StatelessTransactionValidatorResult<()> { - let resource_bounds_mapping = tx.resource_bounds(); - - if self.config.validate_non_zero_l1_gas_fee { - validate_resource_is_non_zero(resource_bounds_mapping, Resource::L1Gas)?; + if !self.config.validate_non_zero_resource_bounds { + return Ok(()); } - if self.config.validate_non_zero_l2_gas_fee { - validate_resource_is_non_zero(resource_bounds_mapping, Resource::L2Gas)?; + + let resource_bounds = *tx.resource_bounds(); + // The resource bounds should be positive even without the tip. 
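Editor's aside on the check that follows: assuming `max_possible_fee` for `AllResources` bounds is the sum over the three resources of `max_amount * max_price_per_unit` (with the tip added to the L2 gas price, hence `Tip::ZERO` here), the bound is zero exactly when every resource has a zero amount or a zero price. A toy sketch of that arithmetic:

```rust
// Each pair is (max_amount, max_price_per_unit) for l1_gas, l2_gas, l1_data_gas.
fn max_possible_fee(bounds: &[(u64, u128)]) -> u128 {
    bounds.iter().map(|(amount, price)| u128::from(*amount) * price).sum()
}

fn main() {
    // Every resource has a zero amount or a zero price: rejected by the check below.
    assert_eq!(max_possible_fee(&[(0, 100), (10, 0), (0, 0)]), 0);
    // One resource with both bounds positive is enough to pass it.
    assert!(max_possible_fee(&[(1, 1), (0, 0), (0, 0)]) > 0);
}
```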
+ if ValidResourceBounds::AllResources(resource_bounds).max_possible_fee(Tip::ZERO) == Fee(0) + { + return Err(StatelessTransactionValidatorError::ZeroResourceBounds { resource_bounds }); } - if self.config.validate_non_zero_l1_data_gas_fee { - validate_resource_is_non_zero(resource_bounds_mapping, Resource::L1DataGas)?; + + if resource_bounds.l2_gas.max_price_per_unit.0 < self.config.min_gas_price { + return Err(StatelessTransactionValidatorError::MaxGasPriceTooLow { + gas_price: resource_bounds.l2_gas.max_price_per_unit, + min_gas_price: self.config.min_gas_price, + }); } Ok(()) @@ -239,6 +243,13 @@ impl StatelessTransactionValidator { &self, contract_class: &starknet_api::state::SierraContractClass, ) -> StatelessTransactionValidatorResult<()> { + if contract_class.sierra_program.len() > self.config.max_contract_bytecode_size { + return Err(StatelessTransactionValidatorError::ContractBytecodeSizeTooLarge { + contract_bytecode_size: contract_class.sierra_program.len(), + max_contract_bytecode_size: self.config.max_contract_bytecode_size, + }); + } + let contract_class_object_size = serde_json::to_string(&contract_class) .expect("Unexpected error serializing contract class.") .len(); @@ -270,20 +281,3 @@ impl StatelessTransactionValidator { Err(StatelessTransactionValidatorError::EntryPointsNotUniquelySorted) } } - -fn validate_resource_is_non_zero( - all_resource_bounds: &AllResourceBounds, - resource: Resource, -) -> StatelessTransactionValidatorResult<()> { - let resource_bounds = all_resource_bounds.get_bound(resource); - if resource_bounds.max_amount == GasAmount(0) - || resource_bounds.max_price_per_unit == GasPrice(0) - { - return Err(StatelessTransactionValidatorError::ZeroResourceBounds { - resource, - resource_bounds, - }); - } - - Ok(()) -} diff --git a/crates/starknet_gateway/src/stateless_transaction_validator_test.rs b/crates/apollo_gateway/src/stateless_transaction_validator_test.rs similarity index 87% rename from crates/starknet_gateway/src/stateless_transaction_validator_test.rs rename to crates/apollo_gateway/src/stateless_transaction_validator_test.rs index 57319f1d3a6..96dc3e9c0e7 100644 --- a/crates/starknet_gateway/src/stateless_transaction_validator_test.rs +++ b/crates/apollo_gateway/src/stateless_transaction_validator_test.rs @@ -3,6 +3,7 @@ use std::vec; use assert_matches::assert_matches; use rstest::rstest; +use starknet_api::block::GasPrice; use starknet_api::core::{EntryPointSelector, L2_ADDRESS_UPPER_BOUND}; use starknet_api::data_availability::DataAvailabilityMode; use starknet_api::rpc_transaction::EntryPointByType; @@ -12,7 +13,6 @@ use starknet_api::transaction::fields::{ AccountDeploymentData, AllResourceBounds, PaymasterData, - Resource, ResourceBounds, TransactionSignature, }; @@ -34,37 +34,30 @@ use crate::test_utils::{ NON_EMPTY_RESOURCE_BOUNDS, }; +static DEFAULT_VALIDATOR_CONFIG: LazyLock = + LazyLock::new(StatelessTransactionValidatorConfig::default); static MIN_SIERRA_VERSION: LazyLock = LazyLock::new(|| VersionId::new(1, 1, 0)); static MAX_SIERRA_VERSION: LazyLock = LazyLock::new(|| VersionId::new(1, 5, usize::MAX)); static DEFAULT_VALIDATOR_CONFIG_FOR_TESTING: LazyLock = LazyLock::new(|| StatelessTransactionValidatorConfig { - validate_non_zero_l1_gas_fee: false, - validate_non_zero_l2_gas_fee: false, - validate_non_zero_l1_data_gas_fee: false, + validate_non_zero_resource_bounds: false, + min_gas_price: 0, max_calldata_length: 1, max_signature_length: 1, + max_contract_bytecode_size: 100000, max_contract_class_object_size: 100000, 
min_sierra_version: *MIN_SIERRA_VERSION, max_sierra_version: *MAX_SIERRA_VERSION, }); #[rstest] -#[case::ignore_resource_bounds( - StatelessTransactionValidatorConfig { - validate_non_zero_l1_gas_fee: false, - validate_non_zero_l2_gas_fee: false, - ..*DEFAULT_VALIDATOR_CONFIG_FOR_TESTING - }, - RpcTransactionArgs::default() -)] #[case::valid_l1_gas( StatelessTransactionValidatorConfig { - validate_non_zero_l1_gas_fee: true, - validate_non_zero_l2_gas_fee: false, + validate_non_zero_resource_bounds: true, ..*DEFAULT_VALIDATOR_CONFIG_FOR_TESTING }, - RpcTransactionArgs{ + RpcTransactionArgs { resource_bounds: AllResourceBounds { l1_gas: NON_EMPTY_RESOURCE_BOUNDS, ..Default::default() @@ -74,8 +67,7 @@ static DEFAULT_VALIDATOR_CONFIG_FOR_TESTING: LazyLock 1); + let tx_validator = StatelessTransactionValidator { + config: StatelessTransactionValidatorConfig { + max_contract_bytecode_size: sierra_program.len() - 1, + ..*DEFAULT_VALIDATOR_CONFIG_FOR_TESTING + }, + }; + + let tx = rpc_declare_tx( + declare_tx_args!(), + SierraContractClass { sierra_program, ..Default::default() }, + ); + + assert_matches!( + tx_validator.validate(&tx), + Err(StatelessTransactionValidatorError::ContractBytecodeSizeTooLarge { .. }) + ); +} + #[rstest] #[case::valid( vec![ diff --git a/crates/apollo_gateway/src/sync_state_reader.rs b/crates/apollo_gateway/src/sync_state_reader.rs new file mode 100644 index 00000000000..7a5e2d25500 --- /dev/null +++ b/crates/apollo_gateway/src/sync_state_reader.rs @@ -0,0 +1,186 @@ +use apollo_class_manager_types::SharedClassManagerClient; +use apollo_state_sync_types::communication::{ + SharedStateSyncClient, + StateSyncClientError, + StateSyncClientResult, +}; +use apollo_state_sync_types::errors::StateSyncError; +use blockifier::execution::contract_class::RunnableCompiledClass; +use blockifier::state::errors::StateError; +use blockifier::state::state_api::{StateReader as BlockifierStateReader, StateResult}; +use futures::executor::block_on; +use starknet_api::block::{BlockInfo, BlockNumber, GasPriceVector, GasPrices}; +use starknet_api::contract_class::ContractClass; +use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce}; +use starknet_api::data_availability::L1DataAvailabilityMode; +use starknet_api::state::StorageKey; +use starknet_types_core::felt::Felt; + +use crate::state_reader::{MempoolStateReader, StateReaderFactory}; + +pub(crate) struct SyncStateReader { + block_number: BlockNumber, + state_sync_client: SharedStateSyncClient, + class_manager_client: SharedClassManagerClient, + runtime: tokio::runtime::Handle, +} + +impl SyncStateReader { + pub fn from_number( + state_sync_client: SharedStateSyncClient, + class_manager_client: SharedClassManagerClient, + block_number: BlockNumber, + runtime: tokio::runtime::Handle, + ) -> Self { + Self { block_number, state_sync_client, class_manager_client, runtime } + } +} + +impl MempoolStateReader for SyncStateReader { + fn get_block_info(&self) -> StateResult { + let block = block_on(self.state_sync_client.get_block(self.block_number)) + .map_err(|e| StateError::StateReadError(e.to_string()))? 
+ .ok_or(StateError::StateReadError("Block not found".to_string()))?; + + let block_header = block.block_header_without_hash; + let block_info = BlockInfo { + block_number: block_header.block_number, + block_timestamp: block_header.timestamp, + sequencer_address: block_header.sequencer.0, + gas_prices: GasPrices { + eth_gas_prices: GasPriceVector { + l1_gas_price: block_header.l1_gas_price.price_in_wei.try_into()?, + l1_data_gas_price: block_header.l1_data_gas_price.price_in_wei.try_into()?, + l2_gas_price: block_header.l2_gas_price.price_in_wei.try_into()?, + }, + strk_gas_prices: GasPriceVector { + l1_gas_price: block_header.l1_gas_price.price_in_fri.try_into()?, + l1_data_gas_price: block_header.l1_data_gas_price.price_in_fri.try_into()?, + l2_gas_price: block_header.l2_gas_price.price_in_fri.try_into()?, + }, + }, + use_kzg_da: match block_header.l1_da_mode { + L1DataAvailabilityMode::Blob => true, + L1DataAvailabilityMode::Calldata => false, + }, + }; + + Ok(block_info) + } +} + +impl BlockifierStateReader for SyncStateReader { + fn get_storage_at( + &self, + contract_address: ContractAddress, + key: StorageKey, + ) -> StateResult { + let res = self.runtime.block_on(self.state_sync_client.get_storage_at( + self.block_number, + contract_address, + key, + )); + + match res { + Ok(value) => Ok(value), + Err(StateSyncClientError::StateSyncError(StateSyncError::ContractNotFound(_))) => { + Ok(Felt::default()) + } + Err(e) => Err(StateError::StateReadError(e.to_string())), + } + } + + fn get_nonce_at(&self, contract_address: ContractAddress) -> StateResult { + let res = self + .runtime + .block_on(self.state_sync_client.get_nonce_at(self.block_number, contract_address)); + + match res { + Ok(value) => Ok(value), + Err(StateSyncClientError::StateSyncError(StateSyncError::ContractNotFound(_))) => { + Ok(Nonce::default()) + } + Err(e) => Err(StateError::StateReadError(e.to_string())), + } + } + + fn get_compiled_class(&self, class_hash: ClassHash) -> StateResult { + let is_class_declared = self + .runtime + .block_on(self.state_sync_client.is_class_declared_at(self.block_number, class_hash)) + .map_err(|e| StateError::StateReadError(e.to_string()))?; + + if !is_class_declared { + return Err(StateError::UndeclaredClassHash(class_hash)); + } + + let contract_class = self + .runtime + .block_on(self.class_manager_client.get_executable(class_hash)) + .map_err(|e| StateError::StateReadError(e.to_string()))? 
+ .unwrap_or_else(|| panic!( + "Class with hash {class_hash:?} doesn't appear in class manager even though \ + it was declared" + )); + + match contract_class { + ContractClass::V1(casm_contract_class) => { + Ok(RunnableCompiledClass::V1(casm_contract_class.try_into()?)) + } + ContractClass::V0(deprecated_contract_class) => { + Ok(RunnableCompiledClass::V0(deprecated_contract_class.try_into()?)) + } + } + } + + fn get_class_hash_at(&self, contract_address: ContractAddress) -> StateResult<ClassHash> { + let res = self.runtime.block_on( + self.state_sync_client.get_class_hash_at(self.block_number, contract_address), + ); + + match res { + Ok(value) => Ok(value), + Err(StateSyncClientError::StateSyncError(StateSyncError::ContractNotFound(_))) => { + Ok(ClassHash::default()) + } + Err(e) => Err(StateError::StateReadError(e.to_string())), + } + } + + fn get_compiled_class_hash(&self, _class_hash: ClassHash) -> StateResult<CompiledClassHash> { + todo!() + } +} + +pub struct SyncStateReaderFactory { + pub shared_state_sync_client: SharedStateSyncClient, + pub class_manager_client: SharedClassManagerClient, + pub runtime: tokio::runtime::Handle, +} + +impl StateReaderFactory for SyncStateReaderFactory { + fn get_state_reader_from_latest_block( + &self, + ) -> StateSyncClientResult<Box<dyn MempoolStateReader>> { + let latest_block_number = self + .runtime + .block_on(self.shared_state_sync_client.get_latest_block_number())? + .ok_or(StateSyncClientError::StateSyncError(StateSyncError::EmptyState))?; + + Ok(Box::new(SyncStateReader::from_number( + self.shared_state_sync_client.clone(), + self.class_manager_client.clone(), + latest_block_number, + self.runtime.clone(), + ))) + } + + fn get_state_reader(&self, block_number: BlockNumber) -> Box<dyn MempoolStateReader> { + Box::new(SyncStateReader::from_number( + self.shared_state_sync_client.clone(), + self.class_manager_client.clone(), + block_number, + self.runtime.clone(), + )) + } +} diff --git a/crates/apollo_gateway/src/sync_state_reader_test.rs b/crates/apollo_gateway/src/sync_state_reader_test.rs new file mode 100644 index 00000000000..cdaf8806399 --- /dev/null +++ b/crates/apollo_gateway/src/sync_state_reader_test.rs @@ -0,0 +1,290 @@ +use std::sync::Arc; + +use apollo_class_manager_types::{ + ClassManagerClientResult, + ExecutableClass, + MockClassManagerClient, +}; +use apollo_state_sync_types::communication::{MockStateSyncClient, StateSyncClientResult}; +use apollo_state_sync_types::state_sync_types::SyncBlock; +use apollo_test_utils::{get_rng, GetTestInstance}; +use blockifier::execution::contract_class::RunnableCompiledClass; +use blockifier::state::errors::StateError; +use blockifier::state::state_api::{StateReader, StateResult}; +use cairo_lang_starknet_classes::casm_contract_class::CasmContractClass; +use lazy_static::lazy_static; +use mockall::predicate; +use rstest::rstest; +use starknet_api::block::{ + BlockHeaderWithoutHash, + BlockInfo, + BlockNumber, + BlockTimestamp, + GasPricePerToken, + GasPriceVector, + GasPrices, + NonzeroGasPrice, +}; +use starknet_api::contract_class::{ContractClass, SierraVersion}; +use starknet_api::core::{ClassHash, SequencerContractAddress}; +use starknet_api::data_availability::L1DataAvailabilityMode; +use starknet_api::{class_hash, contract_address, felt, nonce, storage_key}; + +use crate::state_reader::MempoolStateReader; +use crate::sync_state_reader::SyncStateReader; +#[tokio::test] +async fn test_get_block_info() { + let mut mock_state_sync_client = MockStateSyncClient::new(); + let mock_class_manager_client = MockClassManagerClient::new(); + let block_number = BlockNumber(1); + let
block_timestamp = BlockTimestamp(2); + let sequencer_address = contract_address!("0x3"); + let l1_gas_price = GasPricePerToken { price_in_wei: 4_u8.into(), price_in_fri: 5_u8.into() }; + let l1_data_gas_price = + GasPricePerToken { price_in_wei: 6_u8.into(), price_in_fri: 7_u8.into() }; + let l2_gas_price = GasPricePerToken { price_in_wei: 8_u8.into(), price_in_fri: 9_u8.into() }; + let l1_da_mode = L1DataAvailabilityMode::get_test_instance(&mut get_rng()); + + mock_state_sync_client.expect_get_block().times(1).with(predicate::eq(block_number)).returning( + move |_| { + Ok(Some(SyncBlock { + state_diff: Default::default(), + account_transaction_hashes: Default::default(), + l1_transaction_hashes: Default::default(), + block_header_without_hash: BlockHeaderWithoutHash { + block_number, + l1_gas_price, + l1_data_gas_price, + l2_gas_price, + sequencer: SequencerContractAddress(sequencer_address), + timestamp: block_timestamp, + l1_da_mode, + ..Default::default() + }, + })) + }, + ); + + let state_sync_reader = SyncStateReader::from_number( + Arc::new(mock_state_sync_client), + Arc::new(mock_class_manager_client), + block_number, + tokio::runtime::Handle::current(), + ); + let result = state_sync_reader.get_block_info().unwrap(); + + assert_eq!( + result, + BlockInfo { + block_number, + block_timestamp, + sequencer_address, + gas_prices: GasPrices { + eth_gas_prices: GasPriceVector { + l1_gas_price: NonzeroGasPrice::new_unchecked(l1_gas_price.price_in_wei), + l1_data_gas_price: NonzeroGasPrice::new_unchecked( + l1_data_gas_price.price_in_wei + ), + l2_gas_price: NonzeroGasPrice::new_unchecked(l2_gas_price.price_in_wei), + }, + strk_gas_prices: GasPriceVector { + l1_gas_price: NonzeroGasPrice::new_unchecked(l1_gas_price.price_in_fri), + l1_data_gas_price: NonzeroGasPrice::new_unchecked( + l1_data_gas_price.price_in_fri + ), + l2_gas_price: NonzeroGasPrice::new_unchecked(l2_gas_price.price_in_fri), + }, + }, + use_kzg_da: match l1_da_mode { + L1DataAvailabilityMode::Blob => true, + L1DataAvailabilityMode::Calldata => false, + }, + } + ); +} + +#[tokio::test] +async fn test_get_storage_at() { + let mut mock_state_sync_client = MockStateSyncClient::new(); + let mock_class_manager_client = MockClassManagerClient::new(); + let block_number = BlockNumber(1); + let contract_address = contract_address!("0x2"); + let storage_key = storage_key!("0x3"); + let value = felt!("0x4"); + mock_state_sync_client + .expect_get_storage_at() + .times(1) + .with( + predicate::eq(block_number), + predicate::eq(contract_address), + predicate::eq(storage_key), + ) + .returning(move |_, _, _| Ok(value)); + + let state_sync_reader = SyncStateReader::from_number( + Arc::new(mock_state_sync_client), + Arc::new(mock_class_manager_client), + block_number, + tokio::runtime::Handle::current(), + ); + + let result = tokio::task::spawn_blocking(move || { + state_sync_reader.get_storage_at(contract_address, storage_key) + }) + .await + .unwrap() + .unwrap(); + assert_eq!(result, value); +} + +#[tokio::test] +async fn test_get_nonce_at() { + let mut mock_state_sync_client = MockStateSyncClient::new(); + let mock_class_manager_client = MockClassManagerClient::new(); + let block_number = BlockNumber(1); + let contract_address = contract_address!("0x2"); + let expected_result = nonce!(0x3); + + mock_state_sync_client + .expect_get_nonce_at() + .times(1) + .with(predicate::eq(block_number), predicate::eq(contract_address)) + .returning(move |_, _| Ok(expected_result)); + + let state_sync_reader = SyncStateReader::from_number( + 
Arc::new(mock_state_sync_client), + Arc::new(mock_class_manager_client), + block_number, + tokio::runtime::Handle::current(), + ); + + let result = + tokio::task::spawn_blocking(move || state_sync_reader.get_nonce_at(contract_address)) + .await + .unwrap() + .unwrap(); + assert_eq!(result, expected_result); +} + +#[tokio::test] +async fn test_get_class_hash_at() { + let mut mock_state_sync_client = MockStateSyncClient::new(); + let mock_class_manager_client = MockClassManagerClient::new(); + let block_number = BlockNumber(1); + let contract_address = contract_address!("0x2"); + let expected_result = class_hash!("0x3"); + + mock_state_sync_client + .expect_get_class_hash_at() + .times(1) + .with(predicate::eq(block_number), predicate::eq(contract_address)) + .returning(move |_, _| Ok(expected_result)); + + let state_sync_reader = SyncStateReader::from_number( + Arc::new(mock_state_sync_client), + Arc::new(mock_class_manager_client), + block_number, + tokio::runtime::Handle::current(), + ); + + let result = + tokio::task::spawn_blocking(move || state_sync_reader.get_class_hash_at(contract_address)) + .await + .unwrap() + .unwrap(); + assert_eq!(result, expected_result); +} + +fn dummy_casm_contract_class() -> CasmContractClass { + CasmContractClass { + compiler_version: "0.0.0".to_string(), + prime: Default::default(), + bytecode: Default::default(), + bytecode_segment_lengths: Default::default(), + hints: Default::default(), + pythonic_hints: Default::default(), + entry_points_by_type: Default::default(), + } +} + +lazy_static! { + static ref DUMMY_CLASS_HASH: ClassHash = class_hash!("0x2"); +} + +fn assert_eq_state_result( + a: &StateResult, + b: &StateResult, +) { + match (a, b) { + (Ok(a), Ok(b)) => assert_eq!(a, b), + (Err(StateError::UndeclaredClassHash(a)), Err(StateError::UndeclaredClassHash(b))) => { + assert_eq!(a, b) + } + _ => panic!("StateResult mismatch (or unsupported comparison): {a:?} vs {b:?}"), + } +} + +#[rstest] +#[case::class_declared( + Ok(Some(ContractClass::V1((dummy_casm_contract_class(), SierraVersion::default())))), + Ok(true), + Ok(RunnableCompiledClass::V1((dummy_casm_contract_class(), SierraVersion::default()).try_into().unwrap())) +)] +#[case::class_not_declared_but_in_class_manager( + Ok(Some(ContractClass::V1((dummy_casm_contract_class(), SierraVersion::default())))), + Ok(false), + Err(StateError::UndeclaredClassHash(*DUMMY_CLASS_HASH)) +)] +#[case::class_not_declared( + Ok(None), + Ok(false), + Err(StateError::UndeclaredClassHash(*DUMMY_CLASS_HASH)) +)] +#[tokio::test] +async fn test_get_compiled_class( + #[case] class_manager_client_result: ClassManagerClientResult>, + #[case] sync_client_result: StateSyncClientResult, + #[case] expected_result: StateResult, +) { + let mut mock_state_sync_client = MockStateSyncClient::new(); + let mut mock_class_manager_client = MockClassManagerClient::new(); + + let class_hash = *DUMMY_CLASS_HASH; + let block_number = BlockNumber(1); + + mock_class_manager_client + .expect_get_executable() + .times(0..=1) + .with(predicate::eq(class_hash)) + .returning(move |_| class_manager_client_result.clone()); + + mock_state_sync_client + .expect_is_class_declared_at() + .times(1) + .with(predicate::eq(block_number), predicate::eq(class_hash)) + .return_once(move |_, _| sync_client_result); + + let state_sync_reader = SyncStateReader::from_number( + Arc::new(mock_state_sync_client), + Arc::new(mock_class_manager_client), + block_number, + tokio::runtime::Handle::current(), + ); + let result = + tokio::task::spawn_blocking(move || 
state_sync_reader.get_compiled_class(class_hash)) + .await + .unwrap(); + + assert_eq_state_result(&result, &expected_result); +} + +#[tokio::test] +#[should_panic] +async fn test_get_compiled_class_panics_when_class_exists_in_sync_but_not_in_class_manager() { + test_get_compiled_class( + Ok(None), + Ok(true), + Err(StateError::UndeclaredClassHash(*DUMMY_CLASS_HASH)), + ) + .await; +} diff --git a/crates/starknet_gateway/src/test_utils.rs b/crates/apollo_gateway/src/test_utils.rs similarity index 100% rename from crates/starknet_gateway/src/test_utils.rs rename to crates/apollo_gateway/src/test_utils.rs diff --git a/crates/apollo_gateway_types/Cargo.toml b/crates/apollo_gateway_types/Cargo.toml new file mode 100644 index 00000000000..e9adccf2eea --- /dev/null +++ b/crates/apollo_gateway_types/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "apollo_gateway_types" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[features] +testing = ["mockall"] + +[lints] +workspace = true + +[dependencies] +apollo_infra.workspace = true +apollo_network_types.workspace = true +apollo_proc_macros.workspace = true +apollo_rpc.workspace = true +async-trait.workspace = true +enum-assoc.workspace = true +enum-iterator = { workspace = true } +mockall = { workspace = true, optional = true } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +starknet_api.workspace = true +strum_macros.workspace = true +thiserror.workspace = true +tracing.workspace = true + +[dev-dependencies] +mockall.workspace = true diff --git a/crates/apollo_gateway_types/src/communication.rs b/crates/apollo_gateway_types/src/communication.rs new file mode 100644 index 00000000000..5cac143858e --- /dev/null +++ b/crates/apollo_gateway_types/src/communication.rs @@ -0,0 +1,70 @@ +use std::sync::Arc; + +use apollo_infra::component_client::{ClientError, LocalComponentClient, RemoteComponentClient}; +use apollo_infra::component_definitions::{ComponentClient, ComponentRequestAndResponseSender}; +use apollo_infra::impl_debug_for_infra_requests_and_responses; +use apollo_proc_macros::handle_all_response_variants; +use async_trait::async_trait; +#[cfg(any(feature = "testing", test))] +use mockall::automock; +use serde::{Deserialize, Serialize}; +use strum_macros::AsRefStr; +use thiserror::Error; + +use crate::errors::GatewayError; +use crate::gateway_types::{GatewayInput, GatewayOutput, GatewayResult}; + +pub type LocalGatewayClient = LocalComponentClient; +pub type RemoteGatewayClient = RemoteComponentClient; +pub type GatewayClientResult = Result; +pub type GatewayRequestAndResponseSender = + ComponentRequestAndResponseSender; +pub type SharedGatewayClient = Arc; +use tracing::{error, instrument}; + +/// Serves as the gateway's shared interface. Requires `Send + Sync` to allow transferring +/// and sharing resources (inputs, futures) across threads. 
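Editor's usage sketch ahead of the trait definition: how a caller holding a `SharedGatewayClient` might submit a transaction. The `submit` helper is hypothetical and not part of this PR; it assumes an already-built `RpcTransaction`:

```rust
use apollo_gateway_types::communication::SharedGatewayClient;
use apollo_gateway_types::gateway_types::GatewayInput;
use starknet_api::rpc_transaction::RpcTransaction;

async fn submit(client: SharedGatewayClient, rpc_tx: RpcTransaction) {
    // No p2p metadata for a locally submitted transaction.
    let input = GatewayInput { rpc_tx, message_metadata: None };
    match client.add_tx(input).await {
        Ok(output) => tracing::info!("accepted: {:?}", output.transaction_hash()),
        Err(e) => tracing::error!("gateway rejected tx: {e:?}"),
    }
}
```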
+#[cfg_attr(any(feature = "testing", test), automock)] +#[async_trait] +pub trait GatewayClient: Send + Sync { + async fn add_tx(&self, gateway_input: GatewayInput) -> GatewayClientResult<GatewayOutput>; +} + +#[derive(Clone, Serialize, Deserialize, AsRefStr)] +pub enum GatewayRequest { + AddTransaction(GatewayInput), +} + +impl_debug_for_infra_requests_and_responses!(GatewayRequest); + +#[derive(Clone, Serialize, Deserialize, AsRefStr)] +pub enum GatewayResponse { + AddTransaction(GatewayResult<GatewayOutput>), +} +impl_debug_for_infra_requests_and_responses!(GatewayResponse); + +#[derive(Clone, Debug, Error)] +pub enum GatewayClientError { + #[error(transparent)] + ClientError(#[from] ClientError), + #[error(transparent)] + GatewayError(#[from] GatewayError), +} + +#[async_trait] +impl<ComponentClientType> GatewayClient for ComponentClientType +where + ComponentClientType: Send + Sync + ComponentClient<GatewayRequest, GatewayResponse>, +{ + #[instrument(skip(self))] + async fn add_tx(&self, gateway_input: GatewayInput) -> GatewayClientResult<GatewayOutput> { + let request = GatewayRequest::AddTransaction(gateway_input); + handle_all_response_variants!( + GatewayResponse, + AddTransaction, + GatewayClientError, + GatewayError, + Direct + ) + } +} diff --git a/crates/apollo_gateway_types/src/deprecated_gateway_error.rs b/crates/apollo_gateway_types/src/deprecated_gateway_error.rs new file mode 100644 index 00000000000..095c80d8556 --- /dev/null +++ b/crates/apollo_gateway_types/src/deprecated_gateway_error.rs @@ -0,0 +1,106 @@ +#[cfg(any(feature = "testing", test))] +use enum_iterator::Sequence; +use serde::de::Error; +use serde::{Deserialize, Deserializer, Serialize}; + +/// Error codes returned by the starknet gateway. +#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] +#[serde(untagged)] +pub enum StarknetErrorCode { + #[serde(deserialize_with = "deserialize_unknown_error_code")] + UnknownErrorCode(String), + KnownErrorCode(KnownStarknetErrorCode), +} + +// This enum is needed because #[serde(other)] supports only unit variants and because +// #[serde(field_identifier)] doesn't work with serializable types.
+// The issue requesting that #[serde(other)] will deserialize the variant with the unknown tag's +// content is: https://github.com/serde-rs/serde/issues/1701 +#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] +#[cfg_attr(any(test, feature = "testing"), derive(Sequence))] +pub enum KnownStarknetErrorCode { + #[serde(rename = "StarknetErrorCode.UNDECLARED_CLASS")] + UndeclaredClass, + #[serde(rename = "StarknetErrorCode.BLOCK_NOT_FOUND")] + BlockNotFound, + #[serde(rename = "StarkErrorCode.MALFORMED_REQUEST")] + MalformedRequest, + #[serde(rename = "StarknetErrorCode.OUT_OF_RANGE_CLASS_HASH")] + OutOfRangeClassHash, + #[serde(rename = "StarknetErrorCode.CLASS_ALREADY_DECLARED")] + ClassAlreadyDeclared, + #[serde(rename = "StarknetErrorCode.COMPILATION_FAILED")] + CompilationFailed, + #[serde(rename = "StarknetErrorCode.CONTRACT_BYTECODE_SIZE_TOO_LARGE")] + ContractBytecodeSizeTooLarge, + #[serde(rename = "StarknetErrorCode.CONTRACT_CLASS_OBJECT_SIZE_TOO_LARGE")] + ContractClassObjectSizeTooLarge, + #[serde(rename = "StarknetErrorCode.DUPLICATED_TRANSACTION")] + DuplicatedTransaction, + #[serde(rename = "StarknetErrorCode.ENTRY_POINT_NOT_FOUND_IN_CONTRACT")] + EntryPointNotFoundInContract, + #[serde(rename = "StarknetErrorCode.INSUFFICIENT_ACCOUNT_BALANCE")] + InsufficientAccountBalance, + #[serde(rename = "StarknetErrorCode.INSUFFICIENT_MAX_FEE")] + InsufficientMaxFee, + #[serde(rename = "StarknetErrorCode.INVALID_COMPILED_CLASS_HASH")] + InvalidCompiledClassHash, + #[serde(rename = "StarknetErrorCode.INVALID_CONTRACT_CLASS_VERSION")] + InvalidContractClassVersion, + #[serde(rename = "StarknetErrorCode.INVALID_TRANSACTION_NONCE")] + InvalidTransactionNonce, + #[serde(rename = "StarknetErrorCode.INVALID_TRANSACTION_VERSION")] + InvalidTransactionVersion, + #[serde(rename = "StarknetErrorCode.VALIDATE_FAILURE")] + ValidateFailure, + #[serde(rename = "StarknetErrorCode.TRANSACTION_LIMIT_EXCEEDED")] + TransactionLimitExceeded, + #[serde(rename = "StarknetErrorCode.UNAUTHORIZED_DECLARE")] + UnauthorizedDeclare, +} + +/// A client error wrapping error codes returned by the starknet gateway. 
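Editor's aside: the ordering of the two variants of `StarknetErrorCode` above is load-bearing. With `#[serde(untagged)]`, serde tries variants in declaration order, so `UnknownErrorCode` comes first with a deserializer that deliberately fails on known code strings, letting serde fall through to `KnownErrorCode`. A miniature, self-contained demonstration of the trick, with hypothetical `Code`/`AnyCode` names:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
enum Code {
    #[serde(rename = "KNOWN")]
    Known,
}

#[derive(Debug, Deserialize, PartialEq)]
#[serde(untagged)]
enum AnyCode {
    // Tried first; its deserializer rejects known strings on purpose.
    #[serde(deserialize_with = "unknown_only")]
    Unknown(String),
    Known(Code),
}

fn unknown_only<'de, D: serde::Deserializer<'de>>(de: D) -> Result<String, D::Error> {
    let s = String::deserialize(de)?;
    if s == "KNOWN" {
        return Err(serde::de::Error::custom("known code, fall through"));
    }
    Ok(s)
}

fn main() {
    let known: AnyCode = serde_json::from_str("\"KNOWN\"").unwrap();
    assert_eq!(known, AnyCode::Known(Code::Known));
    let other: AnyCode = serde_json::from_str("\"OTHER\"").unwrap();
    assert_eq!(other, AnyCode::Unknown("OTHER".to_string()));
}
```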
+#[derive(thiserror::Error, Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] +pub struct StarknetError { + pub code: StarknetErrorCode, + pub message: String, +} + +impl StarknetError { + pub fn internal(message: &str) -> Self { + Self { code: Self::internal_error_code(), message: message.to_string() } + } + + pub fn is_internal(&self) -> bool { + self.code == Self::internal_error_code() + } + + fn internal_error_code() -> StarknetErrorCode { + StarknetErrorCode::UnknownErrorCode("StarknetErrorCode.InternalError".to_string()) + } +} + +impl std::fmt::Display for StarknetError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:?}") + } +} + +pub fn deserialize_unknown_error_code<'de, D>(de: D) -> Result +where + D: Deserializer<'de>, +{ + let string: String = Deserialize::deserialize(de)?; + let string_as_json = format!("\"{string}\""); + match serde_json::from_str::(&string_as_json) { + Ok(_) => Err(D::Error::custom( + "Trying to serialize a known Starknet error code into UnknownErrorCode", + )), + Err(json_err) => { + if json_err.is_data() { + return Ok(string); + } + Err(D::Error::custom(json_err)) + } + } +} diff --git a/crates/apollo_gateway_types/src/deprecated_gateway_error_test.rs b/crates/apollo_gateway_types/src/deprecated_gateway_error_test.rs new file mode 100644 index 00000000000..aa290f396fd --- /dev/null +++ b/crates/apollo_gateway_types/src/deprecated_gateway_error_test.rs @@ -0,0 +1,90 @@ +use serde_json::{Map, Value as JsonValue}; + +use crate::deprecated_gateway_error::{KnownStarknetErrorCode, StarknetError, StarknetErrorCode}; + +fn deserialize_starknet_error(code: &str, message: &str) -> StarknetError { + serde_json::from_value::(JsonValue::Object(Map::from_iter([ + ("code".to_string(), JsonValue::String(code.to_string())), + ("message".to_string(), JsonValue::String(message.to_string())), + ]))) + .unwrap() +} + +#[test] +fn known_error_code_deserialization() { + const MESSAGE: &str = "message"; + for (code_str, known_code) in [ + ("StarknetErrorCode.UNDECLARED_CLASS", KnownStarknetErrorCode::UndeclaredClass), + ("StarknetErrorCode.BLOCK_NOT_FOUND", KnownStarknetErrorCode::BlockNotFound), + ("StarkErrorCode.MALFORMED_REQUEST", KnownStarknetErrorCode::MalformedRequest), + ("StarknetErrorCode.OUT_OF_RANGE_CLASS_HASH", KnownStarknetErrorCode::OutOfRangeClassHash), + ("StarknetErrorCode.CLASS_ALREADY_DECLARED", KnownStarknetErrorCode::ClassAlreadyDeclared), + ("StarknetErrorCode.COMPILATION_FAILED", KnownStarknetErrorCode::CompilationFailed), + ( + "StarknetErrorCode.CONTRACT_BYTECODE_SIZE_TOO_LARGE", + KnownStarknetErrorCode::ContractBytecodeSizeTooLarge, + ), + ( + "StarknetErrorCode.CONTRACT_CLASS_OBJECT_SIZE_TOO_LARGE", + KnownStarknetErrorCode::ContractClassObjectSizeTooLarge, + ), + ("StarknetErrorCode.DUPLICATED_TRANSACTION", KnownStarknetErrorCode::DuplicatedTransaction), + ( + "StarknetErrorCode.ENTRY_POINT_NOT_FOUND_IN_CONTRACT", + KnownStarknetErrorCode::EntryPointNotFoundInContract, + ), + ( + "StarknetErrorCode.INSUFFICIENT_ACCOUNT_BALANCE", + KnownStarknetErrorCode::InsufficientAccountBalance, + ), + ("StarknetErrorCode.INSUFFICIENT_MAX_FEE", KnownStarknetErrorCode::InsufficientMaxFee), + ( + "StarknetErrorCode.INVALID_COMPILED_CLASS_HASH", + KnownStarknetErrorCode::InvalidCompiledClassHash, + ), + ( + "StarknetErrorCode.INVALID_CONTRACT_CLASS_VERSION", + KnownStarknetErrorCode::InvalidContractClassVersion, + ), + ( + "StarknetErrorCode.INVALID_TRANSACTION_NONCE", + KnownStarknetErrorCode::InvalidTransactionNonce, 
+ ), + ( + "StarknetErrorCode.INVALID_TRANSACTION_VERSION", + KnownStarknetErrorCode::InvalidTransactionVersion, + ), + ("StarknetErrorCode.VALIDATE_FAILURE", KnownStarknetErrorCode::ValidateFailure), + ( + "StarknetErrorCode.TRANSACTION_LIMIT_EXCEEDED", + KnownStarknetErrorCode::TransactionLimitExceeded, + ), + ] { + let starknet_error = deserialize_starknet_error(code_str, MESSAGE); + let expected_starknet_error = StarknetError { + code: StarknetErrorCode::KnownErrorCode(known_code), + message: MESSAGE.to_string(), + }; + assert_eq!(expected_starknet_error, starknet_error); + } +} + +#[test] +fn unknown_error_code_deserialization() { + const MESSAGE: &str = "message"; + const CODE_STR: &str = "StarknetErrorCode.MADE_UP_CODE_FOR_TEST"; + let starknet_error = deserialize_starknet_error(CODE_STR, MESSAGE); + let expected_starknet_error = StarknetError { + code: StarknetErrorCode::UnknownErrorCode(CODE_STR.to_string()), + message: MESSAGE.to_string(), + }; + assert_eq!(expected_starknet_error, starknet_error); +} + +// This test is needed because bugs can happen in the custom deserialization of UnknownErrorCode +#[test] +fn starknet_error_code_invalid_json_format_fails() { + assert!( + serde_json::from_str::("A string not surrounded with quotes").is_err() + ); +} diff --git a/crates/apollo_gateway_types/src/errors.rs b/crates/apollo_gateway_types/src/errors.rs new file mode 100644 index 00000000000..54502ebd06f --- /dev/null +++ b/crates/apollo_gateway_types/src/errors.rs @@ -0,0 +1,83 @@ +use apollo_network_types::network_types::BroadcastedMessageMetadata; +use apollo_rpc::error::{ + unexpected_error, + validation_failure, + JsonRpcError, + CLASS_ALREADY_DECLARED, + CLASS_HASH_NOT_FOUND, + COMPILATION_FAILED, + COMPILED_CLASS_HASH_MISMATCH, + CONTRACT_CLASS_SIZE_IS_TOO_LARGE, + DUPLICATE_TX, + INSUFFICIENT_ACCOUNT_BALANCE, + INSUFFICIENT_MAX_FEE, + INVALID_TRANSACTION_NONCE, + NON_ACCOUNT, + UNSUPPORTED_CONTRACT_CLASS_VERSION, + UNSUPPORTED_TX_VERSION, +}; +use enum_assoc::Assoc; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use crate::deprecated_gateway_error::StarknetError; + +/// Error returned by the gateway, adhering to the Starknet RPC error format. +// To get JsonRpcError from GatewaySpecError, use `into_rpc` method. +// TODO(yair): apollo_rpc has a test that the add_tx functions return the correct error. Make sure +// it is tested when we have a single gateway. 
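Editor's usage sketch for the enum defined below: `enum_assoc`'s `#[func]` attribute generates `into_rpc`, and the `Display` impl further down renders the code, message, and data. A hypothetical caller:

```rust
use apollo_gateway_types::errors::GatewaySpecError;

fn report(err: GatewaySpecError) -> String {
    // Per the Display impl below: "<code>: <message>. data: <data-as-json>".
    let text = format!("{err}");
    // into_rpc consumes the error, yielding the JsonRpcError the RPC layer returns.
    let _rpc = err.into_rpc();
    text
}

fn main() {
    println!("{}", report(GatewaySpecError::ValidationFailure { data: "bad signature".to_string() }));
}
```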
+#[derive(Debug, Clone, Eq, PartialEq, Assoc, Error, Serialize, Deserialize)] +#[func(pub fn into_rpc(self) -> JsonRpcError)] +pub enum GatewaySpecError { + #[assoc(into_rpc = CLASS_ALREADY_DECLARED)] + ClassAlreadyDeclared, + #[assoc(into_rpc = CLASS_HASH_NOT_FOUND)] + ClassHashNotFound, + #[assoc(into_rpc = COMPILED_CLASS_HASH_MISMATCH)] + CompiledClassHashMismatch, + #[assoc(into_rpc = COMPILATION_FAILED)] + CompilationFailed, + #[assoc(into_rpc = CONTRACT_CLASS_SIZE_IS_TOO_LARGE)] + ContractClassSizeIsTooLarge, + #[assoc(into_rpc = DUPLICATE_TX)] + DuplicateTx, + #[assoc(into_rpc = INSUFFICIENT_ACCOUNT_BALANCE)] + InsufficientAccountBalance, + #[assoc(into_rpc = INSUFFICIENT_MAX_FEE)] + InsufficientMaxFee, + #[assoc(into_rpc = INVALID_TRANSACTION_NONCE)] + InvalidTransactionNonce, + #[assoc(into_rpc = NON_ACCOUNT)] + NonAccount, + #[assoc(into_rpc = unexpected_error(_data))] + UnexpectedError { data: String }, + #[assoc(into_rpc = UNSUPPORTED_CONTRACT_CLASS_VERSION)] + UnsupportedContractClassVersion, + #[assoc(into_rpc = UNSUPPORTED_TX_VERSION)] + UnsupportedTxVersion, + #[assoc(into_rpc = validation_failure(_data))] + ValidationFailure { data: String }, +} + +impl std::fmt::Display for GatewaySpecError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let as_rpc = self.clone().into_rpc(); + write!( + f, + "{}: {}. data: {}", + as_rpc.code, + as_rpc.message, + serde_json::to_string(&as_rpc.data).unwrap() + ) + } +} + +#[derive(Clone, Debug, Error, PartialEq, Eq, Serialize, Deserialize)] +pub enum GatewayError { + // Corresponds to the deprecated gateway errors. + #[error("{source:?}")] + DeprecatedGatewayError { + source: StarknetError, + p2p_message_metadata: Option, + }, +} diff --git a/crates/apollo_gateway_types/src/gateway_types.rs b/crates/apollo_gateway_types/src/gateway_types.rs new file mode 100644 index 00000000000..a4bb4266f89 --- /dev/null +++ b/crates/apollo_gateway_types/src/gateway_types.rs @@ -0,0 +1,76 @@ +use apollo_network_types::network_types::BroadcastedMessageMetadata; +use serde::{Deserialize, Serialize}; +use starknet_api::core::{ClassHash, ContractAddress}; +use starknet_api::rpc_transaction::RpcTransaction; +use starknet_api::transaction::TransactionHash; + +use crate::errors::GatewayError; + +const TRANSACTION_RECEIVED: &str = "TRANSACTION_RECEIVED"; +pub const SUPPORTED_TRANSACTION_VERSIONS: [u64; 1] = [3]; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct GatewayInput { + pub rpc_tx: RpcTransaction, + pub message_metadata: Option, +} + +// TODO(Arni): Create an object that supports the RPC spec. Something like: `DeprecatedResponse` and +// `RpcSpecResponse`. 
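Editor's aside: because `GatewayOutput` below is `#[serde(untagged)]`, responses serialize flat, with no enum tag, preserving the deprecated gateway's JSON response shape. A sketch; the hex rendering of the hash is an assumption:

```rust
use apollo_gateway_types::gateway_types::{GatewayOutput, InvokeGatewayOutput};
use starknet_api::transaction::TransactionHash;
use starknet_types_core::felt::Felt;

fn main() {
    let out = GatewayOutput::Invoke(InvokeGatewayOutput::new(TransactionHash(Felt::ONE)));
    // Expected shape (assumed): {"transaction_hash":"0x1","code":"TRANSACTION_RECEIVED"}
    println!("{}", serde_json::to_string(&out).unwrap());
}
```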
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GatewayOutput { + Declare(DeclareGatewayOutput), + DeployAccount(DeployAccountGatewayOutput), + Invoke(InvokeGatewayOutput), +} + +impl GatewayOutput { + pub fn transaction_hash(&self) -> TransactionHash { + match self { + GatewayOutput::Declare(output) => output.transaction_hash, + GatewayOutput::DeployAccount(output) => output.transaction_hash, + GatewayOutput::Invoke(output) => output.transaction_hash, + } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct DeclareGatewayOutput { + pub transaction_hash: TransactionHash, + pub class_hash: ClassHash, + code: String, +} + +impl DeclareGatewayOutput { + pub fn new(transaction_hash: TransactionHash, class_hash: ClassHash) -> Self { + Self { transaction_hash, class_hash, code: TRANSACTION_RECEIVED.to_string() } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct DeployAccountGatewayOutput { + pub transaction_hash: TransactionHash, + pub address: ContractAddress, + code: String, +} + +impl DeployAccountGatewayOutput { + pub fn new(transaction_hash: TransactionHash, address: ContractAddress) -> Self { + Self { transaction_hash, address, code: TRANSACTION_RECEIVED.to_string() } + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct InvokeGatewayOutput { + pub transaction_hash: TransactionHash, + code: String, +} + +impl InvokeGatewayOutput { + pub fn new(transaction_hash: TransactionHash) -> Self { + Self { transaction_hash, code: TRANSACTION_RECEIVED.to_string() } + } +} + +pub type GatewayResult<T> = Result<T, GatewayError>; diff --git a/crates/apollo_gateway_types/src/lib.rs b/crates/apollo_gateway_types/src/lib.rs new file mode 100644 index 00000000000..ab94e3f2efe --- /dev/null +++ b/crates/apollo_gateway_types/src/lib.rs @@ -0,0 +1,6 @@ +pub mod communication; +pub mod deprecated_gateway_error; +#[cfg(test)] +mod deprecated_gateway_error_test; +pub mod errors; +pub mod gateway_types; diff --git a/crates/apollo_http_server/Cargo.toml b/crates/apollo_http_server/Cargo.toml new file mode 100644 index 00000000000..ac054f44581 --- /dev/null +++ b/crates/apollo_http_server/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "apollo_http_server" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[features] +testing = [ + "apollo_gateway_types/testing", + "blockifier_test_utils", + "mempool_test_utils", + "reqwest", + "starknet_api/testing", +] + +[lints] +workspace = true + +[dependencies] +apollo_config.workspace = true +apollo_gateway_types.workspace = true +apollo_infra.workspace = true +apollo_infra_utils.workspace = true +apollo_metrics.workspace = true +apollo_proc_macros.workspace = true +axum.workspace = true +blockifier_reexecution.workspace = true +blockifier_test_utils = { workspace = true, optional = true } +futures.workspace = true +hyper.workspace = true +mempool_test_utils = { workspace = true, optional = true } +regex.workspace = true +reqwest = { workspace = true, optional = true } +serde.workspace = true +serde_json.workspace = true +starknet_api.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = ["rt"] } +tracing.workspace = true +validator.workspace = true + +[dev-dependencies] +apollo_gateway_types = { workspace = true, features = ["testing"] } +apollo_metrics = { workspace = true, features = ["testing"] } +assert_matches.workspace = true +base64.workspace = true +blockifier = { workspace
= true, features = ["testing"] } +blockifier_test_utils.workspace = true +flate2.workspace = true +mempool_test_utils.workspace = true +metrics.workspace = true +metrics-exporter-prometheus.workspace = true +reqwest.workspace = true +rstest.workspace = true +serde_json.workspace = true +starknet-types-core.workspace = true +tracing-test.workspace = true diff --git a/crates/apollo_http_server/resources/deprecated_gateway/declare_tx.json b/crates/apollo_http_server/resources/deprecated_gateway/declare_tx.json new file mode 100644 index 00000000000..fad04d21592 --- /dev/null +++ b/crates/apollo_http_server/resources/deprecated_gateway/declare_tx.json @@ -0,0 +1,42 @@ +{ + "version": "0x3", + "signature": ["0x7"], + "nonce": "0x6", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + }, + "L1_DATA_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + }, + "L2_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + } + }, + "tip": "0x0", + "paymaster_data": [], + "contract_class": { + "contract_class_version": "0.1.0", + "sierra_program": "H4sIAAFx4mcC/4tWMqgwUtJRAFLGSrEAduCMSg4AAAA=", + "entry_points_by_type": { + "CONSTRUCTOR": [ + { + "selector": "0x4", + "function_idx": 5 + } + ], + "L1_HANDLER": [], + "EXTERNAL": [] + }, + "abi": "abi" + }, + "compiled_class_hash": "0x1e61", + "sender_address": "0x3e7", + "account_deployment_data": [], + "type": "DECLARE" +} diff --git a/crates/apollo_http_server/resources/deprecated_gateway/deploy_account_tx.json b/crates/apollo_http_server/resources/deprecated_gateway/deploy_account_tx.json new file mode 100644 index 00000000000..455cb4980bb --- /dev/null +++ b/crates/apollo_http_server/resources/deprecated_gateway/deploy_account_tx.json @@ -0,0 +1,27 @@ +{ + "version": "0x3", + "signature": ["0x6"], + "nonce": "0x0", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + }, + "L1_DATA_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + }, + "L2_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + } + }, + "tip": "0x0", + "paymaster_data": [], + "class_hash": "0xaa17", + "contract_address_salt": "0x14d", + "constructor_calldata": ["0x7", "0x8"], + "type": "DEPLOY_ACCOUNT" +} diff --git a/crates/apollo_http_server/resources/deprecated_gateway/invoke_tx.json b/crates/apollo_http_server/resources/deprecated_gateway/invoke_tx.json new file mode 100644 index 00000000000..6b3bfdb75bf --- /dev/null +++ b/crates/apollo_http_server/resources/deprecated_gateway/invoke_tx.json @@ -0,0 +1,30 @@ +{ + "version": "0x3", + "signature": [], + "nonce": "0x1", + "nonce_data_availability_mode": 0, + "fee_data_availability_mode": 0, + "resource_bounds": { + "L1_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + }, + "L1_DATA_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + }, + "L2_GAS": { + "max_amount": "0x10000000000", + "max_price_per_unit": "0x10000000000" + } + }, + "tip": "0x0", + "paymaster_data": [], + "sender_address": "0x6", + "calldata": [ + "0x5", + "0x6" + ], + "account_deployment_data": [], + "type": "INVOKE_FUNCTION" + } diff --git a/crates/apollo_http_server/resources/expected_gateway_response/declare_gateway_output.json 
b/crates/apollo_http_server/resources/expected_gateway_response/declare_gateway_output.json new file mode 100644 index 00000000000..e73d62d545d --- /dev/null +++ b/crates/apollo_http_server/resources/expected_gateway_response/declare_gateway_output.json @@ -0,0 +1,5 @@ +{ + "transaction_hash": "0x1", + "class_hash": "0x2", + "code": "TRANSACTION_RECEIVED" +} \ No newline at end of file diff --git a/crates/apollo_http_server/resources/expected_gateway_response/deploy_account_gateway_output.json b/crates/apollo_http_server/resources/expected_gateway_response/deploy_account_gateway_output.json new file mode 100644 index 00000000000..e696a64c572 --- /dev/null +++ b/crates/apollo_http_server/resources/expected_gateway_response/deploy_account_gateway_output.json @@ -0,0 +1,5 @@ +{ + "transaction_hash": "0x1", + "address": "0x3", + "code": "TRANSACTION_RECEIVED" +} \ No newline at end of file diff --git a/crates/apollo_http_server/resources/expected_gateway_response/invoke_gateway_output.json b/crates/apollo_http_server/resources/expected_gateway_response/invoke_gateway_output.json new file mode 100644 index 00000000000..b4148d24eca --- /dev/null +++ b/crates/apollo_http_server/resources/expected_gateway_response/invoke_gateway_output.json @@ -0,0 +1,4 @@ +{ + "transaction_hash": "0x1", + "code": "TRANSACTION_RECEIVED" +} \ No newline at end of file diff --git a/crates/apollo_http_server/src/communication.rs b/crates/apollo_http_server/src/communication.rs new file mode 100644 index 00000000000..6d307ac0d5b --- /dev/null +++ b/crates/apollo_http_server/src/communication.rs @@ -0,0 +1,5 @@ +use apollo_infra::component_server::WrapperServer; + +use crate::http_server::HttpServer as HttpServerComponent; + +pub type HttpServer = WrapperServer<HttpServerComponent>; diff --git a/crates/apollo_http_server/src/config.rs b/crates/apollo_http_server/src/config.rs new file mode 100644 index 00000000000..c21f13286f0 --- /dev/null +++ b/crates/apollo_http_server/src/config.rs @@ -0,0 +1,29 @@ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr}; + +use apollo_config::dumping::{ser_param, SerializeConfig}; +use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; +use serde::{Deserialize, Serialize}; +use validator::Validate; + +/// The http server connection related configuration.
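+// A minimal construction sketch (the localhost address is illustrative, not a mandated default): +// +//     let config = HttpServerConfig { ip: IpAddr::from(Ipv4Addr::LOCALHOST), port: 8080 };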
+#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)] +pub struct HttpServerConfig { + pub ip: IpAddr, + pub port: u16, +} + +impl SerializeConfig for HttpServerConfig { + fn dump(&self) -> BTreeMap<ParamPath, SerializedParam> { + BTreeMap::from_iter([ + ser_param("ip", &self.ip.to_string(), "The http server ip.", ParamPrivacyInput::Public), + ser_param("port", &self.port, "The http server port.", ParamPrivacyInput::Public), + ]) + } +} + +impl Default for HttpServerConfig { + fn default() -> Self { + Self { ip: IpAddr::from(Ipv4Addr::UNSPECIFIED), port: 8080 } + } +} diff --git a/crates/apollo_http_server/src/deprecated_gateway_transaction.rs b/crates/apollo_http_server/src/deprecated_gateway_transaction.rs new file mode 100644 index 00000000000..5a57d7a5911 --- /dev/null +++ b/crates/apollo_http_server/src/deprecated_gateway_transaction.rs @@ -0,0 +1,340 @@ +use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "testing", test))] +use starknet_api::compression_utils::compress_and_encode; +use starknet_api::compression_utils::{decode_and_decompress, CompressionError}; +use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce}; +use starknet_api::data_availability::DataAvailabilityMode; +use starknet_api::rpc_transaction::{ + EntryPointByType, + RpcDeclareTransaction, + RpcDeclareTransactionV3, + RpcDeployAccountTransaction, + RpcDeployAccountTransactionV3, + RpcInvokeTransaction, + RpcInvokeTransactionV3, + RpcTransaction, +}; +use starknet_api::state::SierraContractClass; +use starknet_api::transaction::fields::{ + AccountDeploymentData, + AllResourceBounds, + Calldata, + ContractAddressSalt, + PaymasterData, + ResourceBounds, + Tip, + TransactionSignature, +}; + +// TODO(Yael): remove the deprecated_gateway_transaction once we decide to support only transactions +// in the Rpc spec format.
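+// A hedged conversion sketch (assuming `deprecated_tx` was deserialized from the legacy JSON format defined below): +// +//     let rpc_tx: RpcTransaction = deprecated_tx.try_into()?; +// +// Only the Declare arm is fallible, since its Sierra program must be base64-decoded and gzip-decompressed; the Invoke and DeployAccount arms convert via plain `From` impls.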
+ +#[cfg(test)] +#[path = "deprecated_gateway_transaction_test.rs"] +mod deprecated_gateway_transaction_test; + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Hash)] +#[serde(tag = "type")] +#[serde(deny_unknown_fields)] +pub enum DeprecatedGatewayTransactionV3 { + #[serde(rename = "DECLARE")] + Declare(DeprecatedGatewayDeclareTransaction), + #[serde(rename = "DEPLOY_ACCOUNT")] + DeployAccount(DeprecatedGatewayDeployAccountTransaction), + #[serde(rename = "INVOKE_FUNCTION")] + Invoke(DeprecatedGatewayInvokeTransaction), +} + +impl TryFrom<DeprecatedGatewayTransactionV3> for RpcTransaction { + type Error = CompressionError; + + fn try_from(deprecated_tx: DeprecatedGatewayTransactionV3) -> Result<Self, Self::Error> { + Ok(match deprecated_tx { + DeprecatedGatewayTransactionV3::Declare(DeprecatedGatewayDeclareTransaction::V3( + deprecated_declare_tx, + )) => RpcTransaction::Declare(RpcDeclareTransaction::V3( + deprecated_declare_tx.try_into()?, + )), + DeprecatedGatewayTransactionV3::DeployAccount( + DeprecatedGatewayDeployAccountTransaction::V3(deprecated_deploy_account_tx), + ) => RpcTransaction::DeployAccount(RpcDeployAccountTransaction::V3( + deprecated_deploy_account_tx.into(), + )), + DeprecatedGatewayTransactionV3::Invoke(DeprecatedGatewayInvokeTransaction::V3( + deprecated_invoke_tx, + )) => RpcTransaction::Invoke(RpcInvokeTransaction::V3(deprecated_invoke_tx.into())), + }) + } +} + +#[cfg(any(feature = "testing", test))] +impl From<RpcTransaction> for DeprecatedGatewayTransactionV3 { + fn from(value: RpcTransaction) -> Self { + match value { + RpcTransaction::Declare(RpcDeclareTransaction::V3(declare_tx)) => { + DeprecatedGatewayTransactionV3::Declare(DeprecatedGatewayDeclareTransaction::V3( + declare_tx.into(), + )) + } + RpcTransaction::DeployAccount(RpcDeployAccountTransaction::V3(deploy_account_tx)) => { + DeprecatedGatewayTransactionV3::DeployAccount( + DeprecatedGatewayDeployAccountTransaction::V3(deploy_account_tx.into()), + ) + } + RpcTransaction::Invoke(RpcInvokeTransaction::V3(invoke_tx)) => { + DeprecatedGatewayTransactionV3::Invoke(DeprecatedGatewayInvokeTransaction::V3( + invoke_tx.into(), + )) + } + } + } +} + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Hash)] +#[serde(tag = "version")] +pub enum DeprecatedGatewayInvokeTransaction { + #[serde(rename = "0x3")] + V3(DeprecatedGatewayInvokeTransactionV3), +} + +#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct DeprecatedGatewayInvokeTransactionV3 { + pub sender_address: ContractAddress, + pub calldata: Calldata, + pub signature: TransactionSignature, + pub nonce: Nonce, + pub resource_bounds: DeprecatedGatewayAllResourceBounds, + pub tip: Tip, + pub paymaster_data: PaymasterData, + pub account_deployment_data: AccountDeploymentData, + pub nonce_data_availability_mode: DataAvailabilityMode, + pub fee_data_availability_mode: DataAvailabilityMode, +} + +impl From<DeprecatedGatewayInvokeTransactionV3> for RpcInvokeTransactionV3 { + fn from(deprecated_invoke_tx: DeprecatedGatewayInvokeTransactionV3) -> Self { + RpcInvokeTransactionV3 { + sender_address: deprecated_invoke_tx.sender_address, + calldata: deprecated_invoke_tx.calldata, + signature: deprecated_invoke_tx.signature, + nonce: deprecated_invoke_tx.nonce, + resource_bounds: deprecated_invoke_tx.resource_bounds.into(), + tip: deprecated_invoke_tx.tip, + paymaster_data: deprecated_invoke_tx.paymaster_data, + account_deployment_data: deprecated_invoke_tx.account_deployment_data, + nonce_data_availability_mode: deprecated_invoke_tx.nonce_data_availability_mode, + fee_data_availability_mode:
deprecated_invoke_tx.fee_data_availability_mode, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl From<RpcInvokeTransactionV3> for DeprecatedGatewayInvokeTransactionV3 { + fn from(value: RpcInvokeTransactionV3) -> Self { + Self { + calldata: value.calldata, + tip: value.tip, + resource_bounds: value.resource_bounds.into(), + paymaster_data: value.paymaster_data, + sender_address: value.sender_address, + signature: value.signature, + nonce: value.nonce, + account_deployment_data: value.account_deployment_data, + nonce_data_availability_mode: value.nonce_data_availability_mode, + fee_data_availability_mode: value.fee_data_availability_mode, + } + } +} + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Hash)] +#[serde(tag = "version")] +pub enum DeprecatedGatewayDeployAccountTransaction { + #[serde(rename = "0x3")] + V3(DeprecatedGatewayDeployAccountTransactionV3), +} + +#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct DeprecatedGatewayDeployAccountTransactionV3 { + pub signature: TransactionSignature, + pub nonce: Nonce, + pub class_hash: ClassHash, + pub contract_address_salt: ContractAddressSalt, + pub constructor_calldata: Calldata, + pub resource_bounds: DeprecatedGatewayAllResourceBounds, + pub tip: Tip, + pub paymaster_data: PaymasterData, + pub nonce_data_availability_mode: DataAvailabilityMode, + pub fee_data_availability_mode: DataAvailabilityMode, +} + +impl From<DeprecatedGatewayDeployAccountTransactionV3> for RpcDeployAccountTransactionV3 { + fn from(deprecated_deploy_account_tx: DeprecatedGatewayDeployAccountTransactionV3) -> Self { + RpcDeployAccountTransactionV3 { + signature: deprecated_deploy_account_tx.signature, + nonce: deprecated_deploy_account_tx.nonce, + class_hash: deprecated_deploy_account_tx.class_hash, + contract_address_salt: deprecated_deploy_account_tx.contract_address_salt, + constructor_calldata: deprecated_deploy_account_tx.constructor_calldata, + resource_bounds: deprecated_deploy_account_tx.resource_bounds.into(), + tip: deprecated_deploy_account_tx.tip, + paymaster_data: deprecated_deploy_account_tx.paymaster_data, + nonce_data_availability_mode: deprecated_deploy_account_tx.nonce_data_availability_mode, + fee_data_availability_mode: deprecated_deploy_account_tx.fee_data_availability_mode, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl From<RpcDeployAccountTransactionV3> for DeprecatedGatewayDeployAccountTransactionV3 { + fn from(value: RpcDeployAccountTransactionV3) -> Self { + Self { + signature: value.signature, + nonce: value.nonce, + class_hash: value.class_hash, + contract_address_salt: value.contract_address_salt, + constructor_calldata: value.constructor_calldata, + resource_bounds: value.resource_bounds.into(), + tip: value.tip, + paymaster_data: value.paymaster_data, + nonce_data_availability_mode: value.nonce_data_availability_mode, + fee_data_availability_mode: value.fee_data_availability_mode, + } + } +} + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Hash)] +#[serde(tag = "version")] +pub enum DeprecatedGatewayDeclareTransaction { + #[serde(rename = "0x3")] + V3(DeprecatedGatewayDeclareTransactionV3), +} + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Hash)] +pub struct DeprecatedGatewayDeclareTransactionV3 { + pub sender_address: ContractAddress, + pub compiled_class_hash: CompiledClassHash, + pub signature: TransactionSignature, + pub nonce: Nonce, + pub contract_class: DeprecatedGatewaySierraContractClass, + pub resource_bounds: DeprecatedGatewayAllResourceBounds, + pub tip: Tip, + pub paymaster_data: PaymasterData, +
pub account_deployment_data: AccountDeploymentData, + pub nonce_data_availability_mode: DataAvailabilityMode, + pub fee_data_availability_mode: DataAvailabilityMode, +} + +impl TryFrom<DeprecatedGatewayDeclareTransactionV3> for RpcDeclareTransactionV3 { + type Error = CompressionError; + + fn try_from( + deprecated_declare_tx: DeprecatedGatewayDeclareTransactionV3, + ) -> Result<Self, Self::Error> { + Ok(RpcDeclareTransactionV3 { + sender_address: deprecated_declare_tx.sender_address, + compiled_class_hash: deprecated_declare_tx.compiled_class_hash, + signature: deprecated_declare_tx.signature, + nonce: deprecated_declare_tx.nonce, + contract_class: deprecated_declare_tx.contract_class.try_into()?, + resource_bounds: deprecated_declare_tx.resource_bounds.into(), + tip: deprecated_declare_tx.tip, + paymaster_data: deprecated_declare_tx.paymaster_data, + account_deployment_data: deprecated_declare_tx.account_deployment_data, + nonce_data_availability_mode: deprecated_declare_tx.nonce_data_availability_mode, + fee_data_availability_mode: deprecated_declare_tx.fee_data_availability_mode, + }) + } +} + +#[cfg(any(feature = "testing", test))] +impl From<RpcDeclareTransactionV3> for DeprecatedGatewayDeclareTransactionV3 { + fn from(value: RpcDeclareTransactionV3) -> Self { + Self { + sender_address: value.sender_address, + compiled_class_hash: value.compiled_class_hash, + signature: value.signature, + nonce: value.nonce, + contract_class: value.contract_class.try_into().expect( + "Failed to convert SierraContractClass to DeprecatedGatewaySierraContractClass", + ), + resource_bounds: value.resource_bounds.into(), + tip: value.tip, + paymaster_data: value.paymaster_data, + account_deployment_data: value.account_deployment_data, + nonce_data_availability_mode: value.nonce_data_availability_mode, + fee_data_availability_mode: value.fee_data_availability_mode, + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize, Hash)] +pub struct DeprecatedGatewaySierraContractClass { + // The sierra program is compressed and encoded in base64.
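+    // (Encode-side sketch, for illustration only: roughly base64(gzip(serde_json::to_vec(&sierra_program))); see `compress_and_encode` in starknet_api::compression_utils.)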
+ pub sierra_program: String, + pub contract_class_version: String, + pub entry_points_by_type: EntryPointByType, + pub abi: String, +} + +impl TryFrom<DeprecatedGatewaySierraContractClass> for SierraContractClass { + type Error = CompressionError; + + fn try_from( + rest_sierra_contract_class: DeprecatedGatewaySierraContractClass, + ) -> Result<Self, Self::Error> { + let sierra_program = decode_and_decompress(&rest_sierra_contract_class.sierra_program)?; + Ok(SierraContractClass { + sierra_program, + contract_class_version: rest_sierra_contract_class.contract_class_version, + entry_points_by_type: rest_sierra_contract_class.entry_points_by_type, + abi: rest_sierra_contract_class.abi, + }) + } +} + +#[cfg(any(feature = "testing", test))] +impl TryFrom<SierraContractClass> for DeprecatedGatewaySierraContractClass { + type Error = CompressionError; + + fn try_from(sierra_contract_class: SierraContractClass) -> Result<Self, Self::Error> { + let sierra_program = + compress_and_encode(serde_json::to_value(sierra_contract_class.sierra_program)?)?; + Ok(DeprecatedGatewaySierraContractClass { + sierra_program, + contract_class_version: sierra_contract_class.contract_class_version, + entry_points_by_type: sierra_contract_class.entry_points_by_type, + abi: sierra_contract_class.abi, + }) + } +} + +#[derive( + Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, +)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub struct DeprecatedGatewayAllResourceBounds { + pub l1_gas: ResourceBounds, + pub l2_gas: ResourceBounds, + pub l1_data_gas: ResourceBounds, +} + +impl From<DeprecatedGatewayAllResourceBounds> for AllResourceBounds { + fn from(deprecated_all_resource_bounds: DeprecatedGatewayAllResourceBounds) -> Self { + AllResourceBounds { + l1_gas: deprecated_all_resource_bounds.l1_gas, + l2_gas: deprecated_all_resource_bounds.l2_gas, + l1_data_gas: deprecated_all_resource_bounds.l1_data_gas, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl From<AllResourceBounds> for DeprecatedGatewayAllResourceBounds { + fn from(all_resource_bounds: AllResourceBounds) -> Self { + DeprecatedGatewayAllResourceBounds { + l1_gas: all_resource_bounds.l1_gas, + l2_gas: all_resource_bounds.l2_gas, + l1_data_gas: all_resource_bounds.l1_data_gas, + } + } +} diff --git a/crates/apollo_http_server/src/deprecated_gateway_transaction_test.rs b/crates/apollo_http_server/src/deprecated_gateway_transaction_test.rs new file mode 100644 index 00000000000..b0d86d252cb --- /dev/null +++ b/crates/apollo_http_server/src/deprecated_gateway_transaction_test.rs @@ -0,0 +1,90 @@ +use std::io::Write; + +use assert_matches::assert_matches; +use rstest::rstest; +use starknet_api::compression_utils::CompressionError; +use starknet_api::rpc_transaction::RpcDeclareTransactionV3; +use starknet_api::test_utils::read_json_file; + +use crate::deprecated_gateway_transaction::{ + DeprecatedGatewayDeclareTransaction, + DeprecatedGatewayDeployAccountTransaction, + DeprecatedGatewayInvokeTransaction, +}; + +// Utils. + +const DEPRECATED_GATEWAY_INVOKE_TX_JSON_PATH: &str = "deprecated_gateway/invoke_tx.json"; +const DEPRECATED_GATEWAY_DEPLOY_ACCOUNT_TX_JSON_PATH: &str = + "deprecated_gateway/deploy_account_tx.json"; +const DEPRECATED_GATEWAY_DECLARE_TX_JSON_PATH: &str = "deprecated_gateway/declare_tx.json"; + +fn deprecated_gateway_declare_tx() -> DeprecatedGatewayDeclareTransaction { + serde_json::from_value(read_json_file(DEPRECATED_GATEWAY_DECLARE_TX_JSON_PATH)) + .expect("Failed to deserialize json to DeprecatedGatewayDeclareTransaction") +} + +// Tests.
+ +#[test] +fn deprecated_gateway_invoke_tx_deserialization() { + let _: DeprecatedGatewayInvokeTransaction = + serde_json::from_value(read_json_file(DEPRECATED_GATEWAY_INVOKE_TX_JSON_PATH)) + .expect("Failed to deserialize json to DeprecatedGatewayInvokeTransaction"); +} + +#[test] +fn deprecated_gateway_deploy_account_tx_deserialization() { + let _: DeprecatedGatewayDeployAccountTransaction = + serde_json::from_value(read_json_file(DEPRECATED_GATEWAY_DEPLOY_ACCOUNT_TX_JSON_PATH)) + .expect("Failed to deserialize json to DeprecatedGatewayDeployAccountTransaction"); +} + +#[test] +fn deprecated_gateway_declare_tx_conversion() { + let deprecated_tx = deprecated_gateway_declare_tx(); + let deprecated_declare_tx = assert_matches!( + deprecated_tx, + DeprecatedGatewayDeclareTransaction::V3(deprecated_declare_tx) => + deprecated_declare_tx + ); + // TODO(Arni): Assert the deprecated transaction was converted to the expected RPC transaction. + let _declare_tx: RpcDeclareTransactionV3 = deprecated_declare_tx.try_into().unwrap(); +} + +fn create_malformed_sierra_program_for_serde_error() -> String { + let invalid_json = b"arbitrary"; + let mut encoder = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default()); + encoder.write_all(invalid_json).unwrap(); + let compressed = encoder.finish().unwrap(); + base64::encode(compressed) +} + +#[rstest] +#[case::io_error( + base64::encode("arbitrary"), + |error| assert_matches!(error, CompressionError::Io(..)) +)] +#[case::serde_error( + create_malformed_sierra_program_for_serde_error(), + |error| assert_matches!(error, CompressionError::Serde(..)) +)] +#[case::decode_error( + "arbitrary".to_string(), + |error| assert_matches!(error, CompressionError::Decode(base64::DecodeError::InvalidLength)) +)] +fn deprecated_gateway_declare_tx_negative_flow_conversion( + #[case] sierra_program: String, + #[case] assert_expected_error_fn: impl Fn(CompressionError), +) { + let deprecated_tx = deprecated_gateway_declare_tx(); + let mut deprecated_declare_tx = assert_matches!( + deprecated_tx, + DeprecatedGatewayDeclareTransaction::V3(deprecated_declare_tx) => + deprecated_declare_tx + ); + + deprecated_declare_tx.contract_class.sierra_program = sierra_program; + let error = RpcDeclareTransactionV3::try_from(deprecated_declare_tx).unwrap_err(); + assert_expected_error_fn(error); +} diff --git a/crates/apollo_http_server/src/errors.rs b/crates/apollo_http_server/src/errors.rs new file mode 100644 index 00000000000..d4f869a0919 --- /dev/null +++ b/crates/apollo_http_server/src/errors.rs @@ -0,0 +1,107 @@ +use apollo_gateway_types::communication::GatewayClientError; +use apollo_gateway_types::deprecated_gateway_error::{ + KnownStarknetErrorCode, + StarknetError, + StarknetErrorCode, +}; +use apollo_gateway_types::errors::GatewayError; +use axum::response::{IntoResponse, Response}; +use hyper::StatusCode; +use regex::Regex; +use starknet_api::compression_utils::CompressionError; +use thiserror::Error; +use tracing::{debug, error}; + +/// Errors originating from the [`HttpServer::run`] command. +#[derive(Debug, Error)] +pub enum HttpServerRunError { + #[error(transparent)] + ServerStartupError(#[from] hyper::Error), +} + +/// Errors that may occur during the runtime of the HTTP server.
+#[derive(Error, Debug)] +pub enum HttpServerError { + #[error(transparent)] + GatewayClientError(#[from] GatewayClientError), + #[error(transparent)] + DeserializationError(#[from] serde_json::Error), + #[error(transparent)] + DecompressionError(#[from] CompressionError), +} + +impl IntoResponse for HttpServerError { + fn into_response(self) -> Response { + match self { + HttpServerError::GatewayClientError(e) => gw_client_err_into_response(e), + HttpServerError::DeserializationError(e) => serde_error_into_response(e), + HttpServerError::DecompressionError(e) => compression_error_into_response(e), + } + } +} + +fn compression_error_into_response(err: CompressionError) -> Response { + debug!("Failed to decompress the transaction: {}", err); + let (response_code, deprecated_gateway_error) = ( + StatusCode::BAD_REQUEST, + StarknetError { + code: StarknetErrorCode::UnknownErrorCode( + "StarknetErrorCode.INVALID_PROGRAM".to_string(), + ), + message: "Invalid compressed program.".to_string(), + }, + ); + let response_body = serialize_error(&deprecated_gateway_error); + (response_code, response_body).into_response() +} + +fn serde_error_into_response(err: serde_json::Error) -> Response { + debug!("Failed to deserialize transaction: {}", err); + let (response_code, deprecated_gateway_error) = ( + StatusCode::BAD_REQUEST, + StarknetError { + code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::MalformedRequest), + message: err.to_string(), + }, + ); + let response_body = serialize_error(&deprecated_gateway_error); + (response_code, response_body).into_response() +} + +fn gw_client_err_into_response(err: GatewayClientError) -> Response { + let (response_code, deprecated_gateway_error) = match err { + GatewayClientError::ClientError(e) => { + error!("Encountered a ClientError: {}", e); + (StatusCode::INTERNAL_SERVER_ERROR, StarknetError::internal("Internal error")) + } + GatewayClientError::GatewayError(GatewayError::DeprecatedGatewayError { + source, + p2p_message_metadata: _, + }) => { + // TODO(yair): Find out what is the p2p_message_metadata and whether it needs to be + // added to the error response. + (StatusCode::BAD_REQUEST, source) + } + }; + + let response_body = serialize_error(&deprecated_gateway_error); + + (response_code, response_body).into_response() +} + +/// Serializes a `StarknetError` into an HTTP response, encoding the error message +/// to defend against potential cross-site risks.
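+// +// A hedged before/after sketch of the sanitization below (the input message is illustrative): +// +//     input:  bad `version` <tag> +//     output: bad 'version'  tag +// +// (backticks and double quotes become single quotes; every other disallowed character becomes a space)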
+fn serialize_error(error: &StarknetError) -> Response { + let quote_re = Regex::new(r#"[\"`]"#).unwrap(); // " and ` => ' (single quote) + let sanitize_re = Regex::new(r#"[^a-zA-Z0-9 :.,\[\]\(\)\{\}'_]"#).unwrap(); // All other non-alphanumeric characters except [:.,[](){}]_ => ' ' (space) + + let mut message = error.message.clone(); + message = quote_re.replace_all(&message, "'").to_string(); + message = sanitize_re.replace_all(&message, " ").to_string(); + + let sanitized_error = StarknetError { code: error.code.clone(), message }; + + serde_json::to_vec(&sanitized_error) + .expect("Expecting a serializable StarknetError.") + .into_response() +} diff --git a/crates/apollo_http_server/src/http_server.rs b/crates/apollo_http_server/src/http_server.rs new file mode 100644 index 00000000000..ef857b772fc --- /dev/null +++ b/crates/apollo_http_server/src/http_server.rs @@ -0,0 +1,243 @@ +use std::clone::Clone; +use std::net::SocketAddr; +use std::string::String; + +use apollo_gateway_types::communication::{GatewayClientError, SharedGatewayClient}; +use apollo_gateway_types::deprecated_gateway_error::{ + KnownStarknetErrorCode, + StarknetError, + StarknetErrorCode, +}; +use apollo_gateway_types::errors::GatewayError; +use apollo_gateway_types::gateway_types::{ + GatewayInput, + GatewayOutput, + SUPPORTED_TRANSACTION_VERSIONS, +}; +use apollo_infra::component_definitions::ComponentStarter; +use apollo_infra_utils::type_name::short_type_name; +use apollo_proc_macros::sequencer_latency_histogram; +use axum::extract::State; +use axum::http::HeaderMap; +use axum::routing::{get, post}; +use axum::{async_trait, Json, Router}; +use blockifier_reexecution::state_reader::serde_utils::deserialize_transaction_json_to_starknet_api_tx; +use serde::de::Error; +use starknet_api::rpc_transaction::RpcTransaction; +use starknet_api::serde_utils::bytes_from_hex_str; +use starknet_api::transaction::fields::ValidResourceBounds; +use tracing::{debug, info, instrument}; + +use crate::config::HttpServerConfig; +use crate::deprecated_gateway_transaction::DeprecatedGatewayTransactionV3; +use crate::errors::{HttpServerError, HttpServerRunError}; +use crate::metrics::{ + init_metrics, + ADDED_TRANSACTIONS_DEPRECATED_ERROR, + ADDED_TRANSACTIONS_FAILURE, + ADDED_TRANSACTIONS_INTERNAL_ERROR, + ADDED_TRANSACTIONS_SUCCESS, + ADDED_TRANSACTIONS_TOTAL, + HTTP_SERVER_ADD_TX_LATENCY, +}; + +#[cfg(test)] +#[path = "http_server_test.rs"] +pub mod http_server_test; + +pub type HttpServerResult<T> = Result<T, HttpServerError>; + +const CLIENT_REGION_HEADER: &str = "X-Client-Region"; + +pub struct HttpServer { + pub config: HttpServerConfig, + app_state: AppState, +} + +#[derive(Clone)] +pub struct AppState { + pub gateway_client: SharedGatewayClient, +} + +impl HttpServer { + pub fn new(config: HttpServerConfig, gateway_client: SharedGatewayClient) -> Self { + let app_state = AppState { gateway_client }; + HttpServer { config, app_state } + } + + pub async fn run(&mut self) -> Result<(), HttpServerRunError> { + init_metrics(); + + // Parses the bind address from HttpServerConfig, returning an error for invalid addresses. + let HttpServerConfig { ip, port } = self.config; + let addr = SocketAddr::new(ip, port); + let app = self.app(); + info!("HttpServer running using socket: {}", addr); + + // Create a server that runs forever. + Ok(axum::Server::bind(&addr).serve(app.into_make_service()).await?) + } + + // TODO(Yael): consider supporting both formats in the same endpoint if possible.
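+    // A hedged usage sketch of the REST endpoint (curl-style; host and port are illustrative): +    // +    //     curl -X POST http://localhost:8080/gateway/add_transaction \ +    //          -H 'Content-Type: application/json' -d @invoke_tx.json +    // +    // where invoke_tx.json follows the deprecated format under resources/deprecated_gateway/.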
+ pub fn app(&self) -> Router { + Router::new() + // Json Rpc endpoint + .route("/gateway/add_rpc_transaction", post(add_rpc_tx)) + .with_state(self.app_state.clone()) + // Rest api endpoint + .route("/gateway/add_transaction", post(add_tx)) + .with_state(self.app_state.clone()) + // TODO(shahak): Remove this once we fix the centralized simulator to not use is_alive + // and is_ready. + .route( + "/gateway/is_alive", + get(|| futures::future::ready("Gateway is alive".to_owned())) + ) + .route( + "/gateway/is_ready", + get(|| futures::future::ready("Gateway is ready".to_owned())) + ) + } +} + +// HttpServer handlers. + +#[instrument(skip(app_state))] +async fn add_rpc_tx( + State(app_state): State<AppState>, + headers: HeaderMap, + Json(tx): Json<RpcTransaction>, +) -> HttpServerResult<Json<GatewayOutput>> { + ADDED_TRANSACTIONS_TOTAL.increment(1); + add_tx_inner(app_state, headers, tx).await +} + +#[instrument(skip(app_state))] +#[sequencer_latency_histogram(HTTP_SERVER_ADD_TX_LATENCY, true)] +async fn add_tx( + State(app_state): State<AppState>, + headers: HeaderMap, + tx: String, +) -> HttpServerResult<Json<GatewayOutput>> { + ADDED_TRANSACTIONS_TOTAL.increment(1); + validate_supported_tx_version(&tx).inspect_err(|e| { + debug!("Error while validating transaction version: {}", e); + increment_failure_metrics(e); + })?; + let tx: DeprecatedGatewayTransactionV3 = serde_json::from_str(&tx).inspect_err(|e| { + debug!("Error while parsing transaction: {}", e); + check_supported_resource_bounds_and_increment_metrics(&tx); + })?; + let rpc_tx = tx.try_into().inspect_err(|e| { + debug!("Error while converting deprecated gateway transaction into RPC transaction: {}", e); + })?; + + add_tx_inner(app_state, headers, rpc_tx).await +} + +#[allow(clippy::result_large_err)] +fn validate_supported_tx_version(tx: &str) -> HttpServerResult<()> { + let tx_json_value: serde_json::Value = serde_json::from_str(tx)?; + let tx_version_json = tx_json_value + .get("version") + .ok_or_else(|| serde_json::Error::custom("Missing version field"))?; + let tx_version = tx_version_json + .as_str() + .ok_or_else(|| serde_json::Error::custom("Version field is not valid"))?; + let tx_version = + u64::from_be_bytes(bytes_from_hex_str::<8, true>(tx_version).map_err(|_| { + serde_json::Error::custom(format!( + "Version field is not a valid hex string: {tx_version}" + )) + })?); + if !SUPPORTED_TRANSACTION_VERSIONS.contains(&tx_version) { + ADDED_TRANSACTIONS_DEPRECATED_ERROR.increment(1); + return Err(HttpServerError::GatewayClientError(GatewayClientError::GatewayError( + GatewayError::DeprecatedGatewayError { + source: StarknetError { + code: StarknetErrorCode::KnownErrorCode( + KnownStarknetErrorCode::InvalidTransactionVersion, + ), + message: format!( + "Transaction version {tx_version} is not supported. Supported versions: \ {SUPPORTED_TRANSACTION_VERSIONS:?}."
+ ), + }, + p2p_message_metadata: None, + }, + ))); + } + Ok(()) +} + +fn check_supported_resource_bounds_and_increment_metrics(tx: &str) { + if let Ok(tx_json_value) = serde_json::from_str(tx) { + if let Ok(transaction) = deserialize_transaction_json_to_starknet_api_tx(tx_json_value) { + if let Some(ValidResourceBounds::L1Gas(_)) = transaction.resource_bounds() { + ADDED_TRANSACTIONS_DEPRECATED_ERROR.increment(1); + } + } + } + ADDED_TRANSACTIONS_FAILURE.increment(1); +} + +async fn add_tx_inner( + app_state: AppState, + headers: HeaderMap, + tx: RpcTransaction, +) -> HttpServerResult<Json<GatewayOutput>> { + let gateway_input: GatewayInput = GatewayInput { rpc_tx: tx, message_metadata: None }; + let add_tx_result = app_state.gateway_client.add_tx(gateway_input).await.map_err(|e| { + debug!("Error while adding transaction: {}", e); + HttpServerError::from(e) + }); + + let region = + headers.get(CLIENT_REGION_HEADER).and_then(|region| region.to_str().ok()).unwrap_or("N/A"); + record_added_transactions(&add_tx_result, region); + Ok(Json(add_tx_result?)) +} + +fn record_added_transactions(add_tx_result: &HttpServerResult<GatewayOutput>, region: &str) { + match add_tx_result { + Ok(gateway_output) => { + // TODO(Arni): Reconsider the tracing level for this log. + info!( + transaction_hash = %gateway_output.transaction_hash(), + region = %region, + "Recorded transaction" + ); + ADDED_TRANSACTIONS_SUCCESS.increment(1); + } + Err(err) => increment_failure_metrics(err), + } +} + +pub fn create_http_server( + config: HttpServerConfig, + gateway_client: SharedGatewayClient, +) -> HttpServer { + HttpServer::new(config, gateway_client) +} + +#[async_trait] +impl ComponentStarter for HttpServer { + async fn start(&mut self) { + info!("Starting component {}.", short_type_name::<Self>()); + self.run().await.unwrap_or_else(|e| panic!("Failed to start HttpServer component: {:?}", e)) + } +} + +fn increment_failure_metrics(err: &HttpServerError) { + ADDED_TRANSACTIONS_FAILURE.increment(1); + let HttpServerError::GatewayClientError(gateway_client_error) = err else { + return; + }; + // TODO(shahak): add unit test for ADDED_TRANSACTIONS_INTERNAL_ERROR + if matches!(gateway_client_error, GatewayClientError::ClientError(_)) + || matches!(gateway_client_error, GatewayClientError::GatewayError( + GatewayError::DeprecatedGatewayError { source, ..
}) if source.is_internal()) + { + ADDED_TRANSACTIONS_INTERNAL_ERROR.increment(1); + } +} diff --git a/crates/apollo_http_server/src/http_server_test.rs b/crates/apollo_http_server/src/http_server_test.rs new file mode 100644 index 00000000000..66a99c682f2 --- /dev/null +++ b/crates/apollo_http_server/src/http_server_test.rs @@ -0,0 +1,314 @@ +use apollo_gateway_types::communication::{GatewayClientError, MockGatewayClient}; +use apollo_gateway_types::deprecated_gateway_error::{ + KnownStarknetErrorCode, + StarknetError, + StarknetErrorCode, +}; +use apollo_gateway_types::errors::GatewayError; +use apollo_gateway_types::gateway_types::{ + DeclareGatewayOutput, + DeployAccountGatewayOutput, + GatewayOutput, + InvokeGatewayOutput, +}; +use apollo_infra::component_client::ClientError; +use axum::body::{Bytes, HttpBody}; +use axum::response::{IntoResponse, Response}; +use axum::Json; +use hyper::StatusCode; +use rstest::rstest; +use serde_json::Value; +use starknet_api::test_utils::read_json_file; +use starknet_api::transaction::TransactionHash; +use starknet_api::{class_hash, contract_address, tx_hash}; +use starknet_types_core::felt::Felt; +use tracing_test::traced_test; + +use crate::errors::HttpServerError; +use crate::http_server::CLIENT_REGION_HEADER; +use crate::test_utils::{ + add_tx_http_client, + deprecated_gateway_declare_tx, + deprecated_gateway_deploy_account_tx, + deprecated_gateway_invoke_tx, + rpc_invoke_tx, + GatewayTransaction, + TransactionSerialization, +}; + +const DEPRECATED_GATEWAY_INVOKE_TX_RESPONSE_JSON_PATH: &str = + "expected_gateway_response/invoke_gateway_output.json"; +const DEPRECATED_GATEWAY_DECLARE_TX_RESPONSE_JSON_PATH: &str = + "expected_gateway_response/declare_gateway_output.json"; +const DEPRECATED_GATEWAY_DEPLOY_ACCOUNT_TX_RESPONSE_JSON_PATH: &str = + "expected_gateway_response/deploy_account_gateway_output.json"; + +const EXPECTED_TX_HASH: TransactionHash = TransactionHash(Felt::ONE); + +// The http_server is oblivious to the GatewayOutput type, so we always return invoke.
+pub fn default_gateway_output() -> GatewayOutput { + GatewayOutput::Invoke(InvokeGatewayOutput::new(EXPECTED_TX_HASH)) +} + +#[rstest] +#[case::invoke( + GatewayOutput::Invoke(InvokeGatewayOutput::new(tx_hash!(1_u64))), + DEPRECATED_GATEWAY_INVOKE_TX_RESPONSE_JSON_PATH, +)] +#[case::declare( + GatewayOutput::Declare(DeclareGatewayOutput::new(tx_hash!(1_u64), class_hash!(2_u64))), + DEPRECATED_GATEWAY_DECLARE_TX_RESPONSE_JSON_PATH, + +)] +#[case::deploy_account( + GatewayOutput::DeployAccount(DeployAccountGatewayOutput::new( + tx_hash!(1_u64), + contract_address!(3_u64) + )), + DEPRECATED_GATEWAY_DEPLOY_ACCOUNT_TX_RESPONSE_JSON_PATH, +)] +#[tokio::test] +async fn gateway_output_json_conversion( + #[case] gateway_output: GatewayOutput, + #[case] expected_serialized_response_path: &str, +) { + let response = Json(gateway_output).into_response(); + + let status_code = response.status(); + let response_bytes = &to_bytes(response).await; + + assert_eq!(status_code, StatusCode::OK, "{response_bytes:?}"); + let gateway_response: GatewayOutput = serde_json::from_slice(response_bytes).unwrap(); + + let expected_gateway_response = + serde_json::from_value(read_json_file(expected_serialized_response_path)) + .expect("Failed to deserialize json to GatewayOutput"); + assert_eq!(gateway_response, expected_gateway_response); +} + +async fn to_bytes(res: Response) -> Bytes { + res.into_body().collect().await.unwrap().to_bytes() +} + +#[tokio::test] +async fn error_into_response() { + let error = HttpServerError::DeserializationError( + serde_json::from_str::<Value>("invalid json").unwrap_err(), + ); + let response = error.into_response(); + + let status = response.status(); + let body = to_bytes(response).await; + let json: Value = serde_json::from_slice(&body).unwrap(); + + assert!(!status.is_success(), "{:?}", status); + assert_eq!( + json.get("code").unwrap(), + &serde_json::to_value(&KnownStarknetErrorCode::MalformedRequest).unwrap() + ); +} + +#[traced_test] +#[rstest] +#[case::add_deprecated_gateway_tx(0, deprecated_gateway_invoke_tx())] +#[case::add_rpc_tx(1, rpc_invoke_tx())] +#[tokio::test] +/// Test that when an add transaction HTTP request is sent to the server, the region of the http +/// request is recorded to the info log. +async fn record_region_test(#[case] index: u16, #[case] tx: impl GatewayTransaction) { + let mut mock_gateway_client = MockGatewayClient::new(); + // Set the successful response. + let tx_hash_1 = TransactionHash(Felt::ONE); + let tx_hash_2 = TransactionHash(Felt::TWO); + mock_gateway_client + .expect_add_tx() + .times(1) + .return_const(Ok(GatewayOutput::Invoke(InvokeGatewayOutput::new(tx_hash_1)))); + mock_gateway_client + .expect_add_tx() + .times(1) + .return_const(Ok(GatewayOutput::Invoke(InvokeGatewayOutput::new(tx_hash_2)))); + + // TODO(Yael): avoid the hardcoded node offset index, consider dynamic allocation. + let http_client = add_tx_http_client(mock_gateway_client, 1 + index).await; + + // Send a transaction to the server, without a region. + http_client.add_tx(tx.clone()).await; + assert!(logs_contain( + format!("Recorded transaction transaction_hash={} region={}", tx_hash_1, "N/A").as_str() + )); + + // Send a transaction to the server, with a region.
+ let region = "test"; + http_client.add_tx_with_headers(tx, [(CLIENT_REGION_HEADER, region)]).await; + assert!(logs_contain( + format!("Recorded transaction transaction_hash={} region={}", tx_hash_2, region).as_str() + )); +} + +#[traced_test] +#[rstest] +#[case::add_deprecated_gateway_tx(0, deprecated_gateway_invoke_tx())] +#[case::add_rpc_tx(1, rpc_invoke_tx())] +#[tokio::test] +/// Test that when an "add_tx" HTTP request is sent to the server, and it fails in the Gateway, no +/// record of the region is logged. +async fn record_region_gateway_failing_tx(#[case] index: u16, #[case] tx: impl GatewayTransaction) { + let mut mock_gateway_client = MockGatewayClient::new(); + // Set the failed response. + mock_gateway_client.expect_add_tx().times(1).return_const(Err( + GatewayClientError::ClientError(ClientError::UnexpectedResponse( + "mock response".to_string(), + )), + )); + + let http_client = add_tx_http_client(mock_gateway_client, 3 + index).await; + + // Send a transaction to the server. + http_client.add_tx(tx).await; + assert!(!logs_contain("Recorded transaction transaction_hash=")); +} + +#[rstest] +#[case::add_deprecated_gateway_invoke(0, deprecated_gateway_invoke_tx())] +#[case::add_deprecated_gateway_deploy_account(1, deprecated_gateway_deploy_account_tx())] +#[case::add_deprecated_gateway_declare(2, deprecated_gateway_declare_tx())] +#[case::add_rpc_invoke(3, rpc_invoke_tx())] +#[tokio::test] +async fn test_response(#[case] index: u16, #[case] tx: impl GatewayTransaction) { + let mut mock_gateway_client = MockGatewayClient::new(); + + // Set the successful response. + mock_gateway_client.expect_add_tx().times(1).return_const(Ok(default_gateway_output())); + + // Set the failed response. + let expected_error = StarknetError { + // The error code needs to be mapped to a BAD_REQUEST response (status code 400). + code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::ClassAlreadyDeclared), + message: "Arbitrary".to_string(), + }; + let expected_err_str = serde_json::to_string(&expected_error).unwrap(); + mock_gateway_client.expect_add_tx().times(1).return_const(Err( + GatewayClientError::GatewayError(GatewayError::DeprecatedGatewayError { + source: expected_error, + p2p_message_metadata: None, + }), + )); + + // Set the failed Gateway ClientError response. + let expected_gateway_client_err_str = + serde_json::to_string(&StarknetError::internal("Internal error")).unwrap(); + + mock_gateway_client.expect_add_tx().times(1).return_const(Err( + // The error code needs to be mapped to a INTERNAL_SERVER_ERROR response (status code 500). + GatewayClientError::ClientError(ClientError::UnexpectedResponse( + "mock response".to_string(), + )), + )); + + let http_client = add_tx_http_client(mock_gateway_client, 5 + index).await; + + // Test a successful response. + let tx_hash = http_client.assert_add_tx_success(tx.clone()).await; + assert_eq!(tx_hash, EXPECTED_TX_HASH); + + // Test a failed bad request response. + let error_str = http_client.assert_add_tx_error(tx.clone(), StatusCode::BAD_REQUEST).await; + assert_eq!(error_str, expected_err_str); + + // Test a failed internal server error response. 
+ let error_str = http_client.assert_add_tx_error(tx, StatusCode::INTERNAL_SERVER_ERROR).await; + assert_eq!(error_str, expected_gateway_client_err_str); +} + +#[rstest] +#[case::missing_version( + 0, + None, + StarknetError { + code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::MalformedRequest), + message: "Missing version field".to_string(), + } +)] +#[case::bad_version( + 1, + Some("bad version"), + StarknetError { + code: StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::MalformedRequest), + message: "Version field is not a valid hex string: bad version".to_string(), + } +)] +#[case::old_version(2, Some("0x1"), StarknetError { + code: StarknetErrorCode::KnownErrorCode( + KnownStarknetErrorCode::InvalidTransactionVersion, + ), + message: "Transaction version 1 is not supported. Supported versions: [3]." + .to_string(), + }, +)] +#[case::newer_version(3, Some("0x4"), StarknetError { + code: StarknetErrorCode::KnownErrorCode( + KnownStarknetErrorCode::InvalidTransactionVersion, + ), + message: "Transaction version 4 is not supported. Supported versions: [3]." + .to_string(), + } +)] +#[tokio::test] +async fn test_unsupported_tx_version( + #[case] index: u16, + #[case] version: Option<&str>, + #[case] expected_err: StarknetError, +) { + // Set the tx version to the given version. + let mut tx_json = + TransactionSerialization(serde_json::to_value(deprecated_gateway_invoke_tx()).unwrap()); + let as_object = tx_json.0.as_object_mut().unwrap(); + if let Some(version) = version { + as_object.insert("version".to_string(), Value::String(version.to_string())).unwrap(); + } else { + as_object.remove("version").unwrap(); + } + + let mock_gateway_client = MockGatewayClient::new(); + let http_client = add_tx_http_client(mock_gateway_client, 9 + index).await; + + let serialized_err = http_client.assert_add_tx_error(tx_json, StatusCode::BAD_REQUEST).await; + let starknet_error = serde_json::from_str::<StarknetError>(&serialized_err).unwrap(); + assert_eq!(starknet_error, expected_err); +} + +#[tokio::test] +async fn sanitizing_error_message() { + // Set the tx version to be a problematic text. + let mut tx_json = + TransactionSerialization(serde_json::to_value(deprecated_gateway_invoke_tx()).unwrap()); + let tx_object = tx_json.0.as_object_mut().unwrap(); + let malicious_version: &'static str = "\"'`[](){}_!@#$%^&*+=~"; + tx_object.insert("version".to_string(), Value::String(malicious_version.to_string())).unwrap(); + + let mock_gateway_client = MockGatewayClient::new(); + let http_client = add_tx_http_client(mock_gateway_client, 13).await; + + let serialized_err = http_client.assert_add_tx_error(tx_json, StatusCode::BAD_REQUEST).await; + let starknet_error: StarknetError = + serde_json::from_str(&serialized_err).expect("Expected valid StarknetError JSON"); + + assert_eq!( + starknet_error.code, + StarknetErrorCode::KnownErrorCode(KnownStarknetErrorCode::MalformedRequest) + ); + + // Make sure the original payload is NOT included directly. + assert!( + !starknet_error.message.contains("