diff --git a/.github/workflows/check-specrefs.yml b/.github/workflows/check-specrefs.yml new file mode 100644 index 00000000000..dffc338e47e --- /dev/null +++ b/.github/workflows/check-specrefs.yml @@ -0,0 +1,43 @@ +name: Check Spec References +on: [push, pull_request] + +jobs: + check-specrefs: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Check version consistency + run: | + MAKEFILE_VERSION=$(grep 'CONSENSUS_SPECS_TEST_VERSION ?=' testing/ef_tests/Makefile | sed 's/.*?= //') + ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //') + if [ "$MAKEFILE_VERSION" != "$ETHSPECIFY_VERSION" ]; then + echo "Version mismatch between Makefile and ethspecify" + echo " testing/ef_tests/Makefile: $MAKEFILE_VERSION" + echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION" + exit 1 + else + echo "Versions match: $MAKEFILE_VERSION" + fi + + - name: Install ethspecify + run: python3 -mpip install ethspecify + + - name: Update spec references + run: ethspecify process --path=specrefs + + - name: Check for differences + run: | + if ! git diff --exit-code -- specrefs >/dev/null; then + echo "Spec references are out-of-date!" + echo "" + git --no-pager diff -- specrefs + exit 1 + else + echo "Spec references are up-to-date!" + fi + + - name: Check spec references + run: ethspecify check --path=specrefs diff --git a/specrefs/.ethspecify.yml b/specrefs/.ethspecify.yml new file mode 100644 index 00000000000..6635283a47b --- /dev/null +++ b/specrefs/.ethspecify.yml @@ -0,0 +1,434 @@ +version: v1.6.1 +style: full + +specrefs: + search_root: .. 
+ auto_standardize_names: true + auto_add_missing_entries: true + + files: + - configs.yml + - constants.yml + - containers.yml + - dataclasses.yml + - functions.yml + - presets.yml + +exceptions: + configs: + # Not implemented: gloas + - AGGREGATE_DUE_BPS_GLOAS#gloas + - ATTESTATION_DUE_BPS_GLOAS#gloas + - CONTRIBUTION_DUE_BPS_GLOAS#gloas + - MAX_REQUEST_PAYLOADS#gloas + - PAYLOAD_ATTESTATION_DUE_BPS#gloas + - SYNC_MESSAGE_DUE_BPS_GLOAS#gloas + + constants: + # Not implemented: phase0 + - BASIS_POINTS#phase0 + - ENDIANNESS#phase0 + - ENDIANNESS#phase0 + - ETH_TO_GWEI#phase0 + - ETH_TO_GWEI#phase0 + - GENESIS_EPOCH#phase0 + - JUSTIFICATION_BITS_LENGTH#phase0 + - MAX_CONCURRENT_REQUESTS#phase0 + - NODE_ID_BITS#phase0 + - SAFETY_DECAY#phase0 + - UINT64_MAX#phase0 + - UINT64_MAX_SQRT#phase0 + + # Not implemented: altair + - G2_POINT_AT_INFINITY#altair + - MAX_REQUEST_LIGHT_CLIENT_UPDATES#altair + + # Not implemented: bellatrix + - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix + + # Not implemented: deneb + - BLS_MODULUS#deneb + - BYTES_PER_COMMITMENT#deneb + - BYTES_PER_FIELD_ELEMENT#deneb + - BYTES_PER_PROOF#deneb + - FIAT_SHAMIR_PROTOCOL_DOMAIN#deneb + - G1_POINT_AT_INFINITY#deneb + - KZG_ENDIANNESS#deneb + - KZG_SETUP_G2_LENGTH#deneb + - KZG_SETUP_G2_MONOMIAL#deneb + - PRIMITIVE_ROOT_OF_UNITY#deneb + - RANDOM_CHALLENGE_KZG_BATCH_DOMAIN#deneb + + # Not implemented: electra + - CONSOLIDATION_REQUEST_TYPE#electra + - DEPOSIT_REQUEST_TYPE#electra + - WITHDRAWAL_REQUEST_TYPE#electra + + # Not implemented: fulu + - RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN#fulu + - UINT256_MAX#fulu + + # Not implemented: gloas + - BUILDER_PAYMENT_THRESHOLD_DENOMINATOR#gloas + - BUILDER_PAYMENT_THRESHOLD_NUMERATOR#gloas + - BUILDER_WITHDRAWAL_PREFIX#gloas + - DOMAIN_BEACON_BUILDER#gloas + - DOMAIN_PTC_ATTESTER#gloas + - PAYLOAD_STATUS_EMPTY#gloas + - PAYLOAD_STATUS_FULL#gloas + - PAYLOAD_STATUS_PENDING#gloas + + containers: + # Not implemented: phase0 + - Eth1Block#phase0 + + # Not 
implemented: bellatrix + - PowBlock#bellatrix + + # Not implemented: capella + - LightClientBootstrap#capella + - LightClientFinalityUpdate#capella + - LightClientOptimisticUpdate#capella + - LightClientUpdate#capella + + # Not implemented: fulu + - MatrixEntry#fulu + + # Not implemented: gloas + - BeaconBlockBody#gloas + - BeaconState#gloas + - BuilderPendingPayment#gloas + - BuilderPendingWithdrawal#gloas + - DataColumnSidecar#gloas + - ExecutionPayloadBid#gloas + - ExecutionPayloadEnvelope#gloas + - ExecutionPayloadHeader#gloas + - ForkChoiceNode#gloas + - IndexedPayloadAttestation#gloas + - PayloadAttestation#gloas + - PayloadAttestationData#gloas + - PayloadAttestationMessage#gloas + - SignedExecutionPayloadBid#gloas + - SignedExecutionPayloadEnvelope#gloas + + dataclasses: + # Not implemented: phase0 + - LatestMessage#phase0 + + # Not implemented: altair + - LightClientStore#altair + + # Not implemented: bellatrix + - OptimisticStore#bellatrix + + # Not implemented: capella + - LightClientStore#capella + + # Not implemented: gloas + - LatestMessage#gloas + - Store#gloas + + functions: + # Functions implemented by KZG library for EIP-4844 + - bit_reversal_permutation#deneb + - blob_to_kzg_commitment#deneb + - blob_to_polynomial#deneb + - bls_field_to_bytes#deneb + - bytes_to_bls_field#deneb + - bytes_to_kzg_commitment#deneb + - bytes_to_kzg_proof#deneb + - compute_blob_kzg_proof#deneb + - compute_challenge#deneb + - compute_kzg_proof#deneb + - compute_kzg_proof_impl#deneb + - compute_powers#deneb + - compute_quotient_eval_within_domain#deneb + - compute_roots_of_unity#deneb + - evaluate_polynomial_in_evaluation_form#deneb + - g1_lincomb#deneb + - hash_to_bls_field#deneb + - is_power_of_two#deneb + - multi_exp#deneb + - reverse_bits#deneb + - validate_kzg_g1#deneb + - verify_blob_kzg_proof#deneb + - verify_blob_kzg_proof_batch#deneb + - verify_kzg_proof#deneb + - verify_kzg_proof_batch#deneb + - verify_kzg_proof_impl#deneb + + # Functions implemented by KZG 
library for EIP-7594 + - _fft_field#fulu + - add_polynomialcoeff#fulu + - cell_to_coset_evals#fulu + - compute_cells#fulu + - compute_cells_and_kzg_proofs#fulu + - compute_cells_and_kzg_proofs_polynomialcoeff#fulu + - compute_kzg_proof_multi_impl#fulu + - compute_verify_cell_kzg_proof_batch_challenge#fulu + - construct_vanishing_polynomial#fulu + - coset_evals_to_cell#fulu + - coset_fft_field#fulu + - coset_for_cell#fulu + - coset_shift_for_cell#fulu + - divide_polynomialcoeff#fulu + - evaluate_polynomialcoeff#fulu + - fft_field#fulu + - interpolate_polynomialcoeff#fulu + - multiply_polynomialcoeff#fulu + - polynomial_eval_to_coeff#fulu + - recover_cells_and_kzg_proofs#fulu + - recover_polynomialcoeff#fulu + - vanishing_polynomialcoeff#fulu + - verify_cell_kzg_proof_batch#fulu + - verify_cell_kzg_proof_batch_impl#fulu + + # Not implemented: phase0 + - bytes_to_uint64#phase0 + - check_if_validator_active#phase0 + - compute_new_state_root#phase0 + - compute_pulled_up_tip#phase0 + - compute_subscribed_subnet#phase0 + - compute_subscribed_subnets#phase0 + - compute_time_at_slot#phase0 + - compute_weak_subjectivity_period#phase0 + - filter_block_tree#phase0 + - get_aggregate_and_proof#phase0 + - get_aggregate_and_proof_signature#phase0 + - get_aggregate_due_ms#phase0 + - get_aggregate_signature#phase0 + - get_attestation_component_deltas#phase0 + - get_attestation_deltas#phase0 + - get_attestation_due_ms#phase0 + - get_attestation_signature#phase0 + - get_attesting_balance#phase0 + - get_block_signature#phase0 + - get_checkpoint_block#phase0 + - get_committee_assignment#phase0 + - get_current_store_epoch#phase0 + - get_eligible_validator_indices#phase0 + - get_epoch_signature#phase0 + - get_eth1_vote#phase0 + - get_filtered_block_tree#phase0 + - get_finality_delay#phase0 + - get_head_deltas#phase0 + - get_inactivity_penalty_deltas#phase0 + - get_inclusion_delay_deltas#phase0 + - get_matching_head_attestations#phase0 + - get_matching_source_attestations#phase0 + - 
get_matching_target_attestations#phase0 + - get_proposer_head#phase0 + - get_proposer_reorg_cutoff_ms#phase0 + - get_proposer_reward#phase0 + - get_proposer_score#phase0 + - get_slot_component_duration_ms#phase0 + - get_slot_signature#phase0 + - get_slots_since_genesis#phase0 + - get_source_deltas#phase0 + - get_target_deltas#phase0 + - get_unslashed_attesting_indices#phase0 + - get_validator_from_deposit#phase0 + - get_voting_source#phase0 + - get_weight#phase0 + - initialize_beacon_state_from_eth1#phase0 + - integer_squareroot#phase0 + - is_candidate_block#phase0 + - is_ffg_competitive#phase0 + - is_finalization_ok#phase0 + - is_head_late#phase0 + - is_head_weak#phase0 + - is_parent_strong#phase0 + - is_proposer#phase0 + - is_proposing_on_time#phase0 + - is_shuffling_stable#phase0 + - is_slashable_attestation_data#phase0 + - is_valid_genesis_state#phase0 + - is_valid_merkle_branch#phase0 + - is_within_weak_subjectivity_period#phase0 + - on_tick_per_slot#phase0 + - process_slots#phase0 + - seconds_to_milliseconds#phase0 + - state_transition#phase0 + - store_target_checkpoint_state#phase0 + - update_latest_messages#phase0 + - update_unrealized_checkpoints#phase0 + - voting_period_start_time#phase0 + - xor#phase0 + + # Not implemented: altair + - apply_light_client_update#altair + - compute_merkle_proof#altair + - create_light_client_bootstrap#altair + - create_light_client_finality_update#altair + - create_light_client_optimistic_update#altair + - create_light_client_update#altair + - current_sync_committee_gindex_at_slot#altair + - eth_aggregate_pubkeys#altair + - finalized_root_gindex_at_slot#altair + - get_contribution_and_proof#altair + - get_contribution_and_proof_signature#altair + - get_contribution_due_ms#altair + - get_flag_index_deltas#altair + - get_inactivity_penalty_deltas#altair + - get_index_for_new_validator#altair + - get_next_sync_committee#altair + - get_safety_threshold#altair + - get_subtree_index#altair + - get_sync_committee_message#altair + 
- get_sync_committee_selection_proof#altair + - get_sync_message_due_ms#altair + - get_sync_subcommittee_pubkeys#altair + - get_unslashed_participating_indices#altair + - initialize_light_client_store#altair + - is_assigned_to_sync_committee#altair + - is_better_update#altair + - is_finality_update#altair + - is_next_sync_committee_known#altair + - is_sync_committee_aggregator#altair + - is_sync_committee_update#altair + - is_valid_light_client_header#altair + - is_valid_normalized_merkle_branch#altair + - next_sync_committee_gindex_at_slot#altair + - process_light_client_finality_update#altair + - process_light_client_optimistic_update#altair + - process_light_client_store_force_update#altair + - process_light_client_update#altair + - set_or_append_list#altair + - validate_light_client_update#altair + + # Not implemented: bellatrix + - get_inactivity_penalty_deltas#bellatrix + - get_pow_block_at_terminal_total_difficulty#bellatrix + - get_terminal_pow_block#bellatrix + - is_execution_block#bellatrix + - is_optimistic_candidate_block#bellatrix + - latest_verified_ancestor#bellatrix + - should_override_forkchoice_update#bellatrix + + # Not implemented: capella + - get_lc_execution_root#capella + - is_valid_light_client_header#capella + - upgrade_lc_bootstrap_to_capella#capella + - upgrade_lc_finality_update_to_capella#capella + - upgrade_lc_header_to_capella#capella + - upgrade_lc_optimistic_update_to_capella#capella + - upgrade_lc_store_to_capella#capella + - upgrade_lc_update_to_capella#capella + + # Not implemented: deneb + - compute_signed_block_header#deneb + - compute_subnet_for_blob_sidecar#deneb + - get_blob_sidecars#deneb + - get_lc_execution_root#deneb + - get_validator_activation_churn_limit#deneb + - is_valid_light_client_header#deneb + - process_voluntary_exit#deneb + - upgrade_lc_bootstrap_to_deneb#deneb + - upgrade_lc_finality_update_to_deneb#deneb + - upgrade_lc_header_to_deneb#deneb + - upgrade_lc_optimistic_update_to_deneb#deneb + - 
upgrade_lc_store_to_deneb#deneb + - upgrade_lc_update_to_deneb#deneb + + # Not implemented: electra + - apply_pending_deposit#electra + - compute_subnet_for_blob_sidecar#electra + - compute_weak_subjectivity_period#electra + - current_sync_committee_gindex_at_slot#electra + - finalized_root_gindex_at_slot#electra + - get_eth1_pending_deposit_count#electra + - get_eth1_vote#electra + - get_execution_requests#electra + - get_lc_execution_root#electra + - get_validator_from_deposit#electra + - is_within_weak_subjectivity_period#electra + - next_sync_committee_gindex_at_slot#electra + - normalize_merkle_branch#electra + - process_pending_deposits#electra + - process_voluntary_exit#electra + - upgrade_lc_bootstrap_to_electra#electra + - upgrade_lc_finality_update_to_electra#electra + - upgrade_lc_header_to_electra#electra + - upgrade_lc_optimistic_update_to_electra#electra + - upgrade_lc_store_to_electra#electra + - upgrade_lc_update_to_electra#electra + + # Not implemented: fulu + - compute_matrix#fulu + - get_data_column_sidecars#fulu + - get_data_column_sidecars_from_block#fulu + - get_data_column_sidecars_from_column_sidecar#fulu + + # Not implemented: gloas + - compute_balance_weighted_acceptance#gloas + - compute_balance_weighted_selection#gloas + - compute_proposer_indices#gloas + - get_aggregate_due_ms#gloas + - get_ancestor#gloas + - get_attestation_due_ms#gloas + - get_attestation_participation_flag_indices#gloas + - get_builder_payment_quorum_threshold#gloas + - get_checkpoint_block#gloas + - get_contribution_due_ms#gloas + - get_data_column_sidecars#gloas + - get_data_column_sidecars_from_block#gloas + - get_data_column_sidecars_from_column_sidecar#gloas + - get_execution_payload_bid_signature#gloas + - get_execution_payload_envelope_signature#gloas + - get_expected_withdrawals#gloas + - get_forkchoice_store#gloas + - get_head#gloas + - get_indexed_payload_attestation#gloas + - get_next_sync_committee_indices#gloas + - get_node_children#gloas + - 
get_parent_payload_status#gloas + - get_payload_attestation_due_ms#gloas + - get_payload_attestation_message_signature#gloas + - get_payload_status_tiebreaker#gloas + - get_pending_balance_to_withdraw#gloas + - get_ptc#gloas + - get_ptc_assignment#gloas + - get_sync_message_due_ms#gloas + - get_weight#gloas + - has_builder_withdrawal_credential#gloas + - has_compounding_withdrawal_credential#gloas + - is_attestation_same_slot#gloas + - is_builder_payment_withdrawable#gloas + - is_builder_withdrawal_credential#gloas + - is_merge_transition_complete#gloas + - is_parent_block_full#gloas + - is_parent_node_full#gloas + - is_payload_timely#gloas + - is_supporting_vote#gloas + - is_valid_indexed_payload_attestation#gloas + - notify_ptc_messages#gloas + - on_block#gloas + - on_execution_payload#gloas + - on_payload_attestation_message#gloas + - prepare_execution_payload#gloas + - process_attestation#gloas + - process_block#gloas + - process_builder_pending_payments#gloas + - process_epoch#gloas + - process_execution_payload#gloas + - process_execution_payload_bid#gloas + - process_operations#gloas + - process_payload_attestation#gloas + - process_proposer_slashing#gloas + - process_slot#gloas + - process_withdrawals#gloas + - should_extend_payload#gloas + - update_latest_messages#gloas + - upgrade_to_gloas#gloas + - validate_merge_block#gloas + - validate_on_attestation#gloas + - verify_data_column_sidecar#gloas + - verify_data_column_sidecar_inclusion_proof#gloas + - verify_execution_payload_bid_signature#gloas + - verify_execution_payload_envelope_signature#gloas + + presets: + # Not implemented: gloas + - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas + - MAX_PAYLOAD_ATTESTATIONS#gloas + - PTC_SIZE#gloas diff --git a/specrefs/README.md b/specrefs/README.md new file mode 100644 index 00000000000..c8518322aa1 --- /dev/null +++ b/specrefs/README.md @@ -0,0 +1,35 @@ +# Specification References + +This directory contains specification reference tracking files managed by 
+[ethspecify](https://github.com/jtraglia/ethspecify).
+
+## Installation
+
+Install `ethspecify` with the following command:
+
+```bash
+pipx install ethspecify
+```
+
+> [!NOTE]
+> You can run `ethspecify` in the `specrefs` directory or
+> `ethspecify --path=specrefs` from the project's root directory.
+
+## Maintenance
+
+When adding support for a new specification version, follow these steps:
+
+0. Change directory into the `specrefs` directory.
+1. Update the version in `.ethspecify.yml` configuration.
+2. Run `ethspecify process` to update/populate specrefs.
+3. Run `ethspecify check` to check specrefs.
+4. If there are errors, use the error message as a guide to fix the issue. If
+   there are new specrefs with empty sources, implement/locate each item and
+   update each specref source list. If you choose not to implement an item,
+   add an exception to the appropriate section of the `.ethspecify.yml`
+   configuration.
+5. Repeat steps 3 and 4 until `ethspecify check` passes.
+6. Run `git diff` to view updated specrefs. If an object/function/etc. has
+   changed, make the necessary updates to the implementation.
+7. Lastly, in the project's root directory, run `act -j check-specrefs` to
+   ensure everything is correct.
diff --git a/specrefs/configs.yml b/specrefs/configs.yml new file mode 100644 index 00000000000..fec19373105 --- /dev/null +++ b/specrefs/configs.yml @@ -0,0 +1,920 @@ +- name: AGGREGATE_DUE_BPS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: AGGREGATE_DUE_BPS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*aggregate_due_bps:' + regex: true + spec: | + + AGGREGATE_DUE_BPS: uint64 = 6667 + + +- name: AGGREGATE_DUE_BPS_GLOAS#gloas + sources: [] + spec: | + + AGGREGATE_DUE_BPS_GLOAS: uint64 = 5000 + + +- name: ALTAIR_FORK_EPOCH#altair + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ALTAIR_FORK_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*altair_fork_epoch:' + regex: true + spec: | + + ALTAIR_FORK_EPOCH: Epoch = 74240 + + +- name: ALTAIR_FORK_VERSION#altair + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ALTAIR_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*altair_fork_version:' + regex: true + spec: | + + ALTAIR_FORK_VERSION: Version = '0x01000000' + + +- name: ATTESTATION_DUE_BPS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ATTESTATION_DUE_BPS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*attestation_due_bps:' + regex: true + spec: | + + ATTESTATION_DUE_BPS: uint64 = 3333 + + +- name: ATTESTATION_DUE_BPS_GLOAS#gloas + sources: [] + spec: | + + ATTESTATION_DUE_BPS_GLOAS: uint64 = 2500 + + +- name: ATTESTATION_PROPAGATION_SLOT_RANGE#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ATTESTATION_PROPAGATION_SLOT_RANGE: + - file: consensus/types/src/core/chain_spec.rs + 
search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*attestation_propagation_slot_range:' + regex: true + spec: | + + ATTESTATION_PROPAGATION_SLOT_RANGE = 32 + + +- name: ATTESTATION_SUBNET_COUNT#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ATTESTATION_SUBNET_COUNT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*attestation_subnet_count:' + regex: true + spec: | + + ATTESTATION_SUBNET_COUNT = 64 + + +- name: ATTESTATION_SUBNET_EXTRA_BITS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ATTESTATION_SUBNET_EXTRA_BITS: + spec: | + + ATTESTATION_SUBNET_EXTRA_BITS = 0 + + +- name: ATTESTATION_SUBNET_PREFIX_BITS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ATTESTATION_SUBNET_PREFIX_BITS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*attestation_subnet_prefix_bits:' + regex: true + spec: | + + ATTESTATION_SUBNET_PREFIX_BITS: int = 6 + + +- name: BALANCE_PER_ADDITIONAL_CUSTODY_GROUP#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*balance_per_additional_custody_group:' + regex: true + spec: | + + BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: Gwei = 32000000000 + + +- name: BELLATRIX_FORK_EPOCH#bellatrix + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: BELLATRIX_FORK_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*bellatrix_fork_epoch:' + regex: true + spec: | + + BELLATRIX_FORK_EPOCH: Epoch = 144896 + + +- name: BELLATRIX_FORK_VERSION#bellatrix + sources: + - file: 
common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: BELLATRIX_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*bellatrix_fork_version:' + regex: true + spec: | + + BELLATRIX_FORK_VERSION: Version = '0x02000000' + + +- name: BLOB_SCHEDULE#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: BLOB_SCHEDULE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*blob_schedule:' + regex: true + spec: | + + BLOB_SCHEDULE: tuple[frozendict[str, Any], ...] = ( + frozendict({ + "EPOCH": 412672, + "MAX_BLOBS_PER_BLOCK": 15, + }), + frozendict({ + "EPOCH": 419072, + "MAX_BLOBS_PER_BLOCK": 21, + }), + ) + + +- name: BLOB_SIDECAR_SUBNET_COUNT#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: BLOB_SIDECAR_SUBNET_COUNT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*blob_sidecar_subnet_count:' + regex: true + spec: | + + BLOB_SIDECAR_SUBNET_COUNT = 6 + + +- name: BLOB_SIDECAR_SUBNET_COUNT_ELECTRA#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*blob_sidecar_subnet_count_electra:' + regex: true + spec: | + + BLOB_SIDECAR_SUBNET_COUNT_ELECTRA = 9 + + +- name: CAPELLA_FORK_EPOCH#capella + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: CAPELLA_FORK_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*capella_fork_epoch:' + regex: true + spec: | + + CAPELLA_FORK_EPOCH: Epoch = 194048 + + +- name: CAPELLA_FORK_VERSION#capella + sources: + - file: 
common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: CAPELLA_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*capella_fork_version:' + regex: true + spec: | + + CAPELLA_FORK_VERSION: Version = '0x03000000' + + +- name: CHURN_LIMIT_QUOTIENT#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: CHURN_LIMIT_QUOTIENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*churn_limit_quotient:' + regex: true + spec: | + + CHURN_LIMIT_QUOTIENT: uint64 = 65536 + + +- name: CONTRIBUTION_DUE_BPS#altair + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: CONTRIBUTION_DUE_BPS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*contribution_due_bps:' + regex: true + spec: | + + CONTRIBUTION_DUE_BPS: uint64 = 6667 + + +- name: CONTRIBUTION_DUE_BPS_GLOAS#gloas + sources: [] + spec: | + + CONTRIBUTION_DUE_BPS_GLOAS: uint64 = 5000 + + +- name: CUSTODY_REQUIREMENT#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ^CUSTODY_REQUIREMENT: + regex: true + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*custody_requirement:' + regex: true + spec: | + + CUSTODY_REQUIREMENT = 4 + + +- name: DATA_COLUMN_SIDECAR_SUBNET_COUNT#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: DATA_COLUMN_SIDECAR_SUBNET_COUNT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*data_column_sidecar_subnet_count:' + regex: true + spec: | + + DATA_COLUMN_SIDECAR_SUBNET_COUNT = 128 + + +- name: DENEB_FORK_EPOCH#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: DENEB_FORK_EPOCH: + - 
file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*deneb_fork_epoch:' + regex: true + spec: | + + DENEB_FORK_EPOCH: Epoch = 269568 + + +- name: DENEB_FORK_VERSION#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: DENEB_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*deneb_fork_version:' + regex: true + spec: | + + DENEB_FORK_VERSION: Version = '0x04000000' + + +- name: EJECTION_BALANCE#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: EJECTION_BALANCE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*ejection_balance:' + regex: true + spec: | + + EJECTION_BALANCE: Gwei = 16000000000 + + +- name: ELECTRA_FORK_EPOCH#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ELECTRA_FORK_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*electra_fork_epoch:' + regex: true + spec: | + + ELECTRA_FORK_EPOCH: Epoch = 364032 + + +- name: ELECTRA_FORK_VERSION#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ELECTRA_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*electra_fork_version:' + regex: true + spec: | + + ELECTRA_FORK_VERSION: Version = '0x05000000' + + +- name: EPOCHS_PER_SUBNET_SUBSCRIPTION#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: EPOCHS_PER_SUBNET_SUBSCRIPTION: + spec: | + + EPOCHS_PER_SUBNET_SUBSCRIPTION = 256 + + +- name: ETH1_FOLLOW_DISTANCE#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ETH1_FOLLOW_DISTANCE: + - file: 
consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*eth1_follow_distance:' + regex: true + spec: | + + ETH1_FOLLOW_DISTANCE: uint64 = 2048 + + +- name: FULU_FORK_EPOCH#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: FULU_FORK_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*fulu_fork_epoch:' + regex: true + spec: | + + FULU_FORK_EPOCH: Epoch = 411392 + + +- name: FULU_FORK_VERSION#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: FULU_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*fulu_fork_version:' + regex: true + spec: | + + FULU_FORK_VERSION: Version = '0x06000000' + + +- name: GENESIS_DELAY#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: GENESIS_DELAY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*genesis_delay:' + regex: true + spec: | + + GENESIS_DELAY: uint64 = 604800 + + +- name: GENESIS_FORK_VERSION#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: GENESIS_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*genesis_fork_version:' + regex: true + spec: | + + GENESIS_FORK_VERSION: Version = '0x00000000' + + +- name: GLOAS_FORK_EPOCH#gloas + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: GLOAS_FORK_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*gloas_fork_epoch:' + regex: true + spec: | + + GLOAS_FORK_EPOCH: Epoch = 18446744073709551615 + + +- name: GLOAS_FORK_VERSION#gloas + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + 
search: GLOAS_FORK_VERSION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*gloas_fork_version:' + regex: true + spec: | + + GLOAS_FORK_VERSION: Version = '0x07000000' + + +- name: INACTIVITY_SCORE_BIAS#altair + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: INACTIVITY_SCORE_BIAS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*inactivity_score_bias:' + regex: true + spec: | + + INACTIVITY_SCORE_BIAS: uint64 = 4 + + +- name: INACTIVITY_SCORE_RECOVERY_RATE#altair + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: INACTIVITY_SCORE_RECOVERY_RATE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*inactivity_score_recovery_rate:' + regex: true + spec: | + + INACTIVITY_SCORE_RECOVERY_RATE: uint64 = 16 + + +- name: MAXIMUM_GOSSIP_CLOCK_DISPARITY#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAXIMUM_GOSSIP_CLOCK_DISPARITY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*maximum_gossip_clock_disparity:' + regex: true + spec: | + + MAXIMUM_GOSSIP_CLOCK_DISPARITY = 500 + + +- name: MAX_BLOBS_PER_BLOCK#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: ^MAX_BLOBS_PER_BLOCK: + regex: true + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_blobs_per_block:' + regex: true + spec: | + + MAX_BLOBS_PER_BLOCK: uint64 = 6 + + +- name: MAX_BLOBS_PER_BLOCK_ELECTRA#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_BLOBS_PER_BLOCK_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_blobs_per_block_electra:' + regex: true + 
spec: | + + MAX_BLOBS_PER_BLOCK_ELECTRA: uint64 = 9 + + +- name: MAX_PAYLOAD_SIZE#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_PAYLOAD_SIZE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_payload_size:' + regex: true + spec: | + + MAX_PAYLOAD_SIZE = 10485760 + + +- name: MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_per_epoch_activation_churn_limit:' + regex: true + spec: | + + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: uint64 = 8 + + +- name: MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_per_epoch_activation_exit_churn_limit:' + regex: true + spec: | + + MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: Gwei = 256000000000 + + +- name: MAX_REQUEST_BLOB_SIDECARS#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_REQUEST_BLOB_SIDECARS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_request_blob_sidecars:' + regex: true + spec: | + + MAX_REQUEST_BLOB_SIDECARS = 768 + + +- name: MAX_REQUEST_BLOB_SIDECARS_ELECTRA#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_REQUEST_BLOB_SIDECARS_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_request_blob_sidecars_electra:' + regex: true + spec: | + + MAX_REQUEST_BLOB_SIDECARS_ELECTRA = 1152 + + +- name: 
MAX_REQUEST_BLOCKS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_REQUEST_BLOCKS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_request_blocks:' + regex: true + spec: | + + MAX_REQUEST_BLOCKS = 1024 + + +- name: MAX_REQUEST_BLOCKS_DENEB#deneb + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_REQUEST_BLOCKS_DENEB: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_request_blocks_deneb:' + regex: true + spec: | + + MAX_REQUEST_BLOCKS_DENEB = 128 + + +- name: MAX_REQUEST_DATA_COLUMN_SIDECARS#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MAX_REQUEST_DATA_COLUMN_SIDECARS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_request_data_column_sidecars:' + regex: true + spec: | + + MAX_REQUEST_DATA_COLUMN_SIDECARS = 16384 + + +- name: MAX_REQUEST_PAYLOADS#gloas + sources: [] + spec: | + + MAX_REQUEST_PAYLOADS = 128 + + +- name: MESSAGE_DOMAIN_INVALID_SNAPPY#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MESSAGE_DOMAIN_INVALID_SNAPPY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*message_domain_invalid_snappy:' + regex: true + spec: | + + MESSAGE_DOMAIN_INVALID_SNAPPY: DomainType = '0x00000000' + + +- name: MESSAGE_DOMAIN_VALID_SNAPPY#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MESSAGE_DOMAIN_VALID_SNAPPY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*message_domain_valid_snappy:' + regex: true + spec: | + + MESSAGE_DOMAIN_VALID_SNAPPY: DomainType = '0x01000000' + + +- name: MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS#deneb + 
sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_epochs_for_blob_sidecars_requests:' + regex: true + spec: | + + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096 + + +- name: MIN_EPOCHS_FOR_BLOCK_REQUESTS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_EPOCHS_FOR_BLOCK_REQUESTS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_epochs_for_block_requests:' + regex: true + spec: | + + MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 + + +- name: MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_epochs_for_data_column_sidecars_requests:' + regex: true + spec: | + + MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS = 4096 + + +- name: MIN_GENESIS_ACTIVE_VALIDATOR_COUNT#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_genesis_active_validator_count:' + regex: true + spec: | + + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: uint64 = 16384 + + +- name: MIN_GENESIS_TIME#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_GENESIS_TIME: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_genesis_time:' + regex: true + spec: | + + MIN_GENESIS_TIME: uint64 = 1606824000 + + +- name: MIN_PER_EPOCH_CHURN_LIMIT#phase0 + sources: + - file: 
common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_PER_EPOCH_CHURN_LIMIT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_per_epoch_churn_limit:' + regex: true + spec: | + + MIN_PER_EPOCH_CHURN_LIMIT: uint64 = 4 + + +- name: MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA#electra + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_per_epoch_churn_limit_electra:' + regex: true + spec: | + + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: Gwei = 128000000000 + + +- name: MIN_VALIDATOR_WITHDRAWABILITY_DELAY#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: MIN_VALIDATOR_WITHDRAWABILITY_DELAY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_validator_withdrawability_delay:' + regex: true + spec: | + + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: uint64 = 256 + + +- name: NUMBER_OF_CUSTODY_GROUPS#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: NUMBER_OF_CUSTODY_GROUPS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*number_of_custody_groups:' + regex: true + spec: | + + NUMBER_OF_CUSTODY_GROUPS = 128 + + +- name: PAYLOAD_ATTESTATION_DUE_BPS#gloas + sources: [] + spec: | + + PAYLOAD_ATTESTATION_DUE_BPS: uint64 = 7500 + + +- name: PROPOSER_REORG_CUTOFF_BPS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: PROPOSER_REORG_CUTOFF_BPS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*proposer_reorg_cutoff_bps:' + regex: true + spec: | + + PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667 + + +- name: PROPOSER_SCORE_BOOST#phase0 + sources: + 
- file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: PROPOSER_SCORE_BOOST: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*proposer_score_boost:' + regex: true + spec: | + + PROPOSER_SCORE_BOOST: uint64 = 40 + + +- name: REORG_HEAD_WEIGHT_THRESHOLD#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: REORG_HEAD_WEIGHT_THRESHOLD: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*reorg_head_weight_threshold:' + regex: true + spec: | + + REORG_HEAD_WEIGHT_THRESHOLD: uint64 = 20 + + +- name: REORG_MAX_EPOCHS_SINCE_FINALIZATION#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: REORG_MAX_EPOCHS_SINCE_FINALIZATION: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*reorg_max_epochs_since_finalization:' + regex: true + spec: | + + REORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = 2 + + +- name: REORG_PARENT_WEIGHT_THRESHOLD#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: REORG_PARENT_WEIGHT_THRESHOLD: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*reorg_parent_weight_threshold:' + regex: true + spec: | + + REORG_PARENT_WEIGHT_THRESHOLD: uint64 = 160 + + +- name: SAMPLES_PER_SLOT#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SAMPLES_PER_SLOT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*samples_per_slot:' + regex: true + spec: | + + SAMPLES_PER_SLOT = 8 + + +- name: SECONDS_PER_ETH1_BLOCK#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SECONDS_PER_ETH1_BLOCK: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*seconds_per_eth1_block:' + regex: true + spec: | + + SECONDS_PER_ETH1_BLOCK: uint64 = 14 + + +- 
name: SECONDS_PER_SLOT#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SECONDS_PER_SLOT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*seconds_per_slot:' + regex: true + spec: | + + SECONDS_PER_SLOT: uint64 = 12 + + +- name: SHARD_COMMITTEE_PERIOD#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SHARD_COMMITTEE_PERIOD: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*shard_committee_period:' + regex: true + spec: | + + SHARD_COMMITTEE_PERIOD: uint64 = 256 + + +- name: SLOT_DURATION_MS#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SLOT_DURATION_MS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*slot_duration_ms:' + regex: true + spec: | + + SLOT_DURATION_MS: uint64 = 12000 + + +- name: SUBNETS_PER_NODE#phase0 + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SUBNETS_PER_NODE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*subnets_per_node:' + regex: true + spec: | + + SUBNETS_PER_NODE = 2 + + +- name: SYNC_MESSAGE_DUE_BPS#altair + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: SYNC_MESSAGE_DUE_BPS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*sync_message_due_bps:' + regex: true + spec: | + + SYNC_MESSAGE_DUE_BPS: uint64 = 3333 + + +- name: SYNC_MESSAGE_DUE_BPS_GLOAS#gloas + sources: [] + spec: | + + SYNC_MESSAGE_DUE_BPS_GLOAS: uint64 = 2500 + + +- name: TERMINAL_BLOCK_HASH#bellatrix + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: TERMINAL_BLOCK_HASH: + - file: 
consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*terminal_block_hash:' + regex: true + spec: | + + TERMINAL_BLOCK_HASH: Hash32 = '0x0000000000000000000000000000000000000000000000000000000000000000' + + +- name: TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH#bellatrix + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*terminal_block_hash_activation_epoch:' + regex: true + spec: | + + TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH = 18446744073709551615 + + +- name: TERMINAL_TOTAL_DIFFICULTY#bellatrix + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: TERMINAL_TOTAL_DIFFICULTY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*terminal_total_difficulty:' + regex: true + spec: | + + TERMINAL_TOTAL_DIFFICULTY = 58750000000000000000000 + + +- name: VALIDATOR_CUSTODY_REQUIREMENT#fulu + sources: + - file: common/eth2_network_config/built_in_network_configs/mainnet/config.yaml + search: VALIDATOR_CUSTODY_REQUIREMENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*validator_custody_requirement:' + regex: true + spec: | + + VALIDATOR_CUSTODY_REQUIREMENT = 8 + diff --git a/specrefs/constants.yml b/specrefs/constants.yml new file mode 100644 index 00000000000..158b39338a5 --- /dev/null +++ b/specrefs/constants.yml @@ -0,0 +1,603 @@ +- name: BASE_REWARDS_PER_EPOCH#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*base_rewards_per_epoch:' + regex: true + spec: | + + BASE_REWARDS_PER_EPOCH: uint64 = 4 + + +- name: BASIS_POINTS#phase0 + sources: [] + spec: | + + BASIS_POINTS: uint64 = 10000 + + +- name: BLS_MODULUS#deneb + sources: [] + spec: | + + BLS_MODULUS = 
52435875175126190479447740508185965837690552500527637822603658699938581184513 + + +- name: BLS_WITHDRAWAL_PREFIX#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*bls_withdrawal_prefix_byte:' + regex: true + spec: | + + BLS_WITHDRAWAL_PREFIX: Bytes1 = '0x00' + + +- name: BUILDER_PAYMENT_THRESHOLD_DENOMINATOR#gloas + sources: [] + spec: | + + BUILDER_PAYMENT_THRESHOLD_DENOMINATOR: uint64 = 10 + + +- name: BUILDER_PAYMENT_THRESHOLD_NUMERATOR#gloas + sources: [] + spec: | + + BUILDER_PAYMENT_THRESHOLD_NUMERATOR: uint64 = 6 + + +- name: BUILDER_WITHDRAWAL_PREFIX#gloas + sources: [] + spec: | + + BUILDER_WITHDRAWAL_PREFIX: Bytes1 = '0x03' + + +- name: BYTES_PER_COMMITMENT#deneb + sources: [] + spec: | + + BYTES_PER_COMMITMENT: uint64 = 48 + + +- name: BYTES_PER_FIELD_ELEMENT#deneb + sources: [] + spec: | + + BYTES_PER_FIELD_ELEMENT: uint64 = 32 + + +- name: BYTES_PER_PROOF#deneb + sources: [] + spec: | + + BYTES_PER_PROOF: uint64 = 48 + + +- name: COMPOUNDING_WITHDRAWAL_PREFIX#electra + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*compounding_withdrawal_prefix_byte:' + regex: true + spec: | + + COMPOUNDING_WITHDRAWAL_PREFIX: Bytes1 = '0x02' + + +- name: CONSOLIDATION_REQUEST_TYPE#electra + sources: [] + spec: | + + CONSOLIDATION_REQUEST_TYPE: Bytes1 = '0x02' + + +- name: DEPOSIT_CONTRACT_TREE_DEPTH#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*deposit_contract_tree_depth:' + regex: true + spec: | + + DEPOSIT_CONTRACT_TREE_DEPTH: uint64 = 2**5 + + +- name: DEPOSIT_REQUEST_TYPE#electra + sources: [] + spec: | + + DEPOSIT_REQUEST_TYPE: Bytes1 = '0x00' + + +- name: DOMAIN_AGGREGATE_AND_PROOF#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_aggregate_and_proof:' + regex: true + spec: | + + DOMAIN_AGGREGATE_AND_PROOF: 
DomainType = '0x06000000' + + +- name: DOMAIN_APPLICATION_MASK#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_application_mask:' + regex: true + spec: | + + DOMAIN_APPLICATION_MASK: DomainType = '0x00000001' + + +- name: DOMAIN_BEACON_ATTESTER#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_beacon_attester:' + regex: true + spec: | + + DOMAIN_BEACON_ATTESTER: DomainType = '0x01000000' + + +- name: DOMAIN_BEACON_BUILDER#gloas + sources: [] + spec: | + + DOMAIN_BEACON_BUILDER: DomainType = '0x1B000000' + + +- name: DOMAIN_BEACON_PROPOSER#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_beacon_proposer:' + regex: true + spec: | + + DOMAIN_BEACON_PROPOSER: DomainType = '0x00000000' + + +- name: DOMAIN_BLS_TO_EXECUTION_CHANGE#capella + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_bls_to_execution_change:' + regex: true + spec: | + + DOMAIN_BLS_TO_EXECUTION_CHANGE: DomainType = '0x0A000000' + + +- name: DOMAIN_CONTRIBUTION_AND_PROOF#altair + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_contribution_and_proof:' + regex: true + spec: | + + DOMAIN_CONTRIBUTION_AND_PROOF: DomainType = '0x09000000' + + +- name: DOMAIN_DEPOSIT#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_deposit:' + regex: true + spec: | + + DOMAIN_DEPOSIT: DomainType = '0x03000000' + + +- name: DOMAIN_PTC_ATTESTER#gloas + sources: [] + spec: | + + DOMAIN_PTC_ATTESTER: DomainType = '0x0C000000' + + +- name: DOMAIN_RANDAO#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_randao:' + regex: true + spec: | + + DOMAIN_RANDAO: DomainType = 
'0x02000000' + + +- name: DOMAIN_SELECTION_PROOF#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_selection_proof:' + regex: true + spec: | + + DOMAIN_SELECTION_PROOF: DomainType = '0x05000000' + + +- name: DOMAIN_SYNC_COMMITTEE#altair + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_sync_committee:' + regex: true + spec: | + + DOMAIN_SYNC_COMMITTEE: DomainType = '0x07000000' + + +- name: DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF#altair + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_sync_committee_selection_proof:' + regex: true + spec: | + + DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: DomainType = '0x08000000' + + +- name: DOMAIN_VOLUNTARY_EXIT#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*domain_voluntary_exit:' + regex: true + spec: | + + DOMAIN_VOLUNTARY_EXIT: DomainType = '0x04000000' + + +- name: ENDIANNESS#phase0 + sources: [] + spec: | + + ENDIANNESS = 'little' + + +- name: ETH1_ADDRESS_WITHDRAWAL_PREFIX#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*eth1_address_withdrawal_prefix_byte:' + regex: true + spec: | + + ETH1_ADDRESS_WITHDRAWAL_PREFIX: Bytes1 = '0x01' + + +- name: ETH_TO_GWEI#phase0 + sources: [] + spec: | + + ETH_TO_GWEI: uint64 = 10**9 + + +- name: FAR_FUTURE_EPOCH#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*far_future_epoch:' + regex: true + spec: | + + FAR_FUTURE_EPOCH: Epoch = 2**64 - 1 + + +- name: FIAT_SHAMIR_PROTOCOL_DOMAIN#deneb + sources: [] + spec: | + + FIAT_SHAMIR_PROTOCOL_DOMAIN = b'FSBLOBVERIFY_V1_' + + +- name: FULL_EXIT_REQUEST_AMOUNT#electra + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn 
mainnet\(\).*\n(?:.*\n)*?.*full_exit_request_amount:' + regex: true + spec: | + + FULL_EXIT_REQUEST_AMOUNT: uint64 = 0 + + +- name: G1_POINT_AT_INFINITY#deneb + sources: [] + spec: | + + G1_POINT_AT_INFINITY: Bytes48 = b'\xc0' + b'\x00' * 47 + + +- name: G2_POINT_AT_INFINITY#altair + sources: [] + spec: | + + G2_POINT_AT_INFINITY: BLSSignature = b'\xc0' + b'\x00' * 95 + + +- name: GENESIS_EPOCH#phase0 + sources: [] + spec: | + + GENESIS_EPOCH: Epoch = 0 + + +- name: GENESIS_SLOT#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*genesis_slot:' + regex: true + spec: | + + GENESIS_SLOT: Slot = 0 + + +- name: INTERVALS_PER_SLOT#phase0 + sources: + - file: consensus/types/src/core/consts.rs + search: pub const INTERVALS_PER_SLOT: + spec: | + + INTERVALS_PER_SLOT: uint64 = 3 + + +- name: JUSTIFICATION_BITS_LENGTH#phase0 + sources: [] + spec: | + + JUSTIFICATION_BITS_LENGTH: uint64 = 4 + + +- name: KZG_ENDIANNESS#deneb + sources: [] + spec: | + + KZG_ENDIANNESS = 'big' + + +- name: KZG_SETUP_G2_LENGTH#deneb + sources: [] + spec: | + + KZG_SETUP_G2_LENGTH = 65 + + +- name: KZG_SETUP_G2_MONOMIAL#deneb + sources: [] + spec: | + + KZG_SETUP_G2_MONOMIAL: Vector[G2Point, KZG_SETUP_G2_LENGTH] = ['0x93e02b6052719f607dacd3a088274f65596bd0d09920b61ab5da61bbdc7f5049334cf11213945d57e5ac7d055d042b7e024aa2b2f08f0a91260805272dc51051c6e47ad4fa403b02b4510b647ae3d1770bac0326a805bbefd48056c8c121bdb8', '0xb5bfd7dd8cdeb128843bc287230af38926187075cbfbefa81009a2ce615ac53d2914e5870cb452d2afaaab24f3499f72185cbfee53492714734429b7b38608e23926c911cceceac9a36851477ba4c60b087041de621000edc98edada20c1def2', '0xb5337ba0ce5d37224290916e268e2060e5c14f3f9fc9e1ec3af5a958e7a0303122500ce18f1a4640bf66525bd10e763501fe986d86649d8d45143c08c3209db3411802c226e9fe9a55716ac4a0c14f9dcef9e70b2bb309553880dc5025eab3cc', 
'0xb3c1dcdc1f62046c786f0b82242ef283e7ed8f5626f72542aa2c7a40f14d9094dd1ebdbd7457ffdcdac45fd7da7e16c51200b06d791e5e43e257e45efdf0bd5b06cd2333beca2a3a84354eb48662d83aef5ecf4e67658c851c10b13d8d87c874', '0x954d91c7688983382609fca9e211e461f488a5971fd4e40d7e2892037268eacdfd495cfa0a7ed6eb0eb11ac3ae6f651716757e7526abe1e06c64649d80996fd3105c20c4c94bc2b22d97045356fe9d791f21ea6428ac48db6f9e68e30d875280', '0x88a6b6bb26c51cf9812260795523973bb90ce80f6820b6c9048ab366f0fb96e48437a7f7cb62aedf64b11eb4dfefebb0147608793133d32003cb1f2dc47b13b5ff45f1bb1b2408ea45770a08dbfaec60961acb8119c47b139a13b8641e2c9487', '0x85cd7be9728bd925d12f47fb04b32d9fad7cab88788b559f053e69ca18e463113ecc8bbb6dbfb024835f901b3a957d3108d6770fb26d4c8be0a9a619f6e3a4bf15cbfd48e61593490885f6cee30e4300c5f9cf5e1c08e60a2d5b023ee94fcad0', '0x80477dba360f04399821a48ca388c0fa81102dd15687fea792ee8c1114e00d1bc4839ad37ac58900a118d863723acfbe08126ea883be87f50e4eabe3b5e72f5d9e041db8d9b186409fd4df4a7dde38c0e0a3b1ae29b098e5697e7f110b6b27e4', '0xb7a6aec08715a9f8672a2b8c367e407be37e59514ac19dd4f0942a68007bba3923df22da48702c63c0d6b3efd3c2d04e0fe042d8b5a54d562f9f33afc4865dcbcc16e99029e25925580e87920c399e710d438ac1ce3a6dc9b0d76c064a01f6f7', '0xac1b001edcea02c8258aeffbf9203114c1c874ad88dae1184fadd7d94cd09053649efd0ca413400e6e9b5fa4eac33261000af88b6bd0d2abf877a4f0355d2fb4d6007adb181695201c5432e50b850b51b3969f893bddf82126c5a71b042b7686', '0x90043fda4de53fb364fab2c04be5296c215599105ecff0c12e4917c549257125775c29f2507124d15f56e30447f367db0596c33237242c02d83dfd058735f1e3c1ff99069af55773b6d51d32a68bf75763f59ec4ee7267932ae426522b8aaab6', '0xa8660ce853e9dc08271bf882e29cd53397d63b739584dda5263da4c7cc1878d0cf6f3e403557885f557e184700575fee016ee8542dec22c97befe1d10f414d22e84560741cdb3e74c30dda9b42eeaaf53e27822de2ee06e24e912bf764a9a533', '0x8fe3921a96d0d065e8aa8fce9aa42c8e1461ca0470688c137be89396dd05103606dab6cdd2a4591efd6addf72026c12e065da7be276dee27a7e30afa2bd81c18f1516e7f068f324d0bad9570b95f6bd02c727cd2343e26db0887c3e4e26dceda', 
'0x8ae1ad97dcb9c192c9a3933541b40447d1dc4eebf380151440bbaae1e120cc5cdf1bcea55180b128d8e180e3af623815191d063cc0d7a47d55fb7687b9d87040bf7bc1a7546b07c61db5ccf1841372d7c2fe4a5431ffff829f3c2eb590b0b710', '0x8c2fa96870a88150f7876c931e2d3cc2adeaaaf5c73ef5fa1cf9dfa0991ae4819f9321af7e916e5057d87338e630a2f21242c29d76963cf26035b548d2a63d8ad7bd6efefa01c1df502cbdfdfe0334fb21ceb9f686887440f713bf17a89b8081', '0xb9aa98e2f02bb616e22ee5dd74c7d1049321ac9214d093a738159850a1dbcc7138cb8d26ce09d8296368fd5b291d74fa17ac7cc1b80840fdd4ee35e111501e3fa8485b508baecda7c1ab7bd703872b7d64a2a40b3210b6a70e8a6ffe0e5127e3', '0x9292db67f8771cdc86854a3f614a73805bf3012b48f1541e704ea4015d2b6b9c9aaed36419769c87c49f9e3165f03edb159c23b3a49c4390951f78e1d9b0ad997129b17cdb57ea1a6638794c0cca7d239f229e589c5ae4f9fe6979f7f8cba1d7', '0x91cd9e86550f230d128664f7312591fee6a84c34f5fc7aed557bcf986a409a6de722c4330453a305f06911d2728626e611acfdf81284f77f60a3a1595053a9479964fd713117e27c0222cc679674b03bc8001501aaf9b506196c56de29429b46', '0xa9516b73f605cc31b89c68b7675dc451e6364595243d235339437f556cf22d745d4250c1376182273be2d99e02c10eee047410a43eff634d051aeb784e76cb3605d8e079b9eb6ad1957dfdf77e1cd32ce4a573c9dfcc207ca65af6eb187f6c3d', '0xa9667271f7d191935cc8ad59ef3ec50229945faea85bfdfb0d582090f524436b348aaa0183b16a6231c00332fdac2826125b8c857a2ed9ec66821cfe02b3a2279be2412441bc2e369b255eb98614e4be8490799c4df22f18d47d24ec70bba5f7', '0xa4371144d2aa44d70d3cb9789096d3aa411149a6f800cb46f506461ee8363c8724667974252f28aea61b6030c05930ac039c1ee64bb4bd56532a685cae182bf2ab935eee34718cffcb46cae214c77aaca11dbb1320faf23c47247db1da04d8dc', '0x89a7eb441892260b7e81168c386899cd84ffc4a2c5cad2eae0d1ab9e8b5524662e6f660fe3f8bfe4c92f60b060811bc605b14c5631d16709266886d7885a5eb5930097127ec6fb2ebbaf2df65909cf48f253b3d5e22ae48d3e9a2fd2b01f447e', '0x9648c42ca97665b5eccb49580d8532df05eb5a68db07f391a2340769b55119eaf4c52fe4f650c09250fa78a76c3a1e271799b8333cc2628e3d4b4a6a3e03da1f771ecf6516dd63236574a7864ff07e319a6f11f153406280d63af9e2b5713283', 
'0x9663bf6dd446ea7a90658ee458578d4196dc0b175ef7fcfa75f44d41670850774c2e46c5a6be132a2c072a3c0180a24f0305d1acac49d2d79878e5cda80c57feda3d01a6af12e78b5874e2a4b3717f11c97503b41a4474e2e95b179113726199', '0xb212aeb4814e0915b432711b317923ed2b09e076aaf558c3ae8ef83f9e15a83f9ea3f47805b2750ab9e8106cb4dc6ad003522c84b03dc02829978a097899c773f6fb31f7fe6b8f2d836d96580f216fec20158f1590c3e0d7850622e15194db05', '0x925f005059bf07e9ceccbe66c711b048e236ade775720d0fe479aebe6e23e8af281225ad18e62458dc1b03b42ad4ca290d4aa176260604a7aad0d9791337006fbdebe23746f8060d42876f45e4c83c3643931392fde1cd13ff8bddf8111ef974', '0x9553edb22b4330c568e156a59ef03b26f5c326424f830fe3e8c0b602f08c124730ffc40bc745bec1a22417adb22a1a960243a10565c2be3066bfdb841d1cd14c624cd06e0008f4beb83f972ce6182a303bee3fcbcabc6cfe48ec5ae4b7941bfc', '0x935f5a404f0a78bdcce709899eda0631169b366a669e9b58eacbbd86d7b5016d044b8dfc59ce7ed8de743ae16c2343b50e2f925e88ba6319e33c3fc76b314043abad7813677b4615c8a97eb83cc79de4fedf6ccbcfa4d4cbf759a5a84e4d9742', '0xa5b014ab936eb4be113204490e8b61cd38d71da0dec7215125bcd131bf3ab22d0a32ce645bca93e7b3637cf0c2db3d6601a0ddd330dc46f9fae82abe864ffc12d656c88eb50c20782e5bb6f75d18760666f43943abb644b881639083e122f557', '0x935b7298ae52862fa22bf03bfc1795b34c70b181679ae27de08a9f5b4b884f824ef1b276b7600efa0d2f1d79e4a470d51692fd565c5cf8343dd80e5d3336968fc21c09ba9348590f6206d4424eb229e767547daefa98bc3aa9f421158dee3f2a', '0x9830f92446e708a8f6b091cc3c38b653505414f8b6507504010a96ffda3bcf763d5331eb749301e2a1437f00e2415efb01b799ad4c03f4b02de077569626255ac1165f96ea408915d4cf7955047620da573e5c439671d1fa5c833fb11de7afe6', '0x840dcc44f673fff3e387af2bb41e89640f2a70bcd2b92544876daa92143f67c7512faf5f90a04b7191de01f3e2b1bde00622a20dc62ca23bbbfaa6ad220613deff43908382642d4d6a86999f662efd64b1df448b68c847cfa87630a3ffd2ec76', '0x92950c895ed54f7f876b2fda17ecc9c41b7accfbdd42c210cc5b475e0737a7279f558148531b5c916e310604a1de25a80940c94fe5389ae5d6a5e9c371be67bceea1877f5401725a6595bcf77ece60905151b6dfcb68b75ed2e708c73632f4fd', 
'0x8010246bf8e94c25fd029b346b5fbadb404ef6f44a58fd9dd75acf62433d8cc6db66974f139a76e0c26dddc1f329a88214dbb63276516cf325c7869e855d07e0852d622c332ac55609ba1ec9258c45746a2aeb1af0800141ee011da80af175d4', '0xb0f1bad257ebd187bdc3f37b23f33c6a5d6a8e1f2de586080d6ada19087b0e2bf23b79c1b6da1ee82271323f5bdf3e1b018586b54a5b92ab6a1a16bb3315190a3584a05e6c37d5ca1e05d702b9869e27f513472bcdd00f4d0502a107773097da', '0x9636d24f1ede773ce919f309448dd7ce023f424afd6b4b69cb98c2a988d849a283646dc3e469879daa1b1edae91ae41f009887518e7eb5578f88469321117303cd3ac2d7aee4d9cb5f82ab9ae3458e796dfe7c24284b05815acfcaa270ff22e2', '0xb373feb5d7012fd60578d7d00834c5c81df2a23d42794fed91aa9535a4771fde0341c4da882261785e0caca40bf83405143085e7f17e55b64f6c5c809680c20b050409bf3702c574769127c854d27388b144b05624a0e24a1cbcc4d08467005b', '0xb15680648949ce69f82526e9b67d9b55ce5c537dc6ab7f3089091a9a19a6b90df7656794f6edc87fb387d21573ffc847062623685931c2790a508cbc8c6b231dd2c34f4d37d4706237b1407673605a604bcf6a50cc0b1a2db20485e22b02c17e', '0x8817e46672d40c8f748081567b038a3165f87994788ec77ee8daea8587f5540df3422f9e120e94339be67f186f50952504cb44f61e30a5241f1827e501b2de53c4c64473bcc79ab887dd277f282fbfe47997a930dd140ac08b03efac88d81075', '0xa6e4ef6c1d1098f95aae119905f87eb49b909d17f9c41bcfe51127aa25fee20782ea884a7fdf7d5e9c245b5a5b32230b07e0dbf7c6743bf52ee20e2acc0b269422bd6cf3c07115df4aa85b11b2c16630a07c974492d9cdd0ec325a3fabd95044', '0x8634aa7c3d00e7f17150009698ce440d8e1b0f13042b624a722ace68ead870c3d2212fbee549a2c190e384d7d6ac37ce14ab962c299ea1218ef1b1489c98906c91323b94c587f1d205a6edd5e9d05b42d591c26494a6f6a029a2aadb5f8b6f67', '0x821a58092900bdb73decf48e13e7a5012a3f88b06288a97b855ef51306406e7d867d613d9ec738ebacfa6db344b677d21509d93f3b55c2ebf3a2f2a6356f875150554c6fff52e62e3e46f7859be971bf7dd9d5b3e1d799749c8a97c2e04325df', '0x8dba356577a3a388f782e90edb1a7f3619759f4de314ad5d95c7cc6e197211446819c4955f99c5fc67f79450d2934e3c09adefc91b724887e005c5190362245eec48ce117d0a94d6fa6db12eda4ba8dde608fbbd0051f54dcf3bb057adfb2493', 
'0xa32a690dc95c23ed9fb46443d9b7d4c2e27053a7fcc216d2b0020a8cf279729c46114d2cda5772fd60a97016a07d6c5a0a7eb085a18307d34194596f5b541cdf01b2ceb31d62d6b55515acfd2b9eec92b27d082fbc4dc59fc63b551eccdb8468', '0xa040f7f4be67eaf0a1d658a3175d65df21a7dbde99bfa893469b9b43b9d150fc2e333148b1cb88cfd0447d88fa1a501d126987e9fdccb2852ecf1ba907c2ca3d6f97b055e354a9789854a64ecc8c2e928382cf09dda9abde42bbdf92280cdd96', '0x864baff97fa60164f91f334e0c9be00a152a416556b462f96d7c43b59fe1ebaff42f0471d0bf264976f8aa6431176eb905bd875024cf4f76c13a70bede51dc3e47e10b9d5652d30d2663b3af3f08d5d11b9709a0321aba371d2ef13174dcfcaf', '0x95a46f32c994133ecc22db49bad2c36a281d6b574c83cfee6680b8c8100466ca034b815cfaedfbf54f4e75188e661df901abd089524e1e0eb0bf48d48caa9dd97482d2e8c1253e7e8ac250a32fd066d5b5cb08a8641bdd64ecfa48289dca83a3', '0xa2cce2be4d12144138cb91066e0cd0542c80b478bf467867ebef9ddaf3bd64e918294043500bf5a9f45ee089a8d6ace917108d9ce9e4f41e7e860cbce19ac52e791db3b6dde1c4b0367377b581f999f340e1d6814d724edc94cb07f9c4730774', '0xb145f203eee1ac0a1a1731113ffa7a8b0b694ef2312dabc4d431660f5e0645ef5838e3e624cfe1228cfa248d48b5760501f93e6ab13d3159fc241427116c4b90359599a4cb0a86d0bb9190aa7fabff482c812db966fd2ce0a1b48cb8ac8b3bca', '0xadabe5d215c608696e03861cbd5f7401869c756b3a5aadc55f41745ad9478145d44393fec8bb6dfc4ad9236dc62b9ada0f7ca57fe2bae1b71565dbf9536d33a68b8e2090b233422313cc96afc7f1f7e0907dc7787806671541d6de8ce47c4cd0', '0xae7845fa6b06db53201c1080e01e629781817f421f28956589c6df3091ec33754f8a4bd4647a6bb1c141ac22731e3c1014865d13f3ed538dcb0f7b7576435133d9d03be655f8fbb4c9f7d83e06d1210aedd45128c2b0c9bab45a9ddde1c862a5', '0x9159eaa826a24adfa7adf6e8d2832120ebb6eccbeb3d0459ffdc338548813a2d239d22b26451fda98cc0c204d8e1ac69150b5498e0be3045300e789bcb4e210d5cd431da4bdd915a21f407ea296c20c96608ded0b70d07188e96e6c1a7b9b86b', '0xa9fc6281e2d54b46458ef564ffaed6944bff71e389d0acc11fa35d3fcd8e10c1066e0dde5b9b6516f691bb478e81c6b20865281104dcb640e29dc116daae2e884f1fe6730d639dbe0e19a532be4fb337bf52ae8408446deb393d224eee7cfa50', 
'0x84291a42f991bfb36358eedead3699d9176a38f6f63757742fdbb7f631f2c70178b1aedef4912fed7b6cf27e88ddc7eb0e2a6aa4b999f3eb4b662b93f386c8d78e9ac9929e21f4c5e63b12991fcde93aa64a735b75b535e730ff8dd2abb16e04', '0xa1b7fcacae181495d91765dfddf26581e8e39421579c9cbd0dd27a40ea4c54af3444a36bf85a11dda2114246eaddbdd619397424bb1eb41b5a15004b902a590ede5742cd850cf312555be24d2df8becf48f5afba5a8cd087cb7be0a521728386', '0x92feaaf540dbd84719a4889a87cdd125b7e995a6782911931fef26da9afcfbe6f86aaf5328fe1f77631491ce6239c5470f44c7791506c6ef1626803a5794e76d2be0af92f7052c29ac6264b7b9b51f267ad820afc6f881460521428496c6a5f1', '0xa525c925bfae1b89320a5054acc1fa11820f73d0cf28d273092b305467b2831fab53b6daf75fb926f332782d50e2522a19edcd85be5eb72f1497193c952d8cd0bcc5d43b39363b206eae4cb1e61668bde28a3fb2fc1e0d3d113f6dfadb799717', '0x98752bb6f5a44213f40eda6aa4ff124057c1b13b6529ab42fe575b9afa66e59b9c0ed563fb20dff62130c436c3e905ee17dd8433ba02c445b1d67182ab6504a90bbe12c26a754bbf734665c622f76c62fe2e11dd43ce04fd2b91a8463679058b', '0xa9aa9a84729f7c44219ff9e00e651e50ddea3735ef2a73fdf8ed8cd271961d8ed7af5cd724b713a89a097a3fe65a3c0202f69458a8b4c157c62a85668b12fc0d3957774bc9b35f86c184dd03bfefd5c325da717d74192cc9751c2073fe9d170e', '0xb221c1fd335a4362eff504cd95145f122bf93ea02ae162a3fb39c75583fc13a932d26050e164da97cff3e91f9a7f6ff80302c19dd1916f24acf6b93b62f36e9665a8785413b0c7d930c7f1668549910f849bca319b00e59dd01e5dec8d2edacc', '0xa71e2b1e0b16d754b848f05eda90f67bedab37709550171551050c94efba0bfc282f72aeaaa1f0330041461f5e6aa4d11537237e955e1609a469d38ed17f5c2a35a1752f546db89bfeff9eab78ec944266f1cb94c1db3334ab48df716ce408ef', '0xb990ae72768779ba0b2e66df4dd29b3dbd00f901c23b2b4a53419226ef9232acedeb498b0d0687c463e3f1eead58b20b09efcefa566fbfdfe1c6e48d32367936142d0a734143e5e63cdf86be7457723535b787a9cfcfa32fe1d61ad5a2617220', '0x8d27e7fbff77d5b9b9bbc864d5231fecf817238a6433db668d5a62a2c1ee1e5694fdd90c3293c06cc0cb15f7cbeab44d0d42be632cb9ff41fc3f6628b4b62897797d7b56126d65b694dcf3e298e3561ac8813fbd7296593ced33850426df42db', 
'0xa92039a08b5502d5b211a7744099c9f93fa8c90cedcb1d05e92f01886219dd464eb5fb0337496ad96ed09c987da4e5f019035c5b01cc09b2a18b8a8dd419bc5895388a07e26958f6bd26751929c25f89b8eb4a299d822e2d26fec9ef350e0d3c', '0x92dcc5a1c8c3e1b28b1524e3dd6dbecd63017c9201da9dbe077f1b82adc08c50169f56fc7b5a3b28ec6b89254de3e2fd12838a761053437883c3e01ba616670cea843754548ef84bcc397de2369adcca2ab54cd73c55dc68d87aec3fc2fe4f10'] + + +- name: MAX_CONCURRENT_REQUESTS#phase0 + sources: [] + spec: | + + MAX_CONCURRENT_REQUESTS = 2 + + +- name: MAX_REQUEST_LIGHT_CLIENT_UPDATES#altair + sources: [] + spec: | + + MAX_REQUEST_LIGHT_CLIENT_UPDATES = 2**7 + + +- name: NODE_ID_BITS#phase0 + sources: [] + spec: | + + NODE_ID_BITS = 256 + + +- name: PARTICIPATION_FLAG_WEIGHTS#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const PARTICIPATION_FLAG_WEIGHTS: + spec: | + + PARTICIPATION_FLAG_WEIGHTS = [TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT] + + +- name: PAYLOAD_STATUS_EMPTY#gloas + sources: [] + spec: | + + PAYLOAD_STATUS_EMPTY: PayloadStatus = 1 + + +- name: PAYLOAD_STATUS_FULL#gloas + sources: [] + spec: | + + PAYLOAD_STATUS_FULL: PayloadStatus = 2 + + +- name: PAYLOAD_STATUS_PENDING#gloas + sources: [] + spec: | + + PAYLOAD_STATUS_PENDING: PayloadStatus = 0 + + +- name: PRIMITIVE_ROOT_OF_UNITY#deneb + sources: [] + spec: | + + PRIMITIVE_ROOT_OF_UNITY = 7 + + +- name: PROPOSER_WEIGHT#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const PROPOSER_WEIGHT: + spec: | + + PROPOSER_WEIGHT: uint64 = 8 + + +- name: RANDOM_CHALLENGE_KZG_BATCH_DOMAIN#deneb + sources: [] + spec: | + + RANDOM_CHALLENGE_KZG_BATCH_DOMAIN = b'RCKZGBATCH___V1_' + + +- name: RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN#fulu + sources: [] + spec: | + + RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN = b'RCKZGCBATCH__V1_' + + +- name: SAFETY_DECAY#phase0 + sources: [] + spec: | + + SAFETY_DECAY: uint64 = 10 + + +- name: SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix + sources: [] + 
spec: | + + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY = 128 + + +- name: SYNC_COMMITTEE_SUBNET_COUNT#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const SYNC_COMMITTEE_SUBNET_COUNT: + spec: | + + SYNC_COMMITTEE_SUBNET_COUNT = 4 + + +- name: SYNC_REWARD_WEIGHT#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const SYNC_REWARD_WEIGHT: + spec: | + + SYNC_REWARD_WEIGHT: uint64 = 2 + + +- name: TARGET_AGGREGATORS_PER_COMMITTEE#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub target_aggregators_per_committee: + spec: | + + TARGET_AGGREGATORS_PER_COMMITTEE = 2**4 + + +- name: TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: + spec: | + + TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 2**4 + + +- name: TIMELY_HEAD_FLAG_INDEX#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TIMELY_HEAD_FLAG_INDEX: + spec: | + + TIMELY_HEAD_FLAG_INDEX = 2 + + +- name: TIMELY_HEAD_WEIGHT#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TIMELY_HEAD_WEIGHT: + spec: | + + TIMELY_HEAD_WEIGHT: uint64 = 14 + + +- name: TIMELY_SOURCE_FLAG_INDEX#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TIMELY_SOURCE_FLAG_INDEX: + spec: | + + TIMELY_SOURCE_FLAG_INDEX = 0 + + +- name: TIMELY_SOURCE_WEIGHT#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TIMELY_SOURCE_WEIGHT: + spec: | + + TIMELY_SOURCE_WEIGHT: uint64 = 14 + + +- name: TIMELY_TARGET_FLAG_INDEX#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TIMELY_TARGET_FLAG_INDEX: + spec: | + + TIMELY_TARGET_FLAG_INDEX = 1 + + +- name: TIMELY_TARGET_WEIGHT#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const TIMELY_TARGET_WEIGHT: + spec: | + + TIMELY_TARGET_WEIGHT: 
uint64 = 26 + + +- name: UINT256_MAX#fulu + sources: [] + spec: | + + UINT256_MAX: uint256 = 2**256 - 1 + + +- name: UINT64_MAX#phase0 + sources: [] + spec: | + + UINT64_MAX: uint64 = 2**64 - 1 + + +- name: UINT64_MAX_SQRT#phase0 + sources: [] + spec: | + + UINT64_MAX_SQRT: uint64 = 4294967295 + + +- name: UNSET_DEPOSIT_REQUESTS_START_INDEX#electra + sources: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*unset_deposit_requests_start_index:' + regex: true + spec: | + + UNSET_DEPOSIT_REQUESTS_START_INDEX: uint64 = 2**64 - 1 + + +- name: VERSIONED_HASH_VERSION_KZG#deneb + sources: + - file: crypto/kzg/src/kzg_commitment.rs + search: pub const VERSIONED_HASH_VERSION_KZG: + spec: | + + VERSIONED_HASH_VERSION_KZG: Bytes1 = '0x01' + + +- name: WEIGHT_DENOMINATOR#altair + sources: + - file: consensus/types/src/core/consts.rs + search: pub const WEIGHT_DENOMINATOR: + spec: | + + WEIGHT_DENOMINATOR: uint64 = 64 + + +- name: WITHDRAWAL_REQUEST_TYPE#electra + sources: [] + spec: | + + WITHDRAWAL_REQUEST_TYPE: Bytes1 = '0x01' + diff --git a/specrefs/containers.yml b/specrefs/containers.yml new file mode 100644 index 00000000000..2a67d623297 --- /dev/null +++ b/specrefs/containers.yml @@ -0,0 +1,1600 @@ +- name: AggregateAndProof#phase0 + sources: + - file: consensus/types/src/attestation/aggregate_and_proof.rs + search: pub struct AggregateAndProof + spec: | + + class AggregateAndProof(Container): + aggregator_index: ValidatorIndex + aggregate: Attestation + selection_proof: BLSSignature + + +- name: AggregateAndProof#electra + sources: + - file: consensus/types/src/attestation/aggregate_and_proof.rs + search: pub struct AggregateAndProof + spec: | + + class AggregateAndProof(Container): + aggregator_index: ValidatorIndex + # [Modified in Electra:EIP7549] + aggregate: Attestation + selection_proof: BLSSignature + + +- name: Attestation#phase0 + sources: + - file: consensus/types/src/attestation/attestation.rs + search: pub struct 
Attestation + spec: | + + class Attestation(Container): + aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] + data: AttestationData + signature: BLSSignature + + +- name: Attestation#electra + sources: + - file: consensus/types/src/attestation/attestation.rs + search: pub struct Attestation + spec: | + + class Attestation(Container): + # [Modified in Electra:EIP7549] + aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT] + data: AttestationData + signature: BLSSignature + # [New in Electra:EIP7549] + committee_bits: Bitvector[MAX_COMMITTEES_PER_SLOT] + + +- name: AttestationData#phase0 + sources: + - file: consensus/types/src/attestation/attestation_data.rs + search: pub struct AttestationData + spec: | + + class AttestationData(Container): + slot: Slot + index: CommitteeIndex + beacon_block_root: Root + source: Checkpoint + target: Checkpoint + + +- name: AttesterSlashing#phase0 + sources: + - file: consensus/types/src/slashing/attester_slashing.rs + search: pub struct AttesterSlashing + spec: | + + class AttesterSlashing(Container): + attestation_1: IndexedAttestation + attestation_2: IndexedAttestation + + +- name: AttesterSlashing#electra + sources: + - file: consensus/types/src/slashing/attester_slashing.rs + search: pub struct AttesterSlashing + spec: | + + class AttesterSlashing(Container): + # [Modified in Electra:EIP7549] + attestation_1: IndexedAttestation + # [Modified in Electra:EIP7549] + attestation_2: IndexedAttestation + + +- name: BLSToExecutionChange#capella + sources: + - file: consensus/types/src/execution/bls_to_execution_change.rs + search: pub struct BlsToExecutionChange + spec: | + + class BLSToExecutionChange(Container): + validator_index: ValidatorIndex + from_bls_pubkey: BLSPubkey + to_execution_address: ExecutionAddress + + +- name: BeaconBlock#phase0 + sources: + - file: consensus/types/src/block/beacon_block.rs + search: pub struct BeaconBlock + spec: | + + class BeaconBlock(Container): + slot: Slot + 
proposer_index: ValidatorIndex + parent_root: Root + state_root: Root + body: BeaconBlockBody + + +- name: BeaconBlockBody#phase0 + sources: + - file: consensus/types/src/block/beacon_block_body.rs + search: pub struct BeaconBlockBody + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] + attestations: List[Attestation, MAX_ATTESTATIONS] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + + +- name: BeaconBlockBody#altair + sources: + - file: consensus/types/src/block/beacon_block_body.rs + search: pub struct BeaconBlockBody + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] + attestations: List[Attestation, MAX_ATTESTATIONS] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + # [New in Altair] + sync_aggregate: SyncAggregate + + +- name: BeaconBlockBody#bellatrix + sources: + - file: consensus/types/src/block/beacon_block_body.rs + search: pub struct BeaconBlockBody + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] + attestations: List[Attestation, MAX_ATTESTATIONS] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + sync_aggregate: SyncAggregate + # [New in Bellatrix] + execution_payload: ExecutionPayload + + +- name: BeaconBlockBody#capella + sources: + - file: 
consensus/types/src/block/beacon_block_body.rs + search: pub struct BeaconBlockBody + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] + attestations: List[Attestation, MAX_ATTESTATIONS] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + sync_aggregate: SyncAggregate + execution_payload: ExecutionPayload + # [New in Capella] + bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES] + + +- name: BeaconBlockBody#deneb + sources: + - file: consensus/types/src/block/beacon_block_body.rs + search: pub struct BeaconBlockBody + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] + attestations: List[Attestation, MAX_ATTESTATIONS] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + sync_aggregate: SyncAggregate + # [Modified in Deneb:EIP4844] + execution_payload: ExecutionPayload + bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES] + # [New in Deneb:EIP4844] + blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] + + +- name: BeaconBlockBody#electra + sources: + - file: consensus/types/src/block/beacon_block_body.rs + search: pub struct BeaconBlockBody + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + # [Modified in Electra:EIP7549] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS_ELECTRA] + # [Modified in 
Electra:EIP7549] + attestations: List[Attestation, MAX_ATTESTATIONS_ELECTRA] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + sync_aggregate: SyncAggregate + execution_payload: ExecutionPayload + bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES] + blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] + # [New in Electra] + execution_requests: ExecutionRequests + + +- name: BeaconBlockBody#gloas + sources: [] + spec: | + + class BeaconBlockBody(Container): + randao_reveal: BLSSignature + eth1_data: Eth1Data + graffiti: Bytes32 + proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] + attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS_ELECTRA] + attestations: List[Attestation, MAX_ATTESTATIONS_ELECTRA] + deposits: List[Deposit, MAX_DEPOSITS] + voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] + sync_aggregate: SyncAggregate + # [Modified in Gloas:EIP7732] + # Removed `execution_payload` + bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES] + # [Modified in Gloas:EIP7732] + # Removed `blob_kzg_commitments` + # [Modified in Gloas:EIP7732] + # Removed `execution_requests` + # [New in Gloas:EIP7732] + signed_execution_payload_bid: SignedExecutionPayloadBid + # [New in Gloas:EIP7732] + payload_attestations: List[PayloadAttestation, MAX_PAYLOAD_ATTESTATIONS] + + +- name: BeaconBlockHeader#phase0 + sources: + - file: consensus/types/src/block/beacon_block_header.rs + search: pub struct BeaconBlockHeader + spec: | + + class BeaconBlockHeader(Container): + slot: Slot + proposer_index: ValidatorIndex + parent_root: Root + state_root: Root + body_root: Root + + +- name: BeaconState#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + 
slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] + current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + + +- name: BeaconState#altair + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + # [Modified in Altair] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + # [Modified in Altair] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + 
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + # [New in Altair] + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + # [New in Altair] + current_sync_committee: SyncCommittee + # [New in Altair] + next_sync_committee: SyncCommittee + + +- name: BeaconState#bellatrix + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # [New in Bellatrix] + latest_execution_payload_header: ExecutionPayloadHeader + + +- name: BeaconState#capella + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot 
+ fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # [Modified in Capella] + latest_execution_payload_header: ExecutionPayloadHeader + # [New in Capella] + next_withdrawal_index: WithdrawalIndex + # [New in Capella] + next_withdrawal_validator_index: ValidatorIndex + # [New in Capella] + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] + + +- name: BeaconState#deneb + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, 
VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # [Modified in Deneb:EIP4844] + latest_execution_payload_header: ExecutionPayloadHeader + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] + + +- name: BeaconState#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + 
previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + latest_execution_payload_header: ExecutionPayloadHeader + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] + # [New in Electra:EIP6110] + deposit_requests_start_index: uint64 + # [New in Electra:EIP7251] + deposit_balance_to_consume: Gwei + # [New in Electra:EIP7251] + exit_balance_to_consume: Gwei + # [New in Electra:EIP7251] + earliest_exit_epoch: Epoch + # [New in Electra:EIP7251] + consolidation_balance_to_consume: Gwei + # [New in Electra:EIP7251] + earliest_consolidation_epoch: Epoch + # [New in Electra:EIP7251] + pending_deposits: List[PendingDeposit, PENDING_DEPOSITS_LIMIT] + # [New in Electra:EIP7251] + pending_partial_withdrawals: List[PendingPartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT] + # [New in Electra:EIP7251] + pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT] + + +- name: BeaconState#fulu + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub struct BeaconState + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: Vector[Gwei, 
EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + latest_execution_payload_header: ExecutionPayloadHeader + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] + deposit_requests_start_index: uint64 + deposit_balance_to_consume: Gwei + exit_balance_to_consume: Gwei + earliest_exit_epoch: Epoch + consolidation_balance_to_consume: Gwei + earliest_consolidation_epoch: Epoch + pending_deposits: List[PendingDeposit, PENDING_DEPOSITS_LIMIT] + pending_partial_withdrawals: List[PendingPartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT] + pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT] + # [New in Fulu:EIP7917] + proposer_lookahead: Vector[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH] + + +- name: BeaconState#gloas + sources: [] + spec: | + + class BeaconState(Container): + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + slashings: 
Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # [Modified in Gloas:EIP7732] + # Removed `latest_execution_payload_header` + # [New in Gloas:EIP7732] + latest_execution_payload_bid: ExecutionPayloadBid + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] + deposit_requests_start_index: uint64 + deposit_balance_to_consume: Gwei + exit_balance_to_consume: Gwei + earliest_exit_epoch: Epoch + consolidation_balance_to_consume: Gwei + earliest_consolidation_epoch: Epoch + pending_deposits: List[PendingDeposit, PENDING_DEPOSITS_LIMIT] + pending_partial_withdrawals: List[PendingPartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT] + pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT] + proposer_lookahead: Vector[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH] + # [New in Gloas:EIP7732] + execution_payload_availability: Bitvector[SLOTS_PER_HISTORICAL_ROOT] + # [New in Gloas:EIP7732] + builder_pending_payments: Vector[BuilderPendingPayment, 2 * SLOTS_PER_EPOCH] + # [New in Gloas:EIP7732] + builder_pending_withdrawals: List[BuilderPendingWithdrawal, BUILDER_PENDING_WITHDRAWALS_LIMIT] + # [New in Gloas:EIP7732] + latest_block_hash: Hash32 + # [New in Gloas:EIP7732] + latest_withdrawals_root: Root + + +- name: BlobIdentifier#deneb + sources: + - file: consensus/types/src/data/blob_sidecar.rs + search: pub struct BlobIdentifier + spec: | + + class 
BlobIdentifier(Container): + block_root: Root + index: BlobIndex + + +- name: BlobSidecar#deneb + sources: + - file: consensus/types/src/data/blob_sidecar.rs + search: pub struct BlobSidecar + spec: | + + class BlobSidecar(Container): + index: BlobIndex + blob: Blob + kzg_commitment: KZGCommitment + kzg_proof: KZGProof + signed_block_header: SignedBeaconBlockHeader + kzg_commitment_inclusion_proof: Vector[Bytes32, KZG_COMMITMENT_INCLUSION_PROOF_DEPTH] + + +- name: BuilderPendingPayment#gloas + sources: [] + spec: | + + class BuilderPendingPayment(Container): + weight: Gwei + withdrawal: BuilderPendingWithdrawal + + +- name: BuilderPendingWithdrawal#gloas + sources: [] + spec: | + + class BuilderPendingWithdrawal(Container): + fee_recipient: ExecutionAddress + amount: Gwei + builder_index: ValidatorIndex + withdrawable_epoch: Epoch + + +- name: Checkpoint#phase0 + sources: + - file: consensus/types/src/attestation/checkpoint.rs + search: pub struct Checkpoint + spec: | + + class Checkpoint(Container): + epoch: Epoch + root: Root + + +- name: ConsolidationRequest#electra + sources: + - file: consensus/types/src/consolidation/consolidation_request.rs + search: pub struct ConsolidationRequest + spec: | + + class ConsolidationRequest(Container): + source_address: ExecutionAddress + source_pubkey: BLSPubkey + target_pubkey: BLSPubkey + + +- name: ContributionAndProof#altair + sources: + - file: consensus/types/src/sync_committee/contribution_and_proof.rs + search: pub struct ContributionAndProof + spec: | + + class ContributionAndProof(Container): + aggregator_index: ValidatorIndex + contribution: SyncCommitteeContribution + selection_proof: BLSSignature + + +- name: DataColumnSidecar#fulu + sources: + - file: consensus/types/src/data/data_column_sidecar.rs + search: pub struct DataColumnSidecar + spec: | + + class DataColumnSidecar(Container): + index: ColumnIndex + column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK] + kzg_commitments: List[KZGCommitment, 
MAX_BLOB_COMMITMENTS_PER_BLOCK] + kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK] + signed_block_header: SignedBeaconBlockHeader + kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH] + + +- name: DataColumnSidecar#gloas + sources: [] + spec: | + + class DataColumnSidecar(Container): + index: ColumnIndex + column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK] + kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] + kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK] + # [Modified in Gloas:EIP7732] + # Removed `signed_block_header` + # [Modified in Gloas:EIP7732] + # Removed `kzg_commitments_inclusion_proof` + # [New in Gloas:EIP7732] + slot: Slot + # [New in Gloas:EIP7732] + beacon_block_root: Root + + +- name: DataColumnsByRootIdentifier#fulu + sources: + - file: consensus/types/src/data/data_column_sidecar.rs + search: pub struct DataColumnsByRootIdentifier + spec: | + + class DataColumnsByRootIdentifier(Container): + block_root: Root + columns: List[ColumnIndex, NUMBER_OF_COLUMNS] + + +- name: Deposit#phase0 + sources: + - file: consensus/types/src/deposit/deposit.rs + search: pub struct Deposit + spec: | + + class Deposit(Container): + proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH + 1] + data: DepositData + + +- name: DepositData#phase0 + sources: + - file: consensus/types/src/deposit/deposit_data.rs + search: pub struct DepositData + spec: | + + class DepositData(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + signature: BLSSignature + + +- name: DepositMessage#phase0 + sources: + - file: consensus/types/src/deposit/deposit_message.rs + search: pub struct DepositMessage + spec: | + + class DepositMessage(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + + +- name: DepositRequest#electra + sources: + - file: consensus/types/src/deposit/deposit_request.rs + search: pub struct DepositRequest + spec: | + + class 
DepositRequest(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + signature: BLSSignature + index: uint64 + + +- name: Eth1Block#phase0 + sources: [] + spec: | + + class Eth1Block(Container): + timestamp: uint64 + deposit_root: Root + deposit_count: uint64 + + +- name: Eth1Data#phase0 + sources: + - file: consensus/types/src/execution/eth1_data.rs + search: pub struct Eth1Data + spec: | + + class Eth1Data(Container): + deposit_root: Root + deposit_count: uint64 + block_hash: Hash32 + + +- name: ExecutionPayload#bellatrix + sources: + - file: consensus/types/src/execution/execution_payload.rs + search: pub struct ExecutionPayload + spec: | + + class ExecutionPayload(Container): + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + block_hash: Hash32 + transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + + +- name: ExecutionPayload#capella + sources: + - file: consensus/types/src/execution/execution_payload.rs + search: pub struct ExecutionPayload + spec: | + + class ExecutionPayload(Container): + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + block_hash: Hash32 + transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + # [New in Capella] + withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] + + +- name: ExecutionPayload#deneb + sources: + - file: consensus/types/src/execution/execution_payload.rs + search: pub struct ExecutionPayload + spec: | + + class 
ExecutionPayload(Container): + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + block_hash: Hash32 + transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] + # [New in Deneb:EIP4844] + blob_gas_used: uint64 + # [New in Deneb:EIP4844] + excess_blob_gas: uint64 + + +- name: ExecutionPayloadBid#gloas + sources: [] + spec: | + + class ExecutionPayloadBid(Container): + parent_block_hash: Hash32 + parent_block_root: Root + block_hash: Hash32 + prev_randao: Bytes32 + fee_recipient: ExecutionAddress + gas_limit: uint64 + builder_index: ValidatorIndex + slot: Slot + value: Gwei + execution_payment: Gwei + blob_kzg_commitments_root: Root + + +- name: ExecutionPayloadEnvelope#gloas + sources: [] + spec: | + + class ExecutionPayloadEnvelope(Container): + payload: ExecutionPayload + execution_requests: ExecutionRequests + builder_index: ValidatorIndex + beacon_block_root: Root + slot: Slot + blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] + state_root: Root + + +- name: ExecutionPayloadHeader#bellatrix + sources: + - file: consensus/types/src/execution/execution_payload_header.rs + search: pub struct ExecutionPayloadHeader + spec: | + + class ExecutionPayloadHeader(Container): + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + block_hash: Hash32 + transactions_root: Root + + +- name: ExecutionPayloadHeader#capella + sources: + - file: 
consensus/types/src/execution/execution_payload_header.rs + search: pub struct ExecutionPayloadHeader + spec: | + + class ExecutionPayloadHeader(Container): + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + block_hash: Hash32 + transactions_root: Root + # [New in Capella] + withdrawals_root: Root + + +- name: ExecutionPayloadHeader#deneb + sources: + - file: consensus/types/src/execution/execution_payload_header.rs + search: pub struct ExecutionPayloadHeader + spec: | + + class ExecutionPayloadHeader(Container): + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + block_hash: Hash32 + transactions_root: Root + withdrawals_root: Root + # [New in Deneb:EIP4844] + blob_gas_used: uint64 + # [New in Deneb:EIP4844] + excess_blob_gas: uint64 + + +- name: ExecutionRequests#electra + sources: + - file: consensus/types/src/execution/execution_requests.rs + search: pub struct ExecutionRequests + spec: | + + class ExecutionRequests(Container): + # [New in Electra:EIP6110] + deposits: List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD] + # [New in Electra:EIP7002:EIP7251] + withdrawals: List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD] + # [New in Electra:EIP7251] + consolidations: List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD] + + +- name: Fork#phase0 + sources: + - file: consensus/types/src/fork/fork.rs + search: pub struct Fork + spec: | + + class Fork(Container): + previous_version: Version + 
current_version: Version + epoch: Epoch + + +- name: ForkChoiceNode#gloas + sources: [] + spec: | + + class ForkChoiceNode(Container): + root: Root + payload_status: PayloadStatus # One of PAYLOAD_STATUS_* values + + +- name: ForkData#phase0 + sources: + - file: consensus/types/src/fork/fork_data.rs + search: pub struct ForkData + spec: | + + class ForkData(Container): + current_version: Version + genesis_validators_root: Root + + +- name: HistoricalBatch#phase0 + sources: + - file: consensus/types/src/state/historical_batch.rs + search: pub struct HistoricalBatch + spec: | + + class HistoricalBatch(Container): + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + + +- name: HistoricalSummary#capella + sources: + - file: consensus/types/src/state/historical_summary.rs + search: pub struct HistoricalSummary + spec: | + + class HistoricalSummary(Container): + block_summary_root: Root + state_summary_root: Root + + +- name: IndexedAttestation#phase0 + sources: + - file: consensus/types/src/attestation/indexed_attestation.rs + search: pub struct IndexedAttestation + spec: | + + class IndexedAttestation(Container): + attesting_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE] + data: AttestationData + signature: BLSSignature + + +- name: IndexedAttestation#electra + sources: + - file: consensus/types/src/attestation/indexed_attestation.rs + search: pub struct IndexedAttestation + spec: | + + class IndexedAttestation(Container): + # [Modified in Electra:EIP7549] + attesting_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT] + data: AttestationData + signature: BLSSignature + + +- name: IndexedPayloadAttestation#gloas + sources: [] + spec: | + + class IndexedPayloadAttestation(Container): + attesting_indices: List[ValidatorIndex, PTC_SIZE] + data: PayloadAttestationData + signature: BLSSignature + + +- name: LightClientBootstrap#altair + sources: + - file: 
consensus/types/src/light_client/light_client_bootstrap.rs + search: pub struct LightClientBootstrap + spec: | + + class LightClientBootstrap(Container): + # Header matching the requested beacon block root + header: LightClientHeader + # Current sync committee corresponding to `header.beacon.state_root` + current_sync_committee: SyncCommittee + current_sync_committee_branch: CurrentSyncCommitteeBranch + + +- name: LightClientBootstrap#capella + sources: [] + spec: | + + class LightClientBootstrap(Container): + # [Modified in Capella] + header: LightClientHeader + current_sync_committee: SyncCommittee + current_sync_committee_branch: CurrentSyncCommitteeBranch + + +- name: LightClientFinalityUpdate#altair + sources: + - file: consensus/types/src/light_client/light_client_finality_update.rs + search: pub struct LightClientFinalityUpdate + spec: | + + class LightClientFinalityUpdate(Container): + # Header attested to by the sync committee + attested_header: LightClientHeader + # Finalized header corresponding to `attested_header.beacon.state_root` + finalized_header: LightClientHeader + finality_branch: FinalityBranch + # Sync committee aggregate signature + sync_aggregate: SyncAggregate + # Slot at which the aggregate signature was created (untrusted) + signature_slot: Slot + + +- name: LightClientFinalityUpdate#capella + sources: [] + spec: | + + class LightClientFinalityUpdate(Container): + # [Modified in Capella] + attested_header: LightClientHeader + # [Modified in Capella] + finalized_header: LightClientHeader + finality_branch: FinalityBranch + sync_aggregate: SyncAggregate + signature_slot: Slot + + +- name: LightClientHeader#altair + sources: + - file: consensus/types/src/light_client/light_client_header.rs + search: pub struct LightClientHeader + spec: | + + class LightClientHeader(Container): + beacon: BeaconBlockHeader + + +- name: LightClientHeader#capella + sources: + - file: consensus/types/src/light_client/light_client_header.rs + search: pub struct 
LightClientHeader + spec: | + + class LightClientHeader(Container): + beacon: BeaconBlockHeader + # [New in Capella] + execution: ExecutionPayloadHeader + # [New in Capella] + execution_branch: ExecutionBranch + + +- name: LightClientOptimisticUpdate#altair + sources: + - file: consensus/types/src/light_client/light_client_optimistic_update.rs + search: pub struct LightClientOptimisticUpdate + spec: | + + class LightClientOptimisticUpdate(Container): + # Header attested to by the sync committee + attested_header: LightClientHeader + # Sync committee aggregate signature + sync_aggregate: SyncAggregate + # Slot at which the aggregate signature was created (untrusted) + signature_slot: Slot + + +- name: LightClientOptimisticUpdate#capella + sources: [] + spec: | + + class LightClientOptimisticUpdate(Container): + # [Modified in Capella] + attested_header: LightClientHeader + sync_aggregate: SyncAggregate + signature_slot: Slot + + +- name: LightClientUpdate#altair + sources: + - file: consensus/types/src/light_client/light_client_update.rs + search: pub struct LightClientUpdate + spec: | + + class LightClientUpdate(Container): + # Header attested to by the sync committee + attested_header: LightClientHeader + # Next sync committee corresponding to `attested_header.beacon.state_root` + next_sync_committee: SyncCommittee + next_sync_committee_branch: NextSyncCommitteeBranch + # Finalized header corresponding to `attested_header.beacon.state_root` + finalized_header: LightClientHeader + finality_branch: FinalityBranch + # Sync committee aggregate signature + sync_aggregate: SyncAggregate + # Slot at which the aggregate signature was created (untrusted) + signature_slot: Slot + + +- name: LightClientUpdate#capella + sources: [] + spec: | + + class LightClientUpdate(Container): + # [Modified in Capella] + attested_header: LightClientHeader + next_sync_committee: SyncCommittee + next_sync_committee_branch: NextSyncCommitteeBranch + # [Modified in Capella] + 
finalized_header: LightClientHeader + finality_branch: FinalityBranch + sync_aggregate: SyncAggregate + signature_slot: Slot + + +- name: MatrixEntry#fulu + sources: [] + spec: | + + class MatrixEntry(Container): + cell: Cell + kzg_proof: KZGProof + column_index: ColumnIndex + row_index: RowIndex + + +- name: PayloadAttestation#gloas + sources: [] + spec: | + + class PayloadAttestation(Container): + aggregation_bits: Bitvector[PTC_SIZE] + data: PayloadAttestationData + signature: BLSSignature + + +- name: PayloadAttestationData#gloas + sources: [] + spec: | + + class PayloadAttestationData(Container): + beacon_block_root: Root + slot: Slot + payload_present: boolean + blob_data_available: boolean + + +- name: PayloadAttestationMessage#gloas + sources: [] + spec: | + + class PayloadAttestationMessage(Container): + validator_index: ValidatorIndex + data: PayloadAttestationData + signature: BLSSignature + + +- name: PendingAttestation#phase0 + sources: + - file: consensus/types/src/attestation/pending_attestation.rs + search: pub struct PendingAttestation + spec: | + + class PendingAttestation(Container): + aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] + data: AttestationData + inclusion_delay: Slot + proposer_index: ValidatorIndex + + +- name: PendingConsolidation#electra + sources: + - file: consensus/types/src/consolidation/pending_consolidation.rs + search: pub struct PendingConsolidation + spec: | + + class PendingConsolidation(Container): + source_index: ValidatorIndex + target_index: ValidatorIndex + + +- name: PendingDeposit#electra + sources: + - file: consensus/types/src/deposit/pending_deposit.rs + search: pub struct PendingDeposit + spec: | + + class PendingDeposit(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + signature: BLSSignature + slot: Slot + + +- name: PendingPartialWithdrawal#electra + sources: + - file: consensus/types/src/withdrawal/pending_partial_withdrawal.rs + search: pub struct 
PendingPartialWithdrawal + spec: | + + class PendingPartialWithdrawal(Container): + validator_index: ValidatorIndex + amount: Gwei + withdrawable_epoch: Epoch + + +- name: PowBlock#bellatrix + sources: [] + spec: | + + class PowBlock(Container): + block_hash: Hash32 + parent_hash: Hash32 + total_difficulty: uint256 + + +- name: ProposerSlashing#phase0 + sources: + - file: consensus/types/src/slashing/proposer_slashing.rs + search: pub struct ProposerSlashing + spec: | + + class ProposerSlashing(Container): + signed_header_1: SignedBeaconBlockHeader + signed_header_2: SignedBeaconBlockHeader + + +- name: SignedAggregateAndProof#phase0 + sources: + - file: consensus/types/src/attestation/signed_aggregate_and_proof.rs + search: pub struct SignedAggregateAndProof + spec: | + + class SignedAggregateAndProof(Container): + message: AggregateAndProof + signature: BLSSignature + + +- name: SignedAggregateAndProof#electra + sources: + - file: consensus/types/src/attestation/signed_aggregate_and_proof.rs + search: pub struct SignedAggregateAndProof + spec: | + + class SignedAggregateAndProof(Container): + # [Modified in Electra:EIP7549] + message: AggregateAndProof + signature: BLSSignature + + +- name: SignedBLSToExecutionChange#capella + sources: + - file: consensus/types/src/execution/signed_bls_to_execution_change.rs + search: pub struct SignedBlsToExecutionChange + spec: | + + class SignedBLSToExecutionChange(Container): + message: BLSToExecutionChange + signature: BLSSignature + + +- name: SignedBeaconBlock#phase0 + sources: + - file: consensus/types/src/block/signed_beacon_block.rs + search: 'pub struct SignedBeaconBlock = FullPayload> {' + spec: | + + class SignedBeaconBlock(Container): + message: BeaconBlock + signature: BLSSignature + + +- name: SignedBeaconBlockHeader#phase0 + sources: + - file: consensus/types/src/block/signed_beacon_block_header.rs + search: pub struct SignedBeaconBlockHeader + spec: | + + class SignedBeaconBlockHeader(Container): + message: 
BeaconBlockHeader + signature: BLSSignature + + +- name: SignedContributionAndProof#altair + sources: + - file: consensus/types/src/sync_committee/signed_contribution_and_proof.rs + search: pub struct SignedContributionAndProof + spec: | + + class SignedContributionAndProof(Container): + message: ContributionAndProof + signature: BLSSignature + + +- name: SignedExecutionPayloadBid#gloas + sources: [] + spec: | + + class SignedExecutionPayloadBid(Container): + message: ExecutionPayloadBid + signature: BLSSignature + + +- name: SignedExecutionPayloadEnvelope#gloas + sources: [] + spec: | + + class SignedExecutionPayloadEnvelope(Container): + message: ExecutionPayloadEnvelope + signature: BLSSignature + + +- name: SignedVoluntaryExit#phase0 + sources: + - file: consensus/types/src/exit/signed_voluntary_exit.rs + search: pub struct SignedVoluntaryExit + spec: | + + class SignedVoluntaryExit(Container): + message: VoluntaryExit + signature: BLSSignature + + +- name: SigningData#phase0 + sources: + - file: consensus/types/src/core/signing_data.rs + search: pub struct SigningData + spec: | + + class SigningData(Container): + object_root: Root + domain: Domain + + +- name: SingleAttestation#electra + sources: + - file: consensus/types/src/attestation/attestation.rs + search: pub struct SingleAttestation + spec: | + + class SingleAttestation(Container): + committee_index: CommitteeIndex + attester_index: ValidatorIndex + data: AttestationData + signature: BLSSignature + + +- name: SyncAggregate#altair + sources: + - file: consensus/types/src/sync_committee/sync_aggregate.rs + search: pub struct SyncAggregate + spec: | + + class SyncAggregate(Container): + sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE] + sync_committee_signature: BLSSignature + + +- name: SyncAggregatorSelectionData#altair + sources: + - file: consensus/types/src/sync_committee/sync_aggregator_selection_data.rs + search: pub struct SyncAggregatorSelectionData + spec: | + + class 
SyncAggregatorSelectionData(Container): + slot: Slot + subcommittee_index: uint64 + + +- name: SyncCommittee#altair + sources: + - file: consensus/types/src/sync_committee/sync_committee.rs + search: pub struct SyncCommittee + spec: | + + class SyncCommittee(Container): + pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE] + aggregate_pubkey: BLSPubkey + + +- name: SyncCommitteeContribution#altair + sources: + - file: consensus/types/src/sync_committee/sync_committee_contribution.rs + search: pub struct SyncCommitteeContribution + spec: | + + class SyncCommitteeContribution(Container): + slot: Slot + beacon_block_root: Root + subcommittee_index: uint64 + aggregation_bits: Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT] + signature: BLSSignature + + +- name: SyncCommitteeMessage#altair + sources: + - file: consensus/types/src/sync_committee/sync_committee_message.rs + search: pub struct SyncCommitteeMessage + spec: | + + class SyncCommitteeMessage(Container): + slot: Slot + beacon_block_root: Root + validator_index: ValidatorIndex + signature: BLSSignature + + +- name: Validator#phase0 + sources: + - file: consensus/types/src/validator/validator.rs + search: pub struct Validator + spec: | + + class Validator(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + effective_balance: Gwei + slashed: boolean + activation_eligibility_epoch: Epoch + activation_epoch: Epoch + exit_epoch: Epoch + withdrawable_epoch: Epoch + + +- name: VoluntaryExit#phase0 + sources: + - file: consensus/types/src/exit/voluntary_exit.rs + search: pub struct VoluntaryExit + spec: | + + class VoluntaryExit(Container): + epoch: Epoch + validator_index: ValidatorIndex + + +- name: Withdrawal#capella + sources: + - file: consensus/types/src/withdrawal/withdrawal.rs + search: pub struct Withdrawal + spec: | + + class Withdrawal(Container): + index: WithdrawalIndex + validator_index: ValidatorIndex + address: ExecutionAddress + amount: Gwei + + +- name: 
WithdrawalRequest#electra + sources: + - file: consensus/types/src/withdrawal/withdrawal_request.rs + search: pub struct WithdrawalRequest + spec: | + + class WithdrawalRequest(Container): + source_address: ExecutionAddress + validator_pubkey: BLSPubkey + amount: Gwei + diff --git a/specrefs/dataclasses.yml b/specrefs/dataclasses.yml new file mode 100644 index 00000000000..2dcaa48fd9e --- /dev/null +++ b/specrefs/dataclasses.yml @@ -0,0 +1,290 @@ +- name: BlobParameters#fulu + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub struct BlobParameters + spec: | + + class BlobParameters: + epoch: Epoch + max_blobs_per_block: uint64 + + +- name: BlobsBundle#deneb + sources: + - file: common/eth2/src/types.rs + search: pub struct BlobsBundle + spec: | + + class BlobsBundle(object): + commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] + proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK] + blobs: List[Blob, MAX_BLOB_COMMITMENTS_PER_BLOCK] + + +- name: BlobsBundle#fulu + sources: + - file: common/eth2/src/types.rs + search: pub struct BlobsBundle + spec: | + + class BlobsBundle(object): + commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] + # [Modified in Fulu:EIP7594] + proofs: List[KZGProof, FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK] + blobs: List[Blob, MAX_BLOB_COMMITMENTS_PER_BLOCK] + + +- name: GetPayloadResponse#bellatrix + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct GetPayloadResponse + spec: | + + class GetPayloadResponse(object): + execution_payload: ExecutionPayload + + +- name: GetPayloadResponse#capella + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct GetPayloadResponse + spec: | + + class GetPayloadResponse(object): + execution_payload: ExecutionPayload + block_value: uint256 + + +- name: GetPayloadResponse#deneb + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct 
GetPayloadResponse + spec: | + + class GetPayloadResponse(object): + execution_payload: ExecutionPayload + block_value: uint256 + # [New in Deneb:EIP4844] + blobs_bundle: BlobsBundle + + +- name: GetPayloadResponse#electra + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct GetPayloadResponse + spec: | + + class GetPayloadResponse(object): + execution_payload: ExecutionPayload + block_value: uint256 + blobs_bundle: BlobsBundle + # [New in Electra] + execution_requests: Sequence[bytes] + + +- name: GetPayloadResponse#fulu + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct GetPayloadResponse + spec: | + + class GetPayloadResponse(object): + execution_payload: ExecutionPayload + block_value: uint256 + # [Modified in Fulu:EIP7594] + blobs_bundle: BlobsBundle + execution_requests: Sequence[bytes] + + +- name: LatestMessage#phase0 + sources: [] + spec: | + + @dataclass(eq=True, frozen=True) + class LatestMessage(object): + epoch: Epoch + root: Root + + +- name: LatestMessage#gloas + sources: [] + spec: | + + @dataclass(eq=True, frozen=True) + class LatestMessage(object): + slot: Slot + root: Root + payload_present: boolean + + +- name: LightClientStore#altair + sources: [] + spec: | + + class LightClientStore(object): + # Header that is finalized + finalized_header: LightClientHeader + # Sync committees corresponding to the finalized header + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # Best available header to switch finalized head to if we see nothing else + best_valid_update: Optional[LightClientUpdate] + # Most recent available reasonably-safe header + optimistic_header: LightClientHeader + # Max number of active participants in a sync committee (used to calculate safety threshold) + previous_max_active_participants: uint64 + current_max_active_participants: uint64 + + +- name: LightClientStore#capella + sources: [] + spec: | + + class LightClientStore(object): 
+ # [Modified in Capella] + finalized_header: LightClientHeader + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # [Modified in Capella] + best_valid_update: Optional[LightClientUpdate] + # [Modified in Capella] + optimistic_header: LightClientHeader + previous_max_active_participants: uint64 + current_max_active_participants: uint64 + + +- name: NewPayloadRequest#bellatrix + sources: + - file: beacon_node/execution_layer/src/engine_api/new_payload_request.rs + search: pub struct NewPayloadRequest + spec: | + + class NewPayloadRequest(object): + execution_payload: ExecutionPayload + + +- name: NewPayloadRequest#deneb + sources: + - file: beacon_node/execution_layer/src/engine_api/new_payload_request.rs + search: pub struct NewPayloadRequest + spec: | + + class NewPayloadRequest(object): + execution_payload: ExecutionPayload + versioned_hashes: Sequence[VersionedHash] + parent_beacon_block_root: Root + + +- name: NewPayloadRequest#electra + sources: + - file: beacon_node/execution_layer/src/engine_api/new_payload_request.rs + search: pub struct NewPayloadRequest + spec: | + + class NewPayloadRequest(object): + execution_payload: ExecutionPayload + versioned_hashes: Sequence[VersionedHash] + parent_beacon_block_root: Root + # [New in Electra] + execution_requests: ExecutionRequests + + +- name: OptimisticStore#bellatrix + sources: [] + spec: | + + class OptimisticStore(object): + optimistic_roots: Set[Root] + head_block_root: Root + blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) + block_states: Dict[Root, BeaconState] = field(default_factory=dict) + + +- name: PayloadAttributes#bellatrix + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct PayloadAttributes + spec: | + + class PayloadAttributes(object): + timestamp: uint64 + prev_randao: Bytes32 + suggested_fee_recipient: ExecutionAddress + + +- name: PayloadAttributes#capella + sources: + - file: 
beacon_node/execution_layer/src/engine_api.rs + search: pub struct PayloadAttributes + spec: | + + class PayloadAttributes(object): + timestamp: uint64 + prev_randao: Bytes32 + suggested_fee_recipient: ExecutionAddress + # [New in Capella] + withdrawals: Sequence[Withdrawal] + + +- name: PayloadAttributes#deneb + sources: + - file: beacon_node/execution_layer/src/engine_api.rs + search: pub struct PayloadAttributes + spec: | + + class PayloadAttributes(object): + timestamp: uint64 + prev_randao: Bytes32 + suggested_fee_recipient: ExecutionAddress + withdrawals: Sequence[Withdrawal] + # [New in Deneb:EIP4788] + parent_beacon_block_root: Root + + +- name: Store#phase0 + sources: + - file: beacon_node/beacon_chain/src/beacon_fork_choice_store.rs + search: pub struct BeaconForkChoiceStore + spec: | + + class Store(object): + time: uint64 + genesis_time: uint64 + justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + unrealized_justified_checkpoint: Checkpoint + unrealized_finalized_checkpoint: Checkpoint + proposer_boost_root: Root + equivocating_indices: Set[ValidatorIndex] + blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) + block_states: Dict[Root, BeaconState] = field(default_factory=dict) + block_timeliness: Dict[Root, boolean] = field(default_factory=dict) + checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict) + latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict) + unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict) + + +- name: Store#gloas + sources: [] + spec: | + + class Store(object): + time: uint64 + genesis_time: uint64 + justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + unrealized_justified_checkpoint: Checkpoint + unrealized_finalized_checkpoint: Checkpoint + proposer_boost_root: Root + equivocating_indices: Set[ValidatorIndex] + blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) + block_states: Dict[Root, 
BeaconState] = field(default_factory=dict) + block_timeliness: Dict[Root, boolean] = field(default_factory=dict) + checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict) + latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict) + unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict) + # [New in Gloas:EIP7732] + execution_payload_states: Dict[Root, BeaconState] = field(default_factory=dict) + # [New in Gloas:EIP7732] + ptc_vote: Dict[Root, Vector[boolean, PTC_SIZE]] = field(default_factory=dict) + diff --git a/specrefs/functions.yml b/specrefs/functions.yml new file mode 100644 index 00000000000..5789b3d247f --- /dev/null +++ b/specrefs/functions.yml @@ -0,0 +1,11816 @@ +- name: _fft_field#fulu + sources: [] + spec: | + + def _fft_field( + vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement] + ) -> Sequence[BLSFieldElement]: + if len(vals) == 1: + return vals + L = _fft_field(vals[::2], roots_of_unity[::2]) + R = _fft_field(vals[1::2], roots_of_unity[::2]) + o = [BLSFieldElement(0) for _ in vals] + for i, (x, y) in enumerate(zip(L, R)): + y_times_root = y * roots_of_unity[i] + o[i] = x + y_times_root + o[i + len(L)] = x - y_times_root + return o + + +- name: add_flag#altair + sources: + - file: consensus/types/src/attestation/participation_flags.rs + search: pub fn add_flag( + spec: | + + def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags: + """ + Return a new ``ParticipationFlags`` adding ``flag_index`` to ``flags``. + """ + flag = ParticipationFlags(2**flag_index) + return flags | flag + + +- name: add_polynomialcoeff#fulu + sources: [] + spec: | + + def add_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff: + """ + Sum the coefficient form polynomials ``a`` and ``b``. 
+ """ + a, b = (a, b) if len(a) >= len(b) else (b, a) + length_a, length_b = len(a), len(b) + return PolynomialCoeff( + [a[i] + (b[i] if i < length_b else BLSFieldElement(0)) for i in range(length_a)] + ) + + +- name: add_validator_to_registry#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn add_validator_to_registry( + spec: | + + def add_validator_to_registry( + state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 + ) -> None: + state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) + state.balances.append(amount) + + +- name: add_validator_to_registry#altair + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn add_validator_to_registry( + spec: | + + def add_validator_to_registry( + state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 + ) -> None: + index = get_index_for_new_validator(state) + validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount) + set_or_append_list(state.validators, index, validator) + set_or_append_list(state.balances, index, amount) + # [New in Altair] + set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) + set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) + set_or_append_list(state.inactivity_scores, index, uint64(0)) + + +- name: add_validator_to_registry#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn add_validator_to_registry( + spec: | + + def add_validator_to_registry( + state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 + ) -> None: + index = get_index_for_new_validator(state) + # [Modified in Electra:EIP7251] + validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount) + set_or_append_list(state.validators, index, validator) + set_or_append_list(state.balances, 
index, amount) + set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) + set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) + set_or_append_list(state.inactivity_scores, index, uint64(0)) + + +- name: apply_deposit#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn apply_deposit< + spec: | + + def apply_deposit( + state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature, + ) -> None: + validator_pubkeys = [v.pubkey for v in state.validators] + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + deposit_message = DepositMessage( + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, + ) + # Fork-agnostic domain since deposits are valid across forks + domain = compute_domain(DOMAIN_DEPOSIT) + signing_root = compute_signing_root(deposit_message, domain) + if bls.Verify(pubkey, signing_root, signature): + add_validator_to_registry(state, pubkey, withdrawal_credentials, amount) + else: + # Increase balance by deposit amount + index = ValidatorIndex(validator_pubkeys.index(pubkey)) + increase_balance(state, index, amount) + + +- name: apply_deposit#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn apply_deposit< + spec: | + + def apply_deposit( + state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature, + ) -> None: + validator_pubkeys = [v.pubkey for v in state.validators] + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature): + # [Modified in 
Electra:EIP7251] + add_validator_to_registry(state, pubkey, withdrawal_credentials, Gwei(0)) + else: + return + + # [Modified in Electra:EIP7251] + # Increase balance by deposit amount + state.pending_deposits.append( + PendingDeposit( + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, + signature=signature, + slot=GENESIS_SLOT, # Use GENESIS_SLOT to distinguish from a pending deposit request + ) + ) + + +- name: apply_light_client_update#altair + sources: [] + spec: | + + def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None: + store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot) + update_finalized_period = compute_sync_committee_period_at_slot( + update.finalized_header.beacon.slot + ) + if not is_next_sync_committee_known(store): + assert update_finalized_period == store_period + store.next_sync_committee = update.next_sync_committee + elif update_finalized_period == store_period + 1: + store.current_sync_committee = store.next_sync_committee + store.next_sync_committee = update.next_sync_committee + store.previous_max_active_participants = store.current_max_active_participants + store.current_max_active_participants = 0 + if update.finalized_header.beacon.slot > store.finalized_header.beacon.slot: + store.finalized_header = update.finalized_header + if store.finalized_header.beacon.slot > store.optimistic_header.beacon.slot: + store.optimistic_header = store.finalized_header + + +- name: apply_pending_deposit#electra + sources: [] + spec: | + + def apply_pending_deposit(state: BeaconState, deposit: PendingDeposit) -> None: + """ + Applies ``deposit`` to the ``state``. 
+ """ + validator_pubkeys = [v.pubkey for v in state.validators] + if deposit.pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + if is_valid_deposit_signature( + deposit.pubkey, deposit.withdrawal_credentials, deposit.amount, deposit.signature + ): + add_validator_to_registry( + state, deposit.pubkey, deposit.withdrawal_credentials, deposit.amount + ) + else: + validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey)) + increase_balance(state, validator_index, deposit.amount) + + +- name: bit_reversal_permutation#deneb + sources: [] + spec: | + + def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]: + """ + Return a copy with bit-reversed permutation. The permutation is an involution (inverts itself). + + The input and output are a sequence of generic type ``T`` objects. + """ + return [sequence[reverse_bits(i, len(sequence))] for i in range(len(sequence))] + + +- name: blob_to_kzg_commitment#deneb + sources: [] + spec: | + + def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment: + """ + Public method. + """ + assert len(blob) == BYTES_PER_BLOB + return g1_lincomb(bit_reversal_permutation(KZG_SETUP_G1_LAGRANGE), blob_to_polynomial(blob)) + + +- name: blob_to_polynomial#deneb + sources: [] + spec: | + + def blob_to_polynomial(blob: Blob) -> Polynomial: + """ + Convert a blob to list of BLS field scalars. 
+ """ + polynomial = Polynomial() + for i in range(FIELD_ELEMENTS_PER_BLOB): + value = bytes_to_bls_field( + blob[i * BYTES_PER_FIELD_ELEMENT : (i + 1) * BYTES_PER_FIELD_ELEMENT] + ) + polynomial[i] = value + return polynomial + + +- name: block_to_light_client_header#altair + sources: + - file: consensus/types/src/light_client/light_client_header.rs + search: 'impl LightClientHeaderAltair {[\s\S]*?pub fn block_to_light_client_header' + regex: true + spec: | + + def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: + return LightClientHeader( + beacon=BeaconBlockHeader( + slot=block.message.slot, + proposer_index=block.message.proposer_index, + parent_root=block.message.parent_root, + state_root=block.message.state_root, + body_root=hash_tree_root(block.message.body), + ), + ) + + +- name: block_to_light_client_header#capella + sources: + - file: consensus/types/src/light_client/light_client_header.rs + search: 'impl LightClientHeaderCapella {[\s\S]*?pub fn block_to_light_client_header' + regex: true + spec: | + + def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: + epoch = compute_epoch_at_slot(block.message.slot) + + if epoch >= CAPELLA_FORK_EPOCH: + payload = block.message.body.execution_payload + execution_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + ) + execution_branch = ExecutionBranch( + compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX) + 
) + else: + # Note that during fork transitions, `finalized_header` may still point to earlier forks. + # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`), + # it was not included in the corresponding light client data. To ensure compatibility + # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data. + execution_header = ExecutionPayloadHeader() + execution_branch = ExecutionBranch() + + return LightClientHeader( + beacon=BeaconBlockHeader( + slot=block.message.slot, + proposer_index=block.message.proposer_index, + parent_root=block.message.parent_root, + state_root=block.message.state_root, + body_root=hash_tree_root(block.message.body), + ), + execution=execution_header, + execution_branch=execution_branch, + ) + + +- name: block_to_light_client_header#deneb + sources: + - file: consensus/types/src/light_client/light_client_header.rs + search: 'impl LightClientHeaderDeneb {[\s\S]*?pub fn block_to_light_client_header' + regex: true + spec: | + + def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: + epoch = compute_epoch_at_slot(block.message.slot) + + if epoch >= CAPELLA_FORK_EPOCH: + payload = block.message.body.execution_payload + execution_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + ) + + # [New in Deneb:EIP4844] + if epoch >= DENEB_FORK_EPOCH: + execution_header.blob_gas_used = payload.blob_gas_used + 
execution_header.excess_blob_gas = payload.excess_blob_gas + + execution_branch = ExecutionBranch( + compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX) + ) + else: + # Note that during fork transitions, `finalized_header` may still point to earlier forks. + # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`), + # it was not included in the corresponding light client data. To ensure compatibility + # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data. + execution_header = ExecutionPayloadHeader() + execution_branch = ExecutionBranch() + + return LightClientHeader( + beacon=BeaconBlockHeader( + slot=block.message.slot, + proposer_index=block.message.proposer_index, + parent_root=block.message.parent_root, + state_root=block.message.state_root, + body_root=hash_tree_root(block.message.body), + ), + execution=execution_header, + execution_branch=execution_branch, + ) + + +- name: bls_field_to_bytes#deneb + sources: [] + spec: | + + def bls_field_to_bytes(x: BLSFieldElement) -> Bytes32: + return int.to_bytes(int(x), 32, KZG_ENDIANNESS) + + +- name: bytes_to_bls_field#deneb + sources: [] + spec: | + + def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement: + """ + Convert untrusted bytes to a trusted and validated BLS scalar field element. + This function does not accept inputs greater than the BLS modulus. + """ + field_element = int.from_bytes(b, KZG_ENDIANNESS) + assert field_element < BLS_MODULUS + return BLSFieldElement(field_element) + + +- name: bytes_to_kzg_commitment#deneb + sources: [] + spec: | + + def bytes_to_kzg_commitment(b: Bytes48) -> KZGCommitment: + """ + Convert untrusted bytes into a trusted and validated KZGCommitment. + """ + validate_kzg_g1(b) + return KZGCommitment(b) + + +- name: bytes_to_kzg_proof#deneb + sources: [] + spec: | + + def bytes_to_kzg_proof(b: Bytes48) -> KZGProof: + """ + Convert untrusted bytes into a trusted and validated KZGProof. 
+ """ + validate_kzg_g1(b) + return KZGProof(b) + + +- name: bytes_to_uint64#phase0 + sources: [] + spec: | + + def bytes_to_uint64(data: bytes) -> uint64: + """ + Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian. + """ + return uint64(int.from_bytes(data, ENDIANNESS)) + + +- name: calculate_committee_fraction#phase0 + sources: + - file: consensus/proto_array/src/proto_array.rs + search: pub fn calculate_committee_fraction< + spec: | + + def calculate_committee_fraction(state: BeaconState, committee_percent: uint64) -> Gwei: + committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH + return Gwei((committee_weight * committee_percent) // 100) + + +- name: cell_to_coset_evals#fulu + sources: [] + spec: | + + def cell_to_coset_evals(cell: Cell) -> CosetEvals: + """ + Convert an untrusted ``Cell`` into a trusted ``CosetEvals``. + """ + evals = CosetEvals() + for i in range(FIELD_ELEMENTS_PER_CELL): + start = i * BYTES_PER_FIELD_ELEMENT + end = (i + 1) * BYTES_PER_FIELD_ELEMENT + evals[i] = bytes_to_bls_field(cell[start:end]) + return evals + + +- name: check_if_validator_active#phase0 + sources: [] + spec: | + + def check_if_validator_active(state: BeaconState, validator_index: ValidatorIndex) -> bool: + validator = state.validators[validator_index] + return is_active_validator(validator, get_current_epoch(state)) + + +- name: compute_activation_exit_epoch#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn compute_activation_exit_epoch( + spec: | + + def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: + """ + Return the epoch during which validator activations and exits initiated in ``epoch`` take effect. 
+ """ + return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) + + +- name: compute_balance_weighted_acceptance#gloas + sources: [] + spec: | + + def compute_balance_weighted_acceptance( + state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64 + ) -> bool: + """ + Return whether to accept the selection of the validator ``index``, with probability + proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``. + """ + MAX_RANDOM_VALUE = 2**16 - 1 + random_bytes = hash(seed + uint_to_bytes(i // 16)) + offset = i % 16 * 2 + random_value = bytes_to_uint64(random_bytes[offset : offset + 2]) + effective_balance = state.validators[index].effective_balance + return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value + + +- name: compute_balance_weighted_selection#gloas + sources: [] + spec: | + + def compute_balance_weighted_selection( + state: BeaconState, + indices: Sequence[ValidatorIndex], + seed: Bytes32, + size: uint64, + shuffle_indices: bool, + ) -> Sequence[ValidatorIndex]: + """ + Return ``size`` indices sampled by effective balance, using ``indices`` + as candidates. If ``shuffle_indices`` is ``True``, candidate indices + are themselves sampled from ``indices`` by shuffling it, otherwise + ``indices`` is traversed in order. + """ + total = uint64(len(indices)) + assert total > 0 + selected: List[ValidatorIndex] = [] + i = uint64(0) + while len(selected) < size: + next_index = i % total + if shuffle_indices: + next_index = compute_shuffled_index(next_index, total, seed) + candidate_index = indices[next_index] + if compute_balance_weighted_acceptance(state, candidate_index, seed, i): + selected.append(candidate_index) + i += 1 + return selected + + +- name: compute_blob_kzg_proof#deneb + sources: [] + spec: | + + def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof: + """ + Given a blob, return the KZG proof that is used to verify it against the commitment. 
+ This method does not verify that the commitment is correct with respect to `blob`. + Public method. + """ + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + commitment = bytes_to_kzg_commitment(commitment_bytes) + polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(blob, commitment) + proof, _ = compute_kzg_proof_impl(polynomial, evaluation_challenge) + return proof + + +- name: compute_cells#fulu + sources: [] + spec: | + + def compute_cells(blob: Blob) -> Vector[Cell, CELLS_PER_EXT_BLOB]: + """ + Given a blob, extend it and return all the cells of the extended blob. + + Public method. + """ + assert len(blob) == BYTES_PER_BLOB + + polynomial = blob_to_polynomial(blob) + polynomial_coeff = polynomial_eval_to_coeff(polynomial) + + cells = [] + for i in range(CELLS_PER_EXT_BLOB): + coset = coset_for_cell(CellIndex(i)) + ys = CosetEvals([evaluate_polynomialcoeff(polynomial_coeff, z) for z in coset]) + cells.append(coset_evals_to_cell(CosetEvals(ys))) + return cells + + +- name: compute_cells_and_kzg_proofs#fulu + sources: [] + spec: | + + def compute_cells_and_kzg_proofs( + blob: Blob, + ) -> Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]: + """ + Compute all the cell proofs for an extended blob. This is an inefficient O(n^2) algorithm, + for performant implementation the FK20 algorithm that runs in O(n log n) should be + used instead. + + Public method. 
+ """ + assert len(blob) == BYTES_PER_BLOB + + polynomial = blob_to_polynomial(blob) + polynomial_coeff = polynomial_eval_to_coeff(polynomial) + return compute_cells_and_kzg_proofs_polynomialcoeff(polynomial_coeff) + + +- name: compute_cells_and_kzg_proofs_polynomialcoeff#fulu + sources: [] + spec: | + + def compute_cells_and_kzg_proofs_polynomialcoeff( + polynomial_coeff: PolynomialCoeff, + ) -> Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]: + """ + Helper function which computes cells/proofs for a polynomial in coefficient form. + """ + cells, proofs = [], [] + for i in range(CELLS_PER_EXT_BLOB): + coset = coset_for_cell(CellIndex(i)) + proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset) + cells.append(coset_evals_to_cell(CosetEvals(ys))) + proofs.append(proof) + return cells, proofs + + +- name: compute_challenge#deneb + sources: [] + spec: | + + def compute_challenge(blob: Blob, commitment: KZGCommitment) -> BLSFieldElement: + """ + Return the Fiat-Shamir challenge required by the rest of the protocol. 
+ """ + + # Append the degree of the polynomial as a domain separator + degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 16, KZG_ENDIANNESS) + data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + + data += blob + data += commitment + + # Transcript has been prepared: time to create the challenge + return hash_to_bls_field(data) + + +- name: compute_columns_for_custody_group#fulu + sources: + - file: consensus/types/src/data/data_column_custody_group.rs + search: pub fn compute_columns_for_custody_group< + spec: | + + def compute_columns_for_custody_group(custody_group: CustodyIndex) -> Sequence[ColumnIndex]: + assert custody_group < NUMBER_OF_CUSTODY_GROUPS + columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS + return [ + ColumnIndex(NUMBER_OF_CUSTODY_GROUPS * i + custody_group) for i in range(columns_per_group) + ] + + +- name: compute_committee#phase0 + sources: + - file: consensus/types/src/state/committee_cache.rs + search: fn compute_committee( + spec: | + + def compute_committee( + indices: Sequence[ValidatorIndex], seed: Bytes32, index: uint64, count: uint64 + ) -> Sequence[ValidatorIndex]: + """ + Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``. 
+ """ + start = (len(indices) * index) // count + end = (len(indices) * uint64(index + 1)) // count + return [ + indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] + for i in range(start, end) + ] + + +- name: compute_consolidation_epoch_and_update_churn#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn compute_consolidation_epoch_and_update_churn( + spec: | + + def compute_consolidation_epoch_and_update_churn( + state: BeaconState, consolidation_balance: Gwei + ) -> Epoch: + earliest_consolidation_epoch = max( + state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)) + ) + per_epoch_consolidation_churn = get_consolidation_churn_limit(state) + # New epoch for consolidations. + if state.earliest_consolidation_epoch < earliest_consolidation_epoch: + consolidation_balance_to_consume = per_epoch_consolidation_churn + else: + consolidation_balance_to_consume = state.consolidation_balance_to_consume + + # Consolidation doesn't fit in the current earliest epoch. + if consolidation_balance > consolidation_balance_to_consume: + balance_to_process = consolidation_balance - consolidation_balance_to_consume + additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1 + earliest_consolidation_epoch += additional_epochs + consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn + + # Consume the balance and update state variables. 
+ state.consolidation_balance_to_consume = ( + consolidation_balance_to_consume - consolidation_balance + ) + state.earliest_consolidation_epoch = earliest_consolidation_epoch + + return state.earliest_consolidation_epoch + + +- name: compute_domain#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn compute_domain( + spec: | + + def compute_domain( + domain_type: DomainType, fork_version: Version = None, genesis_validators_root: Root = None + ) -> Domain: + """ + Return the domain for the ``domain_type`` and ``fork_version``. + """ + if fork_version is None: + fork_version = GENESIS_FORK_VERSION + if genesis_validators_root is None: + genesis_validators_root = Root() # all bytes zero by default + fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) + return Domain(domain_type + fork_data_root[:28]) + + +- name: compute_epoch_at_slot#phase0 + sources: + - file: consensus/types/src/core/slot_epoch.rs + search: pub fn epoch(self, slots_per_epoch: + spec: | + + def compute_epoch_at_slot(slot: Slot) -> Epoch: + """ + Return the epoch number at ``slot``. + """ + return Epoch(slot // SLOTS_PER_EPOCH) + + +- name: compute_exit_epoch_and_update_churn#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn compute_exit_epoch_and_update_churn( + spec: | + + def compute_exit_epoch_and_update_churn(state: BeaconState, exit_balance: Gwei) -> Epoch: + earliest_exit_epoch = max( + state.earliest_exit_epoch, compute_activation_exit_epoch(get_current_epoch(state)) + ) + per_epoch_churn = get_activation_exit_churn_limit(state) + # New epoch for exits. + if state.earliest_exit_epoch < earliest_exit_epoch: + exit_balance_to_consume = per_epoch_churn + else: + exit_balance_to_consume = state.exit_balance_to_consume + + # Exit doesn't fit in the current earliest epoch. 
+ if exit_balance > exit_balance_to_consume: + balance_to_process = exit_balance - exit_balance_to_consume + additional_epochs = (balance_to_process - 1) // per_epoch_churn + 1 + earliest_exit_epoch += additional_epochs + exit_balance_to_consume += additional_epochs * per_epoch_churn + + # Consume the balance and update state variables. + state.exit_balance_to_consume = exit_balance_to_consume - exit_balance + state.earliest_exit_epoch = earliest_exit_epoch + + return state.earliest_exit_epoch + + +- name: compute_fork_data_root#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn compute_fork_data_root( + spec: | + + def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: + """ + Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. + This is used primarily in signature domains to avoid collisions across forks/chains. + """ + return hash_tree_root( + ForkData( + current_version=current_version, + genesis_validators_root=genesis_validators_root, + ) + ) + + +- name: compute_fork_digest#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn compute_fork_digest( + spec: | + + def compute_fork_digest( + genesis_validators_root: Root, + epoch: Epoch, + ) -> ForkDigest: + """ + Return the 4-byte fork digest for the ``genesis_validators_root`` at a given ``epoch``. + + This is a digest primarily used for domain separation on the p2p layer. + 4-bytes suffices for practical separation of forks/chains. 
+ """ + fork_version = compute_fork_version(epoch) + base_digest = compute_fork_data_root(fork_version, genesis_validators_root) + return ForkDigest(base_digest[:4]) + + +- name: compute_fork_digest#fulu + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn compute_fork_digest( + spec: | + + def compute_fork_digest( + genesis_validators_root: Root, + epoch: Epoch, + ) -> ForkDigest: + """ + Return the 4-byte fork digest for the ``genesis_validators_root`` at a given ``epoch``. + + This is a digest primarily used for domain separation on the p2p layer. + 4-bytes suffices for practical separation of forks/chains. + """ + fork_version = compute_fork_version(epoch) + base_digest = compute_fork_data_root(fork_version, genesis_validators_root) + + # [Modified in Fulu:EIP7892] + # Bitmask digest with hash of blob parameters + blob_parameters = get_blob_parameters(epoch) + return ForkDigest( + bytes( + xor( + base_digest, + hash( + uint_to_bytes(uint64(blob_parameters.epoch)) + + uint_to_bytes(uint64(blob_parameters.max_blobs_per_block)) + ), + ) + )[:4] + ) + + +- name: compute_fork_version#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#altair + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. 
+ """ + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#bellatrix + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#capella + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#deneb + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= DENEB_FORK_EPOCH: + return DENEB_FORK_VERSION + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#electra + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. 
+ """ + if epoch >= ELECTRA_FORK_EPOCH: + return ELECTRA_FORK_VERSION + if epoch >= DENEB_FORK_EPOCH: + return DENEB_FORK_VERSION + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#fulu + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= FULU_FORK_EPOCH: + return FULU_FORK_VERSION + if epoch >= ELECTRA_FORK_EPOCH: + return ELECTRA_FORK_VERSION + if epoch >= DENEB_FORK_EPOCH: + return DENEB_FORK_VERSION + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_fork_version#gloas + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn fork_version_for_epoch( + spec: | + + def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= GLOAS_FORK_EPOCH: + return GLOAS_FORK_VERSION + if epoch >= FULU_FORK_EPOCH: + return FULU_FORK_VERSION + if epoch >= ELECTRA_FORK_EPOCH: + return ELECTRA_FORK_VERSION + if epoch >= DENEB_FORK_EPOCH: + return DENEB_FORK_VERSION + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION + + +- name: compute_kzg_proof#deneb + sources: [] + spec: | + + def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]: + """ + Compute KZG proof at point `z` for the polynomial represented by `blob`. 
+ Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z). + Public method. + """ + assert len(blob) == BYTES_PER_BLOB + assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT + polynomial = blob_to_polynomial(blob) + proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z_bytes)) + return proof, int(y).to_bytes(BYTES_PER_FIELD_ELEMENT, KZG_ENDIANNESS) + + +- name: compute_kzg_proof_impl#deneb + sources: [] + spec: | + + def compute_kzg_proof_impl( + polynomial: Polynomial, z: BLSFieldElement + ) -> Tuple[KZGProof, BLSFieldElement]: + """ + Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`. + """ + roots_of_unity_brp = bit_reversal_permutation(compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB)) + + # For all x_i, compute p(x_i) - p(z) + y = evaluate_polynomial_in_evaluation_form(polynomial, z) + polynomial_shifted = [p - y for p in polynomial] + + # For all x_i, compute (x_i - z) + denominator_poly = [x - z for x in roots_of_unity_brp] + + # Compute the quotient polynomial directly in evaluation form + quotient_polynomial = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_BLOB + for i, (a, b) in enumerate(zip(polynomial_shifted, denominator_poly)): + if b == BLSFieldElement(0): + # The denominator is zero hence `z` is a root of unity: we must handle it as a special case + quotient_polynomial[i] = compute_quotient_eval_within_domain( + roots_of_unity_brp[i], polynomial, y + ) + else: + # Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z). + quotient_polynomial[i] = a / b + + return KZGProof( + g1_lincomb(bit_reversal_permutation(KZG_SETUP_G1_LAGRANGE), quotient_polynomial) + ), y + + +- name: compute_kzg_proof_multi_impl#fulu + sources: [] + spec: | + + def compute_kzg_proof_multi_impl( + polynomial_coeff: PolynomialCoeff, zs: Coset + ) -> Tuple[KZGProof, CosetEvals]: + """ + Compute a KZG multi-evaluation proof for a set of `k` points. 
+ + This is done by committing to the following quotient polynomial: + Q(X) = f(X) - I(X) / Z(X) + Where: + - I(X) is the degree `k-1` polynomial that agrees with f(x) at all `k` points + - Z(X) is the degree `k` polynomial that evaluates to zero on all `k` points + + We further note that since the degree of I(X) is less than the degree of Z(X), + the computation can be simplified in monomial form to Q(X) = f(X) / Z(X). + """ + + # For all points, compute the evaluation of those points + ys = CosetEvals([evaluate_polynomialcoeff(polynomial_coeff, z) for z in zs]) + + # Compute Z(X) + denominator_poly = vanishing_polynomialcoeff(zs) + + # Compute the quotient polynomial directly in monomial form + quotient_polynomial = divide_polynomialcoeff(polynomial_coeff, denominator_poly) + + return KZGProof( + g1_lincomb(KZG_SETUP_G1_MONOMIAL[: len(quotient_polynomial)], quotient_polynomial) + ), ys + + +- name: compute_matrix#fulu + sources: [] + spec: | + + def compute_matrix(blobs: Sequence[Blob]) -> Sequence[MatrixEntry]: + """ + Return the full, flattened sequence of matrix entries. + + This helper demonstrates the relationship between blobs and the matrix of cells/proofs. + The data structure for storing cells/proofs is implementation-dependent. + """ + matrix = [] + for blob_index, blob in enumerate(blobs): + cells, proofs = compute_cells_and_kzg_proofs(blob) + for cell_index, (cell, proof) in enumerate(zip(cells, proofs)): + matrix.append( + MatrixEntry( + cell=cell, + kzg_proof=proof, + row_index=blob_index, + column_index=cell_index, + ) + ) + return matrix + + +- name: compute_merkle_proof#altair + sources: [] + spec: | + + def compute_merkle_proof(object: SSZObject, index: GeneralizedIndex) -> Sequence[Bytes32]: ... 
+ + +- name: compute_new_state_root#phase0 + sources: [] + spec: | + + def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root: + temp_state: BeaconState = state.copy() + signed_block = SignedBeaconBlock(message=block) + state_transition(temp_state, signed_block, validate_result=False) + return hash_tree_root(temp_state) + + +- name: compute_on_chain_aggregate#electra + sources: + - file: beacon_node/operation_pool/src/attestation_storage.rs + search: pub fn compute_on_chain_aggregate( + spec: | + + def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation: + aggregates = sorted( + network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0] + ) + + data = aggregates[0].data + aggregation_bits = Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT]() + for a in aggregates: + for b in a.aggregation_bits: + aggregation_bits.append(b) + + signature = bls.Aggregate([a.signature for a in aggregates]) + + committee_indices = [get_committee_indices(a.committee_bits)[0] for a in aggregates] + committee_flags = [(index in committee_indices) for index in range(0, MAX_COMMITTEES_PER_SLOT)] + committee_bits = Bitvector[MAX_COMMITTEES_PER_SLOT](committee_flags) + + return Attestation( + aggregation_bits=aggregation_bits, + data=data, + committee_bits=committee_bits, + signature=signature, + ) + + +- name: compute_powers#deneb + sources: [] + spec: | + + def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]: + """ + Return ``x`` to power of [0, n-1], if n > 0. When n==0, an empty array is returned. 
+ """ + current_power = BLSFieldElement(1) + powers = [] + for _ in range(n): + powers.append(current_power) + current_power = current_power * x + return powers + + +- name: compute_proposer_index#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn compute_proposer_index( + spec: | + + def compute_proposer_index( + state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32 + ) -> ValidatorIndex: + """ + Return from ``indices`` a random index sampled by effective balance. + """ + assert len(indices) > 0 + MAX_RANDOM_BYTE = 2**8 - 1 + i = uint64(0) + total = uint64(len(indices)) + while True: + candidate_index = indices[compute_shuffled_index(i % total, total, seed)] + random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] + effective_balance = state.validators[candidate_index].effective_balance + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: + return candidate_index + i += 1 + + +- name: compute_proposer_index#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn compute_proposer_index( + spec: | + + def compute_proposer_index( + state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32 + ) -> ValidatorIndex: + """ + Return from ``indices`` a random index sampled by effective balance. 
+ """ + assert len(indices) > 0 + # [Modified in Electra] + MAX_RANDOM_VALUE = 2**16 - 1 + i = uint64(0) + total = uint64(len(indices)) + while True: + candidate_index = indices[compute_shuffled_index(i % total, total, seed)] + # [Modified in Electra] + random_bytes = hash(seed + uint_to_bytes(i // 16)) + offset = i % 16 * 2 + random_value = bytes_to_uint64(random_bytes[offset : offset + 2]) + effective_balance = state.validators[candidate_index].effective_balance + # [Modified in Electra:EIP7251] + if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value: + return candidate_index + i += 1 + + +- name: compute_proposer_indices#fulu + sources: + - file: consensus/types/src/state/beacon_state.rs + search: fn compute_proposer_indices( + spec: | + + def compute_proposer_indices( + state: BeaconState, epoch: Epoch, seed: Bytes32, indices: Sequence[ValidatorIndex] + ) -> Vector[ValidatorIndex, SLOTS_PER_EPOCH]: + """ + Return the proposer indices for the given ``epoch``. + """ + start_slot = compute_start_slot_at_epoch(epoch) + seeds = [hash(seed + uint_to_bytes(Slot(start_slot + i))) for i in range(SLOTS_PER_EPOCH)] + return [compute_proposer_index(state, indices, seed) for seed in seeds] + + +- name: compute_proposer_indices#gloas + sources: [] + spec: | + + def compute_proposer_indices( + state: BeaconState, epoch: Epoch, seed: Bytes32, indices: Sequence[ValidatorIndex] + ) -> Vector[ValidatorIndex, SLOTS_PER_EPOCH]: + """ + Return the proposer indices for the given ``epoch``. 
+ """ + start_slot = compute_start_slot_at_epoch(epoch) + seeds = [hash(seed + uint_to_bytes(Slot(start_slot + i))) for i in range(SLOTS_PER_EPOCH)] + # [Modified in Gloas:EIP7732] + return [ + compute_balance_weighted_selection(state, indices, seed, size=1, shuffle_indices=True)[0] + for seed in seeds + ] + + +- name: compute_pulled_up_tip#phase0 + sources: [] + spec: | + + def compute_pulled_up_tip(store: Store, block_root: Root) -> None: + state = store.block_states[block_root].copy() + # Pull up the post-state of the block to the next epoch boundary + process_justification_and_finalization(state) + + store.unrealized_justifications[block_root] = state.current_justified_checkpoint + update_unrealized_checkpoints( + store, state.current_justified_checkpoint, state.finalized_checkpoint + ) + + # If the block is from a prior epoch, apply the realized values + block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot) + current_epoch = get_current_store_epoch(store) + if block_epoch < current_epoch: + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + +- name: compute_quotient_eval_within_domain#deneb + sources: [] + spec: | + + def compute_quotient_eval_within_domain( + z: BLSFieldElement, polynomial: Polynomial, y: BLSFieldElement + ) -> BLSFieldElement: + """ + Given `y == p(z)` for a polynomial `p(x)`, compute `q(z)`: the KZG quotient polynomial evaluated at `z` for the + special case where `z` is in roots of unity. + + For more details, read https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html section "Dividing + when one of the points is zero". The code below computes q(x_m) for the roots of unity special case. 
+ """ + roots_of_unity_brp = bit_reversal_permutation(compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB)) + result = BLSFieldElement(0) + for i, omega_i in enumerate(roots_of_unity_brp): + if omega_i == z: # skip the evaluation point in the sum + continue + + f_i = polynomial[i] - y + numerator = f_i * omega_i + denominator = z * (z - omega_i) + result += numerator / denominator + + return result + + +- name: compute_roots_of_unity#deneb + sources: [] + spec: | + + def compute_roots_of_unity(order: uint64) -> Sequence[BLSFieldElement]: + """ + Return roots of unity of ``order``. + """ + assert (BLS_MODULUS - 1) % int(order) == 0 + root_of_unity = BLSFieldElement( + pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // int(order), BLS_MODULUS) + ) + return compute_powers(root_of_unity, order) + + +- name: compute_shuffled_index#phase0 + sources: + - file: consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs + search: pub fn compute_shuffled_index( + spec: | + + def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64: + """ + Return the shuffled index corresponding to ``seed`` (and ``index_count``). 
+ """ + assert index < index_count + + # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf) + # See the 'generalized domain' algorithm on page 3 + for current_round in range(SHUFFLE_ROUND_COUNT): + pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count + flip = (pivot + index_count - index) % index_count + position = max(index, flip) + source = hash( + seed + uint_to_bytes(uint8(current_round)) + uint_to_bytes(uint32(position // 256)) + ) + byte = uint8(source[(position % 256) // 8]) + bit = (byte >> (position % 8)) % 2 + index = flip if bit else index + + return index + + +- name: compute_signed_block_header#deneb + sources: [] + spec: | + + def compute_signed_block_header(signed_block: SignedBeaconBlock) -> SignedBeaconBlockHeader: + block = signed_block.message + block_header = BeaconBlockHeader( + slot=block.slot, + proposer_index=block.proposer_index, + parent_root=block.parent_root, + state_root=block.state_root, + body_root=hash_tree_root(block.body), + ) + return SignedBeaconBlockHeader(message=block_header, signature=signed_block.signature) + + +- name: compute_signing_root#phase0 + sources: + - file: consensus/types/src/core/signing_data.rs + search: fn signing_root( + spec: | + + def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: + """ + Return the signing root for the corresponding signing data. 
+ """ + return hash_tree_root( + SigningData( + object_root=hash_tree_root(ssz_object), + domain=domain, + ) + ) + + +- name: compute_slots_since_epoch_start#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn compute_slots_since_epoch_start< + spec: | + + def compute_slots_since_epoch_start(slot: Slot) -> int: + return slot - compute_start_slot_at_epoch(compute_epoch_at_slot(slot)) + + +- name: compute_start_slot_at_epoch#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: fn compute_start_slot_at_epoch< + spec: | + + def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: + """ + Return the start slot of ``epoch``. + """ + return Slot(epoch * SLOTS_PER_EPOCH) + + +- name: compute_subnet_for_attestation#phase0 + sources: + - file: consensus/types/src/attestation/subnet_id.rs + search: pub fn compute_subnet_for_attestation< + spec: | + + def compute_subnet_for_attestation( + committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex + ) -> SubnetID: + """ + Compute the correct subnet for an attestation for Phase 0. + Note, this mimics expected future behavior where attestations will be mapped to their shard subnet. 
+ """ + slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH) + committees_since_epoch_start = committees_per_slot * slots_since_epoch_start + + return SubnetID((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT) + + +- name: compute_subnet_for_blob_sidecar#deneb + sources: [] + spec: | + + def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: + return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT) + + +- name: compute_subnet_for_blob_sidecar#electra + sources: [] + spec: | + + def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: + return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) + + +- name: compute_subnet_for_data_column_sidecar#fulu + sources: + - file: consensus/types/src/data/data_column_subnet_id.rs + search: pub fn from_column_index( + spec: | + + def compute_subnet_for_data_column_sidecar(column_index: ColumnIndex) -> SubnetID: + return SubnetID(column_index % DATA_COLUMN_SIDECAR_SUBNET_COUNT) + + +- name: compute_subnets_for_sync_committee#altair + sources: + - file: consensus/types/src/sync_committee/sync_subnet_id.rs + search: pub fn compute_subnets_for_sync_committee< + spec: | + + def compute_subnets_for_sync_committee( + state: BeaconState, validator_index: ValidatorIndex + ) -> Set[SubnetID]: + next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) + if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period( + next_slot_epoch + ): + sync_committee = state.current_sync_committee + else: + sync_committee = state.next_sync_committee + + target_pubkey = state.validators[validator_index].pubkey + sync_committee_indices = [ + index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey + ] + return set( + [ + SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) + for index in sync_committee_indices + ] + ) + + +- name: compute_subscribed_subnet#phase0 + sources: [] + spec: | + + def 
compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID: + node_id_prefix = node_id >> (NODE_ID_BITS - ATTESTATION_SUBNET_PREFIX_BITS) + node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION + permutation_seed = hash( + uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)) + ) + permutated_prefix = compute_shuffled_index( + node_id_prefix, + 1 << ATTESTATION_SUBNET_PREFIX_BITS, + permutation_seed, + ) + return SubnetID((permutated_prefix + index) % ATTESTATION_SUBNET_COUNT) + + +- name: compute_subscribed_subnets#phase0 + sources: [] + spec: | + + def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]: + return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)] + + +- name: compute_sync_committee_period#altair + sources: + - file: consensus/types/src/light_client/light_client_update.rs + search: fn compute_sync_committee_period_at_slot + spec: | + + def compute_sync_committee_period(epoch: Epoch) -> uint64: + return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + + +- name: compute_sync_committee_period_at_slot#altair + sources: + - file: consensus/types/src/light_client/light_client_update.rs + search: fn compute_sync_committee_period_at_slot< + spec: | + + def compute_sync_committee_period_at_slot(slot: Slot) -> uint64: + return compute_sync_committee_period(compute_epoch_at_slot(slot)) + + +- name: compute_time_at_slot#phase0 + sources: [] + spec: | + + def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: + slots_since_genesis = slot - GENESIS_SLOT + return uint64(state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT) + + +- name: compute_verify_cell_kzg_proof_batch_challenge#fulu + sources: [] + spec: | + + def compute_verify_cell_kzg_proof_batch_challenge( + commitments: Sequence[KZGCommitment], + commitment_indices: Sequence[CommitmentIndex], + cell_indices: Sequence[CellIndex], + cosets_evals: Sequence[CosetEvals], + proofs: 
Sequence[KZGProof], + ) -> BLSFieldElement: + """ + Compute a random challenge ``r`` used in the universal verification equation. To compute the + challenge, ``RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN`` and all data that can influence the + verification is hashed together to deterministically generate a "random" field element via + the Fiat-Shamir heuristic. + """ + hashinput = RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN + hashinput += int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, KZG_ENDIANNESS) + hashinput += int.to_bytes(FIELD_ELEMENTS_PER_CELL, 8, KZG_ENDIANNESS) + hashinput += int.to_bytes(len(commitments), 8, KZG_ENDIANNESS) + hashinput += int.to_bytes(len(cell_indices), 8, KZG_ENDIANNESS) + for commitment in commitments: + hashinput += commitment + for k, coset_evals in enumerate(cosets_evals): + hashinput += int.to_bytes(commitment_indices[k], 8, KZG_ENDIANNESS) + hashinput += int.to_bytes(cell_indices[k], 8, KZG_ENDIANNESS) + for coset_eval in coset_evals: + hashinput += bls_field_to_bytes(coset_eval) + hashinput += proofs[k] + return hash_to_bls_field(hashinput) + + +- name: compute_weak_subjectivity_period#phase0 + sources: [] + spec: | + + def compute_weak_subjectivity_period(state: BeaconState) -> uint64: + """ + Returns the weak subjectivity period for the current ``state``. + This computation takes into account the effect of: + - validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and + - validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch). 
+ A detailed calculation can be found at: + https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf + """ + ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + N = len(get_active_validator_indices(state, get_current_epoch(state))) + t = get_total_active_balance(state) // N // ETH_TO_GWEI + T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI + delta = get_validator_churn_limit(state) + Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH + D = SAFETY_DECAY + + if T * (200 + 3 * D) < t * (200 + 12 * D): + epochs_for_validator_set_churn = ( + N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T)) + ) + epochs_for_balance_top_ups = N * (200 + 3 * D) // (600 * Delta) + ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups) + else: + ws_period += 3 * N * D * t // (200 * Delta * (T - t)) + + return ws_period + + +- name: compute_weak_subjectivity_period#electra + sources: [] + spec: | + + def compute_weak_subjectivity_period(state: BeaconState) -> uint64: + """ + Returns the weak subjectivity period for the current ``state``. + This computation takes into account the effect of: + - validator set churn (bounded by ``get_balance_churn_limit()`` per epoch) + A detailed calculation can be found at: + https://notes.ethereum.org/@CarlBeek/electra_weak_subjectivity + """ + t = get_total_active_balance(state) + delta = get_balance_churn_limit(state) + epochs_for_validator_set_churn = SAFETY_DECAY * t // (2 * delta * 100) + return MIN_VALIDATOR_WITHDRAWABILITY_DELAY + epochs_for_validator_set_churn + + +- name: construct_vanishing_polynomial#fulu + sources: [] + spec: | + + def construct_vanishing_polynomial( + missing_cell_indices: Sequence[CellIndex], + ) -> Sequence[BLSFieldElement]: + """ + Given the cells indices that are missing from the data, compute the polynomial that vanishes at every point that + corresponds to a missing field element. 
+ + This method assumes that all of the cells cannot be missing. In this case the vanishing polynomial + could be computed as Z(x) = x^n - 1, where `n` is FIELD_ELEMENTS_PER_EXT_BLOB. + + We never encounter this case however because this method is used solely for recovery and recovery only + works if at least half of the cells are available. + """ + # Get the small domain + roots_of_unity_reduced = compute_roots_of_unity(CELLS_PER_EXT_BLOB) + + # Compute polynomial that vanishes at all the missing cells (over the small domain) + short_zero_poly = vanishing_polynomialcoeff( + [ + roots_of_unity_reduced[reverse_bits(missing_cell_index, CELLS_PER_EXT_BLOB)] + for missing_cell_index in missing_cell_indices + ] + ) + + # Extend vanishing polynomial to full domain using the closed form of the vanishing polynomial over a coset + zero_poly_coeff = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_EXT_BLOB + for i, coeff in enumerate(short_zero_poly): + zero_poly_coeff[i * FIELD_ELEMENTS_PER_CELL] = coeff + + return zero_poly_coeff + + +- name: coset_evals_to_cell#fulu + sources: [] + spec: | + + def coset_evals_to_cell(coset_evals: CosetEvals) -> Cell: + """ + Convert a trusted ``CosetEval`` into an untrusted ``Cell``. + """ + cell = [] + for i in range(FIELD_ELEMENTS_PER_CELL): + cell += bls_field_to_bytes(coset_evals[i]) + return Cell(cell) + + +- name: coset_fft_field#fulu + sources: [] + spec: | + + def coset_fft_field( + vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement], inv: bool = False + ) -> Sequence[BLSFieldElement]: + """ + Computes an FFT/IFFT over a coset of the roots of unity. + This is useful for when one wants to divide by a polynomial which + vanishes on one or more elements in the domain. 
+ """ + vals = [v for v in vals] # copy + + def shift_vals( + vals: Sequence[BLSFieldElement], factor: BLSFieldElement + ) -> Sequence[BLSFieldElement]: + """ + Multiply each entry in `vals` by succeeding powers of `factor` + i.e., [vals[0] * factor^0, vals[1] * factor^1, ..., vals[n] * factor^n] + """ + updated_vals: List[BLSFieldElement] = [] + shift = BLSFieldElement(1) + for i in range(len(vals)): + updated_vals.append(vals[i] * shift) + shift = shift * factor + return updated_vals + + # This is the coset generator; it is used to compute a FFT/IFFT over a coset of + # the roots of unity. + shift_factor = BLSFieldElement(PRIMITIVE_ROOT_OF_UNITY) + if inv: + vals = fft_field(vals, roots_of_unity, inv) + return shift_vals(vals, shift_factor.inverse()) + else: + vals = shift_vals(vals, shift_factor) + return fft_field(vals, roots_of_unity, inv) + + +- name: coset_for_cell#fulu + sources: [] + spec: | + + def coset_for_cell(cell_index: CellIndex) -> Coset: + """ + Get the coset for a given ``cell_index``. + Precisely, consider the group of roots of unity of order FIELD_ELEMENTS_PER_CELL * CELLS_PER_EXT_BLOB. + Let G = {1, g, g^2, ...} denote its subgroup of order FIELD_ELEMENTS_PER_CELL. + Then, the coset is defined as h * G = {h, hg, hg^2, ...}. + This function, returns the coset. + """ + assert cell_index < CELLS_PER_EXT_BLOB + roots_of_unity_brp = bit_reversal_permutation( + compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB) + ) + return Coset( + roots_of_unity_brp[ + FIELD_ELEMENTS_PER_CELL * cell_index : FIELD_ELEMENTS_PER_CELL * (cell_index + 1) + ] + ) + + +- name: coset_shift_for_cell#fulu + sources: [] + spec: | + + def coset_shift_for_cell(cell_index: CellIndex) -> BLSFieldElement: + """ + Get the shift that determines the coset for a given ``cell_index``. + Precisely, consider the group of roots of unity of order FIELD_ELEMENTS_PER_CELL * CELLS_PER_EXT_BLOB. + Let G = {1, g, g^2, ...} denote its subgroup of order FIELD_ELEMENTS_PER_CELL. 
+ Then, the coset is defined as h * G = {h, hg, hg^2, ...} for an element h. + This function returns h. + """ + assert cell_index < CELLS_PER_EXT_BLOB + roots_of_unity_brp = bit_reversal_permutation( + compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB) + ) + return roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_index] + + +- name: create_light_client_bootstrap#altair + sources: [] + spec: | + + def create_light_client_bootstrap( + state: BeaconState, block: SignedBeaconBlock + ) -> LightClientBootstrap: + assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH + + assert state.slot == state.latest_block_header.slot + header = state.latest_block_header.copy() + header.state_root = hash_tree_root(state) + assert hash_tree_root(header) == hash_tree_root(block.message) + + return LightClientBootstrap( + header=block_to_light_client_header(block), + current_sync_committee=state.current_sync_committee, + current_sync_committee_branch=CurrentSyncCommitteeBranch( + compute_merkle_proof(state, current_sync_committee_gindex_at_slot(state.slot)) + ), + ) + + +- name: create_light_client_finality_update#altair + sources: [] + spec: | + + def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate: + return LightClientFinalityUpdate( + attested_header=update.attested_header, + finalized_header=update.finalized_header, + finality_branch=update.finality_branch, + sync_aggregate=update.sync_aggregate, + signature_slot=update.signature_slot, + ) + + +- name: create_light_client_optimistic_update#altair + sources: [] + spec: | + + def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate: + return LightClientOptimisticUpdate( + attested_header=update.attested_header, + sync_aggregate=update.sync_aggregate, + signature_slot=update.signature_slot, + ) + + +- name: create_light_client_update#altair + sources: [] + spec: | + + def create_light_client_update( + state: BeaconState, + block: 
SignedBeaconBlock, + attested_state: BeaconState, + attested_block: SignedBeaconBlock, + finalized_block: Optional[SignedBeaconBlock], + ) -> LightClientUpdate: + assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH + assert ( + sum(block.message.body.sync_aggregate.sync_committee_bits) + >= MIN_SYNC_COMMITTEE_PARTICIPANTS + ) + + assert state.slot == state.latest_block_header.slot + header = state.latest_block_header.copy() + header.state_root = hash_tree_root(state) + assert hash_tree_root(header) == hash_tree_root(block.message) + update_signature_period = compute_sync_committee_period_at_slot(block.message.slot) + + assert attested_state.slot == attested_state.latest_block_header.slot + attested_header = attested_state.latest_block_header.copy() + attested_header.state_root = hash_tree_root(attested_state) + assert ( + hash_tree_root(attested_header) + == hash_tree_root(attested_block.message) + == block.message.parent_root + ) + update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot) + + update = LightClientUpdate() + + update.attested_header = block_to_light_client_header(attested_block) + + # `next_sync_committee` is only useful if the message is signed by the current sync committee + if update_attested_period == update_signature_period: + update.next_sync_committee = attested_state.next_sync_committee + update.next_sync_committee_branch = NextSyncCommitteeBranch( + compute_merkle_proof( + attested_state, next_sync_committee_gindex_at_slot(attested_state.slot) + ) + ) + + # Indicate finality whenever possible + if finalized_block is not None: + if finalized_block.message.slot != GENESIS_SLOT: + update.finalized_header = block_to_light_client_header(finalized_block) + assert ( + hash_tree_root(update.finalized_header.beacon) + == attested_state.finalized_checkpoint.root + ) + else: + assert attested_state.finalized_checkpoint.root == Bytes32() + update.finality_branch = FinalityBranch( + 
compute_merkle_proof(attested_state, finalized_root_gindex_at_slot(attested_state.slot)) + ) + + update.sync_aggregate = block.message.body.sync_aggregate + update.signature_slot = block.message.slot + + return update + + +- name: current_sync_committee_gindex_at_slot#altair + sources: [] + spec: | + + def current_sync_committee_gindex_at_slot(_slot: Slot) -> GeneralizedIndex: + return CURRENT_SYNC_COMMITTEE_GINDEX + + +- name: current_sync_committee_gindex_at_slot#electra + sources: [] + spec: | + + def current_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex: + epoch = compute_epoch_at_slot(slot) + + # [Modified in Electra] + if epoch >= ELECTRA_FORK_EPOCH: + return CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA + return CURRENT_SYNC_COMMITTEE_GINDEX + + +- name: decrease_balance#phase0 + sources: + - file: consensus/state_processing/src/common/mod.rs + search: pub fn decrease_balance< + spec: | + + def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Decrease the validator balance at index ``index`` by ``delta``, with underflow protection. + """ + state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta + + +- name: divide_polynomialcoeff#fulu + sources: [] + spec: | + + def divide_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff: + """ + Long polynomial division for two coefficient form polynomials ``a`` and ``b``. + """ + a = PolynomialCoeff(a[:]) # copy + o = PolynomialCoeff([]) + apos = len(a) - 1 + bpos = len(b) - 1 + diff = apos - bpos + while diff >= 0: + quot = a[apos] / b[bpos] + o.insert(0, quot) + for i in range(bpos, -1, -1): + a[diff + i] = a[diff + i] - b[i] * quot + apos -= 1 + diff -= 1 + return o + + +- name: eth_aggregate_pubkeys#altair + sources: [] + spec: | + + def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: + """ + Return the aggregate public key for the public keys in ``pubkeys``. 
+ + Note: the ``+`` operation should be interpreted as elliptic curve point addition, which takes as input + elliptic curve points that must be decoded from the input ``BLSPubkey``s. + This implementation is for demonstrative purposes only and ignores encoding/decoding concerns. + Refer to the BLS signature draft standard for more information. + """ + assert len(pubkeys) > 0 + # Ensure that the given inputs are valid pubkeys + assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys) + + result = copy(pubkeys[0]) + for pubkey in pubkeys[1:]: + result += pubkey + return result + + +- name: eth_fast_aggregate_verify#altair + sources: + - file: crypto/bls/src/generic_aggregate_signature.rs + search: pub fn eth_fast_aggregate_verify( + spec: | + + def eth_fast_aggregate_verify( + pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature + ) -> bool: + """ + Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. + """ + if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: + return True + return bls.FastAggregateVerify(pubkeys, message, signature) + + +- name: evaluate_polynomial_in_evaluation_form#deneb + sources: [] + spec: | + + def evaluate_polynomial_in_evaluation_form( + polynomial: Polynomial, z: BLSFieldElement + ) -> BLSFieldElement: + """ + Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``. + - When ``z`` is in the domain, the evaluation can be found by indexing the polynomial at the + position that ``z`` is in the domain. 
+ - When ``z`` is not in the domain, the barycentric formula is used: + f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i]) + """ + width = len(polynomial) + assert width == FIELD_ELEMENTS_PER_BLOB + inverse_width = BLSFieldElement(width).inverse() + + roots_of_unity_brp = bit_reversal_permutation(compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB)) + + # If we are asked to evaluate within the domain, we already know the answer + if z in roots_of_unity_brp: + eval_index = roots_of_unity_brp.index(z) + return polynomial[eval_index] + + result = BLSFieldElement(0) + for i in range(width): + a = polynomial[i] * roots_of_unity_brp[i] + b = z - roots_of_unity_brp[i] + result += a / b + r = z.pow(BLSFieldElement(width)) - BLSFieldElement(1) + result = result * r * inverse_width + return result + + +- name: evaluate_polynomialcoeff#fulu + sources: [] + spec: | + + def evaluate_polynomialcoeff( + polynomial_coeff: PolynomialCoeff, z: BLSFieldElement + ) -> BLSFieldElement: + """ + Evaluate a coefficient form polynomial at ``z`` using Horner's schema. 
+ """ + y = BLSFieldElement(0) + for coef in polynomial_coeff[::-1]: + y = y * z + coef + return y + + +- name: fft_field#fulu + sources: [] + spec: | + + def fft_field( + vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement], inv: bool = False + ) -> Sequence[BLSFieldElement]: + if inv: + # Inverse FFT + invlen = BLSFieldElement(len(vals)).pow(BLSFieldElement(BLS_MODULUS - 2)) + return [ + x * invlen + for x in _fft_field(vals, list(roots_of_unity[0:1]) + list(roots_of_unity[:0:-1])) + ] + else: + # Regular FFT + return _fft_field(vals, roots_of_unity) + + +- name: filter_block_tree#phase0 + sources: [] + spec: | + + def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: + block = store.blocks[block_root] + children = [ + root for root in store.blocks.keys() if store.blocks[root].parent_root == block_root + ] + + # If any children branches contain expected finalized/justified checkpoints, + # add to filtered block-tree and signal viability to parent. 
+ if any(children): + filter_block_tree_result = [filter_block_tree(store, child, blocks) for child in children] + if any(filter_block_tree_result): + blocks[block_root] = block + return True + return False + + current_epoch = get_current_store_epoch(store) + voting_source = get_voting_source(store, block_root) + + # The voting source should be either at the same height as the store's justified checkpoint or + # not more than two epochs ago + correct_justified = ( + store.justified_checkpoint.epoch == GENESIS_EPOCH + or voting_source.epoch == store.justified_checkpoint.epoch + or voting_source.epoch + 2 >= current_epoch + ) + + finalized_checkpoint_block = get_checkpoint_block( + store, + block_root, + store.finalized_checkpoint.epoch, + ) + + correct_finalized = ( + store.finalized_checkpoint.epoch == GENESIS_EPOCH + or store.finalized_checkpoint.root == finalized_checkpoint_block + ) + + # If expected finalized/justified, add to viable block-tree and signal viability to parent. + if correct_justified and correct_finalized: + blocks[block_root] = block + return True + + # Otherwise, branch not viable + return False + + +- name: finalized_root_gindex_at_slot#altair + sources: [] + spec: | + + def finalized_root_gindex_at_slot(_slot: Slot) -> GeneralizedIndex: + return FINALIZED_ROOT_GINDEX + + +- name: finalized_root_gindex_at_slot#electra + sources: [] + spec: | + + def finalized_root_gindex_at_slot(slot: Slot) -> GeneralizedIndex: + epoch = compute_epoch_at_slot(slot) + + # [Modified in Electra] + if epoch >= ELECTRA_FORK_EPOCH: + return FINALIZED_ROOT_GINDEX_ELECTRA + return FINALIZED_ROOT_GINDEX + + +- name: g1_lincomb#deneb + sources: [] + spec: | + + def g1_lincomb( + points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement] + ) -> KZGCommitment: + """ + BLS multiscalar multiplication in G1. This can be naively implemented using double-and-add. 
+ """ + assert len(points) == len(scalars) + + if len(points) == 0: + return bls.G1_to_bytes48(bls.Z1()) + + points_g1 = [] + for point in points: + points_g1.append(bls.bytes48_to_G1(point)) + + result = bls.multi_exp(points_g1, scalars) + return KZGCommitment(bls.G1_to_bytes48(result)) + + +- name: get_activation_exit_churn_limit#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_activation_exit_churn_limit( + spec: | + + def get_activation_exit_churn_limit(state: BeaconState) -> Gwei: + """ + Return the churn limit for the current epoch dedicated to activations and exits. + """ + return min(MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT, get_balance_churn_limit(state)) + + +- name: get_active_validator_indices#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_active_validator_indices( + - file: consensus/types/src/state/committee_cache.rs + search: pub fn get_active_validator_indices< + spec: | + + def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: + """ + Return the sequence of active validator indices at ``epoch``. 
+ """ + return [ + ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch) + ] + + +- name: get_aggregate_and_proof#phase0 + sources: [] + spec: | + + def get_aggregate_and_proof( + state: BeaconState, aggregator_index: ValidatorIndex, aggregate: Attestation, privkey: int + ) -> AggregateAndProof: + return AggregateAndProof( + aggregator_index=aggregator_index, + aggregate=aggregate, + selection_proof=get_slot_signature(state, aggregate.data.slot, privkey), + ) + + +- name: get_aggregate_and_proof_signature#phase0 + sources: [] + spec: | + + def get_aggregate_and_proof_signature( + state: BeaconState, aggregate_and_proof: AggregateAndProof, privkey: int + ) -> BLSSignature: + aggregate = aggregate_and_proof.aggregate + domain = get_domain( + state, DOMAIN_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot) + ) + signing_root = compute_signing_root(aggregate_and_proof, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_aggregate_due_ms#phase0 + sources: [] + spec: | + + def get_aggregate_due_ms(epoch: Epoch) -> uint64: + return get_slot_component_duration_ms(AGGREGATE_DUE_BPS) + + +- name: get_aggregate_due_ms#gloas + sources: [] + spec: | + + def get_aggregate_due_ms(epoch: Epoch) -> uint64: + # [New in Gloas] + if epoch >= GLOAS_FORK_EPOCH: + return get_slot_component_duration_ms(AGGREGATE_DUE_BPS_GLOAS) + return get_slot_component_duration_ms(AGGREGATE_DUE_BPS) + + +- name: get_aggregate_signature#phase0 + sources: [] + spec: | + + def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature: + signatures = [attestation.signature for attestation in attestations] + return bls.Aggregate(signatures) + + +- name: get_ancestor#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: fn get_ancestor( + spec: | + + def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: + block = store.blocks[root] + if block.slot > slot: + return get_ancestor(store, 
block.parent_root, slot) + return root + + +- name: get_ancestor#gloas + sources: [] + spec: | + + def get_ancestor(store: Store, root: Root, slot: Slot) -> ForkChoiceNode: + """ + Returns the beacon block root and the payload status of the ancestor of the beacon block + with ``root`` at ``slot``. If the beacon block with ``root`` is already at ``slot`` or we are + requesting an ancestor "in the future", it returns ``PAYLOAD_STATUS_PENDING``. + """ + block = store.blocks[root] + if block.slot <= slot: + return ForkChoiceNode(root=root, payload_status=PAYLOAD_STATUS_PENDING) + + parent = store.blocks[block.parent_root] + if parent.slot > slot: + return get_ancestor(store, block.parent_root, slot) + else: + return ForkChoiceNode( + root=block.parent_root, + payload_status=get_parent_payload_status(store, block), + ) + + +- name: get_attestation_component_deltas#phase0 + sources: [] + spec: | + + def get_attestation_component_deltas( + state: BeaconState, attestations: Sequence[PendingAttestation] + ) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Helper with shared logic for use by get source, target, and head deltas functions + """ + rewards = [Gwei(0)] * len(state.validators) + penalties = [Gwei(0)] * len(state.validators) + total_balance = get_total_active_balance(state) + unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations) + attesting_balance = get_total_balance(state, unslashed_attesting_indices) + for index in get_eligible_validator_indices(state): + if index in unslashed_attesting_indices: + increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balance totals to avoid uint64 overflow + if is_in_inactivity_leak(state): + # Since full base reward will be canceled out by inactivity penalty deltas, + # optimal participation receives full base reward compensation here. 
+ rewards[index] += get_base_reward(state, index) + else: + reward_numerator = get_base_reward(state, index) * (attesting_balance // increment) + rewards[index] += reward_numerator // (total_balance // increment) + else: + penalties[index] += get_base_reward(state, index) + return rewards, penalties + + +- name: get_attestation_deltas#phase0 + sources: [] + spec: | + + def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return attestation reward/penalty deltas for each validator. + """ + source_rewards, source_penalties = get_source_deltas(state) + target_rewards, target_penalties = get_target_deltas(state) + head_rewards, head_penalties = get_head_deltas(state) + inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state) + _, inactivity_penalties = get_inactivity_penalty_deltas(state) + + rewards = [ + source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i] + for i in range(len(state.validators)) + ] + + penalties = [ + source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i] + for i in range(len(state.validators)) + ] + + return rewards, penalties + + +- name: get_attestation_due_ms#phase0 + sources: [] + spec: | + + def get_attestation_due_ms(epoch: Epoch) -> uint64: + return get_slot_component_duration_ms(ATTESTATION_DUE_BPS) + + +- name: get_attestation_due_ms#gloas + sources: [] + spec: | + + def get_attestation_due_ms(epoch: Epoch) -> uint64: + # [New in Gloas] + if epoch >= GLOAS_FORK_EPOCH: + return get_slot_component_duration_ms(ATTESTATION_DUE_BPS_GLOAS) + return get_slot_component_duration_ms(ATTESTATION_DUE_BPS) + + +- name: get_attestation_participation_flag_indices#altair + sources: + - file: consensus/state_processing/src/common/get_attestation_participation.rs + search: pub fn get_attestation_participation_flag_indices< + spec: | + + def get_attestation_participation_flag_indices( + state: BeaconState, data: AttestationData, inclusion_delay: 
uint64 + ) -> Sequence[int]: + """ + Return the flag indices that are satisfied by an attestation. + """ + # Matching source + if data.target.epoch == get_current_epoch(state): + justified_checkpoint = state.current_justified_checkpoint + else: + justified_checkpoint = state.previous_justified_checkpoint + is_matching_source = data.source == justified_checkpoint + + # Matching target + target_root = get_block_root(state, data.target.epoch) + target_root_matches = data.target.root == target_root + is_matching_target = is_matching_source and target_root_matches + + # Matching head + head_root = get_block_root_at_slot(state, data.slot) + head_root_matches = data.beacon_block_root == head_root + is_matching_head = is_matching_target and head_root_matches + + assert is_matching_source + + participation_flag_indices = [] + if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH): + participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX) + if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH: + participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX) + if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY: + participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX) + + return participation_flag_indices + + +- name: get_attestation_participation_flag_indices#deneb + sources: + - file: consensus/state_processing/src/common/get_attestation_participation.rs + search: pub fn get_attestation_participation_flag_indices< + spec: | + + def get_attestation_participation_flag_indices( + state: BeaconState, data: AttestationData, inclusion_delay: uint64 + ) -> Sequence[int]: + """ + Return the flag indices that are satisfied by an attestation. 
+ """ + # Matching source + if data.target.epoch == get_current_epoch(state): + justified_checkpoint = state.current_justified_checkpoint + else: + justified_checkpoint = state.previous_justified_checkpoint + is_matching_source = data.source == justified_checkpoint + + # Matching target + target_root = get_block_root(state, data.target.epoch) + target_root_matches = data.target.root == target_root + is_matching_target = is_matching_source and target_root_matches + + # Matching head + head_root = get_block_root_at_slot(state, data.slot) + head_root_matches = data.beacon_block_root == head_root + is_matching_head = is_matching_target and head_root_matches + + assert is_matching_source + + participation_flag_indices = [] + if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH): + participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX) + # [Modified in Deneb:EIP7045] + if is_matching_target: + participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX) + if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY: + participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX) + + return participation_flag_indices + + +- name: get_attestation_participation_flag_indices#gloas + sources: [] + spec: | + + def get_attestation_participation_flag_indices( + state: BeaconState, data: AttestationData, inclusion_delay: uint64 + ) -> Sequence[int]: + """ + Return the flag indices that are satisfied by an attestation. 
+ """ + # Matching source + if data.target.epoch == get_current_epoch(state): + justified_checkpoint = state.current_justified_checkpoint + else: + justified_checkpoint = state.previous_justified_checkpoint + is_matching_source = data.source == justified_checkpoint + + # Matching target + target_root = get_block_root(state, data.target.epoch) + target_root_matches = data.target.root == target_root + is_matching_target = is_matching_source and target_root_matches + + # [New in Gloas:EIP7732] + if is_attestation_same_slot(state, data): + assert data.index == 0 + payload_matches = True + else: + slot_index = data.slot % SLOTS_PER_HISTORICAL_ROOT + payload_index = state.execution_payload_availability[slot_index] + payload_matches = data.index == payload_index + + # Matching head + head_root = get_block_root_at_slot(state, data.slot) + head_root_matches = data.beacon_block_root == head_root + # [Modified in Gloas:EIP7732] + is_matching_head = is_matching_target and head_root_matches and payload_matches + + assert is_matching_source + + participation_flag_indices = [] + if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH): + participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX) + if is_matching_target: + participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX) + if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY: + participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX) + + return participation_flag_indices + + +- name: get_attestation_signature#phase0 + sources: [] + spec: | + + def get_attestation_signature( + state: BeaconState, attestation_data: AttestationData, privkey: int + ) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch) + signing_root = compute_signing_root(attestation_data, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_attesting_balance#phase0 + sources: [] + spec: | + + def get_attesting_balance(state: BeaconState, 
attestations: Sequence[PendingAttestation]) -> Gwei: + """ + Return the combined effective balance of the set of unslashed validators participating in ``attestations``. + Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. + """ + return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) + + +- name: get_attesting_indices#phase0 + sources: + - file: consensus/state_processing/src/common/get_attesting_indices.rs + search: pub mod attesting_indices_base(.|\n)*?pub fn get_attesting_indices< + regex: true + spec: | + + def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[ValidatorIndex]: + """ + Return the set of attesting indices corresponding to ``data`` and ``bits``. + """ + committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) + return set(index for i, index in enumerate(committee) if attestation.aggregation_bits[i]) + + +- name: get_attesting_indices#electra + sources: + - file: consensus/state_processing/src/common/get_attesting_indices.rs + search: pub mod attesting_indices_electra(.|\n)*?pub fn get_attesting_indices< + regex: true + spec: | + + def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[ValidatorIndex]: + """ + Return the set of attesting indices corresponding to ``aggregation_bits`` and ``committee_bits``. 
+ """ + output: Set[ValidatorIndex] = set() + committee_indices = get_committee_indices(attestation.committee_bits) + committee_offset = 0 + for committee_index in committee_indices: + committee = get_beacon_committee(state, attestation.data.slot, committee_index) + committee_attesters = set( + attester_index + for i, attester_index in enumerate(committee) + if attestation.aggregation_bits[committee_offset + i] + ) + output = output.union(committee_attesters) + + committee_offset += len(committee) + + return output + + +- name: get_balance_churn_limit#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_balance_churn_limit( + spec: | + + def get_balance_churn_limit(state: BeaconState) -> Gwei: + """ + Return the churn limit for the current epoch. + """ + churn = max( + MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA, get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT + ) + return churn - churn % EFFECTIVE_BALANCE_INCREMENT + + +- name: get_base_reward#phase0 + sources: + - file: consensus/state_processing/src/common/base.rs + search: pub fn get_base_reward( + spec: | + + def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: + total_balance = get_total_active_balance(state) + effective_balance = state.validators[index].effective_balance + return Gwei( + effective_balance + * BASE_REWARD_FACTOR + // integer_squareroot(total_balance) + // BASE_REWARDS_PER_EPOCH + ) + + +- name: get_base_reward#altair + sources: + - file: consensus/state_processing/src/common/altair.rs + search: pub fn get_base_reward( + spec: | + + def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: + """ + Return the base reward for the validator defined by ``index`` with respect to the current ``state``. 
+ """ + increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT + return Gwei(increments * get_base_reward_per_increment(state)) + + +- name: get_base_reward_per_increment#altair + sources: + - file: consensus/state_processing/src/common/altair.rs + search: fn get_base_reward_per_increment( + spec: | + + def get_base_reward_per_increment(state: BeaconState) -> Gwei: + return Gwei( + EFFECTIVE_BALANCE_INCREMENT + * BASE_REWARD_FACTOR + // integer_squareroot(get_total_active_balance(state)) + ) + + +- name: get_beacon_committee#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_beacon_committee( + spec: | + + def get_beacon_committee( + state: BeaconState, slot: Slot, index: CommitteeIndex + ) -> Sequence[ValidatorIndex]: + """ + Return the beacon committee at ``slot`` for ``index``. + """ + epoch = compute_epoch_at_slot(slot) + committees_per_slot = get_committee_count_per_slot(state, epoch) + return compute_committee( + indices=get_active_validator_indices(state, epoch), + seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), + index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, + count=committees_per_slot * SLOTS_PER_EPOCH, + ) + + +- name: get_beacon_proposer_index#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_beacon_proposer_index( + spec: | + + def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: + """ + Return the beacon proposer index at the current slot. 
+ """ + epoch = get_current_epoch(state) + seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) + indices = get_active_validator_indices(state, epoch) + return compute_proposer_index(state, indices, seed) + + +- name: get_beacon_proposer_index#fulu + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_beacon_proposer_index( + spec: | + + def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: + """ + Return the beacon proposer index at the current slot. + """ + return state.proposer_lookahead[state.slot % SLOTS_PER_EPOCH] + + +- name: get_beacon_proposer_indices#fulu + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_beacon_proposer_indices( + spec: | + + def get_beacon_proposer_indices( + state: BeaconState, epoch: Epoch + ) -> Vector[ValidatorIndex, SLOTS_PER_EPOCH]: + """ + Return the proposer indices for the given ``epoch``. + """ + indices = get_active_validator_indices(state, epoch) + seed = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + return compute_proposer_indices(state, epoch, seed, indices) + + +- name: get_blob_parameters#fulu + sources: + - file: consensus/types/src/core/chain_spec.rs + search: fn get_blob_parameters( + spec: | + + def get_blob_parameters(epoch: Epoch) -> BlobParameters: + """ + Return the blob parameters at a given epoch. 
+ """ + for entry in sorted(BLOB_SCHEDULE, key=lambda e: e["EPOCH"], reverse=True): + if epoch >= entry["EPOCH"]: + return BlobParameters(entry["EPOCH"], entry["MAX_BLOBS_PER_BLOCK"]) + return BlobParameters(ELECTRA_FORK_EPOCH, MAX_BLOBS_PER_BLOCK_ELECTRA) + + +- name: get_blob_sidecars#deneb + sources: [] + spec: | + + def get_blob_sidecars( + signed_block: SignedBeaconBlock, blobs: Sequence[Blob], blob_kzg_proofs: Sequence[KZGProof] + ) -> Sequence[BlobSidecar]: + block = signed_block.message + signed_block_header = compute_signed_block_header(signed_block) + return [ + BlobSidecar( + index=index, + blob=blob, + kzg_commitment=block.body.blob_kzg_commitments[index], + kzg_proof=blob_kzg_proofs[index], + signed_block_header=signed_block_header, + kzg_commitment_inclusion_proof=compute_merkle_proof( + block.body, + get_generalized_index(BeaconBlockBody, "blob_kzg_commitments", index), + ), + ) + for index, blob in enumerate(blobs) + ] + + +- name: get_block_root#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_block_root( + spec: | + + def get_block_root(state: BeaconState, epoch: Epoch) -> Root: + """ + Return the block root at the start of a recent ``epoch``. + """ + return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) + + +- name: get_block_root_at_slot#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_block_root_at_epoch( + spec: | + + def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root: + """ + Return the block root at a recent ``slot``. 
+ """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] + + +- name: get_block_signature#phase0 + sources: [] + spec: | + + def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(block.slot)) + signing_root = compute_signing_root(block, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_builder_payment_quorum_threshold#gloas + sources: [] + spec: | + + def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64: + """ + Calculate the quorum threshold for builder payments. + """ + per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH + quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR + return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR) + + +- name: get_checkpoint_block#phase0 + sources: [] + spec: | + + def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root: + """ + Compute the checkpoint block for epoch ``epoch`` in the chain of block ``root`` + """ + epoch_first_slot = compute_start_slot_at_epoch(epoch) + return get_ancestor(store, root, epoch_first_slot) + + +- name: get_checkpoint_block#gloas + sources: [] + spec: | + + def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root: + """ + Compute the checkpoint block for epoch ``epoch`` in the chain of block ``root`` + """ + epoch_first_slot = compute_start_slot_at_epoch(epoch) + return get_ancestor(store, root, epoch_first_slot).root + + +- name: get_committee_assignment#phase0 + sources: [] + spec: | + + def get_committee_assignment( + state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex + ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]: + """ + Return the committee assignment in the ``epoch`` for ``validator_index``. 
+ ``assignment`` returned is a tuple of the following form: + * ``assignment[0]`` is the list of validators in the committee + * ``assignment[1]`` is the index to which the committee is assigned + * ``assignment[2]`` is the slot at which the committee is assigned + Return None if no assignment. + """ + next_epoch = Epoch(get_current_epoch(state) + 1) + assert epoch <= next_epoch + + start_slot = compute_start_slot_at_epoch(epoch) + committee_count_per_slot = get_committee_count_per_slot(state, epoch) + for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): + for index in range(committee_count_per_slot): + committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) + if validator_index in committee: + return committee, CommitteeIndex(index), Slot(slot) + return None + + +- name: get_committee_count_per_slot#phase0 + sources: + - file: consensus/types/src/core/eth_spec.rs + search: fn get_committee_count_per_slot( + spec: | + + def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: + """ + Return the number of committees in each slot for the given ``epoch``. 
+ """ + return max( + uint64(1), + min( + MAX_COMMITTEES_PER_SLOT, + uint64(len(get_active_validator_indices(state, epoch))) + // SLOTS_PER_EPOCH + // TARGET_COMMITTEE_SIZE, + ), + ) + + +- name: get_committee_indices#electra + sources: + - file: consensus/state_processing/src/common/get_attesting_indices.rs + search: pub fn get_committee_indices< + spec: | + + def get_committee_indices(committee_bits: Bitvector) -> Sequence[CommitteeIndex]: + return [CommitteeIndex(index) for index, bit in enumerate(committee_bits) if bit] + + +- name: get_consolidation_churn_limit#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_consolidation_churn_limit( + spec: | + + def get_consolidation_churn_limit(state: BeaconState) -> Gwei: + return get_balance_churn_limit(state) - get_activation_exit_churn_limit(state) + + +- name: get_contribution_and_proof#altair + sources: [] + spec: | + + def get_contribution_and_proof( + state: BeaconState, + aggregator_index: ValidatorIndex, + contribution: SyncCommitteeContribution, + privkey: int, + ) -> ContributionAndProof: + selection_proof = get_sync_committee_selection_proof( + state, + contribution.slot, + contribution.subcommittee_index, + privkey, + ) + return ContributionAndProof( + aggregator_index=aggregator_index, + contribution=contribution, + selection_proof=selection_proof, + ) + + +- name: get_contribution_and_proof_signature#altair + sources: [] + spec: | + + def get_contribution_and_proof_signature( + state: BeaconState, contribution_and_proof: ContributionAndProof, privkey: int + ) -> BLSSignature: + contribution = contribution_and_proof.contribution + domain = get_domain( + state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot) + ) + signing_root = compute_signing_root(contribution_and_proof, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_contribution_due_ms#altair + sources: [] + spec: | + + def get_contribution_due_ms(epoch: Epoch) -> 
uint64: + return get_slot_component_duration_ms(CONTRIBUTION_DUE_BPS) + + +- name: get_contribution_due_ms#gloas + sources: [] + spec: | + + def get_contribution_due_ms(epoch: Epoch) -> uint64: + # [New in Gloas] + if epoch >= GLOAS_FORK_EPOCH: + return get_slot_component_duration_ms(CONTRIBUTION_DUE_BPS_GLOAS) + return get_slot_component_duration_ms(CONTRIBUTION_DUE_BPS) + + +- name: get_current_epoch#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn current_epoch( + spec: | + + def get_current_epoch(state: BeaconState) -> Epoch: + """ + Return the current epoch. + """ + return compute_epoch_at_slot(state.slot) + + +- name: get_current_slot#phase0 + sources: + - file: beacon_node/beacon_chain/src/test_utils.rs + search: pub fn get_current_slot( + spec: | + + def get_current_slot(store: Store) -> Slot: + return Slot(GENESIS_SLOT + get_slots_since_genesis(store)) + + +- name: get_current_store_epoch#phase0 + sources: [] + spec: | + + def get_current_store_epoch(store: Store) -> Epoch: + return compute_epoch_at_slot(get_current_slot(store)) + + +- name: get_custody_groups#fulu + sources: + - file: consensus/types/src/data/data_column_custody_group.rs + search: pub fn get_custody_groups( + spec: | + + def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence[CustodyIndex]: + assert custody_group_count <= NUMBER_OF_CUSTODY_GROUPS + + # Skip computation if all groups are custodied + if custody_group_count == NUMBER_OF_CUSTODY_GROUPS: + return [CustodyIndex(i) for i in range(NUMBER_OF_CUSTODY_GROUPS)] + + current_id = uint256(node_id) + custody_groups: List[CustodyIndex] = [] + while len(custody_groups) < custody_group_count: + custody_group = CustodyIndex( + bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8]) % NUMBER_OF_CUSTODY_GROUPS + ) + if custody_group not in custody_groups: + custody_groups.append(custody_group) + if current_id == UINT256_MAX: + # Overflow prevention + current_id = uint256(0) + else: + 
current_id += 1 + + assert len(custody_groups) == len(set(custody_groups)) + return sorted(custody_groups) + + +- name: get_data_column_sidecars#fulu + sources: [] + spec: | + + def get_data_column_sidecars( + signed_block_header: SignedBeaconBlockHeader, + kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK], + kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH], + cells_and_kzg_proofs: Sequence[ + Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] + ], + ) -> Sequence[DataColumnSidecar]: + """ + Given a signed block header and the commitments, inclusion proof, cells/proofs associated with + each blob in the block, assemble the sidecars which can be distributed to peers. + """ + assert len(cells_and_kzg_proofs) == len(kzg_commitments) + + sidecars = [] + for column_index in range(NUMBER_OF_COLUMNS): + column_cells, column_proofs = [], [] + for cells, proofs in cells_and_kzg_proofs: + column_cells.append(cells[column_index]) + column_proofs.append(proofs[column_index]) + sidecars.append( + DataColumnSidecar( + index=column_index, + column=column_cells, + kzg_commitments=kzg_commitments, + kzg_proofs=column_proofs, + signed_block_header=signed_block_header, + kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof, + ) + ) + return sidecars + + +- name: get_data_column_sidecars#gloas + sources: [] + spec: | + + def get_data_column_sidecars( + # [Modified in Gloas:EIP7732] + # Removed `signed_block_header` + # [New in Gloas:EIP7732] + beacon_block_root: Root, + # [New in Gloas:EIP7732] + slot: Slot, + kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK], + # [Modified in Gloas:EIP7732] + # Removed `kzg_commitments_inclusion_proof` + cells_and_kzg_proofs: Sequence[ + Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] + ], + ) -> Sequence[DataColumnSidecar]: + """ + Given a beacon block root and the commitments, cells/proofs associated with 
+ each blob in the block, assemble the sidecars which can be distributed to peers. + """ + assert len(cells_and_kzg_proofs) == len(kzg_commitments) + + sidecars = [] + for column_index in range(NUMBER_OF_COLUMNS): + column_cells, column_proofs = [], [] + for cells, proofs in cells_and_kzg_proofs: + column_cells.append(cells[column_index]) + column_proofs.append(proofs[column_index]) + sidecars.append( + DataColumnSidecar( + index=column_index, + column=column_cells, + kzg_commitments=kzg_commitments, + kzg_proofs=column_proofs, + slot=slot, + beacon_block_root=beacon_block_root, + ) + ) + return sidecars + + +- name: get_data_column_sidecars_from_block#fulu + sources: [] + spec: | + + def get_data_column_sidecars_from_block( + signed_block: SignedBeaconBlock, + cells_and_kzg_proofs: Sequence[ + Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] + ], + ) -> Sequence[DataColumnSidecar]: + """ + Given a signed block and the cells/proofs associated with each blob in the + block, assemble the sidecars which can be distributed to peers. 
+ """ + blob_kzg_commitments = signed_block.message.body.blob_kzg_commitments + signed_block_header = compute_signed_block_header(signed_block) + kzg_commitments_inclusion_proof = compute_merkle_proof( + signed_block.message.body, + get_generalized_index(BeaconBlockBody, "blob_kzg_commitments"), + ) + return get_data_column_sidecars( + signed_block_header, + blob_kzg_commitments, + kzg_commitments_inclusion_proof, + cells_and_kzg_proofs, + ) + + +- name: get_data_column_sidecars_from_block#gloas + sources: [] + spec: | + + def get_data_column_sidecars_from_block( + signed_block: SignedBeaconBlock, + # [New in Gloas:EIP7732] + blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK], + cells_and_kzg_proofs: Sequence[ + Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] + ], + ) -> Sequence[DataColumnSidecar]: + """ + Given a signed block and the cells/proofs associated with each blob in the + block, assemble the sidecars which can be distributed to peers. + """ + beacon_block_root = hash_tree_root(signed_block.message) + return get_data_column_sidecars( + beacon_block_root, + signed_block.message.slot, + blob_kzg_commitments, + cells_and_kzg_proofs, + ) + + +- name: get_data_column_sidecars_from_column_sidecar#fulu + sources: [] + spec: | + + def get_data_column_sidecars_from_column_sidecar( + sidecar: DataColumnSidecar, + cells_and_kzg_proofs: Sequence[ + Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] + ], + ) -> Sequence[DataColumnSidecar]: + """ + Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding + to the commitments it contains, assemble all sidecars for distribution to peers. 
+ """ + assert len(cells_and_kzg_proofs) == len(sidecar.kzg_commitments) + + return get_data_column_sidecars( + sidecar.signed_block_header, + sidecar.kzg_commitments, + sidecar.kzg_commitments_inclusion_proof, + cells_and_kzg_proofs, + ) + + +- name: get_data_column_sidecars_from_column_sidecar#gloas + sources: [] + spec: | + + def get_data_column_sidecars_from_column_sidecar( + sidecar: DataColumnSidecar, + cells_and_kzg_proofs: Sequence[ + Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] + ], + ) -> Sequence[DataColumnSidecar]: + """ + Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding + to the commitments it contains, assemble all sidecars for distribution to peers. + """ + assert len(cells_and_kzg_proofs) == len(sidecar.kzg_commitments) + + return get_data_column_sidecars( + sidecar.beacon_block_root, + sidecar.slot, + sidecar.kzg_commitments, + cells_and_kzg_proofs, + ) + + +- name: get_domain#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn get_domain( + spec: | + + def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch = None) -> Domain: + """ + Return the signature domain (fork version concatenated with domain type) of a message. 
+ """ + epoch = get_current_epoch(state) if epoch is None else epoch + fork_version = ( + state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version + ) + return compute_domain(domain_type, fork_version, state.genesis_validators_root) + + +- name: get_eligible_validator_indices#phase0 + sources: [] + spec: | + + def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]: + previous_epoch = get_previous_epoch(state) + return [ + ValidatorIndex(index) + for index, v in enumerate(state.validators) + if is_active_validator(v, previous_epoch) + or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch) + ] + + +- name: get_epoch_signature#phase0 + sources: [] + spec: | + + def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: + domain = get_domain(state, DOMAIN_RANDAO, compute_epoch_at_slot(block.slot)) + signing_root = compute_signing_root(compute_epoch_at_slot(block.slot), domain) + return bls.Sign(privkey, signing_root) + + +- name: get_eth1_pending_deposit_count#electra + sources: [] + spec: | + + def get_eth1_pending_deposit_count(state: BeaconState) -> uint64: + eth1_deposit_index_limit = min( + state.eth1_data.deposit_count, state.deposit_requests_start_index + ) + if state.eth1_deposit_index < eth1_deposit_index_limit: + return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) + else: + return uint64(0) + + +- name: get_eth1_vote#phase0 + sources: [] + spec: | + + def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data: + period_start = voting_period_start_time(state) + # `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height + votes_to_consider = [ + get_eth1_data(block) + for block in eth1_chain + if ( + is_candidate_block(block, period_start) + # Ensure cannot move back to earlier deposit contract states + and get_eth1_data(block).deposit_count >= state.eth1_data.deposit_count 
+ ) + ] + + # Valid votes already cast during this period + valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider] + + # Default vote on latest eth1 block data in the period range unless eth1 chain is not live + # Non-substantive casting for linter + state_eth1_data: Eth1Data = state.eth1_data + default_vote = ( + votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data + ) + + return max( + valid_votes, + # Tiebreak by smallest distance + key=lambda v: ( + valid_votes.count(v), + -valid_votes.index(v), + ), + default=default_vote, + ) + + +- name: get_eth1_vote#electra + sources: [] + spec: | + + def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data: + # [New in Electra:EIP6110] + if state.eth1_deposit_index == state.deposit_requests_start_index: + return state.eth1_data + + period_start = voting_period_start_time(state) + # `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height + votes_to_consider = [ + get_eth1_data(block) + for block in eth1_chain + if ( + is_candidate_block(block, period_start) + # Ensure cannot move back to earlier deposit contract states + and get_eth1_data(block).deposit_count >= state.eth1_data.deposit_count + ) + ] + + # Valid votes already cast during this period + valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider] + + # Default vote on latest eth1 block data in the period range unless eth1 chain is not live + # Non-substantive casting for linter + state_eth1_data: Eth1Data = state.eth1_data + default_vote = ( + votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data + ) + + return max( + valid_votes, + # Tiebreak by smallest distance + key=lambda v: ( + valid_votes.count(v), + -valid_votes.index(v), + ), + default=default_vote, + ) + + +- name: get_execution_payload#bellatrix + sources: + - file: 
beacon_node/beacon_chain/src/execution_payload.rs + search: pub fn get_execution_payload< + spec: | + + def get_execution_payload( + payload_id: Optional[PayloadId], execution_engine: ExecutionEngine + ) -> ExecutionPayload: + if payload_id is None: + # Pre-merge, empty payload + return ExecutionPayload() + else: + return execution_engine.get_payload(payload_id).execution_payload + + +- name: get_execution_payload_bid_signature#gloas + sources: [] + spec: | + + def get_execution_payload_bid_signature( + state: BeaconState, bid: ExecutionPayloadBid, privkey: int + ) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_BUILDER, compute_epoch_at_slot(bid.slot)) + signing_root = compute_signing_root(bid, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_execution_payload_envelope_signature#gloas + sources: [] + spec: | + + def get_execution_payload_envelope_signature( + state: BeaconState, envelope: ExecutionPayloadEnvelope, privkey: int + ) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_BUILDER, compute_epoch_at_slot(state.slot)) + signing_root = compute_signing_root(envelope, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_execution_requests#electra + sources: [] + spec: | + + def get_execution_requests(execution_requests_list: Sequence[bytes]) -> ExecutionRequests: + deposits = [] + withdrawals = [] + consolidations = [] + + request_types = [ + DEPOSIT_REQUEST_TYPE, + WITHDRAWAL_REQUEST_TYPE, + CONSOLIDATION_REQUEST_TYPE, + ] + + prev_request_type = None + for request in execution_requests_list: + request_type, request_data = request[0:1], request[1:] + + # Check that the request type is valid + assert request_type in request_types + # Check that the request data is not empty + assert len(request_data) != 0 + # Check that requests are in strictly ascending order + # Each successive type must be greater than the last with no duplicates + assert prev_request_type is None or prev_request_type < request_type + 
prev_request_type = request_type + + if request_type == DEPOSIT_REQUEST_TYPE: + deposits = ssz_deserialize( + List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD], request_data + ) + elif request_type == WITHDRAWAL_REQUEST_TYPE: + withdrawals = ssz_deserialize( + List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD], request_data + ) + elif request_type == CONSOLIDATION_REQUEST_TYPE: + consolidations = ssz_deserialize( + List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD], request_data + ) + + return ExecutionRequests( + deposits=deposits, + withdrawals=withdrawals, + consolidations=consolidations, + ) + + +- name: get_execution_requests_list#electra + sources: + - file: consensus/types/src/execution/execution_requests.rs + search: pub fn get_execution_requests_list( + spec: | + + def get_execution_requests_list(execution_requests: ExecutionRequests) -> Sequence[bytes]: + requests = [ + (DEPOSIT_REQUEST_TYPE, execution_requests.deposits), + (WITHDRAWAL_REQUEST_TYPE, execution_requests.withdrawals), + (CONSOLIDATION_REQUEST_TYPE, execution_requests.consolidations), + ] + + return [ + request_type + ssz_serialize(request_data) + for request_type, request_data in requests + if len(request_data) != 0 + ] + + +- name: get_expected_withdrawals#capella + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn get_expected_withdrawals< + spec: | + + def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]: + epoch = get_current_epoch(state) + withdrawal_index = state.next_withdrawal_index + validator_index = state.next_withdrawal_validator_index + withdrawals: List[Withdrawal] = [] + bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) + for _ in range(bound): + validator = state.validators[validator_index] + balance = state.balances[validator_index] + if is_fully_withdrawable_validator(validator, balance, epoch): + withdrawals.append( + Withdrawal( + index=withdrawal_index, + 
validator_index=validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=balance, + ) + ) + withdrawal_index += WithdrawalIndex(1) + elif is_partially_withdrawable_validator(validator, balance): + withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=balance - MAX_EFFECTIVE_BALANCE, + ) + ) + withdrawal_index += WithdrawalIndex(1) + if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + break + validator_index = ValidatorIndex((validator_index + 1) % len(state.validators)) + return withdrawals + + +- name: get_expected_withdrawals#electra + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn get_expected_withdrawals< + spec: | + + def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]: + epoch = get_current_epoch(state) + withdrawal_index = state.next_withdrawal_index + validator_index = state.next_withdrawal_validator_index + withdrawals: List[Withdrawal] = [] + processed_partial_withdrawals_count = 0 + + # [New in Electra:EIP7251] + # Consume pending partial withdrawals + for withdrawal in state.pending_partial_withdrawals: + if ( + withdrawal.withdrawable_epoch > epoch + or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP + ): + break + + validator = state.validators[withdrawal.validator_index] + has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE + total_withdrawn = sum( + w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index + ) + balance = state.balances[withdrawal.validator_index] - total_withdrawn + has_excess_balance = balance > MIN_ACTIVATION_BALANCE + if ( + validator.exit_epoch == FAR_FUTURE_EPOCH + and has_sufficient_effective_balance + and has_excess_balance + ): + withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount) + 
withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=withdrawal.validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=withdrawable_balance, + ) + ) + withdrawal_index += WithdrawalIndex(1) + + processed_partial_withdrawals_count += 1 + + # Sweep for remaining. + bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) + for _ in range(bound): + validator = state.validators[validator_index] + # [Modified in Electra:EIP7251] + total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index) + balance = state.balances[validator_index] - total_withdrawn + if is_fully_withdrawable_validator(validator, balance, epoch): + withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=balance, + ) + ) + withdrawal_index += WithdrawalIndex(1) + elif is_partially_withdrawable_validator(validator, balance): + withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + # [Modified in Electra:EIP7251] + amount=balance - get_max_effective_balance(validator), + ) + ) + withdrawal_index += WithdrawalIndex(1) + if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + break + validator_index = ValidatorIndex((validator_index + 1) % len(state.validators)) + return withdrawals, processed_partial_withdrawals_count + + +- name: get_expected_withdrawals#gloas + sources: [] + spec: | + + def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64, uint64]: + epoch = get_current_epoch(state) + withdrawal_index = state.next_withdrawal_index + validator_index = state.next_withdrawal_validator_index + withdrawals: List[Withdrawal] = [] + processed_partial_withdrawals_count = 0 + processed_builder_withdrawals_count = 0 + + # [New in Gloas:EIP7732] + # 
Sweep for builder payments + for withdrawal in state.builder_pending_withdrawals: + if ( + withdrawal.withdrawable_epoch > epoch + or len(withdrawals) + 1 == MAX_WITHDRAWALS_PER_PAYLOAD + ): + break + if is_builder_payment_withdrawable(state, withdrawal): + total_withdrawn = sum( + w.amount for w in withdrawals if w.validator_index == withdrawal.builder_index + ) + balance = state.balances[withdrawal.builder_index] - total_withdrawn + builder = state.validators[withdrawal.builder_index] + if builder.slashed: + withdrawable_balance = min(balance, withdrawal.amount) + elif balance > MIN_ACTIVATION_BALANCE: + withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount) + else: + withdrawable_balance = 0 + + if withdrawable_balance > 0: + withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=withdrawal.builder_index, + address=withdrawal.fee_recipient, + amount=withdrawable_balance, + ) + ) + withdrawal_index += WithdrawalIndex(1) + processed_builder_withdrawals_count += 1 + + # Sweep for pending partial withdrawals + bound = min( + len(withdrawals) + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP, + MAX_WITHDRAWALS_PER_PAYLOAD - 1, + ) + for withdrawal in state.pending_partial_withdrawals: + if withdrawal.withdrawable_epoch > epoch or len(withdrawals) == bound: + break + + validator = state.validators[withdrawal.validator_index] + has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE + total_withdrawn = sum( + w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index + ) + balance = state.balances[withdrawal.validator_index] - total_withdrawn + has_excess_balance = balance > MIN_ACTIVATION_BALANCE + if ( + validator.exit_epoch == FAR_FUTURE_EPOCH + and has_sufficient_effective_balance + and has_excess_balance + ): + withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount) + withdrawals.append( + Withdrawal( + index=withdrawal_index, + 
validator_index=withdrawal.validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=withdrawable_balance, + ) + ) + withdrawal_index += WithdrawalIndex(1) + + processed_partial_withdrawals_count += 1 + + # Sweep for remaining. + bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) + for _ in range(bound): + validator = state.validators[validator_index] + total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index) + balance = state.balances[validator_index] - total_withdrawn + if is_fully_withdrawable_validator(validator, balance, epoch): + withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=balance, + ) + ) + withdrawal_index += WithdrawalIndex(1) + elif is_partially_withdrawable_validator(validator, balance): + withdrawals.append( + Withdrawal( + index=withdrawal_index, + validator_index=validator_index, + address=ExecutionAddress(validator.withdrawal_credentials[12:]), + amount=balance - get_max_effective_balance(validator), + ) + ) + withdrawal_index += WithdrawalIndex(1) + if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + break + validator_index = ValidatorIndex((validator_index + 1) % len(state.validators)) + return ( + withdrawals, + processed_builder_withdrawals_count, + processed_partial_withdrawals_count, + ) + + +- name: get_filtered_block_tree#phase0 + sources: [] + spec: | + + def get_filtered_block_tree(store: Store) -> Dict[Root, BeaconBlock]: + """ + Retrieve a filtered block tree from ``store``, only returning branches + whose leaf state's justified/finalized info agrees with that in ``store``. 
+ """ + base = store.justified_checkpoint.root + blocks: Dict[Root, BeaconBlock] = {} + filter_block_tree(store, base, blocks) + return blocks + + +- name: get_finality_delay#phase0 + sources: [] + spec: | + + def get_finality_delay(state: BeaconState) -> uint64: + return get_previous_epoch(state) - state.finalized_checkpoint.epoch + + +- name: get_flag_index_deltas#altair + sources: [] + spec: | + + def get_flag_index_deltas( + state: BeaconState, flag_index: int + ) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return the deltas for a given ``flag_index`` by scanning through the participation flags. + """ + rewards = [Gwei(0)] * len(state.validators) + penalties = [Gwei(0)] * len(state.validators) + previous_epoch = get_previous_epoch(state) + unslashed_participating_indices = get_unslashed_participating_indices( + state, flag_index, previous_epoch + ) + weight = PARTICIPATION_FLAG_WEIGHTS[flag_index] + unslashed_participating_balance = get_total_balance(state, unslashed_participating_indices) + unslashed_participating_increments = ( + unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT + ) + active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT + for index in get_eligible_validator_indices(state): + base_reward = get_base_reward(state, index) + if index in unslashed_participating_indices: + if not is_in_inactivity_leak(state): + reward_numerator = base_reward * weight * unslashed_participating_increments + rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR)) + elif flag_index != TIMELY_HEAD_FLAG_INDEX: + penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR) + return rewards, penalties + + +- name: get_forkchoice_store#phase0 + sources: + - file: beacon_node/beacon_chain/src/beacon_fork_choice_store.rs + search: pub fn get_forkchoice_store( + spec: | + + def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store: + assert anchor_block.state_root 
== hash_tree_root(anchor_state) + anchor_root = hash_tree_root(anchor_block) + anchor_epoch = get_current_epoch(anchor_state) + justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) + finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) + proposer_boost_root = Root() + return Store( + time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot), + genesis_time=anchor_state.genesis_time, + justified_checkpoint=justified_checkpoint, + finalized_checkpoint=finalized_checkpoint, + unrealized_justified_checkpoint=justified_checkpoint, + unrealized_finalized_checkpoint=finalized_checkpoint, + proposer_boost_root=proposer_boost_root, + equivocating_indices=set(), + blocks={anchor_root: copy(anchor_block)}, + block_states={anchor_root: copy(anchor_state)}, + checkpoint_states={justified_checkpoint: copy(anchor_state)}, + unrealized_justifications={anchor_root: justified_checkpoint}, + ) + + +- name: get_forkchoice_store#gloas + sources: [] + spec: | + + def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store: + assert anchor_block.state_root == hash_tree_root(anchor_state) + anchor_root = hash_tree_root(anchor_block) + anchor_epoch = get_current_epoch(anchor_state) + justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) + finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) + proposer_boost_root = Root() + return Store( + time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot), + genesis_time=anchor_state.genesis_time, + justified_checkpoint=justified_checkpoint, + finalized_checkpoint=finalized_checkpoint, + unrealized_justified_checkpoint=justified_checkpoint, + unrealized_finalized_checkpoint=finalized_checkpoint, + proposer_boost_root=proposer_boost_root, + equivocating_indices=set(), + blocks={anchor_root: copy(anchor_block)}, + block_states={anchor_root: copy(anchor_state)}, + checkpoint_states={justified_checkpoint: 
copy(anchor_state)}, + unrealized_justifications={anchor_root: justified_checkpoint}, + # [New in Gloas:EIP7732] + execution_payload_states={anchor_root: copy(anchor_state)}, + ptc_vote={anchor_root: Vector[boolean, PTC_SIZE]()}, + ) + + +- name: get_head#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn get_head( + spec: | + + def get_head(store: Store) -> Root: + # Get filtered block tree that only includes viable branches + blocks = get_filtered_block_tree(store) + # Execute the LMD-GHOST fork choice + head = store.justified_checkpoint.root + while True: + children = [root for root in blocks.keys() if blocks[root].parent_root == head] + if len(children) == 0: + return head + # Sort by latest attesting balance with ties broken lexicographically + # Ties broken by favoring block with lexicographically higher root + head = max(children, key=lambda root: (get_weight(store, root), root)) + + +- name: get_head#gloas + sources: [] + spec: | + + def get_head(store: Store) -> ForkChoiceNode: + # Get filtered block tree that only includes viable branches + blocks = get_filtered_block_tree(store) + # Execute the LMD-GHOST fork-choice + head = ForkChoiceNode( + root=store.justified_checkpoint.root, + payload_status=PAYLOAD_STATUS_PENDING, + ) + + while True: + children = get_node_children(store, blocks, head) + if len(children) == 0: + return head + # Sort by latest attesting balance with ties broken lexicographically + head = max( + children, + key=lambda child: ( + get_weight(store, child), + child.root, + get_payload_status_tiebreaker(store, child), + ), + ) + + +- name: get_head_deltas#phase0 + sources: [] + spec: | + + def get_head_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return attester micro-rewards/penalties for head-vote for each validator. 
+ """ + matching_head_attestations = get_matching_head_attestations(state, get_previous_epoch(state)) + return get_attestation_component_deltas(state, matching_head_attestations) + + +- name: get_inactivity_penalty_deltas#phase0 + sources: [] + spec: | + + def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return inactivity reward/penalty deltas for each validator. + """ + penalties = [Gwei(0) for _ in range(len(state.validators))] + if is_in_inactivity_leak(state): + matching_target_attestations = get_matching_target_attestations( + state, get_previous_epoch(state) + ) + matching_target_attesting_indices = get_unslashed_attesting_indices( + state, matching_target_attestations + ) + for index in get_eligible_validator_indices(state): + # If validator is performing optimally this cancels all rewards for a neutral balance + base_reward = get_base_reward(state, index) + penalties[index] += Gwei( + BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index) + ) + if index not in matching_target_attesting_indices: + effective_balance = state.validators[index].effective_balance + penalties[index] += Gwei( + effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT + ) + + # No rewards associated with inactivity penalties + rewards = [Gwei(0) for _ in range(len(state.validators))] + return rewards, penalties + + +- name: get_inactivity_penalty_deltas#altair + sources: [] + spec: | + + def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return the inactivity penalty deltas by considering timely target participation flags and inactivity scores. 
+ """ + rewards = [Gwei(0) for _ in range(len(state.validators))] + penalties = [Gwei(0) for _ in range(len(state.validators))] + previous_epoch = get_previous_epoch(state) + matching_target_indices = get_unslashed_participating_indices( + state, TIMELY_TARGET_FLAG_INDEX, previous_epoch + ) + for index in get_eligible_validator_indices(state): + if index not in matching_target_indices: + penalty_numerator = ( + state.validators[index].effective_balance * state.inactivity_scores[index] + ) + penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR + penalties[index] += Gwei(penalty_numerator // penalty_denominator) + return rewards, penalties + + +- name: get_inactivity_penalty_deltas#bellatrix + sources: [] + spec: | + + def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return the inactivity penalty deltas by considering timely target participation flags and inactivity scores. + """ + rewards = [Gwei(0) for _ in range(len(state.validators))] + penalties = [Gwei(0) for _ in range(len(state.validators))] + previous_epoch = get_previous_epoch(state) + matching_target_indices = get_unslashed_participating_indices( + state, TIMELY_TARGET_FLAG_INDEX, previous_epoch + ) + for index in get_eligible_validator_indices(state): + if index not in matching_target_indices: + penalty_numerator = ( + state.validators[index].effective_balance * state.inactivity_scores[index] + ) + # [Modified in Bellatrix] + penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_BELLATRIX + penalties[index] += Gwei(penalty_numerator // penalty_denominator) + return rewards, penalties + + +- name: get_inclusion_delay_deltas#phase0 + sources: [] + spec: | + + def get_inclusion_delay_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return proposer and inclusion delay micro-rewards/penalties for each validator. 
+ """ + rewards = [Gwei(0) for _ in range(len(state.validators))] + matching_source_attestations = get_matching_source_attestations( + state, get_previous_epoch(state) + ) + for index in get_unslashed_attesting_indices(state, matching_source_attestations): + attestation = min( + [a for a in matching_source_attestations if index in get_attesting_indices(state, a)], + key=lambda a: a.inclusion_delay, + ) + rewards[attestation.proposer_index] += get_proposer_reward(state, index) + max_attester_reward = Gwei( + get_base_reward(state, index) - get_proposer_reward(state, index) + ) + rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay) + + # No penalties associated with inclusion delay + penalties = [Gwei(0) for _ in range(len(state.validators))] + return rewards, penalties + + +- name: get_index_for_new_validator#altair + sources: [] + spec: | + + def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex: + return ValidatorIndex(len(state.validators)) + + +- name: get_indexed_attestation#phase0 + sources: + - file: consensus/state_processing/src/common/get_attesting_indices.rs + search: 'pub fn get_indexed_attestation\((\n|.)*?AttestationBase' + regex: true + - file: consensus/state_processing/src/common/get_attesting_indices.rs + search: 'pub fn get_indexed_attestation\((\n|.)*?AttestationElectra' + regex: true + spec: | + + def get_indexed_attestation(state: BeaconState, attestation: Attestation) -> IndexedAttestation: + """ + Return the indexed attestation corresponding to ``attestation``. 
+ """ + attesting_indices = get_attesting_indices(state, attestation) + + return IndexedAttestation( + attesting_indices=sorted(attesting_indices), + data=attestation.data, + signature=attestation.signature, + ) + + +- name: get_indexed_payload_attestation#gloas + sources: [] + spec: | + + def get_indexed_payload_attestation( + state: BeaconState, slot: Slot, payload_attestation: PayloadAttestation + ) -> IndexedPayloadAttestation: + """ + Return the indexed payload attestation corresponding to ``payload_attestation``. + """ + ptc = get_ptc(state, slot) + bits = payload_attestation.aggregation_bits + attesting_indices = [index for i, index in enumerate(ptc) if bits[i]] + + return IndexedPayloadAttestation( + attesting_indices=sorted(attesting_indices), + data=payload_attestation.data, + signature=payload_attestation.signature, + ) + + +- name: get_lc_execution_root#capella + sources: [] + spec: | + + def get_lc_execution_root(header: LightClientHeader) -> Root: + epoch = compute_epoch_at_slot(header.beacon.slot) + + if epoch >= CAPELLA_FORK_EPOCH: + return hash_tree_root(header.execution) + + return Root() + + +- name: get_lc_execution_root#deneb + sources: [] + spec: | + + def get_lc_execution_root(header: LightClientHeader) -> Root: + epoch = compute_epoch_at_slot(header.beacon.slot) + + # [New in Deneb] + if epoch >= DENEB_FORK_EPOCH: + return hash_tree_root(header.execution) + + # [Modified in Deneb] + if epoch >= CAPELLA_FORK_EPOCH: + execution_header = capella.ExecutionPayloadHeader( + parent_hash=header.execution.parent_hash, + fee_recipient=header.execution.fee_recipient, + state_root=header.execution.state_root, + receipts_root=header.execution.receipts_root, + logs_bloom=header.execution.logs_bloom, + prev_randao=header.execution.prev_randao, + block_number=header.execution.block_number, + gas_limit=header.execution.gas_limit, + gas_used=header.execution.gas_used, + timestamp=header.execution.timestamp, + extra_data=header.execution.extra_data, + 
base_fee_per_gas=header.execution.base_fee_per_gas, + block_hash=header.execution.block_hash, + transactions_root=header.execution.transactions_root, + withdrawals_root=header.execution.withdrawals_root, + ) + return hash_tree_root(execution_header) + + return Root() + + +- name: get_lc_execution_root#electra + sources: [] + spec: | + + def get_lc_execution_root(header: LightClientHeader) -> Root: + epoch = compute_epoch_at_slot(header.beacon.slot) + + # [New in Electra] + if epoch >= ELECTRA_FORK_EPOCH: + return hash_tree_root(header.execution) + + # [Modified in Electra] + if epoch >= DENEB_FORK_EPOCH: + execution_header = deneb.ExecutionPayloadHeader( + parent_hash=header.execution.parent_hash, + fee_recipient=header.execution.fee_recipient, + state_root=header.execution.state_root, + receipts_root=header.execution.receipts_root, + logs_bloom=header.execution.logs_bloom, + prev_randao=header.execution.prev_randao, + block_number=header.execution.block_number, + gas_limit=header.execution.gas_limit, + gas_used=header.execution.gas_used, + timestamp=header.execution.timestamp, + extra_data=header.execution.extra_data, + base_fee_per_gas=header.execution.base_fee_per_gas, + block_hash=header.execution.block_hash, + transactions_root=header.execution.transactions_root, + withdrawals_root=header.execution.withdrawals_root, + blob_gas_used=header.execution.blob_gas_used, + excess_blob_gas=header.execution.excess_blob_gas, + ) + return hash_tree_root(execution_header) + + if epoch >= CAPELLA_FORK_EPOCH: + execution_header = capella.ExecutionPayloadHeader( + parent_hash=header.execution.parent_hash, + fee_recipient=header.execution.fee_recipient, + state_root=header.execution.state_root, + receipts_root=header.execution.receipts_root, + logs_bloom=header.execution.logs_bloom, + prev_randao=header.execution.prev_randao, + block_number=header.execution.block_number, + gas_limit=header.execution.gas_limit, + gas_used=header.execution.gas_used, + 
timestamp=header.execution.timestamp, + extra_data=header.execution.extra_data, + base_fee_per_gas=header.execution.base_fee_per_gas, + block_hash=header.execution.block_hash, + transactions_root=header.execution.transactions_root, + withdrawals_root=header.execution.withdrawals_root, + ) + return hash_tree_root(execution_header) + + return Root() + + +- name: get_matching_head_attestations#phase0 + sources: [] + spec: | + + def get_matching_head_attestations( + state: BeaconState, epoch: Epoch + ) -> Sequence[PendingAttestation]: + return [ + a + for a in get_matching_target_attestations(state, epoch) + if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot) + ] + + +- name: get_matching_source_attestations#phase0 + sources: [] + spec: | + + def get_matching_source_attestations( + state: BeaconState, epoch: Epoch + ) -> Sequence[PendingAttestation]: + assert epoch in (get_previous_epoch(state), get_current_epoch(state)) + return ( + state.current_epoch_attestations + if epoch == get_current_epoch(state) + else state.previous_epoch_attestations + ) + + +- name: get_matching_target_attestations#phase0 + sources: [] + spec: | + + def get_matching_target_attestations( + state: BeaconState, epoch: Epoch + ) -> Sequence[PendingAttestation]: + return [ + a + for a in get_matching_source_attestations(state, epoch) + if a.data.target.root == get_block_root(state, epoch) + ] + + +- name: get_max_effective_balance#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn get_max_effective_balance( + spec: | + + def get_max_effective_balance(validator: Validator) -> Gwei: + """ + Get max effective balance for ``validator``. 
+ """ + if has_compounding_withdrawal_credential(validator): + return MAX_EFFECTIVE_BALANCE_ELECTRA + else: + return MIN_ACTIVATION_BALANCE + + +- name: get_next_sync_committee#altair + sources: [] + spec: | + + def get_next_sync_committee(state: BeaconState) -> SyncCommittee: + """ + Return the next sync committee, with possible pubkey duplicates. + """ + indices = get_next_sync_committee_indices(state) + pubkeys = [state.validators[index].pubkey for index in indices] + aggregate_pubkey = eth_aggregate_pubkeys(pubkeys) + return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) + + +- name: get_next_sync_committee_indices#altair + sources: + - file: consensus/types/src/state/beacon_state.rs + search: fn get_next_sync_committee_indices( + spec: | + + def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: + """ + Return the sync committee indices, with possible duplicates, for the next sync committee. + """ + epoch = Epoch(get_current_epoch(state) + 1) + + MAX_RANDOM_BYTE = 2**8 - 1 + active_validator_indices = get_active_validator_indices(state, epoch) + active_validator_count = uint64(len(active_validator_indices)) + seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) + i = 0 + sync_committee_indices: List[ValidatorIndex] = [] + while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE: + shuffled_index = compute_shuffled_index( + uint64(i % active_validator_count), active_validator_count, seed + ) + candidate_index = active_validator_indices[shuffled_index] + random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] + effective_balance = state.validators[candidate_index].effective_balance + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: + sync_committee_indices.append(candidate_index) + i += 1 + return sync_committee_indices + + +- name: get_next_sync_committee_indices#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: fn 
get_next_sync_committee_indices( + spec: | + + def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: + """ + Return the sync committee indices, with possible duplicates, for the next sync committee. + """ + epoch = Epoch(get_current_epoch(state) + 1) + + # [Modified in Electra] + MAX_RANDOM_VALUE = 2**16 - 1 + active_validator_indices = get_active_validator_indices(state, epoch) + active_validator_count = uint64(len(active_validator_indices)) + seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) + i = uint64(0) + sync_committee_indices: List[ValidatorIndex] = [] + while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE: + shuffled_index = compute_shuffled_index( + uint64(i % active_validator_count), active_validator_count, seed + ) + candidate_index = active_validator_indices[shuffled_index] + # [Modified in Electra] + random_bytes = hash(seed + uint_to_bytes(i // 16)) + offset = i % 16 * 2 + random_value = bytes_to_uint64(random_bytes[offset : offset + 2]) + effective_balance = state.validators[candidate_index].effective_balance + # [Modified in Electra:EIP7251] + if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value: + sync_committee_indices.append(candidate_index) + i += 1 + return sync_committee_indices + + +- name: get_next_sync_committee_indices#gloas + sources: [] + spec: | + + def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: + """ + Return the sync committee indices, with possible duplicates, for the next sync committee. 
+ """ + epoch = Epoch(get_current_epoch(state) + 1) + seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) + indices = get_active_validator_indices(state, epoch) + return compute_balance_weighted_selection( + state, indices, seed, size=SYNC_COMMITTEE_SIZE, shuffle_indices=True + ) + + +- name: get_node_children#gloas + sources: [] + spec: | + + def get_node_children( + store: Store, blocks: Dict[Root, BeaconBlock], node: ForkChoiceNode + ) -> Sequence[ForkChoiceNode]: + if node.payload_status == PAYLOAD_STATUS_PENDING: + children = [ForkChoiceNode(root=node.root, payload_status=PAYLOAD_STATUS_EMPTY)] + if node.root in store.execution_payload_states: + children.append(ForkChoiceNode(root=node.root, payload_status=PAYLOAD_STATUS_FULL)) + return children + else: + return [ + ForkChoiceNode(root=root, payload_status=PAYLOAD_STATUS_PENDING) + for root in blocks.keys() + if ( + blocks[root].parent_root == node.root + and node.payload_status == get_parent_payload_status(store, blocks[root]) + ) + ] + + +- name: get_parent_payload_status#gloas + sources: [] + spec: | + + def get_parent_payload_status(store: Store, block: BeaconBlock) -> PayloadStatus: + parent = store.blocks[block.parent_root] + parent_block_hash = block.body.signed_execution_payload_bid.message.parent_block_hash + message_block_hash = parent.body.signed_execution_payload_bid.message.block_hash + return PAYLOAD_STATUS_FULL if parent_block_hash == message_block_hash else PAYLOAD_STATUS_EMPTY + + +- name: get_payload_attestation_due_ms#gloas + sources: [] + spec: | + + def get_payload_attestation_due_ms(epoch: Epoch) -> uint64: + return get_slot_component_duration_ms(PAYLOAD_ATTESTATION_DUE_BPS) + + +- name: get_payload_attestation_message_signature#gloas + sources: [] + spec: | + + def get_payload_attestation_message_signature( + state: BeaconState, attestation: PayloadAttestationMessage, privkey: int + ) -> BLSSignature: + domain = get_domain(state, DOMAIN_PTC_ATTESTER, 
compute_epoch_at_slot(attestation.data.slot)) + signing_root = compute_signing_root(attestation.data, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_payload_status_tiebreaker#gloas + sources: [] + spec: | + + def get_payload_status_tiebreaker(store: Store, node: ForkChoiceNode) -> uint8: + if node.payload_status == PAYLOAD_STATUS_PENDING or store.blocks[ + node.root + ].slot + 1 != get_current_slot(store): + return node.payload_status + else: + # To decide on a payload from the previous slot, choose + # between FULL and EMPTY based on `should_extend_payload` + if node.payload_status == PAYLOAD_STATUS_EMPTY: + return 1 + else: + return 2 if should_extend_payload(store, node.root) else 0 + + +- name: get_pending_balance_to_withdraw#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_pending_balance_to_withdraw( + spec: | + + def get_pending_balance_to_withdraw(state: BeaconState, validator_index: ValidatorIndex) -> Gwei: + return sum( + withdrawal.amount + for withdrawal in state.pending_partial_withdrawals + if withdrawal.validator_index == validator_index + ) + + +- name: get_pending_balance_to_withdraw#gloas + sources: [] + spec: | + + def get_pending_balance_to_withdraw(state: BeaconState, validator_index: ValidatorIndex) -> Gwei: + return ( + sum( + withdrawal.amount + for withdrawal in state.pending_partial_withdrawals + if withdrawal.validator_index == validator_index + ) + # [New in Gloas:EIP7732] + + sum( + withdrawal.amount + for withdrawal in state.builder_pending_withdrawals + if withdrawal.builder_index == validator_index + ) + # [New in Gloas:EIP7732] + + sum( + payment.withdrawal.amount + for payment in state.builder_pending_payments + if payment.withdrawal.builder_index == validator_index + ) + ) + + +- name: get_pow_block_at_terminal_total_difficulty#bellatrix + sources: [] + spec: | + + def get_pow_block_at_terminal_total_difficulty( + pow_chain: Dict[Hash32, PowBlock], + ) -> 
Optional[PowBlock]:
+ # `pow_chain` abstractly represents all blocks in the PoW chain
+ for block in pow_chain.values():
+ block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
+ if block_reached_ttd:
+ # If genesis block, no parent exists so reaching TTD alone qualifies as valid terminal block
+ if block.parent_hash == Hash32():
+ return block
+ parent = pow_chain[block.parent_hash]
+ parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
+ if not parent_reached_ttd:
+ return block
+
+ return None
+
+
+- name: get_previous_epoch#phase0
+ sources:
+ - file: consensus/types/src/state/beacon_state.rs
+ search: pub fn previous_epoch(
+ spec: |
+
+ def get_previous_epoch(state: BeaconState) -> Epoch:
+ """
+ Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
+ """
+ current_epoch = get_current_epoch(state)
+ return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1)
+
+
+- name: get_proposer_head#phase0
+ sources: []
+ spec: |
+
+ def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
+ head_block = store.blocks[head_root]
+ parent_root = head_block.parent_root
+ parent_block = store.blocks[parent_root]
+
+ # Only re-org the head block if it arrived later than the attestation deadline.
+ head_late = is_head_late(store, head_root)
+
+ # Do not re-org on an epoch boundary where the proposer shuffling could change.
+ shuffling_stable = is_shuffling_stable(slot)
+
+ # Ensure that the FFG information of the new head will be competitive with the current head.
+ ffg_competitive = is_ffg_competitive(store, head_root, parent_root)
+
+ # Do not re-org if the chain is not finalizing with acceptable frequency.
+ finalization_ok = is_finalization_ok(store, slot)
+
+ # Only re-org if we are proposing on-time.
+ proposing_on_time = is_proposing_on_time(store)
+
+ # Only re-org a single slot at most. 
+ parent_slot_ok = parent_block.slot + 1 == head_block.slot + current_time_ok = head_block.slot + 1 == slot + single_slot_reorg = parent_slot_ok and current_time_ok + + # Check that the head has few enough votes to be overpowered by our proposer boost. + assert store.proposer_boost_root != head_root # ensure boost has worn off + head_weak = is_head_weak(store, head_root) + + # Check that the missing votes are assigned to the parent and not being hoarded. + parent_strong = is_parent_strong(store, parent_root) + + if all( + [ + head_late, + shuffling_stable, + ffg_competitive, + finalization_ok, + proposing_on_time, + single_slot_reorg, + head_weak, + parent_strong, + ] + ): + # We can re-org the current head by building upon its parent block. + return parent_root + else: + return head_root + + +- name: get_proposer_reorg_cutoff_ms#phase0 + sources: [] + spec: | + + def get_proposer_reorg_cutoff_ms(epoch: Epoch) -> uint64: + return get_slot_component_duration_ms(PROPOSER_REORG_CUTOFF_BPS) + + +- name: get_proposer_reward#phase0 + sources: [] + spec: | + + def get_proposer_reward(state: BeaconState, attesting_index: ValidatorIndex) -> Gwei: + return Gwei(get_base_reward(state, attesting_index) // PROPOSER_REWARD_QUOTIENT) + + +- name: get_proposer_score#phase0 + sources: [] + spec: | + + def get_proposer_score(store: Store) -> Gwei: + justified_checkpoint_state = store.checkpoint_states[store.justified_checkpoint] + committee_weight = get_total_active_balance(justified_checkpoint_state) // SLOTS_PER_EPOCH + return (committee_weight * PROPOSER_SCORE_BOOST) // 100 + + +- name: get_ptc#gloas + sources: [] + spec: | + + def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]: + """ + Get the payload timeliness committee for the given ``slot``. 
+ """ + epoch = compute_epoch_at_slot(slot) + seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot)) + indices: List[ValidatorIndex] = [] + # Concatenate all committees for this slot in order + committees_per_slot = get_committee_count_per_slot(state, epoch) + for i in range(committees_per_slot): + committee = get_beacon_committee(state, slot, CommitteeIndex(i)) + indices.extend(committee) + return compute_balance_weighted_selection( + state, indices, seed, size=PTC_SIZE, shuffle_indices=False + ) + + +- name: get_ptc_assignment#gloas + sources: [] + spec: | + + def get_ptc_assignment( + state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex + ) -> Optional[Slot]: + """ + Returns the slot during the requested epoch in which the validator with + index `validator_index` is a member of the PTC. Returns None if no + assignment is found. + """ + next_epoch = Epoch(get_current_epoch(state) + 1) + assert epoch <= next_epoch + + start_slot = compute_start_slot_at_epoch(epoch) + for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): + if validator_index in get_ptc(state, Slot(slot)): + return Slot(slot) + return None + + +- name: get_randao_mix#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_randao_mix( + spec: | + + def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32: + """ + Return the randao mix at a recent ``epoch``. 
+ """ + return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] + + +- name: get_safety_threshold#altair + sources: [] + spec: | + + def get_safety_threshold(store: LightClientStore) -> uint64: + return ( + max( + store.previous_max_active_participants, + store.current_max_active_participants, + ) + // 2 + ) + + +- name: get_seed#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_seed( + spec: | + + def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32: + """ + Return the seed at ``epoch``. + """ + mix = get_randao_mix( + state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1) + ) # Avoid underflow + return hash(domain_type + uint_to_bytes(epoch) + mix) + + +- name: get_slot_component_duration_ms#phase0 + sources: [] + spec: | + + def get_slot_component_duration_ms(basis_points: uint64) -> uint64: + """ + Calculate the duration of a slot component in milliseconds. + """ + return basis_points * SLOT_DURATION_MS // BASIS_POINTS + + +- name: get_slot_signature#phase0 + sources: [] + spec: | + + def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature: + domain = get_domain(state, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot)) + signing_root = compute_signing_root(slot, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_slots_since_genesis#phase0 + sources: [] + spec: | + + def get_slots_since_genesis(store: Store) -> int: + return (store.time - store.genesis_time) // SECONDS_PER_SLOT + + +- name: get_source_deltas#phase0 + sources: [] + spec: | + + def get_source_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return attester micro-rewards/penalties for source-vote for each validator. 
+ """ + matching_source_attestations = get_matching_source_attestations( + state, get_previous_epoch(state) + ) + return get_attestation_component_deltas(state, matching_source_attestations) + + +- name: get_subtree_index#altair + sources: [] + spec: | + + def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64: + return uint64(generalized_index % 2 ** (floorlog2(generalized_index))) + + +- name: get_sync_committee_message#altair + sources: [] + spec: | + + def get_sync_committee_message( + state: BeaconState, block_root: Root, validator_index: ValidatorIndex, privkey: int + ) -> SyncCommitteeMessage: + epoch = get_current_epoch(state) + domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch) + signing_root = compute_signing_root(block_root, domain) + signature = bls.Sign(privkey, signing_root) + + return SyncCommitteeMessage( + slot=state.slot, + beacon_block_root=block_root, + validator_index=validator_index, + signature=signature, + ) + + +- name: get_sync_committee_selection_proof#altair + sources: [] + spec: | + + def get_sync_committee_selection_proof( + state: BeaconState, slot: Slot, subcommittee_index: uint64, privkey: int + ) -> BLSSignature: + domain = get_domain(state, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, compute_epoch_at_slot(slot)) + signing_data = SyncAggregatorSelectionData( + slot=slot, + subcommittee_index=subcommittee_index, + ) + signing_root = compute_signing_root(signing_data, domain) + return bls.Sign(privkey, signing_root) + + +- name: get_sync_message_due_ms#altair + sources: [] + spec: | + + def get_sync_message_due_ms(epoch: Epoch) -> uint64: + return get_slot_component_duration_ms(SYNC_MESSAGE_DUE_BPS) + + +- name: get_sync_message_due_ms#gloas + sources: [] + spec: | + + def get_sync_message_due_ms(epoch: Epoch) -> uint64: + # [New in Gloas] + if epoch >= GLOAS_FORK_EPOCH: + return get_slot_component_duration_ms(SYNC_MESSAGE_DUE_BPS_GLOAS) + return get_slot_component_duration_ms(SYNC_MESSAGE_DUE_BPS) + + +- name: 
get_sync_subcommittee_pubkeys#altair + sources: [] + spec: | + + def get_sync_subcommittee_pubkeys( + state: BeaconState, subcommittee_index: uint64 + ) -> Sequence[BLSPubkey]: + # Committees assigned to `slot` sign for `slot - 1` + # This creates the exceptional logic below when transitioning between sync committee periods + next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) + if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period( + next_slot_epoch + ): + sync_committee = state.current_sync_committee + else: + sync_committee = state.next_sync_committee + + # Return pubkeys for the subcommittee index + sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT + i = subcommittee_index * sync_subcommittee_size + return sync_committee.pubkeys[i : i + sync_subcommittee_size] + + +- name: get_target_deltas#phase0 + sources: [] + spec: | + + def get_target_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: + """ + Return attester micro-rewards/penalties for target-vote for each validator. 
+ """ + matching_target_attestations = get_matching_target_attestations( + state, get_previous_epoch(state) + ) + return get_attestation_component_deltas(state, matching_target_attestations) + + +- name: get_terminal_pow_block#bellatrix + sources: [] + spec: | + + def get_terminal_pow_block(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]: + if TERMINAL_BLOCK_HASH != Hash32(): + # Terminal block hash override takes precedence over terminal total difficulty + if TERMINAL_BLOCK_HASH in pow_chain: + return pow_chain[TERMINAL_BLOCK_HASH] + else: + return None + + return get_pow_block_at_terminal_total_difficulty(pow_chain) + + +- name: get_total_active_balance#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_total_active_balance( + spec: | + + def get_total_active_balance(state: BeaconState) -> Gwei: + """ + Return the combined effective balance of the active validators. + Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. + """ + return get_total_balance( + state, set(get_active_validator_indices(state, get_current_epoch(state))) + ) + + +- name: get_total_balance#phase0 + sources: + - file: beacon_node/beacon_chain/src/validator_monitor.rs + search: fn get_total_balance( + spec: | + + def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: + """ + Return the combined effective balance of the ``indices``. + ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. + Math safe up to ~10B ETH, after which this overflows uint64. 
+ """ + return Gwei( + max( + EFFECTIVE_BALANCE_INCREMENT, + sum([state.validators[index].effective_balance for index in indices]), + ) + ) + + +- name: get_unslashed_attesting_indices#phase0 + sources: [] + spec: | + + def get_unslashed_attesting_indices( + state: BeaconState, attestations: Sequence[PendingAttestation] + ) -> Set[ValidatorIndex]: + output: Set[ValidatorIndex] = set() + for a in attestations: + output = output.union(get_attesting_indices(state, a)) + return set(filter(lambda index: not state.validators[index].slashed, output)) + + +- name: get_unslashed_participating_indices#altair + sources: [] + spec: | + + def get_unslashed_participating_indices( + state: BeaconState, flag_index: int, epoch: Epoch + ) -> Set[ValidatorIndex]: + """ + Return the set of validator indices that are both active and unslashed for the given ``flag_index`` and ``epoch``. + """ + assert epoch in (get_previous_epoch(state), get_current_epoch(state)) + if epoch == get_current_epoch(state): + epoch_participation = state.current_epoch_participation + else: + epoch_participation = state.previous_epoch_participation + active_validator_indices = get_active_validator_indices(state, epoch) + participating_indices = [ + i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index) + ] + return set(filter(lambda index: not state.validators[index].slashed, participating_indices)) + + +- name: get_validator_activation_churn_limit#deneb + sources: [] + spec: | + + def get_validator_activation_churn_limit(state: BeaconState) -> uint64: + """ + Return the validator activation churn limit for the current epoch. 
+ """ + return min(MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT, get_validator_churn_limit(state)) + + +- name: get_validator_churn_limit#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn get_validator_churn_limit( + spec: | + + def get_validator_churn_limit(state: BeaconState) -> uint64: + """ + Return the validator churn limit for the current epoch. + """ + active_validator_indices = get_active_validator_indices(state, get_current_epoch(state)) + return max( + MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT + ) + + +- name: get_validator_from_deposit#phase0 + sources: [] + spec: | + + def get_validator_from_deposit( + pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 + ) -> Validator: + effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + + return Validator( + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + effective_balance=effective_balance, + slashed=False, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + ) + + +- name: get_validator_from_deposit#electra + sources: [] + spec: | + + def get_validator_from_deposit( + pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 + ) -> Validator: + validator = Validator( + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + effective_balance=Gwei(0), + slashed=False, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + ) + + # [Modified in Electra:EIP7251] + max_effective_balance = get_max_effective_balance(validator) + validator.effective_balance = min( + amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance + ) + + return validator + + +- name: get_validators_custody_requirement#fulu + sources: + - file: 
beacon_node/beacon_chain/src/custody_context.rs + search: fn get_validators_custody_requirement( + spec: | + + def get_validators_custody_requirement( + state: BeaconState, validator_indices: Sequence[ValidatorIndex] + ) -> uint64: + total_node_balance = sum( + state.validators[index].effective_balance for index in validator_indices + ) + count = total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP + return min(max(count, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS) + + +- name: get_voting_source#phase0 + sources: [] + spec: | + + def get_voting_source(store: Store, block_root: Root) -> Checkpoint: + """ + Compute the voting source checkpoint in event that block with root ``block_root`` is the head block + """ + block = store.blocks[block_root] + current_epoch = get_current_store_epoch(store) + block_epoch = compute_epoch_at_slot(block.slot) + if current_epoch > block_epoch: + # The block is from a prior epoch, the voting source will be pulled-up + return store.unrealized_justifications[block_root] + else: + # The block is not from a prior epoch, therefore the voting source is not pulled up + head_state = store.block_states[block_root] + return head_state.current_justified_checkpoint + + +- name: get_weight#phase0 + sources: [] + spec: | + + def get_weight(store: Store, root: Root) -> Gwei: + state = store.checkpoint_states[store.justified_checkpoint] + unslashed_and_active_indices = [ + i + for i in get_active_validator_indices(state, get_current_epoch(state)) + if not state.validators[i].slashed + ] + attestation_score = Gwei( + sum( + state.validators[i].effective_balance + for i in unslashed_and_active_indices + if ( + i in store.latest_messages + and i not in store.equivocating_indices + and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) + == root + ) + ) + ) + if store.proposer_boost_root == Root(): + # Return only attestation score if ``proposer_boost_root`` is not set + return attestation_score + + # Calculate 
proposer score if ``proposer_boost_root`` is set + proposer_score = Gwei(0) + # Boost is applied if ``root`` is an ancestor of ``proposer_boost_root`` + if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root: + proposer_score = get_proposer_score(store) + return attestation_score + proposer_score + + +- name: get_weight#gloas + sources: [] + spec: | + + def get_weight(store: Store, node: ForkChoiceNode) -> Gwei: + if node.payload_status == PAYLOAD_STATUS_PENDING or store.blocks[ + node.root + ].slot + 1 != get_current_slot(store): + state = store.checkpoint_states[store.justified_checkpoint] + unslashed_and_active_indices = [ + i + for i in get_active_validator_indices(state, get_current_epoch(state)) + if not state.validators[i].slashed + ] + attestation_score = Gwei( + sum( + state.validators[i].effective_balance + for i in unslashed_and_active_indices + if ( + i in store.latest_messages + and i not in store.equivocating_indices + and is_supporting_vote(store, node, store.latest_messages[i]) + ) + ) + ) + + if store.proposer_boost_root == Root(): + # Return only attestation score if `proposer_boost_root` is not set + return attestation_score + + # Calculate proposer score if `proposer_boost_root` is set + proposer_score = Gwei(0) + + # `proposer_boost_root` is treated as a vote for the + # proposer's block in the current slot. Proposer boost + # is applied accordingly to all ancestors + message = LatestMessage( + slot=get_current_slot(store), + root=store.proposer_boost_root, + payload_present=False, + ) + if is_supporting_vote(store, node, message): + proposer_score = get_proposer_score(store) + + return attestation_score + proposer_score + else: + return Gwei(0) + + +- name: has_builder_withdrawal_credential#gloas + sources: [] + spec: | + + def has_builder_withdrawal_credential(validator: Validator) -> bool: + """ + Check if ``validator`` has an 0x03 prefixed "builder" withdrawal credential. 
+ """ + return is_builder_withdrawal_credential(validator.withdrawal_credentials) + + +- name: has_compounding_withdrawal_credential#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn has_compounding_withdrawal_credential( + spec: | + + def has_compounding_withdrawal_credential(validator: Validator) -> bool: + """ + Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential. + """ + return is_compounding_withdrawal_credential(validator.withdrawal_credentials) + + +- name: has_compounding_withdrawal_credential#gloas + sources: [] + spec: | + + def has_compounding_withdrawal_credential(validator: Validator) -> bool: + """ + Check if ``validator`` has an 0x02 or 0x03 prefixed withdrawal credential. + """ + if is_compounding_withdrawal_credential(validator.withdrawal_credentials): + return True + if is_builder_withdrawal_credential(validator.withdrawal_credentials): + return True + return False + + +- name: has_eth1_withdrawal_credential#capella + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn has_eth1_withdrawal_credential( + spec: | + + def has_eth1_withdrawal_credential(validator: Validator) -> bool: + """ + Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. + """ + return validator.withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX + + +- name: has_execution_withdrawal_credential#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn has_execution_withdrawal_credential( + spec: | + + def has_execution_withdrawal_credential(validator: Validator) -> bool: + """ + Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential. 
+ """ + return ( + has_eth1_withdrawal_credential(validator) # 0x01 + or has_compounding_withdrawal_credential(validator) # 0x02 + ) + + +- name: has_flag#altair + sources: + - file: consensus/types/src/attestation/participation_flags.rs + search: pub fn has_flag( + spec: | + + def has_flag(flags: ParticipationFlags, flag_index: int) -> bool: + """ + Return whether ``flags`` has ``flag_index`` set. + """ + flag = ParticipationFlags(2**flag_index) + return flags & flag == flag + + +- name: hash_to_bls_field#deneb + sources: [] + spec: | + + def hash_to_bls_field(data: bytes) -> BLSFieldElement: + """ + Hash ``data`` and convert the output to a BLS scalar field element. + The output is not uniform over the BLS field. + """ + hashed_data = hash(data) + return BLSFieldElement(int.from_bytes(hashed_data, KZG_ENDIANNESS) % BLS_MODULUS) + + +- name: increase_balance#phase0 + sources: + - file: consensus/state_processing/src/common/mod.rs + search: pub fn increase_balance< + spec: | + + def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Increase the validator balance at index ``index`` by ``delta``. 
+ """ + state.balances[index] += delta + + +- name: initialize_beacon_state_from_eth1#phase0 + sources: [] + spec: | + + def initialize_beacon_state_from_eth1( + eth1_block_hash: Hash32, eth1_timestamp: uint64, deposits: Sequence[Deposit] + ) -> BeaconState: + fork = Fork( + previous_version=GENESIS_FORK_VERSION, + current_version=GENESIS_FORK_VERSION, + epoch=GENESIS_EPOCH, + ) + state = BeaconState( + genesis_time=eth1_timestamp + GENESIS_DELAY, + fork=fork, + eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), + latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + randao_mixes=[eth1_block_hash] + * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + ) + + # Process deposits + leaves = list(map(lambda deposit: deposit.data, deposits)) + for index, deposit in enumerate(deposits): + deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[: index + 1]) + state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) + process_deposit(state, deposit) + + # Process activations + for index, validator in enumerate(state.validators): + balance = state.balances[index] + validator.effective_balance = min( + balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE + ) + if validator.effective_balance == MAX_EFFECTIVE_BALANCE: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + + # Set genesis validators root for domain separation and chain versioning + state.genesis_validators_root = hash_tree_root(state.validators) + + return state + + +- name: initialize_light_client_store#altair + sources: [] + spec: | + + def initialize_light_client_store( + trusted_block_root: Root, bootstrap: LightClientBootstrap + ) -> LightClientStore: + assert is_valid_light_client_header(bootstrap.header) + assert hash_tree_root(bootstrap.header.beacon) == trusted_block_root + + assert is_valid_normalized_merkle_branch( + 
leaf=hash_tree_root(bootstrap.current_sync_committee), + branch=bootstrap.current_sync_committee_branch, + gindex=current_sync_committee_gindex_at_slot(bootstrap.header.beacon.slot), + root=bootstrap.header.beacon.state_root, + ) + + return LightClientStore( + finalized_header=bootstrap.header, + current_sync_committee=bootstrap.current_sync_committee, + next_sync_committee=SyncCommittee(), + best_valid_update=None, + optimistic_header=bootstrap.header, + previous_max_active_participants=0, + current_max_active_participants=0, + ) + + +- name: initialize_proposer_lookahead#fulu + sources: + - file: consensus/state_processing/src/upgrade/fulu.rs + search: fn initialize_proposer_lookahead< + spec: | + + def initialize_proposer_lookahead( + state: electra.BeaconState, + ) -> Vector[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH]: + """ + Return the proposer indices for the full available lookahead starting from current epoch. + Used to initialize the ``proposer_lookahead`` field in the beacon state at genesis and after forks. + """ + current_epoch = get_current_epoch(state) + lookahead = [] + for i in range(MIN_SEED_LOOKAHEAD + 1): + lookahead.extend(get_beacon_proposer_indices(state, Epoch(current_epoch + i))) + return lookahead + + +- name: initiate_validator_exit#phase0 + sources: + - file: consensus/state_processing/src/common/initiate_validator_exit.rs + search: pub fn initiate_validator_exit< + spec: | + + def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: + """ + Initiate the exit of the validator with index ``index``. 
+ """ + # Return if validator already initiated exit + validator = state.validators[index] + if validator.exit_epoch != FAR_FUTURE_EPOCH: + return + + # Compute exit queue epoch + exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH] + exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))]) + exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch]) + if exit_queue_churn >= get_validator_churn_limit(state): + exit_queue_epoch += Epoch(1) + + # Set validator exit epoch and withdrawable epoch + validator.exit_epoch = exit_queue_epoch + validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) + + +- name: initiate_validator_exit#electra + sources: + - file: consensus/state_processing/src/common/initiate_validator_exit.rs + search: pub fn initiate_validator_exit< + spec: | + + def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: + """ + Initiate the exit of the validator with index ``index``. + """ + # Return if validator already initiated exit + validator = state.validators[index] + if validator.exit_epoch != FAR_FUTURE_EPOCH: + return + + # Compute exit queue epoch [Modified in Electra:EIP7251] + exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance) + + # Set validator exit epoch and withdrawable epoch + validator.exit_epoch = exit_queue_epoch + validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) + + +- name: integer_squareroot#phase0 + sources: [] + spec: | + + def integer_squareroot(n: uint64) -> uint64: + """ + Return the largest integer ``x`` such that ``x**2 <= n``. 
+ """ + if n == UINT64_MAX: + return UINT64_MAX_SQRT + x = n + y = (x + 1) // 2 + while y < x: + x = y + y = (x + n // x) // 2 + return x + + +- name: interpolate_polynomialcoeff#fulu + sources: [] + spec: | + + def interpolate_polynomialcoeff( + xs: Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement] + ) -> PolynomialCoeff: + """ + Lagrange interpolation: Finds the lowest degree polynomial that takes the value ``ys[i]`` at ``x[i]`` for all i. + Outputs a coefficient form polynomial. Leading coefficients may be zero. + """ + assert len(xs) == len(ys) + + r = PolynomialCoeff([BLSFieldElement(0)]) + for i in range(len(xs)): + summand = PolynomialCoeff([ys[i]]) + for j in range(len(ys)): + if j != i: + weight_adjustment = (xs[i] - xs[j]).inverse() + summand = multiply_polynomialcoeff( + summand, PolynomialCoeff([-weight_adjustment * xs[j], weight_adjustment]) + ) + r = add_polynomialcoeff(r, summand) + return r + + +- name: is_active_validator#phase0 + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_active_at( + spec: | + + def is_active_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is active. 
+ """ + return validator.activation_epoch <= epoch < validator.exit_epoch + + +- name: is_aggregator#phase0 + sources: + - file: consensus/types/src/attestation/selection_proof.rs + search: pub fn is_aggregator( + spec: | + + def is_aggregator( + state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature + ) -> bool: + committee = get_beacon_committee(state, slot, index) + modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE) + return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0 + + +- name: is_assigned_to_sync_committee#altair + sources: [] + spec: | + + def is_assigned_to_sync_committee( + state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex + ) -> bool: + sync_committee_period = compute_sync_committee_period(epoch) + current_epoch = get_current_epoch(state) + current_sync_committee_period = compute_sync_committee_period(current_epoch) + next_sync_committee_period = current_sync_committee_period + 1 + assert sync_committee_period in (current_sync_committee_period, next_sync_committee_period) + + pubkey = state.validators[validator_index].pubkey + if sync_committee_period == current_sync_committee_period: + return pubkey in state.current_sync_committee.pubkeys + else: # sync_committee_period == next_sync_committee_period + return pubkey in state.next_sync_committee.pubkeys + + +- name: is_attestation_same_slot#gloas + sources: [] + spec: | + + def is_attestation_same_slot(state: BeaconState, data: AttestationData) -> bool: + """ + Check if the attestation is for the block proposed at the attestation slot. 
+ """ + if data.slot == 0: + return True + + blockroot = data.beacon_block_root + slot_blockroot = get_block_root_at_slot(state, data.slot) + prev_blockroot = get_block_root_at_slot(state, Slot(data.slot - 1)) + + return blockroot == slot_blockroot and blockroot != prev_blockroot + + +- name: is_better_update#altair + sources: [] + spec: | + + def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdate) -> bool: + # Compare supermajority (> 2/3) sync committee participation + max_active_participants = len(new_update.sync_aggregate.sync_committee_bits) + new_num_active_participants = sum(new_update.sync_aggregate.sync_committee_bits) + old_num_active_participants = sum(old_update.sync_aggregate.sync_committee_bits) + new_has_supermajority = new_num_active_participants * 3 >= max_active_participants * 2 + old_has_supermajority = old_num_active_participants * 3 >= max_active_participants * 2 + if new_has_supermajority != old_has_supermajority: + return new_has_supermajority + if not new_has_supermajority and new_num_active_participants != old_num_active_participants: + return new_num_active_participants > old_num_active_participants + + # Compare presence of relevant sync committee + new_has_relevant_sync_committee = is_sync_committee_update(new_update) and ( + compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot) + == compute_sync_committee_period_at_slot(new_update.signature_slot) + ) + old_has_relevant_sync_committee = is_sync_committee_update(old_update) and ( + compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot) + == compute_sync_committee_period_at_slot(old_update.signature_slot) + ) + if new_has_relevant_sync_committee != old_has_relevant_sync_committee: + return new_has_relevant_sync_committee + + # Compare indication of any finality + new_has_finality = is_finality_update(new_update) + old_has_finality = is_finality_update(old_update) + if new_has_finality != old_has_finality: + return 
new_has_finality + + # Compare sync committee finality + if new_has_finality: + new_has_sync_committee_finality = compute_sync_committee_period_at_slot( + new_update.finalized_header.beacon.slot + ) == compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot) + old_has_sync_committee_finality = compute_sync_committee_period_at_slot( + old_update.finalized_header.beacon.slot + ) == compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot) + if new_has_sync_committee_finality != old_has_sync_committee_finality: + return new_has_sync_committee_finality + + # Tiebreaker 1: Sync committee participation beyond supermajority + if new_num_active_participants != old_num_active_participants: + return new_num_active_participants > old_num_active_participants + + # Tiebreaker 2: Prefer older data (fewer changes to best) + if new_update.attested_header.beacon.slot != old_update.attested_header.beacon.slot: + return new_update.attested_header.beacon.slot < old_update.attested_header.beacon.slot + + # Tiebreaker 3: Prefer updates with earlier signature slots + return new_update.signature_slot < old_update.signature_slot + + +- name: is_builder_payment_withdrawable#gloas + sources: [] + spec: | + + def is_builder_payment_withdrawable( + state: BeaconState, withdrawal: BuilderPendingWithdrawal + ) -> bool: + """ + Check if the builder is slashed and not yet withdrawable. 
+ """ + builder = state.validators[withdrawal.builder_index] + current_epoch = compute_epoch_at_slot(state.slot) + return builder.withdrawable_epoch >= current_epoch or not builder.slashed + + +- name: is_builder_withdrawal_credential#gloas + sources: [] + spec: | + + def is_builder_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool: + return withdrawal_credentials[:1] == BUILDER_WITHDRAWAL_PREFIX + + +- name: is_candidate_block#phase0 + sources: [] + spec: | + + def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool: + return ( + block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= period_start + and block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= period_start + ) + + +- name: is_compounding_withdrawal_credential#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_compounding_withdrawal_credential( + spec: | + + def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool: + return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX + + +- name: is_data_available#deneb + sources: + - file: beacon_node/beacon_chain/src/beacon_chain.rs + search: async fn check_gossip_blob_availability_and_import( + - file: beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs + search: pub fn put_kzg_verified_blobs< + spec: | + + def is_data_available( + beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment] + ) -> bool: + # `retrieve_blobs_and_proofs` is implementation and context dependent + # It returns all the blobs for the given block root, and raises an exception if not available + # Note: the p2p network does not guarantee sidecar retrieval outside of + # `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` + blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) + + return verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, proofs) + + +- name: is_data_available#fulu + sources: + - file: 
beacon_node/beacon_chain/src/beacon_chain.rs + search: async fn check_gossip_data_columns_availability_and_import( + - file: beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs + search: pub fn put_kzg_verified_data_columns< + spec: | + + def is_data_available(beacon_block_root: Root) -> bool: + # `retrieve_column_sidecars` is implementation and context dependent, replacing + # `retrieve_blobs_and_proofs`. For the given block root, it returns all column + # sidecars to sample, or raises an exception if they are not available. + # The p2p network does not guarantee sidecar retrieval outside of + # `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` epochs. + column_sidecars = retrieve_column_sidecars(beacon_block_root) + return all( + verify_data_column_sidecar(column_sidecar) + and verify_data_column_sidecar_kzg_proofs(column_sidecar) + for column_sidecar in column_sidecars + ) + + +- name: is_eligible_for_activation#phase0 + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_eligible_for_activation< + spec: | + + def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: + """ + Check if ``validator`` is eligible for activation. + """ + return ( + # Placement in queue is finalized + validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch + # Has not yet been activated + and validator.activation_epoch == FAR_FUTURE_EPOCH + ) + + +- name: is_eligible_for_activation_queue#phase0 + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_eligible_for_activation_queue( + spec: | + + def is_eligible_for_activation_queue(validator: Validator) -> bool: + """ + Check if ``validator`` is eligible to be placed into the activation queue. 
+ """ + return ( + validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH + and validator.effective_balance == MAX_EFFECTIVE_BALANCE + ) + + +- name: is_eligible_for_activation_queue#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_eligible_for_activation_queue( + spec: | + + def is_eligible_for_activation_queue(validator: Validator) -> bool: + """ + Check if ``validator`` is eligible to be placed into the activation queue. + """ + return ( + validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH + # [Modified in Electra:EIP7251] + and validator.effective_balance >= MIN_ACTIVATION_BALANCE + ) + + +- name: is_execution_block#bellatrix + sources: [] + spec: | + + def is_execution_block(block: BeaconBlock) -> bool: + return block.body.execution_payload != ExecutionPayload() + + +- name: is_execution_enabled#bellatrix + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn is_execution_enabled< + spec: | + + def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool: + return is_merge_transition_block(state, body) or is_merge_transition_complete(state) + + +- name: is_ffg_competitive#phase0 + sources: [] + spec: | + + def is_ffg_competitive(store: Store, head_root: Root, parent_root: Root) -> bool: + return ( + store.unrealized_justifications[head_root] == store.unrealized_justifications[parent_root] + ) + + +- name: is_finality_update#altair + sources: [] + spec: | + + def is_finality_update(update: LightClientUpdate) -> bool: + return update.finality_branch != FinalityBranch() + + +- name: is_finalization_ok#phase0 + sources: [] + spec: | + + def is_finalization_ok(store: Store, slot: Slot) -> bool: + epochs_since_finalization = compute_epoch_at_slot(slot) - store.finalized_checkpoint.epoch + return epochs_since_finalization <= REORG_MAX_EPOCHS_SINCE_FINALIZATION + + +- name: is_fully_withdrawable_validator#capella + sources: + - file: 
consensus/types/src/validator/validator.rs + search: pub fn is_fully_withdrawable_validator( + spec: | + + def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool: + """ + Check if ``validator`` is fully withdrawable. + """ + return ( + has_eth1_withdrawal_credential(validator) + and validator.withdrawable_epoch <= epoch + and balance > 0 + ) + + +- name: is_fully_withdrawable_validator#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_fully_withdrawable_validator( + spec: | + + def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool: + """ + Check if ``validator`` is fully withdrawable. + """ + return ( + # [Modified in Electra:EIP7251] + has_execution_withdrawal_credential(validator) + and validator.withdrawable_epoch <= epoch + and balance > 0 + ) + + +- name: is_head_late#phase0 + sources: [] + spec: | + + def is_head_late(store: Store, head_root: Root) -> bool: + return not store.block_timeliness[head_root] + + +- name: is_head_weak#phase0 + sources: [] + spec: | + + def is_head_weak(store: Store, head_root: Root) -> bool: + justified_state = store.checkpoint_states[store.justified_checkpoint] + reorg_threshold = calculate_committee_fraction(justified_state, REORG_HEAD_WEIGHT_THRESHOLD) + head_weight = get_weight(store, head_root) + return head_weight < reorg_threshold + + +- name: is_in_inactivity_leak#phase0 + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn is_in_inactivity_leak( + spec: | + + def is_in_inactivity_leak(state: BeaconState) -> bool: + return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY + + +- name: is_merge_transition_block#bellatrix + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn is_merge_transition_block< + spec: | + + def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool: + return not 
is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload() + + +- name: is_merge_transition_complete#bellatrix + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn is_merge_transition_complete< + spec: | + + def is_merge_transition_complete(state: BeaconState) -> bool: + return state.latest_execution_payload_header != ExecutionPayloadHeader() + + +- name: is_next_sync_committee_known#altair + sources: [] + spec: | + + def is_next_sync_committee_known(store: LightClientStore) -> bool: + return store.next_sync_committee != SyncCommittee() + + +- name: is_optimistic#bellatrix + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn is_optimistic( + spec: | + + def is_optimistic(opt_store: OptimisticStore, block: BeaconBlock) -> bool: + return hash_tree_root(block) in opt_store.optimistic_roots + + +- name: is_optimistic_candidate_block#bellatrix + sources: [] + spec: | + + def is_optimistic_candidate_block( + opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock + ) -> bool: + if is_execution_block(opt_store.blocks[block.parent_root]): + return True + + if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot: + return True + + return False + + +- name: is_parent_block_full#gloas + sources: [] + spec: | + + def is_parent_block_full(state: BeaconState) -> bool: + return state.latest_execution_payload_bid.block_hash == state.latest_block_hash + + +- name: is_parent_node_full#gloas + sources: [] + spec: | + + def is_parent_node_full(store: Store, block: BeaconBlock) -> bool: + return get_parent_payload_status(store, block) == PAYLOAD_STATUS_FULL + + +- name: is_parent_strong#phase0 + sources: [] + spec: | + + def is_parent_strong(store: Store, parent_root: Root) -> bool: + justified_state = store.checkpoint_states[store.justified_checkpoint] + parent_threshold = calculate_committee_fraction(justified_state, REORG_PARENT_WEIGHT_THRESHOLD) + parent_weight = 
get_weight(store, parent_root) + return parent_weight > parent_threshold + + +- name: is_partially_withdrawable_validator#capella + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_partially_withdrawable_validator( + spec: | + + def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool: + """ + Check if ``validator`` is partially withdrawable. + """ + has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE + has_excess_balance = balance > MAX_EFFECTIVE_BALANCE + return ( + has_eth1_withdrawal_credential(validator) + and has_max_effective_balance + and has_excess_balance + ) + + +- name: is_partially_withdrawable_validator#electra + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_partially_withdrawable_validator( + spec: | + + def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool: + """ + Check if ``validator`` is partially withdrawable. + """ + max_effective_balance = get_max_effective_balance(validator) + # [Modified in Electra:EIP7251] + has_max_effective_balance = validator.effective_balance == max_effective_balance + # [Modified in Electra:EIP7251] + has_excess_balance = balance > max_effective_balance + return ( + # [Modified in Electra:EIP7251] + has_execution_withdrawal_credential(validator) + and has_max_effective_balance + and has_excess_balance + ) + + +- name: is_payload_timely#gloas + sources: [] + spec: | + + def is_payload_timely(store: Store, root: Root) -> bool: + """ + Return whether the execution payload for the beacon block with root ``root`` + was voted as present by the PTC, and was locally determined to be available. 
+ """ + # The beacon block root must be known + assert root in store.ptc_vote + + # If the payload is not locally available, the payload + # is not considered available regardless of the PTC vote + if root not in store.execution_payload_states: + return False + + return sum(store.ptc_vote[root]) > PAYLOAD_TIMELY_THRESHOLD + + +- name: is_power_of_two#deneb + sources: [] + spec: | + + def is_power_of_two(value: int) -> bool: + """ + Check if ``value`` is a power of two integer. + """ + return (value > 0) and (value & (value - 1) == 0) + + +- name: is_proposer#phase0 + sources: [] + spec: | + + def is_proposer(state: BeaconState, validator_index: ValidatorIndex) -> bool: + return get_beacon_proposer_index(state) == validator_index + + +- name: is_proposing_on_time#phase0 + sources: [] + spec: | + + def is_proposing_on_time(store: Store) -> bool: + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + proposer_reorg_cutoff_ms = get_proposer_reorg_cutoff_ms(epoch) + return time_into_slot_ms <= proposer_reorg_cutoff_ms + + +- name: is_shuffling_stable#phase0 + sources: [] + spec: | + + def is_shuffling_stable(slot: Slot) -> bool: + return slot % SLOTS_PER_EPOCH != 0 + + +- name: is_slashable_attestation_data#phase0 + sources: [] + spec: | + + def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool: + """ + Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules. 
+ """ + return ( + # Double vote + (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) + or + # Surround vote + (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch) + ) + + +- name: is_slashable_validator#phase0 + sources: + - file: consensus/types/src/validator/validator.rs + search: pub fn is_slashable_at( + spec: | + + def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is slashable. + """ + return (not validator.slashed) and ( + validator.activation_epoch <= epoch < validator.withdrawable_epoch + ) + + +- name: is_supporting_vote#gloas + sources: [] + spec: | + + def is_supporting_vote(store: Store, node: ForkChoiceNode, message: LatestMessage) -> bool: + """ + Returns whether a vote for ``message.root`` supports the chain containing the beacon block ``node.root`` with the + payload contents indicated by ``node.payload_status`` as head during slot ``node.slot``. + """ + block = store.blocks[node.root] + if node.root == message.root: + if node.payload_status == PAYLOAD_STATUS_PENDING: + return True + if message.slot <= block.slot: + return False + if message.payload_present: + return node.payload_status == PAYLOAD_STATUS_FULL + else: + return node.payload_status == PAYLOAD_STATUS_EMPTY + + else: + ancestor = get_ancestor(store, message.root, block.slot) + return node.root == ancestor.root and ( + node.payload_status == PAYLOAD_STATUS_PENDING + or node.payload_status == ancestor.payload_status + ) + + +- name: is_sync_committee_aggregator#altair + sources: [] + spec: | + + def is_sync_committee_aggregator(signature: BLSSignature) -> bool: + modulo = max( + 1, + SYNC_COMMITTEE_SIZE + // SYNC_COMMITTEE_SUBNET_COUNT + // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, + ) + return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0 + + +- name: is_sync_committee_update#altair + sources: [] + spec: | + + def is_sync_committee_update(update: LightClientUpdate) -> bool: + 
return update.next_sync_committee_branch != NextSyncCommitteeBranch() + + +- name: is_valid_deposit_signature#electra + sources: + - file: consensus/state_processing/src/per_block_processing/verify_deposit.rs + search: pub fn is_valid_deposit_signature( + spec: | + + def is_valid_deposit_signature( + pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature + ) -> bool: + deposit_message = DepositMessage( + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, + ) + # Fork-agnostic domain since deposits are valid across forks + domain = compute_domain(DOMAIN_DEPOSIT) + signing_root = compute_signing_root(deposit_message, domain) + return bls.Verify(pubkey, signing_root, signature) + + +- name: is_valid_genesis_state#phase0 + sources: [] + spec: | + + def is_valid_genesis_state(state: BeaconState) -> bool: + if state.genesis_time < MIN_GENESIS_TIME: + return False + if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: + return False + return True + + +- name: is_valid_indexed_attestation#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs + search: pub fn is_valid_indexed_attestation< + spec: | + + def is_valid_indexed_attestation( + state: BeaconState, indexed_attestation: IndexedAttestation + ) -> bool: + """ + Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature. 
+ """ + # Verify indices are sorted and unique + indices = indexed_attestation.attesting_indices + if len(indices) == 0 or not indices == sorted(set(indices)): + return False + # Verify aggregate signature + pubkeys = [state.validators[i].pubkey for i in indices] + domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch) + signing_root = compute_signing_root(indexed_attestation.data, domain) + return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature) + + +- name: is_valid_indexed_payload_attestation#gloas + sources: [] + spec: | + + def is_valid_indexed_payload_attestation( + state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation + ) -> bool: + """ + Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has + a valid aggregate signature. + """ + # Verify indices are non-empty and sorted + indices = indexed_payload_attestation.attesting_indices + if len(indices) == 0 or not indices == sorted(indices): + return False + + # Verify aggregate signature + pubkeys = [state.validators[i].pubkey for i in indices] + domain = get_domain(state, DOMAIN_PTC_ATTESTER, None) + signing_root = compute_signing_root(indexed_payload_attestation.data, domain) + return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature) + + +- name: is_valid_light_client_header#altair + sources: [] + spec: | + + def is_valid_light_client_header(_header: LightClientHeader) -> bool: + return True + + +- name: is_valid_light_client_header#capella + sources: [] + spec: | + + def is_valid_light_client_header(header: LightClientHeader) -> bool: + epoch = compute_epoch_at_slot(header.beacon.slot) + + if epoch < CAPELLA_FORK_EPOCH: + return ( + header.execution == ExecutionPayloadHeader() + and header.execution_branch == ExecutionBranch() + ) + + return is_valid_merkle_branch( + leaf=get_lc_execution_root(header), + branch=header.execution_branch, + 
depth=floorlog2(EXECUTION_PAYLOAD_GINDEX), + index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX), + root=header.beacon.body_root, + ) + + +- name: is_valid_light_client_header#deneb + sources: [] + spec: | + + def is_valid_light_client_header(header: LightClientHeader) -> bool: + epoch = compute_epoch_at_slot(header.beacon.slot) + + # [New in Deneb:EIP4844] + if epoch < DENEB_FORK_EPOCH: + if header.execution.blob_gas_used != uint64(0): + return False + if header.execution.excess_blob_gas != uint64(0): + return False + + if epoch < CAPELLA_FORK_EPOCH: + return ( + header.execution == ExecutionPayloadHeader() + and header.execution_branch == ExecutionBranch() + ) + + return is_valid_merkle_branch( + leaf=get_lc_execution_root(header), + branch=header.execution_branch, + depth=floorlog2(EXECUTION_PAYLOAD_GINDEX), + index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX), + root=header.beacon.body_root, + ) + + +- name: is_valid_merkle_branch#phase0 + sources: [] + spec: | + + def is_valid_merkle_branch( + leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root + ) -> bool: + """ + Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``. 
+ """ + value = leaf + for i in range(depth): + if index // (2**i) % 2: + value = hash(branch[i] + value) + else: + value = hash(value + branch[i]) + return value == root + + +- name: is_valid_normalized_merkle_branch#altair + sources: [] + spec: | + + def is_valid_normalized_merkle_branch( + leaf: Bytes32, branch: Sequence[Bytes32], gindex: GeneralizedIndex, root: Root + ) -> bool: + depth = floorlog2(gindex) + index = get_subtree_index(gindex) + num_extra = len(branch) - depth + for i in range(num_extra): + if branch[i] != Bytes32(): + return False + return is_valid_merkle_branch(leaf, branch[num_extra:], depth, index, root) + + +- name: is_valid_switch_to_compounding_request#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: fn is_valid_switch_to_compounding_request< + spec: | + + def is_valid_switch_to_compounding_request( + state: BeaconState, consolidation_request: ConsolidationRequest + ) -> bool: + # Switch to compounding requires source and target be equal + if consolidation_request.source_pubkey != consolidation_request.target_pubkey: + return False + + # Verify pubkey exists + source_pubkey = consolidation_request.source_pubkey + validator_pubkeys = [v.pubkey for v in state.validators] + if source_pubkey not in validator_pubkeys: + return False + + source_validator = state.validators[ValidatorIndex(validator_pubkeys.index(source_pubkey))] + + # Verify request has been authorized + if source_validator.withdrawal_credentials[12:] != consolidation_request.source_address: + return False + + # Verify source withdrawal credentials + if not has_eth1_withdrawal_credential(source_validator): + return False + + # Verify the source is active + current_epoch = get_current_epoch(state) + if not is_active_validator(source_validator, current_epoch): + return False + + # Verify exit for source has not been initiated + if source_validator.exit_epoch != FAR_FUTURE_EPOCH: + return False + + return True + + +- 
name: is_valid_terminal_pow_block#bellatrix + sources: + - file: beacon_node/execution_layer/src/lib.rs + search: fn is_valid_terminal_pow_block( + spec: | + + def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool: + is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY + is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY + return is_total_difficulty_reached and is_parent_total_difficulty_valid + + +- name: is_within_weak_subjectivity_period#phase0 + sources: [] + spec: | + + def is_within_weak_subjectivity_period( + store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint + ) -> bool: + # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint + assert get_block_root(ws_state, ws_checkpoint.epoch) == ws_checkpoint.root + assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch + + ws_period = compute_weak_subjectivity_period(ws_state) + ws_state_epoch = compute_epoch_at_slot(ws_state.slot) + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + return current_epoch <= ws_state_epoch + ws_period + + +- name: is_within_weak_subjectivity_period#electra + sources: [] + spec: | + + def is_within_weak_subjectivity_period( + store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint + ) -> bool: + # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint + assert get_block_root(ws_state, ws_checkpoint.epoch) == ws_checkpoint.root + assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch + + # [Modified in Electra] + ws_period = compute_weak_subjectivity_period(ws_state) + ws_state_epoch = compute_epoch_at_slot(ws_state.slot) + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + return current_epoch <= ws_state_epoch + ws_period + + +- name: kzg_commitment_to_versioned_hash#deneb + sources: + - file: consensus/state_processing/src/per_block_processing/deneb.rs 
+ search: pub fn kzg_commitment_to_versioned_hash( + spec: | + + def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> VersionedHash: + return VERSIONED_HASH_VERSION_KZG + hash(kzg_commitment)[1:] + + +- name: latest_verified_ancestor#bellatrix + sources: [] + spec: | + + def latest_verified_ancestor(opt_store: OptimisticStore, block: BeaconBlock) -> BeaconBlock: + # It is assumed that the `block` parameter is never an INVALIDATED block. + while True: + if not is_optimistic(opt_store, block) or block.parent_root == Root(): + return block + block = opt_store.blocks[block.parent_root] + + +- name: max_compressed_len#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn max_compressed_len( + spec: | + + def max_compressed_len(n: uint64) -> uint64: + # Worst-case compressed length for a given payload of size n when using snappy: + # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + return uint64(32 + n + n / 6) + + +- name: max_message_size#phase0 + sources: + - file: consensus/types/src/core/chain_spec.rs + search: pub fn max_message_size( + spec: | + + def max_message_size() -> uint64: + # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small. + return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024) + + +- name: multi_exp#deneb + sources: [] + spec: | + + def multi_exp(_points: Sequence[TPoint], _integers: Sequence[uint64]) -> Sequence[TPoint]: ... + + +- name: multiply_polynomialcoeff#fulu + sources: [] + spec: | + + def multiply_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff: + """ + Multiplies the coefficient form polynomials ``a`` and ``b``. 
+ """ + assert len(a) + len(b) <= FIELD_ELEMENTS_PER_EXT_BLOB + + r = PolynomialCoeff([BLSFieldElement(0)]) + for power, coef in enumerate(a): + summand = PolynomialCoeff([BLSFieldElement(0)] * power + [coef * x for x in b]) + r = add_polynomialcoeff(r, summand) + return r + + +- name: next_sync_committee_gindex_at_slot#altair + sources: [] + spec: | + + def next_sync_committee_gindex_at_slot(_slot: Slot) -> GeneralizedIndex: + return NEXT_SYNC_COMMITTEE_GINDEX + + +- name: next_sync_committee_gindex_at_slot#electra + sources: [] + spec: | + + def next_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex: + epoch = compute_epoch_at_slot(slot) + + # [Modified in Electra] + if epoch >= ELECTRA_FORK_EPOCH: + return NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA + return NEXT_SYNC_COMMITTEE_GINDEX + + +- name: normalize_merkle_branch#electra + sources: [] + spec: | + + def normalize_merkle_branch( + branch: Sequence[Bytes32], gindex: GeneralizedIndex + ) -> Sequence[Bytes32]: + depth = floorlog2(gindex) + num_extra = depth - len(branch) + return [Bytes32()] * num_extra + [*branch] + + +- name: notify_ptc_messages#gloas + sources: [] + spec: | + + def notify_ptc_messages( + store: Store, state: BeaconState, payload_attestations: Sequence[PayloadAttestation] + ) -> None: + """ + Extracts a list of ``PayloadAttestationMessage`` from ``payload_attestations`` and updates the store with them + These Payload attestations are assumed to be in the beacon block hence signature verification is not needed + """ + if state.slot == 0: + return + for payload_attestation in payload_attestations: + indexed_payload_attestation = get_indexed_payload_attestation( + state, Slot(state.slot - 1), payload_attestation + ) + for idx in indexed_payload_attestation.attesting_indices: + on_payload_attestation_message( + store, + PayloadAttestationMessage( + validator_index=idx, + data=payload_attestation.data, + signature=BLSSignature(), + ), + is_from_block=True, + ) + + +- name: 
on_attestation#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_attestation( + spec: | + + def on_attestation(store: Store, attestation: Attestation, is_from_block: bool = False) -> None: + """ + Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire. + + An ``attestation`` that is asserted as invalid may be valid at a later time, + consider scheduling it for later processing in such case. + """ + validate_on_attestation(store, attestation, is_from_block) + + store_target_checkpoint_state(store, attestation.data.target) + + # Get state at the `target` to fully validate attestation + target_state = store.checkpoint_states[attestation.data.target] + indexed_attestation = get_indexed_attestation(target_state, attestation) + assert is_valid_indexed_attestation(target_state, indexed_attestation) + + # Update latest messages for attesting indices + update_latest_messages(store, indexed_attestation.attesting_indices, attestation) + + +- name: on_attester_slashing#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_attester_slashing( + spec: | + + def on_attester_slashing(store: Store, attester_slashing: AttesterSlashing) -> None: + """ + Run ``on_attester_slashing`` immediately upon receiving a new ``AttesterSlashing`` + from either within a block or directly on the wire. 
+ """ + attestation_1 = attester_slashing.attestation_1 + attestation_2 = attester_slashing.attestation_2 + assert is_slashable_attestation_data(attestation_1.data, attestation_2.data) + state = store.block_states[store.justified_checkpoint.root] + assert is_valid_indexed_attestation(state, attestation_1) + assert is_valid_indexed_attestation(state, attestation_2) + + indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices) + for index in indices: + store.equivocating_indices.add(index) + + +- name: on_block#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_block< + spec: | + + def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + # Make a copy of the state to avoid mutability issues + pre_state = copy(store.block_states[block.parent_root]) + # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
+ assert get_current_slot(store) >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block + + # Check the block is valid and compute the post-state + state = pre_state.copy() + block_root = hash_tree_root(block) + state_transition(state, signed_block, True) + # Add new block to the store + store.blocks[block_root] = block + # Add new state for this block to the store + store.block_states[block_root] = state + + # Add block timeliness to the store + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + attestation_threshold_ms = get_attestation_due_ms(epoch) + is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms + is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval + store.block_timeliness[hash_tree_root(block)] = is_timely + + # Add proposer score boost if the block is timely and not conflicting with an existing block + is_first_block = store.proposer_boost_root == Root() + if is_timely and is_first_block: + store.proposer_boost_root = hash_tree_root(block) + + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality + compute_pulled_up_tip(store, block_root) + + +- name: on_block#bellatrix + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_block< + spec: | + 
+ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + """ + Run ``on_block`` upon receiving a new block. + + A block that is asserted as invalid due to unavailable PoW block may be valid at a later time, + consider scheduling it for later processing in such case. + """ + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + # Make a copy of the state to avoid mutability issues + pre_state = copy(store.block_states[block.parent_root]) + # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. + assert get_current_slot(store) >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block + + # Check the block is valid and compute the post-state + state = pre_state.copy() + block_root = hash_tree_root(block) + state_transition(state, signed_block, True) + + # [New in Bellatrix] + if is_merge_transition_block(pre_state, block.body): + validate_merge_block(block) + + # Add new block to the store + store.blocks[block_root] = block + # Add new state for this block to the store + store.block_states[block_root] = state + + # Add block timeliness to the store + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + attestation_threshold_ms = get_attestation_due_ms(epoch) + is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms + is_timely = 
get_current_slot(store) == block.slot and is_before_attesting_interval + store.block_timeliness[hash_tree_root(block)] = is_timely + + # Add proposer score boost if the block is timely and not conflicting with an existing block + is_first_block = store.proposer_boost_root == Root() + if is_timely and is_first_block: + store.proposer_boost_root = hash_tree_root(block) + + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality. + compute_pulled_up_tip(store, block_root) + + +- name: on_block#capella + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_block< + spec: | + + def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + """ + Run ``on_block`` upon receiving a new block. + """ + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
+ assert get_current_slot(store) >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block + + # Check the block is valid and compute the post-state + # Make a copy of the state to avoid mutability issues + state = copy(store.block_states[block.parent_root]) + block_root = hash_tree_root(block) + state_transition(state, signed_block, True) + + # Add new block to the store + store.blocks[block_root] = block + # Add new state for this block to the store + store.block_states[block_root] = state + + # Add block timeliness to the store + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + attestation_threshold_ms = get_attestation_due_ms(epoch) + is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms + is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval + store.block_timeliness[hash_tree_root(block)] = is_timely + + # Add proposer score boost if the block is timely and not conflicting with an existing block + is_first_block = store.proposer_boost_root == Root() + if is_timely and is_first_block: + store.proposer_boost_root = hash_tree_root(block) + + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality. 
+ compute_pulled_up_tip(store, block_root) + + +- name: on_block#deneb + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_block< + spec: | + + def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + """ + Run ``on_block`` upon receiving a new block. + """ + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. + assert get_current_slot(store) >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block + + # [New in Deneb:EIP4844] + # Check if blob data is available + # If not, this payload MAY be queued and subsequently considered when blob data becomes available + assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments) + + # Check the block is valid and compute the post-state + # Make a copy of the state to avoid mutability issues + state = copy(store.block_states[block.parent_root]) + block_root = hash_tree_root(block) + state_transition(state, signed_block, True) + + # Add new block to the store + store.blocks[block_root] = block + # Add new state for this block to the store + store.block_states[block_root] = state + + # Add block timeliness to the store + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + attestation_threshold_ms = 
get_attestation_due_ms(epoch) + is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms + is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval + store.block_timeliness[hash_tree_root(block)] = is_timely + + # Add proposer score boost if the block is timely and not conflicting with an existing block + is_first_block = store.proposer_boost_root == Root() + if is_timely and is_first_block: + store.proposer_boost_root = hash_tree_root(block) + + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality. + compute_pulled_up_tip(store, block_root) + + +- name: on_block#fulu + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: pub fn on_block< + spec: | + + def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + """ + Run ``on_block`` upon receiving a new block. + """ + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + # Make a copy of the state to avoid mutability issues + state = copy(store.block_states[block.parent_root]) + # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
+ assert get_current_slot(store) >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block + + # [Modified in Fulu:EIP7594] + # Check if blob data is available + # If not, this payload MAY be queued and subsequently considered when blob data becomes available + assert is_data_available(hash_tree_root(block)) + + # Check the block is valid and compute the post-state + block_root = hash_tree_root(block) + state_transition(state, signed_block, True) + + # Add new block to the store + store.blocks[block_root] = block + # Add new state for this block to the store + store.block_states[block_root] = state + + # Add block timeliness to the store + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + attestation_threshold_ms = get_attestation_due_ms(epoch) + is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms + is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval + store.block_timeliness[hash_tree_root(block)] = is_timely + + # Add proposer score boost if the block is timely and not conflicting with an existing block + is_first_block = store.proposer_boost_root == Root() + if is_timely and is_first_block: + store.proposer_boost_root = hash_tree_root(block) + + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized 
justification and finality. + compute_pulled_up_tip(store, block_root) + + +- name: on_block#gloas + sources: [] + spec: | + + def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + """ + Run ``on_block`` upon receiving a new block. + """ + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + + # Check if this blocks builds on empty or full parent block + parent_block = store.blocks[block.parent_root] + bid = block.body.signed_execution_payload_bid.message + parent_bid = parent_block.body.signed_execution_payload_bid.message + # Make a copy of the state to avoid mutability issues + if is_parent_node_full(store, block): + assert block.parent_root in store.execution_payload_states + state = copy(store.execution_payload_states[block.parent_root]) + else: + assert bid.parent_block_hash == parent_bid.parent_block_hash + state = copy(store.block_states[block.parent_root]) + + # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
+ current_slot = get_current_slot(store) + assert current_slot >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block + + # Check the block is valid and compute the post-state + block_root = hash_tree_root(block) + state_transition(state, signed_block, True) + + # Add new block to the store + store.blocks[block_root] = block + # Add new state for this block to the store + store.block_states[block_root] = state + # Add a new PTC voting for this block to the store + store.ptc_vote[block_root] = [False] * PTC_SIZE + + # Notify the store about the payload_attestations in the block + notify_ptc_messages(store, state, block.body.payload_attestations) + # Add proposer score boost if the block is timely + seconds_since_genesis = store.time - store.genesis_time + time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS + epoch = get_current_store_epoch(store) + attestation_threshold_ms = get_attestation_due_ms(epoch) + is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms + is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval + store.block_timeliness[hash_tree_root(block)] = is_timely + + # Add proposer score boost if the block is timely and not conflicting with an existing block + is_first_block = store.proposer_boost_root == Root() + if is_timely and is_first_block: + store.proposer_boost_root = hash_tree_root(block) + + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, 
state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality. + compute_pulled_up_tip(store, block_root) + + +- name: on_execution_payload#gloas + sources: [] + spec: | + + def on_execution_payload(store: Store, signed_envelope: SignedExecutionPayloadEnvelope) -> None: + """ + Run ``on_execution_payload`` upon receiving a new execution payload. + """ + envelope = signed_envelope.message + # The corresponding beacon block root needs to be known + assert envelope.beacon_block_root in store.block_states + + # Check if blob data is available + # If not, this payload MAY be queued and subsequently considered when blob data becomes available + assert is_data_available(envelope.beacon_block_root) + + # Make a copy of the state to avoid mutability issues + state = copy(store.block_states[envelope.beacon_block_root]) + + # Process the execution payload + process_execution_payload(state, signed_envelope, EXECUTION_ENGINE) + + # Add new state for this payload to the store + store.execution_payload_states[envelope.beacon_block_root] = state + + +- name: on_payload_attestation_message#gloas + sources: [] + spec: | + + def on_payload_attestation_message( + store: Store, ptc_message: PayloadAttestationMessage, is_from_block: bool = False + ) -> None: + """ + Run ``on_payload_attestation_message`` upon receiving a new ``ptc_message`` directly on the wire. + """ + # The beacon block root must be known + data = ptc_message.data + # PTC attestation must be for a known block. 
If block is unknown, delay consideration until the block is found + state = store.block_states[data.beacon_block_root] + ptc = get_ptc(state, data.slot) + # PTC votes can only change the vote for their assigned beacon block, return early otherwise + if data.slot != state.slot: + return + # Check that the attester is from the PTC + assert ptc_message.validator_index in ptc + + # Verify the signature and check that its for the current slot if it is coming from the wire + if not is_from_block: + # Check that the attestation is for the current slot + assert data.slot == get_current_slot(store) + # Verify the signature + assert is_valid_indexed_payload_attestation( + state, + IndexedPayloadAttestation( + attesting_indices=[ptc_message.validator_index], + data=data, + signature=ptc_message.signature, + ), + ) + # Update the ptc vote for the block + ptc_index = ptc.index(ptc_message.validator_index) + ptc_vote = store.ptc_vote[data.beacon_block_root] + ptc_vote[ptc_index] = data.payload_present + + +- name: on_tick#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: fn on_tick( + spec: | + + def on_tick(store: Store, time: uint64) -> None: + # If the ``store.time`` falls behind, while loop catches up slot by slot + # to ensure that every previous slot is processed with ``on_tick_per_slot`` + tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT + while get_current_slot(store) < tick_slot: + previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT + on_tick_per_slot(store, previous_time) + on_tick_per_slot(store, time) + + +- name: on_tick_per_slot#phase0 + sources: [] + spec: | + + def on_tick_per_slot(store: Store, time: uint64) -> None: + previous_slot = get_current_slot(store) + + # Update store time + store.time = time + + current_slot = get_current_slot(store) + + # If this is a new slot, reset store.proposer_boost_root + if current_slot > previous_slot: + store.proposer_boost_root = Root() + + # If a new 
epoch, pull-up justification and finalization from previous epoch + if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: + update_checkpoints( + store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint + ) + + +- name: polynomial_eval_to_coeff#fulu + sources: [] + spec: | + + def polynomial_eval_to_coeff(polynomial: Polynomial) -> PolynomialCoeff: + """ + Interpolates a polynomial (given in evaluation form) to a polynomial in coefficient form. + """ + roots_of_unity = compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB) + return PolynomialCoeff( + fft_field(bit_reversal_permutation(polynomial), roots_of_unity, inv=True) + ) + + +- name: prepare_execution_payload#bellatrix + sources: + - file: beacon_node/beacon_chain/src/execution_payload.rs + search: pub async fn prepare_execution_payload< + spec: | + + def prepare_execution_payload( + state: BeaconState, + safe_block_hash: Hash32, + finalized_block_hash: Hash32, + suggested_fee_recipient: ExecutionAddress, + execution_engine: ExecutionEngine, + pow_chain: Optional[Dict[Hash32, PowBlock]] = None, + ) -> Optional[PayloadId]: + if not is_merge_transition_complete(state): + assert pow_chain is not None + is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32() + is_activation_epoch_reached = ( + get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH + ) + if is_terminal_block_hash_set and not is_activation_epoch_reached: + # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed + return None + + terminal_pow_block = get_terminal_pow_block(pow_chain) + if terminal_pow_block is None: + # Pre-merge, no prepare payload call is needed + return None + # Signify merge via producing on top of the terminal PoW block + parent_hash = terminal_pow_block.block_hash + else: + # Post-merge, normal payload + parent_hash = state.latest_execution_payload_header.block_hash + + # Set the forkchoice head and initiate the 
payload build process + payload_attributes = PayloadAttributes( + timestamp=compute_time_at_slot(state, state.slot), + prev_randao=get_randao_mix(state, get_current_epoch(state)), + suggested_fee_recipient=suggested_fee_recipient, + ) + return execution_engine.notify_forkchoice_updated( + head_block_hash=parent_hash, + safe_block_hash=safe_block_hash, + finalized_block_hash=finalized_block_hash, + payload_attributes=payload_attributes, + ) + + +- name: prepare_execution_payload#capella + sources: + - file: beacon_node/beacon_chain/src/execution_payload.rs + search: pub async fn prepare_execution_payload< + spec: | + + def prepare_execution_payload( + state: BeaconState, + safe_block_hash: Hash32, + finalized_block_hash: Hash32, + suggested_fee_recipient: ExecutionAddress, + execution_engine: ExecutionEngine, + ) -> Optional[PayloadId]: + # [Modified in Capella] + # Removed `is_merge_transition_complete` check + parent_hash = state.latest_execution_payload_header.block_hash + + # Set the forkchoice head and initiate the payload build process + payload_attributes = PayloadAttributes( + timestamp=compute_time_at_slot(state, state.slot), + prev_randao=get_randao_mix(state, get_current_epoch(state)), + suggested_fee_recipient=suggested_fee_recipient, + # [New in Capella] + withdrawals=get_expected_withdrawals(state), + ) + return execution_engine.notify_forkchoice_updated( + head_block_hash=parent_hash, + safe_block_hash=safe_block_hash, + finalized_block_hash=finalized_block_hash, + payload_attributes=payload_attributes, + ) + + +- name: prepare_execution_payload#deneb + sources: + - file: beacon_node/beacon_chain/src/execution_payload.rs + search: pub async fn prepare_execution_payload< + spec: | + + def prepare_execution_payload( + state: BeaconState, + safe_block_hash: Hash32, + finalized_block_hash: Hash32, + suggested_fee_recipient: ExecutionAddress, + execution_engine: ExecutionEngine, + ) -> Optional[PayloadId]: + # Verify consistency of the parent hash with 
respect to the previous execution payload header + parent_hash = state.latest_execution_payload_header.block_hash + + # Set the forkchoice head and initiate the payload build process + payload_attributes = PayloadAttributes( + timestamp=compute_time_at_slot(state, state.slot), + prev_randao=get_randao_mix(state, get_current_epoch(state)), + suggested_fee_recipient=suggested_fee_recipient, + withdrawals=get_expected_withdrawals(state), + # [New in Deneb:EIP4788] + parent_beacon_block_root=hash_tree_root(state.latest_block_header), + ) + return execution_engine.notify_forkchoice_updated( + head_block_hash=parent_hash, + safe_block_hash=safe_block_hash, + finalized_block_hash=finalized_block_hash, + payload_attributes=payload_attributes, + ) + + +- name: prepare_execution_payload#electra + sources: + - file: beacon_node/beacon_chain/src/execution_payload.rs + search: pub async fn prepare_execution_payload< + spec: | + + def prepare_execution_payload( + state: BeaconState, + safe_block_hash: Hash32, + finalized_block_hash: Hash32, + suggested_fee_recipient: ExecutionAddress, + execution_engine: ExecutionEngine, + ) -> Optional[PayloadId]: + # Verify consistency of the parent hash with respect to the previous execution payload header + parent_hash = state.latest_execution_payload_header.block_hash + + # [Modified in EIP7251] + # Set the forkchoice head and initiate the payload build process + withdrawals, _ = get_expected_withdrawals(state) + + payload_attributes = PayloadAttributes( + timestamp=compute_time_at_slot(state, state.slot), + prev_randao=get_randao_mix(state, get_current_epoch(state)), + suggested_fee_recipient=suggested_fee_recipient, + withdrawals=withdrawals, + parent_beacon_block_root=hash_tree_root(state.latest_block_header), + ) + return execution_engine.notify_forkchoice_updated( + head_block_hash=parent_hash, + safe_block_hash=safe_block_hash, + finalized_block_hash=finalized_block_hash, + payload_attributes=payload_attributes, + ) + + +- name: 
prepare_execution_payload#gloas + sources: [] + spec: | + + def prepare_execution_payload( + state: BeaconState, + safe_block_hash: Hash32, + finalized_block_hash: Hash32, + suggested_fee_recipient: ExecutionAddress, + execution_engine: ExecutionEngine, + ) -> Optional[PayloadId]: + # Verify consistency of the parent hash with respect to the previous execution payload bid + parent_hash = state.latest_execution_payload_bid.block_hash + + # [Modified in Gloas:EIP7732] + # Set the forkchoice head and initiate the payload build process + withdrawals, _, _ = get_expected_withdrawals(state) + + payload_attributes = PayloadAttributes( + timestamp=compute_time_at_slot(state, state.slot), + prev_randao=get_randao_mix(state, get_current_epoch(state)), + suggested_fee_recipient=suggested_fee_recipient, + withdrawals=withdrawals, + parent_beacon_block_root=hash_tree_root(state.latest_block_header), + ) + return execution_engine.notify_forkchoice_updated( + head_block_hash=parent_hash, + safe_block_hash=safe_block_hash, + finalized_block_hash=finalized_block_hash, + payload_attributes=payload_attributes, + ) + + +- name: process_attestation#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub mod base[\s\S]*?pub fn process_attestations< + regex: true + spec: | + + def process_attestation(state: BeaconState, attestation: Attestation) -> None: + data = attestation.data + assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH + assert data.index < get_committee_count_per_slot(state, data.target.epoch) + + committee = get_beacon_committee(state, data.slot, data.index) + assert len(attestation.aggregation_bits) == len(committee) + + pending_attestation = PendingAttestation( + data=data, + aggregation_bits=attestation.aggregation_bits, + 
inclusion_delay=state.slot - data.slot, + proposer_index=get_beacon_proposer_index(state), + ) + + if data.target.epoch == get_current_epoch(state): + assert data.source == state.current_justified_checkpoint + state.current_epoch_attestations.append(pending_attestation) + else: + assert data.source == state.previous_justified_checkpoint + state.previous_epoch_attestations.append(pending_attestation) + + # Verify signature + assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) + + +- name: process_attestation#altair + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub mod altair_deneb[\s\S]*?pub fn process_attestations< + regex: true + spec: | + + def process_attestation(state: BeaconState, attestation: Attestation) -> None: + data = attestation.data + assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH + assert data.index < get_committee_count_per_slot(state, data.target.epoch) + + committee = get_beacon_committee(state, data.slot, data.index) + assert len(attestation.aggregation_bits) == len(committee) + + # Participation flag indices + participation_flag_indices = get_attestation_participation_flag_indices( + state, data, state.slot - data.slot + ) + + # Verify signature + assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) + + # Update epoch participation flags + if data.target.epoch == get_current_epoch(state): + epoch_participation = state.current_epoch_participation + else: + epoch_participation = state.previous_epoch_participation + + proposer_reward_numerator = 0 + for index in get_attesting_indices(state, attestation): + for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): + if flag_index in participation_flag_indices and not 
has_flag( + epoch_participation[index], flag_index + ): + epoch_participation[index] = add_flag(epoch_participation[index], flag_index) + proposer_reward_numerator += get_base_reward(state, index) * weight + + # Reward proposer + proposer_reward_denominator = ( + (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT + ) + proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + + +- name: process_attestation#deneb + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub mod altair_deneb[\s\S]*?pub fn process_attestations< + regex: true + spec: | + + def process_attestation(state: BeaconState, attestation: Attestation) -> None: + data = attestation.data + assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) + # [Modified in Deneb:EIP7045] + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot + assert data.index < get_committee_count_per_slot(state, data.target.epoch) + + committee = get_beacon_committee(state, data.slot, data.index) + assert len(attestation.aggregation_bits) == len(committee) + + # Participation flag indices + participation_flag_indices = get_attestation_participation_flag_indices( + state, data, state.slot - data.slot + ) + + # Verify signature + assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) + + # Update epoch participation flags + if data.target.epoch == get_current_epoch(state): + epoch_participation = state.current_epoch_participation + else: + epoch_participation = state.previous_epoch_participation + + proposer_reward_numerator = 0 + for index in get_attesting_indices(state, attestation): + for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): + if flag_index in participation_flag_indices and not has_flag( + 
epoch_participation[index], flag_index + ): + epoch_participation[index] = add_flag(epoch_participation[index], flag_index) + proposer_reward_numerator += get_base_reward(state, index) * weight + + # Reward proposer + proposer_reward_denominator = ( + (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT + ) + proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + + +- name: process_attestation#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: 'pub fn process_attestations>\([^{]*block_body: BeaconBlockBodyRef' + regex: true + spec: | + + def process_attestation(state: BeaconState, attestation: Attestation) -> None: + data = attestation.data + assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot + + # [Modified in Electra:EIP7549] + assert data.index == 0 + committee_indices = get_committee_indices(attestation.committee_bits) + committee_offset = 0 + for committee_index in committee_indices: + assert committee_index < get_committee_count_per_slot(state, data.target.epoch) + committee = get_beacon_committee(state, data.slot, committee_index) + committee_attesters = set( + attester_index + for i, attester_index in enumerate(committee) + if attestation.aggregation_bits[committee_offset + i] + ) + assert len(committee_attesters) > 0 + committee_offset += len(committee) + + # Bitfield length matches total number of participants + assert len(attestation.aggregation_bits) == committee_offset + + # Participation flag indices + participation_flag_indices = get_attestation_participation_flag_indices( + state, data, state.slot - data.slot + ) + + # Verify signature + assert is_valid_indexed_attestation(state, 
get_indexed_attestation(state, attestation)) + + # Update epoch participation flags + if data.target.epoch == get_current_epoch(state): + epoch_participation = state.current_epoch_participation + else: + epoch_participation = state.previous_epoch_participation + + proposer_reward_numerator = 0 + for index in get_attesting_indices(state, attestation): + for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): + if flag_index in participation_flag_indices and not has_flag( + epoch_participation[index], flag_index + ): + epoch_participation[index] = add_flag(epoch_participation[index], flag_index) + proposer_reward_numerator += get_base_reward(state, index) * weight + + # Reward proposer + proposer_reward_denominator = ( + (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT + ) + proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + + +- name: process_attestation#gloas + sources: [] + spec: | + + def process_attestation(state: BeaconState, attestation: Attestation) -> None: + data = attestation.data + assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot + + # [Modified in Gloas:EIP7732] + assert data.index < 2 + committee_indices = get_committee_indices(attestation.committee_bits) + committee_offset = 0 + for committee_index in committee_indices: + assert committee_index < get_committee_count_per_slot(state, data.target.epoch) + committee = get_beacon_committee(state, data.slot, committee_index) + committee_attesters = set( + attester_index + for i, attester_index in enumerate(committee) + if attestation.aggregation_bits[committee_offset + i] + ) + assert len(committee_attesters) > 0 + committee_offset += len(committee) + + # Bitfield length matches total number of 
participants + assert len(attestation.aggregation_bits) == committee_offset + + # Participation flag indices + participation_flag_indices = get_attestation_participation_flag_indices( + state, data, state.slot - data.slot + ) + + # Verify signature + assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) + + # [Modified in Gloas:EIP7732] + if data.target.epoch == get_current_epoch(state): + current_epoch_target = True + epoch_participation = state.current_epoch_participation + payment = state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH] + else: + current_epoch_target = False + epoch_participation = state.previous_epoch_participation + payment = state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH] + + proposer_reward_numerator = 0 + for index in get_attesting_indices(state, attestation): + # [New in Gloas:EIP7732] + # For same-slot attestations, check if we are setting any new flags. + # If we are, this validator has not contributed to this slot's quorum yet. + will_set_new_flag = False + + for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): + if flag_index in participation_flag_indices and not has_flag( + epoch_participation[index], flag_index + ): + epoch_participation[index] = add_flag(epoch_participation[index], flag_index) + proposer_reward_numerator += get_base_reward(state, index) * weight + # [New in Gloas:EIP7732] + will_set_new_flag = True + + # [New in Gloas:EIP7732] + # Add weight for same-slot attestations when any new flag is set. + # This ensures each validator contributes exactly once per slot. 
+ if ( + will_set_new_flag + and is_attestation_same_slot(state, data) + and payment.withdrawal.amount > 0 + ): + payment.weight += state.validators[index].effective_balance + + # Reward proposer + proposer_reward_denominator = ( + (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT + ) + proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + + # [New in Gloas:EIP7732] + # Update builder payment weight + if current_epoch_target: + state.builder_pending_payments[SLOTS_PER_EPOCH + data.slot % SLOTS_PER_EPOCH] = payment + else: + state.builder_pending_payments[data.slot % SLOTS_PER_EPOCH] = payment + + +- name: process_attester_slashing#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_attester_slashings< + spec: | + + def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None: + attestation_1 = attester_slashing.attestation_1 + attestation_2 = attester_slashing.attestation_2 + assert is_slashable_attestation_data(attestation_1.data, attestation_2.data) + assert is_valid_indexed_attestation(state, attestation_1) + assert is_valid_indexed_attestation(state, attestation_2) + + slashed_any = False + indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices) + for index in sorted(indices): + if is_slashable_validator(state.validators[index], get_current_epoch(state)): + slash_validator(state, index) + slashed_any = True + assert slashed_any + + +- name: process_block#phase0 + sources: + - file: beacon_node/beacon_chain/src/beacon_chain.rs + search: pub async fn process_block< + spec: | + + def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, 
block.body) + + +- name: process_block#altair + sources: + - file: beacon_node/beacon_chain/src/beacon_chain.rs + search: pub async fn process_block< + spec: | + + def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + process_randao(state, block.body) + process_eth1_data(state, block.body) + # [Modified in Altair] + process_operations(state, block.body) + # [New in Altair] + process_sync_aggregate(state, block.body.sync_aggregate) + + +- name: process_block#bellatrix + sources: + - file: beacon_node/beacon_chain/src/beacon_chain.rs + search: pub async fn process_block< + spec: | + + def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + if is_execution_enabled(state, block.body): + # [New in Bellatrix] + process_execution_payload(state, block.body, EXECUTION_ENGINE) + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, block.body) + process_sync_aggregate(state, block.body.sync_aggregate) + + +- name: process_block#capella + sources: + - file: beacon_node/beacon_chain/src/beacon_chain.rs + search: pub async fn process_block< + spec: | + + def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + # [Modified in Capella] + # Removed `is_execution_enabled` call + # [New in Capella] + process_withdrawals(state, block.body.execution_payload) + # [Modified in Capella] + process_execution_payload(state, block.body, EXECUTION_ENGINE) + process_randao(state, block.body) + process_eth1_data(state, block.body) + # [Modified in Capella] + process_operations(state, block.body) + process_sync_aggregate(state, block.body.sync_aggregate) + + +- name: process_block#electra + sources: + - file: beacon_node/beacon_chain/src/beacon_chain.rs + search: pub async fn process_block< + spec: | + + def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, 
block) + # [Modified in Electra:EIP7251] + process_withdrawals(state, block.body.execution_payload) + # [Modified in Electra:EIP6110] + process_execution_payload(state, block.body, EXECUTION_ENGINE) + process_randao(state, block.body) + process_eth1_data(state, block.body) + # [Modified in Electra:EIP6110:EIP7002:EIP7549:EIP7251] + process_operations(state, block.body) + process_sync_aggregate(state, block.body.sync_aggregate) + + +- name: process_block#gloas + sources: [] + spec: | + + def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + # [Modified in Gloas:EIP7732] + process_withdrawals(state) + # [Modified in Gloas:EIP7732] + # Removed `process_execution_payload` + # [New in Gloas:EIP7732] + process_execution_payload_bid(state, block) + process_randao(state, block.body) + process_eth1_data(state, block.body) + # [Modified in Gloas:EIP7732] + process_operations(state, block.body) + process_sync_aggregate(state, block.body.sync_aggregate) + + +- name: process_block_header#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_block_header< + spec: | + + def process_block_header(state: BeaconState, block: BeaconBlock) -> None: + # Verify that the slots match + assert block.slot == state.slot + # Verify that the block is newer than latest block header + assert block.slot > state.latest_block_header.slot + # Verify that proposer index is the correct index + assert block.proposer_index == get_beacon_proposer_index(state) + # Verify that the parent matches + assert block.parent_root == hash_tree_root(state.latest_block_header) + # Cache current block as the new latest block + state.latest_block_header = BeaconBlockHeader( + slot=block.slot, + proposer_index=block.proposer_index, + parent_root=block.parent_root, + state_root=Bytes32(), # Overwritten in the next process_slot call + body_root=hash_tree_root(block.body), + ) + + # Verify proposer is not slashed + 
proposer = state.validators[block.proposer_index] + assert not proposer.slashed + + +- name: process_bls_to_execution_change#capella + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_bls_to_execution_changes< + spec: | + + def process_bls_to_execution_change( + state: BeaconState, signed_address_change: SignedBLSToExecutionChange + ) -> None: + address_change = signed_address_change.message + + assert address_change.validator_index < len(state.validators) + + validator = state.validators[address_change.validator_index] + + assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX + assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:] + + # Fork-agnostic domain since address changes are valid across forks + domain = compute_domain( + DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=state.genesis_validators_root + ) + signing_root = compute_signing_root(address_change, domain) + assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) + + validator.withdrawal_credentials = ( + ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + address_change.to_execution_address + ) + + +- name: process_builder_pending_payments#gloas + sources: [] + spec: | + + def process_builder_pending_payments(state: BeaconState) -> None: + """ + Processes the builder pending payments from the previous epoch. 
+ """ + quorum = get_builder_payment_quorum_threshold(state) + for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]: + if payment.weight > quorum: + amount = payment.withdrawal.amount + exit_queue_epoch = compute_exit_epoch_and_update_churn(state, amount) + withdrawable_epoch = exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + payment.withdrawal.withdrawable_epoch = Epoch(withdrawable_epoch) + state.builder_pending_withdrawals.append(payment.withdrawal) + + old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:] + new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)] + state.builder_pending_payments = old_payments + new_payments + + +- name: process_consolidation_request#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_consolidation_request< + spec: | + + def process_consolidation_request( + state: BeaconState, consolidation_request: ConsolidationRequest + ) -> None: + if is_valid_switch_to_compounding_request(state, consolidation_request): + validator_pubkeys = [v.pubkey for v in state.validators] + request_source_pubkey = consolidation_request.source_pubkey + source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey)) + switch_to_compounding_validator(state, source_index) + return + + # Verify that source != target, so a consolidation cannot be used as an exit + if consolidation_request.source_pubkey == consolidation_request.target_pubkey: + return + # If the pending consolidations queue is full, consolidation requests are ignored + if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT: + return + # If there is too little available consolidation churn limit, consolidation requests are ignored + if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE: + return + + validator_pubkeys = [v.pubkey for v in state.validators] + # Verify pubkeys exists + request_source_pubkey = 
consolidation_request.source_pubkey + request_target_pubkey = consolidation_request.target_pubkey + if request_source_pubkey not in validator_pubkeys: + return + if request_target_pubkey not in validator_pubkeys: + return + source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey)) + target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey)) + source_validator = state.validators[source_index] + target_validator = state.validators[target_index] + + # Verify source withdrawal credentials + has_correct_credential = has_execution_withdrawal_credential(source_validator) + is_correct_source_address = ( + source_validator.withdrawal_credentials[12:] == consolidation_request.source_address + ) + if not (has_correct_credential and is_correct_source_address): + return + + # Verify that target has compounding withdrawal credentials + if not has_compounding_withdrawal_credential(target_validator): + return + + # Verify the source and the target are active + current_epoch = get_current_epoch(state) + if not is_active_validator(source_validator, current_epoch): + return + if not is_active_validator(target_validator, current_epoch): + return + # Verify exits for source and target have not been initiated + if source_validator.exit_epoch != FAR_FUTURE_EPOCH: + return + if target_validator.exit_epoch != FAR_FUTURE_EPOCH: + return + # Verify the source has been active long enough + if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD: + return + # Verify the source has no pending withdrawals in the queue + if get_pending_balance_to_withdraw(state, source_index) > 0: + return + + # Initiate source validator exit and append pending consolidation + source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn( + state, source_validator.effective_balance + ) + source_validator.withdrawable_epoch = Epoch( + source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + ) + state.pending_consolidations.append( 
+ PendingConsolidation(source_index=source_index, target_index=target_index) + ) + + +- name: process_deposit#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_deposits< + spec: | + + def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # Verify the Merkle branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(deposit.data), + branch=deposit.proof, + # Add 1 for the List length mix-in + depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, + index=state.eth1_deposit_index, + root=state.eth1_data.deposit_root, + ) + + # Deposits must be processed in order + state.eth1_deposit_index += 1 + + apply_deposit( + state=state, + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, + signature=deposit.data.signature, + ) + + +- name: process_deposit#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_deposits< + spec: | + + def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # Verify the Merkle branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(deposit.data), + branch=deposit.proof, + # Add 1 for the List length mix-in + depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, + index=state.eth1_deposit_index, + root=state.eth1_data.deposit_root, + ) + + # Deposits must be processed in order + state.eth1_deposit_index += 1 + + # [Modified in Electra:EIP7251] + apply_deposit( + state=state, + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, + signature=deposit.data.signature, + ) + + +- name: process_deposit_request#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_deposit_requests< + spec: | + + def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None: + # Set 
deposit request start index + if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX: + state.deposit_requests_start_index = deposit_request.index + + # Create pending deposit + state.pending_deposits.append( + PendingDeposit( + pubkey=deposit_request.pubkey, + withdrawal_credentials=deposit_request.withdrawal_credentials, + amount=deposit_request.amount, + signature=deposit_request.signature, + slot=state.slot, + ) + ) + + +- name: process_effective_balance_updates#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs + search: pub fn process_effective_balance_updates< + spec: | + + def process_effective_balance_updates(state: BeaconState) -> None: + # Update effective balances with hysteresis + for index, validator in enumerate(state.validators): + balance = state.balances[index] + HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) + DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER + UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER + if ( + balance + DOWNWARD_THRESHOLD < validator.effective_balance + or validator.effective_balance + UPWARD_THRESHOLD < balance + ): + validator.effective_balance = min( + balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE + ) + + +- name: process_effective_balance_updates#electra + sources: + - file: consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs + search: pub fn process_effective_balance_updates< + spec: | + + def process_effective_balance_updates(state: BeaconState) -> None: + # Update effective balances with hysteresis + for index, validator in enumerate(state.validators): + balance = state.balances[index] + HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) + DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER + UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * 
HYSTERESIS_UPWARD_MULTIPLIER + # [Modified in Electra:EIP7251] + max_effective_balance = get_max_effective_balance(validator) + + if ( + balance + DOWNWARD_THRESHOLD < validator.effective_balance + or validator.effective_balance + UPWARD_THRESHOLD < balance + ): + validator.effective_balance = min( + balance - balance % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance + ) + + +- name: process_epoch#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing.rs + search: pub fn process_epoch< + - file: consensus/state_processing/src/per_epoch_processing/base.rs + search: pub fn process_epoch< + spec: | + + def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + process_slashings(state) + process_eth1_data_reset(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_roots_update(state) + process_participation_record_updates(state) + + +- name: process_epoch#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing.rs + search: pub fn process_epoch< + - file: consensus/state_processing/src/per_epoch_processing/altair.rs + search: pub fn process_epoch< + spec: | + + def process_epoch(state: BeaconState) -> None: + # [Modified in Altair] + process_justification_and_finalization(state) + # [New in Altair] + process_inactivity_updates(state) + # [Modified in Altair] + process_rewards_and_penalties(state) + process_registry_updates(state) + # [Modified in Altair] + process_slashings(state) + process_eth1_data_reset(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_roots_update(state) + # [New in Altair] + process_participation_flag_updates(state) + # [New in Altair] + process_sync_committee_updates(state) + + +- name: process_epoch#capella + 
sources: + - file: consensus/state_processing/src/per_epoch_processing.rs + search: pub fn process_epoch< + spec: | + + def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_inactivity_updates(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + process_slashings(state) + process_eth1_data_reset(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + # [Modified in Capella] + process_historical_summaries_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) + + +- name: process_epoch#electra + sources: + - file: consensus/state_processing/src/per_epoch_processing.rs + search: pub fn process_epoch< + spec: | + + def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_inactivity_updates(state) + process_rewards_and_penalties(state) + # [Modified in Electra:EIP7251] + process_registry_updates(state) + # [Modified in Electra:EIP7251] + process_slashings(state) + process_eth1_data_reset(state) + # [New in Electra:EIP7251] + process_pending_deposits(state) + # [New in Electra:EIP7251] + process_pending_consolidations(state) + # [Modified in Electra:EIP7251] + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_summaries_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) + + +- name: process_epoch#fulu + sources: + - file: consensus/state_processing/src/per_epoch_processing.rs + search: pub fn process_epoch< + spec: | + + def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_inactivity_updates(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + process_slashings(state) + process_eth1_data_reset(state) + process_pending_deposits(state) 
+ process_pending_consolidations(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_summaries_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) + # [New in Fulu:EIP7917] + process_proposer_lookahead(state) + + +- name: process_epoch#gloas + sources: [] + spec: | + + def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_inactivity_updates(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + process_slashings(state) + process_eth1_data_reset(state) + process_pending_deposits(state) + process_pending_consolidations(state) + # [New in Gloas:EIP7732] + process_builder_pending_payments(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_summaries_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) + process_proposer_lookahead(state) + + +- name: process_eth1_data#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_eth1_data< + spec: | + + def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: + state.eth1_data_votes.append(body.eth1_data) + if ( + state.eth1_data_votes.count(body.eth1_data) * 2 + > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH + ): + state.eth1_data = body.eth1_data + + +- name: process_eth1_data_reset#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/resets.rs + search: pub fn process_eth1_data_reset< + spec: | + + def process_eth1_data_reset(state: BeaconState) -> None: + next_epoch = Epoch(get_current_epoch(state) + 1) + # Reset eth1 data votes + if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: + state.eth1_data_votes = [] + + +- name: process_execution_payload#bellatrix + sources: + - file: 
consensus/state_processing/src/per_block_processing.rs + search: pub fn process_execution_payload< + spec: | + + def process_execution_payload( + state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine + ) -> None: + payload = body.execution_payload + + # Verify consistency of the parent hash with respect to the previous execution payload header + if is_merge_transition_complete(state): + assert payload.parent_hash == state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert payload.timestamp == compute_time_at_slot(state, state.slot) + # Verify the execution payload is valid + assert execution_engine.verify_and_notify_new_payload( + NewPayloadRequest(execution_payload=payload) + ) + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + ) + + +- name: process_execution_payload#capella + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_execution_payload< + spec: | + + def process_execution_payload( + state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine + ) -> None: + payload = body.execution_payload + # [Modified in Capella] + # Removed `is_merge_transition_complete` check + # Verify consistency of the parent hash with respect to the previous execution payload header + assert payload.parent_hash == 
state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert payload.timestamp == compute_time_at_slot(state, state.slot) + # Verify the execution payload is valid + assert execution_engine.verify_and_notify_new_payload( + NewPayloadRequest(execution_payload=payload) + ) + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + # [New in Capella] + withdrawals_root=hash_tree_root(payload.withdrawals), + ) + + +- name: process_execution_payload#deneb + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_execution_payload< + spec: | + + def process_execution_payload( + state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine + ) -> None: + payload = body.execution_payload + + # Verify consistency of the parent hash with respect to the previous execution payload header + assert payload.parent_hash == state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert payload.timestamp == compute_time_at_slot(state, state.slot) + # [New in Deneb:EIP4844] + # Verify commitments are under limit + assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK + + # [New in Deneb:EIP4844] + # Compute list of versioned hashes + 
versioned_hashes = [ + kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments + ] + + # Verify the execution payload is valid + assert execution_engine.verify_and_notify_new_payload( + NewPayloadRequest( + execution_payload=payload, + # [New in Deneb:EIP4844] + versioned_hashes=versioned_hashes, + # [New in Deneb:EIP4788] + parent_beacon_block_root=state.latest_block_header.parent_root, + ) + ) + + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + # [New in Deneb:EIP4844] + blob_gas_used=payload.blob_gas_used, + # [New in Deneb:EIP4844] + excess_blob_gas=payload.excess_blob_gas, + ) + + +- name: process_execution_payload#electra + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_execution_payload< + spec: | + + def process_execution_payload( + state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine + ) -> None: + payload = body.execution_payload + + # Verify consistency of the parent hash with respect to the previous execution payload header + assert payload.parent_hash == state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert payload.timestamp == compute_time_at_slot(state, state.slot) + # [Modified in Electra:EIP7691] + # Verify 
commitments are under limit + assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA + + # Compute list of versioned hashes + versioned_hashes = [ + kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments + ] + + # Verify the execution payload is valid + assert execution_engine.verify_and_notify_new_payload( + NewPayloadRequest( + execution_payload=payload, + versioned_hashes=versioned_hashes, + parent_beacon_block_root=state.latest_block_header.parent_root, + # [New in Electra] + execution_requests=body.execution_requests, + ) + ) + + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + blob_gas_used=payload.blob_gas_used, + excess_blob_gas=payload.excess_blob_gas, + ) + + +- name: process_execution_payload#fulu + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_execution_payload< + spec: | + + def process_execution_payload( + state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine + ) -> None: + payload = body.execution_payload + + # Verify consistency of the parent hash with respect to the previous execution payload header + assert payload.parent_hash == state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert 
payload.timestamp == compute_time_at_slot(state, state.slot) + # [Modified in Fulu:EIP7892] + # Verify commitments are under limit + assert ( + len(body.blob_kzg_commitments) + <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block + ) + + # Compute list of versioned hashes + versioned_hashes = [ + kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments + ] + + # Verify the execution payload is valid + assert execution_engine.verify_and_notify_new_payload( + NewPayloadRequest( + execution_payload=payload, + versioned_hashes=versioned_hashes, + parent_beacon_block_root=state.latest_block_header.parent_root, + execution_requests=body.execution_requests, + ) + ) + + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + blob_gas_used=payload.blob_gas_used, + excess_blob_gas=payload.excess_blob_gas, + ) + + +- name: process_execution_payload#gloas + sources: [] + spec: | + + def process_execution_payload( + state: BeaconState, + # [Modified in Gloas:EIP7732] + # Removed `body` + # [New in Gloas:EIP7732] + signed_envelope: SignedExecutionPayloadEnvelope, + execution_engine: ExecutionEngine, + # [New in Gloas:EIP7732] + verify: bool = True, + ) -> None: + envelope = signed_envelope.message + payload = envelope.payload + + # Verify signature + if verify: + assert verify_execution_payload_envelope_signature(state, 
signed_envelope) + + # Cache latest block header state root + previous_state_root = hash_tree_root(state) + if state.latest_block_header.state_root == Root(): + state.latest_block_header.state_root = previous_state_root + + # Verify consistency with the beacon block + assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header) + assert envelope.slot == state.slot + + # Verify consistency with the committed bid + committed_bid = state.latest_execution_payload_bid + assert envelope.builder_index == committed_bid.builder_index + assert committed_bid.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments) + assert committed_bid.prev_randao == payload.prev_randao + + # Verify the withdrawals root + assert hash_tree_root(payload.withdrawals) == state.latest_withdrawals_root + + # Verify the gas_limit + assert committed_bid.gas_limit == payload.gas_limit + # Verify the block hash + assert committed_bid.block_hash == payload.block_hash + # Verify consistency of the parent hash with respect to the previous execution payload + assert payload.parent_hash == state.latest_block_hash + # Verify timestamp + assert payload.timestamp == compute_time_at_slot(state, state.slot) + # Verify commitments are under limit + assert ( + len(envelope.blob_kzg_commitments) + <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block + ) + # Verify the execution payload is valid + versioned_hashes = [ + kzg_commitment_to_versioned_hash(commitment) for commitment in envelope.blob_kzg_commitments + ] + requests = envelope.execution_requests + assert execution_engine.verify_and_notify_new_payload( + NewPayloadRequest( + execution_payload=payload, + versioned_hashes=versioned_hashes, + parent_beacon_block_root=state.latest_block_header.parent_root, + execution_requests=requests, + ) + ) + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + 
for_ops(requests.deposits, process_deposit_request) + for_ops(requests.withdrawals, process_withdrawal_request) + for_ops(requests.consolidations, process_consolidation_request) + + # Queue the builder payment + payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] + amount = payment.withdrawal.amount + if amount > 0: + exit_queue_epoch = compute_exit_epoch_and_update_churn(state, amount) + payment.withdrawal.withdrawable_epoch = Epoch( + exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + ) + state.builder_pending_withdrawals.append(payment.withdrawal) + state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = ( + BuilderPendingPayment() + ) + + # Cache the execution payload hash + state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1 + state.latest_block_hash = payload.block_hash + + # Verify the state root + if verify: + assert envelope.state_root == hash_tree_root(state) + + +- name: process_execution_payload_bid#gloas + sources: [] + spec: | + + def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None: + signed_bid = block.body.signed_execution_payload_bid + bid = signed_bid.message + builder_index = bid.builder_index + builder = state.validators[builder_index] + + amount = bid.value + # For self-builds, amount must be zero regardless of withdrawal credential prefix + if builder_index == block.proposer_index: + assert amount == 0 + assert signed_bid.signature == bls.G2_POINT_AT_INFINITY + else: + # Non-self builds require builder withdrawal credential + assert has_builder_withdrawal_credential(builder) + assert verify_execution_payload_bid_signature(state, signed_bid) + + assert is_active_validator(builder, get_current_epoch(state)) + assert not builder.slashed + + # Check that the builder is active, non-slashed, and has funds to cover the bid + pending_payments = sum( + payment.withdrawal.amount + for payment in state.builder_pending_payments 
+ if payment.withdrawal.builder_index == builder_index + ) + pending_withdrawals = sum( + withdrawal.amount + for withdrawal in state.builder_pending_withdrawals + if withdrawal.builder_index == builder_index + ) + assert ( + amount == 0 + or state.balances[builder_index] + >= amount + pending_payments + pending_withdrawals + MIN_ACTIVATION_BALANCE + ) + + # Verify that the bid is for the current slot + assert bid.slot == block.slot + # Verify that the bid is for the right parent block + assert bid.parent_block_hash == state.latest_block_hash + assert bid.parent_block_root == block.parent_root + assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state)) + + # Record the pending payment if there is some payment + if amount > 0: + pending_payment = BuilderPendingPayment( + weight=0, + withdrawal=BuilderPendingWithdrawal( + fee_recipient=bid.fee_recipient, + amount=amount, + builder_index=builder_index, + withdrawable_epoch=FAR_FUTURE_EPOCH, + ), + ) + state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = ( + pending_payment + ) + + # Cache the signed execution payload bid + state.latest_execution_payload_bid = bid + + +- name: process_historical_roots_update#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs + search: pub fn process_historical_roots_update< + spec: | + + def process_historical_roots_update(state: BeaconState) -> None: + # Set historical root accumulator + next_epoch = Epoch(get_current_epoch(state) + 1) + if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: + historical_batch = HistoricalBatch( + block_roots=state.block_roots, state_roots=state.state_roots + ) + state.historical_roots.append(hash_tree_root(historical_batch)) + + +- name: process_historical_summaries_update#capella + sources: + - file: consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs + search: pub fn 
process_historical_summaries_update< + spec: | + + def process_historical_summaries_update(state: BeaconState) -> None: + # Set historical block root accumulator. + next_epoch = Epoch(get_current_epoch(state) + 1) + if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: + historical_summary = HistoricalSummary( + block_summary_root=hash_tree_root(state.block_roots), + state_summary_root=hash_tree_root(state.state_roots), + ) + state.historical_summaries.append(historical_summary) + + +- name: process_inactivity_updates#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs + search: pub fn process_inactivity_updates_slow< + spec: | + + def process_inactivity_updates(state: BeaconState) -> None: + # Skip the genesis epoch as score updates are based on the previous epoch participation + if get_current_epoch(state) == GENESIS_EPOCH: + return + + for index in get_eligible_validator_indices(state): + # Increase the inactivity score of inactive validators + if index in get_unslashed_participating_indices( + state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state) + ): + state.inactivity_scores[index] -= min(1, state.inactivity_scores[index]) + else: + state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS + # Decrease the inactivity score of all eligible validators during a leak-free epoch + if not is_in_inactivity_leak(state): + state.inactivity_scores[index] -= min( + INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index] + ) + + +- name: process_justification_and_finalization#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs + search: pub fn process_justification_and_finalization< + spec: | + + def process_justification_and_finalization(state: BeaconState) -> None: + # Initial FFG checkpoint values have a `0x00` stub for `root`. 
+ # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. + if get_current_epoch(state) <= GENESIS_EPOCH + 1: + return + previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) + current_attestations = get_matching_target_attestations(state, get_current_epoch(state)) + total_active_balance = get_total_active_balance(state) + previous_target_balance = get_attesting_balance(state, previous_attestations) + current_target_balance = get_attesting_balance(state, current_attestations) + weigh_justification_and_finalization( + state, total_active_balance, previous_target_balance, current_target_balance + ) + + +- name: process_justification_and_finalization#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs + search: pub fn process_justification_and_finalization< + spec: | + + def process_justification_and_finalization(state: BeaconState) -> None: + # Initial FFG checkpoint values have a `0x00` stub for `root`. + # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. 
+ if get_current_epoch(state) <= GENESIS_EPOCH + 1: + return + previous_indices = get_unslashed_participating_indices( + state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state) + ) + current_indices = get_unslashed_participating_indices( + state, TIMELY_TARGET_FLAG_INDEX, get_current_epoch(state) + ) + total_active_balance = get_total_active_balance(state) + previous_target_balance = get_total_balance(state, previous_indices) + current_target_balance = get_total_balance(state, current_indices) + weigh_justification_and_finalization( + state, total_active_balance, previous_target_balance, current_target_balance + ) + + +- name: process_light_client_finality_update#altair + sources: [] + spec: | + + def process_light_client_finality_update( + store: LightClientStore, + finality_update: LightClientFinalityUpdate, + current_slot: Slot, + genesis_validators_root: Root, + ) -> None: + update = LightClientUpdate( + attested_header=finality_update.attested_header, + next_sync_committee=SyncCommittee(), + next_sync_committee_branch=NextSyncCommitteeBranch(), + finalized_header=finality_update.finalized_header, + finality_branch=finality_update.finality_branch, + sync_aggregate=finality_update.sync_aggregate, + signature_slot=finality_update.signature_slot, + ) + process_light_client_update(store, update, current_slot, genesis_validators_root) + + +- name: process_light_client_optimistic_update#altair + sources: [] + spec: | + + def process_light_client_optimistic_update( + store: LightClientStore, + optimistic_update: LightClientOptimisticUpdate, + current_slot: Slot, + genesis_validators_root: Root, + ) -> None: + update = LightClientUpdate( + attested_header=optimistic_update.attested_header, + next_sync_committee=SyncCommittee(), + next_sync_committee_branch=NextSyncCommitteeBranch(), + finalized_header=LightClientHeader(), + finality_branch=FinalityBranch(), + sync_aggregate=optimistic_update.sync_aggregate, + signature_slot=optimistic_update.signature_slot, + ) + 
process_light_client_update(store, update, current_slot, genesis_validators_root) + + +- name: process_light_client_store_force_update#altair + sources: [] + spec: | + + def process_light_client_store_force_update(store: LightClientStore, current_slot: Slot) -> None: + if ( + current_slot > store.finalized_header.beacon.slot + UPDATE_TIMEOUT + and store.best_valid_update is not None + ): + # Forced best update when the update timeout has elapsed. + # Because the apply logic waits for `finalized_header.beacon.slot` to indicate sync committee finality, + # the `attested_header` may be treated as `finalized_header` in extended periods of non-finality + # to guarantee progression into later sync committee periods according to `is_better_update`. + if ( + store.best_valid_update.finalized_header.beacon.slot + <= store.finalized_header.beacon.slot + ): + store.best_valid_update.finalized_header = store.best_valid_update.attested_header + apply_light_client_update(store, store.best_valid_update) + store.best_valid_update = None + + +- name: process_light_client_update#altair + sources: [] + spec: | + + def process_light_client_update( + store: LightClientStore, + update: LightClientUpdate, + current_slot: Slot, + genesis_validators_root: Root, + ) -> None: + validate_light_client_update(store, update, current_slot, genesis_validators_root) + + sync_committee_bits = update.sync_aggregate.sync_committee_bits + + # Update the best update in case we have to force-update to it if the timeout elapses + if store.best_valid_update is None or is_better_update(update, store.best_valid_update): + store.best_valid_update = update + + # Track the maximum number of active participants in the committee signatures + store.current_max_active_participants = max( + store.current_max_active_participants, + sum(sync_committee_bits), + ) + + # Update the optimistic header + if ( + sum(sync_committee_bits) > get_safety_threshold(store) + and update.attested_header.beacon.slot > 
store.optimistic_header.beacon.slot + ): + store.optimistic_header = update.attested_header + + # Update finalized header + update_has_finalized_next_sync_committee = ( + not is_next_sync_committee_known(store) + and is_sync_committee_update(update) + and is_finality_update(update) + and ( + compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot) + == compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) + ) + ) + if sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2 and ( + update.finalized_header.beacon.slot > store.finalized_header.beacon.slot + or update_has_finalized_next_sync_committee + ): + # Normal update through 2/3 threshold + apply_light_client_update(store, update) + store.best_valid_update = None + + +- name: process_operations#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_operations< + spec: | + + def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Verify that outstanding deposits are processed up to the maximum number of deposits + assert len(body.deposits) == min( + MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index + ) + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) + for_ops(body.voluntary_exits, process_voluntary_exit) + + +- name: process_operations#capella + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_operations< + spec: | + + def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Verify that outstanding deposits are processed up to the maximum number of deposits 
+ assert len(body.deposits) == min( + MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index + ) + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) + for_ops(body.voluntary_exits, process_voluntary_exit) + # [New in Capella] + for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) + + +- name: process_operations#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_operations< + spec: | + + def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # [Modified in Electra:EIP6110] + # Disable former deposit mechanism once all prior deposits are processed + eth1_deposit_index_limit = min( + state.eth1_data.deposit_count, state.deposit_requests_start_index + ) + if state.eth1_deposit_index < eth1_deposit_index_limit: + assert len(body.deposits) == min( + MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index + ) + else: + assert len(body.deposits) == 0 + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + # [Modified in Electra:EIP7549] + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) + # [Modified in Electra:EIP7251] + for_ops(body.voluntary_exits, process_voluntary_exit) + for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) + # [New in Electra:EIP6110] + for_ops(body.execution_requests.deposits, process_deposit_request) + # [New in 
Electra:EIP7002:EIP7251] + for_ops(body.execution_requests.withdrawals, process_withdrawal_request) + # [New in Electra:EIP7251] + for_ops(body.execution_requests.consolidations, process_consolidation_request) + + +- name: process_operations#gloas + sources: [] + spec: | + + def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Disable former deposit mechanism once all prior deposits are processed + eth1_deposit_index_limit = min( + state.eth1_data.deposit_count, state.deposit_requests_start_index + ) + if state.eth1_deposit_index < eth1_deposit_index_limit: + assert len(body.deposits) == min( + MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index + ) + else: + assert len(body.deposits) == 0 + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + # [Modified in Gloas:EIP7732] + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + # [Modified in Gloas:EIP7732] + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) + for_ops(body.voluntary_exits, process_voluntary_exit) + for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) + # [Modified in Gloas:EIP7732] + # Removed `process_deposit_request` + # [Modified in Gloas:EIP7732] + # Removed `process_withdrawal_request` + # [Modified in Gloas:EIP7732] + # Removed `process_consolidation_request` + # [New in Gloas:EIP7732] + for_ops(body.payload_attestations, process_payload_attestation) + + +- name: process_participation_flag_updates#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs + search: pub fn process_participation_flag_updates< + spec: | + + def process_participation_flag_updates(state: BeaconState) -> None: + state.previous_epoch_participation = state.current_epoch_participation + 
state.current_epoch_participation = [ + ParticipationFlags(0b0000_0000) for _ in range(len(state.validators)) + ] + + +- name: process_participation_record_updates#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/base/participation_record_updates.rs + search: pub fn process_participation_record_updates< + spec: | + + def process_participation_record_updates(state: BeaconState) -> None: + # Rotate current/previous epoch attestations + state.previous_epoch_attestations = state.current_epoch_attestations + state.current_epoch_attestations = [] + + +- name: process_payload_attestation#gloas + sources: [] + spec: | + + def process_payload_attestation( + state: BeaconState, payload_attestation: PayloadAttestation + ) -> None: + data = payload_attestation.data + + # Check that the attestation is for the parent beacon block + assert data.beacon_block_root == state.latest_block_header.parent_root + # Check that the attestation is for the previous slot + assert data.slot + 1 == state.slot + # Verify signature + indexed_payload_attestation = get_indexed_payload_attestation( + state, data.slot, payload_attestation + ) + assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation) + + +- name: process_pending_consolidations#electra + sources: + - file: consensus/state_processing/src/per_epoch_processing/single_pass.rs + search: fn process_pending_consolidations< + spec: | + + def process_pending_consolidations(state: BeaconState) -> None: + next_epoch = Epoch(get_current_epoch(state) + 1) + next_pending_consolidation = 0 + for pending_consolidation in state.pending_consolidations: + source_validator = state.validators[pending_consolidation.source_index] + if source_validator.slashed: + next_pending_consolidation += 1 + continue + if source_validator.withdrawable_epoch > next_epoch: + break + + # Calculate the consolidated balance + source_effective_balance = min( + state.balances[pending_consolidation.source_index], 
source_validator.effective_balance + ) + + # Move active balance to target. Excess balance is withdrawable. + decrease_balance(state, pending_consolidation.source_index, source_effective_balance) + increase_balance(state, pending_consolidation.target_index, source_effective_balance) + next_pending_consolidation += 1 + + state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:] + + +- name: process_pending_deposits#electra + sources: [] + spec: | + + def process_pending_deposits(state: BeaconState) -> None: + next_epoch = Epoch(get_current_epoch(state) + 1) + available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit( + state + ) + processed_amount = 0 + next_deposit_index = 0 + deposits_to_postpone = [] + is_churn_limit_reached = False + finalized_slot = compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) + + for deposit in state.pending_deposits: + # Do not process deposit requests if Eth1 bridge deposits are not yet applied. + if ( + # Is deposit request + deposit.slot > GENESIS_SLOT + and + # There are pending Eth1 bridge deposits + state.eth1_deposit_index < state.deposit_requests_start_index + ): + break + + # Check if deposit has been finalized, otherwise, stop processing. + if deposit.slot > finalized_slot: + break + + # Check if number of processed deposits has not reached the limit, otherwise, stop processing. + if next_deposit_index >= MAX_PENDING_DEPOSITS_PER_EPOCH: + break + + # Read validator state + is_validator_exited = False + is_validator_withdrawn = False + validator_pubkeys = [v.pubkey for v in state.validators] + if deposit.pubkey in validator_pubkeys: + validator = state.validators[ValidatorIndex(validator_pubkeys.index(deposit.pubkey))] + is_validator_exited = validator.exit_epoch < FAR_FUTURE_EPOCH + is_validator_withdrawn = validator.withdrawable_epoch < next_epoch + + if is_validator_withdrawn: + # Deposited balance will never become active. 
Increase balance but do not consume churn + apply_pending_deposit(state, deposit) + elif is_validator_exited: + # Validator is exiting, postpone the deposit until after withdrawable epoch + deposits_to_postpone.append(deposit) + else: + # Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch. + is_churn_limit_reached = processed_amount + deposit.amount > available_for_processing + if is_churn_limit_reached: + break + + # Consume churn and apply deposit. + processed_amount += deposit.amount + apply_pending_deposit(state, deposit) + + # Regardless of how the deposit was handled, we move on in the queue. + next_deposit_index += 1 + + state.pending_deposits = state.pending_deposits[next_deposit_index:] + deposits_to_postpone + + # Accumulate churn only if the churn limit has been hit. + if is_churn_limit_reached: + state.deposit_balance_to_consume = available_for_processing - processed_amount + else: + state.deposit_balance_to_consume = Gwei(0) + + +- name: process_proposer_lookahead#fulu + sources: + - file: consensus/state_processing/src/per_epoch_processing/single_pass.rs + search: pub fn process_proposer_lookahead< + spec: | + + def process_proposer_lookahead(state: BeaconState) -> None: + last_epoch_start = len(state.proposer_lookahead) - SLOTS_PER_EPOCH + # Shift out proposers in the first epoch + state.proposer_lookahead[:last_epoch_start] = state.proposer_lookahead[SLOTS_PER_EPOCH:] + # Fill in the last epoch with new proposer indices + last_epoch_proposers = get_beacon_proposer_indices( + state, Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1) + ) + state.proposer_lookahead[last_epoch_start:] = last_epoch_proposers + + +- name: process_proposer_slashing#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_proposer_slashings< + spec: | + + def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: + header_1 
= proposer_slashing.signed_header_1.message + header_2 = proposer_slashing.signed_header_2.message + + # Verify header slots match + assert header_1.slot == header_2.slot + # Verify header proposer indices match + assert header_1.proposer_index == header_2.proposer_index + # Verify the headers are different + assert header_1 != header_2 + # Verify the proposer is slashable + proposer = state.validators[header_1.proposer_index] + assert is_slashable_validator(proposer, get_current_epoch(state)) + # Verify signatures + for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2): + domain = get_domain( + state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot) + ) + signing_root = compute_signing_root(signed_header.message, domain) + assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature) + + slash_validator(state, header_1.proposer_index) + + +- name: process_proposer_slashing#gloas + sources: [] + spec: | + + def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: + header_1 = proposer_slashing.signed_header_1.message + header_2 = proposer_slashing.signed_header_2.message + + # Verify header slots match + assert header_1.slot == header_2.slot + # Verify header proposer indices match + assert header_1.proposer_index == header_2.proposer_index + # Verify the headers are different + assert header_1 != header_2 + # Verify the proposer is slashable + proposer = state.validators[header_1.proposer_index] + assert is_slashable_validator(proposer, get_current_epoch(state)) + # Verify signatures + for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2): + domain = get_domain( + state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot) + ) + signing_root = compute_signing_root(signed_header.message, domain) + assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature) + + # [New in 
Gloas:EIP7732] + # Remove the BuilderPendingPayment corresponding to + # this proposal if it is still in the 2-epoch window. + slot = header_1.slot + proposal_epoch = compute_epoch_at_slot(slot) + if proposal_epoch == get_current_epoch(state): + payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH + state.builder_pending_payments[payment_index] = BuilderPendingPayment() + elif proposal_epoch == get_previous_epoch(state): + payment_index = slot % SLOTS_PER_EPOCH + state.builder_pending_payments[payment_index] = BuilderPendingPayment() + + slash_validator(state, header_1.proposer_index) + + +- name: process_randao#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_randao< + spec: | + + def process_randao(state: BeaconState, body: BeaconBlockBody) -> None: + epoch = get_current_epoch(state) + # Verify RANDAO reveal + proposer = state.validators[get_beacon_proposer_index(state)] + signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO)) + assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal) + # Mix in RANDAO reveal + mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal)) + state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix + + +- name: process_randao_mixes_reset#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/resets.rs + search: pub fn process_randao_mixes_reset< + spec: | + + def process_randao_mixes_reset(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + next_epoch = Epoch(current_epoch + 1) + # Set randao mix + state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix( + state, current_epoch + ) + + +- name: process_registry_updates#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/registry_updates.rs + search: pub fn process_registry_updates< + spec: | + + def process_registry_updates(state: BeaconState) -> None: + # Process activation 
eligibility and ejections + for index, validator in enumerate(state.validators): + if is_eligible_for_activation_queue(validator): + validator.activation_eligibility_epoch = get_current_epoch(state) + 1 + + if ( + is_active_validator(validator, get_current_epoch(state)) + and validator.effective_balance <= EJECTION_BALANCE + ): + initiate_validator_exit(state, ValidatorIndex(index)) + + # Queue validators eligible for activation and not yet dequeued for activation + activation_queue = sorted( + [ + index + for index, validator in enumerate(state.validators) + if is_eligible_for_activation(state, validator) + ], + # Order by the sequence of activation_eligibility_epoch setting and then index + key=lambda index: (state.validators[index].activation_eligibility_epoch, index), + ) + # Dequeued validators for activation up to churn limit + for index in activation_queue[: get_validator_churn_limit(state)]: + validator = state.validators[index] + validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) + + +- name: process_registry_updates#deneb + sources: + - file: consensus/state_processing/src/per_epoch_processing/registry_updates.rs + search: pub fn process_registry_updates< + spec: | + + def process_registry_updates(state: BeaconState) -> None: + # Process activation eligibility and ejections + for index, validator in enumerate(state.validators): + if is_eligible_for_activation_queue(validator): + validator.activation_eligibility_epoch = get_current_epoch(state) + 1 + + if ( + is_active_validator(validator, get_current_epoch(state)) + and validator.effective_balance <= EJECTION_BALANCE + ): + initiate_validator_exit(state, ValidatorIndex(index)) + + # Queue validators eligible for activation and not yet dequeued for activation + activation_queue = sorted( + [ + index + for index, validator in enumerate(state.validators) + if is_eligible_for_activation(state, validator) + ], + # Order by the sequence of activation_eligibility_epoch setting 
and then index + key=lambda index: (state.validators[index].activation_eligibility_epoch, index), + ) + # Dequeued validators for activation up to activation churn limit + # [Modified in Deneb:EIP7514] + for index in activation_queue[: get_validator_activation_churn_limit(state)]: + validator = state.validators[index] + validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) + + +- name: process_registry_updates#electra + sources: + - file: consensus/state_processing/src/per_epoch_processing/registry_updates.rs + search: pub fn process_registry_updates< + spec: | + + def process_registry_updates(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + activation_epoch = compute_activation_exit_epoch(current_epoch) + + # Process activation eligibility, ejections, and activations + for index, validator in enumerate(state.validators): + # [Modified in Electra:EIP7251] + if is_eligible_for_activation_queue(validator): + validator.activation_eligibility_epoch = current_epoch + 1 + elif ( + is_active_validator(validator, current_epoch) + and validator.effective_balance <= EJECTION_BALANCE + ): + # [Modified in Electra:EIP7251] + initiate_validator_exit(state, ValidatorIndex(index)) + elif is_eligible_for_activation(state, validator): + validator.activation_epoch = activation_epoch + + +- name: process_rewards_and_penalties#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs + search: pub fn process_rewards_and_penalties< + spec: | + + def process_rewards_and_penalties(state: BeaconState) -> None: + # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch + if get_current_epoch(state) == GENESIS_EPOCH: + return + + rewards, penalties = get_attestation_deltas(state) + for index in range(len(state.validators)): + increase_balance(state, ValidatorIndex(index), rewards[index]) + decrease_balance(state, 
ValidatorIndex(index), penalties[index]) + + +- name: process_rewards_and_penalties#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs + search: pub fn process_rewards_and_penalties_slow< + spec: | + + def process_rewards_and_penalties(state: BeaconState) -> None: + # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch + if get_current_epoch(state) == GENESIS_EPOCH: + return + + flag_deltas = [ + get_flag_index_deltas(state, flag_index) + for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS)) + ] + deltas = flag_deltas + [get_inactivity_penalty_deltas(state)] + for rewards, penalties in deltas: + for index in range(len(state.validators)): + increase_balance(state, ValidatorIndex(index), rewards[index]) + decrease_balance(state, ValidatorIndex(index), penalties[index]) + + +- name: process_slashings#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/slashings.rs + search: pub fn process_slashings< + spec: | + + def process_slashings(state: BeaconState) -> None: + epoch = get_current_epoch(state) + total_balance = get_total_active_balance(state) + adjusted_total_slashing_balance = min( + sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance + ) + for index, validator in enumerate(state.validators): + if ( + validator.slashed + and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch + ): + increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow + penalty_numerator = ( + validator.effective_balance // increment * adjusted_total_slashing_balance + ) + penalty = penalty_numerator // total_balance * increment + decrease_balance(state, ValidatorIndex(index), penalty) + + +- name: process_slashings#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing/slashings.rs + search: pub fn process_slashings< + spec: | + + def 
process_slashings(state: BeaconState) -> None: + epoch = get_current_epoch(state) + total_balance = get_total_active_balance(state) + adjusted_total_slashing_balance = min( + sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance + ) + for index, validator in enumerate(state.validators): + if ( + validator.slashed + and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch + ): + increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow + penalty_numerator = ( + validator.effective_balance // increment * adjusted_total_slashing_balance + ) + penalty = penalty_numerator // total_balance * increment + decrease_balance(state, ValidatorIndex(index), penalty) + + +- name: process_slashings#bellatrix + sources: + - file: consensus/state_processing/src/per_epoch_processing/slashings.rs + search: pub fn process_slashings< + spec: | + + def process_slashings(state: BeaconState) -> None: + epoch = get_current_epoch(state) + total_balance = get_total_active_balance(state) + adjusted_total_slashing_balance = min( + sum(state.slashings) + # [Modified in Bellatrix] + * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, + total_balance, + ) + for index, validator in enumerate(state.validators): + if ( + validator.slashed + and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch + ): + increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow + penalty_numerator = ( + validator.effective_balance // increment * adjusted_total_slashing_balance + ) + penalty = penalty_numerator // total_balance * increment + decrease_balance(state, ValidatorIndex(index), penalty) + + +- name: process_slashings#electra + sources: + - file: consensus/state_processing/src/per_epoch_processing/slashings.rs + search: pub fn process_slashings< + spec: | + + def process_slashings(state: BeaconState) -> None: + epoch = get_current_epoch(state) + total_balance = 
get_total_active_balance(state) + adjusted_total_slashing_balance = min( + sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, total_balance + ) + increment = ( + EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow + ) + penalty_per_effective_balance_increment = adjusted_total_slashing_balance // ( + total_balance // increment + ) + for index, validator in enumerate(state.validators): + if ( + validator.slashed + and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch + ): + effective_balance_increments = validator.effective_balance // increment + # [Modified in Electra:EIP7251] + penalty = penalty_per_effective_balance_increment * effective_balance_increments + decrease_balance(state, ValidatorIndex(index), penalty) + + +- name: process_slashings_reset#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/resets.rs + search: pub fn process_slashings_reset< + spec: | + + def process_slashings_reset(state: BeaconState) -> None: + next_epoch = Epoch(get_current_epoch(state) + 1) + # Reset slashings + state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0) + + +- name: process_slot#phase0 + sources: + - file: consensus/state_processing/src/per_slot_processing.rs + search: pub fn per_slot_processing< + spec: | + + def process_slot(state: BeaconState) -> None: + # Cache state root + previous_state_root = hash_tree_root(state) + state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root + # Cache latest block header state root + if state.latest_block_header.state_root == Bytes32(): + state.latest_block_header.state_root = previous_state_root + # Cache block root + previous_block_root = hash_tree_root(state.latest_block_header) + state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root + + +- name: process_slot#gloas + sources: [] + spec: | + + def process_slot(state: BeaconState) -> None: + # Cache state root + 
previous_state_root = hash_tree_root(state) + state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root + # Cache latest block header state root + if state.latest_block_header.state_root == Bytes32(): + state.latest_block_header.state_root = previous_state_root + # Cache block root + previous_block_root = hash_tree_root(state.latest_block_header) + state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root + # [New in Gloas:EIP7732] + # Unset the next payload availability + state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0 + + +- name: process_slots#phase0 + sources: [] + spec: | + + def process_slots(state: BeaconState, slot: Slot) -> None: + assert state.slot < slot + while state.slot < slot: + process_slot(state) + # Process epoch on the start slot of the next epoch + if (state.slot + 1) % SLOTS_PER_EPOCH == 0: + process_epoch(state) + state.slot = Slot(state.slot + 1) + + +- name: process_sync_aggregate#altair + sources: + - file: consensus/state_processing/src/per_block_processing/altair/sync_committee.rs + search: pub fn process_sync_aggregate< + spec: | + + def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None: + # Verify sync committee aggregate signature signing over the previous slot block root + committee_pubkeys = state.current_sync_committee.pubkeys + committee_bits = sync_aggregate.sync_committee_bits + if sum(committee_bits) == SYNC_COMMITTEE_SIZE: + # All members participated - use precomputed aggregate key + participant_pubkeys = [state.current_sync_committee.aggregate_pubkey] + elif sum(committee_bits) > SYNC_COMMITTEE_SIZE // 2: + # More than half participated - subtract non-participant keys. 
+ # First determine nonparticipating members + non_participant_pubkeys = [ + pubkey for pubkey, bit in zip(committee_pubkeys, committee_bits) if not bit + ] + # Compute aggregate of non-participants + non_participant_aggregate = eth_aggregate_pubkeys(non_participant_pubkeys) + # Subtract non-participants from the full aggregate + # This is equivalent to: aggregate_pubkey + (-non_participant_aggregate) + participant_pubkey = bls.add( + bls.bytes48_to_G1(state.current_sync_committee.aggregate_pubkey), + bls.neg(bls.bytes48_to_G1(non_participant_aggregate)), + ) + participant_pubkeys = [BLSPubkey(bls.G1_to_bytes48(participant_pubkey))] + else: + # Less than half participated - aggregate participant keys + participant_pubkeys = [ + pubkey + for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) + if bit + ] + previous_slot = max(state.slot, Slot(1)) - Slot(1) + domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) + signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) + # Note: eth_fast_aggregate_verify works with a singleton list containing an aggregated key + assert eth_fast_aggregate_verify( + participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature + ) + + # Compute participant and proposer rewards + total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT + total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments) + max_participant_rewards = Gwei( + total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH + ) + participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE) + proposer_reward = Gwei( + participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) + ) + + # Apply participant and proposer rewards + all_pubkeys = [v.pubkey for v in state.validators] + committee_indices = [ + ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey 
in state.current_sync_committee.pubkeys + ] + for participant_index, participation_bit in zip( + committee_indices, sync_aggregate.sync_committee_bits + ): + if participation_bit: + increase_balance(state, participant_index, participant_reward) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + else: + decrease_balance(state, participant_index, participant_reward) + + +- name: process_sync_committee_contributions#altair + sources: + - file: consensus/types/src/sync_committee/sync_aggregate.rs + search: pub fn from_contributions( + spec: | + + def process_sync_committee_contributions( + block: BeaconBlock, contributions: Set[SyncCommitteeContribution] + ) -> None: + sync_aggregate = SyncAggregate() + signatures = [] + sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT + + for contribution in contributions: + subcommittee_index = contribution.subcommittee_index + for index, participated in enumerate(contribution.aggregation_bits): + if participated: + participant_index = sync_subcommittee_size * subcommittee_index + index + sync_aggregate.sync_committee_bits[participant_index] = True + signatures.append(contribution.signature) + + sync_aggregate.sync_committee_signature = bls.Aggregate(signatures) + + block.body.sync_aggregate = sync_aggregate + + +- name: process_sync_committee_updates#altair + sources: + - file: consensus/state_processing/src/per_epoch_processing/altair/sync_committee_updates.rs + search: pub fn process_sync_committee_updates< + spec: | + + def process_sync_committee_updates(state: BeaconState) -> None: + next_epoch = get_current_epoch(state) + Epoch(1) + if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0: + state.current_sync_committee = state.next_sync_committee + state.next_sync_committee = get_next_sync_committee(state) + + +- name: process_voluntary_exit#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_exits< + 
spec: | + + def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: + voluntary_exit = signed_voluntary_exit.message + validator = state.validators[voluntary_exit.validator_index] + # Verify the validator is active + assert is_active_validator(validator, get_current_epoch(state)) + # Verify exit has not been initiated + assert validator.exit_epoch == FAR_FUTURE_EPOCH + # Exits must specify an epoch when they become valid; they are not valid before then + assert get_current_epoch(state) >= voluntary_exit.epoch + # Verify the validator has been active long enough + assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD + # Verify signature + domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) + signing_root = compute_signing_root(voluntary_exit, domain) + assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) + # Initiate exit + initiate_validator_exit(state, voluntary_exit.validator_index) + + +- name: process_voluntary_exit#deneb + sources: [] + spec: | + + def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: + voluntary_exit = signed_voluntary_exit.message + validator = state.validators[voluntary_exit.validator_index] + # Verify the validator is active + assert is_active_validator(validator, get_current_epoch(state)) + # Verify exit has not been initiated + assert validator.exit_epoch == FAR_FUTURE_EPOCH + # Exits must specify an epoch when they become valid; they are not valid before then + assert get_current_epoch(state) >= voluntary_exit.epoch + # Verify the validator has been active long enough + assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD + # Verify signature + # [Modified in Deneb:EIP7044] + domain = compute_domain( + DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root + ) + signing_root = compute_signing_root(voluntary_exit, 
domain) + assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) + # Initiate exit + initiate_validator_exit(state, voluntary_exit.validator_index) + + +- name: process_voluntary_exit#electra + sources: [] + spec: | + + def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: + voluntary_exit = signed_voluntary_exit.message + validator = state.validators[voluntary_exit.validator_index] + # Verify the validator is active + assert is_active_validator(validator, get_current_epoch(state)) + # Verify exit has not been initiated + assert validator.exit_epoch == FAR_FUTURE_EPOCH + # Exits must specify an epoch when they become valid; they are not valid before then + assert get_current_epoch(state) >= voluntary_exit.epoch + # Verify the validator has been active long enough + assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD + # [New in Electra:EIP7251] + # Only exit validator if it has no pending withdrawals in the queue + assert get_pending_balance_to_withdraw(state, voluntary_exit.validator_index) == 0 + # Verify signature + domain = compute_domain( + DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root + ) + signing_root = compute_signing_root(voluntary_exit, domain) + assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) + # Initiate exit + initiate_validator_exit(state, voluntary_exit.validator_index) + + +- name: process_withdrawal_request#electra + sources: + - file: consensus/state_processing/src/per_block_processing/process_operations.rs + search: pub fn process_withdrawal_requests< + spec: | + + def process_withdrawal_request(state: BeaconState, withdrawal_request: WithdrawalRequest) -> None: + amount = withdrawal_request.amount + is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT + + # If partial withdrawal queue is full, only full exits are processed + if ( + len(state.pending_partial_withdrawals) 
== PENDING_PARTIAL_WITHDRAWALS_LIMIT + and not is_full_exit_request + ): + return + + validator_pubkeys = [v.pubkey for v in state.validators] + # Verify pubkey exists + request_pubkey = withdrawal_request.validator_pubkey + if request_pubkey not in validator_pubkeys: + return + index = ValidatorIndex(validator_pubkeys.index(request_pubkey)) + validator = state.validators[index] + + # Verify withdrawal credentials + has_correct_credential = has_execution_withdrawal_credential(validator) + is_correct_source_address = ( + validator.withdrawal_credentials[12:] == withdrawal_request.source_address + ) + if not (has_correct_credential and is_correct_source_address): + return + # Verify the validator is active + if not is_active_validator(validator, get_current_epoch(state)): + return + # Verify exit has not been initiated + if validator.exit_epoch != FAR_FUTURE_EPOCH: + return + # Verify the validator has been active long enough + if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD: + return + + pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index) + + if is_full_exit_request: + # Only exit validator if it has no pending withdrawals in the queue + if pending_balance_to_withdraw == 0: + initiate_validator_exit(state, index) + return + + has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE + has_excess_balance = ( + state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw + ) + + # Only allow partial withdrawals with compounding withdrawal credentials + if ( + has_compounding_withdrawal_credential(validator) + and has_sufficient_effective_balance + and has_excess_balance + ): + to_withdraw = min( + state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw, amount + ) + exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw) + withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) + 
state.pending_partial_withdrawals.append( + PendingPartialWithdrawal( + validator_index=index, + amount=to_withdraw, + withdrawable_epoch=withdrawable_epoch, + ) + ) + + +- name: process_withdrawals#capella + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_withdrawals< + spec: | + + def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None: + expected_withdrawals = get_expected_withdrawals(state) + assert payload.withdrawals == expected_withdrawals + + for withdrawal in expected_withdrawals: + decrease_balance(state, withdrawal.validator_index, withdrawal.amount) + + # Update the next withdrawal index if this block contained withdrawals + if len(expected_withdrawals) != 0: + latest_withdrawal = expected_withdrawals[-1] + state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1) + + # Update the next validator index to start the next withdrawal sweep + if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + # Next sweep starts after the latest withdrawal's validator index + next_validator_index = ValidatorIndex( + (expected_withdrawals[-1].validator_index + 1) % len(state.validators) + ) + state.next_withdrawal_validator_index = next_validator_index + else: + # Advance sweep by the max length of the sweep if there was not a full set of withdrawals + next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP + next_validator_index = ValidatorIndex(next_index % len(state.validators)) + state.next_withdrawal_validator_index = next_validator_index + + +- name: process_withdrawals#electra + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn process_withdrawals< + spec: | + + def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None: + # [Modified in Electra:EIP7251] + expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) + + assert 
payload.withdrawals == expected_withdrawals + + for withdrawal in expected_withdrawals: + decrease_balance(state, withdrawal.validator_index, withdrawal.amount) + + # [New in Electra:EIP7251] + # Update pending partial withdrawals + state.pending_partial_withdrawals = state.pending_partial_withdrawals[ + processed_partial_withdrawals_count: + ] + + # Update the next withdrawal index if this block contained withdrawals + if len(expected_withdrawals) != 0: + latest_withdrawal = expected_withdrawals[-1] + state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1) + + # Update the next validator index to start the next withdrawal sweep + if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + # Next sweep starts after the latest withdrawal's validator index + next_validator_index = ValidatorIndex( + (expected_withdrawals[-1].validator_index + 1) % len(state.validators) + ) + state.next_withdrawal_validator_index = next_validator_index + else: + # Advance sweep by the max length of the sweep if there was not a full set of withdrawals + next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP + next_validator_index = ValidatorIndex(next_index % len(state.validators)) + state.next_withdrawal_validator_index = next_validator_index + + +- name: process_withdrawals#gloas + sources: [] + spec: | + + def process_withdrawals( + state: BeaconState, + # [Modified in Gloas:EIP7732] + # Removed `payload` + ) -> None: + # [New in Gloas:EIP7732] + # Return early if the parent block is empty + if not is_parent_block_full(state): + return + + # [Modified in Gloas:EIP7732] + # Get information about the expected withdrawals + withdrawals, processed_builder_withdrawals_count, processed_partial_withdrawals_count = ( + get_expected_withdrawals(state) + ) + withdrawals_list = List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD](withdrawals) + state.latest_withdrawals_root = hash_tree_root(withdrawals_list) + for withdrawal in withdrawals: + 
decrease_balance(state, withdrawal.validator_index, withdrawal.amount) + + # [New in Gloas:EIP7732] + # Update the pending builder withdrawals + state.builder_pending_withdrawals = [ + w + for w in state.builder_pending_withdrawals[:processed_builder_withdrawals_count] + if not is_builder_payment_withdrawable(state, w) + ] + state.builder_pending_withdrawals[processed_builder_withdrawals_count:] + + # Update pending partial withdrawals + state.pending_partial_withdrawals = state.pending_partial_withdrawals[ + processed_partial_withdrawals_count: + ] + + # Update the next withdrawal index if this block contained withdrawals + if len(withdrawals) != 0: + latest_withdrawal = withdrawals[-1] + state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1) + + # Update the next validator index to start the next withdrawal sweep + if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: + # Next sweep starts after the latest withdrawal's validator index + next_validator_index = ValidatorIndex( + (withdrawals[-1].validator_index + 1) % len(state.validators) + ) + state.next_withdrawal_validator_index = next_validator_index + else: + # Advance sweep by the max length of the sweep if there was not a full set of withdrawals + next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP + next_validator_index = ValidatorIndex(next_index % len(state.validators)) + state.next_withdrawal_validator_index = next_validator_index + + +- name: queue_excess_active_balance#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn queue_excess_active_balance( + spec: | + + def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None: + balance = state.balances[index] + if balance > MIN_ACTIVATION_BALANCE: + excess_balance = balance - MIN_ACTIVATION_BALANCE + state.balances[index] = MIN_ACTIVATION_BALANCE + validator = state.validators[index] + # Use bls.G2_POINT_AT_INFINITY as a signature field 
placeholder + # and GENESIS_SLOT to distinguish from a pending deposit request + state.pending_deposits.append( + PendingDeposit( + pubkey=validator.pubkey, + withdrawal_credentials=validator.withdrawal_credentials, + amount=excess_balance, + signature=bls.G2_POINT_AT_INFINITY, + slot=GENESIS_SLOT, + ) + ) + + +- name: recover_cells_and_kzg_proofs#fulu + sources: [] + spec: | + + def recover_cells_and_kzg_proofs( + cell_indices: Sequence[CellIndex], cells: Sequence[Cell] + ) -> Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]: + """ + Given at least 50% of cells for a blob, recover all the cells/proofs. + This algorithm uses FFTs to recover cells faster than using Lagrange + implementation, as can be seen here: + https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039 + + A faster version thanks to Qi Zhou can be found here: + https://github.com/ethereum/research/blob/51b530a53bd4147d123ab3e390a9d08605c2cdb8/polynomial_reconstruction/polynomial_reconstruction_danksharding.py + + Public method. 
+ """ + # Check we have the same number of cells and indices + assert len(cell_indices) == len(cells) + # Check we have enough cells to be able to perform the reconstruction + assert CELLS_PER_EXT_BLOB // 2 <= len(cell_indices) <= CELLS_PER_EXT_BLOB + # Check for duplicates + assert len(cell_indices) == len(set(cell_indices)) + # Check that indices are in ascending order + assert cell_indices == sorted(cell_indices) + # Check that the cell indices are within bounds + for cell_index in cell_indices: + assert cell_index < CELLS_PER_EXT_BLOB + # Check that each cell is the correct length + for cell in cells: + assert len(cell) == BYTES_PER_CELL + + # Convert cells to coset evaluations + cosets_evals = [cell_to_coset_evals(cell) for cell in cells] + + # Given the coset evaluations, recover the polynomial in coefficient form + polynomial_coeff = recover_polynomialcoeff(cell_indices, cosets_evals) + + # Recompute all cells/proofs + return compute_cells_and_kzg_proofs_polynomialcoeff(polynomial_coeff) + + +- name: recover_matrix#fulu + sources: + - file: beacon_node/beacon_chain/src/data_availability_checker.rs + search: pub fn reconstruct_data_columns( + spec: | + + def recover_matrix( + partial_matrix: Sequence[MatrixEntry], blob_count: uint64 + ) -> Sequence[MatrixEntry]: + """ + Recover the full, flattened sequence of matrix entries. + + This helper demonstrates how to apply ``recover_cells_and_kzg_proofs``. + The data structure for storing cells/proofs is implementation-dependent. 
+ """ + matrix = [] + for blob_index in range(blob_count): + cell_indices = [e.column_index for e in partial_matrix if e.row_index == blob_index] + cells = [e.cell for e in partial_matrix if e.row_index == blob_index] + recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells) + for cell_index, (cell, proof) in enumerate(zip(recovered_cells, recovered_proofs)): + matrix.append( + MatrixEntry( + cell=cell, + kzg_proof=proof, + row_index=blob_index, + column_index=cell_index, + ) + ) + return matrix + + +- name: recover_polynomialcoeff#fulu + sources: [] + spec: | + + def recover_polynomialcoeff( + cell_indices: Sequence[CellIndex], cosets_evals: Sequence[CosetEvals] + ) -> PolynomialCoeff: + """ + Recover the polynomial in coefficient form that when evaluated at the roots of unity will give the extended blob. + """ + # Get the extended domain. This will be referred to as the FFT domain. + roots_of_unity_extended = compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB) + + # Flatten the cosets evaluations. + # If a cell is missing, then its evaluation is zero. + # We let E(x) be a polynomial of degree FIELD_ELEMENTS_PER_EXT_BLOB - 1 + # that interpolates the evaluations including the zeros for missing ones. + extended_evaluation_rbo = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_EXT_BLOB + for cell_index, cell in zip(cell_indices, cosets_evals): + start = cell_index * FIELD_ELEMENTS_PER_CELL + end = (cell_index + 1) * FIELD_ELEMENTS_PER_CELL + extended_evaluation_rbo[start:end] = cell + extended_evaluation = bit_reversal_permutation(extended_evaluation_rbo) + + # Compute the vanishing polynomial Z(x) in coefficient form. + # Z(x) is the polynomial which vanishes on all of the evaluations which are missing. 
+ missing_cell_indices = [ + CellIndex(cell_index) + for cell_index in range(CELLS_PER_EXT_BLOB) + if cell_index not in cell_indices + ] + zero_poly_coeff = construct_vanishing_polynomial(missing_cell_indices) + + # Convert Z(x) to evaluation form over the FFT domain + zero_poly_eval = fft_field(zero_poly_coeff, roots_of_unity_extended) + + # Compute (E*Z)(x) = E(x) * Z(x) in evaluation form over the FFT domain + # Note: over the FFT domain, the polynomials (E*Z)(x) and (P*Z)(x) agree, where + # P(x) is the polynomial we want to reconstruct (degree FIELD_ELEMENTS_PER_BLOB - 1). + extended_evaluation_times_zero = [a * b for a, b in zip(zero_poly_eval, extended_evaluation)] + + # We know that (E*Z)(x) and (P*Z)(x) agree over the FFT domain, + # and we know that (P*Z)(x) has degree at most FIELD_ELEMENTS_PER_EXT_BLOB - 1. + # Thus, an inverse FFT of the evaluations of (E*Z)(x) (= evaluations of (P*Z)(x)) + # yields the coefficient form of (P*Z)(x). + extended_evaluation_times_zero_coeffs = fft_field( + extended_evaluation_times_zero, roots_of_unity_extended, inv=True + ) + + # Next step is to divide the polynomial (P*Z)(x) by polynomial Z(x) to get P(x). + # We do this in evaluation form over a coset of the FFT domain to avoid division by 0. 
+ + # Convert (P*Z)(x) to evaluation form over a coset of the FFT domain + extended_evaluations_over_coset = coset_fft_field( + extended_evaluation_times_zero_coeffs, roots_of_unity_extended + ) + + # Convert Z(x) to evaluation form over a coset of the FFT domain + zero_poly_over_coset = coset_fft_field(zero_poly_coeff, roots_of_unity_extended) + + # Compute P(x) = (P*Z)(x) / Z(x) in evaluation form over a coset of the FFT domain + reconstructed_poly_over_coset = [ + a / b for a, b in zip(extended_evaluations_over_coset, zero_poly_over_coset) + ] + + # Convert P(x) to coefficient form + reconstructed_poly_coeff = coset_fft_field( + reconstructed_poly_over_coset, roots_of_unity_extended, inv=True + ) + + return PolynomialCoeff(reconstructed_poly_coeff[:FIELD_ELEMENTS_PER_BLOB]) + + +- name: reverse_bits#deneb + sources: [] + spec: | + + def reverse_bits(n: int, order: int) -> int: + """ + Reverse the bit order of an integer ``n``. + """ + assert is_power_of_two(order) + # Convert n to binary with the same number of bits as "order" - 1, then reverse its bit order + return int(("{:0" + str(order.bit_length() - 1) + "b}").format(n)[::-1], 2) + + +- name: saturating_sub#phase0 + sources: + - file: consensus/types/src/core/slot_epoch_macros.rs + search: pub fn saturating_sub< + spec: | + + def saturating_sub(a: int, b: int) -> int: + """ + Computes a - b, saturating at numeric bounds. + """ + return a - b if a > b else 0 + + +- name: seconds_to_milliseconds#phase0 + sources: [] + spec: | + + def seconds_to_milliseconds(seconds: uint64) -> uint64: + """ + Convert seconds to milliseconds with overflow protection. + Returns ``UINT64_MAX`` if the result would overflow. 
+ """ + if seconds > UINT64_MAX // 1000: + return UINT64_MAX + return seconds * 1000 + + +- name: set_or_append_list#altair + sources: [] + spec: | + + def set_or_append_list(list: List, index: ValidatorIndex, value: Any) -> None: + if index == len(list): + list.append(value) + else: + list[index] = value + + +- name: should_extend_payload#gloas + sources: [] + spec: | + + def should_extend_payload(store: Store, root: Root) -> bool: + proposer_root = store.proposer_boost_root + return ( + is_payload_timely(store, root) + or proposer_root == Root() + or store.blocks[proposer_root].parent_root != root + or is_parent_node_full(store, store.blocks[proposer_root]) + ) + + +- name: should_override_forkchoice_update#bellatrix + sources: [] + spec: | + + def should_override_forkchoice_update(store: Store, head_root: Root) -> bool: + head_block = store.blocks[head_root] + parent_root = head_block.parent_root + parent_block = store.blocks[parent_root] + current_slot = get_current_slot(store) + proposal_slot = head_block.slot + Slot(1) + + # Only re-org the head_block block if it arrived later than the attestation deadline. + head_late = is_head_late(store, head_root) + + # Shuffling stable. + shuffling_stable = is_shuffling_stable(proposal_slot) + + # FFG information of the new head_block will be competitive with the current head. + ffg_competitive = is_ffg_competitive(store, head_root, parent_root) + + # Do not re-org if the chain is not finalizing with acceptable frequency. + finalization_ok = is_finalization_ok(store, proposal_slot) + + # Only suppress the fork choice update if we are confident that we will propose the next block. + parent_state_advanced = store.block_states[parent_root].copy() + process_slots(parent_state_advanced, proposal_slot) + proposer_index = get_beacon_proposer_index(parent_state_advanced) + proposing_reorg_slot = validator_is_connected(proposer_index) + + # Single slot re-org. 
+ parent_slot_ok = parent_block.slot + 1 == head_block.slot + proposing_on_time = is_proposing_on_time(store) + + # Note that this condition is different from `get_proposer_head` + current_time_ok = head_block.slot == current_slot or ( + proposal_slot == current_slot and proposing_on_time + ) + single_slot_reorg = parent_slot_ok and current_time_ok + + # Check the head weight only if the attestations from the head slot have already been applied. + # Implementations may want to do this in different ways, e.g. by advancing + # `store.time` early, or by counting queued attestations during the head block's slot. + if current_slot > head_block.slot: + head_weak = is_head_weak(store, head_root) + parent_strong = is_parent_strong(store, parent_root) + else: + head_weak = True + parent_strong = True + + return all( + [ + head_late, + shuffling_stable, + ffg_competitive, + finalization_ok, + proposing_reorg_slot, + single_slot_reorg, + head_weak, + parent_strong, + ] + ) + + +- name: slash_validator#phase0 + sources: + - file: consensus/state_processing/src/common/slash_validator.rs + search: pub fn slash_validator< + spec: | + + def slash_validator( + state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None + ) -> None: + """ + Slash the validator with index ``slashed_index``. 
+ """ + epoch = get_current_epoch(state) + initiate_validator_exit(state, slashed_index) + validator = state.validators[slashed_index] + validator.slashed = True + validator.withdrawable_epoch = max( + validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) + ) + state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance + decrease_balance( + state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT + ) + + # Apply proposer and whistleblower rewards + proposer_index = get_beacon_proposer_index(state) + if whistleblower_index is None: + whistleblower_index = proposer_index + whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) + proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT) + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) + + +- name: slash_validator#altair + sources: + - file: consensus/state_processing/src/common/slash_validator.rs + search: pub fn slash_validator< + spec: | + + def slash_validator( + state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None + ) -> None: + """ + Slash the validator with index ``slashed_index``. 
+ """ + epoch = get_current_epoch(state) + initiate_validator_exit(state, slashed_index) + validator = state.validators[slashed_index] + validator.slashed = True + validator.withdrawable_epoch = max( + validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) + ) + state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance + decrease_balance( + state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR + ) + + # Apply proposer and whistleblower rewards + proposer_index = get_beacon_proposer_index(state) + if whistleblower_index is None: + whistleblower_index = proposer_index + whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) + proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR) + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) + + +- name: slash_validator#bellatrix + sources: + - file: consensus/state_processing/src/common/slash_validator.rs + search: pub fn slash_validator< + spec: | + + def slash_validator( + state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None + ) -> None: + """ + Slash the validator with index ``slashed_index``. 
+ """ + epoch = get_current_epoch(state) + initiate_validator_exit(state, slashed_index) + validator = state.validators[slashed_index] + validator.slashed = True + validator.withdrawable_epoch = max( + validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) + ) + state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance + # [Modified in Bellatrix] + slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX + decrease_balance(state, slashed_index, slashing_penalty) + + # Apply proposer and whistleblower rewards + proposer_index = get_beacon_proposer_index(state) + if whistleblower_index is None: + whistleblower_index = proposer_index + whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) + proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR) + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) + + +- name: slash_validator#electra + sources: + - file: consensus/state_processing/src/common/slash_validator.rs + search: pub fn slash_validator< + spec: | + + def slash_validator( + state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None + ) -> None: + """ + Slash the validator with index ``slashed_index``. 
+ """ + epoch = get_current_epoch(state) + initiate_validator_exit(state, slashed_index) + validator = state.validators[slashed_index] + validator.slashed = True + validator.withdrawable_epoch = max( + validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) + ) + state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance + # [Modified in Electra:EIP7251] + slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA + decrease_balance(state, slashed_index, slashing_penalty) + + # Apply proposer and whistleblower rewards + proposer_index = get_beacon_proposer_index(state) + if whistleblower_index is None: + whistleblower_index = proposer_index + # [Modified in Electra:EIP7251] + whistleblower_reward = Gwei( + validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA + ) + proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR) + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) + + +- name: state_transition#phase0 + sources: [] + spec: | + + def state_transition( + state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool = True + ) -> None: + block = signed_block.message + # Process slots (including those with no blocks) since block + process_slots(state, block.slot) + # Verify signature + if validate_result: + assert verify_block_signature(state, signed_block) + # Process block + process_block(state, block) + # Verify state root + if validate_result: + assert block.state_root == hash_tree_root(state) + + +- name: store_target_checkpoint_state#phase0 + sources: [] + spec: | + + def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None: + # Store target checkpoint state if not yet seen + if target not in store.checkpoint_states: + base_state = copy(store.block_states[target.root]) + if base_state.slot < 
compute_start_slot_at_epoch(target.epoch): + process_slots(base_state, compute_start_slot_at_epoch(target.epoch)) + store.checkpoint_states[target] = base_state + + +- name: switch_to_compounding_validator#electra + sources: + - file: consensus/types/src/state/beacon_state.rs + search: pub fn switch_to_compounding_validator( + spec: | + + def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None: + validator = state.validators[index] + validator.withdrawal_credentials = ( + COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:] + ) + queue_excess_active_balance(state, index) + + +- name: translate_participation#altair + sources: + - file: consensus/state_processing/src/upgrade/altair.rs + search: pub fn translate_participation< + spec: | + + def translate_participation( + state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation] + ) -> None: + for attestation in pending_attestations: + data = attestation.data + inclusion_delay = attestation.inclusion_delay + # Translate attestation inclusion info to flag indices + participation_flag_indices = get_attestation_participation_flag_indices( + state, data, inclusion_delay + ) + + # Apply flags to all attesting validators + epoch_participation = state.previous_epoch_participation + for index in get_attesting_indices(state, attestation): + for flag_index in participation_flag_indices: + epoch_participation[index] = add_flag(epoch_participation[index], flag_index) + + +- name: update_checkpoints#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: fn update_checkpoints( + spec: | + + def update_checkpoints( + store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint + ) -> None: + """ + Update checkpoints in store if necessary + """ + # Update justified checkpoint + if justified_checkpoint.epoch > store.justified_checkpoint.epoch: + store.justified_checkpoint = justified_checkpoint + + # Update finalized 
checkpoint + if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: + store.finalized_checkpoint = finalized_checkpoint + + +- name: update_latest_messages#phase0 + sources: [] + spec: | + + def update_latest_messages( + store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation + ) -> None: + target = attestation.data.target + beacon_block_root = attestation.data.beacon_block_root + non_equivocating_attesting_indices = [ + i for i in attesting_indices if i not in store.equivocating_indices + ] + for i in non_equivocating_attesting_indices: + if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch: + store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root) + + +- name: update_latest_messages#gloas + sources: [] + spec: | + + def update_latest_messages( + store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation + ) -> None: + slot = attestation.data.slot + beacon_block_root = attestation.data.beacon_block_root + payload_present = attestation.data.index == 1 + non_equivocating_attesting_indices = [ + i for i in attesting_indices if i not in store.equivocating_indices + ] + for i in non_equivocating_attesting_indices: + if i not in store.latest_messages or slot > store.latest_messages[i].slot: + store.latest_messages[i] = LatestMessage( + slot=slot, root=beacon_block_root, payload_present=payload_present + ) + + +- name: update_unrealized_checkpoints#phase0 + sources: [] + spec: | + + def update_unrealized_checkpoints( + store: Store, + unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint, + ) -> None: + """ + Update unrealized checkpoints in store if necessary + """ + # Update unrealized justified checkpoint + if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch: + store.unrealized_justified_checkpoint = unrealized_justified_checkpoint + + # Update unrealized finalized checkpoint 
+ if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch: + store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint + + +- name: upgrade_lc_bootstrap_to_capella#capella + sources: [] + spec: | + + def upgrade_lc_bootstrap_to_capella(pre: altair.LightClientBootstrap) -> LightClientBootstrap: + return LightClientBootstrap( + header=upgrade_lc_header_to_capella(pre.header), + current_sync_committee=pre.current_sync_committee, + current_sync_committee_branch=pre.current_sync_committee_branch, + ) + + +- name: upgrade_lc_bootstrap_to_deneb#deneb + sources: [] + spec: | + + def upgrade_lc_bootstrap_to_deneb(pre: capella.LightClientBootstrap) -> LightClientBootstrap: + return LightClientBootstrap( + header=upgrade_lc_header_to_deneb(pre.header), + current_sync_committee=pre.current_sync_committee, + current_sync_committee_branch=pre.current_sync_committee_branch, + ) + + +- name: upgrade_lc_bootstrap_to_electra#electra + sources: [] + spec: | + + def upgrade_lc_bootstrap_to_electra(pre: deneb.LightClientBootstrap) -> LightClientBootstrap: + return LightClientBootstrap( + header=upgrade_lc_header_to_electra(pre.header), + current_sync_committee=pre.current_sync_committee, + current_sync_committee_branch=normalize_merkle_branch( + pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA + ), + ) + + +- name: upgrade_lc_finality_update_to_capella#capella + sources: [] + spec: | + + def upgrade_lc_finality_update_to_capella( + pre: altair.LightClientFinalityUpdate, + ) -> LightClientFinalityUpdate: + return LightClientFinalityUpdate( + attested_header=upgrade_lc_header_to_capella(pre.attested_header), + finalized_header=upgrade_lc_header_to_capella(pre.finalized_header), + finality_branch=pre.finality_branch, + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_finality_update_to_deneb#deneb + sources: [] + spec: | + + def upgrade_lc_finality_update_to_deneb( + 
pre: capella.LightClientFinalityUpdate, + ) -> LightClientFinalityUpdate: + return LightClientFinalityUpdate( + attested_header=upgrade_lc_header_to_deneb(pre.attested_header), + finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), + finality_branch=pre.finality_branch, + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_finality_update_to_electra#electra + sources: [] + spec: | + + def upgrade_lc_finality_update_to_electra( + pre: deneb.LightClientFinalityUpdate, + ) -> LightClientFinalityUpdate: + return LightClientFinalityUpdate( + attested_header=upgrade_lc_header_to_electra(pre.attested_header), + finalized_header=upgrade_lc_header_to_electra(pre.finalized_header), + finality_branch=normalize_merkle_branch(pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA), + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_header_to_capella#capella + sources: [] + spec: | + + def upgrade_lc_header_to_capella(pre: altair.LightClientHeader) -> LightClientHeader: + return LightClientHeader( + beacon=pre.beacon, + execution=ExecutionPayloadHeader(), + execution_branch=ExecutionBranch(), + ) + + +- name: upgrade_lc_header_to_deneb#deneb + sources: [] + spec: | + + def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHeader: + return LightClientHeader( + beacon=pre.beacon, + execution=ExecutionPayloadHeader( + parent_hash=pre.execution.parent_hash, + fee_recipient=pre.execution.fee_recipient, + state_root=pre.execution.state_root, + receipts_root=pre.execution.receipts_root, + logs_bloom=pre.execution.logs_bloom, + prev_randao=pre.execution.prev_randao, + block_number=pre.execution.block_number, + gas_limit=pre.execution.gas_limit, + gas_used=pre.execution.gas_used, + timestamp=pre.execution.timestamp, + extra_data=pre.execution.extra_data, + base_fee_per_gas=pre.execution.base_fee_per_gas, + block_hash=pre.execution.block_hash, + 
transactions_root=pre.execution.transactions_root, + withdrawals_root=pre.execution.withdrawals_root, + # [New in Deneb:EIP4844] + blob_gas_used=uint64(0), + # [New in Deneb:EIP4844] + excess_blob_gas=uint64(0), + ), + execution_branch=pre.execution_branch, + ) + + +- name: upgrade_lc_header_to_electra#electra + sources: [] + spec: | + + def upgrade_lc_header_to_electra(pre: deneb.LightClientHeader) -> LightClientHeader: + return LightClientHeader( + beacon=pre.beacon, + execution=pre.execution, + execution_branch=pre.execution_branch, + ) + + +- name: upgrade_lc_optimistic_update_to_capella#capella + sources: [] + spec: | + + def upgrade_lc_optimistic_update_to_capella( + pre: altair.LightClientOptimisticUpdate, + ) -> LightClientOptimisticUpdate: + return LightClientOptimisticUpdate( + attested_header=upgrade_lc_header_to_capella(pre.attested_header), + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_optimistic_update_to_deneb#deneb + sources: [] + spec: | + + def upgrade_lc_optimistic_update_to_deneb( + pre: capella.LightClientOptimisticUpdate, + ) -> LightClientOptimisticUpdate: + return LightClientOptimisticUpdate( + attested_header=upgrade_lc_header_to_deneb(pre.attested_header), + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_optimistic_update_to_electra#electra + sources: [] + spec: | + + def upgrade_lc_optimistic_update_to_electra( + pre: deneb.LightClientOptimisticUpdate, + ) -> LightClientOptimisticUpdate: + return LightClientOptimisticUpdate( + attested_header=upgrade_lc_header_to_electra(pre.attested_header), + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_store_to_capella#capella + sources: [] + spec: | + + def upgrade_lc_store_to_capella(pre: altair.LightClientStore) -> LightClientStore: + if pre.best_valid_update is None: + best_valid_update = None + else: + best_valid_update = 
upgrade_lc_update_to_capella(pre.best_valid_update) + return LightClientStore( + finalized_header=upgrade_lc_header_to_capella(pre.finalized_header), + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + best_valid_update=best_valid_update, + optimistic_header=upgrade_lc_header_to_capella(pre.optimistic_header), + previous_max_active_participants=pre.previous_max_active_participants, + current_max_active_participants=pre.current_max_active_participants, + ) + + +- name: upgrade_lc_store_to_deneb#deneb + sources: [] + spec: | + + def upgrade_lc_store_to_deneb(pre: capella.LightClientStore) -> LightClientStore: + if pre.best_valid_update is None: + best_valid_update = None + else: + best_valid_update = upgrade_lc_update_to_deneb(pre.best_valid_update) + return LightClientStore( + finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + best_valid_update=best_valid_update, + optimistic_header=upgrade_lc_header_to_deneb(pre.optimistic_header), + previous_max_active_participants=pre.previous_max_active_participants, + current_max_active_participants=pre.current_max_active_participants, + ) + + +- name: upgrade_lc_store_to_electra#electra + sources: [] + spec: | + + def upgrade_lc_store_to_electra(pre: deneb.LightClientStore) -> LightClientStore: + if pre.best_valid_update is None: + best_valid_update = None + else: + best_valid_update = upgrade_lc_update_to_electra(pre.best_valid_update) + return LightClientStore( + finalized_header=upgrade_lc_header_to_electra(pre.finalized_header), + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + best_valid_update=best_valid_update, + optimistic_header=upgrade_lc_header_to_electra(pre.optimistic_header), + previous_max_active_participants=pre.previous_max_active_participants, + 
current_max_active_participants=pre.current_max_active_participants, + ) + + +- name: upgrade_lc_update_to_capella#capella + sources: [] + spec: | + + def upgrade_lc_update_to_capella(pre: altair.LightClientUpdate) -> LightClientUpdate: + return LightClientUpdate( + attested_header=upgrade_lc_header_to_capella(pre.attested_header), + next_sync_committee=pre.next_sync_committee, + next_sync_committee_branch=pre.next_sync_committee_branch, + finalized_header=upgrade_lc_header_to_capella(pre.finalized_header), + finality_branch=pre.finality_branch, + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_update_to_deneb#deneb + sources: [] + spec: | + + def upgrade_lc_update_to_deneb(pre: capella.LightClientUpdate) -> LightClientUpdate: + return LightClientUpdate( + attested_header=upgrade_lc_header_to_deneb(pre.attested_header), + next_sync_committee=pre.next_sync_committee, + next_sync_committee_branch=pre.next_sync_committee_branch, + finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), + finality_branch=pre.finality_branch, + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_lc_update_to_electra#electra + sources: [] + spec: | + + def upgrade_lc_update_to_electra(pre: deneb.LightClientUpdate) -> LightClientUpdate: + return LightClientUpdate( + attested_header=upgrade_lc_header_to_electra(pre.attested_header), + next_sync_committee=pre.next_sync_committee, + next_sync_committee_branch=normalize_merkle_branch( + pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA + ), + finalized_header=upgrade_lc_header_to_electra(pre.finalized_header), + finality_branch=normalize_merkle_branch(pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA), + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) + + +- name: upgrade_to_altair#altair + sources: + - file: consensus/state_processing/src/upgrade/altair.rs + search: pub fn 
upgrade_to_altair< + spec: | + + def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState: + epoch = phase0.get_current_epoch(pre) + post = BeaconState( + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + current_version=ALTAIR_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + previous_epoch_participation=[ + ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators)) + ], + current_epoch_participation=[ + ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators)) + ], + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + inactivity_scores=[uint64(0) for _ in range(len(pre.validators))], + ) + # Fill in previous epoch participation from the pre state's pending attestations + translate_participation(post, pre.previous_epoch_attestations) + + # Fill in sync committees + # Note: A duplicate committee is assigned for the current and next committee at the fork boundary + post.current_sync_committee = get_next_sync_committee(post) + post.next_sync_committee = get_next_sync_committee(post) + return post + + +- name: upgrade_to_bellatrix#bellatrix + sources: + - file: consensus/state_processing/src/upgrade/bellatrix.rs + search: pub fn upgrade_to_bellatrix< + spec: | + + def upgrade_to_bellatrix(pre: altair.BeaconState) -> BeaconState: + epoch = altair.get_current_epoch(pre) + post = BeaconState( + 
genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + # [New in Bellatrix] + current_version=BELLATRIX_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + inactivity_scores=pre.inactivity_scores, + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + # [New in Bellatrix] + latest_execution_payload_header=ExecutionPayloadHeader(), + ) + + return post + + +- name: upgrade_to_capella#capella + sources: + - file: consensus/state_processing/src/upgrade/capella.rs + search: pub fn upgrade_to_capella< + spec: | + + def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState: + epoch = bellatrix.get_current_epoch(pre) + latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=pre.latest_execution_payload_header.parent_hash, + fee_recipient=pre.latest_execution_payload_header.fee_recipient, + state_root=pre.latest_execution_payload_header.state_root, + receipts_root=pre.latest_execution_payload_header.receipts_root, + logs_bloom=pre.latest_execution_payload_header.logs_bloom, + prev_randao=pre.latest_execution_payload_header.prev_randao, + block_number=pre.latest_execution_payload_header.block_number, 
+ gas_limit=pre.latest_execution_payload_header.gas_limit, + gas_used=pre.latest_execution_payload_header.gas_used, + timestamp=pre.latest_execution_payload_header.timestamp, + extra_data=pre.latest_execution_payload_header.extra_data, + base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, + block_hash=pre.latest_execution_payload_header.block_hash, + transactions_root=pre.latest_execution_payload_header.transactions_root, + # [New in Capella] + withdrawals_root=Root(), + ) + post = BeaconState( + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + current_version=CAPELLA_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + inactivity_scores=pre.inactivity_scores, + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + latest_execution_payload_header=latest_execution_payload_header, + # [New in Capella] + next_withdrawal_index=WithdrawalIndex(0), + # [New in Capella] + next_withdrawal_validator_index=ValidatorIndex(0), + # [New in Capella] + historical_summaries=List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]([]), + ) + + return post + + +- name: upgrade_to_deneb#deneb + sources: + - file: 
consensus/state_processing/src/upgrade/deneb.rs + search: pub fn upgrade_to_deneb< + spec: | + + def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: + epoch = capella.get_current_epoch(pre) + latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=pre.latest_execution_payload_header.parent_hash, + fee_recipient=pre.latest_execution_payload_header.fee_recipient, + state_root=pre.latest_execution_payload_header.state_root, + receipts_root=pre.latest_execution_payload_header.receipts_root, + logs_bloom=pre.latest_execution_payload_header.logs_bloom, + prev_randao=pre.latest_execution_payload_header.prev_randao, + block_number=pre.latest_execution_payload_header.block_number, + gas_limit=pre.latest_execution_payload_header.gas_limit, + gas_used=pre.latest_execution_payload_header.gas_used, + timestamp=pre.latest_execution_payload_header.timestamp, + extra_data=pre.latest_execution_payload_header.extra_data, + base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, + block_hash=pre.latest_execution_payload_header.block_hash, + transactions_root=pre.latest_execution_payload_header.transactions_root, + withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, + # [New in Deneb:EIP4844] + blob_gas_used=uint64(0), + # [New in Deneb:EIP4844] + excess_blob_gas=uint64(0), + ) + post = BeaconState( + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + # [Modified in Deneb] + current_version=DENEB_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + 
previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + inactivity_scores=pre.inactivity_scores, + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + # [Modified in Deneb:EIP4844] + latest_execution_payload_header=latest_execution_payload_header, + next_withdrawal_index=pre.next_withdrawal_index, + next_withdrawal_validator_index=pre.next_withdrawal_validator_index, + historical_summaries=pre.historical_summaries, + ) + + return post + + +- name: upgrade_to_electra#electra + sources: + - file: consensus/state_processing/src/upgrade/electra.rs + search: pub fn upgrade_to_electra< + spec: | + + def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState: + epoch = deneb.get_current_epoch(pre) + + earliest_exit_epoch = compute_activation_exit_epoch(get_current_epoch(pre)) + for validator in pre.validators: + if validator.exit_epoch != FAR_FUTURE_EPOCH: + if validator.exit_epoch > earliest_exit_epoch: + earliest_exit_epoch = validator.exit_epoch + earliest_exit_epoch += Epoch(1) + + post = BeaconState( + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + # [Modified in Electra] + current_version=ELECTRA_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + 
previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + inactivity_scores=pre.inactivity_scores, + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + latest_execution_payload_header=pre.latest_execution_payload_header, + next_withdrawal_index=pre.next_withdrawal_index, + next_withdrawal_validator_index=pre.next_withdrawal_validator_index, + historical_summaries=pre.historical_summaries, + # [New in Electra:EIP6110] + deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX, + # [New in Electra:EIP7251] + deposit_balance_to_consume=0, + # [New in Electra:EIP7251] + exit_balance_to_consume=0, + # [New in Electra:EIP7251] + earliest_exit_epoch=earliest_exit_epoch, + # [New in Electra:EIP7251] + consolidation_balance_to_consume=0, + # [New in Electra:EIP7251] + earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)), + # [New in Electra:EIP7251] + pending_deposits=[], + # [New in Electra:EIP7251] + pending_partial_withdrawals=[], + # [New in Electra:EIP7251] + pending_consolidations=[], + ) + + post.exit_balance_to_consume = get_activation_exit_churn_limit(post) + post.consolidation_balance_to_consume = get_consolidation_churn_limit(post) + + # [New in Electra:EIP7251] + # add validators that are not yet active to pending balance deposits + pre_activation = sorted( + [ + index + for index, validator in enumerate(post.validators) + if validator.activation_epoch == FAR_FUTURE_EPOCH + ], + key=lambda index: (post.validators[index].activation_eligibility_epoch, index), + ) + + for index in pre_activation: + balance = post.balances[index] + post.balances[index] = 0 + validator = 
post.validators[index] + validator.effective_balance = 0 + validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH + # Use bls.G2_POINT_AT_INFINITY as a signature field placeholder + # and GENESIS_SLOT to distinguish from a pending deposit request + post.pending_deposits.append( + PendingDeposit( + pubkey=validator.pubkey, + withdrawal_credentials=validator.withdrawal_credentials, + amount=balance, + signature=bls.G2_POINT_AT_INFINITY, + slot=GENESIS_SLOT, + ) + ) + + # Ensure early adopters of compounding credentials go through the activation churn + for index, validator in enumerate(post.validators): + if has_compounding_withdrawal_credential(validator): + queue_excess_active_balance(post, ValidatorIndex(index)) + + return post + + +- name: upgrade_to_fulu#fulu + sources: + - file: consensus/state_processing/src/upgrade/fulu.rs + search: pub fn upgrade_to_fulu< + spec: | + + def upgrade_to_fulu(pre: electra.BeaconState) -> BeaconState: + epoch = electra.get_current_epoch(pre) + post = BeaconState( + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + # [Modified in Fulu] + current_version=FULU_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + 
inactivity_scores=pre.inactivity_scores, + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + latest_execution_payload_header=pre.latest_execution_payload_header, + next_withdrawal_index=pre.next_withdrawal_index, + next_withdrawal_validator_index=pre.next_withdrawal_validator_index, + historical_summaries=pre.historical_summaries, + deposit_requests_start_index=pre.deposit_requests_start_index, + deposit_balance_to_consume=pre.deposit_balance_to_consume, + exit_balance_to_consume=pre.exit_balance_to_consume, + earliest_exit_epoch=pre.earliest_exit_epoch, + consolidation_balance_to_consume=pre.consolidation_balance_to_consume, + earliest_consolidation_epoch=pre.earliest_consolidation_epoch, + pending_deposits=pre.pending_deposits, + pending_partial_withdrawals=pre.pending_partial_withdrawals, + pending_consolidations=pre.pending_consolidations, + # [New in Fulu:EIP7917] + proposer_lookahead=initialize_proposer_lookahead(pre), + ) + + return post + + +- name: upgrade_to_gloas#gloas + sources: [] + spec: | + + def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState: + epoch = fulu.get_current_epoch(pre) + + post = BeaconState( + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + # [Modified in Gloas:EIP7732] + current_version=GLOAS_FORK_VERSION, + epoch=epoch, + ), + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + validators=pre.validators, + balances=pre.balances, + randao_mixes=pre.randao_mixes, + slashings=pre.slashings, + previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + justification_bits=pre.justification_bits, + 
previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + inactivity_scores=pre.inactivity_scores, + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + # [Modified in Gloas:EIP7732] + # Removed `latest_execution_payload_header` + # [New in Gloas:EIP7732] + latest_execution_payload_bid=ExecutionPayloadBid( + block_hash=pre.latest_execution_payload_header.block_hash, + ), + next_withdrawal_index=pre.next_withdrawal_index, + next_withdrawal_validator_index=pre.next_withdrawal_validator_index, + historical_summaries=pre.historical_summaries, + deposit_requests_start_index=pre.deposit_requests_start_index, + deposit_balance_to_consume=pre.deposit_balance_to_consume, + exit_balance_to_consume=pre.exit_balance_to_consume, + earliest_exit_epoch=pre.earliest_exit_epoch, + consolidation_balance_to_consume=pre.consolidation_balance_to_consume, + earliest_consolidation_epoch=pre.earliest_consolidation_epoch, + pending_deposits=pre.pending_deposits, + pending_partial_withdrawals=pre.pending_partial_withdrawals, + pending_consolidations=pre.pending_consolidations, + proposer_lookahead=pre.proposer_lookahead, + # [New in Gloas:EIP7732] + execution_payload_availability=[0b1 for _ in range(SLOTS_PER_HISTORICAL_ROOT)], + # [New in Gloas:EIP7732] + builder_pending_payments=[BuilderPendingPayment() for _ in range(2 * SLOTS_PER_EPOCH)], + # [New in Gloas:EIP7732] + builder_pending_withdrawals=[], + # [New in Gloas:EIP7732] + latest_block_hash=pre.latest_execution_payload_header.block_hash, + # [New in Gloas:EIP7732] + latest_withdrawals_root=Root(), + ) + + return post + + +- name: validate_kzg_g1#deneb + sources: [] + spec: | + + def validate_kzg_g1(b: Bytes48) -> None: + """ + Perform BLS validation required by the types `KZGProof` and `KZGCommitment`. 
+ """ + if b == G1_POINT_AT_INFINITY: + return + + assert bls.KeyValidate(b) + + +- name: validate_light_client_update#altair + sources: [] + spec: | + + def validate_light_client_update( + store: LightClientStore, + update: LightClientUpdate, + current_slot: Slot, + genesis_validators_root: Root, + ) -> None: + # Verify sync committee has sufficient participants + sync_aggregate = update.sync_aggregate + assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS + + # Verify update does not skip a sync committee period + assert is_valid_light_client_header(update.attested_header) + update_attested_slot = update.attested_header.beacon.slot + update_finalized_slot = update.finalized_header.beacon.slot + assert current_slot >= update.signature_slot > update_attested_slot >= update_finalized_slot + store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot) + update_signature_period = compute_sync_committee_period_at_slot(update.signature_slot) + if is_next_sync_committee_known(store): + assert update_signature_period in (store_period, store_period + 1) + else: + assert update_signature_period == store_period + + # Verify update is relevant + update_attested_period = compute_sync_committee_period_at_slot(update_attested_slot) + update_has_next_sync_committee = not is_next_sync_committee_known(store) and ( + is_sync_committee_update(update) and update_attested_period == store_period + ) + assert ( + update_attested_slot > store.finalized_header.beacon.slot or update_has_next_sync_committee + ) + + # Verify that the `finality_branch`, if present, confirms `finalized_header` + # to match the finalized checkpoint root saved in the state of `attested_header`. + # Note that the genesis finalized checkpoint root is represented as a zero hash. 
+ if not is_finality_update(update): + assert update.finalized_header == LightClientHeader() + else: + if update_finalized_slot == GENESIS_SLOT: + assert update.finalized_header == LightClientHeader() + finalized_root = Bytes32() + else: + assert is_valid_light_client_header(update.finalized_header) + finalized_root = hash_tree_root(update.finalized_header.beacon) + assert is_valid_normalized_merkle_branch( + leaf=finalized_root, + branch=update.finality_branch, + gindex=finalized_root_gindex_at_slot(update.attested_header.beacon.slot), + root=update.attested_header.beacon.state_root, + ) + + # Verify that the `next_sync_committee`, if present, actually is the next sync committee saved in the + # state of the `attested_header` + if not is_sync_committee_update(update): + assert update.next_sync_committee == SyncCommittee() + else: + if update_attested_period == store_period and is_next_sync_committee_known(store): + assert update.next_sync_committee == store.next_sync_committee + assert is_valid_normalized_merkle_branch( + leaf=hash_tree_root(update.next_sync_committee), + branch=update.next_sync_committee_branch, + gindex=next_sync_committee_gindex_at_slot(update.attested_header.beacon.slot), + root=update.attested_header.beacon.state_root, + ) + + # Verify sync committee aggregate signature + if update_signature_period == store_period: + sync_committee = store.current_sync_committee + else: + sync_committee = store.next_sync_committee + participant_pubkeys = [ + pubkey + for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) + if bit + ] + fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1) + fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot)) + domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) + signing_root = compute_signing_root(update.attested_header.beacon, domain) + assert bls.FastAggregateVerify( + participant_pubkeys, signing_root, 
sync_aggregate.sync_committee_signature + ) + + +- name: validate_merge_block#bellatrix + sources: + - file: beacon_node/beacon_chain/src/execution_payload.rs + search: pub async fn validate_merge_block< + spec: | + + def validate_merge_block(block: BeaconBlock) -> None: + """ + Check the parent PoW block of execution payload is a valid terminal PoW block. + + Note: Unavailable PoW block(s) may later become available, + and a client software MAY delay a call to ``validate_merge_block`` + until the PoW block(s) become available. + """ + if TERMINAL_BLOCK_HASH != Hash32(): + # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. + assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH + assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH + return + + pow_block = get_pow_block(block.body.execution_payload.parent_hash) + # Check if `pow_block` is available + assert pow_block is not None + pow_parent = get_pow_block(pow_block.parent_hash) + # Check if `pow_parent` is available + assert pow_parent is not None + # Check if `pow_block` is a valid terminal PoW block + assert is_valid_terminal_pow_block(pow_block, pow_parent) + + +- name: validate_merge_block#gloas + sources: [] + spec: | + + def validate_merge_block(block: BeaconBlock) -> None: + """ + Check the parent PoW block of execution payload is a valid terminal PoW block. + + Note: Unavailable PoW block(s) may later become available, + and a client software MAY delay a call to ``validate_merge_block`` + until the PoW block(s) become available. + """ + if TERMINAL_BLOCK_HASH != Hash32(): + # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. 
+ assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH + assert ( + block.body.signed_execution_payload_bid.message.parent_block_hash == TERMINAL_BLOCK_HASH + ) + return + + pow_block = get_pow_block(block.body.signed_execution_payload_bid.message.parent_block_hash) + # Check if `pow_block` is available + assert pow_block is not None + pow_parent = get_pow_block(pow_block.parent_hash) + # Check if `pow_parent` is available + assert pow_parent is not None + # Check if `pow_block` is a valid terminal PoW block + assert is_valid_terminal_pow_block(pow_block, pow_parent) + + +- name: validate_on_attestation#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: fn validate_on_attestation( + spec: | + + def validate_on_attestation(store: Store, attestation: Attestation, is_from_block: bool) -> None: + target = attestation.data.target + + # If the given attestation is not from a beacon block message, we have to check the target epoch scope. + if not is_from_block: + validate_target_epoch_against_current_time(store, attestation) + + # Check that the epoch number and slot number are matching + assert target.epoch == compute_epoch_at_slot(attestation.data.slot) + + # Attestation target must be for a known block. If target block is unknown, delay consideration until block is found + assert target.root in store.blocks + + # Attestations must be for a known block. If block is unknown, delay consideration until the block is found + assert attestation.data.beacon_block_root in store.blocks + # Attestations must not be for blocks in the future. If not, the attestation should not be considered + assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot + + # LMD vote must be consistent with FFG vote target + assert target.root == get_checkpoint_block( + store, attestation.data.beacon_block_root, target.epoch + ) + + # Attestations can only affect the fork choice of subsequent slots. 
+ # Delay consideration in the fork choice until their slot is in the past. + assert get_current_slot(store) >= attestation.data.slot + 1 + + +- name: validate_on_attestation#gloas + sources: [] + spec: | + + def validate_on_attestation(store: Store, attestation: Attestation, is_from_block: bool) -> None: + target = attestation.data.target + + # If the given attestation is not from a beacon block message, + # we have to check the target epoch scope. + if not is_from_block: + validate_target_epoch_against_current_time(store, attestation) + + # Check that the epoch number and slot number are matching. + assert target.epoch == compute_epoch_at_slot(attestation.data.slot) + + # Attestation target must be for a known block. If target block + # is unknown, delay consideration until block is found. + assert target.root in store.blocks + + # Attestations must be for a known block. If block + # is unknown, delay consideration until the block is found. + assert attestation.data.beacon_block_root in store.blocks + # Attestations must not be for blocks in the future. + # If not, the attestation should not be considered. + block_slot = store.blocks[attestation.data.beacon_block_root].slot + assert block_slot <= attestation.data.slot + + # [New in Gloas:EIP7732] + assert attestation.data.index in [0, 1] + if block_slot == attestation.data.slot: + assert attestation.data.index == 0 + + # LMD vote must be consistent with FFG vote target + assert target.root == get_checkpoint_block( + store, attestation.data.beacon_block_root, target.epoch + ) + + # Attestations can only affect the fork-choice of subsequent slots. + # Delay consideration in the fork-choice until their slot is in the past. 
+ assert get_current_slot(store) >= attestation.data.slot + 1 + + +- name: validate_target_epoch_against_current_time#phase0 + sources: + - file: consensus/fork_choice/src/fork_choice.rs + search: fn validate_target_epoch_against_current_time( + spec: | + + def validate_target_epoch_against_current_time(store: Store, attestation: Attestation) -> None: + target = attestation.data.target + + # Attestations must be from the current or previous epoch + current_epoch = get_current_store_epoch(store) + # Use GENESIS_EPOCH for previous when genesis to avoid underflow + previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH + # If attestation target is from a future epoch, delay consideration until the epoch arrives + assert target.epoch in [current_epoch, previous_epoch] + + +- name: vanishing_polynomialcoeff#fulu + sources: [] + spec: | + + def vanishing_polynomialcoeff(xs: Sequence[BLSFieldElement]) -> PolynomialCoeff: + """ + Compute the vanishing polynomial on ``xs`` (in coefficient form). + """ + p = PolynomialCoeff([BLSFieldElement(1)]) + for x in xs: + p = multiply_polynomialcoeff(p, PolynomialCoeff([-x, BLSFieldElement(1)])) + return p + + +- name: verify_blob_kzg_proof#deneb + sources: [] + spec: | + + def verify_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48, proof_bytes: Bytes48) -> bool: + """ + Given a blob and a KZG proof, verify that the blob data corresponds to the provided commitment. + + Public method. 
+ """ + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(proof_bytes) == BYTES_PER_PROOF + + commitment = bytes_to_kzg_commitment(commitment_bytes) + + polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(blob, commitment) + + # Evaluate polynomial at `evaluation_challenge` + y = evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge) + + # Verify proof + proof = bytes_to_kzg_proof(proof_bytes) + return verify_kzg_proof_impl(commitment, evaluation_challenge, y, proof) + + +- name: verify_blob_kzg_proof_batch#deneb + sources: [] + spec: | + + def verify_blob_kzg_proof_batch( + blobs: Sequence[Blob], commitments_bytes: Sequence[Bytes48], proofs_bytes: Sequence[Bytes48] + ) -> bool: + """ + Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments. + Will return True if there are zero blobs/commitments/proofs. + Public method. + """ + + assert len(blobs) == len(commitments_bytes) == len(proofs_bytes) + + commitments, evaluation_challenges, ys, proofs = [], [], [], [] + for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(proof_bytes) == BYTES_PER_PROOF + commitment = bytes_to_kzg_commitment(commitment_bytes) + commitments.append(commitment) + polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(blob, commitment) + evaluation_challenges.append(evaluation_challenge) + ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) + proofs.append(bytes_to_kzg_proof(proof_bytes)) + + return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs) + + +- name: verify_blob_sidecar_inclusion_proof#deneb + sources: + - file: consensus/types/src/data/blob_sidecar.rs + search: fn verify_blob_sidecar_inclusion_proof( + spec: | + + def 
verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool: + gindex = get_subtree_index( + get_generalized_index(BeaconBlockBody, "blob_kzg_commitments", blob_sidecar.index) + ) + return is_valid_merkle_branch( + leaf=blob_sidecar.kzg_commitment.hash_tree_root(), + branch=blob_sidecar.kzg_commitment_inclusion_proof, + depth=KZG_COMMITMENT_INCLUSION_PROOF_DEPTH, + index=gindex, + root=blob_sidecar.signed_block_header.message.body_root, + ) + + +- name: verify_block_signature#phase0 + sources: + - file: consensus/state_processing/src/per_block_processing.rs + search: pub fn verify_block_signature< + spec: | + + def verify_block_signature(state: BeaconState, signed_block: SignedBeaconBlock) -> bool: + proposer = state.validators[signed_block.message.proposer_index] + signing_root = compute_signing_root( + signed_block.message, get_domain(state, DOMAIN_BEACON_PROPOSER) + ) + return bls.Verify(proposer.pubkey, signing_root, signed_block.signature) + + +- name: verify_cell_kzg_proof_batch#fulu + sources: [] + spec: | + + def verify_cell_kzg_proof_batch( + commitments_bytes: Sequence[Bytes48], + cell_indices: Sequence[CellIndex], + cells: Sequence[Cell], + proofs_bytes: Sequence[Bytes48], + ) -> bool: + """ + Verify that a set of cells belong to their corresponding commitments. + + Given four lists representing tuples of (``commitment``, ``cell_index``, ``cell``, ``proof``), + the function verifies ``proof`` which shows that ``cell`` are the evaluations of the polynomial + associated with ``commitment``, evaluated over the domain specified by ``cell_index``. + + This function implements the universal verification equation that has been introduced here: + https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240 + + Public method. 
+ """ + + assert len(commitments_bytes) == len(cells) == len(proofs_bytes) == len(cell_indices) + for commitment_bytes in commitments_bytes: + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + for cell_index in cell_indices: + assert cell_index < CELLS_PER_EXT_BLOB + for cell in cells: + assert len(cell) == BYTES_PER_CELL + for proof_bytes in proofs_bytes: + assert len(proof_bytes) == BYTES_PER_PROOF + + # Create the list of deduplicated commitments we are dealing with + deduplicated_commitments = [ + bytes_to_kzg_commitment(commitment_bytes) + for index, commitment_bytes in enumerate(commitments_bytes) + if commitments_bytes.index(commitment_bytes) == index + ] + # Create indices list mapping initial commitments (that may contain duplicates) to the deduplicated commitments + commitment_indices = [ + CommitmentIndex(deduplicated_commitments.index(commitment_bytes)) + for commitment_bytes in commitments_bytes + ] + + cosets_evals = [cell_to_coset_evals(cell) for cell in cells] + proofs = [bytes_to_kzg_proof(proof_bytes) for proof_bytes in proofs_bytes] + + # Do the actual verification + return verify_cell_kzg_proof_batch_impl( + deduplicated_commitments, commitment_indices, cell_indices, cosets_evals, proofs + ) + + +- name: verify_cell_kzg_proof_batch_impl#fulu + sources: [] + spec: | + + def verify_cell_kzg_proof_batch_impl( + commitments: Sequence[KZGCommitment], + commitment_indices: Sequence[CommitmentIndex], + cell_indices: Sequence[CellIndex], + cosets_evals: Sequence[CosetEvals], + proofs: Sequence[KZGProof], + ) -> bool: + """ + Helper: Verify that a set of cells belong to their corresponding commitment. 
+ + Given a list of ``commitments`` (which contains no duplicates) and four lists representing + tuples of (``commitment_index``, ``cell_index``, ``evals``, ``proof``), the function + verifies ``proof`` which shows that ``evals`` are the evaluations of the polynomial associated + with ``commitments[commitment_index]``, evaluated over the domain specified by ``cell_index``. + + This function is the internal implementation of ``verify_cell_kzg_proof_batch``. + """ + assert len(commitment_indices) == len(cell_indices) == len(cosets_evals) == len(proofs) + assert len(commitments) == len(set(commitments)) + for commitment_index in commitment_indices: + assert commitment_index < len(commitments) + + # The verification equation that we will check is pairing (LL, LR) = pairing (RL, [1]), where + # LL = sum_k r^k proofs[k], + # LR = [s^n] + # RL = RLC - RLI + RLP, where + # RLC = sum_i weights[i] commitments[i] + # RLI = [sum_k r^k interpolation_poly_k(s)] + # RLP = sum_k (r^k * h_k^n) proofs[k] + # + # Here, the variables have the following meaning: + # - k < len(cell_indices) is an index iterating over all cells in the input + # - r is a random coefficient, derived from hashing all data provided by the prover + # - s is the secret embedded in the KZG setup + # - n = FIELD_ELEMENTS_PER_CELL is the size of the evaluation domain + # - i ranges over all provided commitments + # - weights[i] is a weight computed for commitment i + # - It depends on r and on which cells are associated with commitment i + # - interpolation_poly_k is the interpolation polynomial for the kth cell + # - h_k is the coset shift specifying the evaluation domain of the kth cell + + # Preparation + num_cells = len(cell_indices) + n = FIELD_ELEMENTS_PER_CELL + num_commitments = len(commitments) + + # Step 1: Compute a challenge r and its powers r^0, ..., r^{num_cells-1} + r = compute_verify_cell_kzg_proof_batch_challenge( + commitments, commitment_indices, cell_indices, cosets_evals, proofs + ) + 
r_powers = compute_powers(r, num_cells) + + # Step 2: Compute LL = sum_k r^k proofs[k] + ll = bls.bytes48_to_G1(g1_lincomb(proofs, r_powers)) + + # Step 3: Compute LR = [s^n] + lr = bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[n]) + + # Step 4: Compute RL = RLC - RLI + RLP + # Step 4.1: Compute RLC = sum_i weights[i] commitments[i] + # Step 4.1a: Compute weights[i]: the sum of all r^k for which cell k is associated with commitment i. + # Note: we do that by iterating over all k and updating the correct weights[i] accordingly + weights = [BLSFieldElement(0)] * num_commitments + for k in range(num_cells): + i = commitment_indices[k] + weights[i] += r_powers[k] + # Step 4.1b: Linearly combine the weights with the commitments to get RLC + rlc = bls.bytes48_to_G1(g1_lincomb(commitments, weights)) + + # Step 4.2: Compute RLI = [sum_k r^k interpolation_poly_k(s)] + # Note: an efficient implementation would use the IDFT based method explained in the blog post + sum_interp_polys_coeff = PolynomialCoeff([BLSFieldElement(0)] * n) + for k in range(num_cells): + interp_poly_coeff = interpolate_polynomialcoeff( + coset_for_cell(cell_indices[k]), cosets_evals[k] + ) + interp_poly_scaled_coeff = multiply_polynomialcoeff( + PolynomialCoeff([r_powers[k]]), interp_poly_coeff + ) + sum_interp_polys_coeff = add_polynomialcoeff( + sum_interp_polys_coeff, interp_poly_scaled_coeff + ) + rli = bls.bytes48_to_G1(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:n], sum_interp_polys_coeff)) + + # Step 4.3: Compute RLP = sum_k (r^k * h_k^n) proofs[k] + weighted_r_powers = [] + for k in range(num_cells): + h_k = coset_shift_for_cell(cell_indices[k]) + h_k_pow = h_k.pow(BLSFieldElement(n)) + wrp = r_powers[k] * h_k_pow + weighted_r_powers.append(wrp) + rlp = bls.bytes48_to_G1(g1_lincomb(proofs, weighted_r_powers)) + + # Step 4.4: Compute RL = RLC - RLI + RLP + rl = bls.add(rlc, bls.neg(rli)) + rl = bls.add(rl, rlp) + + # Step 5: Check pairing (LL, LR) = pairing (RL, [1]) + return bls.pairing_check( + [ + [ll, lr], 
+ [rl, bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0]))], + ] + ) + + +- name: verify_data_column_sidecar#fulu + sources: + - file: beacon_node/beacon_chain/src/data_column_verification.rs + search: fn verify_data_column_sidecar< + spec: | + + def verify_data_column_sidecar(sidecar: DataColumnSidecar) -> bool: + """ + Verify if the data column sidecar is valid. + """ + # The sidecar index must be within the valid range + if sidecar.index >= NUMBER_OF_COLUMNS: + return False + + # A sidecar for zero blobs is invalid + if len(sidecar.kzg_commitments) == 0: + return False + + # Check that the sidecar respects the blob limit + epoch = compute_epoch_at_slot(sidecar.signed_block_header.message.slot) + if len(sidecar.kzg_commitments) > get_blob_parameters(epoch).max_blobs_per_block: + return False + + # The column length must be equal to the number of commitments/proofs + if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len( + sidecar.kzg_proofs + ): + return False + + return True + + +- name: verify_data_column_sidecar#gloas + sources: [] + spec: | + + def verify_data_column_sidecar(sidecar: DataColumnSidecar) -> bool: + """ + Verify if the data column sidecar is valid. 
+ """ + # The sidecar index must be within the valid range + if sidecar.index >= NUMBER_OF_COLUMNS: + return False + + # A sidecar for zero blobs is invalid + if len(sidecar.kzg_commitments) == 0: + return False + + # [Modified in Gloas:EIP7732] + # Check that the sidecar respects the blob limit + epoch = compute_epoch_at_slot(sidecar.slot) + if len(sidecar.kzg_commitments) > get_blob_parameters(epoch).max_blobs_per_block: + return False + + # The column length must be equal to the number of commitments/proofs + if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len( + sidecar.kzg_proofs + ): + return False + + return True + + +- name: verify_data_column_sidecar_inclusion_proof#fulu + sources: + - file: consensus/types/src/data/data_column_sidecar.rs + search: pub fn verify_inclusion_proof( + spec: | + + def verify_data_column_sidecar_inclusion_proof(sidecar: DataColumnSidecar) -> bool: + """ + Verify if the given KZG commitments included in the given beacon block. + """ + return is_valid_merkle_branch( + leaf=hash_tree_root(sidecar.kzg_commitments), + branch=sidecar.kzg_commitments_inclusion_proof, + depth=KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH, + index=get_subtree_index(get_generalized_index(BeaconBlockBody, "blob_kzg_commitments")), + root=sidecar.signed_block_header.message.body_root, + ) + + +- name: verify_data_column_sidecar_kzg_proofs#fulu + sources: + - file: beacon_node/beacon_chain/src/data_column_verification.rs + search: pub fn verify_kzg_for_data_column< + spec: | + + def verify_data_column_sidecar_kzg_proofs(sidecar: DataColumnSidecar) -> bool: + """ + Verify if the KZG proofs are correct. 
+ """ + # The column index also represents the cell index + cell_indices = [CellIndex(sidecar.index)] * len(sidecar.column) + + # Batch verify that the cells match the corresponding commitments and proofs + return verify_cell_kzg_proof_batch( + commitments_bytes=sidecar.kzg_commitments, + cell_indices=cell_indices, + cells=sidecar.column, + proofs_bytes=sidecar.kzg_proofs, + ) + + +- name: verify_execution_payload_bid_signature#gloas + sources: [] + spec: | + + def verify_execution_payload_bid_signature( + state: BeaconState, signed_bid: SignedExecutionPayloadBid + ) -> bool: + builder = state.validators[signed_bid.message.builder_index] + signing_root = compute_signing_root( + signed_bid.message, get_domain(state, DOMAIN_BEACON_BUILDER) + ) + return bls.Verify(builder.pubkey, signing_root, signed_bid.signature) + + +- name: verify_execution_payload_envelope_signature#gloas + sources: [] + spec: | + + def verify_execution_payload_envelope_signature( + state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope + ) -> bool: + builder = state.validators[signed_envelope.message.builder_index] + signing_root = compute_signing_root( + signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER) + ) + return bls.Verify(builder.pubkey, signing_root, signed_envelope.signature) + + +- name: verify_kzg_proof#deneb + sources: [] + spec: | + + def verify_kzg_proof( + commitment_bytes: Bytes48, z_bytes: Bytes32, y_bytes: Bytes32, proof_bytes: Bytes48 + ) -> bool: + """ + Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``. + Receives inputs as bytes. + Public method. 
+ """ + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT + assert len(y_bytes) == BYTES_PER_FIELD_ELEMENT + assert len(proof_bytes) == BYTES_PER_PROOF + + return verify_kzg_proof_impl( + bytes_to_kzg_commitment(commitment_bytes), + bytes_to_bls_field(z_bytes), + bytes_to_bls_field(y_bytes), + bytes_to_kzg_proof(proof_bytes), + ) + + +- name: verify_kzg_proof_batch#deneb + sources: [] + spec: | + + def verify_kzg_proof_batch( + commitments: Sequence[KZGCommitment], + zs: Sequence[BLSFieldElement], + ys: Sequence[BLSFieldElement], + proofs: Sequence[KZGProof], + ) -> bool: + """ + Verify multiple KZG proofs efficiently. + """ + + assert len(commitments) == len(zs) == len(ys) == len(proofs) + + # Compute a random challenge. Note that it does not have to be computed from a hash, + # r just has to be random. + degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, KZG_ENDIANNESS) + num_commitments = int.to_bytes(len(commitments), 8, KZG_ENDIANNESS) + data = RANDOM_CHALLENGE_KZG_BATCH_DOMAIN + degree_poly + num_commitments + + # Append all inputs to the transcript before we hash + for commitment, z, y, proof in zip(commitments, zs, ys, proofs): + data += commitment + bls_field_to_bytes(z) + bls_field_to_bytes(y) + proof + + r = hash_to_bls_field(data) + r_powers = compute_powers(r, len(commitments)) + + # Verify: e(sum r^i proof_i, [s]) == + # e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1]) + proof_lincomb = g1_lincomb(proofs, r_powers) + proof_z_lincomb = g1_lincomb(proofs, [z * r_power for z, r_power in zip(zs, r_powers)]) + C_minus_ys = [ + bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), -y)) + for commitment, y in zip(commitments, ys) + ] + C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys] + C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers) + + return bls.pairing_check( + [ + [ + bls.bytes48_to_G1(proof_lincomb), + 
bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[1])), + ], + [ + bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), + bls.G2(), + ], + ] + ) + + +- name: verify_kzg_proof_impl#deneb + sources: [] + spec: | + + def verify_kzg_proof_impl( + commitment: KZGCommitment, z: BLSFieldElement, y: BLSFieldElement, proof: KZGProof + ) -> bool: + """ + Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``. + """ + # Verify: P - y = Q * (X - z) + X_minus_z = bls.add( + bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[1]), + bls.multiply(bls.G2(), -z), + ) + P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), -y)) + return bls.pairing_check( + [[P_minus_y, bls.neg(bls.G2())], [bls.bytes48_to_G1(proof), X_minus_z]] + ) + + +- name: voting_period_start_time#phase0 + sources: [] + spec: | + + def voting_period_start_time(state: BeaconState) -> uint64: + eth1_voting_period_start_slot = Slot( + state.slot - state.slot % (EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH) + ) + return compute_time_at_slot(state, eth1_voting_period_start_slot) + + +- name: weigh_justification_and_finalization#phase0 + sources: + - file: consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs + search: pub fn weigh_justification_and_finalization< + spec: | + + def weigh_justification_and_finalization( + state: BeaconState, + total_active_balance: Gwei, + previous_epoch_target_balance: Gwei, + current_epoch_target_balance: Gwei, + ) -> None: + previous_epoch = get_previous_epoch(state) + current_epoch = get_current_epoch(state) + old_previous_justified_checkpoint = state.previous_justified_checkpoint + old_current_justified_checkpoint = state.current_justified_checkpoint + + # Process justifications + state.previous_justified_checkpoint = state.current_justified_checkpoint + state.justification_bits[1:] = state.justification_bits[: JUSTIFICATION_BITS_LENGTH - 1] + 
state.justification_bits[0] = 0b0 + if previous_epoch_target_balance * 3 >= total_active_balance * 2: + state.current_justified_checkpoint = Checkpoint( + epoch=previous_epoch, root=get_block_root(state, previous_epoch) + ) + state.justification_bits[1] = 0b1 + if current_epoch_target_balance * 3 >= total_active_balance * 2: + state.current_justified_checkpoint = Checkpoint( + epoch=current_epoch, root=get_block_root(state, current_epoch) + ) + state.justification_bits[0] = 0b1 + + # Process finalizations + bits = state.justification_bits + # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source + if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch: + state.finalized_checkpoint = old_previous_justified_checkpoint + # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source + if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch: + state.finalized_checkpoint = old_previous_justified_checkpoint + # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source + if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch: + state.finalized_checkpoint = old_current_justified_checkpoint + # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source + if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch: + state.finalized_checkpoint = old_current_justified_checkpoint + + +- name: xor#phase0 + sources: [] + spec: | + + def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: + """ + Return the exclusive-or of two 32-byte strings. 
+ """ + return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2)) + diff --git a/specrefs/presets.yml b/specrefs/presets.yml new file mode 100644 index 00000000000..a2225019f81 --- /dev/null +++ b/specrefs/presets.yml @@ -0,0 +1,770 @@ +- name: BASE_REWARD_FACTOR#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: BASE_REWARD_FACTOR: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*base_rewards_per_epoch:' + regex: true + spec: | + + BASE_REWARD_FACTOR: uint64 = 64 + + +- name: BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas + sources: [] + spec: | + + BUILDER_PENDING_WITHDRAWALS_LIMIT: uint64 = 1048576 + + +- name: BYTES_PER_LOGS_BLOOM#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: BYTES_PER_LOGS_BLOOM: + spec: | + + BYTES_PER_LOGS_BLOOM: uint64 = 256 + + +- name: CELLS_PER_EXT_BLOB#fulu + sources: + - file: consensus/types/presets/mainnet/fulu.yaml + search: CELLS_PER_EXT_BLOB: + spec: | + + CELLS_PER_EXT_BLOB = 128 + + +- name: EFFECTIVE_BALANCE_INCREMENT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: EFFECTIVE_BALANCE_INCREMENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*effective_balance_increment:' + regex: true + spec: | + + EFFECTIVE_BALANCE_INCREMENT: Gwei = 1000000000 + + +- name: EPOCHS_PER_ETH1_VOTING_PERIOD#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: EPOCHS_PER_ETH1_VOTING_PERIOD: + spec: | + + EPOCHS_PER_ETH1_VOTING_PERIOD: uint64 = 64 + + +- name: EPOCHS_PER_HISTORICAL_VECTOR#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: EPOCHS_PER_HISTORICAL_VECTOR: + spec: | + + EPOCHS_PER_HISTORICAL_VECTOR: uint64 = 65536 + + +- name: EPOCHS_PER_SLASHINGS_VECTOR#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: EPOCHS_PER_SLASHINGS_VECTOR: + spec: | + + 
EPOCHS_PER_SLASHINGS_VECTOR: uint64 = 8192 + + +- name: EPOCHS_PER_SYNC_COMMITTEE_PERIOD#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: EPOCHS_PER_SYNC_COMMITTEE_PERIOD: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*epochs_per_sync_committee_period:' + regex: true + spec: | + + EPOCHS_PER_SYNC_COMMITTEE_PERIOD: uint64 = 256 + + +- name: FIELD_ELEMENTS_PER_BLOB#deneb + sources: + - file: consensus/types/presets/mainnet/deneb.yaml + search: FIELD_ELEMENTS_PER_BLOB: + spec: | + + FIELD_ELEMENTS_PER_BLOB: uint64 = 4096 + + +- name: FIELD_ELEMENTS_PER_CELL#fulu + sources: + - file: consensus/types/presets/mainnet/fulu.yaml + search: FIELD_ELEMENTS_PER_CELL: + spec: | + + FIELD_ELEMENTS_PER_CELL: uint64 = 64 + + +- name: FIELD_ELEMENTS_PER_EXT_BLOB#fulu + sources: + - file: consensus/types/presets/mainnet/fulu.yaml + search: FIELD_ELEMENTS_PER_EXT_BLOB: + spec: | + + FIELD_ELEMENTS_PER_EXT_BLOB = 8192 + + +- name: HISTORICAL_ROOTS_LIMIT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: HISTORICAL_ROOTS_LIMIT: + spec: | + + HISTORICAL_ROOTS_LIMIT: uint64 = 16777216 + + +- name: HYSTERESIS_DOWNWARD_MULTIPLIER#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: HYSTERESIS_DOWNWARD_MULTIPLIER: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*hysteresis_downward_multiplier:' + regex: true + spec: | + + HYSTERESIS_DOWNWARD_MULTIPLIER: uint64 = 1 + + +- name: HYSTERESIS_QUOTIENT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: HYSTERESIS_QUOTIENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*hysteresis_quotient:' + regex: true + spec: | + + HYSTERESIS_QUOTIENT: uint64 = 4 + + +- name: HYSTERESIS_UPWARD_MULTIPLIER#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: 
HYSTERESIS_UPWARD_MULTIPLIER: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*hysteresis_upward_multiplier:' + regex: true + spec: | + + HYSTERESIS_UPWARD_MULTIPLIER: uint64 = 5 + + +- name: INACTIVITY_PENALTY_QUOTIENT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: INACTIVITY_PENALTY_QUOTIENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*inactivity_penalty_quotient:' + regex: true + spec: | + + INACTIVITY_PENALTY_QUOTIENT: uint64 = 67108864 + + +- name: INACTIVITY_PENALTY_QUOTIENT_ALTAIR#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: INACTIVITY_PENALTY_QUOTIENT_ALTAIR: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*inactivity_penalty_quotient_altair:' + regex: true + spec: | + + INACTIVITY_PENALTY_QUOTIENT_ALTAIR: uint64 = 50331648 + + +- name: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*inactivity_penalty_quotient_bellatrix:' + regex: true + spec: | + + INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: uint64 = 16777216 + + +- name: KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH#fulu + sources: + - file: consensus/types/presets/mainnet/fulu.yaml + search: KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: + spec: | + + KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: uint64 = 4 + + +- name: KZG_COMMITMENT_INCLUSION_PROOF_DEPTH#deneb + sources: + - file: consensus/types/presets/mainnet/deneb.yaml + search: KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: + spec: | + + KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: uint64 = 17 + + +- name: MAX_ATTESTATIONS#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_ATTESTATIONS: + spec: | + + MAX_ATTESTATIONS = 128 + + +- name: 
MAX_ATTESTATIONS_ELECTRA#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MAX_ATTESTATIONS_ELECTRA: + spec: | + + MAX_ATTESTATIONS_ELECTRA = 8 + + +- name: MAX_ATTESTER_SLASHINGS#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_ATTESTER_SLASHINGS: + spec: | + + MAX_ATTESTER_SLASHINGS = 2 + + +- name: MAX_ATTESTER_SLASHINGS_ELECTRA#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MAX_ATTESTER_SLASHINGS_ELECTRA: + spec: | + + MAX_ATTESTER_SLASHINGS_ELECTRA = 1 + + +- name: MAX_BLOB_COMMITMENTS_PER_BLOCK#deneb + sources: + - file: consensus/types/presets/mainnet/deneb.yaml + search: MAX_BLOB_COMMITMENTS_PER_BLOCK: + spec: | + + MAX_BLOB_COMMITMENTS_PER_BLOCK: uint64 = 4096 + + +- name: MAX_BLS_TO_EXECUTION_CHANGES#capella + sources: + - file: consensus/types/presets/mainnet/capella.yaml + search: MAX_BLS_TO_EXECUTION_CHANGES: + spec: | + + MAX_BLS_TO_EXECUTION_CHANGES = 16 + + +- name: MAX_BYTES_PER_TRANSACTION#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: MAX_BYTES_PER_TRANSACTION: + spec: | + + MAX_BYTES_PER_TRANSACTION: uint64 = 1073741824 + + +- name: MAX_COMMITTEES_PER_SLOT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_COMMITTEES_PER_SLOT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_committees_per_slot:' + regex: true + spec: | + + MAX_COMMITTEES_PER_SLOT: uint64 = 64 + + +- name: MAX_COMMITTEES_PER_SLOT#electra + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_COMMITTEES_PER_SLOT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_committees_per_slot:' + regex: true + spec: | + + MAX_COMMITTEES_PER_SLOT: uint64 = 64 + + +- name: MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: 
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: + spec: | + + MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: uint64 = 2 + + +- name: MAX_DEPOSITS#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_DEPOSITS: + spec: | + + MAX_DEPOSITS = 16 + + +- name: MAX_DEPOSIT_REQUESTS_PER_PAYLOAD#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: + spec: | + + MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: uint64 = 8192 + + +- name: MAX_EFFECTIVE_BALANCE#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_EFFECTIVE_BALANCE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_effective_balance:' + regex: true + spec: | + + MAX_EFFECTIVE_BALANCE: Gwei = 32000000000 + + +- name: MAX_EFFECTIVE_BALANCE_ELECTRA#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MAX_EFFECTIVE_BALANCE_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_effective_balance_electra:' + regex: true + spec: | + + MAX_EFFECTIVE_BALANCE_ELECTRA: Gwei = 2048000000000 + + +- name: MAX_EXTRA_DATA_BYTES#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: MAX_EXTRA_DATA_BYTES: + spec: | + + MAX_EXTRA_DATA_BYTES = 32 + + +- name: MAX_PAYLOAD_ATTESTATIONS#gloas + sources: [] + spec: | + + MAX_PAYLOAD_ATTESTATIONS = 4 + + +- name: MAX_PENDING_DEPOSITS_PER_EPOCH#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MAX_PENDING_DEPOSITS_PER_EPOCH: + spec: | + + MAX_PENDING_DEPOSITS_PER_EPOCH: uint64 = 16 + + +- name: MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn 
mainnet\(\).*\n(?:.*\n)*?.*max_pending_partials_per_withdrawals_sweep:' + regex: true + spec: | + + MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: uint64 = 8 + + +- name: MAX_PROPOSER_SLASHINGS#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_PROPOSER_SLASHINGS: + spec: | + + MAX_PROPOSER_SLASHINGS = 16 + + +- name: MAX_SEED_LOOKAHEAD#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_SEED_LOOKAHEAD: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_seed_lookahead:' + regex: true + spec: | + + MAX_SEED_LOOKAHEAD: uint64 = 4 + + +- name: MAX_TRANSACTIONS_PER_PAYLOAD#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: MAX_TRANSACTIONS_PER_PAYLOAD: + spec: | + + MAX_TRANSACTIONS_PER_PAYLOAD: uint64 = 1048576 + + +- name: MAX_VALIDATORS_PER_COMMITTEE#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_VALIDATORS_PER_COMMITTEE: + spec: | + + MAX_VALIDATORS_PER_COMMITTEE: uint64 = 2048 + + +- name: MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP#capella + sources: + - file: consensus/types/presets/mainnet/capella.yaml + search: MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*max_validators_per_withdrawals_sweep:' + regex: true + spec: | + + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP = 16384 + + +- name: MAX_VOLUNTARY_EXITS#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MAX_VOLUNTARY_EXITS: + spec: | + + MAX_VOLUNTARY_EXITS = 16 + + +- name: MAX_WITHDRAWALS_PER_PAYLOAD#capella + sources: + - file: consensus/types/presets/mainnet/capella.yaml + search: MAX_WITHDRAWALS_PER_PAYLOAD: + spec: | + + MAX_WITHDRAWALS_PER_PAYLOAD: uint64 = 16 + + +- name: MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: 
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: + spec: | + + MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: uint64 = 16 + + +- name: MIN_ACTIVATION_BALANCE#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MIN_ACTIVATION_BALANCE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_activation_balance:' + regex: true + spec: | + + MIN_ACTIVATION_BALANCE: Gwei = 32000000000 + + +- name: MIN_ATTESTATION_INCLUSION_DELAY#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MIN_ATTESTATION_INCLUSION_DELAY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_attestation_inclusion_delay:' + regex: true + spec: | + + MIN_ATTESTATION_INCLUSION_DELAY: uint64 = 1 + + +- name: MIN_DEPOSIT_AMOUNT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MIN_DEPOSIT_AMOUNT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_deposit_amount:' + regex: true + spec: | + + MIN_DEPOSIT_AMOUNT: Gwei = 1000000000 + + +- name: MIN_EPOCHS_TO_INACTIVITY_PENALTY#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MIN_EPOCHS_TO_INACTIVITY_PENALTY: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_epochs_to_inactivity_penalty:' + regex: true + spec: | + + MIN_EPOCHS_TO_INACTIVITY_PENALTY: uint64 = 4 + + +- name: MIN_SEED_LOOKAHEAD#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MIN_SEED_LOOKAHEAD: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_seed_lookahead:' + regex: true + spec: | + + MIN_SEED_LOOKAHEAD: uint64 = 1 + + +- name: MIN_SLASHING_PENALTY_QUOTIENT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: MIN_SLASHING_PENALTY_QUOTIENT: + - file: consensus/types/src/core/chain_spec.rs + search: 
'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_slashing_penalty_quotient:' + regex: true + spec: | + + MIN_SLASHING_PENALTY_QUOTIENT: uint64 = 128 + + +- name: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_slashing_penalty_quotient_altair:' + regex: true + spec: | + + MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: uint64 = 64 + + +- name: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_slashing_penalty_quotient_bellatrix:' + regex: true + spec: | + + MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: uint64 = 32 + + +- name: MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_slashing_penalty_quotient_electra:' + regex: true + spec: | + + MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: uint64 = 4096 + + +- name: MIN_SYNC_COMMITTEE_PARTICIPANTS#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: MIN_SYNC_COMMITTEE_PARTICIPANTS: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*min_sync_committee_participants:' + regex: true + spec: | + + MIN_SYNC_COMMITTEE_PARTICIPANTS = 1 + + +- name: NUMBER_OF_COLUMNS#fulu + sources: + - file: consensus/types/presets/mainnet/fulu.yaml + search: NUMBER_OF_COLUMNS: + spec: | + + NUMBER_OF_COLUMNS: uint64 = 128 + + +- name: PENDING_CONSOLIDATIONS_LIMIT#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: PENDING_CONSOLIDATIONS_LIMIT: + spec: | 
+ + PENDING_CONSOLIDATIONS_LIMIT: uint64 = 262144 + + +- name: PENDING_DEPOSITS_LIMIT#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: PENDING_DEPOSITS_LIMIT: + spec: | + + PENDING_DEPOSITS_LIMIT: uint64 = 134217728 + + +- name: PENDING_PARTIAL_WITHDRAWALS_LIMIT#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: PENDING_PARTIAL_WITHDRAWALS_LIMIT: + spec: | + + PENDING_PARTIAL_WITHDRAWALS_LIMIT: uint64 = 134217728 + + +- name: PROPORTIONAL_SLASHING_MULTIPLIER#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: PROPORTIONAL_SLASHING_MULTIPLIER: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*proportional_slashing_multiplier:' + regex: true + spec: | + + PROPORTIONAL_SLASHING_MULTIPLIER: uint64 = 1 + + +- name: PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*proportional_slashing_multiplier_altair:' + regex: true + spec: | + + PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: uint64 = 2 + + +- name: PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX#bellatrix + sources: + - file: consensus/types/presets/mainnet/bellatrix.yaml + search: PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*proportional_slashing_multiplier_bellatrix:' + regex: true + spec: | + + PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: uint64 = 3 + + +- name: PROPOSER_REWARD_QUOTIENT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: PROPOSER_REWARD_QUOTIENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*proposer_reward_quotient:' + regex: true + spec: | + + PROPOSER_REWARD_QUOTIENT: uint64 = 8 + + +- name: 
PTC_SIZE#gloas + sources: [] + spec: | + + PTC_SIZE: uint64 = 512 + + +- name: SHUFFLE_ROUND_COUNT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: SHUFFLE_ROUND_COUNT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*shuffle_round_count:' + regex: true + spec: | + + SHUFFLE_ROUND_COUNT: uint64 = 90 + + +- name: SLOTS_PER_EPOCH#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: SLOTS_PER_EPOCH: + spec: | + + SLOTS_PER_EPOCH: uint64 = 32 + + +- name: SLOTS_PER_HISTORICAL_ROOT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: SLOTS_PER_HISTORICAL_ROOT: + spec: | + + SLOTS_PER_HISTORICAL_ROOT: uint64 = 8192 + + +- name: SYNC_COMMITTEE_SIZE#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: SYNC_COMMITTEE_SIZE: + spec: | + + SYNC_COMMITTEE_SIZE: uint64 = 512 + + +- name: TARGET_COMMITTEE_SIZE#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: TARGET_COMMITTEE_SIZE: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*target_committee_size:' + regex: true + spec: | + + TARGET_COMMITTEE_SIZE: uint64 = 128 + + +- name: UPDATE_TIMEOUT#altair + sources: + - file: consensus/types/presets/mainnet/altair.yaml + search: UPDATE_TIMEOUT: + spec: | + + UPDATE_TIMEOUT = 8192 + + +- name: VALIDATOR_REGISTRY_LIMIT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: VALIDATOR_REGISTRY_LIMIT: + spec: | + + VALIDATOR_REGISTRY_LIMIT: uint64 = 1099511627776 + + +- name: WHISTLEBLOWER_REWARD_QUOTIENT#phase0 + sources: + - file: consensus/types/presets/mainnet/phase0.yaml + search: WHISTLEBLOWER_REWARD_QUOTIENT: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*whistleblower_reward_quotient:' + regex: true + spec: | + + WHISTLEBLOWER_REWARD_QUOTIENT: uint64 = 512 + + +- name: 
WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA#electra + sources: + - file: consensus/types/presets/mainnet/electra.yaml + search: WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: + - file: consensus/types/src/core/chain_spec.rs + search: 'pub fn mainnet\(\).*\n(?:.*\n)*?.*whistleblower_reward_quotient_electra:' + regex: true + spec: | + + WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: uint64 = 4096 +