diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index dfd28cd9572..d6990894018 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4149,6 +4149,24 @@ impl BeaconChain { // This prevents inconsistency between the two at the expense of concurrency. drop(fork_choice); + // Persist execution proofs to the database if zkvm is enabled and proofs are cached. + // This is done after the block is successfully stored so we don't lose proofs on cache eviction. + if let Some(proofs) = self + .data_availability_checker + .get_execution_proofs(&block_root) + && !proofs.is_empty() + { + let proofs_owned: Vec<_> = proofs.iter().map(|p| (**p).clone()).collect(); + if let Err(e) = self.store.put_execution_proofs(&block_root, &proofs_owned) { + // Log but don't fail block import - proofs can still be served from cache + warn!( + %block_root, + error = ?e, + "Failed to persist execution proofs to database" + ); + } + } + // We're declaring the block "imported" at this point, since fork choice and the DB know // about it. let block_time_imported = timestamp_now(); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index bc5b41b09e1..feabcd5f44a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1159,6 +1159,13 @@ where .process_prune_blobs(data_availability_boundary); } + // Prune execution proofs older than the execution proof boundary in the background. 
+ if let Some(execution_proof_boundary) = beacon_chain.execution_proof_boundary() { + beacon_chain + .store_migrator + .process_prune_execution_proofs(execution_proof_boundary); + } + Ok(beacon_chain) } } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 228e5eb2d27..17dc227430b 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1034,6 +1034,12 @@ impl BeaconChain { .process_prune_blobs(data_availability_boundary); } + // Prune execution proofs in the background. + if let Some(execution_proof_boundary) = self.execution_proof_boundary() { + self.store_migrator + .process_prune_execution_proofs(execution_proof_boundary); + } + // Take a write-lock on the canonical head and signal for it to prune. self.canonical_head.fork_choice_write_lock().prune()?; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index bd232f2e8a2..e290cf510f0 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -120,6 +120,7 @@ pub enum Notification { Finalization(FinalizationNotification), Reconstruction, PruneBlobs(Epoch), + PruneExecutionProofs(Epoch), ManualFinalization(ManualFinalizationNotification), ManualCompaction, } @@ -251,6 +252,28 @@ impl, Cold: ItemStore> BackgroundMigrator>, + execution_proof_boundary: Epoch, + ) { + if let Err(e) = db.try_prune_execution_proofs(false, execution_proof_boundary) { + error!( + error = ?e, + "Execution proof pruning failed" + ); + } + } + /// If configured to run in the background, send `notif` to the background thread. /// /// Return `None` if the message was sent to the background thread, `Some(notif)` otherwise. 
@@ -440,11 +463,15 @@ impl, Cold: ItemStore> BackgroundMigrator reconstruction_notif = Some(notif), Notification::Finalization(fin) => finalization_notif = Some(fin), Notification::ManualFinalization(fin) => manual_finalization_notif = Some(fin), Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab), + Notification::PruneExecutionProofs(epb) => { + prune_execution_proofs_notif = Some(epb) + } Notification::ManualCompaction => manual_compaction_notif = Some(notif), } // Read the rest of the messages in the channel, taking the best of each type. @@ -475,6 +502,10 @@ impl, Cold: ItemStore> BackgroundMigrator { prune_blobs_notif = std::cmp::max(prune_blobs_notif, Some(dab)); } + Notification::PruneExecutionProofs(epb) => { + prune_execution_proofs_notif = + std::cmp::max(prune_execution_proofs_notif, Some(epb)); + } } } // Run finalization and blob pruning migrations first, then a reconstruction batch. @@ -489,6 +520,9 @@ impl, Cold: ItemStore> BackgroundMigrator = DBColumn::iter().map(|c| c.as_str()).collect(); let expected_columns = vec![ - "bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", - "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", - "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", + "bma", "blk", "blb", "bdc", "bdi", "bep", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", + "bcs", "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", + "bhr", "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", ]; assert_eq!(expected_columns, current_columns); } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 6f5170be300..f98d57e5cb6 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -129,6 +129,7 @@ pub struct BeaconProcessorQueueLengths { block_broots_queue: usize, blob_broots_queue: usize, 
execution_proof_broots_queue: usize, + execution_proof_brange_queue: usize, blob_brange_queue: usize, dcbroots_queue: usize, dcbrange_queue: usize, @@ -198,6 +199,7 @@ impl BeaconProcessorQueueLengths { block_broots_queue: 1024, blob_broots_queue: 1024, execution_proof_broots_queue: 1024, + execution_proof_brange_queue: 1024, blob_brange_queue: 1024, dcbroots_queue: 1024, dcbrange_queue: 1024, @@ -620,6 +622,7 @@ pub enum Work { BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), ExecutionProofsByRootsRequest(BlockingFn), + ExecutionProofsByRangeRequest(BlockingFn), DataColumnsByRootsRequest(BlockingFn), DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), @@ -675,6 +678,7 @@ pub enum WorkType { BlobsByRangeRequest, BlobsByRootsRequest, ExecutionProofsByRootsRequest, + ExecutionProofsByRangeRequest, DataColumnsByRootsRequest, DataColumnsByRangeRequest, GossipBlsToExecutionChange, @@ -728,6 +732,7 @@ impl Work { Work::BlobsByRangeRequest(_) => WorkType::BlobsByRangeRequest, Work::BlobsByRootsRequest(_) => WorkType::BlobsByRootsRequest, Work::ExecutionProofsByRootsRequest(_) => WorkType::ExecutionProofsByRootsRequest, + Work::ExecutionProofsByRangeRequest(_) => WorkType::ExecutionProofsByRangeRequest, Work::DataColumnsByRootsRequest(_) => WorkType::DataColumnsByRootsRequest, Work::DataColumnsByRangeRequest(_) => WorkType::DataColumnsByRangeRequest, Work::LightClientBootstrapRequest(_) => WorkType::LightClientBootstrapRequest, @@ -901,6 +906,8 @@ impl BeaconProcessor { let mut blob_broots_queue = FifoQueue::new(queue_lengths.blob_broots_queue); let mut execution_proof_broots_queue = FifoQueue::new(queue_lengths.execution_proof_broots_queue); + let mut execution_proof_brange_queue = + FifoQueue::new(queue_lengths.execution_proof_brange_queue); let mut blob_brange_queue = FifoQueue::new(queue_lengths.blob_brange_queue); let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); let mut dcbrange_queue = 
FifoQueue::new(queue_lengths.dcbrange_queue); @@ -1226,6 +1233,8 @@ impl BeaconProcessor { Some(item) } else if let Some(item) = execution_proof_broots_queue.pop() { Some(item) + } else if let Some(item) = execution_proof_brange_queue.pop() { + Some(item) } else if let Some(item) = dcbroots_queue.pop() { Some(item) } else if let Some(item) = dcbrange_queue.pop() { @@ -1430,6 +1439,9 @@ impl BeaconProcessor { Work::ExecutionProofsByRootsRequest { .. } => { execution_proof_broots_queue.push(work, work_id) } + Work::ExecutionProofsByRangeRequest { .. } => { + execution_proof_brange_queue.push(work, work_id) + } Work::DataColumnsByRootsRequest { .. } => { dcbroots_queue.push(work, work_id) } @@ -1489,6 +1501,9 @@ impl BeaconProcessor { WorkType::ExecutionProofsByRootsRequest => { execution_proof_broots_queue.len() } + WorkType::ExecutionProofsByRangeRequest => { + execution_proof_brange_queue.len() + } WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), WorkType::GossipBlsToExecutionChange => { @@ -1649,6 +1664,7 @@ impl BeaconProcessor { Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) | Work::ExecutionProofsByRootsRequest(process_fn) + | Work::ExecutionProofsByRangeRequest(process_fn) | Work::DataColumnsByRootsRequest(process_fn) | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 1b280d54035..52b98d4d3c7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -606,6 +606,7 @@ impl PeerManager { Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => 
PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -627,6 +628,7 @@ impl PeerManager { Protocol::DataColumnsByRoot => return, Protocol::DataColumnsByRange => return, Protocol::ExecutionProofsByRoot => return, + Protocol::ExecutionProofsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -651,6 +653,7 @@ impl PeerManager { Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::ExecutionProofsByRoot => PeerAction::MidToleranceError, + Protocol::ExecutionProofsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index b3401038df8..aa0fe8a3d9d 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -81,6 +81,7 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), RpcSuccessResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::ExecutionProofsByRoot(res) => res.as_ssz_bytes(), + RpcSuccessResponse::ExecutionProofsByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -362,6 +363,7 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::DataColumnsByRange(req) => req.as_ssz_bytes(), RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), RequestType::ExecutionProofsByRoot(req) => 
req.as_ssz_bytes(), + RequestType::ExecutionProofsByRange(req) => req.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), @@ -578,6 +580,11 @@ fn handle_rpc_request( Ok(Some(RequestType::ExecutionProofsByRoot(request))) } + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RequestType::ExecutionProofsByRange( + ExecutionProofsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } SupportedProtocol::PingV1 => Ok(Some(RequestType::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -746,6 +753,11 @@ fn handle_rpc_response( ExecutionProof::from_ssz_bytes(decoded_buffer)?, )))) } + SupportedProtocol::ExecutionProofsByRangeV1 => { + Ok(Some(RpcSuccessResponse::ExecutionProofsByRange(Arc::new( + ExecutionProof::from_ssz_bytes(decoded_buffer)?, + )))) + } SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -1295,6 +1307,12 @@ mod tests { RequestType::ExecutionProofsByRoot(exec_proofs) => { assert_eq!(decoded, RequestType::ExecutionProofsByRoot(exec_proofs)) } + RequestType::ExecutionProofsByRange(exec_proofs_range) => { + assert_eq!( + decoded, + RequestType::ExecutionProofsByRange(exec_proofs_range) + ) + } RequestType::Ping(ping) => { assert_eq!(decoded, RequestType::Ping(ping)) } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index d23c16f8fa1..99c0f33da31 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -94,6 +94,7 @@ pub struct RateLimiterConfig { pub(super) data_columns_by_root_quota: Quota, pub(super) data_columns_by_range_quota: Quota, pub(super) execution_proofs_by_root_quota: Quota, + pub(super) execution_proofs_by_range_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) 
light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -126,6 +127,9 @@ impl RateLimiterConfig { // TODO(zkproofs): Configure this to be less arbitrary pub const DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA: Quota = Quota::n_every(NonZeroU64::new(128).unwrap(), 10); + // TODO(zkproofs): Configure this to be less arbitrary + pub const DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA: Quota = + Quota::n_every(NonZeroU64::new(128).unwrap(), 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -146,6 +150,7 @@ impl Default for RateLimiterConfig { data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, execution_proofs_by_root_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA, + execution_proofs_by_range_quota: Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -184,6 +189,14 @@ impl Debug for RateLimiterConfig { "data_columns_by_root", fmt_q!(&self.data_columns_by_root_quota), ) + .field( + "execution_proofs_by_root", + fmt_q!(&self.execution_proofs_by_root_quota), + ) + .field( + "execution_proofs_by_range", + fmt_q!(&self.execution_proofs_by_range_quota), + ) .finish() } } @@ -207,6 +220,7 @@ impl FromStr for RateLimiterConfig { let mut data_columns_by_root_quota = None; let mut data_columns_by_range_quota = None; let mut execution_proofs_by_root_quota = None; + let mut execution_proofs_by_range_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -231,6 +245,9 @@ impl FromStr 
for RateLimiterConfig { Protocol::ExecutionProofsByRoot => { execution_proofs_by_root_quota = execution_proofs_by_root_quota.or(quota) } + Protocol::ExecutionProofsByRange => { + execution_proofs_by_range_quota = execution_proofs_by_range_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -268,6 +285,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), execution_proofs_by_root_quota: execution_proofs_by_root_quota .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_ROOT_QUOTA), + execution_proofs_by_range_quota: execution_proofs_by_range_quota + .unwrap_or(Self::DEFAULT_EXECUTION_PROOFS_BY_RANGE_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9ba8f66dafa..966106b6f69 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -603,6 +603,36 @@ impl ExecutionProofsByRootRequest { } } +/// Request execution proofs for a range of slots. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct ExecutionProofsByRangeRequest { + /// The starting slot to request execution proofs. + pub start_slot: u64, + /// The number of slots from the start slot. 
+ pub count: u64, +} + +impl ExecutionProofsByRangeRequest { + pub fn max_proofs_requested(&self) -> u64 { + // Each slot could have up to MAX_PROOFS execution proofs + self.count + .saturating_mul(types::execution_proof::MAX_PROOFS as u64) + } + + pub fn ssz_min_len() -> usize { + ExecutionProofsByRangeRequest { + start_slot: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /// Request a number of beacon data columns from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct LightClientUpdatesByRangeRequest { @@ -673,6 +703,9 @@ pub enum RpcSuccessResponse { /// A response to a get EXECUTION_PROOFS_BY_ROOT request. ExecutionProofsByRoot(Arc), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Arc), + /// A PONG response to a PING request. Pong(Ping), @@ -704,6 +737,9 @@ pub enum ResponseTermination { /// Execution proofs by root stream termination. ExecutionProofsByRoot, + /// Execution proofs by range stream termination. + ExecutionProofsByRange, + /// Light client updates by range stream termination. 
LightClientUpdatesByRange, } @@ -718,6 +754,7 @@ impl ResponseTermination { ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, ResponseTermination::ExecutionProofsByRoot => Protocol::ExecutionProofsByRoot, + ResponseTermination::ExecutionProofsByRange => Protocol::ExecutionProofsByRange, ResponseTermination::LightClientUpdatesByRange => Protocol::LightClientUpdatesByRange, } } @@ -814,6 +851,7 @@ impl RpcSuccessResponse { RpcSuccessResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, RpcSuccessResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, RpcSuccessResponse::ExecutionProofsByRoot(_) => Protocol::ExecutionProofsByRoot, + RpcSuccessResponse::ExecutionProofsByRange(_) => Protocol::ExecutionProofsByRange, RpcSuccessResponse::Pong(_) => Protocol::Ping, RpcSuccessResponse::MetaData(_) => Protocol::MetaData, RpcSuccessResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -840,6 +878,7 @@ impl RpcSuccessResponse { Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), // TODO(zkproofs): Change this when we add Slot to ExecutionProof Self::ExecutionProofsByRoot(_) + | Self::ExecutionProofsByRange(_) | Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, @@ -905,6 +944,13 @@ impl std::fmt::Display for RpcSuccessResponse { RpcSuccessResponse::ExecutionProofsByRoot(proof) => { write!(f, "ExecutionProofsByRoot: Block root: {}", proof.block_root) } + RpcSuccessResponse::ExecutionProofsByRange(proof) => { + write!( + f, + "ExecutionProofsByRange: Block root: {}", + proof.block_root + ) + } RpcSuccessResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RpcSuccessResponse::MetaData(metadata) => { write!(f, "Metadata: {}", metadata.seq_number()) @@ -1027,3 +1073,13 @@ impl std::fmt::Display for ExecutionProofsByRootRequest { ) } } + +impl std::fmt::Display for ExecutionProofsByRangeRequest { + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Request: ExecutionProofsByRange: Start Slot: {}, Count: {}", + self.start_slot, self.count + ) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index dfa44976390..0a37db0d210 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -254,6 +254,9 @@ pub enum Protocol { /// The `ExecutionProofsByRoot` protocol name. #[strum(serialize = "execution_proofs_by_root")] ExecutionProofsByRoot, + /// The `ExecutionProofsByRange` protocol name. + #[strum(serialize = "execution_proofs_by_range")] + ExecutionProofsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. @@ -285,6 +288,7 @@ impl Protocol { Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), Protocol::ExecutionProofsByRoot => Some(ResponseTermination::ExecutionProofsByRoot), + Protocol::ExecutionProofsByRange => Some(ResponseTermination::ExecutionProofsByRange), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -316,6 +320,7 @@ pub enum SupportedProtocol { DataColumnsByRootV1, DataColumnsByRangeV1, ExecutionProofsByRootV1, + ExecutionProofsByRangeV1, PingV1, MetaDataV1, MetaDataV2, @@ -341,6 +346,7 @@ impl SupportedProtocol { SupportedProtocol::DataColumnsByRootV1 => "1", SupportedProtocol::DataColumnsByRangeV1 => "1", SupportedProtocol::ExecutionProofsByRootV1 => "1", + SupportedProtocol::ExecutionProofsByRangeV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -366,6 +372,7 @@ impl SupportedProtocol { SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, 
SupportedProtocol::ExecutionProofsByRootV1 => Protocol::ExecutionProofsByRoot, + SupportedProtocol::ExecutionProofsByRangeV1 => Protocol::ExecutionProofsByRange, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -417,10 +424,16 @@ impl SupportedProtocol { ]); } if fork_context.spec.is_zkvm_enabled() { - supported.push(ProtocolId::new( - SupportedProtocol::ExecutionProofsByRootV1, - Encoding::SSZSnappy, - )); + supported.extend_from_slice(&[ + ProtocolId::new( + SupportedProtocol::ExecutionProofsByRootV1, + Encoding::SSZSnappy, + ), + ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + ), + ]); } supported } @@ -535,6 +548,10 @@ impl ProtocolId { DataColumnsByRangeRequest::ssz_max_len::(), ), Protocol::ExecutionProofsByRoot => RpcLimits::new(0, spec.max_blocks_by_root_request), + Protocol::ExecutionProofsByRange => RpcLimits::new( + ExecutionProofsByRangeRequest::ssz_min_len(), + ExecutionProofsByRangeRequest::ssz_max_len(), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -572,6 +589,7 @@ impl ProtocolId { rpc_data_column_limits::(fork_context.current_fork_epoch(), &fork_context.spec) } Protocol::ExecutionProofsByRoot => rpc_execution_proof_limits(), + Protocol::ExecutionProofsByRange => rpc_execution_proof_limits(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -614,6 +632,7 @@ impl ProtocolId { | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 | SupportedProtocol::ExecutionProofsByRootV1 + | SupportedProtocol::ExecutionProofsByRangeV1 | SupportedProtocol::PingV1 | SupportedProtocol::MetaDataV1 | SupportedProtocol::MetaDataV2 @@ -748,6 +767,7 @@ pub enum RequestType { DataColumnsByRoot(DataColumnsByRootRequest), DataColumnsByRange(DataColumnsByRangeRequest), ExecutionProofsByRoot(ExecutionProofsByRootRequest), + 
ExecutionProofsByRange(ExecutionProofsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -772,6 +792,7 @@ impl RequestType { RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), RequestType::ExecutionProofsByRoot(req) => req.max_requested() as u64, + RequestType::ExecutionProofsByRange(req) => req.max_proofs_requested(), RequestType::Ping(_) => 1, RequestType::MetaData(_) => 1, RequestType::LightClientBootstrap(_) => 1, @@ -802,6 +823,7 @@ impl RequestType { RequestType::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, RequestType::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, RequestType::ExecutionProofsByRoot(_) => SupportedProtocol::ExecutionProofsByRootV1, + RequestType::ExecutionProofsByRange(_) => SupportedProtocol::ExecutionProofsByRangeV1, RequestType::Ping(_) => SupportedProtocol::PingV1, RequestType::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -834,6 +856,7 @@ impl RequestType { RequestType::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, RequestType::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, RequestType::ExecutionProofsByRoot(_) => ResponseTermination::ExecutionProofsByRoot, + RequestType::ExecutionProofsByRange(_) => ResponseTermination::ExecutionProofsByRange, RequestType::Status(_) => unreachable!(), RequestType::Goodbye(_) => unreachable!(), RequestType::Ping(_) => unreachable!(), @@ -884,6 +907,10 @@ impl RequestType { SupportedProtocol::ExecutionProofsByRootV1, Encoding::SSZSnappy, )], + RequestType::ExecutionProofsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::ExecutionProofsByRangeV1, + Encoding::SSZSnappy, + )], RequestType::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -923,6 +950,7 @@ impl RequestType { 
RequestType::DataColumnsByRoot(_) => false, RequestType::DataColumnsByRange(_) => false, RequestType::ExecutionProofsByRoot(_) => false, + RequestType::ExecutionProofsByRange(_) => false, RequestType::Ping(_) => true, RequestType::MetaData(_) => true, RequestType::LightClientBootstrap(_) => true, @@ -1039,6 +1067,9 @@ impl std::fmt::Display for RequestType { RequestType::ExecutionProofsByRoot(req) => { write!(f, "Execution proofs by root: {:?}", req) } + RequestType::ExecutionProofsByRange(req) => { + write!(f, "Execution proofs by range: {:?}", req) + } RequestType::Ping(ping) => write!(f, "Ping: {}", ping.data), RequestType::MetaData(_) => write!(f, "MetaData request"), RequestType::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index f70b29cfe45..9dfbc668c89 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -107,6 +107,8 @@ pub struct RPCRateLimiter { dcbrange_rl: Limiter, /// ExecutionProofsByRoot rate limiter. execution_proofs_by_root_rl: Limiter, + /// ExecutionProofsByRange rate limiter. + execution_proofs_by_range_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -152,6 +154,8 @@ pub struct RPCRateLimiterBuilder { dcbrange_quota: Option, /// Quota for the ExecutionProofsByRoot protocol. execution_proofs_by_root_quota: Option, + /// Quota for the ExecutionProofsByRange protocol. + execution_proofs_by_range_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. 
@@ -178,6 +182,7 @@ impl RPCRateLimiterBuilder { Protocol::DataColumnsByRoot => self.dcbroot_quota = q, Protocol::DataColumnsByRange => self.dcbrange_quota = q, Protocol::ExecutionProofsByRoot => self.execution_proofs_by_root_quota = q, + Protocol::ExecutionProofsByRange => self.execution_proofs_by_range_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -230,6 +235,10 @@ impl RPCRateLimiterBuilder { .execution_proofs_by_root_quota .ok_or("ExecutionProofsByRoot quota not specified")?; + let execution_proofs_by_range_quota = self + .execution_proofs_by_range_quota + .ok_or("ExecutionProofsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -242,6 +251,7 @@ impl RPCRateLimiterBuilder { let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; let execution_proofs_by_root_rl = Limiter::from_quota(execution_proofs_by_root_quota)?; + let execution_proofs_by_range_rl = Limiter::from_quota(execution_proofs_by_range_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -266,6 +276,7 @@ impl RPCRateLimiterBuilder { dcbroot_rl, dcbrange_rl, execution_proofs_by_root_rl, + execution_proofs_by_range_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -320,6 +331,7 @@ impl RPCRateLimiter { data_columns_by_root_quota, data_columns_by_range_quota, execution_proofs_by_root_quota, + execution_proofs_by_range_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -341,6 +353,10 @@ impl 
RPCRateLimiter { Protocol::ExecutionProofsByRoot, execution_proofs_by_root_quota, ) + .set_quota( + Protocol::ExecutionProofsByRange, + execution_proofs_by_range_quota, + ) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -389,6 +405,7 @@ impl RPCRateLimiter { Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, Protocol::DataColumnsByRange => &mut self.dcbrange_rl, Protocol::ExecutionProofsByRoot => &mut self.execution_proofs_by_root_rl, + Protocol::ExecutionProofsByRange => &mut self.execution_proofs_by_range_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, @@ -414,6 +431,7 @@ impl RPCRateLimiter { dcbroot_rl, dcbrange_rl, execution_proofs_by_root_rl, + execution_proofs_by_range_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -432,6 +450,7 @@ impl RPCRateLimiter { dcbrange_rl.prune(time_since_start); dcbroot_rl.prune(time_since_start); execution_proofs_by_root_rl.prune(time_since_start); + execution_proofs_by_range_rl.prune(time_since_start); lc_bootstrap_rl.prune(time_since_start); lc_optimistic_update_rl.prune(time_since_start); lc_finality_update_rl.prune(time_since_start); diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index d97506653b5..1fbc313be0a 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -168,6 +168,8 @@ pub enum Response { DataColumnsByRoot(Option>>), /// A response to a get EXECUTION_PROOFS_BY_ROOT request. ExecutionProofsByRoot(Option>), + /// A response to a get EXECUTION_PROOFS_BY_RANGE request. + ExecutionProofsByRange(Option>), /// A response to a LightClientUpdate request. 
LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. @@ -209,6 +211,10 @@ impl std::convert::From> for RpcResponse { Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRoot(p)), None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRoot), }, + Response::ExecutionProofsByRange(r) => match r { + Some(p) => RpcResponse::Success(RpcSuccessResponse::ExecutionProofsByRange(p)), + None => RpcResponse::StreamTermination(ResponseTermination::ExecutionProofsByRange), + }, Response::Status(s) => RpcResponse::Success(RpcSuccessResponse::Status(s)), Response::LightClientBootstrap(b) => { RpcResponse::Success(RpcSuccessResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 9f1530ec732..d934131b647 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1580,6 +1580,17 @@ impl Network { request_type, }) } + RequestType::ExecutionProofsByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["execution_proofs_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + inbound_request_id, + request_type, + }) + } RequestType::LightClientBootstrap(_) => { metrics::inc_counter_vec( &metrics::TOTAL_RPC_REQUESTS, @@ -1670,6 +1681,11 @@ impl Network { peer_id, Response::ExecutionProofsByRoot(Some(resp)), ), + RpcSuccessResponse::ExecutionProofsByRange(resp) => self.build_response( + id, + peer_id, + Response::ExecutionProofsByRange(Some(resp)), + ), // Should never be reached RpcSuccessResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1702,6 +1718,9 @@ impl Network { ResponseTermination::ExecutionProofsByRoot => { Response::ExecutionProofsByRoot(None) } + ResponseTermination::ExecutionProofsByRange => { + Response::ExecutionProofsByRange(None) 
+ } ResponseTermination::LightClientUpdatesByRange => { Response::LightClientUpdatesByRange(None) } diff --git a/beacon_node/lighthouse_tracing/src/lib.rs b/beacon_node/lighthouse_tracing/src/lib.rs index dd9e9f1ebb2..9ca5afbcf9c 100644 --- a/beacon_node/lighthouse_tracing/src/lib.rs +++ b/beacon_node/lighthouse_tracing/src/lib.rs @@ -41,6 +41,8 @@ pub const SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST: &str = "handle_blocks_by_root_requ pub const SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST: &str = "handle_blobs_by_root_request"; pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST: &str = "handle_execution_proofs_by_root_request"; +pub const SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST: &str = + "handle_execution_proofs_by_range_request"; pub const SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST: &str = "handle_data_columns_by_root_request"; pub const SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE: &str = "handle_light_client_updates_by_range"; pub const SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP: &str = "handle_light_client_bootstrap"; @@ -73,6 +75,7 @@ pub const LH_BN_ROOT_SPAN_NAMES: &[&str] = &[ SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 7db2790920e..ffac53e522a 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -14,7 +14,7 @@ use beacon_processor::{ use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, + ExecutionProofsByRangeRequest, 
ExecutionProofsByRootRequest, LightClientUpdatesByRangeRequest, }; use lighthouse_network::service::api_types::CustodyBackfillBatchId; use lighthouse_network::{ @@ -699,6 +699,24 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `ExecutionProofsByRangeRequest`s from the RPC network. + pub fn send_execution_proofs_by_range_request( + self: &Arc, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + request: ExecutionProofsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_execution_proofs_by_range_request(peer_id, inbound_request_id, request) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::ExecutionProofsByRangeRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. pub fn send_data_columns_by_roots_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index f063d7e8380..17ee4076731 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -7,7 +7,7 @@ use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessStatus, WhenS use itertools::{Itertools, process_results}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, - ExecutionProofsByRootRequest, + ExecutionProofsByRangeRequest, ExecutionProofsByRootRequest, }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; @@ -15,9 +15,9 @@ use lighthouse_tracing::{ SPAN_HANDLE_BLOBS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOBS_BY_ROOT_REQUEST, SPAN_HANDLE_BLOCKS_BY_RANGE_REQUEST, SPAN_HANDLE_BLOCKS_BY_ROOT_REQUEST, SPAN_HANDLE_DATA_COLUMNS_BY_RANGE_REQUEST, 
SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, - SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, - SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, - SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, + SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, SPAN_HANDLE_EXECUTION_PROOFS_BY_ROOT_REQUEST, + SPAN_HANDLE_LIGHT_CLIENT_BOOTSTRAP, SPAN_HANDLE_LIGHT_CLIENT_FINALITY_UPDATE, + SPAN_HANDLE_LIGHT_CLIENT_OPTIMISTIC_UPDATE, SPAN_HANDLE_LIGHT_CLIENT_UPDATES_BY_RANGE, }; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; @@ -436,19 +436,39 @@ impl NetworkBeaconProcessor { request.already_have.iter().copied().collect(); let count_needed = request.count_needed as usize; - // Get all execution proofs we have for this block from the DA checker - let Some(available_proofs) = self + // Get all execution proofs we have for this block from the DA checker, falling back to the + // store (which checks the store cache/DB). + let available_proofs = match self .chain .data_availability_checker .get_execution_proofs(&block_root) - else { - // No proofs available for this block - debug!( - %peer_id, - %block_root, - "No execution proofs available for peer" - ); - return Ok(()); + { + Some(proofs) => proofs, + None => match self.chain.store.get_execution_proofs(&block_root) { + Ok(proofs) => { + if proofs.is_empty() { + debug!( + %peer_id, + %block_root, + "No execution proofs available for peer" + ); + return Ok(()); + } + proofs + } + Err(e) => { + error!( + %peer_id, + %block_root, + error = ?e, + "Error fetching execution proofs for block root" + ); + return Err(( + RpcErrorResponse::ServerError, + "Error fetching execution proofs", + )); + } + }, }; // Filter out proofs the peer already has and send up to count_needed @@ -486,6 +506,137 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle an `ExecutionProofsByRange` request from the peer. 
+ #[instrument( + name = SPAN_HANDLE_EXECUTION_PROOFS_BY_RANGE_REQUEST, + parent = None, + level = "debug", + skip_all, + fields(peer_id = %peer_id, client = tracing::field::Empty) + )] + pub fn handle_execution_proofs_by_range_request( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) { + let client = self.network_globals.client(&peer_id); + Span::current().record("client", field::display(client.kind)); + + self.terminate_response_stream( + peer_id, + inbound_request_id, + self.handle_execution_proofs_by_range_request_inner(peer_id, inbound_request_id, req), + Response::ExecutionProofsByRange, + ); + } + + /// Handle an `ExecutionProofsByRange` request from the peer. + fn handle_execution_proofs_by_range_request_inner( + &self, + peer_id: PeerId, + inbound_request_id: InboundRequestId, + req: ExecutionProofsByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!( + %peer_id, + count = req.count, + start_slot = req.start_slot, + "Received ExecutionProofsByRange Request" + ); + + let request_start_slot = Slot::from(req.start_slot); + + // Check if zkvm is enabled and get the execution proof boundary + let execution_proof_boundary_slot = match self.chain.execution_proof_boundary() { + Some(boundary) => boundary.start_slot(T::EthSpec::slots_per_epoch()), + None => { + debug!("ZKVM fork is disabled"); + return Err((RpcErrorResponse::InvalidRequest, "ZKVM fork is disabled")); + } + }; + + // Get the oldest execution proof slot from the store + let oldest_execution_proof_slot = self + .chain + .store + .get_execution_proof_info() + .oldest_execution_proof_slot + .unwrap_or(execution_proof_boundary_slot); + + if request_start_slot < oldest_execution_proof_slot { + debug!( + %request_start_slot, + %oldest_execution_proof_slot, + %execution_proof_boundary_slot, + "Range request start slot is older than the oldest execution proof slot." 
+ ); + + return if execution_proof_boundary_slot < oldest_execution_proof_slot { + Err(( + RpcErrorResponse::ResourceUnavailable, + "execution proofs pruned within boundary", + )) + } else { + Err(( + RpcErrorResponse::InvalidRequest, + "Req outside availability period", + )) + }; + } + + let block_roots = self.get_block_roots_for_slot_range( + req.start_slot, + req.count, + "ExecutionProofsByRange", + )?; + let mut proofs_sent = 0; + + for root in block_roots { + // Get execution proofs from the database (like BlobsByRange does for blobs) + match self.chain.store.get_execution_proofs(&root) { + Ok(proofs) => { + for proof in proofs { + // Due to skip slots, proofs could be out of the range + if proof.slot >= request_start_slot + && proof.slot < request_start_slot + req.count + { + proofs_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + inbound_request_id, + response: Response::ExecutionProofsByRange(Some(proof)), + }); + } + } + } + Err(e) => { + error!( + request = ?req, + %peer_id, + block_root = ?root, + error = ?e, + "Error fetching execution proofs for block root" + ); + return Err(( + RpcErrorResponse::ServerError, + "Failed fetching execution proofs from database", + )); + } + } + } + + debug!( + %peer_id, + start_slot = req.start_slot, + count = req.count, + sent = proofs_sent, + "ExecutionProofsByRange outgoing response processed" + ); + + Ok(()) + } + /// Handle a `DataColumnsByRoot` request from the peer. 
#[instrument( name = SPAN_HANDLE_DATA_COLUMNS_BY_ROOT_REQUEST, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index eb02ddad921..1910f106535 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -283,6 +283,15 @@ impl Router { request, ), ), + RequestType::ExecutionProofsByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_execution_proofs_by_range_request( + peer_id, + inbound_request_id, + request, + ), + ), _ => {} } } @@ -323,6 +332,15 @@ impl Router { Response::ExecutionProofsByRoot(execution_proof) => { self.on_execution_proofs_by_root_response(peer_id, app_request_id, execution_proof); } + Response::ExecutionProofsByRange(execution_proof) => { + // ExecutionProofsByRange responses are not currently used by sync + // This is primarily for serving range requests to peers + debug!( + %peer_id, + ?execution_proof, + "Received ExecutionProofsByRange Response (not processed)" + ); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index a07cc838863..9cb6620816e 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -32,6 +32,8 @@ pub enum Error { BlobInfoConcurrentMutation, /// The store's `data_column_info` was mutated concurrently, the latest modification wasn't applied. DataColumnInfoConcurrentMutation, + /// The store's `execution_proof_info` was mutated concurrently, the latest modification wasn't applied. + ExecutionProofInfoConcurrentMutation, /// The block or state is unavailable due to weak subjectivity sync. HistoryUnavailable, /// State reconstruction cannot commence because not all historic blocks are known. 
@@ -92,6 +94,7 @@ pub enum Error { LoadSplit(Box), LoadBlobInfo(Box), LoadDataColumnInfo(Box), + LoadExecutionProofInfo(Box), LoadConfig(Box), LoadHotStateSummary(Hash256, Box), LoadHotStateSummaryForSplit(Box), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c4137191744..a05d915795f 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -9,12 +9,13 @@ use crate::metadata::{ ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, AnchorInfo, BLOB_INFO_KEY, BlobInfo, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, CompactionTimestamp, DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, DataColumnCustodyInfo, DataColumnInfo, - SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, + EXECUTION_PROOF_INFO_KEY, ExecutionProofInfo, SCHEMA_VERSION_KEY, SPLIT_KEY, + STATE_UPPER_LIMIT_NO_RETAIN, SchemaVersion, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, - StoreOp, get_data_column_key, + StoreOp, get_data_column_key, get_execution_proof_key, metrics::{self, COLD_METRIC, HOT_METRIC}, parse_data_column_key, }; @@ -61,6 +62,8 @@ pub struct HotColdDB, Cold: ItemStore> { blob_info: RwLock, /// The starting slots for the range of data columns stored in the database. data_column_info: RwLock, + /// The starting slots for the range of execution proofs stored in the database. + execution_proof_info: RwLock, pub(crate) config: StoreConfig, pub hierarchy: HierarchyModuli, /// Cold database containing compact historical data. 
@@ -93,6 +96,7 @@ struct BlockCache { block_cache: LruCache>, blob_cache: LruCache>, data_column_cache: LruCache>>>, + execution_proof_cache: LruCache>>, data_column_custody_info_cache: Option, } @@ -102,6 +106,7 @@ impl BlockCache { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), data_column_cache: LruCache::new(size), + execution_proof_cache: LruCache::new(size), data_column_custody_info_cache: None, } } @@ -116,6 +121,9 @@ impl BlockCache { .get_or_insert_mut(block_root, Default::default) .insert(data_column.index, data_column); } + pub fn put_execution_proofs(&mut self, block_root: Hash256, proofs: Vec>) { + self.execution_proof_cache.put(block_root, proofs); + } pub fn put_data_column_custody_info( &mut self, data_column_custody_info: Option, @@ -139,6 +147,12 @@ impl BlockCache { .get(block_root) .and_then(|map| map.get(column_index).cloned()) } + pub fn get_execution_proofs( + &mut self, + block_root: &Hash256, + ) -> Option>> { + self.execution_proof_cache.get(block_root).cloned() + } pub fn get_data_column_custody_info(&self) -> Option { self.data_column_custody_info_cache.clone() } @@ -151,10 +165,14 @@ impl BlockCache { pub fn delete_data_columns(&mut self, block_root: &Hash256) { let _ = self.data_column_cache.pop(block_root); } + pub fn delete_execution_proofs(&mut self, block_root: &Hash256) { + let _ = self.execution_proof_cache.pop(block_root); + } pub fn delete(&mut self, block_root: &Hash256) { self.delete_block(block_root); self.delete_blobs(block_root); self.delete_data_columns(block_root); + self.delete_execution_proofs(block_root); } } @@ -232,6 +250,7 @@ impl HotColdDB, MemoryStore> { anchor_info: RwLock::new(ANCHOR_UNINITIALIZED), blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), + execution_proof_info: RwLock::new(ExecutionProofInfo::default()), cold_db: MemoryStore::open(), blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), @@ -286,6 +305,7 @@ impl HotColdDB, 
BeaconNodeBackend> { anchor_info, blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), + execution_proof_info: RwLock::new(ExecutionProofInfo::default()), blobs_db: BeaconNodeBackend::open(&config, blobs_db_path)?, cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, @@ -395,10 +415,38 @@ impl HotColdDB, BeaconNodeBackend> { new_data_column_info.clone(), )?; + // Initialize execution proof info + let execution_proof_info = db.load_execution_proof_info()?; + let zkvm_fork_slot = db + .spec + .zkvm_fork_epoch() + .map(|epoch| epoch.start_slot(E::slots_per_epoch())); + let new_execution_proof_info = match &execution_proof_info { + Some(execution_proof_info) => { + // Set the oldest execution proof slot to the fork slot if it is not yet set. + let oldest_execution_proof_slot = execution_proof_info + .oldest_execution_proof_slot + .or(zkvm_fork_slot); + ExecutionProofInfo { + oldest_execution_proof_slot, + } + } + // First start. + None => ExecutionProofInfo { + // Set the oldest execution proof slot to the fork slot if it is not yet set. + oldest_execution_proof_slot: zkvm_fork_slot, + }, + }; + db.compare_and_set_execution_proof_info_with_write( + <_>::default(), + new_execution_proof_info.clone(), + )?; + info!( path = ?blobs_db_path, oldest_blob_slot = ?new_blob_info.oldest_blob_slot, oldest_data_column_slot = ?new_data_column_info.oldest_data_column_slot, + oldest_execution_proof_slot = ?new_execution_proof_info.oldest_execution_proof_slot, "Blob DB initialized" ); @@ -1027,6 +1075,47 @@ impl, Cold: ItemStore> HotColdDB } } + /// Store execution proofs for a block. 
+ pub fn put_execution_proofs( + &self, + block_root: &Hash256, + proofs: &[ExecutionProof], + ) -> Result<(), Error> { + for proof in proofs { + self.blobs_db.put_bytes( + DBColumn::BeaconExecutionProof, + &get_execution_proof_key(block_root, proof.proof_id.as_u8()), + &proof.as_ssz_bytes(), + )?; + } + if !proofs.is_empty() { + let cached = proofs + .iter() + .map(|proof| Arc::new(proof.clone())) + .collect::>(); + self.block_cache + .as_ref() + .inspect(|cache| cache.lock().put_execution_proofs(*block_root, cached)); + } + Ok(()) + } + + /// Create key-value store operations for storing execution proofs. + pub fn execution_proofs_as_kv_store_ops( + &self, + block_root: &Hash256, + proofs: &[ExecutionProof], + ops: &mut Vec, + ) { + for proof in proofs { + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconExecutionProof, + get_execution_proof_key(block_root, proof.proof_id.as_u8()), + proof.as_ssz_bytes(), + )); + } + } + /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { let mut ops: Vec = Vec::new(); @@ -2558,6 +2647,47 @@ impl, Cold: ItemStore> HotColdDB } } + /// Fetch all execution proofs for a given block from the store. 
+ pub fn get_execution_proofs( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + if let Some(proofs) = self + .block_cache + .as_ref() + .and_then(|cache| cache.lock().get_execution_proofs(block_root)) + { + return Ok(proofs); + } + + let mut proofs = Vec::new(); + let prefix = block_root.as_slice(); + + for result in self + .blobs_db + .iter_column_from::>(DBColumn::BeaconExecutionProof, prefix) + { + let (key, value) = result?; + // Check if key starts with our block_root prefix + if !key.starts_with(prefix) { + // We've moved past this block's proofs + break; + } + let proof = Arc::new(ExecutionProof::from_ssz_bytes(&value)?); + proofs.push(proof); + } + + if !proofs.is_empty() { + self.block_cache.as_ref().inspect(|cache| { + cache + .lock() + .put_execution_proofs(*block_root, proofs.clone()) + }); + } + + Ok(proofs) + } + /// Fetch all keys in the data_column column with prefix `block_root` pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { self.blobs_db @@ -2877,6 +3007,77 @@ impl, Cold: ItemStore> HotColdDB data_column_info.as_kv_store_op(DATA_COLUMN_INFO_KEY) } + /// Get a clone of the store's execution proof info. + /// + /// To do mutations, use `compare_and_set_execution_proof_info`. + pub fn get_execution_proof_info(&self) -> ExecutionProofInfo { + self.execution_proof_info.read_recursive().clone() + } + + /// Initialize the `ExecutionProofInfo` when starting from genesis or a checkpoint. + pub fn init_execution_proof_info(&self, anchor_slot: Slot) -> Result { + let oldest_execution_proof_slot = self.spec.zkvm_fork_epoch().map(|fork_epoch| { + std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) + }); + let execution_proof_info = ExecutionProofInfo { + oldest_execution_proof_slot, + }; + self.compare_and_set_execution_proof_info( + self.get_execution_proof_info(), + execution_proof_info, + ) + } + + /// Atomically update the execution proof info from `prev_value` to `new_value`. 
+ /// + /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other + /// values. + /// + /// Return an `ExecutionProofInfoConcurrentMutation` error if the `prev_value` provided + /// is not correct. + pub fn compare_and_set_execution_proof_info( + &self, + prev_value: ExecutionProofInfo, + new_value: ExecutionProofInfo, + ) -> Result { + let mut execution_proof_info = self.execution_proof_info.write(); + if *execution_proof_info == prev_value { + let kv_op = self.store_execution_proof_info_in_batch(&new_value); + *execution_proof_info = new_value; + Ok(kv_op) + } else { + Err(Error::ExecutionProofInfoConcurrentMutation) + } + } + + /// As for `compare_and_set_execution_proof_info`, but also writes to disk immediately. + pub fn compare_and_set_execution_proof_info_with_write( + &self, + prev_value: ExecutionProofInfo, + new_value: ExecutionProofInfo, + ) -> Result<(), Error> { + let kv_store_op = self.compare_and_set_execution_proof_info(prev_value, new_value)?; + self.hot_db.do_atomically(vec![kv_store_op]) + } + + /// Load the execution proof info from disk, but do not set `self.execution_proof_info`. + fn load_execution_proof_info(&self) -> Result, Error> { + self.hot_db + .get(&EXECUTION_PROOF_INFO_KEY) + .map_err(|e| Error::LoadExecutionProofInfo(e.into())) + } + + /// Store the given `execution_proof_info` to disk. + /// + /// The argument is intended to be `self.execution_proof_info`, but is passed manually to avoid + /// issues with recursive locking. + fn store_execution_proof_info_in_batch( + &self, + execution_proof_info: &ExecutionProofInfo, + ) -> KeyValueStoreOp { + execution_proof_info.as_kv_store_op(EXECUTION_PROOF_INFO_KEY) + } + /// Return the slot-window describing the available historic states. /// /// Returns `(lower_limit, upper_limit)`. @@ -3395,6 +3596,178 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Try to prune execution proofs older than the execution proof boundary. 
+ /// + /// Proofs from the epoch `execution_proof_boundary` are retained. + /// This epoch is an _exclusive_ endpoint for the pruning process. + /// + /// This function only supports pruning execution proofs older than the split point, + /// which is older than (or equal to) finalization. + pub fn try_prune_execution_proofs( + &self, + force: bool, + execution_proof_boundary: Epoch, + ) -> Result<(), Error> { + // Check if zkvm fork is enabled + if self.spec.zkvm_fork_epoch().is_none() { + debug!("ZKVM fork is disabled"); + return Ok(()); + } + + let pruning_enabled = self.get_config().prune_blobs; // Use same config as blobs for now + if !force && !pruning_enabled { + debug!( + prune_blobs = pruning_enabled, + "Execution proof pruning is disabled" + ); + return Ok(()); + } + + let execution_proof_info = self.get_execution_proof_info(); + let Some(oldest_execution_proof_slot) = execution_proof_info.oldest_execution_proof_slot + else { + debug!("No execution proofs stored yet"); + return Ok(()); + }; + + let start_epoch = oldest_execution_proof_slot.epoch(E::slots_per_epoch()); + + // Prune execution proofs up to `execution_proof_boundary - 1` or the epoch before the + // split slot's epoch, whichever is older. + let split = self.get_split_info(); + let end_epoch = std::cmp::min( + execution_proof_boundary.saturating_sub(1u64), + split.slot.epoch(E::slots_per_epoch()).saturating_sub(1u64), + ); + let end_slot = end_epoch.end_slot(E::slots_per_epoch()); + + let can_prune = end_epoch != Epoch::new(0) && start_epoch <= end_epoch; + if !can_prune { + debug!( + %oldest_execution_proof_slot, + %execution_proof_boundary, + %split.slot, + %end_epoch, + %start_epoch, + "Execution proofs are pruned" + ); + return Ok(()); + } + + debug!( + %end_epoch, + %execution_proof_boundary, + "Pruning execution proofs" + ); + + // Iterate blocks backwards from the `end_epoch`. 
+ let Some((end_block_root, _)) = self + .forwards_block_roots_iterator_until(end_slot, end_slot, || { + self.get_hot_state(&split.state_root, true)? + .ok_or(HotColdDBError::MissingSplitState( + split.state_root, + split.slot, + )) + .map(|state| (state, split.state_root)) + .map_err(Into::into) + })? + .next() + .transpose()? + else { + debug!( + %end_epoch, + %execution_proof_boundary, + "No execution proofs to prune" + ); + return Ok(()); + }; + + let mut db_ops = vec![]; + let mut removed_block_roots = vec![]; + let mut new_oldest_slot: Option = None; + + // Walk ancestors from `end_block_root` back towards genesis; proofs may be stored + // sparsely across slots, so every ancestor is checked and no early exit is taken. + for tuple in ParentRootBlockIterator::new(self, end_block_root) { + let (block_root, blinded_block) = tuple?; + let slot = blinded_block.slot(); + + // Get all execution proof keys for this block + let keys = self.get_all_execution_proof_keys(&block_root); + + // Check if any proofs exist for this block + let mut block_has_proofs = false; + for key in keys { + if self + .blobs_db + .key_exists(DBColumn::BeaconExecutionProof, &key)? 
+ { + block_has_proofs = true; + db_ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconExecutionProof, + key, + )); + } + } + + if block_has_proofs { + debug!( + ?block_root, + %slot, + "Pruning execution proofs for block" + ); + removed_block_roots.push(block_root); + new_oldest_slot = Some(slot); + } + // Continue iterating even if this block has no proofs - proofs may be sparse + } + + // Commit deletions + if !db_ops.is_empty() { + debug!( + num_deleted = db_ops.len(), + "Deleting execution proofs from disk" + ); + self.blobs_db.do_atomically(db_ops)?; + } + + // TODO(zkproofs): Fix this to make it more readable + if !removed_block_roots.is_empty() + && let Some(mut block_cache) = self.block_cache.as_ref().map(|cache| cache.lock()) + { + for block_root in removed_block_roots { + block_cache.delete_execution_proofs(&block_root); + } + } + + // If any proofs were deleted, mark everything up to and including `end_slot` as pruned + // by advancing the oldest-proof marker to `end_slot + 1` (not to the oldest pruned slot). + if let Some(new_slot) = new_oldest_slot { + let new_oldest = end_slot + 1; + self.compare_and_set_execution_proof_info_with_write( + execution_proof_info.clone(), + ExecutionProofInfo { + oldest_execution_proof_slot: Some(new_oldest), + }, + )?; + debug!( + old_oldest = %new_slot, + new_oldest = %new_oldest, + "Updated execution proof info" + ); + } + + debug!("Execution proof pruning complete"); + + Ok(()) + } + + /// Get all possible execution proof keys for a given block root. + /// Returns keys for proof_ids 0 to MAX_PROOFS-1. + fn get_all_execution_proof_keys(&self, block_root: &Hash256) -> Vec> { + (0..types::MAX_PROOFS as u8) + .map(|proof_id| get_execution_proof_key(block_root, proof_id)) + .collect() + } + + /// Delete *all* states from the freezer database and update the anchor accordingly. 
/// /// WARNING: this method deletes the genesis state and replaces it with the provided diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ae5b2e1e571..516e858e581 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -40,6 +40,7 @@ use strum::{EnumIter, EnumString, IntoStaticStr}; pub use types::*; const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; +const EXECUTION_PROOF_DB_KEY_SIZE: usize = 32 + 1; // block_root + proof_id pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a, K> = Box> + 'a>; @@ -171,6 +172,25 @@ pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Er Ok((block_root, column_index)) } +pub fn get_execution_proof_key(block_root: &Hash256, proof_id: u8) -> Vec<u8> { + let mut result = block_root.as_slice().to_vec(); + result.push(proof_id); + result +} + +pub fn parse_execution_proof_key(data: Vec<u8>) -> Result<(Hash256, u8), Error> { + if data.len() != EXECUTION_PROOF_DB_KEY_SIZE { + return Err(Error::InvalidKey(format!( + "Unexpected BeaconExecutionProof key len {}", + data.len() + ))); + } + let (block_root_bytes, proof_id_bytes) = data.split_at(32); + let block_root = Hash256::from_slice(block_root_bytes); + let proof_id = proof_id_bytes[0]; + Ok((block_root, proof_id)) +} + #[must_use] #[derive(Clone)] pub enum KeyValueStoreOp { @@ -263,6 +283,12 @@ pub enum DBColumn { BeaconDataColumn, #[strum(serialize = "bdi")] BeaconDataColumnCustodyInfo, + /// For storing execution proofs (zkVM proofs) in the blob database. + /// + /// - Key: `Hash256` block root + `u8` proof_id (33 bytes total). + /// - Value: SSZ-encoded ExecutionProof. + #[strum(serialize = "bep")] + BeaconExecutionProof, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). /// /// DEPRECATED. 
@@ -437,6 +463,7 @@ impl DBColumn { | Self::LightClientUpdate | Self::Dummy => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, + Self::BeaconExecutionProof => EXECUTION_PROOF_DB_KEY_SIZE, } } } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index cf494684515..7a5979481fe 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -19,6 +19,7 @@ pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); pub const DATA_COLUMN_CUSTODY_INFO_KEY: Hash256 = Hash256::repeat_byte(8); +pub const EXECUTION_PROOF_INFO_KEY: Hash256 = Hash256::repeat_byte(9); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -255,3 +256,30 @@ impl StoreItem for DataColumnInfo { Ok(Self::from_ssz_bytes(bytes)?) } } + +/// Database parameters relevant to execution proof sync. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct ExecutionProofInfo { + /// The slot after which execution proofs are or *will be* available (>=). + /// + /// If this slot is in the future, then it is the first slot of the ZKVM fork, from which + /// execution proofs will be available. + /// + /// If the `oldest_execution_proof_slot` is `None` then this means that the ZKVM fork epoch + /// is not yet known. + pub oldest_execution_proof_slot: Option<Slot>, +} + +impl StoreItem for ExecutionProofInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { + Ok(Self::from_ssz_bytes(bytes)?) 
+ } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 1bab464b689..21eb5e1b8c7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -61,7 +61,7 @@ pub mod chain_spec { } // Re-export execution_proof types for backwards compatibility -pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES}; +pub use crate::execution_proof::{ExecutionProof, MAX_PROOF_DATA_BYTES, MAX_PROOFS}; pub use crate::execution_proof_id::{EXECUTION_PROOF_TYPE_COUNT, ExecutionProofId}; pub mod beacon_block {