diff --git a/Cargo.toml b/Cargo.toml index 9370ac26b8..94511f2dce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -161,3 +161,9 @@ opt-level = 3 inherits = "dev" incremental = false debug = "line-tables-only" + +# [profile.test.package.proptest] +# opt-level = 3 + +# [profile.test.package.rand_chacha] +# opt-level = 3 diff --git a/crates/executor/src/block.rs b/crates/executor/src/block.rs index 83ce814f05..44cd0139ed 100644 --- a/crates/executor/src/block.rs +++ b/crates/executor/src/block.rs @@ -13,7 +13,7 @@ use crate::types::{ transaction_declared_deprecated_class, transaction_type, BlockInfo, - Receipt, + ReceiptAndEvents, StateDiff, }; use crate::{ExecutionState, Transaction, TransactionExecutionError}; @@ -27,15 +27,66 @@ pub struct BlockExecutor { next_txn_idx: usize, } -type ReceiptAndEvents = (Receipt, Vec); +pub trait BlockExecutorExt { + fn new( + chain_id: ChainId, + block_info: BlockInfo, + eth_fee_address: ContractAddress, + strk_fee_address: ContractAddress, + db_conn: pathfinder_storage::Connection, + ) -> anyhow::Result + where + Self: Sized; + + /// Create a new BlockExecutor from a StateUpdate + /// This allows reconstructing an executor from a stored state diff + /// checkpoint + fn new_with_pending_state( + chain_id: ChainId, + block_info: BlockInfo, + eth_fee_address: ContractAddress, + strk_fee_address: ContractAddress, + db_conn: pathfinder_storage::Connection, + pending_state: std::sync::Arc, + ) -> anyhow::Result + where + Self: Sized; + + /// Execute a batch of transactions in the current block. 
+ fn execute( + &mut self, + txns: Vec, + ) -> Result, TransactionExecutionError>; + + fn finalize(self) -> anyhow::Result; + + /// This allows for setting the correct starting index for chained executors + fn set_transaction_index(&mut self, index: usize); + + /// Extract state diff without consuming the executor + /// This allows extracting the diff for rollback scenarios without losing + /// the executor + /// + /// Note: This method does NOT call `executor.finalize()`, which means it + /// doesn't include stateful compression changes (system contract 0x2 + /// updates). These changes are only needed when finalizing the proposal + /// for commitment computation, not for intermediate diff extraction + /// during batch execution. + fn extract_state_diff(&self) -> anyhow::Result; +} impl BlockExecutor { - pub fn new( + /// Create a new BlockExecutor with a pre-existing initial state + /// This allows for executor chaining where the new executor starts with + /// the final state of a previous executor + #[cfg(test)] + pub fn new_with_initial_state( chain_id: ChainId, block_info: BlockInfo, eth_fee_address: ContractAddress, strk_fee_address: ContractAddress, db_conn: pathfinder_storage::Connection, + initial_state: PathfinderExecutionState, ) -> anyhow::Result { let execution_state = ExecutionState::validation( chain_id, @@ -47,12 +98,12 @@ impl BlockExecutor { None, ); let storage_adapter = ConcurrentStorageAdapter::new(db_conn); - let executor = create_executor(storage_adapter, execution_state)?; - let initial_state = executor - .block_state - .as_ref() - .expect(BLOCK_STATE_ACCESS_ERR) - .clone(); + let mut executor = create_executor(storage_adapter, execution_state)?; + + // Set the initial state + if let Some(block_state) = executor.block_state.as_mut() { + *block_state = initial_state.clone(); + } Ok(Self { executor, @@ -62,16 +113,36 @@ impl BlockExecutor { }) } - /// Create a new BlockExecutor with a pre-existing initial state - /// This allows for executor 
chaining where the new executor starts with - /// the final state of a previous executor - pub fn new_with_initial_state( + /// Get the final state of the executor + /// This allows for state extraction before finalizing + #[cfg(test)] + fn get_final_state( + &self, + ) -> anyhow::Result> { + let final_state = self + .executor + .block_state + .as_ref() + .expect(BLOCK_STATE_ACCESS_ERR) + .clone(); + Ok(final_state) + } + + /// Get the current transaction index + /// This allows for tracking transaction indices across chained executors + #[cfg(test)] + pub fn get_transaction_index(&self) -> usize { + self.next_txn_idx + } +} + +impl BlockExecutorExt for BlockExecutor { + fn new( chain_id: ChainId, block_info: BlockInfo, eth_fee_address: ContractAddress, strk_fee_address: ContractAddress, db_conn: pathfinder_storage::Connection, - initial_state: PathfinderExecutionState, ) -> anyhow::Result { let execution_state = ExecutionState::validation( chain_id, @@ -83,12 +154,12 @@ impl BlockExecutor { None, ); let storage_adapter = ConcurrentStorageAdapter::new(db_conn); - let mut executor = create_executor(storage_adapter, execution_state)?; - - // Set the initial state - if let Some(block_state) = executor.block_state.as_mut() { - *block_state = initial_state.clone(); - } + let executor = create_executor(storage_adapter, execution_state)?; + let initial_state = executor + .block_state + .as_ref() + .expect(BLOCK_STATE_ACCESS_ERR) + .clone(); Ok(Self { executor, @@ -101,7 +172,7 @@ impl BlockExecutor { /// Create a new BlockExecutor from a StateUpdate /// This allows reconstructing an executor from a stored state diff /// checkpoint - pub fn new_with_pending_state( + fn new_with_pending_state( chain_id: ChainId, block_info: BlockInfo, eth_fee_address: ContractAddress, @@ -135,7 +206,7 @@ impl BlockExecutor { } /// Evecute a batch of transactions in the current block. 
- pub fn execute( + fn execute( &mut self, txns: Vec, ) -> Result, TransactionExecutionError> { @@ -196,7 +267,7 @@ impl BlockExecutor { } /// Finalizes block execution and returns the state diff for the block. - pub fn finalize(self) -> anyhow::Result { + fn finalize(self) -> anyhow::Result { let Self { mut executor, initial_state, @@ -216,29 +287,9 @@ impl BlockExecutor { Ok(diff) } - /// Get the final state of the executor - /// This allows for state extraction before finalizing - pub fn get_final_state( - &self, - ) -> anyhow::Result> { - let final_state = self - .executor - .block_state - .as_ref() - .expect(BLOCK_STATE_ACCESS_ERR) - .clone(); - Ok(final_state) - } - - /// Get the current transaction index - /// This allows for tracking transaction indices across chained executors - pub fn get_transaction_index(&self) -> usize { - self.next_txn_idx - } - /// Set the transaction index /// This allows for setting the correct starting index for chained executors - pub fn set_transaction_index(&mut self, index: usize) { + fn set_transaction_index(&mut self, index: usize) { self.next_txn_idx = index; } @@ -251,7 +302,7 @@ impl BlockExecutor { /// updates). These changes are only needed when finalizing the proposal /// for commitment computation, not for intermediate diff extraction /// during batch execution. 
- pub fn extract_state_diff(&self) -> anyhow::Result { + fn extract_state_diff(&self) -> anyhow::Result { let current_state = self .executor .block_state @@ -272,7 +323,6 @@ impl BlockExecutor { #[cfg(test)] mod tests { - use pathfinder_common::state_update::StateUpdateData; use pathfinder_common::transaction::{L1HandlerTransaction, TransactionVariant}; use pathfinder_common::{ @@ -288,7 +338,7 @@ mod tests { use pathfinder_storage::StorageBuilder; use crate::execution_state::create_executor; - use crate::BlockExecutor; + use crate::{BlockExecutor, BlockExecutorExt as _}; // Fee token addresses (same as in pathfinder_rpc::context) const ETH_FEE_TOKEN_ADDRESS: ContractAddress = diff --git a/crates/executor/src/lib.rs b/crates/executor/src/lib.rs index 836d6b3c3d..ddc662c23b 100644 --- a/crates/executor/src/lib.rs +++ b/crates/executor/src/lib.rs @@ -13,7 +13,7 @@ pub(crate) mod state_reader; pub(crate) mod transaction; pub mod types; -pub use block::BlockExecutor; +pub use block::{BlockExecutor, BlockExecutorExt}; // re-export blockifier transaction type since it's exposed on our API pub use blockifier::blockifier_versioned_constants::{VersionedConstants, VersionedConstantsError}; pub use blockifier::transaction::account_transaction::{ diff --git a/crates/executor/src/types.rs b/crates/executor/src/types.rs index ca131b195f..bb2d97645f 100644 --- a/crates/executor/src/types.rs +++ b/crates/executor/src/types.rs @@ -48,6 +48,8 @@ pub struct Receipt { pub transaction_index: TransactionIndex, } +pub type ReceiptAndEvents = (Receipt, Vec); + #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct BlockInfo { pub number: BlockNumber, diff --git a/crates/p2p_proto/src/common.rs b/crates/p2p_proto/src/common.rs index ecb61f0c8c..ec35d2f954 100644 --- a/crates/p2p_proto/src/common.rs +++ b/crates/p2p_proto/src/common.rs @@ -25,6 +25,10 @@ use crate::{proto, ToProtobuf, TryFromProtobuf}; )] pub struct Hash(pub Felt); +impl Hash { + pub const ZERO: Self = Self(Felt::ZERO); +} 
+ impl std::fmt::Display for Hash { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) @@ -81,8 +85,9 @@ pub struct BlockId { pub hash: Hash, } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Dummy)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Dummy)] pub enum L1DataAvailabilityMode { + #[default] Calldata, Blob, } diff --git a/crates/p2p_proto/src/consensus.rs b/crates/p2p_proto/src/consensus.rs index 7f28aaf150..9d408d35e2 100644 --- a/crates/p2p_proto/src/consensus.rs +++ b/crates/p2p_proto/src/consensus.rs @@ -1,4 +1,4 @@ -use fake::Dummy; +use fake::{Dummy, Fake as _}; use pathfinder_crypto::Felt; use prost::Message; use proto::consensus::consensus as consensus_proto; @@ -86,13 +86,13 @@ pub struct TransactionBatch { pub transactions: Vec, } -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] #[protobuf(name = "consensus_proto::TransactionsFin")] pub struct TransactionsFin { pub executed_transaction_count: u64, } -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf)] #[protobuf(name = "consensus_proto::BlockInfo")] pub struct BlockInfo { pub block_number: u64, @@ -105,7 +105,23 @@ pub struct BlockInfo { pub l1_da_mode: L1DataAvailabilityMode, } -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +impl Dummy for BlockInfo { + fn dummy_with_rng(_: &T, rng: &mut R) -> Self { + Self { + block_number: rng.gen_range(0..i64::MAX) as u64, + builder: fake::Faker.fake_with_rng(rng), + timestamp: rng.gen_range(0..i64::MAX) as u64, + // Keep the prices low enough to avoid overflow when converting between fri and wei + l2_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, + l1_gas_price_wei: rng.gen_range(1..i64::MAX) as u128, + l1_data_gas_price_wei: rng.gen_range(1..i64::MAX) as u128, + eth_to_strk_rate: 
rng.gen_range(1..i64::MAX) as u128, + l1_da_mode: fake::Faker.fake_with_rng(rng), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf)] #[protobuf(name = "consensus_proto::ProposalCommitment")] pub struct ProposalCommitment { pub block_number: u64, @@ -128,6 +144,32 @@ pub struct ProposalCommitment { pub l1_da_mode: L1DataAvailabilityMode, } +impl Dummy for ProposalCommitment { + fn dummy_with_rng(_: &T, rng: &mut R) -> Self { + Self { + block_number: rng.gen_range(0..i64::MAX) as u64, + parent_commitment: fake::Faker.fake_with_rng(rng), + builder: fake::Faker.fake_with_rng(rng), + timestamp: rng.gen_range(0..i64::MAX) as u64, + protocol_version: "0.14.1".to_string(), + old_state_root: fake::Faker.fake_with_rng(rng), + version_constant_commitment: fake::Faker.fake_with_rng(rng), + state_diff_commitment: fake::Faker.fake_with_rng(rng), + transaction_commitment: fake::Faker.fake_with_rng(rng), + event_commitment: fake::Faker.fake_with_rng(rng), + receipt_commitment: fake::Faker.fake_with_rng(rng), + concatenated_counts: fake::Faker.fake_with_rng(rng), + // Keep the prices low enough to avoid overflow when converting between fri and wei + l1_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, + l1_data_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, + l2_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, + l2_gas_used: rng.gen_range(1..i64::MAX) as u128, + next_l2_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, + l1_da_mode: fake::Faker.fake_with_rng(rng), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] #[protobuf(name = "consensus_proto::StreamMessage")] pub struct StreamMessage { @@ -281,6 +323,86 @@ impl ProposalPart { None } } + + pub fn is_proposal_init(&self) -> bool { + matches!(self, Self::Init(_)) + } + + pub fn is_block_info(&self) -> bool { + matches!(self, Self::BlockInfo(_)) + } + + pub fn is_transaction_batch(&self) -> bool { + matches!(self, Self::TransactionBatch(_)) + } + + pub 
fn is_transactions_fin(&self) -> bool { + matches!(self, Self::TransactionsFin(_)) + } + + pub fn is_proposal_commitment(&self) -> bool { + matches!(self, Self::ProposalCommitment(_)) + } + + pub fn is_proposal_fin(&self) -> bool { + matches!(self, Self::Fin(_)) + } + + pub fn variant_name(&self) -> &'static str { + match self { + Self::Init(_) => "Init", + Self::Fin(_) => "Fin", + Self::BlockInfo(_) => "BlockInfo", + Self::TransactionBatch(_) => "TransactionBatch", + Self::TransactionsFin(_) => "TransactionsFin", + Self::ProposalCommitment(_) => "ProposalCommitment", + } + } +} + +impl TryFrom for TransactionsFin { + type Error = std::io::Error; + + fn try_from(value: ProposalPart) -> Result { + if let ProposalPart::TransactionsFin(fin) = value { + Ok(fin) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "ProposalPart is not a TransactionsFin variant", + )) + } + } +} + +impl TryFrom for ProposalCommitment { + type Error = std::io::Error; + + fn try_from(value: ProposalPart) -> Result { + if let ProposalPart::ProposalCommitment(commitment) = value { + Ok(commitment) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "ProposalPart is not a ProposalCommitment variant", + )) + } + } +} + +impl TryFrom for ProposalFin { + type Error = std::io::Error; + + fn try_from(value: ProposalPart) -> Result { + if let ProposalPart::Fin(fin) = value { + Ok(fin) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "ProposalPart is not a Fin variant", + )) + } + } } impl ToProtobuf for TransactionVariant { diff --git a/crates/p2p_proto/src/transaction.rs b/crates/p2p_proto/src/transaction.rs index 77aa64fe68..b8129744dd 100644 --- a/crates/p2p_proto/src/transaction.rs +++ b/crates/p2p_proto/src/transaction.rs @@ -5,13 +5,22 @@ use crate::class::Cairo1Class; use crate::common::{Address, Hash, VolitionDomain}; use crate::{ToProtobuf, TryFromProtobuf}; -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, 
TryFromProtobuf, Dummy)] +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf)] #[protobuf(name = "crate::proto::transaction::ResourceLimits")] pub struct ResourceLimits { pub max_amount: Felt, pub max_price_per_unit: Felt, } +impl Dummy for ResourceLimits { + fn dummy_with_rng(_: &T, rng: &mut R) -> Self { + Self { + max_amount: Felt::from_u64(rng.gen()), + max_price_per_unit: Felt::from_u128(rng.gen()), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] #[protobuf(name = "crate::proto::transaction::ResourceBounds")] pub struct ResourceBounds { diff --git a/crates/pathfinder/proptest-regressions/consensus/inner/p2p_task/handler_proptest.txt b/crates/pathfinder/proptest-regressions/consensus/inner/p2p_task/handler_proptest.txt new file mode 100644 index 0000000000..e0f9d9a5f2 --- /dev/null +++ b/crates/pathfinder/proptest-regressions/consensus/inner/p2p_task/handler_proptest.txt @@ -0,0 +1,11 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc 65ab450aa5ac3b3cb9fd05cc0073fed253ad757338c6bffb242cf6919353f15b # shrinks to (proposal_type, seed) = (StructurallyInvalidExecutionOk, 14774658799269555950) +cc bd3644c258f2b87d94ad2fefacf8b776dc6dc858d32e40ae4e689eb7dd04901a # shrinks to (proposal_type, seed) = (StructurallyValidNonEmptyExecutionFails, 8467432240279251058) +cc 8773afd684ac46fd41aafb884b7679f57a4450fe3b297fd8a9efd0390d8959d8 # shrinks to (proposal_type, seed) = (StructurallyInvalidExecutionOk, 9181708935074874480) +cc 102b2839e4f00df2f7d70c33871ab16a1a23bae6230551778c7335855a33d360 # shrinks to (proposal_type, seed) = (StructurallyInvalidExecutionOk, 10609718920510075896) +cc b33e8ec389839f514689ca7e29df1e11a2254e917de4e3077485e97ddf747eb3 # shrinks to (proposal_type, seed) = (StructurallyInvalidExecutionFails, 8605337213757032975) diff --git a/crates/pathfinder/proptest-regressions/p2p_network/sync/sync_handlers/tests.txt b/crates/pathfinder/proptest-regressions/p2p_network/sync/sync_handlers/tests.txt new file mode 100644 index 0000000000..31daaf08ff --- /dev/null +++ b/crates/pathfinder/proptest-regressions/p2p_network/sync/sync_handlers/tests.txt @@ -0,0 +1,16 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc bc13615220330a9aa3dcbad1f71a6b8c74a7d14e36ffa93a182243c593cc866c # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (1, 8902626232982046058, 0, 1, Step(1), Forward) +cc 57069d20aea6c1421adac0b72bef7431b6c650f5f41dd6d3b16c2da0121486c3 # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (1, 649405005586826819, 0, 1, Step(1), Forward) +cc bfb2d0ca5bc6271afdb10db6bb941552417549d62313cfdab5b1c8d63bed4559 # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (7, 1516226182208585536, 0, 1, Step(1), Forward) +cc 362172e92f8c3bb8b57add0452a53575bef5640a22e0d9cfcabe821c5150086f # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (4, 1507789758493800495, 1, 3, Step(1), Backward) +cc 3c0631f4271587b05d7638c8f95a767a85062d1ffb771167a3b24028376315df # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (7, 9090751796217969733, 1, 4, Step(1), Backward) +cc e61a757eb84e98a3e8429942c16b6937603d36bd6272a92db52a392df2370a84 # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (9, 12221019298661150784, 5, 3, Step(1), Backward) +cc 86c701dc281422d164cfcdd813470d0908f8da74089472c547085c89fd4fc74b # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (11, 16005500644522549812, 0, 5, Step(1), Forward) +cc 88947174b63dc40a8ecadc8258db12c16449fe512c4729e350ded4c7b4a34baf # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (0, 0, 0, 1, Step(1), Forward) +cc 48a4cce9020765acde8c0046cc73e72ef238865b8712045d0a95c23fb4062070 # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (0, 0, 0, 1, Step(1), Forward) +cc bb0bb73a6e6719184832c149727d3e166cda4c891355f25ba8f8b4ed839ea3c2 # shrinks to (num_blocks, seed, start_block, limit, step, direction) = (0, 0, 0, 1, Step(1), Forward) diff --git a/crates/pathfinder/src/consensus/inner.rs b/crates/pathfinder/src/consensus/inner.rs index eba229bf5d..1b54ca503e 100644 --- 
a/crates/pathfinder/src/consensus/inner.rs +++ b/crates/pathfinder/src/consensus/inner.rs @@ -8,9 +8,6 @@ mod integration_testing; mod p2p_task; mod persist_proposals; -#[cfg(all(test, feature = "p2p"))] -mod p2p_task_tests; - #[cfg(test)] mod test_helpers; diff --git a/crates/pathfinder/src/consensus/inner/batch_execution.rs b/crates/pathfinder/src/consensus/inner/batch_execution.rs index 2767bb5f51..20c2ab9acc 100644 --- a/crates/pathfinder/src/consensus/inner/batch_execution.rs +++ b/crates/pathfinder/src/consensus/inner/batch_execution.rs @@ -10,9 +10,10 @@ use anyhow::Context; use p2p::consensus::HeightAndRound; use p2p_proto::consensus as proto_consensus; use pathfinder_common::{BlockId, BlockNumber}; +use pathfinder_executor::BlockExecutorExt; use pathfinder_storage::Transaction as DbTransaction; -use crate::validator::ValidatorTransactionBatchStage; +use crate::validator::{TransactionExt, ValidatorTransactionBatchStage}; /// Manages batch execution with rollback support for TransactionsFin #[derive(Debug, Clone)] @@ -68,11 +69,11 @@ impl BatchExecutionManager { /// Process a transaction batch with deferral support /// /// This is the main method that should be used by the P2P task - pub fn process_batch_with_deferral( + pub fn process_batch_with_deferral( &mut self, height_and_round: HeightAndRound, transactions: Vec, - validator: &mut ValidatorTransactionBatchStage, + validator: &mut ValidatorTransactionBatchStage, db_tx: &DbTransaction<'_>, deferred_executions: &mut HashMap, ) -> anyhow::Result<()> { @@ -95,7 +96,7 @@ impl BatchExecutionManager { // Execute any previously deferred transactions first let deferred = deferred_executions.remove(&height_and_round); let deferred_txns_len = deferred.as_ref().map_or(0, |d| d.transactions.len()); - let deferred_transactions_fin = deferred.as_ref().and_then(|d| d.transactions_fin.clone()); + let deferred_transactions_fin = deferred.as_ref().and_then(|d| d.transactions_fin); let mut all_transactions = transactions; 
if let Some(DeferredExecution { @@ -108,7 +109,7 @@ impl BatchExecutionManager { // Execute the batch validator - .execute_batch(all_transactions) + .execute_batch::(all_transactions) .context("Failed to execute transaction batch")?; // Mark that execution has started for this height/round @@ -131,7 +132,7 @@ impl BatchExecutionManager { "Processing deferred TransactionsFin for {height_and_round} after batch execution \ started" ); - self.process_transactions_fin(height_and_round, transactions_fin, validator)?; + self.process_transactions_fin::(height_and_round, transactions_fin, validator)?; } Ok(()) @@ -143,11 +144,11 @@ impl BatchExecutionManager { /// know execution should proceed immediately (e.g., when executing /// previously deferred transactions after the parent block is /// committed). - pub fn execute_batch( + pub fn execute_batch( &mut self, height_and_round: HeightAndRound, transactions: Vec, - validator: &mut ValidatorTransactionBatchStage, + validator: &mut ValidatorTransactionBatchStage, ) -> anyhow::Result<()> { // Mark that execution has started for this height/round, even if batch is // empty. This is necessary because TransactionsFin may arrive later and @@ -164,7 +165,7 @@ impl BatchExecutionManager { // Execute the batch validator - .execute_batch(transactions) + .execute_batch::(transactions) .context("Failed to execute transaction batch")?; tracing::debug!( @@ -180,11 +181,11 @@ impl BatchExecutionManager { /// execution has already started (at least one batch executed). If /// transactions are deferred, deferral should be handled by the caller /// before calling this function. 
- pub fn process_transactions_fin( + pub fn process_transactions_fin( &mut self, height_and_round: HeightAndRound, transactions_fin: proto_consensus::TransactionsFin, - validator: &mut ValidatorTransactionBatchStage, + validator: &mut ValidatorTransactionBatchStage, ) -> anyhow::Result<()> { // Verify that execution has started (at least one batch was executed, not // deferred) @@ -218,7 +219,7 @@ impl BatchExecutionManager { .checked_sub(1) .context("Cannot rollback to 0 transactions")?; validator - .rollback_to_transaction(target_index) + .rollback_to_transaction::(target_index) .context("Failed to rollback to target transaction count")?; } else if target_transaction_count > current_transaction_count { // This shouldn't happen with proper message ordering and no protocol errors. @@ -311,8 +312,10 @@ pub fn should_defer_execution( #[cfg(test)] mod tests { use pathfinder_crypto::Felt; + use pathfinder_executor::BlockExecutor; use super::*; + use crate::validator::ProdTransactionMapper; /// Helper function to create a committed parent block in storage fn create_committed_parent_block( @@ -407,7 +410,7 @@ mod tests { }; let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage) + ValidatorTransactionBatchStage::::new(chain_id, block_info, storage) .expect("Failed to create validator stage"); let mut batch_execution_manager = BatchExecutionManager::new(); @@ -426,7 +429,11 @@ mod tests { // Execute a batch to start execution let transactions = create_transaction_batch(0, 5, chain_id); batch_execution_manager - .execute_batch(height_and_round, transactions, &mut validator_stage) + .execute_batch::( + height_and_round, + transactions, + &mut validator_stage, + ) .expect("Failed to execute batch"); // Verify execution has started @@ -453,7 +460,11 @@ mod tests { executed_transaction_count: 5, }; batch_execution_manager - .process_transactions_fin(height_and_round, transactions_fin, &mut validator_stage) + .process_transactions_fin::( + 
height_and_round, + transactions_fin, + &mut validator_stage, + ) .expect("Failed to process TransactionsFin"); // Verify TransactionsFin is now marked as processed @@ -525,9 +536,12 @@ mod tests { starknet_version: StarknetVersion::new(0, 14, 0, 0), }; - let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator_stage = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); let mut batch_execution_manager = BatchExecutionManager::new(); let height_and_round = HeightAndRound::new(2, 1); @@ -555,7 +569,7 @@ mod tests { // Simulate the fix: create deferred entry and store TransactionsFin let deferred = deferred_executions.entry(height_and_round).or_default(); - deferred.transactions_fin = Some(transactions_fin.clone()); + deferred.transactions_fin = Some(transactions_fin); // Verify TransactionsFin was stored assert!( @@ -571,7 +585,7 @@ mod tests { let mut db_conn = storage.connection().unwrap(); let db_tx = db_conn.transaction().unwrap(); batch_execution_manager - .process_batch_with_deferral( + .process_batch_with_deferral::( height_and_round, transactions, &mut validator_stage, @@ -618,9 +632,12 @@ mod tests { let chain_id = ChainId::SEPOLIA_TESTNET; let block_info = create_test_block_info(1); - let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator_stage = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); let mut batch_execution_manager = BatchExecutionManager::new(); let height_and_round = HeightAndRound::new(2, 1); @@ -634,7 +651,7 @@ mod tests { let transactions = create_transaction_batch(0, 3, chain_id); batch_execution_manager - .process_batch_with_deferral( + 
.process_batch_with_deferral::( height_and_round, transactions, &mut validator_stage, @@ -673,7 +690,7 @@ mod tests { let transactions = create_transaction_batch(3, 2, chain_id); batch_execution_manager - .process_batch_with_deferral( + .process_batch_with_deferral::( height_and_round, transactions, &mut validator_stage, @@ -700,7 +717,7 @@ mod tests { // Test 3: Multiple batches with immediate execution (parent already committed) let height_and_round_2 = HeightAndRound::new(3, 1); - let mut validator_stage_2 = ValidatorTransactionBatchStage::new( + let mut validator_stage_2 = ValidatorTransactionBatchStage::::new( chain_id, create_test_block_info(2), storage.clone(), @@ -717,7 +734,7 @@ mod tests { for i in 0..3 { let transactions = create_transaction_batch(i * 2, 2, chain_id); batch_execution_manager - .process_batch_with_deferral( + .process_batch_with_deferral::( height_and_round_2, transactions, &mut validator_stage_2, @@ -753,9 +770,12 @@ mod tests { let chain_id = ChainId::SEPOLIA_TESTNET; let block_info = create_test_block_info(1); - let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator_stage = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); let mut batch_execution_manager = BatchExecutionManager::new(); let height_and_round = HeightAndRound::new(2, 1); @@ -766,13 +786,25 @@ mod tests { let batch3 = create_transaction_batch(10, 4, chain_id); batch_execution_manager - .execute_batch(height_and_round, batch1, &mut validator_stage) + .execute_batch::( + height_and_round, + batch1, + &mut validator_stage, + ) .expect("Failed to execute batch 1"); batch_execution_manager - .execute_batch(height_and_round, batch2, &mut validator_stage) + .execute_batch::( + height_and_round, + batch2, + &mut validator_stage, + ) .expect("Failed to execute batch 2"); 
batch_execution_manager - .execute_batch(height_and_round, batch3, &mut validator_stage) + .execute_batch::( + height_and_round, + batch3, + &mut validator_stage, + ) .expect("Failed to execute batch 3"); assert_eq!( @@ -788,7 +820,11 @@ mod tests { }; batch_execution_manager - .process_transactions_fin(height_and_round, transactions_fin, &mut validator_stage) + .process_transactions_fin::( + height_and_round, + transactions_fin, + &mut validator_stage, + ) .expect("Failed to process TransactionsFin"); assert!( @@ -805,9 +841,12 @@ mod tests { // Test 2: Rollback case - TransactionsFin indicates fewer transactions // Re-execute batches to get back to 14 transactions let storage_2 = StorageBuilder::in_tempdir().expect("Failed to create temp database"); - let mut validator_stage_2 = - ValidatorTransactionBatchStage::new(chain_id, create_test_block_info(1), storage_2) - .expect("Failed to create validator stage"); + let mut validator_stage_2 = ValidatorTransactionBatchStage::::new( + chain_id, + create_test_block_info(1), + storage_2, + ) + .expect("Failed to create validator stage"); let batch1_2 = create_transaction_batch(0, 3, chain_id); let batch2_2 = create_transaction_batch(3, 7, chain_id); @@ -815,13 +854,25 @@ mod tests { let height_and_round_2 = HeightAndRound::new(3, 1); batch_execution_manager - .execute_batch(height_and_round_2, batch1_2, &mut validator_stage_2) + .execute_batch::( + height_and_round_2, + batch1_2, + &mut validator_stage_2, + ) .expect("Failed to execute batch 1"); batch_execution_manager - .execute_batch(height_and_round_2, batch2_2, &mut validator_stage_2) + .execute_batch::( + height_and_round_2, + batch2_2, + &mut validator_stage_2, + ) .expect("Failed to execute batch 2"); batch_execution_manager - .execute_batch(height_and_round_2, batch3_2, &mut validator_stage_2) + .execute_batch::( + height_and_round_2, + batch3_2, + &mut validator_stage_2, + ) .expect("Failed to execute batch 3"); let transactions_fin_rollback = TransactionsFin { 
@@ -829,7 +880,7 @@ mod tests { }; batch_execution_manager - .process_transactions_fin( + .process_transactions_fin::( height_and_round_2, transactions_fin_rollback, &mut validator_stage_2, @@ -860,7 +911,7 @@ mod tests { let block_info = create_test_block_info(1); let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage) + ValidatorTransactionBatchStage::::new(chain_id, block_info, storage) .expect("Failed to create validator stage"); let mut batch_execution_manager = BatchExecutionManager::new(); @@ -868,7 +919,11 @@ mod tests { // Empty batch still marks execution as started batch_execution_manager - .execute_batch(height_and_round, vec![], &mut validator_stage) + .execute_batch::( + height_and_round, + vec![], + &mut validator_stage, + ) .expect("Failed to execute empty batch"); assert!( @@ -887,7 +942,11 @@ mod tests { }; batch_execution_manager - .process_transactions_fin(height_and_round, transactions_fin, &mut validator_stage) + .process_transactions_fin::( + height_and_round, + transactions_fin, + &mut validator_stage, + ) .expect("Failed to process TransactionsFin after empty batch"); assert!( diff --git a/crates/pathfinder/src/consensus/inner/consensus_task.rs b/crates/pathfinder/src/consensus/inner/consensus_task.rs index 449a039526..4500740fe4 100644 --- a/crates/pathfinder/src/consensus/inner/consensus_task.rs +++ b/crates/pathfinder/src/consensus/inner/consensus_task.rs @@ -18,7 +18,6 @@ use anyhow::Context; use p2p::consensus::HeightAndRound; use p2p_proto::common::{Address, Hash, L1DataAvailabilityMode}; use p2p_proto::consensus::{ - BlockInfo, ProposalCommitment as ProposalCommitmentProto, ProposalFin, ProposalInit, @@ -54,11 +53,6 @@ use super::fetch_validators::L2ValidatorSetProvider; use super::{integration_testing, ConsensusTaskEvent, ConsensusValue, HeightExt, P2PTaskEvent}; use crate::config::integration_testing::InjectFailureConfig; use crate::config::ConsensusConfig; -use crate::state::block_hash::{ - 
calculate_event_commitment, - calculate_receipt_commitment, - calculate_transaction_commitment, -}; use crate::validator::{FinalizedBlock, ValidatorBlockInfoStage}; #[allow(clippy::too_many_arguments)] @@ -381,10 +375,12 @@ fn start_height( } } -/// Create an empty proposal for the given height and round. Returns -/// proposal parts that can be gossiped via P2P network and the -/// finalized block that corresponds to this proposal. -fn create_empty_proposal( +/// Create an empty proposal for the given height and round. Returns proposal +/// parts that can be gossiped via P2P network and the finalized block that +/// corresponds to this proposal. +/// +/// https://github.com/starknet-io/starknet-p2p-specs/blob/main/p2p/proto/consensus/consensus.md#empty-proposals +pub(crate) fn create_empty_proposal( chain_id: ChainId, height: u64, round: Round, @@ -403,16 +399,6 @@ fn create_empty_proposal( valid_round: None, proposer, }; - let block_info = BlockInfo { - block_number: height, - timestamp, - builder: proposer, - l1_da_mode: L1DataAvailabilityMode::Calldata, - l2_gas_price_fri: 1, - l1_gas_price_wei: 1_000_000_000, - l1_data_gas_price_wei: 1, - eth_to_strk_rate: 1_000_000_000, - }; let current_block = BlockNumber::new(height).context("Invalid height")?; let parent_proposal_commitment_hash = if let Some(parent_number) = current_block.parent() { let mut db_conn = storage @@ -430,9 +416,39 @@ fn create_empty_proposal( BlockHash::ZERO }; + // The only version handled by consensus, so far + let starknet_version = StarknetVersion::new(0, 14, 0, 0); + + // Empty proposal is strictly defined in the spec: + // https://github.com/starknet-io/starknet-p2p-specs/blob/main/p2p/proto/consensus/consensus.md#empty-proposals + let proposal_commitment = ProposalCommitmentProto { + block_number: height, + parent_commitment: Hash(parent_proposal_commitment_hash.0), + builder: proposer, + timestamp, + protocol_version: starknet_version.to_string(), + // TODO required by the spec + 
old_state_root: Default::default(), + // TODO required by the spec + version_constant_commitment: Default::default(), + state_diff_commitment: Hash::ZERO, + transaction_commitment: Hash::ZERO, + event_commitment: Hash::ZERO, + receipt_commitment: Hash::ZERO, + // TODO should contain len of version_constant_commitment + concatenated_counts: Default::default(), + l1_gas_price_fri: 0, + l1_data_gas_price_fri: 0, + l2_gas_price_fri: 0, + l2_gas_used: 0, + // TODO keep the value from the last block as per spec + next_l2_gas_price_fri: 0, + // Equivalent to zero on the wire + l1_da_mode: L1DataAvailabilityMode::default(), + }; + let validator = ValidatorBlockInfoStage::new(chain_id, proposal_init.clone())? - .validate_consensus_block_info(block_info.clone(), storage.clone())?; - let validator = validator.consensus_finalize0()?; + .verify_proposal_commitment(&proposal_commitment)?; let mut db_conn = storage .connection() .context("Creating database connection")?; @@ -442,40 +458,9 @@ fn create_empty_proposal( let finalized_block = validator.finalize(db_txn, storage.clone())?; let proposal_commitment_hash = Hash(finalized_block.header.state_diff_commitment.0); - // The only version handled by consensus, so far - let starknet_version = StarknetVersion::new(0, 14, 0, 0); - let transactions = vec![]; - let transaction_commitment = calculate_transaction_commitment(&transactions, starknet_version)?; - let transaction_events = vec![]; - let event_commitment = calculate_event_commitment(&transaction_events, starknet_version)?; - let receipts = vec![]; - let receipt_commitment = calculate_receipt_commitment(&receipts)?; - let proposal_commitment = ProposalCommitmentProto { - block_number: height, - parent_commitment: Hash(parent_proposal_commitment_hash.0), - builder: proposer, - timestamp, - protocol_version: starknet_version.to_string(), - old_state_root: Default::default(), // not used by 0.14.0 - version_constant_commitment: Default::default(), // TODO - state_diff_commitment: 
proposal_commitment_hash, - transaction_commitment: Hash(transaction_commitment.0), - event_commitment: Hash(event_commitment.0), - receipt_commitment: Hash(receipt_commitment.0), - concatenated_counts: Default::default(), // should be the sum of lengths of inputs to *_commitment - l1_gas_price_fri: 1000, - l1_data_gas_price_fri: 2000, - l2_gas_price_fri: 3000, - l2_gas_used: 4000, - next_l2_gas_price_fri: 3000, - l1_da_mode: L1DataAvailabilityMode::Calldata, - }; - Ok(( vec![ ProposalPart::Init(proposal_init), - ProposalPart::BlockInfo(block_info), - // Note: Per spec, empty proposals skip TransactionBatch entirely. ProposalPart::ProposalCommitment(proposal_commitment), ProposalPart::Fin(ProposalFin { proposal_commitment: proposal_commitment_hash, @@ -509,8 +494,8 @@ mod tests { // Verify proposal structure assert!( - proposal_parts.len() >= 4, - "Empty proposal should have at least Init, BlockInfo, ProposalCommitment, and Fin" + proposal_parts.len() == 3, + "Empty proposal should have exactly Init, ProposalCommitment, and Fin" ); // Verify it starts with Init @@ -519,10 +504,10 @@ mod tests { "First part should be ProposalInit" ); - // Verify it has BlockInfo + // Verify it has ProposalCommitment assert!( - matches!(proposal_parts[1], ProposalPart::BlockInfo(_)), - "Second part should be BlockInfo" + matches!(proposal_parts[1], ProposalPart::ProposalCommitment(_)), + "Second part should be ProposalCommitment" ); // Verify it ends with Fin diff --git a/crates/pathfinder/src/consensus/inner/p2p_task.rs b/crates/pathfinder/src/consensus/inner/p2p_task.rs index 596f559fee..9c27392b2b 100644 --- a/crates/pathfinder/src/consensus/inner/p2p_task.rs +++ b/crates/pathfinder/src/consensus/inner/p2p_task.rs @@ -19,7 +19,7 @@ use anyhow::Context; use p2p::consensus::{Client, Event, HeightAndRound}; use p2p::libp2p::gossipsub::PublishError; use p2p_proto::common::{Address, Hash}; -use p2p_proto::consensus::{ProposalFin, ProposalInit, ProposalPart}; +use 
p2p_proto::consensus::{BlockInfo, ProposalFin, ProposalInit, ProposalPart, TransactionsFin}; use pathfinder_common::{BlockId, ChainId, ContractAddress, ProposalCommitment}; use pathfinder_consensus::{ ConsensusCommand, @@ -30,6 +30,7 @@ use pathfinder_consensus::{ SignedProposal, SignedVote, }; +use pathfinder_executor::{BlockExecutor, BlockExecutorExt}; use pathfinder_storage::{Storage, Transaction, TransactionBehavior}; use tokio::sync::mpsc; @@ -43,7 +44,18 @@ use crate::consensus::inner::batch_execution::{ }; use crate::consensus::inner::persist_proposals::ConsensusProposals; use crate::consensus::inner::ConsensusValue; -use crate::validator::{FinalizedBlock, ValidatorBlockInfoStage, ValidatorStage}; +use crate::validator::{ + FinalizedBlock, + ProdTransactionMapper, + TransactionExt, + ValidatorBlockInfoStage, + ValidatorStage, +}; + +#[cfg(test)] +mod handler_proptest; +#[cfg(test)] +mod p2p_task_tests; // Successful result of handling an incoming message in a dedicated // thread; carried data are used for async handling (e.g. gossiping). @@ -73,8 +85,6 @@ pub fn spawn( inject_failure: Option, ) -> tokio::task::JoinHandle> { let validator_address = config.my_validator_address; - // TODO validators are long-lived but not persisted - let validator_cache = ValidatorCache::new(); // Contains transaction batches and proposal finalizations that are // waiting for previous block to be committed before they can be executed. 
let deferred_executions = Arc::new(Mutex::new(HashMap::new())); @@ -89,12 +99,44 @@ pub fn spawn( util::task::spawn(async move { let readonly_storage = storage.clone(); + let main_storage2 = storage.clone(); + let consensus_storage2 = consensus_storage.clone(); let mut db_conn = storage .connection() .context("Creating database connection")?; let mut cons_conn = consensus_storage .connection() .context("Creating consensus database connection")?; + // TODO validators are long-lived but not persisted, and the recovery process + // right now works by re-executing all last proposals from the database + // for all heights found in the database. + let validator_cache = util::task::spawn_blocking(move |_| { + tracing::info!( + "🖧 🔧 {validator_address} Recovering validator cache from the database ..." + ); + let mut cons_conn = consensus_storage2 + .connection() + .context("Creating consensus database connection")?; + let consensus_db_tx = cons_conn + .transaction() + .context("Create consensus database transaction")?; + let stopwatch = std::time::Instant::now(); + let cache = ValidatorCache::::recover::( + main_storage2, + consensus_db_tx, + &validator_address, + chain_id, + )?; + tracing::info!( + "🖧 🔧 {validator_address} Recovered validator cache from the database in {} ms", + stopwatch.elapsed().as_millis() + ); + anyhow::Ok(cache) + }) + .await + .context("Joining blocking task")? + .context("Recovering validator cache")?; + loop { let p2p_task_event = tokio::select! 
{ p2p_event = p2p_event_rx.recv() => { @@ -155,7 +197,10 @@ pub fn spawn( Event::Proposal(height_and_round, proposal_part) => { let vcache = validator_cache.clone(); let dex = deferred_executions.clone(); - let result = handle_incoming_proposal_part( + let result = handle_incoming_proposal_part::< + BlockExecutor, + ProdTransactionMapper, + >( chain_id, validator_address, height_and_round, @@ -415,7 +460,10 @@ pub fn spawn( anyhow::Ok(()) }?; - let exec_success = execute_deferred_for_next_height( + let exec_success = execute_deferred_for_next_height::< + BlockExecutor, + ProdTransactionMapper, + >( height_and_round, validator_cache.clone(), deferred_executions.clone(), @@ -530,20 +578,26 @@ pub fn spawn( }) } -#[derive(Clone)] -struct ValidatorCache(Arc>>); +struct ValidatorCache(Arc>>>); + +impl Clone for ValidatorCache { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } +} -impl ValidatorCache { +impl ValidatorCache { + #[cfg(test)] fn new() -> Self { Self(Arc::new(Mutex::new(HashMap::new()))) } - fn insert(&mut self, hnr: HeightAndRound, stage: ValidatorStage) { + fn insert(&mut self, hnr: HeightAndRound, stage: ValidatorStage) { let mut cache = self.0.lock().unwrap(); cache.insert(hnr, stage); } - fn remove(&mut self, hnr: &HeightAndRound) -> anyhow::Result { + fn remove(&mut self, hnr: &HeightAndRound) -> anyhow::Result> { let mut cache = self.0.lock().unwrap(); cache .remove(hnr) @@ -551,9 +605,223 @@ impl ValidatorCache { } } -fn execute_deferred_for_next_height( +impl ValidatorCache { + /// Reads the proposal parts for last rounds for all available heights from + /// the database and reconstructs the validator stages for those heights and + /// rounds. + /// + /// # Important + /// + /// This function re-executes all the proposal parts read from the database + /// to reconstruct the validator stage. Hence it is considered + /// **computationally expensive** and must be called from **blocking + /// context**. 
+ fn recover( + main_storage: Storage, + consensus_db_tx: Transaction<'_>, + validator_address: &ContractAddress, + chain_id: ChainId, + ) -> anyhow::Result { + let proposals_db: ConsensusProposals<'_> = ConsensusProposals::new(&consensus_db_tx); + let all_last_parts = proposals_db.all_last_parts(validator_address)?; + let mut cache = HashMap::new(); + + for (height, round, parts) in all_last_parts { + tracing::info!( + "Reconstructing validator stage for height {height} and round {round} ..." + ); + let stopwatch = std::time::Instant::now(); + let validator = Self::recover_one::( + main_storage.clone(), + height, + round, + parts, + validator_address, + chain_id, + )?; + tracing::info!( + "Reconstructed validator stage for height {height} and round {round} in {} ms", + stopwatch.elapsed().as_millis() + ); + cache.insert(HeightAndRound::new(height, round), validator); + } + Ok(Self(Arc::new(Mutex::new(cache)))) + } + + /// Reads the latest proposal parts from the database and reconstructs the + /// validator stage. + /// + /// # Important + /// + /// This function re-executes all the proposal parts to reconstruct the + /// validator stage. Hence it is considered **computationally expensive** + /// and must be called from **blocking context**. + fn recover_one( + main_storage: Storage, + height: u64, + round: u32, + parts: Vec, + validator_address: &ContractAddress, + chain_id: ChainId, + ) -> anyhow::Result> { + let hnr = HeightAndRound::new(height, round); + + if parts.is_empty() { + anyhow::bail!( + "No proposal parts found for height and round {hnr} for validator \ + {validator_address}. 
This is a fatal inconsistency in the consensus_proposals \ + table.", + ); + } + + let mut parts = parts.into_iter(); + + let Some(ProposalPart::Init(init)) = parts.next() else { + anyhow::bail!("Proposal init expected!"); + }; + + let validator = ValidatorBlockInfoStage::new(chain_id, init)?; + let Some(part) = parts.next() else { + return Ok(validator.into()); + }; + + match part { + ProposalPart::BlockInfo(block_info) => { + Self::recover_non_empty::(main_storage, validator, block_info, parts, hnr) + } + ProposalPart::ProposalCommitment(proposal_commitment) => Ok(validator + .verify_proposal_commitment(&proposal_commitment)? + .into()), + _ => { + anyhow::bail!( + "Unexpected proposal part for height and round {hnr}. Expected BlockInfo or \ + ProposalCommitment, got {}. This is a fatal inconsistency in the \ + consensus_proposals table.", + part.variant_name() + ); + } + } + } + + fn recover_non_empty( + main_storage: Storage, + validator: ValidatorBlockInfoStage, + block_info: BlockInfo, + mut parts: impl Iterator, + hnr: HeightAndRound, + ) -> anyhow::Result> { + let mut validator = + validator.validate_consensus_block_info::(block_info, main_storage)?; + // When parsing input from the network we always enforce the following order of + // proposal parts: + // 1. Proposal Init + // 2. Block Info for non-empty proposals (or Proposal Commitment for empty + // proposals) + // 3. In random order: at least one Transaction Batch, Proposal Commitment, + // Transactions Fin + // 4. Proposal Fin + // + // However, when reconstructing the validator stage from the database, we make + // our life easier and reorder the parts according to the spec, which is + // stricter about the ordering. 
+ let executed_txns: usize = parts + .by_ref() + .find_map(|part| { + part.is_transactions_fin() + .then_some(part.try_into().expect("TransactionsFin")) + }) + .map( + |TransactionsFin { + executed_transaction_count: x, + }| { x.try_into().expect("ptr size is 64 bits") }, + ) + .unwrap_or(usize::MAX); + let proposal_commitment: Option = + parts.by_ref().find_map(|part| { + part.is_proposal_commitment() + .then_some(part.try_into().expect("ProposalCommitment")) + }); + let proposal_fin: Option = parts.by_ref().find_map(|part| { + part.is_proposal_fin() + .then_some(part.try_into().expect("Fin")) + }); + + if proposal_fin.is_some() && proposal_commitment.is_none() { + anyhow::bail!( + "Unexpected proposal fin without commitment for height and round {hnr}. This is a \ + fatal inconsistency in the consensus_proposals table." + ); + } + + let mut total_txn_cnt = 0; + let mut _dgb_cnts = debug::Counts::default(); + + loop { + match parts.next() { + Some(ProposalPart::TransactionBatch(mut batch)) => { + if total_txn_cnt + batch.len() > executed_txns { + batch.truncate(executed_txns - total_txn_cnt); + validator.execute_batch::(batch)?; + break; + } else { + total_txn_cnt += batch.len(); + validator.execute_batch::(batch)?; + } + } + Some(part) => Self::debug_check_duplicates(part, hnr, &mut _dgb_cnts)?, + None => break, + } + } + + let Some(commitment) = proposal_commitment else { + return Ok(validator.into()); + }; + + validator.record_proposal_commitment(&commitment)?; + + let Some(ProposalFin { + proposal_commitment, + }) = proposal_fin + else { + return Ok(validator.into()); + }; + + let validator = validator.consensus_finalize(ProposalCommitment(proposal_commitment.0))?; + Ok(validator.into()) + } + + fn debug_check_duplicates( + _part: ProposalPart, + _hnr: HeightAndRound, + _cnts: &mut debug::Counts, + ) -> anyhow::Result<()> { + #[cfg(debug_assertions)] + { + match _part { + ProposalPart::TransactionBatch(_) => { + unreachable!("already handled in 
recover_non_empty") + } + // These can occur once + ProposalPart::TransactionsFin(_) => _cnts.txns_fin += 1, + ProposalPart::ProposalCommitment(_) => _cnts.commitment += 1, + ProposalPart::Fin(_) => _cnts.fin += 1, + // Another Init or BlockInfo is too much + _ => _cnts.init_info += 1, + }; + anyhow::ensure!( + !_cnts._got_duplicate(), + "Unexpected duplicate {} while reconstructing validator stage for height and \ + round {_hnr}. This is a fatal inconsistency in the consensus_proposals table.", + _part.variant_name() + ); + } + Ok(()) + } +} + +fn execute_deferred_for_next_height( height_and_round: HeightAndRound, - mut validator_cache: ValidatorCache, + mut validator_cache: ValidatorCache, deferred_executions: Arc>>, batch_execution_manager: &mut BatchExecutionManager, ) -> anyhow::Result> { @@ -581,7 +849,7 @@ fn execute_deferred_for_next_height( // Parent block is now committed, so we can execute directly without deferral // checks if !deferred.transactions.is_empty() { - batch_execution_manager.execute_batch( + batch_execution_manager.execute_batch::( hnr, deferred.transactions, &mut validator, @@ -597,7 +865,7 @@ fn execute_deferred_for_next_height( // transactions were non-empty). If transactions were empty, // execute_batch handles marking execution as started, so we can // process TransactionsFin immediately. - batch_execution_manager.process_transactions_fin( + batch_execution_manager.process_transactions_fin::( hnr, transactions_fin, &mut validator, @@ -730,13 +998,25 @@ fn commit_finalized_block( /// - a complete proposal has been received but it cannot be executed yet. /// /// Returns `Err` if there was an error processing the proposal part. +/// +/// # Important +/// +/// We always enforce the following order of proposal parts: +/// 1. Proposal Init +/// 2. Block Info for non-empty proposals (or Proposal Commitment for empty +/// proposals) +/// 3. In random order: at least one Transaction Batch, Proposal Commitment, +/// Transactions Fin +/// 4. 
Proposal Fin +/// +/// The [spec](https://github.com/starknet-io/starknet-p2p-specs/blob/main/p2p/proto/consensus/consensus.md#order-of-messages) is more restrictive. #[allow(clippy::too_many_arguments)] -fn handle_incoming_proposal_part( +fn handle_incoming_proposal_part( chain_id: ChainId, validator_address: ContractAddress, height_and_round: HeightAndRound, proposal_part: ProposalPart, - mut validator_cache: ValidatorCache, + mut validator_cache: ValidatorCache, deferred_executions: Arc>>, db_tx: &Transaction<'_>, storage: Storage, @@ -753,6 +1033,13 @@ fn handle_incoming_proposal_part( )? .unwrap_or_default(); + let has_txns_fin = parts + .iter() + .any(|part| matches!(part, ProposalPart::TransactionsFin(_))); + let has_commitment = parts + .iter() + .any(|part| matches!(part, ProposalPart::ProposalCommitment(_))); + // Does nothing in production builds. integration_testing::debug_fail_on_proposal_part( &proposal_part, @@ -770,7 +1057,14 @@ fn handle_incoming_proposal_part( parts.len() ); } - + // If this is a valid proposal, then this may be an empty proposal: + // - [x] Proposal Init + // - [ ] Proposal Commitment + // - [ ] Proposal Fin + // or the first part of a non-empty proposal: + // - [x] Proposal Init + // - [ ] Block Info + // (...) let proposal_init = prop_init.clone(); parts.push(proposal_part); let proposer_address = ContractAddress(proposal_init.proposer.0); @@ -793,26 +1087,16 @@ fn handle_incoming_proposal_part( parts.len() ); } - + // Looks like a non-empty proposal: + // - [x] Proposal Init + // - [x] Block Info + // (...) let validator_stage = validator_cache.remove(&height_and_round)?; let validator = validator_stage.try_into_block_info_stage()?; let block_info = block_info.clone(); - parts.push(proposal_part); - let ProposalPart::Init(ProposalInit { proposer, .. 
}) = - parts.first().expect("Proposal Init") - else { - unreachable!("Proposal Init is inserted first"); - }; + append_and_persist_part(height_and_round, proposal_part, proposals_db, &mut parts)?; - let proposer_address = ContractAddress(proposer.0); - let updated = proposals_db.persist_parts( - height_and_round.height(), - height_and_round.round(), - &proposer_address, - &parts, - )?; - assert!(updated); let new_validator = validator.validate_consensus_block_info(block_info, storage)?; validator_cache.insert( height_and_round, @@ -830,6 +1114,21 @@ fn handle_incoming_proposal_part( ); } + if tx_batch.is_empty() { + anyhow::bail!( + "Received empty TransactionBatch for height and round {} at position {}", + height_and_round, + parts.len() + ); + } + // Looks like a non-empty proposal: + // - [x] Proposal Init + // - [x] Block Info + // - [ ] in any order: + // - [x] at least one Transaction Batch + // - [?] Transactions Fin + // - [?] Proposal Commitment + // - [ ] Proposal Fin tracing::debug!( "🖧 ⚙️ executing transaction batch for height and round {height_and_round}..." ); @@ -838,11 +1137,11 @@ fn handle_incoming_proposal_part( let mut validator = validator_stage.try_into_transaction_batch_stage()?; let tx_batch = tx_batch.clone(); - parts.push(proposal_part); + append_and_persist_part(height_and_round, proposal_part, proposals_db, &mut parts)?; // Use BatchExecutionManager to handle optimistic execution with checkpoints and // deferral - batch_execution_manager.process_batch_with_deferral( + batch_execution_manager.process_batch_with_deferral::( height_and_round, tx_batch, &mut validator, @@ -855,33 +1154,70 @@ fn handle_incoming_proposal_part( ValidatorStage::TransactionBatch(validator), ); - let ProposalPart::Init(ProposalInit { proposer, .. 
}) = - parts.first().expect("Proposal Init") - else { - unreachable!("Proposal Init is inserted first"); - }; - - let proposer_address = ContractAddress(proposer.0); - let updated = proposals_db.persist_parts( - height_and_round.height(), - height_and_round.round(), - &proposer_address, - &parts, - )?; - assert!(updated); - Ok(None) } - ProposalPart::ProposalCommitment(proposal_commitment) => { - let validator_stage = validator_cache.remove(&height_and_round)?; - let mut validator = validator_stage.try_into_transaction_batch_stage()?; + ProposalPart::ProposalCommitment(ref proposal_commitment) => { + match parts.len() { + 1 => { + // Looks like this could be an empty proposal: + // - [x] Proposal Init + // - [x] Proposal Commitment + // - [ ] Proposal Fin + append_and_persist_part( + height_and_round, + proposal_part.clone(), + proposals_db, + &mut parts, + )?; + + let validator_stage = validator_cache.remove(&height_and_round)?; + let validator = validator_stage.try_into_block_info_stage()?; + let validator = validator.verify_proposal_commitment(proposal_commitment)?; + let validator = ValidatorStage::Finalize(Box::new(validator)); + validator_cache.insert(height_and_round, validator); + Ok(None) + } + 2.. => { + if has_commitment { + anyhow::bail!( + "Duplicate ProposalCommitment for height and round {height_and_round}", + ); + } - validator.record_proposal_commitment(proposal_commitment)?; - validator_cache.insert( - height_and_round, - ValidatorStage::TransactionBatch(validator), - ); - Ok(None) + // Looks like a non-empty proposal: + // - [x] Proposal Init + // - [x] Block Info + // - [ ] in any order: + // - [?] at least one Transaction Batch + // - [?] 
Transactions Fin + // - [x] Proposal Commitment + // - [ ] Proposal Fin + append_and_persist_part( + height_and_round, + proposal_part.clone(), + proposals_db, + &mut parts, + )?; + + let validator_stage = validator_cache.remove(&height_and_round)?; + let mut validator = validator_stage.try_into_transaction_batch_stage()?; + + validator.record_proposal_commitment(proposal_commitment)?; + validator_cache.insert( + height_and_round, + ValidatorStage::TransactionBatch(validator), + ); + Ok(None) + } + _ => { + anyhow::bail!( + "Unexpected proposal ProposalCommitment for height and round {} at \ + position {}", + height_and_round, + parts.len() + ); + } + } } ProposalPart::Fin(ProposalFin { proposal_commitment, @@ -890,54 +1226,115 @@ fn handle_incoming_proposal_part( "🖧 ⚙️ finalizing consensus for height and round {height_and_round}..." ); - let validator_stage = validator_cache.remove(&height_and_round)?; - let validator = validator_stage.try_into_transaction_batch_stage()?; - - if !validator.has_proposal_commitment() { - anyhow::bail!( - "Transaction batch missing proposal commitment for height and round \ - {height_and_round}" - ); - } - - parts.push(proposal_part); - let ProposalPart::Init(ProposalInit { - proposer, - valid_round, - .. 
- }) = parts.first().expect("Proposal Init") - else { - unreachable!("Proposal Init is inserted first"); - }; + match parts.len() { + 2 if parts + .get(1) + .expect("part 1 to exist") + .is_proposal_commitment() => + { + // Looks like an empty proposal: + // - [x] Proposal Init + // - [x] Proposal Commitment + // - [x] Proposal Fin + let proposer_address = append_and_persist_part( + height_and_round, + proposal_part, + proposals_db, + &mut parts, + )?; + + let valid_round = valid_round_from_parts(&parts)?; + let proposal_commitment = Some(ProposalCommitmentWithOrigin { + proposal_commitment: ProposalCommitment(proposal_commitment.0), + proposer_address, + pol_round: valid_round.map(Round::new).unwrap_or(Round::nil()), + }); - let proposer_address = ContractAddress(proposer.0); - let updated = proposals_db.persist_parts( - height_and_round.height(), - height_and_round.round(), - &proposer_address, - &parts, - )?; - assert!(updated); + // We don't retrieve the validator from cache here, it'll be retrieved for + // block finalization + Ok(proposal_commitment) + } + 5.. if parts.get(1).expect("part 1 to exist").is_block_info() => { + // Looks like a non-empty proposal: + // - [x] Proposal Init + // - [x] Block Info + // - [ ] in any order: + // - [?] at least one Transaction Batch + // - [?] Transactions Fin + // - [?] 
Proposal Commitment + // - [x] Proposal Fin + let validator_stage = validator_cache.remove(&height_and_round)?; + let validator = validator_stage.try_into_transaction_batch_stage()?; + + if !validator.has_proposal_commitment() { + anyhow::bail!( + "Transaction batch missing proposal commitment for height and round \ + {height_and_round}" + ); + } - let (validator, proposal_commitment) = defer_or_execute_proposal_fin( - height_and_round, - proposal_commitment, - proposer, - *valid_round, - db_tx, - validator, - deferred_executions, - batch_execution_manager, - )?; + let proposer_address = append_and_persist_part( + height_and_round, + proposal_part, + proposals_db, + &mut parts, + )?; - validator_cache.insert(height_and_round, validator); - Ok(proposal_commitment) + let valid_round = valid_round_from_parts(&parts)?; + let (validator, proposal_commitment) = defer_or_execute_proposal_fin::( + height_and_round, + proposal_commitment, + proposer_address, + valid_round, + db_tx, + validator, + deferred_executions, + batch_execution_manager, + )?; + + validator_cache.insert(height_and_round, validator); + Ok(proposal_commitment) + } + _ => { + anyhow::bail!( + "Unexpected proposal ProposalFin for height and round {} at position {}", + height_and_round, + parts.len() + ); + } + } } - ProposalPart::TransactionsFin(transactions_fin) => { + ProposalPart::TransactionsFin(ref transactions_fin) => { tracing::debug!( "🖧 ⚙️ handling TransactionsFin for height and round {height_and_round}..." ); + if !parts.get(1).map(|p| p.is_block_info()).unwrap_or_default() { + anyhow::bail!( + "Unexpected proposal TransactionsFin for height and round {} at position {}", + height_and_round, + parts.len() + ); + } + + if has_txns_fin { + anyhow::bail!("Duplicate TransactionsFin for height and round {height_and_round}",); + } + // Looks like a non-empty proposal: + // - [x] Proposal Init + // - [x] Block Info + // - [ ] in any order: + // - [?] 
at least one Transaction Batch + // - [x] Transactions Fin + // - [?] Proposal Commitment + // - [ ] Proposal Fin + append_and_persist_part( + height_and_round, + proposal_part.clone(), + proposals_db, + &mut parts, + )?; + let validator_stage = validator_cache.remove(&height_and_round)?; let mut validator = validator_stage.try_into_transaction_batch_stage()?; @@ -956,16 +1353,16 @@ fn handle_incoming_proposal_part( let mut dex = deferred_executions.lock().unwrap(); let deferred = dex.entry(height_and_round).or_default(); - deferred.transactions_fin = Some(transactions_fin.clone()); + deferred.transactions_fin = Some(*transactions_fin); tracing::debug!( "TransactionsFin for {height_and_round} is deferred - storing for later \ processing (execution not started yet)" ); } else { // Execution has started - process TransactionsFin immediately - batch_execution_manager.process_transactions_fin( + batch_execution_manager.process_transactions_fin::( height_and_round, - transactions_fin, + *transactions_fin, &mut validator, )?; @@ -1001,25 +1398,43 @@ fn handle_incoming_proposal_part( } } +fn append_and_persist_part( + height_and_round: HeightAndRound, + proposal_part: ProposalPart, + proposals_db: &ConsensusProposals<'_>, + parts: &mut Vec, +) -> Result { + parts.push(proposal_part); + let proposer_address = proposer_address_from_parts(parts)?; + let updated = proposals_db.persist_parts( + height_and_round.height(), + height_and_round.round(), + &proposer_address, + parts, + )?; + assert!(updated); + Ok(proposer_address) +} + /// Either defer or execute the proposal finalization depending on whether /// the previous block is committed yet. If execution is deferred, the proposal /// commitment and proposer address are stored for later finalization. If /// execution is performed, any previously deferred transactions for the height /// and round are executed first, then the proposal is finalized. 
#[allow(clippy::too_many_arguments)] -fn defer_or_execute_proposal_fin( +fn defer_or_execute_proposal_fin( height_and_round: HeightAndRound, proposal_commitment: Hash, - proposer: &Address, + proposer_address: ContractAddress, valid_round: Option, db_tx: &Transaction<'_>, - mut validator: Box, + mut validator: Box>, deferred_executions: Arc>>, batch_execution_manager: &mut BatchExecutionManager, -) -> anyhow::Result<(ValidatorStage, Option)> { +) -> anyhow::Result<(ValidatorStage, Option)> { let commitment = ProposalCommitmentWithOrigin { proposal_commitment: ProposalCommitment(proposal_commitment.0), - proposer_address: ContractAddress(proposer.0), + proposer_address, pol_round: valid_round.map(Round::new).unwrap_or(Round::nil()), }; @@ -1048,7 +1463,7 @@ fn defer_or_execute_proposal_fin( if let Some(deferred) = deferred { if !deferred.transactions.is_empty() { - batch_execution_manager.execute_batch( + batch_execution_manager.execute_batch::( height_and_round, deferred.transactions, &mut validator, @@ -1063,7 +1478,7 @@ fn defer_or_execute_proposal_fin( ); // Execution has started at this point (from execute_batch), // so we can process TransactionsFin immediately - batch_execution_manager.process_transactions_fin( + batch_execution_manager.process_transactions_fin::( height_and_round, transactions_fin, &mut validator, @@ -1155,3 +1570,61 @@ fn consensus_vote_to_p2p_vote( voter: Address(vote.validator_address.0), } } + +/// Extract the proposer address from the proposal parts. +fn proposer_address_from_parts(parts: &[ProposalPart]) -> anyhow::Result { + let ProposalPart::Init(ProposalInit { proposer, .. }) = + parts.first().context("Proposal part list is empty")? + else { + anyhow::bail!("First proposal part is not ProposalInit"); + }; + Ok(ContractAddress(proposer.0)) +} + +/// Extract the valid round from the proposal parts. +fn valid_round_from_parts(parts: &[ProposalPart]) -> anyhow::Result> { + let ProposalPart::Init(ProposalInit { valid_round, .. 
}) = + parts.first().context("Proposal part list is empty")? + else { + anyhow::bail!("First proposal part is not ProposalInit"); + }; + Ok(*valid_round) +} + +#[cfg(debug_assertions)] +mod debug { + pub struct Counts { + pub txns_fin: i8, + pub commitment: i8, + pub fin: i8, + pub init_info: i8, + } + + impl Default for Counts { + fn default() -> Self { + Self { + txns_fin: -1, + commitment: -1, + fin: -1, + init_info: 0, + } + } + } + + impl Counts { + pub fn _got_duplicate(&self) -> bool { + self.txns_fin > 0 || self.commitment > 0 || self.fin > 0 || self.init_info > 0 + } + } +} +#[cfg(not(debug_assertions))] +mod debug { + #[derive(Default)] + pub struct Counts; + + impl Counts { + fn _got_duplicate(&self) -> bool { + false + } + } +} diff --git a/crates/pathfinder/src/consensus/inner/p2p_task/handler_proptest.rs b/crates/pathfinder/src/consensus/inner/p2p_task/handler_proptest.rs new file mode 100644 index 0000000000..13b0090685 --- /dev/null +++ b/crates/pathfinder/src/consensus/inner/p2p_task/handler_proptest.rs @@ -0,0 +1,662 @@ +//! This test is focused more on correct parsing of the icoming parts rather +//! than actual execution. This is why we're mocking the executor to force +//! either success or failure. There is no deferred execution in the test +//! either. We're also starting with a fresh database and we're using one of the +//! 3 proposal types: +//! - valid and empty, execution always succeeds, +//! - structurally always valid with some fake transactions that nominally +//! should always succeed on empty db, however only sometimes passing +//! execution without error, +//! - invalid proposal (proposal parts well formed but the entire proposal not +//! always conforming to the spec), execution sometimes succeeds. +//! +//! Ultimately, we end up with 5 possible paths, 2 of them leading to success. 
+use std::borrow::Cow; +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::atomic::{AtomicU64, AtomicUsize}; +use std::sync::{Arc, Mutex}; + +use fake::Fake as _; +use p2p::consensus::HeightAndRound; +use p2p::sync::client::conv::TryFromDto; +use p2p_proto::common::{Address, Hash, L1DataAvailabilityMode}; +use p2p_proto::consensus::{ + BlockInfo, + ProposalCommitment, + ProposalFin, + ProposalInit, + ProposalPart, + Transaction, + TransactionVariant as ConsensusVariant, + TransactionsFin, +}; +use p2p_proto::sync::transaction::{DeclareV3WithoutClass, TransactionVariant as SyncVariant}; +use p2p_proto::transaction::DeclareV3WithClass; +use pathfinder_common::transaction::TransactionVariant; +use pathfinder_common::{ChainId, ContractAddress, TransactionHash}; +use pathfinder_executor::types::to_starknet_api_transaction; +use pathfinder_executor::{BlockExecutorExt, IntoStarkFelt}; +use pathfinder_storage::StorageBuilder; +use proptest::prelude::*; +use rand::seq::SliceRandom as _; +use rand::Rng as _; + +use crate::consensus::inner::batch_execution::BatchExecutionManager; +use crate::consensus::inner::open_consensus_storage; +use crate::consensus::inner::p2p_task::{handle_incoming_proposal_part, ValidatorCache}; +use crate::consensus::inner::persist_proposals::ConsensusProposals; +use crate::validator::{deployed_address, TransactionExt}; + +proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(100))] + #[test] + fn test_handle_incoming_proposal_part((proposal_type, seed) in strategy::composite()) { + MockExecutor::set_seed(seed); + let validator_cache = ValidatorCache::::new(); + let deferred_executions = Arc::new(Mutex::new(HashMap::new())); + let main_storage = StorageBuilder::in_tempdir().unwrap(); + let mut main_db_conn = main_storage.connection().unwrap(); + let main_db_tx = main_db_conn.transaction().unwrap(); + let consensus_storage_tempdir = tempfile::tempdir().unwrap(); + let consensus_storage = open_consensus_storage(consensus_storage_tempdir.path()).unwrap(); + let mut consensus_db_conn = consensus_storage.connection().unwrap(); + let consensus_db_tx = consensus_db_conn.transaction().unwrap(); + let proposals_db = ConsensusProposals::new(&consensus_db_tx); + let mut batch_execution_manager = BatchExecutionManager::new(); + + let (proposal_parts, expect_success) = match proposal_type { + strategy::ProposalCase::ValidEmpty => (create_structurally_valid_empty_proposal(seed), true), + strategy::ProposalCase::StructurallyValidNonEmptyExecutionOk => + create_structurally_valid_non_empty_proposal(seed, true), + strategy::ProposalCase::StructurallyValidNonEmptyExecutionFails => + create_structurally_valid_non_empty_proposal(seed, false), + strategy::ProposalCase::StructurallyInvalidExecutionOk => + create_structurally_invalid_proposal(seed, true), + strategy::ProposalCase::StructurallyInvalidExecutionFails => + create_structurally_invalid_proposal(seed, false), + }; + let mut result = if expect_success { Err(anyhow::anyhow!("No proposal parts processed")) } else { Ok(None) }; + + let proposal_parts_len = proposal_parts.len(); + let no_fin = proposal_parts.iter().all(|part| !part.is_proposal_fin()); + let debug_info = debug_info(&proposal_parts); + + for (proposal_part, is_last) in proposal_parts + .into_iter() + .zip((0..proposal_parts_len).map(|x| x == proposal_parts_len - 1)) + { + result = + 
handle_incoming_proposal_part::( + ChainId::SEPOLIA_TESTNET, + // Arbitrary contract address for testing + ContractAddress::ONE, + HeightAndRound::new(0, 0), + proposal_part, + validator_cache.clone(), + deferred_executions.clone(), + &main_db_tx, + main_storage.clone(), + &proposals_db, + &mut batch_execution_manager, + // Utilized by failure injection which is not happening in this test, so we can + // safely use an empty path + &PathBuf::new(), + // No failure injection in this test + None, + ); + + if expect_success { + prop_assert!(result.is_ok(), "{}", debug_info); + // If we expect success, all results must be Ok, and the last must contain valid value + prop_assert_eq!(result.as_ref().unwrap().is_some(), is_last, "{}", debug_info); + } else if result.is_err() { + break; + } + } + + // If we expect failure, we stop at the first error, Fin could be missing as well + // but the handler does not error out in such case. + if !expect_success { + prop_assert!(result.is_err() || no_fin, "{}", debug_info); + } + } +} + +fn debug_info(proposal_parts: &[ProposalPart]) -> String { + let num_txns = proposal_parts + .iter() + .filter_map(|part| match part { + ProposalPart::TransactionBatch(batch) => Some(batch.len()), + _ => None, + }) + .sum::(); + let fail_at_txn = MockExecutor::get_fail_at_txn(); + let mut s = dump_parts(proposal_parts); + s.push_str(&format!("\nTotal txns: {num_txns}")); + if fail_at_txn != DONT_FAIL { + s.push_str(&format!("\nExec fail at txn: {fail_at_txn}")); + } + s.push_str("\n=====\n"); + s +} + +fn dump_parts(proposal_parts: &[ProposalPart]) -> String { + let s = "\n=====\n[".to_string(); + let mut s = proposal_parts.iter().fold(s, |mut s, part| { + s.push_str(&dump_part(part)); + s.push(','); + s + }); + s.pop(); // Remove last comma + s.push(']'); + s +} + +fn dump_part(part: &ProposalPart) -> Cow<'static, str> { + match part { + ProposalPart::Init(_) => "Init".into(), + ProposalPart::BlockInfo(_) => "BlockInfo".into(), + 
ProposalPart::TransactionBatch(batch) => format!("Batch(len: {})", batch.len()).into(), + ProposalPart::TransactionsFin(TransactionsFin { + executed_transaction_count, + }) => format!("TxnFin(count: {executed_transaction_count})").into(), + ProposalPart::ProposalCommitment(_) => "Commitment".into(), + ProposalPart::Fin(_) => "Fin".into(), + } +} + +/// Creates a structurally valid, empty proposal. +/// +/// The proposal parts will be ordered as follows: +/// - Proposal Init +/// - Proposal Commitment +/// - Proposal Fin +fn create_structurally_valid_empty_proposal(seed: u64) -> Vec { + use rand::SeedableRng; + // Explicitly choose RNG to make sure seeded proposals are always reproducible + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(seed); + let mut proposal_parts = Vec::new(); + let init = ProposalPart::Init(ProposalInit { + block_number: 0, + round: 0, + valid_round: None, + proposer: Address(ContractAddress::ZERO.0), + }); + proposal_parts.push(init); + + let mut proposal_commitment: ProposalCommitment = fake::Faker.fake_with_rng(&mut rng); + proposal_commitment.block_number = 0; + proposal_commitment.builder = Address(ContractAddress::ZERO.0); + proposal_commitment.state_diff_commitment = Hash::ZERO; + proposal_commitment.transaction_commitment = Hash::ZERO; + proposal_commitment.event_commitment = Hash::ZERO; + proposal_commitment.receipt_commitment = Hash::ZERO; + proposal_commitment.l1_gas_price_fri = 0; + proposal_commitment.l1_data_gas_price_fri = 0; + proposal_commitment.l2_gas_price_fri = 0; + proposal_commitment.l2_gas_used = 0; + proposal_commitment.l1_da_mode = L1DataAvailabilityMode::default(); + let proposal_commitment = ProposalPart::ProposalCommitment(proposal_commitment); + proposal_parts.push(proposal_commitment); + + let proposal_fin = ProposalPart::Fin(ProposalFin { + proposal_commitment: Hash::ZERO, + }); + proposal_parts.push(proposal_fin); + proposal_parts +} + +/// Creates a structurally valid, non-empty proposal with random 
parts. +/// The proposal will contain at least one transaction batch with random +/// fake transactions. The proposal will be well-formed but not necessarily +/// valid according to the consensus rules. +/// +/// The proposal parts will be ordered as follows: +/// - Proposal Init +/// - Block Info +/// - In random order: one or more Transaction Batches, Transactions Fin, +/// Proposal Commitment +/// - Proposal Fin +fn create_structurally_valid_non_empty_proposal( + seed: u64, + execution_succeeds: bool, +) -> (Vec, bool) { + use rand::SeedableRng; + // Explicitly choose RNG to make sure seeded proposals are always reproducible + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(seed); + let mut proposal_parts = Vec::new(); + let init = ProposalPart::Init(ProposalInit { + block_number: 0, + round: 0, + valid_round: None, + proposer: Address(ContractAddress::ZERO.0), + }); + let mut block_info: BlockInfo = fake::Faker.fake_with_rng(&mut rng); + block_info.block_number = 0; + block_info.builder = Address(ContractAddress::ZERO.0); + let block_info = ProposalPart::BlockInfo(block_info); + + // Init and block info must be first + proposal_parts.push(init); + proposal_parts.push(block_info); + + let num_txns = rng.gen_range(1..200); + + let transactions = (0..num_txns) + .map(|_| fake::Faker.fake_with_rng(&mut rng)) + .collect::>(); + let mut relaxed_ordered_parts = split_random(&transactions, &mut rng) + .into_iter() + .map(ProposalPart::TransactionBatch) + .collect::>(); + + let executed_transaction_count = rng.gen_range(1..=num_txns).try_into().unwrap(); + + if execution_succeeds { + MockExecutor::set_fail_at_txn(DONT_FAIL); + } else { + let fail_at = rng.gen_range(0..num_txns); + MockExecutor::set_fail_at_txn(fail_at); + } + + let transactions_fin = ProposalPart::TransactionsFin(TransactionsFin { + executed_transaction_count, + }); + let mut proposal_commitment: ProposalCommitment = fake::Faker.fake_with_rng(&mut rng); + proposal_commitment.block_number = 0; + 
proposal_commitment.builder = Address(ContractAddress::ZERO.0); + proposal_commitment.state_diff_commitment = Hash::ZERO; + proposal_commitment.transaction_commitment = Hash::ZERO; + proposal_commitment.receipt_commitment = Hash::ZERO; + let proposal_commitment = ProposalPart::ProposalCommitment(proposal_commitment); + + relaxed_ordered_parts.push(transactions_fin); + relaxed_ordered_parts.push(proposal_commitment); + // All other parts except init, block info, and proposal fin can be in any order + relaxed_ordered_parts.shuffle(&mut rng); + + proposal_parts.extend(relaxed_ordered_parts); + + let proposal_fin = ProposalPart::Fin(ProposalFin { + proposal_commitment: Hash::ZERO, + }); + proposal_parts.push(proposal_fin); + (proposal_parts, execution_succeeds) +} + +#[derive(Debug, Clone, Copy, fake::Dummy)] +enum ModifyPart { + DoNothing, + Remove, + Duplicate, +} + +#[derive(Debug, Clone, Copy, fake::Dummy)] +struct InvalidProposalConfig { + remove_all_txns: bool, + init: ModifyPart, + block_info: ModifyPart, + txn_fin: ModifyPart, + proposal_commitment: ModifyPart, + proposal_fin: ModifyPart, + shuffle: bool, +} + +impl InvalidProposalConfig { + /// Returns true if the configuration would result in a probable valid + /// proposal. + fn maybe_valid(&self) -> bool { + // We don't take shuffling into account here because it can still result + // in a valid proposal. 
+ !self.remove_all_txns + && matches!(self.init, ModifyPart::DoNothing) + && matches!(self.block_info, ModifyPart::DoNothing) + && matches!(self.txn_fin, ModifyPart::DoNothing) + && matches!(self.proposal_commitment, ModifyPart::DoNothing) + && matches!(self.proposal_fin, ModifyPart::DoNothing) + } +} + +/// Takes the output of [`create_structurally_valid_non_empty_proposal`] and +/// does at least one of the following: +/// - removes all transaction batches, +/// - removes or duplicates some of the following: proposal init, block info, +/// transactions fin, proposal commitment, proposal fin +/// - reshuffles all of the parts without respect to to the spec, or how +/// permissive we are wrt the ordering. +fn create_structurally_invalid_proposal(seed: u64, fail_at_txn: bool) -> (Vec, bool) { + use rand::SeedableRng; + // Explicitly choose RNG to make sure seeded proposals are always reproducible + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(seed); + let (mut proposal_parts, _) = create_structurally_valid_non_empty_proposal(seed, fail_at_txn); + let config: InvalidProposalConfig = fake::Faker.fake_with_rng(&mut rng); + + if config.remove_all_txns { + proposal_parts.retain(|x| !x.is_transaction_batch()); + } + modify_part(&mut proposal_parts, &mut rng, config.init, |x| { + x.is_proposal_init() + }); + modify_part(&mut proposal_parts, &mut rng, config.block_info, |x| { + x.is_block_info() + }); + modify_part(&mut proposal_parts, &mut rng, config.txn_fin, |x| { + x.is_transactions_fin() + }); + modify_part( + &mut proposal_parts, + &mut rng, + config.proposal_commitment, + |x| x.is_proposal_commitment(), + ); + modify_part(&mut proposal_parts, &mut rng, config.proposal_fin, |x| { + x.is_proposal_fin() + }); + + if config.shuffle { + proposal_parts.shuffle(&mut rng); + } + + // If we were unfortunate enough to get an unmodified proposal, let's at least + // force removing the init at the head, so that the proposal is invalid for + // sure. 
+ if config.maybe_valid() { + proposal_parts.remove(0); + } + + // This proposal should always fail, regardless of execution outcome + (proposal_parts, false) +} + +/// Removes a proposal part if the flag is true, or duplicates it if the flag +/// is false +fn modify_part( + proposal_parts: &mut Vec, + rng: &mut impl rand::Rng, + modify_part: ModifyPart, + match_fn: impl Fn(&ProposalPart) -> bool, +) { + match modify_part { + ModifyPart::DoNothing => {} + ModifyPart::Remove => proposal_parts.retain(|x| !match_fn(x)), + ModifyPart::Duplicate => { + let (i, proposal) = proposal_parts + .iter() + .enumerate() + .find_map(|(i, x)| match_fn(x).then_some((i, x.clone()))) + .expect("Part to be present"); + let insert_pos = rng.gen_range(i..proposal_parts.len()); + proposal_parts.insert(insert_pos, proposal); + } + } +} + +/// Splits a slice into a random number of parts (between 1 and slice length) +fn split_random(v: &[T], rng: &mut impl rand::Rng) -> Vec> { + let n = v.len(); + + // 1. Choose a random number of parts: between 1 and n + let parts = rng.gen_range(1..=n); + + if parts == 1 { + return vec![v.to_vec()]; + } + + // 2. Generate (parts - 1) cut points in 1..n-1 + let mut cuts: Vec = (0..parts - 1).map(|_| rng.gen_range(1..n)).collect(); + + // 3. Sort and deduplicate to avoid empty segments + cuts.sort(); + cuts.dedup(); + + // 4. Build the segments + let mut result = Vec::with_capacity(parts); + let mut start = 0; + + for cut in cuts { + result.push(v[start..cut].to_vec()); + start = cut; + } + result.push(v[start..].to_vec()); + + result +} + +/// Strategy for generating proposal parts for proptests. 
+mod strategy { + use proptest::prelude::*; + + #[derive(Debug, Clone, Copy)] + pub enum ProposalCase { + ValidEmpty, + StructurallyValidNonEmptyExecutionOk, + StructurallyValidNonEmptyExecutionFails, + StructurallyInvalidExecutionOk, + StructurallyInvalidExecutionFails, + } + + /// Generates a composite strategy that yields a tuple of + /// (ProposalCase, u64) where u64 can be used as a seed or + /// identifier for generating proposal parts according to the + /// specified case. + pub fn composite() -> BoxedStrategy<(ProposalCase, u64)> { + prop_oneof![ + // 1/20 (4% of the time) + 1 => (Just(ProposalCase::ValidEmpty), any::()), + // 4/20 (20% of the time) + 4 => (Just(ProposalCase::StructurallyValidNonEmptyExecutionOk), any::()), + // 5/20 (25% of the time) + 5 => (Just(ProposalCase::StructurallyValidNonEmptyExecutionFails), any::()), + 5 => (Just(ProposalCase::StructurallyInvalidExecutionOk), any::()), + 5 => (Just(ProposalCase::StructurallyInvalidExecutionFails), any::()), + ] + .boxed() + } +} + +struct MockExecutor; + +impl BlockExecutorExt for MockExecutor { + fn new( + _: ChainId, + _: pathfinder_executor::types::BlockInfo, + _: ContractAddress, + _: ContractAddress, + _: pathfinder_storage::Connection, + ) -> anyhow::Result + where + Self: Sized, + { + Ok(Self) + } + + fn new_with_pending_state( + _: ChainId, + _: pathfinder_executor::types::BlockInfo, + _: ContractAddress, + _: ContractAddress, + _: pathfinder_storage::Connection, + _: std::sync::Arc, + ) -> anyhow::Result + where + Self: Sized, + { + Ok(Self) + } + + /// We want execution in the proptests to be deterministic based on the seed + /// set in the MockMapper. This way we can have proposals that produce + /// consistent results which, in case of a successful test case, can then be + /// serialized into the consensus DB. This way we bypass real execution but + /// can still heavily test the other parts of the proposal handling logic, + /// including the consensus DB ops. 
+ fn execute( + &mut self, + txns: Vec, + ) -> Result< + Vec, + pathfinder_executor::TransactionExecutionError, + > { + MockExecutor::add_executed_txn_count(txns.len()); + + let fail_at_txn = MockExecutor::get_fail_at_txn(); + if fail_at_txn != DONT_FAIL && MockExecutor::get_executed_txn_count() > fail_at_txn { + return Err( + pathfinder_executor::TransactionExecutionError::ExecutionError { + transaction_index: fail_at_txn, + error: "Injected execution failure for proptests".to_string(), + error_stack: Default::default(), + }, + ); + } + + use rand::SeedableRng; + let seed = MockExecutor::get_seed(); + // Explicitly choose RNG to make sure seeded proposals are always reproducible + let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(seed); + + let dummy = ( + // Garbage is fine as long as it's serializable + pathfinder_executor::types::Receipt { + actual_fee: fake::Faker.fake_with_rng(&mut rng), + execution_resources: fake::Faker.fake_with_rng(&mut rng), + l2_to_l1_messages: fake::Faker.fake_with_rng(&mut rng), + execution_status: fake::Faker.fake_with_rng(&mut rng), + transaction_index: fake::Faker.fake_with_rng(&mut rng), + }, + fake::Faker.fake_with_rng(&mut rng), + ); + Ok(vec![dummy; txns.len()]) + } + + fn finalize(self) -> anyhow::Result { + Ok(pathfinder_executor::types::StateDiff::default()) + } + + fn set_transaction_index(&mut self, _: usize) {} + + fn extract_state_diff(&self) -> anyhow::Result { + Ok(pathfinder_executor::types::StateDiff::default()) + } +} + +const DONT_FAIL: usize = usize::MAX; + +// Thread-local is a precaution to ensure that the settings are passed correctly +// even if multiple cases for a particular proptest are running in parallel, +// which I'm pretty sure doesn't happen with proptest as of now (28/11/2025). +// Anyway, it will still serve well in case we have more than one proptest +// instance in this module, which would then mean that there are at least 2 +// proptests running in parallel. +thread_local! 
{ + pub static MOCK_EXECUTOR_SEED: AtomicU64 = const { AtomicU64::new(0) }; + pub static MOCK_EXECUTOR_EXECUTED_TXN_COUNT: AtomicUsize = const { AtomicUsize::new(0) }; + pub static MOCK_EXECUTOR_FAIL_AT_TXN: AtomicUsize = const { AtomicUsize::new(DONT_FAIL) }; +} + +impl MockExecutor { + pub fn set_seed(seed: u64) { + MOCK_EXECUTOR_SEED.with(|s| { + s.store(seed, std::sync::atomic::Ordering::SeqCst); + }); + } + + pub fn get_seed() -> u64 { + MOCK_EXECUTOR_SEED.with(|s| s.load(std::sync::atomic::Ordering::SeqCst)) + } + + pub fn add_executed_txn_count(count: usize) { + MOCK_EXECUTOR_EXECUTED_TXN_COUNT.with(|s| { + s.fetch_add(count, std::sync::atomic::Ordering::SeqCst); + }); + } + + pub fn get_executed_txn_count() -> usize { + MOCK_EXECUTOR_EXECUTED_TXN_COUNT.with(|s| s.load(std::sync::atomic::Ordering::SeqCst)) + } + + pub fn set_fail_at_txn(txn_index: usize) { + MOCK_EXECUTOR_FAIL_AT_TXN.with(|s| { + s.store(txn_index, std::sync::atomic::Ordering::SeqCst); + }); + } + + pub fn get_fail_at_txn() -> usize { + MOCK_EXECUTOR_FAIL_AT_TXN.with(|s| s.load(std::sync::atomic::Ordering::SeqCst)) + } +} + +struct MockMapper; + +/// Does the same as ProdTransactionMapper with an exception: +/// - fills ClassInfo with dummy data +impl TransactionExt for MockMapper { + fn try_map_transaction( + transaction: p2p_proto::consensus::Transaction, + ) -> anyhow::Result<( + pathfinder_common::transaction::Transaction, + pathfinder_executor::Transaction, + )> { + let p2p_proto::consensus::Transaction { + txn, + transaction_hash, + } = transaction; + let (variant, class_info) = match txn { + ConsensusVariant::DeclareV3(DeclareV3WithClass { + common, + class: _, /* Ignore */ + }) => ( + SyncVariant::DeclareV3(DeclareV3WithoutClass { + common, + class_hash: Default::default(), + }), + Some(starknet_api::contract_class::ClassInfo { + contract_class: starknet_api::contract_class::ContractClass::V0( + starknet_api::deprecated_contract_class::ContractClass::default(), + ), + 
sierra_program_length: 0, + abi_length: 0, + sierra_version: starknet_api::contract_class::SierraVersion::DEPRECATED, + }), + ), + ConsensusVariant::DeployAccountV3(v) => (SyncVariant::DeployAccountV3(v), None), + ConsensusVariant::InvokeV3(v) => (SyncVariant::InvokeV3(v), None), + ConsensusVariant::L1HandlerV0(v) => (SyncVariant::L1HandlerV0(v), None), + }; + + let common_txn_variant = TransactionVariant::try_from_dto(variant)?; + + let deployed_address = deployed_address(&common_txn_variant); + + // TODO(validator) why 10^12? + let paid_fee_on_l1 = match &common_txn_variant { + TransactionVariant::L1Handler(_) => { + Some(starknet_api::transaction::fields::Fee(1_000_000_000_000)) + } + _ => None, + }; + + let api_txn = to_starknet_api_transaction(common_txn_variant.clone())?; + let tx_hash = + starknet_api::transaction::TransactionHash(transaction_hash.0.into_starkfelt()); + let executor_txn = pathfinder_executor::Transaction::from_api( + api_txn, + tx_hash, + class_info, + paid_fee_on_l1, + deployed_address, + pathfinder_executor::AccountTransactionExecutionFlags::default(), + )?; + let common_txn = pathfinder_common::transaction::Transaction { + hash: TransactionHash(transaction_hash.0), + variant: common_txn_variant, + }; + + Ok((common_txn, executor_txn)) + } + + fn verify_hash(_: &pathfinder_common::transaction::Transaction, _: ChainId) -> bool { + true + } +} diff --git a/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs b/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs new file mode 100644 index 0000000000..fea9f985ce --- /dev/null +++ b/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs @@ -0,0 +1,1503 @@ +//! End-to-end tests for p2p_task +//! +//! These tests verify the full integration flow of p2p_task, including proposal +//! processing, deferral logic (when TransactionsFin or ProposalFin arrive out +//! of order), rollback scenarios, and database persistence. They test the +//! 
complete path from receiving P2P events to sending consensus commands. + +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use p2p::consensus::{Client, Event, HeightAndRound}; +use p2p::libp2p::identity::Keypair; +use p2p_proto::consensus::ProposalPart; +use pathfinder_common::prelude::*; +use pathfinder_common::{ChainId, ContractAddress, ProposalCommitment}; +use pathfinder_consensus::ConsensusCommand; +use pathfinder_crypto::Felt; +use pathfinder_storage::StorageBuilder; +use tokio::sync::mpsc; +use tokio::time::error::Elapsed; +use tokio::time::timeout; + +use crate::consensus::inner::persist_proposals::ConsensusProposals; +use crate::consensus::inner::test_helpers::{create_test_proposal, create_transaction_batch}; +use crate::consensus::inner::{p2p_task, ConsensusTaskEvent, ConsensusValue, P2PTaskConfig}; +use crate::validator::FinalizedBlock; + +/// Helper struct to setup and manage the test environment (databases, +/// channels, mock client) +struct TestEnvironment { + storage: pathfinder_storage::Storage, + consensus_storage: pathfinder_storage::Storage, + p2p_tx: mpsc::UnboundedSender, + rx_from_p2p: mpsc::Receiver, + tx_to_p2p: mpsc::Sender, + handle: Arc>>>>, +} + +impl TestEnvironment { + fn new(chain_id: ChainId, validator_address: ContractAddress) -> Self { + // Create temp directory for consensus storage + let consensus_storage_dir = tempfile::tempdir().expect("Failed to create temp directory"); + let consensus_storage_dir = consensus_storage_dir.path().to_path_buf(); + + // Initialize temp pathfinder and consensus databases + let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); + let consensus_storage = + StorageBuilder::in_tempdir().expect("Failed to create consensus temp database"); + + // Initialize consensus storage tables + { + let mut db_conn = consensus_storage.connection().unwrap(); + let db_tx = db_conn.transaction().unwrap(); + db_tx.ensure_consensus_proposals_table_exists().unwrap(); + db_tx + 
.ensure_consensus_finalized_blocks_table_exists() + .unwrap(); + db_tx.commit().unwrap(); + } + + // Mock channels for p2p communication + let (p2p_tx, p2p_rx) = mpsc::unbounded_channel(); + let (tx_to_consensus, rx_from_p2p) = mpsc::channel(100); + let (tx_to_p2p, rx_from_consensus) = mpsc::channel(100); + + // Create mock Client (used for receiving events in these tests) + let keypair = Keypair::generate_ed25519(); + let (client_sender, _client_receiver) = mpsc::unbounded_channel(); + let peer_id = keypair.public().to_peer_id(); + let p2p_client = Client::from((peer_id, client_sender)); + + let handle = p2p_task::spawn( + chain_id, + P2PTaskConfig { + my_validator_address: validator_address, + history_depth: 10, + }, + p2p_client, + storage.clone(), + p2p_rx, + tx_to_consensus, + rx_from_consensus, + consensus_storage.clone(), + &consensus_storage_dir, + None, + ); + + Self { + storage, + consensus_storage, + p2p_tx, + rx_from_p2p, + tx_to_p2p, + handle: Arc::new(Mutex::new(Some(handle))), + } + } + + fn create_committed_parent_block(&self, parent_height: u64) { + let block_id_felt = Felt::from(parent_height); + let mut db_conn = self.storage.connection().unwrap(); + let db_tx = db_conn.transaction().unwrap(); + let parent_header = BlockHeader::builder() + .number(BlockNumber::new_or_panic(parent_height)) + .timestamp(BlockTimestamp::new_or_panic(1000)) + .calculated_state_commitment( + StorageCommitment(block_id_felt), + ClassCommitment(block_id_felt), + ) + .sequencer_address(SequencerAddress::ZERO) + .finalize_with_hash(BlockHash(block_id_felt)); + db_tx.insert_block_header(&parent_header).unwrap(); + db_tx.commit().unwrap(); + } + + fn create_uncommitted_finalized_block(&self, height: u64, round: u32) { + let block_id_felt = Felt::from(height); + let mut db_conn = self.consensus_storage.connection().unwrap(); + let db_tx = db_conn.transaction().unwrap(); + let proposals_db = ConsensusProposals::new(&db_tx); + let block = FinalizedBlock { + header: 
BlockHeader::builder() + .number(BlockNumber::new_or_panic(height)) + .timestamp(BlockTimestamp::new_or_panic(1000)) + .calculated_state_commitment( + StorageCommitment(block_id_felt), + ClassCommitment(block_id_felt), + ) + .sequencer_address(SequencerAddress::ZERO) + .state_diff_commitment(StateDiffCommitment(block_id_felt)) + .finalize_with_hash(BlockHash(block_id_felt)), + state_update: Default::default(), + transactions_and_receipts: vec![], + events: vec![], + }; + proposals_db + .persist_finalized_block(height, round, block) + .unwrap(); + db_tx.commit().unwrap(); + } + + async fn wait_for_task_initialization(&self) { + tokio::time::sleep(Duration::from_millis(100)).await; + } + + async fn verify_task_alive(&self) { + let handle_opt = { + let handle_guard = self.handle.lock().unwrap(); + handle_guard.as_ref().map(|h| h.is_finished()) + }; + + if let Some(true) = handle_opt { + // Handle is finished, take it out and await to get the error + let handle = { + let mut handle_guard = self.handle.lock().unwrap(); + handle_guard.take().expect("Handle should exist") + }; + + match handle.await { + Ok(Ok(())) => { + panic!("Task finished successfully (unexpected - should still be running)"); + } + Ok(Err(e)) => { + panic!("Task finished with error: {e:#}"); + } + Err(e) => { + panic!("Task panicked: {e:?}"); + } + } + } + } + + async fn wait_for_task_exit(&self) -> Result, Elapsed> { + let wait_for_exit_fut = async { + loop { + let handle_opt = { + let handle_guard = self.handle.lock().unwrap(); + handle_guard.as_ref().map(|h| h.is_finished()) + }; + + if let Some(true) = handle_opt { + // Handle is finished, take it out and await to get the result + let handle = { + let mut handle_guard = self.handle.lock().unwrap(); + handle_guard.take().expect("Handle should exist") + }; + + return handle.await?; + } + + tokio::time::sleep(Duration::from_millis(50)).await; + } + }; + timeout(Duration::from_millis(300), wait_for_exit_fut).await + } +} + +/// Helper: Wait for a 
proposal event from consensus +async fn wait_for_proposal_event( + rx: &mut mpsc::Receiver, + timeout_duration: Duration, +) -> Option> { + let start = std::time::Instant::now(); + while start.elapsed() < timeout_duration { + // First try non-blocking recv + match rx.try_recv() { + Ok(ConsensusTaskEvent::CommandFromP2P(ConsensusCommand::Proposal(proposal))) => { + return Some(ConsensusCommand::Proposal(proposal)) + } + Ok(_) => { + // Other event, continue waiting + continue; + } + Err(mpsc::error::TryRecvError::Empty) => { + // No event yet, wait a bit + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + Err(mpsc::error::TryRecvError::Disconnected) => { + // Channel closed + return None; + } + } + } + None +} + +/// Helper: Verify no proposal event was received +async fn verify_no_proposal_event(rx: &mut mpsc::Receiver, duration: Duration) { + let start = std::time::Instant::now(); + while start.elapsed() < duration { + match rx.try_recv() { + Ok(ConsensusTaskEvent::CommandFromP2P(ConsensusCommand::Proposal(_))) => { + panic!("Unexpected proposal event received"); + } + Ok(_) => { + // Other event, continue checking + continue; + } + Err(mpsc::error::TryRecvError::Empty) => { + // No event, wait a bit + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + Err(mpsc::error::TryRecvError::Disconnected) => { + // Channel closed, that's fine + return; + } + } + } +} + +/// Helper: Verify proposal event matches expected values +fn verify_proposal_event( + proposal_cmd: ConsensusCommand, + expected_height: u64, + expected_commitment: ProposalCommitment, +) { + match proposal_cmd { + ConsensusCommand::Proposal(signed_proposal) => { + assert_eq!( + signed_proposal.proposal.height, expected_height, + "Proposal height should match" + ); + assert_eq!( + signed_proposal.proposal.value.0, expected_commitment, + "Proposal commitment should match" + ); + } + _ => panic!("Expected Proposal command"), + } +} + +/// Helper: Verify proposal parts 
are persisted in database +/// +/// Verifies that the expected part types are present: +/// - Init (required) +/// - BlockInfo (optional, if `expect_transaction_batch` is true) +/// - TransactionBatch (optional, if `expect_transaction_batch` is true) +/// - TransactionsFin (optional, if `expect_transaction_batch` is true) +/// - ProposalCommitment (required) +/// - Fin (required) +/// +/// Also verifies the total count matches `expected_count`. +fn verify_proposal_parts_persisted( + consensus_storage: &pathfinder_storage::Storage, + height: u64, + round: u32, + validator_address: &ContractAddress, // Query with validator address (receiver) + expected_count: usize, + expect_transaction_batch: bool, +) { + let mut db_conn = consensus_storage.connection().unwrap(); + let db_tx = db_conn.transaction().unwrap(); + let proposals_db = ConsensusProposals::new(&db_tx); + // seems like foreign_parts queries by validator_address to get + // proposals from foreign validators (proposals where proposer != + // validator) + let parts = proposals_db + .foreign_parts(height, round, validator_address) + .unwrap() + .unwrap_or_default(); + + // Part types for error message + let part_types: Vec = parts + .iter() + .map(|part| format!("{:?}", std::mem::discriminant(part))) + .collect(); + + // ----- Debug output for proposal parts ----- + #[cfg(debug_assertions)] + { + eprintln!( + "Found {} proposal parts in database for height {} round {} (querying with validator \ + {})", + parts.len(), + height, + round, + validator_address.0 + ); + for (i, part) in parts.iter().enumerate() { + eprintln!(" Part {}: {:?}", i, std::mem::discriminant(part)); + } + } + // ----- End debug output for proposal parts ----- + + // Verify required parts are present + use p2p_proto::consensus::ProposalPart as P2PProposalPart; + let has_init = parts.iter().any(|p| matches!(p, P2PProposalPart::Init(_))); + let has_block_info = parts + .iter() + .any(|p| matches!(p, P2PProposalPart::BlockInfo(_))); + let 
has_transaction_batch = parts + .iter() + .any(|p| matches!(p, P2PProposalPart::TransactionBatch(_))); + let has_transactions_fin = parts + .iter() + .any(|p| matches!(p, P2PProposalPart::TransactionsFin(_))); + let has_proposal_commitment = parts + .iter() + .any(|p| matches!(p, P2PProposalPart::ProposalCommitment(_))); + let has_fin = parts.iter().any(|p| matches!(p, P2PProposalPart::Fin(_))); + + assert!( + has_init, + "Expected Init part to be persisted. Persisted parts: [{}]", + part_types.join(", ") + ); + assert!( + has_proposal_commitment, + "Expected ProposalCommitment part to be persisted. Persisted parts: [{}]", + part_types.join(", ") + ); + assert!( + has_fin, + "Expected Fin part to be persisted. Persisted parts: [{}]", + part_types.join(", ") + ); + if expect_transaction_batch { + assert!( + has_block_info, + "Expected BlockInfo part to be persisted. Persisted parts: [{}]", + part_types.join(", ") + ); + assert!( + has_transaction_batch, + "Expected TransactionBatch part to be persisted. Persisted parts: [{}]", + part_types.join(", ") + ); + assert!( + has_transactions_fin, + "Expected TransactionsFin part to be persisted. Persisted parts: [{}]", + part_types.join(", ") + ); + } + + // Verify total count + assert_eq!( + parts.len(), + expected_count, + "Expected {} proposal parts, got {}. 
Persisted parts: [{}]", + expected_count, + parts.len(), + part_types.join(", ") + ); +} + +/// Helper: Verify transaction count from persisted proposal parts +fn verify_transaction_count( + consensus_storage: &pathfinder_storage::Storage, + height: u64, + round: u32, + validator_address: &ContractAddress, + expected_count: usize, +) { + let mut db_conn = consensus_storage.connection().unwrap(); + let db_tx = db_conn.transaction().unwrap(); + let proposals = ConsensusProposals::new(&db_tx); + let parts = proposals + .foreign_parts(height, round, validator_address) + .unwrap() + .unwrap_or_default(); + + // Count transactions from all TransactionBatch parts + let mut total_transactions = 0; + for part in &parts { + if let ProposalPart::TransactionBatch(transactions) = part { + total_transactions += transactions.len(); + } + } + + assert_eq!( + total_transactions, expected_count, + "Expected {expected_count} transactions in persisted proposal parts, found \ + {total_transactions}", + ); +} + +/// Helper: Create a ProposalCommitment message +fn create_proposal_commitment_part( + height: u64, + proposal_commitment: ProposalCommitment, +) -> ProposalPart { + let zero_hash = p2p_proto::common::Hash(Felt::ZERO); + let zero_address = p2p_proto::common::Address(Felt::ZERO); + ProposalPart::ProposalCommitment(p2p_proto::consensus::ProposalCommitment { + block_number: height, + parent_commitment: zero_hash, + builder: zero_address, + timestamp: 1000, + protocol_version: "0.0.0".to_string(), + old_state_root: zero_hash, + version_constant_commitment: zero_hash, + state_diff_commitment: p2p_proto::common::Hash(proposal_commitment.0), /* Use real commitment */ + transaction_commitment: zero_hash, + event_commitment: zero_hash, + receipt_commitment: zero_hash, + concatenated_counts: Felt::ZERO, + l1_gas_price_fri: 0, + l1_data_gas_price_fri: 0, + l2_gas_price_fri: 0, + l2_gas_used: 0, + next_l2_gas_price_fri: 0, + l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::default(), 
+ }) +} + +/// ProposalFin deferred until parent block is committed. +/// +/// **Scenario**: ProposalFin arrives before the parent block is committed. +/// Execution has started (TransactionBatch received), so ProposalFin must be +/// deferred until the parent block is committed, then finalization can proceed. +/// +/// **Test**: Send Init → BlockInfo → TransactionBatch → ProposalCommitment → +/// TransactionsFin → ProposalFin → CommitBlock(parent). +/// +/// Verify ProposalFin is deferred (no proposal event), then verify +/// finalization occurs after parent block is committed. Also verify +/// ProposalFin is persisted in the database even when deferred. +#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_proposal_fin_deferred_until_parent_block_committed() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + env.create_committed_parent_block(0); + env.create_uncommitted_finalized_block(1, 0); + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + let transactions = create_transaction_batch(0, 5, chain_id); + let (proposal_init, block_info) = + create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); + + // Focus is on batch execution and deferral logic, not commitment validation. + // Using a dummy commitment... 
+ let proposal_commitment = ProposalCommitment(Felt::ZERO); + + // Step 1: Send ProposalInit + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + // Step 2: Send BlockInfo + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + // Step 3: Send TransactionBatch (execution should start) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions), + )) + .expect("Failed to send TransactionBatch"); + env.verify_task_alive().await; + + // Verify: No proposal event yet (execution started, but not finalized) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 4: Send ProposalCommitment + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + // Step 5: Send TransactionsFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 5, + }), + )) + .expect("Failed to send TransactionsFin"); + env.verify_task_alive().await; + + // Step 6: Send ProposalFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + env.verify_task_alive().await; + + // Verify: Still no proposal event + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Check what's in the database right after ProposalFin + #[cfg(debug_assertions)] + { + let mut db_conn = env.consensus_storage.connection().unwrap(); + 
+    // Expected: Init, BlockInfo, TransactionBatch, TransactionsFin,
+    // ProposalCommitment, Fin (6 parts), matching the count asserted by
+    // verify_proposal_parts_persisted below
+/// +/// **Test**: Send Init → BlockInfo → TransactionBatch → +/// TransactionsFin → ProposalCommitment → ProposalFin. +/// +/// Verify proposal event is sent immediately after ProposalFin (no +/// deferral), and verify all parts are persisted correctly. +#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_full_proposal_flow_normal_order() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + env.create_committed_parent_block(1); + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + let transactions = create_transaction_batch(0, 5, chain_id); + let (proposal_init, block_info) = + create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); + + // Focus is on batch execution and deferral logic, not commitment validation. + // Using a dummy commitment... 
+ let proposal_commitment = ProposalCommitment(Felt::ZERO); + + // Step 1: Send ProposalInit + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + // Step 2: Send BlockInfo + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + // Step 3: Send TransactionBatch + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions), + )) + .expect("Failed to send TransactionBatch"); + env.verify_task_alive().await; + + // Verify: No proposal event yet (execution started, but TransactionsFin not + // processed) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 4: Send TransactionsFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 5, + }), + )) + .expect("Failed to send TransactionsFin"); + env.verify_task_alive().await; + + // Verify: Still no proposal event (TransactionsFin processed, but ProposalFin + // not received) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 5: Send ProposalCommitment + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + // Step 6: Send ProposalFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + tokio::time::sleep(Duration::from_millis(500)).await; + + // Verify: Proposal event should be sent immediately (both conditions met) 
+ let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) + .await + .expect("Expected proposal event after ProposalFin"); + verify_proposal_event(proposal_cmd, 2, proposal_commitment); + + // Verify proposal parts persisted + // Query with validator_address (receiver) to get foreign proposals + verify_proposal_parts_persisted( + &env.consensus_storage, + 2, + 1, + &validator_address, + 6, + true, // expect_transaction_batch + ); + + // Verify transaction count matches TransactionsFin count + verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5); +} + +/// TransactionsFin deferred when execution not started. +/// +/// **Scenario**: Parent block is not committed initially, so +/// TransactionBatch and TransactionsFin are both deferred. After parent +/// is committed, execution starts and deferred messages are processed. +/// +/// **Test**: Send Init → BlockInfo → TransactionBatch → +/// TransactionsFin (without committing parent). +/// +/// Verify no execution occurs. Then commit parent block and send another +/// TransactionBatch. Verify deferred TransactionsFin is processed when +/// execution starts. 
+#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_transactions_fin_deferred_when_execution_not_started() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + // Parent block NOT committed initially + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + let transactions_batch1 = create_transaction_batch(0, 3, chain_id); + let transactions_batch2 = create_transaction_batch(3, 2, chain_id); // Total: 5 + let (proposal_init, block_info) = create_test_proposal( + chain_id, + 2, + 1, + proposer_address, + transactions_batch1.clone(), + ); + + // Step 1: Send ProposalInit + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + // Step 2: Send BlockInfo + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + // Step 3: Send first TransactionBatch (should be deferred - parent not + // committed) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch1), + )) + .expect("Failed to send first TransactionBatch"); + env.verify_task_alive().await; + + // Verify: No proposal event (execution deferred) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 4: Send TransactionsFin (should be deferred - execution not started) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 5, + }), + )) + .expect("Failed to send 
TransactionsFin"); + env.verify_task_alive().await; + + // Verify: Still no proposal event (TransactionsFin deferred) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 5: Now we commit the parent block + env.create_committed_parent_block(1); + tokio::time::sleep(Duration::from_millis(100)).await; + + // Step 6: Send another TransactionBatch + // This should trigger execution of deferred batches + process deferred + // TransactionsFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch2), + )) + .expect("Failed to send second TransactionBatch"); + env.verify_task_alive().await; + + // At this point, execution should have started and TransactionsFin should be + // processed... + + // To verify this, we send ProposalCommitment and ProposalFin, then verify that + // a proposal event is sent (which confirms TransactionsFin was processed). + + // Once again, using a dummy commitment... + let proposal_commitment = ProposalCommitment(Felt::ZERO); + + // Step 7: Send ProposalCommitment + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + // Step 8: Send ProposalFin + // This should trigger finalization since TransactionsFin was processed + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + env.verify_task_alive().await; + + // Verify: Proposal event should be sent (confirms TransactionsFin was + // processed) + let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) + .await + .expect("Expected proposal event after deferred TransactionsFin was processed"); + verify_proposal_event(proposal_cmd, 2, 
+    // Expected: Init, BlockInfo, 2x TransactionBatch, TransactionsFin,
+    // ProposalCommitment, Fin (7 parts)
+ let proposal_commitment = ProposalCommitment(Felt::ZERO); + + // Step 1: Send ProposalInit + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + // Step 2: Send BlockInfo + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + // Step 3: Send multiple TransactionBatches + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch1), + )) + .expect("Failed to send TransactionBatch1"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch2), + )) + .expect("Failed to send TransactionBatch2"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch3), + )) + .expect("Failed to send TransactionBatch3"); + env.verify_task_alive().await; + + // Step 4: Send TransactionsFin (total count = 7) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 7, + }), + )) + .expect("Failed to send TransactionsFin"); + env.verify_task_alive().await; + + // Step 5: Send ProposalCommitment + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + // Step 6: Send ProposalFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + tokio::time::sleep(Duration::from_millis(500)).await; + + // 
Verify: Proposal event should be sent + let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) + .await + .expect("Expected proposal event after ProposalFin"); + verify_proposal_event(proposal_cmd, 2, proposal_commitment); + + // Verify all batches persisted + // Query with validator_address (receiver) to get foreign proposals + // Expected: Init, BlockInfo, TransactionBatch (3 batches), ProposalCommitment, + // TransactionsFin, ProposalFin (8 parts) + verify_proposal_parts_persisted( + &env.consensus_storage, + 2, + 1, + &validator_address, + 8, // 3 TransactionBatch parts + true, // expect_transaction_batch + ); + + // Verify transaction count matches TransactionsFin count + // Multiple batches are persisted as separate TransactionBatch parts, so we + // count the transactions from all persisted parts + verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 7); +} + +/// TransactionsFin triggers rollback when count is less than executed. +/// +/// **Scenario**: We execute 10 transactions (2 batches of 5), but +/// TransactionsFin indicates only 7 transactions were executed by the +/// proposer. The validator must rollback from 10 to 7 transactions to +/// match the proposer's state. +/// +/// **Test**: Send Init → BlockInfo → TransactionBatch1 (5 txs) → +/// TransactionBatch2 (5 txs) → TransactionsFin (count=7) → +/// ProposalCommitment → ProposalFin. +/// +/// Verify proposal event is sent successfully after rollback, confirming +/// the rollback mechanism works correctly. 
+#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_transactions_fin_rollback() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + env.create_committed_parent_block(1); + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + let transactions_batch1 = create_transaction_batch(0, 5, chain_id); + let transactions_batch2 = create_transaction_batch(5, 5, chain_id); // Total: 10 + let (proposal_init, block_info) = create_test_proposal( + chain_id, + 2, + 1, + proposer_address, + transactions_batch1.clone(), + ); + + // Focus is on batch execution and deferral logic, not commitment validation. + // Using a dummy commitment... + let proposal_commitment = ProposalCommitment(Felt::ZERO); + + // Step 1: Send ProposalInit + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + // Step 2: Send BlockInfo + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + // Step 3: Send TransactionBatch 1 (5 transactions) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch1), + )) + .expect("Failed to send TransactionBatch1"); + env.verify_task_alive().await; + + // Step 4: Send TransactionBatch 2 (5 more transactions, total = 10) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions_batch2), + )) + .expect("Failed to send TransactionBatch2"); + env.verify_task_alive().await; + + // Step 5: Send TransactionsFin with count=7 (should 
trigger rollback from 10 to + // 7) + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 7, + }), + )) + .expect("Failed to send TransactionsFin"); + env.verify_task_alive().await; + + // Step 6: Send ProposalCommitment + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + // Step 7: Send ProposalFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + tokio::time::sleep(Duration::from_millis(500)).await; + + // Verify: Proposal event should be sent (rollback completed successfully) + // NOTE: We verify that a proposal event is sent, which indicates rollback + // completed. However, we cannot directly verify the transaction count + // in e2e tests because the validator is internal to p2p_task. The + // rollback logic itself is verified in unit tests (batch_execution. + // rs::test_transactions_fin_rollback). This e2e test verifies + // that rollback doesn't break the proposal flow end-to-end. 
+    // Expected: Init, BlockInfo, 2x TransactionBatch, TransactionsFin,
+    // ProposalCommitment, Fin (7 parts)
+    // It should only log the fact, clean any caches of the invalid proposal and
+/// +/// **Scenario**: We execute 5 transactions, but TransactionsFin indicates +/// 10. This shouldn't happen with proper message ordering, but the code +/// handles it by logging a warning and continuing. +/// +/// **Test**: Send Init → BlockInfo → TransactionBatch (5 txs) → +/// TransactionsFin (count=10) → ProposalCommitment → ProposalFin. Verify +/// processing continues and proposal event is sent (with 5 transactions, +/// not 10). +/// +/// **Note**: We cannot directly verify these things. The goal of this +/// e2e test is to verify that processing continues correctly despite the +/// mismatch. +#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_transactions_fin_count_exceeds_executed() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + env.create_committed_parent_block(1); + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + let transactions = create_transaction_batch(0, 5, chain_id); + let (proposal_init, block_info) = + create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); + + let proposal_commitment = ProposalCommitment(Felt::ZERO); + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions), + )) + .expect("Failed to send TransactionBatch"); + env.verify_task_alive().await; + + verify_no_proposal_event(&mut env.rx_from_p2p, 
Duration::from_millis(200)).await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 10, + }), + )) + .expect("Failed to send TransactionsFin"); + env.verify_task_alive().await; + + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + tokio::time::sleep(Duration::from_millis(500)).await; + + let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) + .await + .expect("Expected proposal event after ProposalFin"); + verify_proposal_event(proposal_cmd, 2, proposal_commitment); + + verify_proposal_parts_persisted( + &env.consensus_storage, + 2, + 1, + &validator_address, + 6, + true, // expect_transaction_batch + ); + + // Verify transaction count matches what was actually received (5 transactions). + // Persisted proposal parts should reflect what was received from the network. + verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5); +} + +/// TransactionsFin arrives before any TransactionBatch. +/// +/// **Scenario**: TransactionsFin arrives before execution starts (no +/// batches received yet). It should be deferred until execution starts, +/// then processed. +/// +/// **Test**: Send Init → BlockInfo → TransactionsFin → +/// TransactionBatch → ProposalCommitment → ProposalFin. Verify +/// TransactionsFin is deferred, then processed when execution starts, +/// and proposal event is sent. 
+#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_transactions_fin_before_any_batch() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + env.create_committed_parent_block(1); + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + let transactions = create_transaction_batch(0, 5, chain_id); + let (proposal_init, block_info) = + create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); + + let proposal_commitment = ProposalCommitment(Felt::ZERO); + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::BlockInfo(block_info), + )) + .expect("Failed to send BlockInfo"); + env.verify_task_alive().await; + + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { + executed_transaction_count: 5, + }), + )) + .expect("Failed to send TransactionsFin"); + env.verify_task_alive().await; + + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 4: Send TransactionBatch + // This should trigger execution start and process the deferred TransactionsFin + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::TransactionBatch(transactions), + )) + .expect("Failed to send TransactionBatch"); + env.verify_task_alive().await; + + // Verify: Still no proposal event (TransactionsFin processed, but ProposalFin + // not received) + // Note: We verify that deferred TransactionsFin was processed indirectly by + // sending ProposalFin 
below and confirming the proposal event is sent
+    // (which requires TransactionsFin to be processed first).
+    verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await;
+
+    env.p2p_tx
+        .send(Event::Proposal(
+            height_and_round,
+            create_proposal_commitment_part(2, proposal_commitment),
+        ))
+        .expect("Failed to send ProposalCommitment");
+    env.verify_task_alive().await;
+
+    env.p2p_tx
+        .send(Event::Proposal(
+            height_and_round,
+            ProposalPart::Fin(p2p_proto::consensus::ProposalFin {
+                proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0),
+            }),
+        ))
+        .expect("Failed to send ProposalFin");
+    tokio::time::sleep(Duration::from_millis(500)).await;
+
+    let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2))
+        .await
+        .expect("Expected proposal event after ProposalFin");
+    verify_proposal_event(proposal_cmd, 2, proposal_commitment);
+
+    verify_proposal_parts_persisted(
+        &env.consensus_storage,
+        2,
+        1,
+        &validator_address,
+        6,
+        true, // expect_transaction_batch
+    );
+
+    // Verify transaction count matches TransactionsFin count
+    verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5);
+}
+
+/// Empty proposal per spec (no TransactionBatch, no TransactionsFin).
+///
+/// **Scenario**: A proposer cannot offer a valid proposal, so the height is
+/// agreed to be empty. Per the spec, empty proposals skip
+/// TransactionBatch and TransactionsFin entirely. The order is:
+/// ProposalInit → ProposalCommitment → ProposalFin.
+///
+/// **Test**: Send ProposalInit → ProposalCommitment → ProposalFin (no
+/// BlockInfo, no TransactionBatch, no TransactionsFin).
+///
+/// Verify ProposalFin proceeds immediately (not deferred, since execution
+/// never started), proposal event is sent, and all parts are persisted
+/// correctly.
+#[test_log::test(tokio::test(flavor = "multi_thread"))] +async fn test_empty_proposal_per_spec() { + let chain_id = ChainId::SEPOLIA_TESTNET; + let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); + let mut env = TestEnvironment::new(chain_id, validator_address); + env.create_committed_parent_block(1); + env.wait_for_task_initialization().await; + + let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); + let height_and_round = HeightAndRound::new(2, 1); + + // For empty proposals, we still need BlockInfo to transition to + // TransactionBatch stage, but we don't send any TransactionBatch or + // TransactionsFin + let (proposal_init, _block_info) = + create_test_proposal(chain_id, 2, 1, proposer_address, vec![]); + + // Using a dummy commitment... + let proposal_commitment = ProposalCommitment(Felt::ZERO); + + // Step 1: Send ProposalInit + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Init(proposal_init), + )) + .expect("Failed to send ProposalInit"); + env.verify_task_alive().await; + + // Verify: No proposal event yet (ProposalCommitment and ProposalFin not + // received) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 2: Send ProposalCommitment + // Note: No TransactionBatch or TransactionsFin - this is the key difference + // from normal proposals. Execution never starts. + env.p2p_tx + .send(Event::Proposal( + height_and_round, + create_proposal_commitment_part(2, proposal_commitment), + )) + .expect("Failed to send ProposalCommitment"); + env.verify_task_alive().await; + + // Verify: Still no proposal event (ProposalFin not received) + verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; + + // Step 3: Send ProposalFin + // Since execution never started (no TransactionBatch), ProposalFin should + // proceed immediately without deferral. 
This is different from first test + // where execution started but TransactionsFin wasn't processed yet. + env.p2p_tx + .send(Event::Proposal( + height_and_round, + ProposalPart::Fin(p2p_proto::consensus::ProposalFin { + proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + }), + )) + .expect("Failed to send ProposalFin"); + env.verify_task_alive().await; + + // Verify: Proposal event should be sent immediately (not deferred) + // This confirms that ProposalFin proceeds when execution never started, + // which is the correct behavior for empty proposals per spec. + let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) + .await + .expect("Expected proposal event after ProposalFin for empty proposal"); + verify_proposal_event(proposal_cmd, 2, proposal_commitment); + + // Verify proposal parts persisted + // Expected: Init, ProposalCommitment, ProposalFin (3 parts) + verify_proposal_parts_persisted( + &env.consensus_storage, + 2, + 1, + &validator_address, + 3, + false, // expect_transaction_batch (empty proposal) + ); +} diff --git a/crates/pathfinder/src/consensus/inner/p2p_task_tests.rs b/crates/pathfinder/src/consensus/inner/p2p_task_tests.rs deleted file mode 100644 index 41a1af053e..0000000000 --- a/crates/pathfinder/src/consensus/inner/p2p_task_tests.rs +++ /dev/null @@ -1,1447 +0,0 @@ -//! End-to-end tests for p2p_task -//! -//! These tests verify the full integration flow of p2p_task, including proposal -//! processing, deferral logic (when TransactionsFin or ProposalFin arrive out -//! of order), rollback scenarios, and database persistence. They test the -//! complete path from receiving P2P events to sending consensus commands. 
- -#[cfg(all(test, feature = "p2p"))] -mod tests { - use std::sync::{Arc, Mutex}; - use std::time::Duration; - - use p2p::consensus::{Client, Event, HeightAndRound}; - use p2p::libp2p::identity::Keypair; - use p2p_proto::consensus::ProposalPart; - use pathfinder_common::prelude::*; - use pathfinder_common::{ChainId, ContractAddress, ProposalCommitment}; - use pathfinder_consensus::ConsensusCommand; - use pathfinder_crypto::Felt; - use pathfinder_storage::StorageBuilder; - use tokio::sync::mpsc; - - use crate::consensus::inner::persist_proposals::ConsensusProposals; - use crate::consensus::inner::test_helpers::{create_test_proposal, create_transaction_batch}; - use crate::consensus::inner::{p2p_task, ConsensusTaskEvent, ConsensusValue, P2PTaskConfig}; - - /// Helper struct to setup and manage the test environment (databases, - /// channels, mock client) - struct TestEnvironment { - storage: pathfinder_storage::Storage, - consensus_storage: pathfinder_storage::Storage, - p2p_tx: mpsc::UnboundedSender, - rx_from_p2p: mpsc::Receiver, - _tx_to_p2p: mpsc::Sender, /* Keep alive to prevent receiver from being dropped */ - handle: Arc>>>>, - } - - impl TestEnvironment { - fn new(chain_id: ChainId, validator_address: ContractAddress) -> Self { - // Create temp directory for consensus storage - let consensus_storage_dir = - tempfile::tempdir().expect("Failed to create temp directory"); - let consensus_storage_dir = consensus_storage_dir.path().to_path_buf(); - - // Initialize temp pathfinder and consensus databases - let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); - let consensus_storage = - StorageBuilder::in_tempdir().expect("Failed to create consensus temp database"); - - // Initialize consensus storage tables - { - let mut db_conn = consensus_storage.connection().unwrap(); - let db_tx = db_conn.transaction().unwrap(); - db_tx.ensure_consensus_proposals_table_exists().unwrap(); - db_tx - .ensure_consensus_finalized_blocks_table_exists() 
- .unwrap(); - db_tx.commit().unwrap(); - } - - // Mock channels for p2p communication - let (p2p_tx, p2p_rx) = mpsc::unbounded_channel(); - let (tx_to_consensus, rx_from_p2p) = mpsc::channel(100); - let (tx_to_p2p, rx_from_consensus) = mpsc::channel(100); - - // Create mock Client (used for receiving events in these tests) - let keypair = Keypair::generate_ed25519(); - let (client_sender, _client_receiver) = mpsc::unbounded_channel(); - let peer_id = keypair.public().to_peer_id(); - let p2p_client = Client::from((peer_id, client_sender)); - - let handle = p2p_task::spawn( - chain_id, - P2PTaskConfig { - my_validator_address: validator_address, - history_depth: 10, - }, - p2p_client, - storage.clone(), - p2p_rx, - tx_to_consensus, - rx_from_consensus, - consensus_storage.clone(), - &consensus_storage_dir, - None, - ); - - Self { - storage, - consensus_storage, - p2p_tx, - rx_from_p2p, - _tx_to_p2p: tx_to_p2p, - handle: Arc::new(Mutex::new(Some(handle))), - } - } - - fn create_committed_parent_block(&self, parent_height: u64) { - let mut db_conn = self.storage.connection().unwrap(); - let db_tx = db_conn.transaction().unwrap(); - let parent_header = BlockHeader::builder() - .number(BlockNumber::new_or_panic(parent_height)) - .timestamp(BlockTimestamp::new_or_panic(1000)) - .calculated_state_commitment( - StorageCommitment(Felt::ZERO), - ClassCommitment(Felt::ZERO), - ) - .sequencer_address(SequencerAddress::ZERO) - .finalize_with_hash(BlockHash(Felt::ZERO)); - db_tx.insert_block_header(&parent_header).unwrap(); - db_tx.commit().unwrap(); - } - - async fn wait_for_task_initialization(&self) { - tokio::time::sleep(Duration::from_millis(100)).await; - } - - async fn verify_task_alive(&self) { - let handle_opt = { - let handle_guard = self.handle.lock().unwrap(); - handle_guard.as_ref().map(|h| h.is_finished()) - }; - - if let Some(true) = handle_opt { - // Handle is finished, take it out and await to get the error - let handle = { - let mut handle_guard = 
self.handle.lock().unwrap(); - handle_guard.take().expect("Handle should exist") - }; - - match handle.await { - Ok(Ok(())) => { - panic!("Task finished successfully (unexpected - should still be running)"); - } - Ok(Err(e)) => { - panic!("Task finished with error: {e:#}"); - } - Err(e) => { - panic!("Task panicked: {e:?}"); - } - } - } - } - } - - /// Helper: Wait for a proposal event from consensus - async fn wait_for_proposal_event( - rx: &mut mpsc::Receiver, - timeout_duration: Duration, - ) -> Option> { - let start = std::time::Instant::now(); - while start.elapsed() < timeout_duration { - // First try non-blocking recv - match rx.try_recv() { - Ok(ConsensusTaskEvent::CommandFromP2P(ConsensusCommand::Proposal(proposal))) => { - return Some(ConsensusCommand::Proposal(proposal)) - } - Ok(_) => { - // Other event, continue waiting - continue; - } - Err(mpsc::error::TryRecvError::Empty) => { - // No event yet, wait a bit - tokio::time::sleep(Duration::from_millis(50)).await; - continue; - } - Err(mpsc::error::TryRecvError::Disconnected) => { - // Channel closed - return None; - } - } - } - None - } - - /// Helper: Verify no proposal event was received - async fn verify_no_proposal_event( - rx: &mut mpsc::Receiver, - duration: Duration, - ) { - let start = std::time::Instant::now(); - while start.elapsed() < duration { - match rx.try_recv() { - Ok(ConsensusTaskEvent::CommandFromP2P(ConsensusCommand::Proposal(_))) => { - panic!("Unexpected proposal event received"); - } - Ok(_) => { - // Other event, continue checking - continue; - } - Err(mpsc::error::TryRecvError::Empty) => { - // No event, wait a bit - tokio::time::sleep(Duration::from_millis(50)).await; - continue; - } - Err(mpsc::error::TryRecvError::Disconnected) => { - // Channel closed, that's fine - return; - } - } - } - } - - /// Helper: Verify proposal event matches expected values - fn verify_proposal_event( - proposal_cmd: ConsensusCommand, - expected_height: u64, - expected_commitment: 
ProposalCommitment, - ) { - match proposal_cmd { - ConsensusCommand::Proposal(signed_proposal) => { - assert_eq!( - signed_proposal.proposal.height, expected_height, - "Proposal height should match" - ); - assert_eq!( - signed_proposal.proposal.value.0, expected_commitment, - "Proposal commitment should match" - ); - } - _ => panic!("Expected Proposal command"), - } - } - - /// Helper: Verify proposal parts are persisted in database - /// - /// Verifies that the expected part types are present: - /// - Init (required) - /// - BlockInfo (required) - /// - Fin (required) - /// - TransactionBatch (optional, if `expect_transaction_batch` is true) - /// - /// Also verifies the total count matches `expected_count`. - fn verify_proposal_parts_persisted( - consensus_storage: &pathfinder_storage::Storage, - height: u64, - round: u32, - validator_address: &ContractAddress, // Query with validator address (receiver) - expected_count: usize, - expect_transaction_batch: bool, - ) { - let mut db_conn = consensus_storage.connection().unwrap(); - let db_tx = db_conn.transaction().unwrap(); - let proposals_db = ConsensusProposals::new(&db_tx); - // seems like foreign_parts queries by validator_address to get - // proposals from foreign validators (proposals where proposer != - // validator) - let parts = proposals_db - .foreign_parts(height, round, validator_address) - .unwrap() - .unwrap_or_default(); - - // Part types for error message - let part_types: Vec = parts - .iter() - .map(|part| format!("{:?}", std::mem::discriminant(part))) - .collect(); - - // ----- Debug output for proposal parts ----- - #[cfg(debug_assertions)] - { - eprintln!( - "Found {} proposal parts in database for height {} round {} (querying with \ - validator {})", - parts.len(), - height, - round, - validator_address.0 - ); - for (i, part) in parts.iter().enumerate() { - eprintln!(" Part {}: {:?}", i, std::mem::discriminant(part)); - } - } - // ----- End debug output for proposal parts ----- - - // Verify 
required parts are present - use p2p_proto::consensus::ProposalPart as P2PProposalPart; - let has_init = parts.iter().any(|p| matches!(p, P2PProposalPart::Init(_))); - let has_block_info = parts - .iter() - .any(|p| matches!(p, P2PProposalPart::BlockInfo(_))); - let has_fin = parts.iter().any(|p| matches!(p, P2PProposalPart::Fin(_))); - let has_transaction_batch = parts - .iter() - .any(|p| matches!(p, P2PProposalPart::TransactionBatch(_))); - - assert!( - has_init, - "Expected Init part to be persisted. Persisted parts: [{}]", - part_types.join(", ") - ); - assert!( - has_block_info, - "Expected BlockInfo part to be persisted. Persisted parts: [{}]", - part_types.join(", ") - ); - assert!( - has_fin, - "Expected Fin part to be persisted. Persisted parts: [{}]", - part_types.join(", ") - ); - if expect_transaction_batch { - assert!( - has_transaction_batch, - "Expected TransactionBatch part to be persisted. Persisted parts: [{}]", - part_types.join(", ") - ); - } - - // Verify total count - assert_eq!( - parts.len(), - expected_count, - "Expected {} proposal parts, got {}. 
Persisted parts: [{}]", - expected_count, - parts.len(), - part_types.join(", ") - ); - } - - /// Helper: Verify transaction count from persisted proposal parts - fn verify_transaction_count( - consensus_storage: &pathfinder_storage::Storage, - height: u64, - round: u32, - validator_address: &ContractAddress, - expected_count: usize, - ) { - let mut db_conn = consensus_storage.connection().unwrap(); - let db_tx = db_conn.transaction().unwrap(); - let proposals = ConsensusProposals::new(&db_tx); - let parts = proposals - .foreign_parts(height, round, validator_address) - .unwrap() - .unwrap_or_default(); - - // Count transactions from all TransactionBatch parts - let mut total_transactions = 0; - for part in &parts { - if let ProposalPart::TransactionBatch(transactions) = part { - total_transactions += transactions.len(); - } - } - - assert_eq!( - total_transactions, expected_count, - "Expected {expected_count} transactions in persisted proposal parts, found \ - {total_transactions}", - ); - } - - /// Helper: Create a ProposalCommitment message - fn create_proposal_commitment_part( - height: u64, - proposal_commitment: ProposalCommitment, - ) -> ProposalPart { - let zero_hash = p2p_proto::common::Hash(Felt::ZERO); - let zero_address = p2p_proto::common::Address(Felt::ZERO); - ProposalPart::ProposalCommitment(p2p_proto::consensus::ProposalCommitment { - block_number: height, - parent_commitment: zero_hash, - builder: zero_address, - timestamp: 1000, - protocol_version: "0.0.0".to_string(), - old_state_root: zero_hash, - version_constant_commitment: zero_hash, - state_diff_commitment: p2p_proto::common::Hash(proposal_commitment.0), /* Use real commitment */ - transaction_commitment: zero_hash, - event_commitment: zero_hash, - receipt_commitment: zero_hash, - concatenated_counts: Felt::ZERO, - l1_gas_price_fri: 0, - l1_data_gas_price_fri: 0, - l2_gas_price_fri: 0, - l2_gas_used: 0, - next_l2_gas_price_fri: 0, - l1_da_mode: 
p2p_proto::common::L1DataAvailabilityMode::Calldata, - }) - } - - /// ProposalFin deferred until TransactionsFin is processed. - /// - /// **Scenario**: ProposalFin arrives before TransactionsFin. Execution has - /// started (TransactionBatch received), so ProposalFin must be deferred - /// until TransactionsFin arrives and is processed, then finalization - /// can proceed. - /// - /// **Test**: Send Init → BlockInfo → TransactionBatch → - /// ProposalCommitment → ProposalFin → TransactionsFin. - /// - /// Verify ProposalFin is deferred (no proposal event), then verify - /// finalization occurs after TransactionsFin arrives. Also verify - /// ProposalFin is persisted in the database even when deferred. - #[tokio::test(flavor = "multi_thread")] - async fn test_proposal_fin_deferred_until_transactions_fin_processed() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions = create_transaction_batch(0, 5, chain_id); - let (proposal_init, block_info) = - create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); - - // Focus is on batch execution and deferral logic, not commitment validation. - // Using a dummy commitment... 
- let proposal_commitment = ProposalCommitment(Felt::ZERO); - - // Step 1: Send ProposalInit - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch (execution should start) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions), - )) - .expect("Failed to send TransactionBatch"); - env.verify_task_alive().await; - - // Verify: No proposal event yet (execution started, but not finalized) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: Send ProposalCommitment - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - // Step 5: Send ProposalFin BEFORE TransactionsFin - // This should be DEFERRED because TransactionsFin hasn't been processed - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (ProposalFin was deferred) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Check what's in the database right after ProposalFin (before TransactionsFin) - #[cfg(debug_assertions)] - { - let mut db_conn = env.consensus_storage.connection().unwrap(); - let db_tx = db_conn.transaction().unwrap(); - let proposals = ConsensusProposals::new(&db_tx); - let parts_after_proposal_fin = proposals - 
.foreign_parts(2, 1, &validator_address) - .unwrap() - .unwrap_or_default(); - eprintln!( - "Parts in database after ProposalFin (before TransactionsFin): {}", - parts_after_proposal_fin.len() - ); - for (i, part) in parts_after_proposal_fin.iter().enumerate() { - eprintln!(" Part {}: {:?}", i, std::mem::discriminant(part)); - } - } - - // Step 6: Send TransactionsFin - // This should trigger finalization of the deferred ProposalFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 5, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - // Verify: Proposal event should be sent now - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(3)) - .await - .expect("Expected proposal event after TransactionsFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - // Verify proposal parts persisted - // Query with validator_address (receiver) to get foreign proposals - // Expected: Init, BlockInfo, TransactionBatch, ProposalFin (4 parts) - // Note: ProposalCommitment is not persisted as a proposal part (it's validator - // state only) - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 4, - true, // expect_transaction_batch - ); - - // Verify transaction count matches TransactionsFin count - verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5); - } - - /// Full proposal flow in normal order. - /// - /// **Scenario**: Complete proposal flow with all parts arriving in the - /// expected order. TransactionsFin arrives before ProposalFin, so no - /// deferral is needed. - /// - /// **Test**: Send Init → BlockInfo → TransactionBatch → - /// TransactionsFin → ProposalCommitment → ProposalFin. 
- /// - /// Verify proposal event is sent immediately after ProposalFin (no - /// deferral), and verify all parts are persisted correctly. - #[tokio::test(flavor = "multi_thread")] - async fn test_full_proposal_flow_normal_order() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions = create_transaction_batch(0, 5, chain_id); - let (proposal_init, block_info) = - create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); - - // Focus is on batch execution and deferral logic, not commitment validation. - // Using a dummy commitment... - let proposal_commitment = ProposalCommitment(Felt::ZERO); - - // Step 1: Send ProposalInit - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions), - )) - .expect("Failed to send TransactionBatch"); - env.verify_task_alive().await; - - // Verify: No proposal event yet (execution started, but TransactionsFin not - // processed) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: Send TransactionsFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - 
ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 5, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (TransactionsFin processed, but ProposalFin - // not received) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 5: Send ProposalCommitment - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - // Step 6: Send ProposalFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - tokio::time::sleep(Duration::from_millis(500)).await; - - // Verify: Proposal event should be sent immediately (both conditions met) - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - // Verify proposal parts persisted - // Query with validator_address (receiver) to get foreign proposals - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 4, - true, // expect_transaction_batch - ); - - // Verify transaction count matches TransactionsFin count - verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5); - } - - /// TransactionsFin deferred when execution not started. - /// - /// **Scenario**: Parent block is not committed initially, so - /// TransactionBatch and TransactionsFin are both deferred. After parent - /// is committed, execution starts and deferred messages are processed. 
- /// - /// **Test**: Send Init → BlockInfo → TransactionBatch → - /// TransactionsFin (without committing parent). - /// - /// Verify no execution occurs. Then commit parent block and send another - /// TransactionBatch. Verify deferred TransactionsFin is processed when - /// execution starts. - #[tokio::test(flavor = "multi_thread")] - async fn test_transactions_fin_deferred_when_execution_not_started() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - // Parent block NOT committed initially - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions_batch1 = create_transaction_batch(0, 3, chain_id); - let transactions_batch2 = create_transaction_batch(3, 2, chain_id); // Total: 5 - let (proposal_init, block_info) = create_test_proposal( - chain_id, - 2, - 1, - proposer_address, - transactions_batch1.clone(), - ); - - // Step 1: Send ProposalInit - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send first TransactionBatch (should be deferred - parent not - // committed) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions_batch1), - )) - .expect("Failed to send first TransactionBatch"); - env.verify_task_alive().await; - - // Verify: No proposal event (execution deferred) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: 
Send TransactionsFin (should be deferred - execution not started) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 5, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (TransactionsFin deferred) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 5: Now we commit the parent block - env.create_committed_parent_block(1); - tokio::time::sleep(Duration::from_millis(100)).await; - - // Step 6: Send another TransactionBatch - // This should trigger execution of deferred batches + process deferred - // TransactionsFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions_batch2), - )) - .expect("Failed to send second TransactionBatch"); - env.verify_task_alive().await; - - // At this point, execution should have started and TransactionsFin should be - // processed... - - // To verify this, we send ProposalCommitment and ProposalFin, then verify that - // a proposal event is sent (which confirms TransactionsFin was processed). - - // Once again, using a dummy commitment... 
- let proposal_commitment = ProposalCommitment(Felt::ZERO); - - // Step 7: Send ProposalCommitment - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - // Step 8: Send ProposalFin - // This should trigger finalization since TransactionsFin was processed - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - env.verify_task_alive().await; - - // Verify: Proposal event should be sent (confirms TransactionsFin was - // processed) - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after deferred TransactionsFin was processed"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - // Verify proposal parts persisted - // Expected: Init, BlockInfo, TransactionBatch (2 batches), ProposalFin (5 - // parts) - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 5, // 2 TransactionBatch parts - true, // expect_transaction_batch - ); - } - - /// Multiple TransactionBatch messages are executed correctly. - /// - /// **Scenario**: A proposal contains multiple TransactionBatch messages - /// that must all be executed in order. All batches should be executed - /// before TransactionsFin is processed. - /// - /// **Test**: Send Init → BlockInfo → TransactionBatch 1 → - /// TransactionBatch 2 → TransactionBatch 3 → TransactionsFin → - /// ProposalCommitment → ProposalFin. - /// - /// Verify proposal event is sent after ProposalFin, and verify all batches - /// are persisted (combined into a single TransactionBatch part in the - /// database). 
- #[tokio::test(flavor = "multi_thread")] - async fn test_multiple_batches_execution() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions_batch1 = create_transaction_batch(0, 2, chain_id); - let transactions_batch2 = create_transaction_batch(2, 3, chain_id); - let transactions_batch3 = create_transaction_batch(5, 2, chain_id); // Total: 7 - let (proposal_init, block_info) = create_test_proposal( - chain_id, - 2, - 1, - proposer_address, - transactions_batch1.clone(), - ); - - // Focus is on batch execution and deferral logic, not commitment validation. - // Using a dummy commitment... - let proposal_commitment = ProposalCommitment(Felt::ZERO); - - // Step 1: Send ProposalInit - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send multiple TransactionBatches - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions_batch1), - )) - .expect("Failed to send TransactionBatch1"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions_batch2), - )) - .expect("Failed to send TransactionBatch2"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - 
ProposalPart::TransactionBatch(transactions_batch3), - )) - .expect("Failed to send TransactionBatch3"); - env.verify_task_alive().await; - - // Step 4: Send TransactionsFin (total count = 7) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 7, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - // Step 5: Send ProposalCommitment - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - // Step 6: Send ProposalFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - tokio::time::sleep(Duration::from_millis(500)).await; - - // Verify: Proposal event should be sent - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - // Verify all batches persisted - // Query with validator_address (receiver) to get foreign proposals - // Expected: Init, BlockInfo, TransactionBatch (3 batches), ProposalFin (6 - // parts) - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 6, // 3 TransactionBatch parts - true, // expect_transaction_batch - ); - - // Verify transaction count matches TransactionsFin count - // Multiple batches are persisted as separate TransactionBatch parts, so we - // count the transactions from all persisted parts - verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 7); - } - - /// TransactionsFin triggers rollback when count is less than executed. 
- /// - /// **Scenario**: We execute 10 transactions (2 batches of 5), but - /// TransactionsFin indicates only 7 transactions were executed by the - /// proposer. The validator must rollback from 10 to 7 transactions to - /// match the proposer's state. - /// - /// **Test**: Send Init → BlockInfo → TransactionBatch1 (5 txs) → - /// TransactionBatch2 (5 txs) → TransactionsFin (count=7) → - /// ProposalCommitment → ProposalFin. - /// - /// Verify proposal event is sent successfully after rollback, confirming - /// the rollback mechanism works correctly. - #[tokio::test(flavor = "multi_thread")] - async fn test_transactions_fin_rollback() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions_batch1 = create_transaction_batch(0, 5, chain_id); - let transactions_batch2 = create_transaction_batch(5, 5, chain_id); // Total: 10 - let (proposal_init, block_info) = create_test_proposal( - chain_id, - 2, - 1, - proposer_address, - transactions_batch1.clone(), - ); - - // Focus is on batch execution and deferral logic, not commitment validation. - // Using a dummy commitment... 
- let proposal_commitment = ProposalCommitment(Felt::ZERO); - - // Step 1: Send ProposalInit - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch 1 (5 transactions) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions_batch1), - )) - .expect("Failed to send TransactionBatch1"); - env.verify_task_alive().await; - - // Step 4: Send TransactionBatch 2 (5 more transactions, total = 10) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions_batch2), - )) - .expect("Failed to send TransactionBatch2"); - env.verify_task_alive().await; - - // Step 5: Send TransactionsFin with count=7 (should trigger rollback from 10 to - // 7) - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 7, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - // Step 6: Send ProposalCommitment - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - // Step 7: Send ProposalFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - tokio::time::sleep(Duration::from_millis(500)).await; - - // Verify: Proposal event should be sent (rollback completed successfully) - // NOTE: 
We verify that a proposal event is sent, which indicates rollback - // completed. However, we cannot directly verify the transaction count - // in e2e tests because the validator is internal to p2p_task. The - // rollback logic itself is verified in unit tests (batch_execution. - // rs::test_transactions_fin_rollback). This e2e test verifies - // that rollback doesn't break the proposal flow end-to-end. - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - // Verify proposal parts persisted - // Query with validator_address (receiver) to get foreign proposals - // Expected: Init, BlockInfo, TransactionBatch (2 batches), ProposalFin (5 - // parts) - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 5, // 2 TransactionBatch parts - true, // expect_transaction_batch - ); - - // Note: Persisted proposal parts contain the original transactions (10), not - // the rolled-back count (7). Rollback happens in the validator's - // execution state at runtime, but the persisted parts reflect what was - // received from the network. The rollback verification (that execution - // state has 7 transactions) is covered in unit tests. Here we verify - // that all original batches are persisted. - // - // This means that if the process crashes and recovers from persisted parts, it - // would restore 10 transactions instead of the rolled-back count of 7. Recovery - // logic should take TransactionsFin into account to ensure the correct - // transaction count is restored after rollback. - verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 10); - } - - /// Empty TransactionBatch execution (non-spec edge case). - /// - /// **Scenario**: A proposal contains an empty TransactionBatch. 
Per the - /// [Starknet consensus spec](https://raw.githubusercontent.com/starknet-io/starknet-p2p-specs/refs/heads/main/p2p/proto/consensus/consensus.md), - /// if a proposer has no transactions, they should send an empty proposal - /// (skipping TransactionBatch and TransactionsFin entirely). However, this - /// test covers the defensive case where a non-empty proposal includes an - /// empty TransactionBatch. Execution should still be marked as started, and - /// TransactionsFin with count=0 should be processable. - /// - /// **Test**: Send Init → BlockInfo → TransactionBatch (empty) → - /// TransactionsFin (count=0) → ProposalCommitment → ProposalFin. - /// - /// Verify execution is marked as started and TransactionsFin is processed. - #[tokio::test(flavor = "multi_thread")] - async fn test_empty_batch_execution() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let empty_transactions = create_transaction_batch(0, 0, chain_id); - let (proposal_init, block_info) = - create_test_proposal(chain_id, 2, 1, proposer_address, empty_transactions.clone()); - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(empty_transactions), - )) - .expect("Failed to send empty TransactionBatch"); - 
env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 0, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Empty batches don't initialize the executor, but finalization now - // handles this case (see test_empty_proposal_finalization in - // validator.rs). This test focuses on batch execution and - // TransactionsFin processing not finalization (which we can't - // verify here). - } - - /// TransactionsFin indicates more transactions than executed. - /// - /// **Scenario**: We execute 5 transactions, but TransactionsFin indicates - /// 10. This shouldn't happen with proper message ordering, but the code - /// handles it by logging a warning and continuing. - /// - /// **Test**: Send Init → BlockInfo → TransactionBatch (5 txs) → - /// TransactionsFin (count=10) → ProposalCommitment → ProposalFin. Verify - /// processing continues and proposal event is sent (with 5 transactions, - /// not 10). - /// - /// **Note**: We cannot directly verify these things. The goal of this - /// e2e test is to verify that processing continues correctly despite the - /// mismatch. 
- #[tokio::test(flavor = "multi_thread")] - async fn test_transactions_fin_count_exceeds_executed() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions = create_transaction_batch(0, 5, chain_id); - let (proposal_init, block_info) = - create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); - - let proposal_commitment = ProposalCommitment(Felt::ZERO); - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions), - )) - .expect("Failed to send TransactionBatch"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 10, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - 
height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - tokio::time::sleep(Duration::from_millis(500)).await; - - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 4, - true, // expect_transaction_batch - ); - - // Verify transaction count matches what was actually received (5 transactions). - // Persisted proposal parts should reflect what was received from the network. - verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5); - } - - /// TransactionsFin arrives before any TransactionBatch. - /// - /// **Scenario**: TransactionsFin arrives before execution starts (no - /// batches received yet). It should be deferred until execution starts, - /// then processed. - /// - /// **Test**: Send Init → BlockInfo → TransactionsFin → - /// TransactionBatch → ProposalCommitment → ProposalFin. Verify - /// TransactionsFin is deferred, then processed when execution starts, - /// and proposal event is sent. 
- #[tokio::test(flavor = "multi_thread")] - async fn test_transactions_fin_before_any_batch() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions = create_transaction_batch(0, 5, chain_id); - let (proposal_init, block_info) = - create_test_proposal(chain_id, 2, 1, proposer_address, transactions.clone()); - - let proposal_commitment = ProposalCommitment(Felt::ZERO); - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionsFin(p2p_proto::consensus::TransactionsFin { - executed_transaction_count: 5, - }), - )) - .expect("Failed to send TransactionsFin"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: Send TransactionBatch - // This should trigger execution start and process the deferred TransactionsFin - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions), - )) - .expect("Failed to send TransactionBatch"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (TransactionsFin processed, but ProposalFin - // not received) - // Note: We verify that deferred TransactionsFin was processed indirectly by - // sending ProposalFin below and 
confirming the proposal event is sent - // (which requires TransactionsFin to be processed first). - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - tokio::time::sleep(Duration::from_millis(500)).await; - - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 4, - true, // expect_transaction_batch - ); - - // Verify transaction count matches TransactionsFin count - verify_transaction_count(&env.consensus_storage, 2, 1, &validator_address, 5); - } - - /// Empty proposal per spec (no TransactionBatch, no TransactionsFin). - /// - /// **Scenario**: A proposer cannot offer a valid proposal, so the height is - /// agreed to be empty. Per the spec, empty proposals skip - /// TransactionBatch and TransactionsFin entirely. The order is: - /// ProposalInit → ProposalCommitment → ProposalFin. - /// - /// **Test**: Send ProposalInit → BlockInfo → ProposalCommitment → - /// ProposalFin (no TransactionBatch, no TransactionsFin). - /// - /// Verify ProposalFin proceeds immediately (not deferred, since execution - /// never started), proposal event is sent, and all parts are persisted - /// correctly. 
- #[tokio::test(flavor = "multi_thread")] - async fn test_empty_proposal_per_spec() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_parent_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - - // For empty proposals, we still need BlockInfo to transition to - // TransactionBatch stage, but we don't send any TransactionBatch or - // TransactionsFin - let (proposal_init, block_info) = - create_test_proposal(chain_id, 2, 1, proposer_address, vec![]); - - // Using a dummy commitment... - let proposal_commitment = ProposalCommitment(Felt::ZERO); - - // Step 1: Send ProposalInit - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Init(proposal_init), - )) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::BlockInfo(block_info), - )) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Verify: No proposal event yet (ProposalCommitment and ProposalFin not - // received) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 3: Send ProposalCommitment - // Note: No TransactionBatch or TransactionsFin - this is the key difference - // from normal proposals. Execution never starts. 
- env.p2p_tx - .send(Event::Proposal( - height_and_round, - create_proposal_commitment_part(2, proposal_commitment), - )) - .expect("Failed to send ProposalCommitment"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (ProposalFin not received) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: Send ProposalFin - // Since execution never started (no TransactionBatch), ProposalFin should - // proceed immediately without deferral. This is different from first test - // where execution started but TransactionsFin wasn't processed yet. - env.p2p_tx - .send(Event::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - )) - .expect("Failed to send ProposalFin"); - env.verify_task_alive().await; - - // Verify: Proposal event should be sent immediately (not deferred) - // This confirms that ProposalFin proceeds when execution never started, - // which is the correct behavior for empty proposals per spec. 
- let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin for empty proposal"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - - // Verify proposal parts persisted - // Expected: Init, BlockInfo, ProposalFin (3 parts) - verify_proposal_parts_persisted( - &env.consensus_storage, - 2, - 1, - &validator_address, - 3, - false, // expect_transaction_batch (empty proposal) - ); - } -} diff --git a/crates/pathfinder/src/consensus/inner/persist_proposals.rs b/crates/pathfinder/src/consensus/inner/persist_proposals.rs index 284bf8e651..bc093a4717 100644 --- a/crates/pathfinder/src/consensus/inner/persist_proposals.rs +++ b/crates/pathfinder/src/consensus/inner/persist_proposals.rs @@ -95,6 +95,23 @@ impl<'tx> ConsensusProposals<'tx> { } } + /// Retrieve the last proposal parts for all available heights from other + /// validators. Returns the heights, the last round numbers and the proposal + /// parts. + pub fn all_last_parts( + &self, + validator: &ContractAddress, + ) -> anyhow::Result)>> { + let mut results = Vec::new(); + for (height, round, buf) in self.tx.all_last_consensus_proposal_parts(validator)? { + let parts = Self::decode_proposal_parts(&buf[..])?; + let last_round = round.try_into().context("Round exceeds u32::MAX")?; + let height = height.try_into().context("Invalid height")?; + results.push((height, last_round, parts)); + } + Ok(results) + } + /// Remove proposal parts for a given height and optionally a specific /// round. If `round` is `None`, all rounds for that height are removed. 
pub fn remove_parts(&self, height: u64, round: Option) -> anyhow::Result<()> { diff --git a/crates/pathfinder/src/consensus/inner/test_helpers.rs b/crates/pathfinder/src/consensus/inner/test_helpers.rs index c88b7fe093..03cce5fe6e 100644 --- a/crates/pathfinder/src/consensus/inner/test_helpers.rs +++ b/crates/pathfinder/src/consensus/inner/test_helpers.rs @@ -80,7 +80,7 @@ pub fn create_test_proposal( block_number: height, timestamp, builder: proposer_address, - l1_da_mode: L1DataAvailabilityMode::Calldata, + l1_da_mode: L1DataAvailabilityMode::default(), l2_gas_price_fri: 1, l1_gas_price_wei: 1_000_000_000, l1_data_gas_price_wei: 1, diff --git a/crates/pathfinder/src/validator.rs b/crates/pathfinder/src/validator.rs index 314f818ab3..ba1e2d36ec 100644 --- a/crates/pathfinder/src/validator.rs +++ b/crates/pathfinder/src/validator.rs @@ -5,6 +5,7 @@ use std::time::Instant; use anyhow::Context; use p2p::sync::client::conv::TryFromDto; use p2p_proto::class::Cairo1Class; +use p2p_proto::common::Hash; use p2p_proto::consensus::{BlockInfo, ProposalInit, TransactionVariant as ConsensusVariant}; use p2p_proto::sync::transaction::{DeclareV3WithoutClass, TransactionVariant as SyncVariant}; use p2p_proto::transaction::DeclareV3WithClass; @@ -22,6 +23,7 @@ use pathfinder_common::{ ChainId, EntryPoint, EventCommitment, + GasPrice, L1DataAvailabilityMode, ProposalCommitment, ReceiptCommitment, @@ -33,7 +35,7 @@ use pathfinder_common::{ TransactionHash, }; use pathfinder_executor::types::{to_starknet_api_transaction, BlockInfoPriceConverter}; -use pathfinder_executor::{BlockExecutor, ClassInfo, IntoStarkFelt}; +use pathfinder_executor::{BlockExecutorExt, ClassInfo, IntoStarkFelt}; use pathfinder_merkle_tree::starknet_state::update_starknet_state; use pathfinder_rpc::context::{ETH_FEE_TOKEN_ADDRESS, STRK_FEE_TOKEN_ADDRESS}; use pathfinder_storage::{Storage, Transaction as DbTransaction}; @@ -75,11 +77,11 @@ impl ValidatorBlockInfoStage { }) } - pub fn 
validate_consensus_block_info( + pub fn validate_consensus_block_info( self, block_info: BlockInfo, storage: Storage, - ) -> anyhow::Result { + ) -> anyhow::Result> { let _span = tracing::debug_span!( "Validator::validate_block_info", height = %block_info.block_number, @@ -148,10 +150,118 @@ impl ValidatorBlockInfoStage { storage, }) } + + pub fn verify_proposal_commitment( + self, + proposal_commitment: &p2p_proto::consensus::ProposalCommitment, + ) -> anyhow::Result { + if proposal_commitment.state_diff_commitment != Hash::ZERO { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero state_diff_commitment, got: {}", + proposal_commitment.state_diff_commitment + )); + } + + if proposal_commitment.transaction_commitment != Hash::ZERO { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero transaction_commitment, got: {}", + proposal_commitment.transaction_commitment + )); + } + + if proposal_commitment.event_commitment != Hash::ZERO { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero event_commitment, got: {}", + proposal_commitment.event_commitment + )); + } + + if proposal_commitment.receipt_commitment != Hash::ZERO { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero receipt_commitment, got: {}", + proposal_commitment.receipt_commitment + )); + } + + if proposal_commitment.l1_gas_price_fri != 0 { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero l1_gas_price_fri, got: {}", + proposal_commitment.l1_gas_price_fri + )); + } + + if proposal_commitment.l1_data_gas_price_fri != 0 { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero l1_data_gas_price_fri, got: {}", + proposal_commitment.l1_data_gas_price_fri + )); + } + + if proposal_commitment.l2_gas_price_fri != 0 { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero l2_gas_price_fri, got: {}", + proposal_commitment.l2_gas_price_fri + )); + } + + if 
proposal_commitment.l2_gas_used != 0 { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have zero l2_gas_used, got: {}", + proposal_commitment.l2_gas_used + )); + } + + if proposal_commitment.l1_da_mode != p2p_proto::common::L1DataAvailabilityMode::Calldata { + return Err(anyhow::anyhow!( + "Empty proposal commitment should have Calldata l1_da_mode, got: {:?}", + proposal_commitment.l1_da_mode + )); + } + + // TODO check parent_commitment + // TODO check builder + // TODO check old_state_root + // TODO check version_constant_commitment + // TODO check concatenated_counts + // TODO check next_l2_gas_price_fri vs prev block + + let expected_block_header = BlockHeader { + hash: BlockHash::ZERO, // UNUSED + parent_hash: BlockHash::ZERO, // UNUSED + number: BlockNumber::new(proposal_commitment.block_number) + .context("ProposalCommitment block number exceeds i64::MAX")?, + timestamp: BlockTimestamp::new(proposal_commitment.timestamp) + .context("ProposalCommitment timestamp exceeds i64::MAX")?, + eth_l1_gas_price: GasPrice::ZERO, + strk_l1_gas_price: GasPrice::ZERO, + eth_l1_data_gas_price: GasPrice::ZERO, + strk_l1_data_gas_price: GasPrice::ZERO, + eth_l2_gas_price: GasPrice::ZERO, + strk_l2_gas_price: GasPrice::ZERO, + sequencer_address: SequencerAddress(proposal_commitment.builder.0), + starknet_version: StarknetVersion::from_str(&proposal_commitment.protocol_version)?, + event_commitment: EventCommitment::ZERO, + state_commitment: StateCommitment::ZERO, // UNUSED + transaction_commitment: TransactionCommitment::ZERO, + transaction_count: 0, + event_count: 0, + l1_da_mode: L1DataAvailabilityMode::Calldata, + receipt_commitment: ReceiptCommitment::ZERO, + state_diff_commitment: StateDiffCommitment::ZERO, + state_diff_length: 0, + }; + Ok(ValidatorFinalizeStage { + header: expected_block_header, + state_update: StateUpdateData::default(), + transactions: Vec::new(), + receipts: Vec::new(), + events: Vec::new(), + }) + } } /// Executes transactions and 
manages the block execution state. -pub struct ValidatorTransactionBatchStage { +pub struct ValidatorTransactionBatchStage { chain_id: ChainId, block_info: pathfinder_executor::types::BlockInfo, expected_block_header: Option, @@ -159,7 +269,7 @@ pub struct ValidatorTransactionBatchStage { receipts: Vec, events: Vec>, /// Single executor for all batches (optimized from multiple executors) - executor: Option, + executor: Option, /// Cumulative state updates after each batch (for rollback reconstruction) cumulative_state_updates: Vec, /// Size of each batch (for proper rollback calculations) @@ -170,7 +280,7 @@ pub struct ValidatorTransactionBatchStage { storage: Storage, } -impl ValidatorTransactionBatchStage { +impl ValidatorTransactionBatchStage { /// Create a new ValidatorTransactionBatchStage pub fn new( chain_id: ChainId, @@ -207,7 +317,7 @@ impl ValidatorTransactionBatchStage { fn reconstruct_executor_from_state_update( &self, state_update_data: &StateUpdateData, - ) -> anyhow::Result { + ) -> anyhow::Result { // Convert StateUpdateData to StateUpdate let state_update = StateUpdate { block_hash: pathfinder_common::BlockHash::ZERO, @@ -221,7 +331,7 @@ impl ValidatorTransactionBatchStage { }; // Create BlockExecutor from the StateUpdate - BlockExecutor::new_with_pending_state( + E::new_with_pending_state( self.chain_id, self.block_info, ETH_FEE_TOKEN_ADDRESS, @@ -236,7 +346,7 @@ impl ValidatorTransactionBatchStage { /// Execute a batch of transactions using a single executor and extract /// state diffs - pub fn execute_batch( + pub fn execute_batch( &mut self, transactions: Vec, ) -> anyhow::Result<()> { @@ -256,7 +366,7 @@ impl ValidatorTransactionBatchStage { // Convert transactions to executor format let txns = transactions .iter() - .map(|t| try_map_transaction(t.clone())) + .map(|t| T::try_map_transaction(t.clone())) .collect::>>()?; let (common_txns, executor_txns): (Vec<_>, Vec<_>) = txns.into_iter().unzip(); @@ -264,7 +374,7 @@ impl 
ValidatorTransactionBatchStage { let txn_hashes = common_txns .par_iter() .map(|t| { - if t.verify_hash(self.chain_id) { + if T::verify_hash(t, self.chain_id) { Ok(t.hash) } else { Err(anyhow::anyhow!( @@ -279,7 +389,7 @@ impl ValidatorTransactionBatchStage { // Initialize executor on first batch, or use existing executor if self.executor.is_none() { // First batch - start from initial state - self.executor = Some(BlockExecutor::new( + self.executor = Some(E::new( self.chain_id, self.block_info, ETH_FEE_TOKEN_ADDRESS, @@ -394,7 +504,7 @@ impl ValidatorTransactionBatchStage { } /// Rollback to a specific transaction count - pub fn rollback_to_transaction( + pub fn rollback_to_transaction( &mut self, target_transaction_count: usize, ) -> anyhow::Result<()> { @@ -425,7 +535,7 @@ impl ValidatorTransactionBatchStage { // Execute the partial batch let partial_transactions = &original_p2p_transactions[..transactions_in_target_batch + 1]; - self.execute_batch(partial_transactions.to_vec())?; + self.execute_batch::(partial_transactions.to_vec())?; } else { // Store the original p2p transactions before rollback let original_p2p_transactions = self.batch_p2p_transactions[target_batch].clone(); @@ -436,7 +546,7 @@ impl ValidatorTransactionBatchStage { // Execute the partial batch that's left let partial_transactions = &original_p2p_transactions[..transactions_in_target_batch + 1]; - self.execute_batch(partial_transactions.to_vec())?; + self.execute_batch::(partial_transactions.to_vec())?; } Ok(()) @@ -458,6 +568,7 @@ impl ValidatorTransactionBatchStage { )) } + #[cfg(test)] /// Finalize with the current state (up to the last executed transaction) pub fn finalize(&mut self) -> anyhow::Result> { if self.executor.is_none() { @@ -543,9 +654,12 @@ impl ValidatorTransactionBatchStage { Ok(()) } + // TODO we should probably introduce another stage because we expect exactly one + // proposal commitment per proposal and the API allows calling this multiple + // times pub fn 
record_proposal_commitment( &mut self, - proposal_commitment: p2p_proto::consensus::ProposalCommitment, + proposal_commitment: &p2p_proto::consensus::ProposalCommitment, ) -> anyhow::Result<()> { let expected_block_header = BlockHeader { hash: BlockHash::ZERO, // UNUSED @@ -742,6 +856,7 @@ impl ValidatorFinalizeStage { /// /// This function performs database operations and is computationally /// and IO intensive. + // TODO make it into a trait, we don't want this heavy stuff in proptests pub fn finalize( self, db_tx: DbTransaction<'_>, @@ -814,13 +929,31 @@ impl ValidatorFinalizeStage { } } -pub enum ValidatorStage { +pub enum ValidatorStage { BlockInfo(ValidatorBlockInfoStage), - TransactionBatch(Box), + TransactionBatch(Box>), Finalize(Box), } -impl ValidatorStage { +impl From for ValidatorStage { + fn from(stage: ValidatorBlockInfoStage) -> Self { + ValidatorStage::BlockInfo(stage) + } +} + +impl From> for ValidatorStage { + fn from(stage: ValidatorTransactionBatchStage) -> Self { + ValidatorStage::TransactionBatch(Box::new(stage)) + } +} + +impl From for ValidatorStage { + fn from(stage: ValidatorFinalizeStage) -> Self { + ValidatorStage::Finalize(Box::new(stage)) + } +} + +impl ValidatorStage { pub fn try_into_block_info_stage(self) -> anyhow::Result { match self { ValidatorStage::BlockInfo(stage) => Ok(stage), @@ -830,7 +963,7 @@ impl ValidatorStage { pub fn try_into_transaction_batch_stage( self, - ) -> anyhow::Result> { + ) -> anyhow::Result>> { match self { ValidatorStage::TransactionBatch(stage) => Ok(stage), _ => anyhow::bail!( @@ -856,60 +989,80 @@ impl ValidatorStage { } } -/// Maps consensus transaction to a pair of: -/// - common transaction, which is used for verifying the transaction hash -/// - executor transaction, which is used for executing the transaction -fn try_map_transaction( - transaction: p2p_proto::consensus::Transaction, -) -> anyhow::Result<( - pathfinder_common::transaction::Transaction, - pathfinder_executor::Transaction, -)> { - 
let p2p_proto::consensus::Transaction { - txn, - transaction_hash, - } = transaction; - let (variant, class_info) = match txn { - ConsensusVariant::DeclareV3(DeclareV3WithClass { common, class }) => ( - SyncVariant::DeclareV3(DeclareV3WithoutClass { - common, - class_hash: Default::default(), - }), - Some(class_info(class)?), - ), - ConsensusVariant::DeployAccountV3(v) => (SyncVariant::DeployAccountV3(v), None), - ConsensusVariant::InvokeV3(v) => (SyncVariant::InvokeV3(v), None), - ConsensusVariant::L1HandlerV0(v) => (SyncVariant::L1HandlerV0(v), None), - }; +pub trait TransactionExt { + /// Maps consensus transaction to a pair of: + /// - common transaction, which is used for verifying the transaction hash + /// - executor transaction, which is used for executing the transaction + fn try_map_transaction( + transaction: p2p_proto::consensus::Transaction, + ) -> anyhow::Result<( + pathfinder_common::transaction::Transaction, + pathfinder_executor::Transaction, + )>; + + fn verify_hash(transaction: &Transaction, chain_id: ChainId) -> bool; +} - let common_txn_variant = TransactionVariant::try_from_dto(variant)?; +pub struct ProdTransactionMapper; - let deployed_address = deployed_address(&common_txn_variant); +impl TransactionExt for ProdTransactionMapper { + fn try_map_transaction( + transaction: p2p_proto::consensus::Transaction, + ) -> anyhow::Result<( + pathfinder_common::transaction::Transaction, + pathfinder_executor::Transaction, + )> { + let p2p_proto::consensus::Transaction { + txn, + transaction_hash, + } = transaction; + let (variant, class_info) = match txn { + ConsensusVariant::DeclareV3(DeclareV3WithClass { common, class }) => ( + SyncVariant::DeclareV3(DeclareV3WithoutClass { + common, + class_hash: Default::default(), + }), + Some(class_info(class)?), + ), + ConsensusVariant::DeployAccountV3(v) => (SyncVariant::DeployAccountV3(v), None), + ConsensusVariant::InvokeV3(v) => (SyncVariant::InvokeV3(v), None), + ConsensusVariant::L1HandlerV0(v) => 
(SyncVariant::L1HandlerV0(v), None), + }; - // TODO(validator) why 10^12? - let paid_fee_on_l1 = match &common_txn_variant { - TransactionVariant::L1Handler(_) => { - Some(starknet_api::transaction::fields::Fee(1_000_000_000_000)) - } - _ => None, - }; + let common_txn_variant = TransactionVariant::try_from_dto(variant)?; - let api_txn = to_starknet_api_transaction(common_txn_variant.clone())?; - let tx_hash = starknet_api::transaction::TransactionHash(transaction_hash.0.into_starkfelt()); - let executor_txn = pathfinder_executor::Transaction::from_api( - api_txn, - tx_hash, - class_info, - paid_fee_on_l1, - deployed_address, - pathfinder_executor::AccountTransactionExecutionFlags::default(), - )?; - let common_txn = pathfinder_common::transaction::Transaction { - hash: TransactionHash(transaction_hash.0), - variant: common_txn_variant, - }; + let deployed_address = deployed_address(&common_txn_variant); + + // TODO(validator) why 10^12? + let paid_fee_on_l1 = match &common_txn_variant { + TransactionVariant::L1Handler(_) => { + Some(starknet_api::transaction::fields::Fee(1_000_000_000_000)) + } + _ => None, + }; + + let api_txn = to_starknet_api_transaction(common_txn_variant.clone())?; + let tx_hash = + starknet_api::transaction::TransactionHash(transaction_hash.0.into_starkfelt()); + let executor_txn = pathfinder_executor::Transaction::from_api( + api_txn, + tx_hash, + class_info, + paid_fee_on_l1, + deployed_address, + pathfinder_executor::AccountTransactionExecutionFlags::default(), + )?; + let common_txn = pathfinder_common::transaction::Transaction { + hash: TransactionHash(transaction_hash.0), + variant: common_txn_variant, + }; + + Ok((common_txn, executor_txn)) + } - Ok((common_txn, executor_txn)) + fn verify_hash(transaction: &Transaction, chain_id: ChainId) -> bool { + transaction.verify_hash(chain_id) + } } fn class_info(class: Cairo1Class) -> anyhow::Result { @@ -980,7 +1133,7 @@ fn class_info(class: Cairo1Class) -> anyhow::Result { Ok(ci) } -fn 
deployed_address(txnv: &TransactionVariant) -> Option { +pub fn deployed_address(txnv: &TransactionVariant) -> Option { match txnv { TransactionVariant::DeployAccountV3(t) => Some(starknet_api::core::ContractAddress( starknet_api::core::PatriciaKey::try_from(t.contract_address.get().into_starkfelt()) @@ -1017,6 +1170,7 @@ mod tests { }; use pathfinder_crypto::Felt; use pathfinder_executor::types::BlockInfo; + use pathfinder_executor::BlockExecutor; use pathfinder_storage::StorageBuilder; use super::*; @@ -1075,9 +1229,12 @@ mod tests { starknet_version: StarknetVersion::new(0, 14, 0, 0), }; - let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator_stage = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); // Create batches: 3 batches with 2 transactions each let batches = [ @@ -1088,7 +1245,7 @@ mod tests { // Execute batch 1 validator_stage - .execute_batch(batches[0].clone()) + .execute_batch::(batches[0].clone()) .expect("Failed to execute batch 1"); // Should have 1 batch (state update) after first execution @@ -1105,7 +1262,7 @@ mod tests { // Execute batch 2 validator_stage - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to execute batch 2"); // Should have 2 batches and 2 state updates @@ -1118,7 +1275,7 @@ mod tests { // Execute batch 3 validator_stage - .execute_batch(batches[2].clone()) + .execute_batch::(batches[2].clone()) .expect("Failed to execute batch 3"); // Should have 3 batches now with 6 transactions @@ -1147,7 +1304,7 @@ mod tests { // Make sure we can continue executing after rollback validator_stage - .execute_batch(batches[2].clone()) + .execute_batch::(batches[2].clone()) .expect("Failed to execute batch 3 after rollback"); assert_eq!( @@ -1208,35 +1365,41 @@ mod tests { ]; // Create first validator and 
execute both batches - let mut validator1 = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator1 = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); validator1 - .execute_batch(batches[0].clone()) + .execute_batch::(batches[0].clone()) .expect("Failed to execute batch 1"); validator1 - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to execute batch 2"); let receipts1 = validator1.receipts().to_vec(); // Create second validator and execute, then rollback and re-execute - let mut validator2 = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator2 = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); validator2 - .execute_batch(batches[0].clone()) + .execute_batch::(batches[0].clone()) .expect("Failed to execute batch 1"); validator2 - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to execute batch 2"); // Rollback and re-execute validator2.rollback_to_batch(0).expect("Failed to rollback"); validator2 - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to re-execute batch 2"); let receipts2 = validator2.receipts(); @@ -1284,9 +1447,12 @@ mod tests { starknet_version: StarknetVersion::new(0, 14, 0, 0), }; - let mut validator_stage = - ValidatorTransactionBatchStage::new(chain_id, block_info, storage.clone()) - .expect("Failed to create validator stage"); + let mut validator_stage = ValidatorTransactionBatchStage::::new( + chain_id, + block_info, + storage.clone(), + ) + .expect("Failed to create validator stage"); // Create batches with different sizes to test boundary conditions // Batch 0: 3 
transactions (tx's 0, 1, 2) @@ -1304,13 +1470,13 @@ mod tests { // Execute all batches validator_stage - .execute_batch(batches[0].clone()) + .execute_batch::(batches[0].clone()) .expect("Failed to execute batch 0"); validator_stage - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to execute batch 1"); validator_stage - .execute_batch(batches[2].clone()) + .execute_batch::(batches[2].clone()) .expect("Failed to execute batch 2"); assert_eq!( @@ -1322,7 +1488,7 @@ mod tests { // Rollback to transaction at batch boundary (end of batch 0 = transaction 2) // This should rollback to batch 0 validator_stage - .rollback_to_transaction(2) + .rollback_to_transaction::(2) .expect("Failed to rollback to transaction 2"); assert_eq!( validator_stage.transaction_count(), @@ -1337,16 +1503,16 @@ mod tests { // Re-execute to get back to 7 transactions validator_stage - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to re-execute batch 1"); validator_stage - .execute_batch(batches[2].clone()) + .execute_batch::(batches[2].clone()) .expect("Failed to re-execute batch 2"); // Rollback to transaction at batch boundary (start of batch 1 = transaction 3) // This should rollback to batch 1 (which includes transaction 3) validator_stage - .rollback_to_transaction(3) + .rollback_to_transaction::(3) .expect("Failed to rollback to transaction 3"); assert_eq!( validator_stage.transaction_count(), @@ -1361,13 +1527,13 @@ mod tests { // Re-execute to get back to 7 transactions validator_stage - .execute_batch(batches[2].clone()) + .execute_batch::(batches[2].clone()) .expect("Failed to re-execute batch 2"); // Rollback to transaction in middle of batch (transaction 1 in batch 0) // This should rollback to transaction 1, keeping only first 2 transactions validator_stage - .rollback_to_transaction(1) + .rollback_to_transaction::(1) .expect("Failed to rollback to transaction 1"); assert_eq!( 
validator_stage.transaction_count(), @@ -1384,14 +1550,14 @@ mod tests { // This should keep only the first transaction // First, we need to get back to having multiple transactions validator_stage - .execute_batch(vec![create_test_transaction(2)]) + .execute_batch::(vec![create_test_transaction(2)]) .expect("Failed to add transaction 2 back"); validator_stage - .execute_batch(batches[1].clone()) + .execute_batch::(batches[1].clone()) .expect("Failed to re-execute batch 1"); validator_stage - .rollback_to_transaction(0) + .rollback_to_transaction::(0) .expect("Failed to rollback to transaction 0"); assert_eq!( validator_stage.transaction_count(), @@ -1405,7 +1571,7 @@ mod tests { ); // Verify an out of bounds rollback error - let result = validator_stage.rollback_to_transaction(10); + let result = validator_stage.rollback_to_transaction::(10); assert!( result.is_err(), "Rollback to transaction 10 (out of bounds) should error" @@ -1448,7 +1614,7 @@ mod tests { .expect("Failed to create ValidatorBlockInfoStage"); let validator_transaction_batch = validator_block_info - .validate_consensus_block_info(block_info, storage.clone()) + .validate_consensus_block_info::(block_info, storage.clone()) .expect("Failed to validate block info"); // Verify the validator is in the expected empty state diff --git a/crates/pathfinder/tests/consensus.rs b/crates/pathfinder/tests/consensus.rs index a2844a3cbf..d40e247fff 100644 --- a/crates/pathfinder/tests/consensus.rs +++ b/crates/pathfinder/tests/consensus.rs @@ -32,17 +32,15 @@ mod test { // TODO Test cases that should be supported by the integration tests: // - proposals: - // - [ ] non-empty proposals (L1 handlers + transactions that modify storage), - // - [ ] empty proposals, which follow the spec, ie. 
no transaction batches: + // - [ ] non-empty proposals (L1 handlers + transactions that modify storage): // - ProposalInit, + // - BlockInfo, + // - TransactionBatch(/*Non-empty vec of transactions*/), + // - TransactionsFin, // - ProposalCommitment, // - ProposalFin, - // - [x] consider supporting empty proposals with an empty transaction batch, - // not fully following the spec: + // - [x] empty proposals, which follow the spec, ie. no transaction batches: // - ProposalInit, - // - BlockInfo, - // - TransactionBatch([]), - // - (TransactionsFin cannot be sent in this case), // - ProposalCommitment, // - ProposalFin, // - node set sizes: @@ -54,30 +52,18 @@ mod test { // - [ ] ??? any missing significant failure injection points ???. #[rstest] #[case::happy_path(None)] - // TODO Usually proposal parts at H=13 arrive before the local consensus engine emits a decided - // upon event for H=12. The network moves to H=13, while locally H=12 is uncommitted, so - // executing and thus committing H=13 locally is deferred indefinitely. With fully implemented - // proposal recovery, this should be resolved. 
- #[ignore = "TODO Determine why the test fails"] #[case::fail_on_proposal_init_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::ProposalInitRx }))] - #[ignore = "TODO Determine why the test fails"] #[case::fail_on_block_info_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::BlockInfoRx }))] - #[ignore = "TODO Determine why the test fails"] #[case::fail_on_transaction_batch_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::TransactionBatchRx }))] #[ignore = "TransactionsFin is not currently present in fake proposals, so this test is the \ same as the happy path right now."] #[case::fail_on_transactions_fin_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::TransactionsFinRx }))] - #[ignore = "TODO Determine why the test fails"] #[case::fail_on_proposal_commitment_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::ProposalCommitmentRx }))] - #[ignore = "TODO Determine why the test fails"] #[case::fail_on_proposal_fin_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::ProposalFinRx }))] #[case::fail_on_entire_proposal_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::EntireProposalRx }))] #[case::fail_on_entire_proposal_persisted(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::EntireProposalPersisted }))] - #[ignore = "TODO Determine why the test fails"] #[case::fail_on_prevote_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::PrevoteRx }))] - #[ignore = "TODO Proposal recovery not fully implemented yet"] #[case::fail_on_precommit_rx(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::PrecommitRx }))] - #[ignore = "TODO Proposal recovery not fully implemented yet"] #[case::fail_on_proposal_decided(Some(InjectFailureConfig { height: 13, trigger: InjectFailureTrigger::ProposalDecided }))] #[case::fail_on_proposal_committed(Some(InjectFailureConfig { 
height: 13, trigger: InjectFailureTrigger::ProposalCommitted }))] #[tokio::test] diff --git a/crates/storage/src/connection/consensus.rs b/crates/storage/src/connection/consensus.rs index b3c3fc3f7a..cd6d1f19ad 100644 --- a/crates/storage/src/connection/consensus.rs +++ b/crates/storage/src/connection/consensus.rs @@ -147,6 +147,47 @@ impl Transaction<'_> { .map_err(|e| e.into()) } + pub fn all_last_consensus_proposal_parts( + &self, + validator: &ContractAddress, + ) -> anyhow::Result)>> { + let mut stmt = self.inner().prepare_cached( + r" + SELECT + cp.height, + cp.round, + cp.parts + FROM consensus_proposals AS cp + JOIN ( + SELECT + height, + MAX(round) AS max_round + FROM consensus_proposals + WHERE proposer <> :proposer + GROUP BY height + ) AS m + ON cp.height = m.height + AND cp.round = m.max_round + WHERE cp.proposer <> :proposer + ORDER BY cp.height ASC", + )?; + let mut rows = stmt.query(named_params! { + ":proposer": validator, + })?; + + let mut results = Vec::new(); + + while let Some(row) = rows.next()? 
{ + let height = row.get_i64(0)?; + let round = row.get_i64(1)?; + let buf = row.get_blob(2).map(|x| x.to_vec())?; + + results.push((height, round, buf)); + } + + Ok(results) + } + /// Always all proposers pub fn remove_consensus_proposal_parts( &self, diff --git a/justfile b/justfile index e3292e6d22..395abd01ba 100644 --- a/justfile +++ b/justfile @@ -1,25 +1,30 @@ default: just --summary --unsorted -test $RUST_BACKTRACE="1" *args="": test-consensus build-pathfinder-release +test $RUST_BACKTRACE="1" *args="": build-pathfinder-release cargo nextest run --no-fail-fast --all-targets --features p2p --workspace --locked \ - -E 'not (test(/^p2p_network::sync_handlers::tests::prop/) | test(/^test::consensus_3_nodes/))' \ + -E 'not (test(/^p2p_network::sync::sync_handlers::tests::prop/) | test(/^consensus::inner::p2p_task::handler_proptest/) | test(/^test::consensus_3_nodes/))' \ {{args}} -test-all-features $RUST_BACKTRACE="1" *args="": test-consensus build-pathfinder-release +test-all-features $RUST_BACKTRACE="1" *args="": build-pathfinder-release cargo nextest run --no-fail-fast --all-targets --all-features --workspace --locked \ - -E 'not (test(/^p2p_network::sync_handlers::tests::prop/) | test(/^test::consensus_3_nodes/))' \ + -E 'not (test(/^p2p_network::sync::sync_handlers::tests::prop/) | test(/^consensus::inner::p2p_task::handler_proptest/) | test(/^test::consensus_3_nodes/))' \ {{args}} -test-consensus $RUST_BACKTRACE="1" *args="": +test-consensus $RUST_BACKTRACE="1" *args="": build-pathfinder-release PATHFINDER_TEST_ENABLE_PORT_MARKER_FILES=1 cargo nextest run --test consensus -p pathfinder --features p2p,consensus-integration-tests --locked \ {{args}} -proptest $RUST_BACKTRACE="1" *args="": +proptest-sync-handlers $RUST_BACKTRACE="1" *args="": cargo nextest run --no-fail-fast --all-targets --features p2p --workspace --locked \ -E 'test(/^p2p_network::sync::sync_handlers::tests::prop/)' \ {{args}} +proptest-consensus-handler $RUST_BACKTRACE="1" *args="": + 
cargo nextest run --no-fail-fast --all-targets --features p2p --workspace --locked \ + -E 'test(/^consensus::inner::p2p_task::handler_proptest/)' \ + {{args}} + build: cargo build --workspace --all-targets