diff --git a/.github/workflows/consolidated_system_test.yaml b/.github/workflows/consolidated_system_test.yaml index cc181119e11..0ff4f7048b1 100644 --- a/.github/workflows/consolidated_system_test.yaml +++ b/.github/workflows/consolidated_system_test.yaml @@ -13,12 +13,13 @@ on: env: job_link: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - deployment_config_path: ${{ github.workspace }}/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_consolidated.json + deployment_config_path: ${{ github.workspace }}/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json namespace: sequencer-consolidated-system-test-run-${{ github.run_number }}-attempt-${{ github.run_attempt }} cluster_name: consolidated-system-test crate_triggers: "apollo_node,apollo_deployments,apollo_integration_tests" path_triggers: ".github/workflows/consolidated_system_test.yaml,scripts/*.py,scripts/system_tests/**/*.py" pvc_storage_class_name: "premium-rwo" + anvil_port: "8545" permissions: contents: read @@ -317,7 +318,22 @@ jobs: --interval ${{ env.check_interval_sec }} - name: Copy state and restart pod - run: pipenv run python ./scripts/system_tests/copy_state_and_restart.py --deployment_config_path ${{ env.deployment_config_path }} --data-dir "./output/data/node_0" + run: pipenv run python ./scripts/system_tests/copy_state_and_restart.py --deployment_config_path ${{ env.deployment_config_path }} --data-dir "./output/data/node_0/executable_0" + + - name: Port-forward Anvil pod to localhost:${{ env.anvil_port }} + run: | + echo "🔌 Setting up port-forward to Anvil..." 
+ + ANVIL_POD=$(kubectl get pods -n anvil -l app=anvil -o jsonpath="{.items[0].metadata.name}") + echo "🌐 Found Anvil pod: $ANVIL_POD" + + # Start port-forwarding in background and keep it running + kubectl port-forward -n anvil "$ANVIL_POD" ${{ env.anvil_port }}:${{ env.anvil_port }} & + echo "⏳ Waiting a few seconds to ensure port-forward is established..." + sleep 2 + + - name: Send transactions test + run: pipenv run python ./scripts/system_tests/sequencer_simulator.py --deployment_config_path ${{ env.deployment_config_path }} --config_dir "${{ env.config_dir }}" --node_type "consolidated" --sender_address "${{ env.SENDER_ADDRESS }}" --receiver_address "${{ env.RECEIVER_ADDRESS }}" - name: Get container logs if: always() diff --git a/.github/workflows/sequencer_cdk8s-test.yml b/.github/workflows/sequencer_cdk8s-test.yml index 0acd7e9e608..ba37b673749 100644 --- a/.github/workflows/sequencer_cdk8s-test.yml +++ b/.github/workflows/sequencer_cdk8s-test.yml @@ -26,7 +26,7 @@ jobs: env: cluster: test namespace: test - deployment_config_path: ${{ github.workspace }}/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_consolidated.json + deployment_config_path: ${{ github.workspace }}/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json monitoring_dashboard_file: ${{ github.workspace }}/deployments/monitoring/examples/output/dashboards/sequencer_node_dashboard.json steps: diff --git a/.github/workflows/sequencer_docker-test.yml b/.github/workflows/sequencer_docker-test.yml index 00e93f82ca0..5b923edb009 100644 --- a/.github/workflows/sequencer_docker-test.yml +++ b/.github/workflows/sequencer_docker-test.yml @@ -12,7 +12,7 @@ on: pull_request: env: - crate_triggers: "apollo_node,apollo_dashboard" + crate_triggers: "apollo_node,apollo_dashboard,apollo_integration_tests" path_triggers: ".github/workflows/sequencer_docker-test.yml,scripts/*.py,scripts/system_tests/**/*.py" permissions: diff --git 
a/Cargo.lock b/Cargo.lock index 1b874e984d1..f2829894559 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -850,6 +850,7 @@ dependencies = [ "blockifier", "chrono", "derive_more 0.99.18", + "indexmap 2.9.0", "mockall", "serde", "starknet_api", @@ -892,6 +893,8 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "url", + "validator", ] [[package]] @@ -1029,6 +1032,7 @@ dependencies = [ "tempfile", "thiserror 1.0.69", "tracing", + "url", "validator", ] @@ -1108,6 +1112,7 @@ dependencies = [ "apollo_class_manager_types", "apollo_config", "apollo_consensus", + "apollo_infra", "apollo_infra_utils", "apollo_l1_gas_price_types", "apollo_metrics", @@ -1119,6 +1124,7 @@ dependencies = [ "apollo_storage", "apollo_test_utils", "apollo_time", + "assert_matches", "async-trait", "blockifier", "cairo-lang-casm", @@ -1187,7 +1193,6 @@ dependencies = [ "apollo_infra_utils", "apollo_node", "apollo_protobuf", - "const_format", "hex", "indexmap 2.9.0", "libp2p", @@ -1197,6 +1202,8 @@ dependencies = [ "starknet_api", "strum 0.25.0", "strum_macros 0.25.3", + "tempfile", + "url", ] [[package]] @@ -1232,6 +1239,7 @@ dependencies = [ "mockall", "mockito 1.6.1", "num-bigint 0.4.6", + "num-rational 0.4.2", "pretty_assertions", "reqwest 0.11.27", "rstest", @@ -1277,6 +1285,7 @@ dependencies = [ "apollo_infra", "apollo_infra_utils", "apollo_metrics", + "apollo_proc_macros", "assert_matches", "axum", "base64 0.13.1", @@ -1334,6 +1343,7 @@ dependencies = [ name = "apollo_infra_utils" version = "0.0.0" dependencies = [ + "apollo_proc_macros", "assert-json-diff", "colored 3.0.0", "nix 0.20.2", @@ -1447,6 +1457,7 @@ version = "0.0.0" dependencies = [ "apollo_config", "apollo_infra", + "apollo_infra_utils", "apollo_l1_gas_price_types", "apollo_metrics", "async-trait", @@ -1496,6 +1507,7 @@ dependencies = [ "apollo_batcher_types", "apollo_config", "apollo_infra", + "apollo_infra_utils", "apollo_l1_endpoint_monitor_types", "apollo_l1_provider_types", "apollo_metrics", @@ -1752,11 +1764,9 @@ 
dependencies = [ "apollo_signature_manager_types", "apollo_state_sync", "apollo_state_sync_types", - "assert_matches", "clap", "const_format", "futures", - "mempool_test_utils", "papyrus_base_layer", "pretty_assertions", "rstest", @@ -1809,16 +1819,24 @@ dependencies = [ [[package]] name = "apollo_proc_macros" version = "0.0.0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "apollo_proc_macros_tests" +version = "0.0.0" dependencies = [ "apollo_metrics", + "apollo_proc_macros", "apollo_test_utils", "metrics 0.24.1", "metrics-exporter-prometheus", "papyrus_common", "prometheus-parse", - "quote", "rstest", - "syn 2.0.100", ] [[package]] @@ -12170,6 +12188,7 @@ dependencies = [ "apollo_starknet_os_program", "assert_matches", "blake2s", + "blockifier", "cairo-lang-starknet-classes", "cairo-vm", "clap", @@ -12178,8 +12197,6 @@ dependencies = [ "ethnum", "futures", "indexmap 2.9.0", - "num-bigint 0.4.6", - "num-integer", "pretty_assertions", "rand 0.8.5", "rand_distr", @@ -12220,6 +12237,7 @@ dependencies = [ "cairo-lang-starknet-classes", "cairo-vm", "derive_more 0.99.18", + "ethnum", "indexmap 2.9.0", "indoc 2.0.5", "log", @@ -12237,6 +12255,7 @@ dependencies = [ "shared_execution_objects", "starknet-types-core", "starknet_api", + "starknet_committer", "starknet_patricia", "strum 0.25.0", "strum_macros 0.25.3", diff --git a/Cargo.toml b/Cargo.toml index 952104807f3..aa1a2546084 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ members = [ "crates/apollo_node", "crates/apollo_p2p_sync", "crates/apollo_proc_macros", + "crates/apollo_proc_macros_tests", "crates/apollo_protobuf", "crates/apollo_reverts", "crates/apollo_rpc", @@ -127,7 +128,8 @@ apollo_network.path = "crates/apollo_network" apollo_network_types.path = "crates/apollo_network_types" apollo_node.path = "crates/apollo_node" apollo_p2p_sync.path = "crates/apollo_p2p_sync" -apollo_proc_macros.path = "crates/apollo_proc_macros" +apollo_proc_macros = { path = 
"crates/apollo_proc_macros", version = "0.0.0" } +apollo_proc_macros_tests.path = "crates/apollo_proc_macros_tests" apollo_protobuf.path = "crates/apollo_protobuf" apollo_reverts.path = "crates/apollo_reverts" apollo_rpc.path = "crates/apollo_rpc" @@ -252,6 +254,7 @@ paste = "1.0.15" phf = "0.11" pretty_assertions = "1.4.0" primitive-types = "0.12.1" +proc-macro2 = "1.0" prometheus-parse = "0.2.4" prost = "0.12.1" prost-build = "0.12.1" diff --git a/crates/apollo_batcher/src/batcher.rs b/crates/apollo_batcher/src/batcher.rs index 0364d619d18..125ed90959c 100644 --- a/crates/apollo_batcher/src/batcher.rs +++ b/crates/apollo_batcher/src/batcher.rs @@ -71,11 +71,11 @@ use crate::metrics::{ SYNCED_TRANSACTIONS, }; use crate::pre_confirmed_block_writer::{ - PreConfirmedBlockWriterFactory, - PreConfirmedBlockWriterFactoryTrait, - PreConfirmedBlockWriterTrait, + PreconfirmedBlockWriterFactory, + PreconfirmedBlockWriterFactoryTrait, + PreconfirmedBlockWriterTrait, }; -use crate::pre_confirmed_cende_client::PreConfirmedCendeClientTrait; +use crate::pre_confirmed_cende_client::PreconfirmedCendeClientTrait; use crate::transaction_provider::{ProposeTransactionProvider, ValidateTransactionProvider}; use crate::utils::{ deadline_as_instant, @@ -101,7 +101,7 @@ pub struct Batcher { block_builder_factory: Box, /// Used to create pre-confirmed block writers. - pre_confirmed_block_writer_factory: Box, + pre_confirmed_block_writer_factory: Box, /// The height that the batcher is currently working on. /// All proposals are considered to be at this height. 
@@ -137,7 +137,7 @@ impl Batcher { mempool_client: SharedMempoolClient, transaction_converter: TransactionConverter, block_builder_factory: Box, - pre_confirmed_block_writer_factory: Box, + pre_confirmed_block_writer_factory: Box, ) -> Self { Self { config, @@ -580,12 +580,7 @@ impl Batcher { block_execution_artifacts.execution_data.rejected_tx_hashes, ) .await?; - let execution_infos: Vec<_> = block_execution_artifacts - .execution_data - .execution_infos - .into_iter() - .map(|(_, info)| info) - .collect(); + let execution_infos = block_execution_artifacts.execution_data.execution_infos; LAST_BATCHED_BLOCK.set_lossy(height.0); BATCHED_TRANSACTIONS.increment(n_txs); @@ -711,7 +706,7 @@ impl Batcher { mut block_builder: Box, abort_signal_sender: tokio::sync::oneshot::Sender<()>, final_n_executed_txs_sender: Option>, - pre_confirmed_block_writer: Option>, + pre_confirmed_block_writer: Option>, mut proposal_metrics_handle: ProposalMetricsHandle, ) -> BatcherResult<()> { self.set_active_proposal(proposal_id).await?; @@ -862,14 +857,14 @@ pub fn create_batcher( mempool_client: SharedMempoolClient, l1_provider_client: SharedL1ProviderClient, class_manager_client: SharedClassManagerClient, - pre_confirmed_cende_client: Arc, + pre_confirmed_cende_client: Arc, ) -> Batcher { let (storage_reader, storage_writer) = apollo_storage::open_storage(config.storage.clone()) .expect("Failed to open batcher's storage"); let execute_config = &config.block_builder_config.execute_config; let worker_pool = Arc::new(WorkerPool::start(execute_config)); - let pre_confirmed_block_writer_factory = Box::new(PreConfirmedBlockWriterFactory { + let pre_confirmed_block_writer_factory = Box::new(PreconfirmedBlockWriterFactory { config: config.pre_confirmed_block_writer_config, cende_client: pre_confirmed_cende_client, }); diff --git a/crates/apollo_batcher/src/batcher_test.rs b/crates/apollo_batcher/src/batcher_test.rs index cdba63505d8..9cdad3669b3 100644 --- 
a/crates/apollo_batcher/src/batcher_test.rs +++ b/crates/apollo_batcher/src/batcher_test.rs @@ -31,7 +31,6 @@ use apollo_mempool_types::mempool_types::CommitBlockArgs; use apollo_state_sync_types::state_sync_types::SyncBlock; use assert_matches::assert_matches; use blockifier::abi::constants; -use blockifier::transaction::objects::TransactionExecutionInfo; use indexmap::{indexmap, IndexSet}; use metrics_exporter_prometheus::PrometheusBuilder; use mockall::predicate::eq; @@ -69,8 +68,8 @@ use crate::metrics::{ SYNCED_TRANSACTIONS, }; use crate::pre_confirmed_block_writer::{ - MockPreConfirmedBlockWriterFactoryTrait, - MockPreConfirmedBlockWriterTrait, + MockPreconfirmedBlockWriterFactoryTrait, + MockPreconfirmedBlockWriterTrait, }; use crate::test_utils::{ test_txs, @@ -117,7 +116,7 @@ struct MockDependencies { mempool_client: MockMempoolClient, l1_provider_client: MockL1ProviderClient, block_builder_factory: MockBlockBuilderFactoryTrait, - pre_confirmed_block_writer_factory: MockPreConfirmedBlockWriterFactoryTrait, + pre_confirmed_block_writer_factory: MockPreconfirmedBlockWriterFactoryTrait, class_manager_client: SharedClassManagerClient, } @@ -138,11 +137,11 @@ impl Default for MockDependencies { .with(eq(CommitBlockArgs::default())) .returning(|_| Ok(())); let block_builder_factory = MockBlockBuilderFactoryTrait::new(); - let mut pre_confirmed_block_writer_factory = MockPreConfirmedBlockWriterFactoryTrait::new(); + let mut pre_confirmed_block_writer_factory = MockPreconfirmedBlockWriterFactoryTrait::new(); pre_confirmed_block_writer_factory.expect_create().returning(|_, _, _| { let (non_working_candidate_tx_sender, _) = tokio::sync::mpsc::channel(1); let (non_working_pre_confirmed_tx_sender, _) = tokio::sync::mpsc::channel(1); - let mut mock_writer = Box::new(MockPreConfirmedBlockWriterTrait::new()); + let mut mock_writer = Box::new(MockPreconfirmedBlockWriterTrait::new()); mock_writer.expect_run().return_once(|| Box::pin(async move { Ok(()) })); (mock_writer, 
non_working_candidate_tx_sender, non_working_pre_confirmed_tx_sender) }); @@ -301,7 +300,7 @@ fn verify_decision_reached_response( assert_eq!(response.central_objects.bouncer_weights, expected_artifacts.bouncer_weights); assert_eq!( response.central_objects.execution_infos, - expected_artifacts.execution_data.execution_infos.values().cloned().collect::>() + expected_artifacts.execution_data.execution_infos ); } @@ -1088,8 +1087,7 @@ async fn test_execution_info_order_is_kept() { batcher_propose_and_commit_block(mock_dependencies).await.unwrap(); // Verify that the execution_infos are in the same order as returned from the block_builder. - let expected_execution_infos: Vec = - block_builder_result.execution_data.execution_infos.into_values().collect(); + let expected_execution_infos = block_builder_result.execution_data.execution_infos; assert_eq!(decision_reached_response.central_objects.execution_infos, expected_execution_infos); } diff --git a/crates/apollo_batcher/src/block_builder.rs b/crates/apollo_batcher/src/block_builder.rs index 80e182e08c7..844c0574f26 100644 --- a/crates/apollo_batcher/src/block_builder.rs +++ b/crates/apollo_batcher/src/block_builder.rs @@ -52,7 +52,7 @@ use tracing::{debug, error, info, trace, warn}; use crate::block_builder::FailOnErrorCause::L1HandlerTransactionValidationFailed; use crate::cende_client_types::{StarknetClientStateDiff, StarknetClientTransactionReceipt}; use crate::metrics::FULL_BLOCKS; -use crate::pre_confirmed_block_writer::{CandidateTxSender, PreConfirmedTxSender}; +use crate::pre_confirmed_block_writer::{CandidateTxSender, PreconfirmedTxSender}; use crate::transaction_executor::TransactionExecutorTrait; use crate::transaction_provider::{TransactionProvider, TransactionProviderError}; @@ -167,7 +167,7 @@ pub struct BlockBuilder { output_content_sender: Option>, /// The senders are utilized only during block proposal and not during block validation. 
candidate_tx_sender: Option, - pre_confirmed_tx_sender: Option, + pre_confirmed_tx_sender: Option, abort_signal_receiver: tokio::sync::oneshot::Receiver<()>, transaction_converter: TransactionConverter, /// The number of transactions whose execution is completed. @@ -191,7 +191,7 @@ impl BlockBuilder { tokio::sync::mpsc::UnboundedSender, >, candidate_tx_sender: Option, - pre_confirmed_tx_sender: Option, + pre_confirmed_tx_sender: Option, abort_signal_receiver: tokio::sync::oneshot::Receiver<()>, transaction_converter: TransactionConverter, n_concurrent_txs: usize, @@ -436,23 +436,23 @@ impl BlockBuilder { trace!( "Attempting to send a candidate transaction chunk with {num_txs} transactions to the \ - PreConfirmedBlockWriter.", + PreconfirmedBlockWriter.", ); match candidate_tx_sender.try_send(txs) { Ok(_) => { info!( "Successfully sent a candidate transaction chunk with {num_txs} transactions \ - to the PreConfirmedBlockWriter.", + to the PreconfirmedBlockWriter.", ); } // We continue with block building even if sending candidate transactions to - // the PreConfirmedBlockWriter fails because it is not critical for the block + // the PreconfirmedBlockWriter fails because it is not critical for the block // building process. 
Err(err) => { error!( "Failed to send a candidate transaction chunk with {num_txs} transactions to \ - the PreConfirmedBlockWriter: {:?}", + the PreconfirmedBlockWriter: {:?}", err ); } @@ -484,7 +484,7 @@ async fn collect_execution_results_and_stream_txs( tx_chunk: &[InternalConsensusTransaction], results: Vec>, execution_data: &mut BlockTransactionExecutionData, - pre_confirmed_tx_sender: &Option, + pre_confirmed_tx_sender: &Option, ) -> BlockBuilderResult<()> { assert!( results.len() == tx_chunk.len(), @@ -535,7 +535,7 @@ async fn collect_execution_results_and_stream_txs( )); if result.is_err() { // We continue with block building even if sending data to The - // PreConfirmedBlockWriter fails because it is not critical + // PreconfirmedBlockWriter fails because it is not critical // for the block building process. warn!("Sending data to preconfirmed block writer failed."); } @@ -580,7 +580,7 @@ pub trait BlockBuilderFactoryTrait: Send + Sync { tokio::sync::mpsc::UnboundedSender, >, candidate_tx_sender: Option, - pre_confirmed_tx_sender: Option, + pre_confirmed_tx_sender: Option, runtime: tokio::runtime::Handle, ) -> BlockBuilderResult<(Box, AbortSignalSender)>; } @@ -603,7 +603,7 @@ impl Default for BlockBuilderConfig { execute_config: WorkerPoolConfig::default(), bouncer_config: BouncerConfig::default(), n_concurrent_txs: 100, - tx_polling_interval_millis: 100, + tx_polling_interval_millis: 1, versioned_constants_overrides: VersionedConstantsOverrides::default(), } } @@ -694,7 +694,7 @@ impl BlockBuilderFactoryTrait for BlockBuilderFactory { tokio::sync::mpsc::UnboundedSender, >, candidate_tx_sender: Option, - pre_confirmed_tx_sender: Option, + pre_confirmed_tx_sender: Option, runtime: tokio::runtime::Handle, ) -> BlockBuilderResult<(Box, AbortSignalSender)> { let executor = self.preprocess_and_create_transaction_executor(block_metadata, runtime)?; diff --git a/crates/apollo_batcher/src/cende_client_types.rs b/crates/apollo_batcher/src/cende_client_types.rs 
index 885bc3ef332..ef511ce5437 100644 --- a/crates/apollo_batcher/src/cende_client_types.rs +++ b/crates/apollo_batcher/src/cende_client_types.rs @@ -324,7 +324,7 @@ fn get_execution_resources(execution_info: &TransactionExecutionInfo) -> Executi // Also a few modifications were made to the serialization format. #[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)] #[serde(tag = "type")] -pub enum CendePreConfirmedTransaction { +pub enum CendePreconfirmedTransaction { #[serde(rename = "DECLARE")] Declare(IntermediateDeclareTransaction), #[serde(rename = "DEPLOY_ACCOUNT")] @@ -335,18 +335,18 @@ pub enum CendePreConfirmedTransaction { L1Handler(L1HandlerTransaction), } -impl CendePreConfirmedTransaction { +impl CendePreconfirmedTransaction { pub fn transaction_hash(&self) -> TransactionHash { match self { - CendePreConfirmedTransaction::Declare(tx) => tx.transaction_hash, - CendePreConfirmedTransaction::DeployAccount(tx) => tx.transaction_hash, - CendePreConfirmedTransaction::Invoke(tx) => tx.transaction_hash, - CendePreConfirmedTransaction::L1Handler(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::Declare(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::DeployAccount(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::Invoke(tx) => tx.transaction_hash, + CendePreconfirmedTransaction::L1Handler(tx) => tx.transaction_hash, } } } -impl From for CendePreConfirmedTransaction { +impl From for CendePreconfirmedTransaction { fn from(transaction: InternalConsensusTransaction) -> Self { match transaction { InternalConsensusTransaction::RpcTransaction(internal_rpc_transaction) => { @@ -467,7 +467,7 @@ pub struct IntermediateInvokeTransaction { pub version: TransactionVersion, } -impl From for CendePreConfirmedTransaction { +impl From for CendePreconfirmedTransaction { fn from(internal_rpc_transaction: InternalRpcTransaction) -> Self { let tx_hash = internal_rpc_transaction.tx_hash; match internal_rpc_transaction.tx { @@ -475,7 +475,7 @@ 
impl From for CendePreConfirmedTransaction { declare_transaction, ) => { let version = declare_transaction.version(); - CendePreConfirmedTransaction::Declare(IntermediateDeclareTransaction { + CendePreconfirmedTransaction::Declare(IntermediateDeclareTransaction { resource_bounds: Some(declare_transaction.resource_bounds.into()), tip: Some(declare_transaction.tip), signature: declare_transaction.signature, @@ -505,7 +505,7 @@ impl From for CendePreConfirmedTransaction { tx: RpcDeployAccountTransaction::V3(tx), contract_address, } = deploy_account_transaction; - CendePreConfirmedTransaction::DeployAccount(IntermediateDeployAccountTransaction { + CendePreconfirmedTransaction::DeployAccount(IntermediateDeployAccountTransaction { resource_bounds: Some(tx.resource_bounds.into()), tip: Some(tx.tip), signature: tx.signature, @@ -528,7 +528,7 @@ impl From for CendePreConfirmedTransaction { ) => { let version = invoke_transaction.version(); let RpcInvokeTransaction::V3(tx) = invoke_transaction; - CendePreConfirmedTransaction::Invoke(IntermediateInvokeTransaction { + CendePreconfirmedTransaction::Invoke(IntermediateInvokeTransaction { resource_bounds: Some(tx.resource_bounds.into()), tip: Some(tx.tip), calldata: tx.calldata, @@ -562,10 +562,10 @@ pub struct L1HandlerTransaction { pub calldata: Calldata, } -impl From for CendePreConfirmedTransaction { +impl From for CendePreconfirmedTransaction { fn from(l1_handler_transaction: ExecutableL1HandlerTransaction) -> Self { let ExecutableL1HandlerTransaction { tx, tx_hash, .. 
} = l1_handler_transaction; - CendePreConfirmedTransaction::L1Handler(L1HandlerTransaction { + CendePreconfirmedTransaction::L1Handler(L1HandlerTransaction { transaction_hash: tx_hash, version: tx.version, nonce: tx.nonce, @@ -636,10 +636,10 @@ fn get_gas_prices( } #[derive(Serialize)] -pub struct CendePreConfirmedBlock { +pub struct CendePreconfirmedBlock { #[serde(flatten)] pub metadata: CendeBlockMetadata, - pub transactions: Vec, + pub transactions: Vec, pub transaction_receipts: Vec>, pub transaction_state_diffs: Vec>, } diff --git a/crates/apollo_batcher/src/config.rs b/crates/apollo_batcher/src/config.rs index 4029b844abb..3700738a61d 100644 --- a/crates/apollo_batcher/src/config.rs +++ b/crates/apollo_batcher/src/config.rs @@ -7,8 +7,8 @@ use serde::{Deserialize, Serialize}; use validator::{Validate, ValidationError}; use crate::block_builder::BlockBuilderConfig; -use crate::pre_confirmed_block_writer::PreConfirmedBlockWriterConfig; -use crate::pre_confirmed_cende_client::PreConfirmedCendeConfig; +use crate::pre_confirmed_block_writer::PreconfirmedBlockWriterConfig; +use crate::pre_confirmed_cende_client::PreconfirmedCendeConfig; /// The batcher related configuration. 
#[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)] @@ -18,10 +18,10 @@ pub struct BatcherConfig { pub outstream_content_buffer_size: usize, pub input_stream_content_buffer_size: usize, pub block_builder_config: BlockBuilderConfig, - pub pre_confirmed_block_writer_config: PreConfirmedBlockWriterConfig, + pub pre_confirmed_block_writer_config: PreconfirmedBlockWriterConfig, pub contract_class_manager_config: ContractClassManagerConfig, pub max_l1_handler_txs_per_block_proposal: usize, - pub pre_confirmed_cende_config: PreConfirmedCendeConfig, + pub pre_confirmed_cende_config: PreconfirmedCendeConfig, } impl SerializeConfig for BatcherConfig { @@ -85,10 +85,10 @@ impl Default for BatcherConfig { outstream_content_buffer_size: 100, input_stream_content_buffer_size: 400, block_builder_config: BlockBuilderConfig::default(), - pre_confirmed_block_writer_config: PreConfirmedBlockWriterConfig::default(), + pre_confirmed_block_writer_config: PreconfirmedBlockWriterConfig::default(), contract_class_manager_config: ContractClassManagerConfig::default(), max_l1_handler_txs_per_block_proposal: 3, - pre_confirmed_cende_config: PreConfirmedCendeConfig::default(), + pre_confirmed_cende_config: PreconfirmedCendeConfig::default(), } } } diff --git a/crates/apollo_batcher/src/pre_confirmed_block_writer.rs b/crates/apollo_batcher/src/pre_confirmed_block_writer.rs index cbba3c6e99d..dfccbcbfce6 100644 --- a/crates/apollo_batcher/src/pre_confirmed_block_writer.rs +++ b/crates/apollo_batcher/src/pre_confirmed_block_writer.rs @@ -22,20 +22,20 @@ use tracing::info; use crate::cende_client_types::{ CendeBlockMetadata, - CendePreConfirmedBlock, - CendePreConfirmedTransaction, + CendePreconfirmedBlock, + CendePreconfirmedTransaction, StarknetClientTransactionReceipt, }; use crate::pre_confirmed_cende_client::{ - CendeWritePreConfirmedBlock, - PreConfirmedCendeClientError, - PreConfirmedCendeClientTrait, + CendeWritePreconfirmedBlock, + PreconfirmedCendeClientError, + 
PreconfirmedCendeClientTrait, }; #[derive(Debug, Error)] pub enum BlockWriterError { #[error(transparent)] - PreConfirmedCendeClientError(#[from] PreConfirmedCendeClientError), + PreconfirmedCendeClientError(#[from] PreconfirmedCendeClientError), } pub type BlockWriterResult = Result; @@ -43,13 +43,13 @@ pub type BlockWriterResult = Result; pub type CandidateTxReceiver = tokio::sync::mpsc::Receiver>; pub type CandidateTxSender = tokio::sync::mpsc::Sender>; -pub type PreConfirmedTxReceiver = tokio::sync::mpsc::Receiver<( +pub type PreconfirmedTxReceiver = tokio::sync::mpsc::Receiver<( InternalConsensusTransaction, StarknetClientTransactionReceipt, StateDiff, )>; -pub type PreConfirmedTxSender = tokio::sync::mpsc::Sender<( +pub type PreconfirmedTxSender = tokio::sync::mpsc::Sender<( InternalConsensusTransaction, StarknetClientTransactionReceipt, StateDiff, @@ -60,24 +60,24 @@ pub type PreConfirmedTxSender = tokio::sync::mpsc::Sender<( /// Cende client to communicate the updates to the Cende recorder. 
#[async_trait] #[cfg_attr(test, automock)] -pub trait PreConfirmedBlockWriterTrait: Send { +pub trait PreconfirmedBlockWriterTrait: Send { async fn run(&mut self) -> BlockWriterResult<()>; } -pub struct PreConfirmedBlockWriter { - pre_confirmed_block_writer_input: PreConfirmedBlockWriterInput, +pub struct PreconfirmedBlockWriter { + pre_confirmed_block_writer_input: PreconfirmedBlockWriterInput, candidate_tx_receiver: CandidateTxReceiver, - pre_confirmed_tx_receiver: PreConfirmedTxReceiver, - cende_client: Arc, + pre_confirmed_tx_receiver: PreconfirmedTxReceiver, + cende_client: Arc, write_block_interval_millis: u64, } -impl PreConfirmedBlockWriter { +impl PreconfirmedBlockWriter { pub fn new( - pre_confirmed_block_writer_input: PreConfirmedBlockWriterInput, + pre_confirmed_block_writer_input: PreconfirmedBlockWriterInput, candidate_tx_receiver: CandidateTxReceiver, - pre_confirmed_tx_receiver: PreConfirmedTxReceiver, - cende_client: Arc, + pre_confirmed_tx_receiver: PreconfirmedTxReceiver, + cende_client: Arc, write_block_interval_millis: u64, ) -> Self { Self { @@ -94,13 +94,13 @@ impl PreConfirmedBlockWriter { transactions_map: &IndexMap< TransactionHash, ( - CendePreConfirmedTransaction, + CendePreconfirmedTransaction, Option, Option, ), >, write_iteration: u64, - ) -> CendeWritePreConfirmedBlock { + ) -> CendeWritePreconfirmedBlock { let mut transactions = Vec::with_capacity(transactions_map.len()); let mut transaction_receipts = Vec::with_capacity(transactions_map.len()); let mut transaction_state_diffs = Vec::with_capacity(transactions_map.len()); @@ -111,14 +111,14 @@ impl PreConfirmedBlockWriter { transaction_state_diffs.push(tx_state_diff.clone()); } - let pre_confirmed_block = CendePreConfirmedBlock { + let pre_confirmed_block = CendePreconfirmedBlock { metadata: self.pre_confirmed_block_writer_input.block_metadata.clone(), transactions, transaction_receipts, transaction_state_diffs, }; - CendeWritePreConfirmedBlock { + CendeWritePreconfirmedBlock { 
block_number: self.pre_confirmed_block_writer_input.block_number, round: self.pre_confirmed_block_writer_input.round, write_iteration, @@ -128,12 +128,12 @@ impl PreConfirmedBlockWriter { } #[async_trait] -impl PreConfirmedBlockWriterTrait for PreConfirmedBlockWriter { +impl PreconfirmedBlockWriterTrait for PreconfirmedBlockWriter { async fn run(&mut self) -> BlockWriterResult<()> { let mut transactions_map: IndexMap< TransactionHash, ( - CendePreConfirmedTransaction, + CendePreconfirmedTransaction, Option, Option, ), @@ -168,7 +168,7 @@ impl PreConfirmedBlockWriterTrait for PreConfirmedBlockWriter { msg = self.pre_confirmed_tx_receiver.recv() => { match msg { Some((tx, tx_receipt, tx_state_diff)) => { - let tx = CendePreConfirmedTransaction::from(tx); + let tx = CendePreconfirmedTransaction::from(tx); let tx_hash = tx.transaction_hash(); transactions_map.insert(tx_hash, (tx, Some(tx_receipt), Some(tx_state_diff))); pending_changes = true; @@ -184,7 +184,7 @@ impl PreConfirmedBlockWriterTrait for PreConfirmedBlockWriter { Some(txs) => { // Skip transactions that were already executed, to avoid an unnecessary write. 
for tx in txs { - let tx = CendePreConfirmedTransaction::from(tx); + let tx = CendePreconfirmedTransaction::from(tx); match transactions_map.entry(tx.transaction_hash()) { Entry::Vacant(entry) => { entry.insert((tx, None, None)); @@ -219,18 +219,18 @@ impl PreConfirmedBlockWriterTrait for PreConfirmedBlockWriter { } #[derive(Serialize, Deserialize, Clone, PartialEq, Debug, Copy)] -pub struct PreConfirmedBlockWriterConfig { +pub struct PreconfirmedBlockWriterConfig { pub channel_buffer_capacity: usize, pub write_block_interval_millis: u64, } -impl Default for PreConfirmedBlockWriterConfig { +impl Default for PreconfirmedBlockWriterConfig { fn default() -> Self { Self { channel_buffer_capacity: 1000, write_block_interval_millis: 50 } } } -impl SerializeConfig for PreConfirmedBlockWriterConfig { +impl SerializeConfig for PreconfirmedBlockWriterConfig { fn dump(&self) -> BTreeMap { BTreeMap::from_iter([ ser_param( @@ -251,27 +251,27 @@ impl SerializeConfig for PreConfirmedBlockWriterConfig { } #[cfg_attr(test, automock)] -pub trait PreConfirmedBlockWriterFactoryTrait: Send + Sync { +pub trait PreconfirmedBlockWriterFactoryTrait: Send + Sync { fn create( &self, block_number: BlockNumber, proposal_round: Round, block_metadata: CendeBlockMetadata, - ) -> (Box, CandidateTxSender, PreConfirmedTxSender); + ) -> (Box, CandidateTxSender, PreconfirmedTxSender); } -pub struct PreConfirmedBlockWriterFactory { - pub config: PreConfirmedBlockWriterConfig, - pub cende_client: Arc, +pub struct PreconfirmedBlockWriterFactory { + pub config: PreconfirmedBlockWriterConfig, + pub cende_client: Arc, } -impl PreConfirmedBlockWriterFactoryTrait for PreConfirmedBlockWriterFactory { +impl PreconfirmedBlockWriterFactoryTrait for PreconfirmedBlockWriterFactory { fn create( &self, block_number: BlockNumber, round: Round, block_metadata: CendeBlockMetadata, - ) -> (Box, CandidateTxSender, PreConfirmedTxSender) { + ) -> (Box, CandidateTxSender, PreconfirmedTxSender) { // Initialize channels for 
communication between the pre confirmed block writer and the // block builder. let (pre_confirmed_tx_sender, pre_confirmed_tx_receiver) = @@ -282,9 +282,9 @@ impl PreConfirmedBlockWriterFactoryTrait for PreConfirmedBlockWriterFactory { let cende_client = self.cende_client.clone(); let pre_confirmed_block_writer_input = - PreConfirmedBlockWriterInput { block_number, round, block_metadata }; + PreconfirmedBlockWriterInput { block_number, round, block_metadata }; - let pre_confirmed_block_writer = Box::new(PreConfirmedBlockWriter::new( + let pre_confirmed_block_writer = Box::new(PreconfirmedBlockWriter::new( pre_confirmed_block_writer_input, candidate_tx_receiver, pre_confirmed_tx_receiver, @@ -296,7 +296,7 @@ impl PreConfirmedBlockWriterFactoryTrait for PreConfirmedBlockWriterFactory { } // TODO(noamsp): find a better name for this struct. -pub struct PreConfirmedBlockWriterInput { +pub struct PreconfirmedBlockWriterInput { pub block_number: BlockNumber, pub round: Round, pub block_metadata: CendeBlockMetadata, diff --git a/crates/apollo_batcher/src/pre_confirmed_cende_client.rs b/crates/apollo_batcher/src/pre_confirmed_cende_client.rs index ad58546b8ef..55a660f4f82 100644 --- a/crates/apollo_batcher/src/pre_confirmed_cende_client.rs +++ b/crates/apollo_batcher/src/pre_confirmed_cende_client.rs @@ -11,33 +11,32 @@ use thiserror::Error; use tracing::{debug, error, trace, warn}; use url::Url; -use crate::cende_client_types::CendePreConfirmedBlock; +use crate::cende_client_types::CendePreconfirmedBlock; use crate::metrics::PRECONFIRMED_BLOCK_WRITTEN; -// TODO(noamsp): rename PreConfirmed.. to Preconfirmed.. throughout the codebase. #[derive(Debug, Error)] // TODO(noamsp): add block number/round mismatch and handle it in the client implementation. 
-pub enum PreConfirmedCendeClientError { +pub enum PreconfirmedCendeClientError { #[error(transparent)] RequestError(#[from] reqwest::Error), #[error("CendeRecorder returned an error: {0}")] CendeRecorderError(String), } -pub type PreConfirmedCendeClientResult = Result; +pub type PreconfirmedCendeClientResult = Result; /// Interface for communicating pre-confirmed block data to the Cende recorder during block /// proposal. #[async_trait] -pub trait PreConfirmedCendeClientTrait: Send + Sync { +pub trait PreconfirmedCendeClientTrait: Send + Sync { /// Notifies the Cende recorder about a pre-confirmed block update. async fn write_pre_confirmed_block( &self, - pre_confirmed_block: CendeWritePreConfirmedBlock, - ) -> PreConfirmedCendeClientResult<()>; + pre_confirmed_block: CendeWritePreconfirmedBlock, + ) -> PreconfirmedCendeClientResult<()>; } -pub struct PreConfirmedCendeClient { +pub struct PreconfirmedCendeClient { write_pre_confirmed_block_url: Url, client: Client, } @@ -46,8 +45,8 @@ pub struct PreConfirmedCendeClient { pub const RECORDER_WRITE_PRE_CONFIRMED_BLOCK_PATH: &str = "/cende_recorder/write_pre_confirmed_block"; -impl PreConfirmedCendeClient { - pub fn new(config: PreConfirmedCendeConfig) -> Self { +impl PreconfirmedCendeClient { + pub fn new(config: PreconfirmedCendeConfig) -> Self { let recorder_url = config.recorder_url; Self { @@ -60,11 +59,11 @@ impl PreConfirmedCendeClient { } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct PreConfirmedCendeConfig { +pub struct PreconfirmedCendeConfig { pub recorder_url: Url, } -impl Default for PreConfirmedCendeConfig { +impl Default for PreconfirmedCendeConfig { fn default() -> Self { Self { recorder_url: "https://recorder_url" @@ -74,7 +73,7 @@ impl Default for PreConfirmedCendeConfig { } } -impl SerializeConfig for PreConfirmedCendeConfig { +impl SerializeConfig for PreconfirmedCendeConfig { fn dump(&self) -> BTreeMap { BTreeMap::from([ser_param( "recorder_url", @@ -86,19 +85,19 @@ impl 
SerializeConfig for PreConfirmedCendeConfig { } #[derive(Serialize)] -pub struct CendeWritePreConfirmedBlock { +pub struct CendeWritePreconfirmedBlock { pub block_number: BlockNumber, pub round: Round, pub write_iteration: u64, - pub pre_confirmed_block: CendePreConfirmedBlock, + pub pre_confirmed_block: CendePreconfirmedBlock, } #[async_trait] -impl PreConfirmedCendeClientTrait for PreConfirmedCendeClient { +impl PreconfirmedCendeClientTrait for PreconfirmedCendeClient { async fn write_pre_confirmed_block( &self, - pre_confirmed_block: CendeWritePreConfirmedBlock, - ) -> PreConfirmedCendeClientResult<()> { + pre_confirmed_block: CendeWritePreconfirmedBlock, + ) -> PreconfirmedCendeClientResult<()> { let block_number = pre_confirmed_block.block_number; let round = pre_confirmed_block.round; let write_iteration = pre_confirmed_block.write_iteration; @@ -127,7 +126,7 @@ impl PreConfirmedCendeClientTrait for PreConfirmedCendeClient { {round}, write_iteration: {write_iteration}, status: {response_status}", ); warn!("{error_msg}"); - Err(PreConfirmedCendeClientError::CendeRecorderError(error_msg)) + Err(PreconfirmedCendeClientError::CendeRecorderError(error_msg)) } } } diff --git a/crates/apollo_batcher_types/Cargo.toml b/crates/apollo_batcher_types/Cargo.toml index a32e277818b..a7a331a2594 100644 --- a/crates/apollo_batcher_types/Cargo.toml +++ b/crates/apollo_batcher_types/Cargo.toml @@ -19,6 +19,7 @@ async-trait.workspace = true blockifier = { workspace = true, features = ["transaction_serde"] } chrono = { workspace = true, features = ["serde"] } derive_more.workspace = true +indexmap.workspace = true mockall = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"] } starknet_api.workspace = true diff --git a/crates/apollo_batcher_types/src/batcher_types.rs b/crates/apollo_batcher_types/src/batcher_types.rs index fb4ed5d29da..4c57036d60d 100644 --- a/crates/apollo_batcher_types/src/batcher_types.rs +++ 
b/crates/apollo_batcher_types/src/batcher_types.rs @@ -4,12 +4,14 @@ use blockifier::bouncer::{BouncerWeights, CasmHashComputationData}; use blockifier::state::cached_state::CommitmentStateDiff; use blockifier::transaction::objects::TransactionExecutionInfo; use chrono::prelude::*; +use indexmap::IndexMap; use serde::{Deserialize, Serialize}; use starknet_api::block::{BlockHashAndNumber, BlockInfo, BlockNumber}; use starknet_api::consensus_transaction::InternalConsensusTransaction; use starknet_api::core::StateDiffCommitment; use starknet_api::execution_resources::GasAmount; use starknet_api::state::ThinStateDiff; +use starknet_api::transaction::TransactionHash; use crate::errors::BatcherError; @@ -100,7 +102,7 @@ pub struct SendProposalContentResponse { #[derive(Debug, Serialize, Deserialize, PartialEq)] #[cfg_attr(any(test, feature = "testing"), derive(Default))] pub struct CentralObjects { - pub execution_infos: Vec, + pub execution_infos: IndexMap, pub bouncer_weights: BouncerWeights, pub compressed_state_diff: Option, pub casm_hash_computation_data_sierra_gas: CasmHashComputationData, diff --git a/crates/apollo_central_sync/Cargo.toml b/crates/apollo_central_sync/Cargo.toml index 7f1faf9e7f8..8fdaacc4064 100644 --- a/crates/apollo_central_sync/Cargo.toml +++ b/crates/apollo_central_sync/Cargo.toml @@ -21,6 +21,7 @@ cairo-lang-starknet-classes.workspace = true chrono.workspace = true futures.workspace = true futures-util.workspace = true +validator.workspace = true indexmap = { workspace = true, features = ["serde"] } itertools.workspace = true lru.workspace = true @@ -28,6 +29,7 @@ metrics.workspace = true papyrus_base_layer.workspace = true papyrus_common.workspace = true reqwest = { workspace = true, features = ["blocking", "json"] } +url.workspace = true serde = { workspace = true, features = ["derive"] } starknet-types-core.workspace = true starknet_api.workspace = true diff --git a/crates/apollo_central_sync/src/lib.rs 
b/crates/apollo_central_sync/src/lib.rs index 40d6a145848..8a359b4289c 100644 --- a/crates/apollo_central_sync/src/lib.rs +++ b/crates/apollo_central_sync/src/lib.rs @@ -863,7 +863,7 @@ fn stream_new_blocks< ).await?; } else{ - debug!("Blocks syncing reached the last known block {:?}, waiting for blockchain to advance.", header_marker.prev()); + trace!("Blocks syncing reached the last known block {:?}, waiting for blockchain to advance.", header_marker.prev()); tokio::time::sleep(block_propagation_sleep_duration).await; }; continue; @@ -894,7 +894,7 @@ fn stream_new_state_diffs( let last_block_number = txn.get_header_marker()?; drop(txn); if state_marker == last_block_number { - debug!("State updates syncing reached the last downloaded block {:?}, waiting for more blocks.", state_marker.prev()); + trace!("State updates syncing reached the last downloaded block {:?}, waiting for more blocks.", state_marker.prev()); tokio::time::sleep(block_propagation_sleep_duration).await; continue; } diff --git a/crates/apollo_central_sync/src/sources/central.rs b/crates/apollo_central_sync/src/sources/central.rs index f601b9b639e..938fddb4c55 100644 --- a/crates/apollo_central_sync/src/sources/central.rs +++ b/crates/apollo_central_sync/src/sources/central.rs @@ -38,14 +38,16 @@ use starknet_api::deprecated_contract_class::ContractClass as DeprecatedContract use starknet_api::state::StateDiff; use starknet_api::StarknetApiError; use tracing::{debug, trace}; +use url::Url; +use validator::Validate; use self::state_update_stream::{StateUpdateStream, StateUpdateStreamConfig}; type CentralResult = Result; -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Validate)] pub struct CentralSourceConfig { pub concurrent_requests: usize, - pub starknet_url: String, + pub starknet_url: Url, #[serde(deserialize_with = "deserialize_optional_map")] pub http_headers: Option>, pub max_state_updates_to_download: usize, @@ -60,7 
+62,8 @@ impl Default for CentralSourceConfig { fn default() -> Self { CentralSourceConfig { concurrent_requests: 10, - starknet_url: String::from("https://alpha-mainnet.starknet.io/"), + starknet_url: Url::parse("https://alpha-mainnet.starknet.io/") + .expect("Unable to parse default URL, this should never happen."), http_headers: None, max_state_updates_to_download: 20, max_state_updates_to_store_in_memory: 20, @@ -451,7 +454,7 @@ impl CentralSource { storage_reader: StorageReader, ) -> Result { let apollo_starknet_client = StarknetFeederGatewayClient::new( - &config.starknet_url, + config.starknet_url.as_ref(), config.http_headers, node_version, config.retry_config, diff --git a/crates/apollo_central_sync/src/sources/pending.rs b/crates/apollo_central_sync/src/sources/pending.rs index db95a931278..286d738d4fd 100644 --- a/crates/apollo_central_sync/src/sources/pending.rs +++ b/crates/apollo_central_sync/src/sources/pending.rs @@ -58,7 +58,7 @@ impl PendingSource { node_version: &'static str, ) -> Result { let apollo_starknet_client = StarknetFeederGatewayClient::new( - &config.starknet_url, + config.starknet_url.as_ref(), config.http_headers, node_version, config.retry_config, diff --git a/crates/apollo_config/Cargo.toml b/crates/apollo_config/Cargo.toml index dd8dc4d19cf..2471cd4846c 100644 --- a/crates/apollo_config/Cargo.toml +++ b/crates/apollo_config/Cargo.toml @@ -7,7 +7,7 @@ license-file.workspace = true description = "A library for handling node configuration." 
[dependencies] -apollo_infra_utils = { workspace = true, optional = true } +apollo_infra_utils.workspace = true clap = { workspace = true, features = ["env", "string"] } colored = { workspace = true, optional = true } const_format.workspace = true @@ -17,6 +17,7 @@ serde_json = { workspace = true, features = ["arbitrary_precision"] } strum_macros.workspace = true thiserror.workspace = true tracing.workspace = true +url = { workspace = true, features = ["serde"] } validator = { workspace = true, features = ["derive"] } [dev-dependencies] diff --git a/crates/apollo_config/src/config_test.rs b/crates/apollo_config/src/config_test.rs index 2ed411d647f..68dd80b498e 100644 --- a/crates/apollo_config/src/config_test.rs +++ b/crates/apollo_config/src/config_test.rs @@ -12,10 +12,16 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use serde_json::json; use tempfile::{NamedTempFile, TempDir}; +use url::Url; use validator::Validate; use crate::command::{get_command_matches, update_config_map_by_command_args}; -use crate::converters::deserialize_milliseconds_to_duration; +use crate::converters::{ + deserialize_milliseconds_to_duration, + deserialize_optional_list_with_url_and_headers, + serialize_optional_list_with_url_and_headers, + UrlAndHeaders, +}; use crate::dumping::{ combine_config_map_and_pointers, generate_struct_pointer, @@ -906,3 +912,59 @@ fn deeply_nested_optionals() { } ); } + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Default)] +struct TestConfigWithNestedJson { + #[serde(deserialize_with = "deserialize_optional_list_with_url_and_headers")] + list_of_maps: Option>, +} + +impl SerializeConfig for TestConfigWithNestedJson { + fn dump(&self) -> BTreeMap { + BTreeMap::from([ser_param( + "list_of_maps", + &serialize_optional_list_with_url_and_headers(&self.list_of_maps), + "A list of nested JSON values.", + ParamPrivacyInput::Public, + )]) + } +} + +#[test] +fn optional_list_nested_btreemaps() { + let config = 
TestConfigWithNestedJson { + list_of_maps: Some(vec![ + UrlAndHeaders { + url: Url::parse("http://a.com/").unwrap(), + headers: BTreeMap::from([ + ("inner1".to_owned(), "1".to_owned()), + ("inner2".to_owned(), "2".to_owned()), + ]), + }, + UrlAndHeaders { + url: Url::parse("http://b.com/").unwrap(), + headers: BTreeMap::from([ + ("inner3".to_owned(), "3".to_owned()), + ("inner4".to_owned(), "4".to_owned()), + ]), + }, + UrlAndHeaders { + url: Url::parse("http://c.com/").unwrap(), + headers: BTreeMap::from([]), + }, + UrlAndHeaders { + url: Url::parse("http://d.com/").unwrap(), + headers: BTreeMap::from([("inner5".to_owned(), "5".to_owned())]), + }, + ]), + }; + let dumped = config.dump(); + let (config_map, _) = split_values_and_types(dumped); + let loaded_config = load::(&config_map).unwrap(); + assert_eq!(loaded_config.list_of_maps, config.list_of_maps); + let serialized = serde_json::to_string(&loaded_config).unwrap(); + assert_eq!( + serialized, + r#"{"list_of_maps":[{"url":"http://a.com/","headers":{"inner1":"1","inner2":"2"}},{"url":"http://b.com/","headers":{"inner3":"3","inner4":"4"}},{"url":"http://c.com/","headers":{}},{"url":"http://d.com/","headers":{"inner5":"5"}}]}"# + ); +} diff --git a/crates/apollo_config/src/converters.rs b/crates/apollo_config/src/converters.rs index 14f3e8424ff..f2e0ca52da0 100644 --- a/crates/apollo_config/src/converters.rs +++ b/crates/apollo_config/src/converters.rs @@ -24,11 +24,12 @@ //! assert_eq!(loaded_config.dur.as_secs(), 1); //! ``` -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::time::Duration; use serde::de::Error; -use serde::{Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize}; +use url::Url; /// Deserializes milliseconds to duration object. pub fn deserialize_milliseconds_to_duration<'de, D>(de: D) -> Result @@ -88,6 +89,51 @@ where Ok(Some(map)) } +/// A struct containing a URL and its associated headers. 
+#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +pub struct UrlAndHeaders { + /// The base URL. + pub url: Url, + /// A map of header keyword-value pairs. + pub headers: BTreeMap, +} + +/// Serializes a vector containing the UrlAndHeaders struct into a space-separated string. +pub fn serialize_optional_list_with_url_and_headers(list: &Option>) -> String { + match list { + None => "".to_owned(), + Some(list) => list + .iter() + .map(|item| { + serde_json::to_string(item).expect("Failed to serialize UrlAndHeader to JSON") + }) + .collect::>() + .join(" "), + } +} + +/// Deserializes a space-separated string into a vector of UrlAndHeaders structs. +/// Returns an error if any of the substrings cannot be parsed into a valid struct. +pub fn deserialize_optional_list_with_url_and_headers<'de, D>( + de: D, +) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + let raw: String = Deserialize::deserialize(de)?; + if raw.trim().is_empty() { + return Ok(None); + } + let number_of_items = raw.split_whitespace().count(); + let mut output = Vec::with_capacity(number_of_items); + for item in raw.split_whitespace() { + let value: UrlAndHeaders = serde_json::from_str(item) + .map_err(|e| D::Error::custom(format!("Invalid JSON '{item}': {e}")))?; + output.push(value); + } + Ok(Some(output)) +} + /// Serializes a vector to string structure. The vector is expected to be a hex string. pub fn serialize_optional_vec_u8(optional_vector: &Option>) -> String { match optional_vector { @@ -132,3 +178,27 @@ where } Ok(Some(vector)) } + +// TODO(Tsabary): generalize these for Vec serde. + +/// Serializes a `&[Url]` into a single space-separated string. +pub fn serialize_slice_url(vector: &[Url]) -> String { + vector.iter().map(Url::as_str).collect::>().join(" ") +} + +/// Deserializes a space-separated string into a `Vec`. +/// Returns an error if any of the substrings cannot be parsed into a valid URL. 
+pub fn deserialize_vec_url<'de, D>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let raw: String = ::deserialize(de)?; + + if raw.trim().is_empty() { + return Ok(Vec::new()); + } + + raw.split_whitespace() + .map(|s| Url::parse(s).map_err(|e| D::Error::custom(format!("Invalid URL '{s}': {e}")))) + .collect() +} diff --git a/crates/apollo_config/src/dumping.rs b/crates/apollo_config/src/dumping.rs index 648870ff664..96e2960869a 100644 --- a/crates/apollo_config/src/dumping.rs +++ b/crates/apollo_config/src/dumping.rs @@ -35,9 +35,8 @@ //! ``` use std::collections::{BTreeMap, HashSet}; -use std::fs::File; -use std::io::{BufWriter, Write}; +use apollo_infra_utils::dumping::serialize_to_file; use itertools::chain; use serde::Serialize; use serde_json::{json, Value}; @@ -174,19 +173,7 @@ pub trait SerializeConfig { ) -> Result<(), ConfigError> { let combined_map = combine_config_map_and_pointers(self.dump(), config_pointers, non_pointer_params)?; - - // Create file writer. - let file = File::create(file_path)?; - let mut writer = BufWriter::new(file); - - // Add config as JSON content to writer. - serde_json::to_writer_pretty(&mut writer, &combined_map)?; - - // Add an extra newline after the JSON content. - writer.write_all(b"\n")?; - - // Write to file. - writer.flush()?; + serialize_to_file(combined_map, file_path); Ok(()) } } diff --git a/crates/apollo_config/src/lib.rs b/crates/apollo_config/src/lib.rs index eb8d7f666df..1da6cad87d4 100644 --- a/crates/apollo_config/src/lib.rs +++ b/crates/apollo_config/src/lib.rs @@ -165,6 +165,11 @@ impl SerializedParam { pub fn is_required(&self) -> bool { self.description.starts_with(REQUIRED_PARAM_DESCRIPTION_PREFIX) } + + /// Whether the parameter is private. + pub fn is_private(&self) -> bool { + self.privacy == ParamPrivacy::Private + } } /// A serialized type of a configuration parameter. 
diff --git a/crates/apollo_consensus/Cargo.toml b/crates/apollo_consensus/Cargo.toml index eb16265ff83..98a652b3040 100644 --- a/crates/apollo_consensus/Cargo.toml +++ b/crates/apollo_consensus/Cargo.toml @@ -15,7 +15,7 @@ apollo_metrics.workspace = true apollo_network.workspace = true apollo_network_types.workspace = true apollo_protobuf.workspace = true -apollo_time.workspace = true +apollo_time = { workspace = true, features = ["tokio"] } async-trait.workspace = true clap = { workspace = true, features = ["derive"] } fs2.workspace = true diff --git a/crates/apollo_consensus/src/single_height_consensus.rs b/crates/apollo_consensus/src/single_height_consensus.rs index 6c98d2c35a5..f78eea610b6 100644 --- a/crates/apollo_consensus/src/single_height_consensus.rs +++ b/crates/apollo_consensus/src/single_height_consensus.rs @@ -276,7 +276,7 @@ impl SingleHeightConsensus { // Only replay the newest prevote. return Ok(ShcReturn::Tasks(Vec::new())); } - debug!("Rebroadcasting {last_vote:?}"); + trace!("Rebroadcasting {last_vote:?}"); context.broadcast(last_vote.clone()).await?; Ok(ShcReturn::Tasks(vec![ShcTask::Prevote( self.timeouts.prevote_timeout, @@ -377,7 +377,7 @@ impl SingleHeightConsensus { context: &mut ContextT, vote: Vote, ) -> Result { - debug!("Received {:?}", vote); + trace!("Received {:?}", vote); if !self.validators.contains(&vote.voter) { debug!("Ignoring vote from non validator: vote={:?}", vote); return Ok(ShcReturn::Tasks(Vec::new())); diff --git a/crates/apollo_consensus/src/types.rs b/crates/apollo_consensus/src/types.rs index f18e8f004ef..afca6e34327 100644 --- a/crates/apollo_consensus/src/types.rs +++ b/crates/apollo_consensus/src/types.rs @@ -161,6 +161,8 @@ pub enum ConsensusError { // For example the state machine and SHC are out of sync. 
#[error("{0}")] InternalInconsistency(String), + #[error("Block info conversion error: {0}")] + BlockInfoConversion(#[from] starknet_api::StarknetApiError), #[error("{0}")] Other(String), } diff --git a/crates/apollo_consensus_orchestrator/Cargo.toml b/crates/apollo_consensus_orchestrator/Cargo.toml index 3e8dd573e4d..df6be2aedf6 100644 --- a/crates/apollo_consensus_orchestrator/Cargo.toml +++ b/crates/apollo_consensus_orchestrator/Cargo.toml @@ -44,9 +44,11 @@ url = { workspace = true, features = ["serde"] } validator.workspace = true [dev-dependencies] +assert_matches.workspace = true apollo_batcher.workspace = true apollo_batcher_types = { workspace = true, features = ["testing"] } apollo_class_manager_types = { workspace = true, features = ["testing"] } +apollo_infra = { workspace = true, features = ["testing"] } apollo_infra_utils = { workspace = true, features = ["testing"] } apollo_l1_gas_price_types = { workspace = true, features = ["testing"] } apollo_metrics = { workspace = true, features = ["testing"] } diff --git a/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json b/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json index 7072b7f1ea7..e0d374fa192 100644 --- a/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json +++ b/crates/apollo_consensus_orchestrator/resources/orchestrator_versioned_constants_0_14_0.json @@ -2,6 +2,6 @@ "gas_price_max_change_denominator": 48, "gas_target": 2000000000, "max_block_size": 4000000000, - "min_gas_price": "0x5f5e100", + "min_gas_price": "0xb2d05e00", "l1_gas_price_margin_percent": 10 } diff --git a/crates/apollo_consensus_orchestrator/src/build_proposal.rs b/crates/apollo_consensus_orchestrator/src/build_proposal.rs index 833699cc254..f5609e80e40 100644 --- a/crates/apollo_consensus_orchestrator/src/build_proposal.rs +++ b/crates/apollo_consensus_orchestrator/src/build_proposal.rs @@ -1,3 +1,7 @@ 
+#[cfg(test)] +#[path = "build_proposal_test.rs"] +mod build_proposal_test; + use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -8,7 +12,10 @@ use apollo_batcher_types::batcher_types::{ ProposeBlockInput, }; use apollo_batcher_types::communication::{BatcherClient, BatcherClientError}; -use apollo_class_manager_types::transaction_converter::TransactionConverterTrait; +use apollo_class_manager_types::transaction_converter::{ + TransactionConverterError, + TransactionConverterTrait, +}; use apollo_consensus::types::{ProposalCommitment, Round}; use apollo_l1_gas_price_types::errors::{EthToStrkOracleClientError, L1GasPriceClientError}; use apollo_protobuf::consensus::{ @@ -19,13 +26,13 @@ use apollo_protobuf::consensus::{ TransactionBatch, }; use apollo_state_sync_types::communication::StateSyncClientError; -use futures::channel::oneshot; -use futures::FutureExt; +use apollo_time::time::{Clock, DateTime}; use starknet_api::block::{BlockHash, GasPrice}; use starknet_api::consensus_transaction::InternalConsensusTransaction; use starknet_api::core::ContractAddress; use starknet_api::data_availability::L1DataAvailabilityMode; use starknet_api::transaction::TransactionHash; +use starknet_api::StarknetApiError; use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; use tracing::{debug, error, info, trace, warn}; @@ -46,7 +53,6 @@ pub(crate) struct ProposalBuildArguments { pub proposal_init: ProposalInit, pub l1_da_mode: L1DataAvailabilityMode, pub stream_sender: StreamSender, - pub fin_sender: oneshot::Sender, pub gas_price_params: GasPriceParams, pub valid_proposals: Arc>, pub proposal_id: ProposalId, @@ -63,27 +69,34 @@ type BuildProposalResult = Result; #[derive(Debug, thiserror::Error)] pub(crate) enum BuildProposalError { #[error("Batcher error: {0}")] - Batcher(#[from] BatcherClientError), + Batcher(String, BatcherClientError), #[error("State sync client error: {0}")] StateSyncClientError(#[from] StateSyncClientError), #[error("State 
sync is not ready: {0}")] StateSyncNotReady(String), + // Consensus may exit early (e.g. sync). + #[error("Failed to send commitment to consensus: {0}")] + SendError(ProposalCommitment), #[error("EthToStrkOracle error: {0}")] EthToStrkOracle(#[from] EthToStrkOracleClientError), #[error("L1GasPriceProvider error: {0}")] L1GasPriceProvider(#[from] L1GasPriceClientError), + #[error("Proposal interrupted.")] + Interrupted, + #[error("Writing blob to Aerospike failed. {0}")] + CendeWriteError(String), + #[error("Failed to convert transactions: {0}")] + TransactionConverterError(#[from] TransactionConverterError), + #[error("Block info conversion error: {0}")] + BlockInfoConversion(#[from] StarknetApiError), } // Handles building a new proposal without blocking consensus: -pub(crate) async fn build_proposal(mut args: ProposalBuildArguments) { - let block_info = initiate_build(&args).await; - let block_info = match block_info { - Ok(info) => info, - Err(e) => { - error!("Failed to initiate proposal build. {e:?}"); - return; - } - }; +pub(crate) async fn build_proposal( + mut args: ProposalBuildArguments, +) -> BuildProposalResult { + let batcher_deadline = args.deps.clock.now() + args.batcher_timeout; + let block_info = initiate_build(&args).await?; args.stream_sender .send(ProposalPart::Init(args.proposal_init)) .await @@ -93,18 +106,17 @@ pub(crate) async fn build_proposal(mut args: ProposalBuildArguments) { .await .expect("Failed to send block info"); - let Some((proposal_commitment, content)) = get_proposal_content( + let (proposal_commitment, content) = get_proposal_content( args.proposal_id, args.deps.batcher.as_ref(), args.stream_sender, args.cende_write_success, args.deps.transaction_converter, args.cancel_token, + args.deps.clock, + batcher_deadline, ) - .await - else { - return; - }; + .await?; // Update valid_proposals before sending fin to avoid a race condition // with `repropose` being called before `valid_proposals` is updated. 
@@ -116,10 +128,7 @@ pub(crate) async fn build_proposal(mut args: ProposalBuildArguments) { content, &args.proposal_id, ); - if args.fin_sender.send(proposal_commitment).is_err() { - // Consensus may exit early (e.g. sync). - warn!("Failed to send proposal content id"); - } + Ok(proposal_commitment) } async fn initiate_build(args: &ProposalBuildArguments) -> BuildProposalResult { @@ -152,41 +161,51 @@ async fn initiate_build(args: &ProposalBuildArguments) -> BuildProposalResult, transaction_converter: Arc, cancel_token: CancellationToken, -) -> Option<(ProposalCommitment, Vec>)> { + clock: Arc, + batcher_deadline: DateTime, +) -> BuildProposalResult<(ProposalCommitment, Vec>)> { let mut content = Vec::new(); loop { if cancel_token.is_cancelled() { - warn!("Proposal interrupted during building."); - return None; + return Err(BuildProposalError::Interrupted); } // We currently want one part of the node failing to cause all components to fail. If this // changes, we can simply return None and consider this as a failed proposal which consensus // should support. - let response = batcher.get_proposal_content(GetProposalContentInput { proposal_id }).await; - let response = match response { - Ok(resp) => resp, - Err(e) => { - error!("Failed to get proposal content. {e:?}"); - return None; - } - }; + let response = batcher + .get_proposal_content(GetProposalContentInput { proposal_id }) + .await + .map_err(|err| { + BuildProposalError::Batcher( + format!("Failed to get proposal content for proposal_id {proposal_id}."), + err, + ) + })?; match response.content { GetProposalContent::Txs(txs) => { @@ -202,14 +221,7 @@ pub(crate) async fn get_proposal_content( })) .await .into_iter() - .collect::, _>>(); - let transactions = match transactions { - Ok(txs) => txs, - Err(e) => { - error!("Failed to convert transactions. 
{e:?}"); - return None; - } - }; + .collect::, _>>()?; trace!(?transactions, "Sending transaction batch with {} txs.", transactions.len()); stream_sender @@ -231,22 +243,28 @@ pub(crate) async fn get_proposal_content( } // If the blob writing operation to Aerospike doesn't return a success status, we - // can't finish the proposal. - match cende_write_success.now_or_never() { - Some(Ok(true)) => { - info!("Writing blob to Aerospike completed successfully."); + // can't finish the proposal. Must wait for it at least until batcher_timeout is + // reached. + let remaining = (batcher_deadline - clock.now()) + .to_std() + .unwrap_or_default() + .max(Duration::from_millis(1)); // Ensure we wait at least 1 ms to avoid immediate timeout. + match tokio::time::timeout(remaining, cende_write_success).await { + Err(_) => { + return Err(BuildProposalError::CendeWriteError( + "Writing blob to Aerospike didn't return in time.".to_string(), + )); } - Some(Ok(false)) => { - warn!("Writing blob to Aerospike failed."); - return None; + Ok(Ok(true)) => { + info!("Writing blob to Aerospike completed successfully."); } - Some(Err(e)) => { - warn!("Writing blob to Aerospike failed. 
Error: {e:?}"); - return None; + Ok(Ok(false)) => { + return Err(BuildProposalError::CendeWriteError( + "Writing blob to Aerospike failed.".to_string(), + )); } - None => { - warn!("Writing blob to Aerospike didn't return in time."); - return None; + Ok(Err(e)) => { + return Err(BuildProposalError::CendeWriteError(e.to_string())); } } @@ -263,7 +281,7 @@ pub(crate) async fn get_proposal_content( .send(ProposalPart::Fin(fin)) .await .expect("Failed to broadcast proposal fin"); - return Some((proposal_commitment, content)); + return Ok((proposal_commitment, content)); } } } diff --git a/crates/apollo_consensus_orchestrator/src/build_proposal_test.rs b/crates/apollo_consensus_orchestrator/src/build_proposal_test.rs new file mode 100644 index 00000000000..568efe067ac --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/build_proposal_test.rs @@ -0,0 +1,260 @@ +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use apollo_batcher_types::batcher_types::{ + GetProposalContent, + GetProposalContentResponse, + ProposalCommitment, + ProposalId, +}; +use apollo_batcher_types::communication::BatcherClientError; +use apollo_class_manager_types::transaction_converter::{ + MockTransactionConverterTrait, + TransactionConverterError, +}; +use apollo_consensus::types::Round; +use apollo_infra::component_client::ClientError; +use apollo_protobuf::consensus::{ConsensusBlockInfo, ProposalInit, ProposalPart}; +use apollo_state_sync_types::communication::StateSyncClientError; +use assert_matches::assert_matches; +use blockifier::abi::constants::STORED_BLOCK_HASH_BUFFER; +use futures::channel::mpsc; +use num_rational::Ratio; +use starknet_api::block::{BlockHash, BlockNumber, GasPrice}; +use starknet_api::core::{ClassHash, ContractAddress}; +use starknet_api::data_availability::L1DataAvailabilityMode; +use tokio_util::sync::CancellationToken; +use tokio_util::task::AbortOnDropHandle; + +use crate::build_proposal::{build_proposal, BuildProposalError, ProposalBuildArguments}; 
+use crate::config::ContextConfig; +use crate::orchestrator_versioned_constants::VersionedConstants; +use crate::sequencer_consensus_context::BuiltProposals; +use crate::test_utils::{ + create_test_and_network_deps, + TestDeps, + CHANNEL_SIZE, + INTERNAL_TX_BATCH, + STATE_DIFF_COMMITMENT, + TIMEOUT, +}; +use crate::utils::{GasPriceParams, StreamSender}; + +struct TestProposalBuildArguments { + pub deps: TestDeps, + pub batcher_timeout: Duration, + pub proposal_init: ProposalInit, + pub l1_da_mode: L1DataAvailabilityMode, + pub stream_sender: StreamSender, + pub gas_price_params: GasPriceParams, + pub valid_proposals: Arc>, + pub proposal_id: ProposalId, + pub cende_write_success: AbortOnDropHandle, + pub l2_gas_price: GasPrice, + pub builder_address: ContractAddress, + pub cancel_token: CancellationToken, + pub previous_block_info: Option, + pub proposal_round: Round, +} + +impl From for ProposalBuildArguments { + fn from(args: TestProposalBuildArguments) -> Self { + ProposalBuildArguments { + deps: args.deps.into(), + batcher_timeout: args.batcher_timeout, + proposal_init: args.proposal_init, + l1_da_mode: args.l1_da_mode, + stream_sender: args.stream_sender, + gas_price_params: args.gas_price_params, + valid_proposals: args.valid_proposals, + proposal_id: args.proposal_id, + cende_write_success: args.cende_write_success, + l2_gas_price: args.l2_gas_price, + builder_address: args.builder_address, + cancel_token: args.cancel_token, + previous_block_info: args.previous_block_info, + proposal_round: args.proposal_round, + } + } +} + +fn create_proposal_build_arguments() -> (TestProposalBuildArguments, mpsc::Receiver) { + let (mut deps, _) = create_test_and_network_deps(); + deps.setup_default_expectations(); + let batcher_timeout = TIMEOUT; + let proposal_init = ProposalInit::default(); + let l1_da_mode = L1DataAvailabilityMode::Calldata; + let (proposal_sender, proposal_receiver) = mpsc::channel::(CHANNEL_SIZE); + let stream_sender = StreamSender { proposal_sender 
}; + let context_config = ContextConfig::default(); + + let gas_price_params = GasPriceParams { + min_l1_gas_price_wei: GasPrice(context_config.min_l1_gas_price_wei), + max_l1_gas_price_wei: GasPrice(context_config.max_l1_gas_price_wei), + min_l1_data_gas_price_wei: GasPrice(context_config.min_l1_data_gas_price_wei), + max_l1_data_gas_price_wei: GasPrice(context_config.max_l1_data_gas_price_wei), + l1_data_gas_price_multiplier: Ratio::new( + context_config.l1_data_gas_price_multiplier_ppt, + 1000, + ), + l1_gas_tip_wei: GasPrice(context_config.l1_gas_tip_wei), + }; + let valid_proposals = Arc::new(Mutex::new(BuiltProposals::new())); + let proposal_id = ProposalId(1); + let cende_write_success = AbortOnDropHandle::new(tokio::spawn(async { true })); + let l2_gas_price = VersionedConstants::latest_constants().min_gas_price; + let builder_address = ContractAddress::default(); + let cancel_token = CancellationToken::new(); + let previous_block_info = None; + let proposal_round = 0; + + ( + TestProposalBuildArguments { + deps, + batcher_timeout, + proposal_init, + l1_da_mode, + stream_sender, + gas_price_params, + valid_proposals, + proposal_id, + cende_write_success, + l2_gas_price, + builder_address, + cancel_token, + previous_block_info, + proposal_round, + }, + proposal_receiver, + ) +} + +#[tokio::test] +async fn build_proposal_succeed() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().returning(|_| { + Ok(GetProposalContentResponse { + content: GetProposalContent::Finished { + id: ProposalCommitment { state_diff_commitment: STATE_DIFF_COMMITMENT }, + final_n_executed_txs: 0, + }, + }) + }); + // Make sure cende returns on time. 
+ tokio::time::sleep(Duration::from_millis(100)).await; + + let res = build_proposal(proposal_args.into()).await.unwrap(); + assert_eq!(res, BlockHash::default()); +} + +#[tokio::test] +async fn state_sync_client_error() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Make sure state_sync_client being called, by setting height to >= STORED_BLOCK_HASH_BUFFER. + proposal_args.proposal_init.height = BlockNumber(STORED_BLOCK_HASH_BUFFER); + // Setup state sync client to return an error. + proposal_args.deps.state_sync_client.expect_get_block().returning(|_| { + Err(StateSyncClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::StateSyncClientError(_)))); +} + +#[tokio::test] +async fn state_sync_not_ready_error() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Make sure state_sync_client being called, by setting height to >= STORED_BLOCK_HASH_BUFFER. + proposal_args.proposal_init.height = BlockNumber(STORED_BLOCK_HASH_BUFFER); + // Setup state sync client to return None, indicating that the state sync is not ready. + proposal_args.deps.state_sync_client.expect_get_block().returning(|_| Ok(None)); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::StateSyncNotReady(_)))); +} + +#[tokio::test] +async fn propose_block_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return an error on propose_block. 
+ proposal_args.deps.batcher.expect_propose_block().returning(|_| { + Err(BatcherClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + + let res = build_proposal(proposal_args.into()).await; + assert_matches!( + res, + Err(BuildProposalError::Batcher(msg, _)) if msg.contains("Failed to initiate build proposal") + ); +} + +#[tokio::test] +async fn get_proposal_content_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return an error on get_proposal_content. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().returning(|_| { + Err(BatcherClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + + let res = build_proposal(proposal_args.into()).await; + assert_matches!( + res, + Err(BuildProposalError::Batcher(msg, _)) if msg.contains("Failed to get proposal content") + ); +} + +#[tokio::test] +async fn interrupt_proposal() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return Ok on propose_block. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + // Interrupt the proposal. + proposal_args.cancel_token.cancel(); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::Interrupted))); +} + +#[tokio::test] +async fn convert_internal_consensus_tx_to_consensus_tx_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return Ok on propose_block and TX from get_proposal_content. 
+ proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().times(1).returning(|_| { + Ok(GetProposalContentResponse { + content: GetProposalContent::Txs(INTERNAL_TX_BATCH.clone()), + }) + }); + // Overwrite the transaction converter to return an error, since by default it returns Ok. + let mut transaction_converter = MockTransactionConverterTrait::new(); + transaction_converter.expect_convert_internal_consensus_tx_to_consensus_tx().returning(|_| { + Err(TransactionConverterError::ClassNotFound { class_hash: ClassHash::default() }) + }); + proposal_args.deps.transaction_converter = transaction_converter; + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::TransactionConverterError(_)))); +} + +#[tokio::test] +async fn cende_fail() { + let (mut proposal_args, _proposal_receiver) = create_proposal_build_arguments(); + // Setup batcher to return Ok on propose_block and Finished from get_proposal_content. + proposal_args.deps.batcher.expect_propose_block().returning(|_| Ok(())); + proposal_args.deps.batcher.expect_get_proposal_content().times(1).returning(|_| { + Ok(GetProposalContentResponse { + content: GetProposalContent::Finished { + id: ProposalCommitment { state_diff_commitment: STATE_DIFF_COMMITMENT }, + final_n_executed_txs: 0, + }, + }) + }); + // Setup cende to return false, indicating a failure. 
+ proposal_args.cende_write_success = AbortOnDropHandle::new(tokio::spawn(async { false })); + + let res = build_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(BuildProposalError::CendeWriteError(_)))); +} diff --git a/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs b/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs index e946d5e05af..e55feb22831 100644 --- a/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs +++ b/crates/apollo_consensus_orchestrator/src/cende/central_objects_test.rs @@ -5,8 +5,8 @@ use std::vec; use apollo_batcher::cende_client_types::{ Builtin, CendeBlockMetadata, - CendePreConfirmedBlock, - CendePreConfirmedTransaction, + CendePreconfirmedBlock, + CendePreconfirmedTransaction, ExecutionResources as CendeClientExecutionResources, IntermediateInvokeTransaction, StarknetClientTransactionReceipt, @@ -674,7 +674,7 @@ fn event_from_serialized_fields(from_address: &str, keys: Vec<&str>, data: Vec<& } } -fn starknet_preconfiremd_block() -> CendePreConfirmedBlock { +fn starknet_preconfiremd_block() -> CendePreconfirmedBlock { let metadata = CendeBlockMetadata { status: "PRE_CONFIRMED", starknet_version: StarknetVersion::V0_14_0, @@ -698,7 +698,7 @@ fn starknet_preconfiremd_block() -> CendePreConfirmedBlock { }; let transactions = vec![ - CendePreConfirmedTransaction::Invoke(IntermediateInvokeTransaction { + CendePreconfirmedTransaction::Invoke(IntermediateInvokeTransaction { resource_bounds: Some( AllResourceBounds { l1_gas: ResourceBounds { @@ -755,7 +755,7 @@ fn starknet_preconfiremd_block() -> CendePreConfirmedBlock { entry_point_selector: None, max_fee: None, }), - CendePreConfirmedTransaction::Invoke(IntermediateInvokeTransaction { + CendePreconfirmedTransaction::Invoke(IntermediateInvokeTransaction { resource_bounds: Some( AllResourceBounds { l1_gas: ResourceBounds { @@ -924,7 +924,7 @@ fn starknet_preconfiremd_block() -> CendePreConfirmedBlock { 
..Default::default() })]; - CendePreConfirmedBlock { metadata, transactions, transaction_receipts, transaction_state_diffs } + CendePreconfirmedBlock { metadata, transactions, transaction_receipts, transaction_state_diffs } } #[rstest] diff --git a/crates/apollo_consensus_orchestrator/src/cende/mod.rs b/crates/apollo_consensus_orchestrator/src/cende/mod.rs index 9288e7c1c80..a97c911f506 100644 --- a/crates/apollo_consensus_orchestrator/src/cende/mod.rs +++ b/crates/apollo_consensus_orchestrator/src/cende/mod.rs @@ -156,7 +156,7 @@ impl SerializeConfig for CendeConfig { "skip_write_height", "A height that the consensus can skip writing to Aerospike. Needed for booting up (no \ previous height blob to write) or to handle extreme cases (all the nodes failed).", - ParamPrivacyInput::Private, + ParamPrivacyInput::Public, )); config diff --git a/crates/apollo_consensus_orchestrator/src/fee_market/test.rs b/crates/apollo_consensus_orchestrator/src/fee_market/test.rs index c2f197ad8ee..ac9a703c224 100644 --- a/crates/apollo_consensus_orchestrator/src/fee_market/test.rs +++ b/crates/apollo_consensus_orchestrator/src/fee_market/test.rs @@ -12,16 +12,19 @@ static VERSIONED_CONSTANTS: LazyLock<&VersionedConstants> = #[test] fn test_price_calculation_snapshot() { // Setup: using realistic arbitrary values. - let init_price = GasPrice(1_000_000_000); + let init_price = GasPrice(30_000_000_000); let max_block_size = VERSIONED_CONSTANTS.max_block_size; + let change_denominator = VERSIONED_CONSTANTS.gas_price_max_change_denominator; let gas_target = max_block_size / 2; let high_congestion_gas_used = GasAmount(max_block_size.0 * 3 / 4); let low_congestion_gas_used = max_block_size / 4; let stable_congestion_gas_used = gas_target; + // (30000000000 * 1 / 4 * max_block_size) / (0.5 * max_block_size * change_denominator) + let price_change = init_price.0 / (change_denominator * 2); // Fixed expected output values. 
- let increased_price = GasPrice(init_price.0 + 10416666); // 1000000000 + (1000000000 * 1 / 4 * max_block_size) / (0.5 * max_block_size * 48); - let decreased_price = GasPrice(init_price.0 - 10416666); // 1000000000 - (1000000000 * 1 / 4 * max_block_size) / (0.5 * max_block_size * 48); + let increased_price = GasPrice(init_price.0 + price_change); + let decreased_price = GasPrice(init_price.0 - price_change); // Assert. assert_eq!( diff --git a/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs b/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs index 1d83e337d1f..feecf515d59 100644 --- a/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs +++ b/crates/apollo_consensus_orchestrator/src/sequencer_consensus_context.rs @@ -62,14 +62,19 @@ use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; use tracing::{error, error_span, info, instrument, trace, warn, Instrument}; -use crate::build_proposal::{build_proposal, ProposalBuildArguments}; +use crate::build_proposal::{build_proposal, BuildProposalError, ProposalBuildArguments}; use crate::cende::{BlobParameters, CendeContext}; use crate::config::ContextConfig; use crate::fee_market::{calculate_next_base_gas_price, FeeMarketInfo}; use crate::metrics::{register_metrics, CONSENSUS_L2_GAS_PRICE}; use crate::orchestrator_versioned_constants::VersionedConstants; use crate::utils::{convert_to_sn_api_block_info, GasPriceParams, StreamSender}; -use crate::validate_proposal::{validate_proposal, BlockInfoValidation, ProposalValidateArguments}; +use crate::validate_proposal::{ + validate_proposal, + BlockInfoValidation, + ProposalValidateArguments, + ValidateProposalError, +}; type ValidationParams = (BlockNumber, ValidatorId, Duration, mpsc::Receiver); @@ -93,7 +98,7 @@ pub(crate) struct BuiltProposals { } impl BuiltProposals { - fn new() -> Self { + pub fn new() -> Self { Self { data: HeightToIdToContent::default() } } @@ -260,7 +265,6 @@ 
impl ConsensusContext for SequencerConsensusContext { proposal_init, l1_da_mode: self.l1_da_mode, stream_sender, - fin_sender, gas_price_params, valid_proposals: Arc::clone(&self.valid_proposals), proposal_id, @@ -273,7 +277,20 @@ impl ConsensusContext for SequencerConsensusContext { }; let handle = tokio::spawn( async move { - build_proposal(args).await; + let res = build_proposal(args).await.map(|proposal_commitment| { + fin_sender + .send(proposal_commitment) + .map_err(|_| BuildProposalError::SendError(proposal_commitment))?; + Ok::<_, BuildProposalError>(proposal_commitment) + }); + match res { + Ok(proposal_commitment) => { + info!(?proposal_id, ?proposal_commitment, "Proposal succeeded."); + } + Err(e) => { + warn!("Proposal failed. Error: {e:?}"); + } + } } .instrument( error_span!("consensus_build_proposal", %proposal_id, round=proposal_init.round), @@ -430,7 +447,7 @@ impl ConsensusContext for SequencerConsensusContext { proposals.remove_proposals_below_or_at_height(&height); } - let transactions = transactions.concat(); + // TODO(dvir): return from the batcher's 'decision_reached' function the relevant data to // build a blob. let DecisionReachedResponse { state_diff, l2_gas_used, central_objects } = self @@ -440,6 +457,15 @@ impl ConsensusContext for SequencerConsensusContext { .await .expect("Failed to get state diff."); + // Remove transactions that were not accepted by the Batcher, so `transactions` and + // `central_objects.execution_infos` correspond to the same list of (only accepted) + // transactions. 
+ let transactions: Vec = transactions + .concat() + .into_iter() + .filter(|tx| central_objects.execution_infos.contains_key(&tx.tx_hash())) + .collect(); + let gas_target = GasAmount(VersionedConstants::latest_constants().max_block_size.0 / 2); self.l2_gas_price = calculate_next_base_gas_price(self.l2_gas_price, l2_gas_used, gas_target); @@ -447,7 +473,8 @@ impl ConsensusContext for SequencerConsensusContext { let gas_price_u64 = u64::try_from(self.l2_gas_price.0).unwrap_or(u64::MAX); CONSENSUS_L2_GAS_PRICE.set_lossy(gas_price_u64); - let cende_block_info = convert_to_sn_api_block_info(&block_info); + // The conversion should never fail, if we already managed to get a decision. + let cende_block_info = convert_to_sn_api_block_info(&block_info)?; let l1_gas_price = GasPricePerToken { price_in_fri: cende_block_info.gas_prices.strk_gas_prices.l1_gas_price.get(), price_in_wei: cende_block_info.gas_prices.eth_gas_prices.l1_gas_price.get(), @@ -502,6 +529,11 @@ impl ConsensusContext for SequencerConsensusContext { // `add_new_block` returns immediately, it doesn't wait for sync to fully process the block. state_sync_client.add_new_block(sync_block).await.expect("Failed to add new block."); + // Strip the transaction hashes from `execution_infos`, since we don't use it in the blob + // version of `execution_infos`. + let stripped_execution_infos = + central_objects.execution_infos.into_iter().map(|(_, info)| info).collect(); + // TODO(dvir): pass here real `BlobParameters` info. // TODO(dvir): when passing here the correct `BlobParameters`, also test that // `prepare_blob_for_next_height` is called with the correct parameters. 
@@ -513,7 +545,7 @@ impl ConsensusContext for SequencerConsensusContext { state_diff, compressed_state_diff: central_objects.compressed_state_diff, transactions, - execution_infos: central_objects.execution_infos, + execution_infos: stripped_execution_infos, bouncer_weights: central_objects.bouncer_weights, casm_hash_computation_data_sierra_gas: central_objects .casm_hash_computation_data_sierra_gas, @@ -664,13 +696,12 @@ impl SequencerConsensusContext { content_receiver: mpsc::Receiver, fin_sender: oneshot::Sender, ) { - let cancel_token = CancellationToken::new(); - let cancel_token_clone = cancel_token.clone(); - let l1_gas_tip_wei = GasPrice(self.config.l1_gas_tip_wei); - let valid_proposals = Arc::clone(&self.valid_proposals); let proposal_id = ProposalId(self.proposal_id); self.proposal_id += 1; - let deps = self.deps.clone(); + info!(?timeout, %proposal_id, %proposer, round=self.current_round, "Validating proposal."); + + let cancel_token = CancellationToken::new(); + let cancel_token_clone = cancel_token.clone(); let gas_price_params = GasPriceParams { min_l1_gas_price_wei: GasPrice(self.config.min_l1_gas_price_wei), max_l1_gas_price_wei: GasPrice(self.config.max_l1_gas_price_wei), @@ -680,25 +711,30 @@ impl SequencerConsensusContext { self.config.l1_data_gas_price_multiplier_ppt, 1000, ), - l1_gas_tip_wei, + l1_gas_tip_wei: GasPrice(self.config.l1_gas_tip_wei), + }; + let args = ProposalValidateArguments { + deps: self.deps.clone(), + block_info_validation, + proposal_id, + timeout, + batcher_timeout_margin, + valid_proposals: Arc::clone(&self.valid_proposals), + content_receiver, + gas_price_params, + cancel_token: cancel_token_clone, }; - info!(?timeout, %proposal_id, %proposer, round=self.current_round, "Validating proposal."); let handle = tokio::spawn( async move { - validate_proposal(ProposalValidateArguments { - deps, - block_info_validation, - proposal_id, - timeout, - batcher_timeout_margin, - valid_proposals, - content_receiver, - fin_sender, - 
gas_price_params,
-                cancel_token: cancel_token_clone,
-            })
-            .await
+                match validate_and_send(args, fin_sender).await {
+                    Ok(proposal_commitment) => {
+                        info!(?proposal_id, ?proposal_commitment, "Proposal succeeded.");
+                    }
+                    Err(e) => {
+                        warn!("Proposal failed. Error: {e:?}");
+                    }
+                }
             }
             .instrument(
                 error_span!("consensus_validate_proposal", %proposal_id, round=self.current_round),
@@ -714,3 +750,14 @@ impl SequencerConsensusContext {
         }
     }
 }
+
+async fn validate_and_send(
+    args: ProposalValidateArguments,
+    fin_sender: oneshot::Sender<ProposalCommitment>,
+) -> Result<ProposalCommitment, ValidateProposalError> {
+    let proposal_commitment = validate_proposal(args).await?;
+    fin_sender
+        .send(proposal_commitment)
+        .map_err(|_| ValidateProposalError::SendError(proposal_commitment))?;
+    Ok(proposal_commitment)
+}
diff --git a/crates/apollo_consensus_orchestrator/src/test_utils.rs b/crates/apollo_consensus_orchestrator/src/test_utils.rs
index e34f9cb4970..16ce088ad61 100644
--- a/crates/apollo_consensus_orchestrator/src/test_utils.rs
+++ b/crates/apollo_consensus_orchestrator/src/test_utils.rs
@@ -178,7 +178,6 @@ impl TestDeps {
         );
         self.batcher
             .expect_start_height()
-            .times(1)
             .withf(move |input| input.height == block_number)
             .return_const(Ok(()));
         let proposal_id_clone = Arc::clone(&proposal_id);
diff --git a/crates/apollo_consensus_orchestrator/src/utils.rs b/crates/apollo_consensus_orchestrator/src/utils.rs
index b88c22da75f..7c1d687bfc4 100644
--- a/crates/apollo_consensus_orchestrator/src/utils.rs
+++ b/crates/apollo_consensus_orchestrator/src/utils.rs
@@ -24,6 +24,7 @@ use starknet_api::block::{
 };
 use starknet_api::consensus_transaction::InternalConsensusTransaction;
 use starknet_api::data_availability::L1DataAvailabilityMode;
+use starknet_api::StarknetApiError;
 use tracing::{info, warn};
 
 use crate::build_proposal::BuildProposalError;
@@ -86,7 +87,6 @@ pub(crate) async fn get_oracle_rate_and_prices(
         eth_to_strk_oracle_client.eth_to_fri_rate(timestamp),
l1_gas_price_provider_client.get_price_info(BlockTimestamp(timestamp)) ); - if price_info.is_err() { warn!("Failed to get l1 gas price from provider: {:?}", price_info); CONSENSUS_L1_GAS_PRICE_PROVIDER_ERROR.increment(1); @@ -150,22 +150,19 @@ fn apply_fee_transformations(price_info: &mut PriceInfo, gas_price_params: &GasP pub(crate) fn convert_to_sn_api_block_info( block_info: &ConsensusBlockInfo, -) -> starknet_api::block::BlockInfo { +) -> Result { let l1_gas_price_fri = - NonzeroGasPrice::new(block_info.l1_gas_price_wei.wei_to_fri(block_info.eth_to_fri_rate)) - .unwrap(); + NonzeroGasPrice::new(block_info.l1_gas_price_wei.wei_to_fri(block_info.eth_to_fri_rate)?)?; let l1_data_gas_price_fri = NonzeroGasPrice::new( - block_info.l1_data_gas_price_wei.wei_to_fri(block_info.eth_to_fri_rate), - ) - .unwrap(); - let l2_gas_price_fri = NonzeroGasPrice::new(block_info.l2_gas_price_fri).unwrap(); + block_info.l1_data_gas_price_wei.wei_to_fri(block_info.eth_to_fri_rate)?, + )?; + let l2_gas_price_fri = NonzeroGasPrice::new(block_info.l2_gas_price_fri)?; let l2_gas_price_wei = - NonzeroGasPrice::new(block_info.l2_gas_price_fri.fri_to_wei(block_info.eth_to_fri_rate)) - .unwrap(); - let l1_gas_price_wei = NonzeroGasPrice::new(block_info.l1_gas_price_wei).unwrap(); - let l1_data_gas_price_wei = NonzeroGasPrice::new(block_info.l1_data_gas_price_wei).unwrap(); + NonzeroGasPrice::new(block_info.l2_gas_price_fri.fri_to_wei(block_info.eth_to_fri_rate)?)?; + let l1_gas_price_wei = NonzeroGasPrice::new(block_info.l1_gas_price_wei)?; + let l1_data_gas_price_wei = NonzeroGasPrice::new(block_info.l1_data_gas_price_wei)?; - starknet_api::block::BlockInfo { + Ok(starknet_api::block::BlockInfo { block_number: block_info.height, block_timestamp: BlockTimestamp(block_info.timestamp), sequencer_address: block_info.builder, @@ -182,7 +179,7 @@ pub(crate) fn convert_to_sn_api_block_info( }, }, use_kzg_da: block_info.l1_da_mode == L1DataAvailabilityMode::Blob, - } + }) } pub(crate) async fn 
retrospective_block_hash( diff --git a/crates/apollo_consensus_orchestrator/src/validate_proposal.rs b/crates/apollo_consensus_orchestrator/src/validate_proposal.rs index 2d03c7abfe6..994cfdc4262 100644 --- a/crates/apollo_consensus_orchestrator/src/validate_proposal.rs +++ b/crates/apollo_consensus_orchestrator/src/validate_proposal.rs @@ -1,3 +1,7 @@ +#[cfg(test)] +#[path = "validate_proposal_test.rs"] +mod validate_proposal_test; + use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -16,12 +20,13 @@ use apollo_l1_gas_price_types::{EthToStrkOracleClientTrait, L1GasPriceProviderCl use apollo_protobuf::consensus::{ConsensusBlockInfo, ProposalFin, ProposalPart, TransactionBatch}; use apollo_state_sync_types::communication::{StateSyncClient, StateSyncClientError}; use apollo_time::time::{sleep_until, Clock, DateTime}; -use futures::channel::{mpsc, oneshot}; +use futures::channel::mpsc; use futures::StreamExt; use starknet_api::block::{BlockHash, BlockNumber, GasPrice}; use starknet_api::consensus_transaction::InternalConsensusTransaction; use starknet_api::data_availability::L1DataAvailabilityMode; use starknet_api::transaction::TransactionHash; +use starknet_api::StarknetApiError; use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, instrument, warn}; @@ -49,7 +54,6 @@ pub(crate) struct ProposalValidateArguments { pub batcher_timeout_margin: Duration, pub valid_proposals: Arc>, pub content_receiver: mpsc::Receiver, - pub fin_sender: oneshot::Sender, pub gas_price_params: GasPriceParams, pub cancel_token: CancellationToken, } @@ -71,45 +75,74 @@ enum HandledProposalPart { Failed(String), } +enum SecondProposalPart { + BlockInfo(ConsensusBlockInfo), + Fin(ProposalFin), +} + type ValidateProposalResult = Result; #[derive(Debug, thiserror::Error)] pub(crate) enum ValidateProposalError { #[error("Batcher error: {0}")] - Batcher(#[from] BatcherClientError), + Batcher(String, BatcherClientError), #[error("State sync client error: {0}")] 
StateSyncClientError(#[from] StateSyncClientError), #[error("State sync is not ready: {0}")] StateSyncNotReady(String), + // Consensus may exit early (e.g. sync). + #[error("Failed to send commitment to consensus: {0}")] + SendError(ProposalCommitment), #[error("EthToStrkOracle error: {0}")] EthToStrkOracle(#[from] EthToStrkOracleClientError), #[error("L1GasPriceProvider error: {0}")] L1GasPriceProvider(#[from] L1GasPriceClientError), + #[error("Block info conversion error: {0}")] + BlockInfoConversion(#[from] StarknetApiError), + #[error("Invalid BlockInfo: {2}. received:{0:?}, validation criteria {1:?}.")] + InvalidBlockInfo(ConsensusBlockInfo, BlockInfoValidation, String), + #[error("Validation timed out.")] + ValidationTimeout, + #[error("Proposal interrupted.")] + ProposalInterrupted, + #[error("Got an invalid second proposal part: {0:?}.")] + InvalidSecondProposalPart(Option), + #[error("Batcher returned Invalid status.")] + InvalidProposal, + #[error("Proposal part {1:?} failed validation: {0}.")] + ProposalPartFailed(String, Option), + #[error("proposal_commitment built by the batcher does not match the proposal fin.")] + ProposalFinMismatch, + #[error("Cannot calculate deadline. timeout: {timeout:?}, now: {now:?}")] + CannotCalculateDeadline { timeout: Duration, now: DateTime }, } -pub(crate) async fn validate_proposal(mut args: ProposalValidateArguments) { +pub(crate) async fn validate_proposal( + mut args: ProposalValidateArguments, +) -> ValidateProposalResult { let mut content = Vec::new(); let mut final_n_executed_txs: Option = None; let now = args.deps.clock.now(); let Some(deadline) = now.checked_add_signed(chrono::TimeDelta::from_std(args.timeout).unwrap()) else { - warn!("Cannot calculate deadline. 
Timeout: {:?}, now: {:?}", args.timeout, now); - return; + return Err(ValidateProposalError::CannotCalculateDeadline { timeout: args.timeout, now }); }; - let Some((block_info, fin_sender)) = await_second_proposal_part( + let block_info = match await_second_proposal_part( &args.cancel_token, deadline, &mut args.content_receiver, - args.fin_sender, args.deps.clock.as_ref(), ) - .await - else { - return; + .await? + { + SecondProposalPart::BlockInfo(block_info) => block_info, + SecondProposalPart::Fin(ProposalFin { proposal_commitment }) => { + return Ok(proposal_commitment); + } }; - if !is_block_info_valid( + is_block_info_valid( args.block_info_validation.clone(), block_info.clone(), args.deps.eth_to_strk_oracle_client, @@ -117,11 +150,9 @@ pub(crate) async fn validate_proposal(mut args: ProposalValidateArguments) { args.deps.l1_gas_price_provider, &args.gas_price_params, ) - .await - { - return; - } - if let Err(e) = initiate_validation( + .await?; + + initiate_validation( args.deps.batcher.as_ref(), args.deps.state_sync_client, block_info.clone(), @@ -129,29 +160,24 @@ pub(crate) async fn validate_proposal(mut args: ProposalValidateArguments) { args.timeout + args.batcher_timeout_margin, args.deps.clock.as_ref(), ) - .await - { - error!("Failed to initiate proposal validation. {e:?}"); - return; - } + .await?; + // Validating the rest of the proposal parts. let (built_block, received_fin) = loop { tokio::select! 
{ _ = args.cancel_token.cancelled() => { - warn!("Proposal interrupted during validation."); batcher_abort_proposal(args.deps.batcher.as_ref(), args.proposal_id).await; - return; + return Err(ValidateProposalError::ProposalInterrupted); } _ = sleep_until(deadline, args.deps.clock.as_ref()) => { - warn!("Validation timed out."); batcher_abort_proposal(args.deps.batcher.as_ref(), args.proposal_id).await; - return; + return Err(ValidateProposalError::ValidationTimeout); } proposal_part = args.content_receiver.next() => { match handle_proposal_part( args.proposal_id, args.deps.batcher.as_ref(), - proposal_part, + proposal_part.clone(), &mut content, &mut final_n_executed_txs, args.deps.transaction_converter.clone(), @@ -161,14 +187,12 @@ pub(crate) async fn validate_proposal(mut args: ProposalValidateArguments) { } HandledProposalPart::Continue => {continue;} HandledProposalPart::Invalid => { - warn!("Invalid proposal."); // No need to abort since the Batcher is the source of this info. - return; + return Err(ValidateProposalError::InvalidProposal); } HandledProposalPart::Failed(fail_reason) => { - warn!("Failed to handle proposal part. {fail_reason}"); batcher_abort_proposal(args.deps.batcher.as_ref(), args.proposal_id).await; - return; + return Err(ValidateProposalError::ProposalPartFailed(fail_reason,proposal_part)); } } } @@ -192,14 +216,10 @@ pub(crate) async fn validate_proposal(mut args: ProposalValidateArguments) { // TODO(matan): Switch to signature validation. if built_block != received_fin.proposal_commitment { - warn!("proposal_id built from content received does not match fin."); - return; + return Err(ValidateProposalError::ProposalFinMismatch); } - if fin_sender.send(built_block).is_err() { - // Consensus may exit early (e.g. sync). 
- warn!("Failed to send proposal content ids"); - } + Ok(built_block) } #[instrument(level = "warn", skip_all, fields(?block_info_validation, ?block_info_proposed))] @@ -210,19 +230,42 @@ async fn is_block_info_valid( clock: &dyn Clock, l1_gas_price_provider: Arc, gas_price_params: &GasPriceParams, -) -> bool { +) -> ValidateProposalResult<()> { let now: u64 = clock.unix_now(); let last_block_timestamp = block_info_validation.previous_block_info.as_ref().map_or(0, |info| info.timestamp); + if block_info_proposed.timestamp < last_block_timestamp { + return Err(ValidateProposalError::InvalidBlockInfo( + block_info_proposed.clone(), + block_info_validation.clone(), + format!( + "Timestamp is too old: last_block_timestamp={}, proposed={}", + last_block_timestamp, block_info_proposed.timestamp + ), + )); + } + if block_info_proposed.timestamp > now + block_info_validation.block_timestamp_window_seconds { + return Err(ValidateProposalError::InvalidBlockInfo( + block_info_proposed.clone(), + block_info_validation.clone(), + format!( + "Timestamp is in the future: now={}, block_timestamp_window_seconds={}, \ + proposed={}", + now, + block_info_validation.block_timestamp_window_seconds, + block_info_proposed.timestamp + ), + )); + } if !(block_info_proposed.height == block_info_validation.height - && block_info_proposed.timestamp >= last_block_timestamp - // Check timestamp isn't in the future (allowing for clock disagreement). - && block_info_proposed.timestamp <= now + block_info_validation.block_timestamp_window_seconds && block_info_proposed.l1_da_mode == block_info_validation.l1_da_mode && block_info_proposed.l2_gas_price_fri == block_info_validation.l2_gas_price_fri) { - warn!("Invalid BlockInfo. 
local_timestamp={now}"); - return false; + return Err(ValidateProposalError::InvalidBlockInfo( + block_info_proposed.clone(), + block_info_validation.clone(), + "Block info validation failed".to_string(), + )); } let (eth_to_fri_rate, l1_gas_prices) = get_oracle_rate_and_prices( eth_to_strk_oracle_client, @@ -236,12 +279,14 @@ async fn is_block_info_valid( VersionedConstants::latest_constants().l1_gas_price_margin_percent.into(); debug!("L1 price info: {l1_gas_prices:?}"); - let l1_gas_price_fri = l1_gas_prices.base_fee_per_gas.wei_to_fri(eth_to_fri_rate); - let l1_data_gas_price_fri = l1_gas_prices.blob_fee.wei_to_fri(eth_to_fri_rate); + let l1_gas_price_fri = l1_gas_prices.base_fee_per_gas.wei_to_fri(eth_to_fri_rate)?; + let l1_data_gas_price_fri = l1_gas_prices.blob_fee.wei_to_fri(eth_to_fri_rate)?; let l1_gas_price_fri_proposed = - block_info_proposed.l1_gas_price_wei.wei_to_fri(block_info_proposed.eth_to_fri_rate); - let l1_data_gas_price_fri_proposed = - block_info_proposed.l1_data_gas_price_wei.wei_to_fri(block_info_proposed.eth_to_fri_rate); + block_info_proposed.l1_gas_price_wei.wei_to_fri(block_info_proposed.eth_to_fri_rate)?; + let l1_data_gas_price_fri_proposed = block_info_proposed + .l1_data_gas_price_wei + .wei_to_fri(block_info_proposed.eth_to_fri_rate)?; + if !(within_margin(l1_gas_price_fri_proposed, l1_gas_price_fri, l1_gas_price_margin_percent) && within_margin( l1_data_gas_price_fri_proposed, @@ -249,23 +294,25 @@ async fn is_block_info_valid( l1_gas_price_margin_percent, )) { - warn!( - %l1_gas_price_fri_proposed, - %l1_gas_price_fri, - %l1_data_gas_price_fri_proposed, - %l1_data_gas_price_fri, - %l1_gas_price_margin_percent, - "Invalid L1 gas price proposed.", - ); - return false; + return Err(ValidateProposalError::InvalidBlockInfo( + block_info_proposed, + block_info_validation, + format!( + "L1 gas price mismatch: expected L1 gas price FRI={l1_gas_price_fri}, \ + proposed={l1_gas_price_fri_proposed}, expected L1 data gas price \ + 
FRI={l1_data_gas_price_fri}, proposed={l1_data_gas_price_fri_proposed}, \ + l1_gas_price_margin_percent={l1_gas_price_margin_percent}" + ), + )); } + // TODO(Asmaa): consider removing after 0.14 as other validators may use other sources. if l1_gas_price_fri_proposed != l1_gas_price_fri { CONSENSUS_L1_GAS_MISMATCH.increment(1); } if l1_data_gas_price_fri_proposed != l1_data_gas_price_fri { CONSENSUS_L1_DATA_GAS_MISMATCH.increment(1); } - true + Ok(()) } fn within_margin(number1: GasPrice, number2: GasPrice, margin_percent: u128) -> bool { @@ -280,37 +327,27 @@ async fn await_second_proposal_part( cancel_token: &CancellationToken, deadline: DateTime, content_receiver: &mut mpsc::Receiver, - fin_sender: oneshot::Sender, clock: &dyn Clock, -) -> Option<(ConsensusBlockInfo, oneshot::Sender)> { +) -> ValidateProposalResult { tokio::select! { _ = cancel_token.cancelled() => { - warn!("Proposal interrupted"); - None + Err(ValidateProposalError::ProposalInterrupted) } _ = sleep_until(deadline, clock) => { - warn!("Validation timed out."); - None + Err(ValidateProposalError::ValidationTimeout) } proposal_part = content_receiver.next() => { match proposal_part { Some(ProposalPart::BlockInfo(block_info)) => { - Some((block_info, fin_sender)) + Ok(SecondProposalPart::BlockInfo(block_info)) } Some(ProposalPart::Fin(ProposalFin { proposal_commitment })) => { warn!("Received an empty proposal."); - if fin_sender - .send(proposal_commitment) - .is_err() - { - // Consensus may exit early (e.g. sync). 
- warn!("Failed to send proposal content ids"); - } - None + Ok(SecondProposalPart::Fin(ProposalFin { proposal_commitment })) } x => { - warn!("Invalid second proposal part: {x:?}"); - None + Err(ValidateProposalError::InvalidSecondProposalPart(x + )) } } } @@ -332,10 +369,15 @@ async fn initiate_validation( proposal_id, deadline: clock.now() + chrono_timeout, retrospective_block_hash: retrospective_block_hash(state_sync_client, &block_info).await?, - block_info: convert_to_sn_api_block_info(&block_info), + block_info: convert_to_sn_api_block_info(&block_info)?, }; debug!("Initiating validate proposal: input={input:?}"); - batcher.validate_block(input).await?; + batcher.validate_block(input.clone()).await.map_err(|err| { + ValidateProposalError::Batcher( + format!("Failed to initiate validate proposal {input:?}."), + err, + ) + })?; Ok(()) } diff --git a/crates/apollo_consensus_orchestrator/src/validate_proposal_test.rs b/crates/apollo_consensus_orchestrator/src/validate_proposal_test.rs new file mode 100644 index 00000000000..aa6b90cee00 --- /dev/null +++ b/crates/apollo_consensus_orchestrator/src/validate_proposal_test.rs @@ -0,0 +1,377 @@ +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use apollo_batcher_types::batcher_types::{ + ProposalCommitment, + ProposalId, + ProposalStatus, + SendProposalContent, + SendProposalContentInput, + SendProposalContentResponse, +}; +use apollo_batcher_types::communication::BatcherClientError; +use apollo_infra::component_client::ClientError; +use apollo_protobuf::consensus::{ProposalFin, ProposalPart, TransactionBatch}; +use assert_matches::assert_matches; +use futures::channel::mpsc; +use futures::SinkExt; +use num_rational::Ratio; +use starknet_api::block::{BlockHash, BlockNumber, GasPrice}; +use starknet_api::core::StateDiffCommitment; +use starknet_api::data_availability::L1DataAvailabilityMode; +use starknet_api::hash::PoseidonHash; +use starknet_types_core::felt::Felt; +use tokio_util::sync::CancellationToken; + 
+use crate::config::ContextConfig; +use crate::orchestrator_versioned_constants::VersionedConstants; +use crate::sequencer_consensus_context::BuiltProposals; +use crate::test_utils::{ + block_info, + create_test_and_network_deps, + TestDeps, + CHANNEL_SIZE, + TIMEOUT, + TX_BATCH, +}; +use crate::utils::GasPriceParams; +use crate::validate_proposal::{ + validate_proposal, + BlockInfoValidation, + ProposalValidateArguments, + ValidateProposalError, +}; + +struct TestProposalValidateArguments { + pub deps: TestDeps, + pub block_info_validation: BlockInfoValidation, + pub proposal_id: ProposalId, + pub timeout: Duration, + pub batcher_timeout_margin: Duration, + pub valid_proposals: Arc>, + pub content_receiver: mpsc::Receiver, + pub gas_price_params: GasPriceParams, + pub cancel_token: CancellationToken, +} + +impl From for ProposalValidateArguments { + fn from(args: TestProposalValidateArguments) -> Self { + ProposalValidateArguments { + deps: args.deps.into(), + block_info_validation: args.block_info_validation, + proposal_id: args.proposal_id, + timeout: args.timeout, + batcher_timeout_margin: args.batcher_timeout_margin, + valid_proposals: args.valid_proposals, + content_receiver: args.content_receiver, + gas_price_params: args.gas_price_params, + cancel_token: args.cancel_token, + } + } +} + +fn create_proposal_validate_arguments() +-> (TestProposalValidateArguments, mpsc::Sender) { + let (mut deps, _) = create_test_and_network_deps(); + deps.setup_default_expectations(); + let block_info_validation = BlockInfoValidation { + height: BlockNumber(0), + block_timestamp_window_seconds: 60, + previous_block_info: None, + l1_da_mode: L1DataAvailabilityMode::Blob, + l2_gas_price_fri: VersionedConstants::latest_constants().min_gas_price, + }; + let proposal_id = ProposalId(1); + let timeout = TIMEOUT; + let batcher_timeout_margin = TIMEOUT; + let valid_proposals = Arc::new(Mutex::new(BuiltProposals::new())); + let (content_sender, content_receiver) = 
mpsc::channel(CHANNEL_SIZE); + let context_config = ContextConfig::default(); + let gas_price_params = GasPriceParams { + min_l1_gas_price_wei: GasPrice(context_config.min_l1_gas_price_wei), + max_l1_gas_price_wei: GasPrice(context_config.max_l1_gas_price_wei), + min_l1_data_gas_price_wei: GasPrice(context_config.min_l1_data_gas_price_wei), + max_l1_data_gas_price_wei: GasPrice(context_config.max_l1_data_gas_price_wei), + l1_data_gas_price_multiplier: Ratio::new( + context_config.l1_data_gas_price_multiplier_ppt, + 1000, + ), + l1_gas_tip_wei: GasPrice(context_config.l1_gas_tip_wei), + }; + let cancel_token = CancellationToken::new(); + + ( + TestProposalValidateArguments { + deps, + block_info_validation, + proposal_id, + timeout, + batcher_timeout_margin, + valid_proposals, + content_receiver, + gas_price_params, + cancel_token, + }, + content_sender, + ) +} + +#[tokio::test] +async fn validate_empty_proposal() { + let (proposal_args, mut content_sender) = create_proposal_validate_arguments(); + // Send an empty proposal. + content_sender + .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() })) + .await + .unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert_matches!(res, Ok(val) if val == BlockHash::default()); +} + +#[tokio::test] +async fn validate_proposal_success() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + let n_executed = 1; + // Setup deps to validate the block. + proposal_args.deps.setup_deps_for_validate(BlockNumber(0), n_executed); + // Send a valid block info. + let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + // Send transactions, then executed transaction count, and finally Fin part. 
+ content_sender + .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.clone() })) + .await + .unwrap(); + content_sender + .send(ProposalPart::ExecutedTransactionCount(n_executed.try_into().unwrap())) + .await + .unwrap(); + content_sender + .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() })) + .await + .unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert_matches!(res, Ok(val) if val == BlockHash::default()); +} + +#[tokio::test] +async fn interrupt_proposal() { + let (proposal_args, _content_sender) = create_proposal_validate_arguments(); + // Interrupt the proposal. + proposal_args.cancel_token.cancel(); + + let res = validate_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(ValidateProposalError::ProposalInterrupted))); +} + +#[tokio::test] +async fn validation_timeout() { + let (mut proposal_args, _content_sender) = create_proposal_validate_arguments(); + // Set a very short timeout to trigger a timeout error. + proposal_args.timeout = Duration::from_micros(1); + + let res = validate_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(ValidateProposalError::ValidationTimeout))); +} + +#[tokio::test] +async fn invalid_second_proposal_part() { + let (proposal_args, mut content_sender) = create_proposal_validate_arguments(); + // Send an invalid proposal part (not BlockInfo or Fin). 
+ content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(ValidateProposalError::InvalidSecondProposalPart(_)))); +} + +#[tokio::test] +async fn invalid_block_info() { + let (proposal_args, mut content_sender) = create_proposal_validate_arguments(); + + let mut block_info = block_info(BlockNumber(0)); + block_info.l2_gas_price_fri = + GasPrice(proposal_args.block_info_validation.l2_gas_price_fri.0 + 1); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(ValidateProposalError::InvalidBlockInfo(_, _, _)))); +} + +#[tokio::test] +async fn validate_block_fail() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + // Setup batcher to return an error when validating the block. + proposal_args.deps.batcher.expect_validate_block().returning(|_| { + Err(BatcherClientError::ClientError(ClientError::CommunicationFailure("".to_string()))) + }); + // Send a valid block info. + let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert_matches!(res, Err(ValidateProposalError::Batcher(msg,_ )) + if msg.contains("Failed to initiate validate proposal")); +} + +#[tokio::test] +async fn send_executed_transaction_count_more_than_once() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + // Setup batcher to validate the block. + proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(())); + // Batcher aborts the proposal. 
+ proposal_args + .deps + .batcher + .expect_send_proposal_content() + .withf(move |input: &SendProposalContentInput| { + input.proposal_id == proposal_args.proposal_id + && input.content == SendProposalContent::Abort + }) + .returning(|_| Ok(SendProposalContentResponse { response: ProposalStatus::Aborted })); + // Send a valid block info. + let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + // Send executed transaction count more than once. + content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap(); + content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert_matches!(res, Err(ValidateProposalError::ProposalPartFailed(err,_)) + if err.contains("Received executed transaction count more than once")); +} + +#[tokio::test] +async fn receive_fin_without_executed_transaction_count() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + // Setup batcher to validate the block. + proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(())); + // Batcher aborts the proposal. + proposal_args + .deps + .batcher + .expect_send_proposal_content() + .withf(move |input: &SendProposalContentInput| { + input.proposal_id == proposal_args.proposal_id + && input.content == SendProposalContent::Abort + }) + .returning(|_| Ok(SendProposalContentResponse { response: ProposalStatus::Aborted })); + // Send a valid block info. + let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + // Send Fin part without sending executed transaction count. 
+ content_sender + .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() })) + .await + .unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert_matches!(res, Err(ValidateProposalError::ProposalPartFailed(err,_)) + if err.contains("Received Fin without executed transaction count")); +} + +#[tokio::test] +async fn receive_txs_after_executed_transaction_count() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + // Setup batcher to validate the block. + proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(())); + // Batcher aborts the proposal. + proposal_args + .deps + .batcher + .expect_send_proposal_content() + .withf(move |input: &SendProposalContentInput| { + input.proposal_id == proposal_args.proposal_id + && input.content == SendProposalContent::Abort + }) + .returning(|_| Ok(SendProposalContentResponse { response: ProposalStatus::Aborted })); + // Send a valid block info. + let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + content_sender.send(ProposalPart::ExecutedTransactionCount(0)).await.unwrap(); + // Send transactions after executed transaction count. + content_sender + .send(ProposalPart::Transactions(TransactionBatch { transactions: TX_BATCH.clone() })) + .await + .unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert_matches!(res, Err(ValidateProposalError::ProposalPartFailed(err,_)) + if err.contains("Received transactions after executed transaction count")); +} + +#[tokio::test] +async fn proposal_fin_mismatch() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + let n_executed = 0; + // Setup batcher to validate the block. + proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(())); + // Batcher returns a different block hash than the one received in Fin. 
+ let built_block = StateDiffCommitment(PoseidonHash(Felt::ONE)); + proposal_args + .deps + .batcher + .expect_send_proposal_content() + .withf(move |input: &SendProposalContentInput| { + input.proposal_id == proposal_args.proposal_id + && input.content == SendProposalContent::Finish(n_executed) + }) + .returning(move |_| { + Ok(SendProposalContentResponse { + response: ProposalStatus::Finished(ProposalCommitment { + state_diff_commitment: built_block, + }), + }) + }); + // Send a valid block info. + let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + content_sender + .send(ProposalPart::ExecutedTransactionCount(n_executed.try_into().unwrap())) + .await + .unwrap(); + // Send Fin part. + let received_fin = BlockHash::default(); + content_sender + .send(ProposalPart::Fin(ProposalFin { proposal_commitment: received_fin })) + .await + .unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(ValidateProposalError::ProposalFinMismatch))); +} + +#[tokio::test] +async fn batcher_returns_invalid_proposal() { + let (mut proposal_args, mut content_sender) = create_proposal_validate_arguments(); + let n_executed = 0; + // Setup batcher to validate the block. + proposal_args.deps.batcher.expect_validate_block().returning(|_| Ok(())); + // Batcher returns an invalid proposal status. + proposal_args + .deps + .batcher + .expect_send_proposal_content() + .withf(move |input: &SendProposalContentInput| { + input.proposal_id == proposal_args.proposal_id + && input.content == SendProposalContent::Finish(n_executed) + }) + .returning(|_| { + Ok(SendProposalContentResponse { response: ProposalStatus::InvalidProposal }) + }); + // Send a valid block info. 
+ let block_info = block_info(BlockNumber(0)); + content_sender.send(ProposalPart::BlockInfo(block_info)).await.unwrap(); + content_sender + .send(ProposalPart::ExecutedTransactionCount(n_executed.try_into().unwrap())) + .await + .unwrap(); + content_sender + .send(ProposalPart::Fin(ProposalFin { proposal_commitment: BlockHash::default() })) + .await + .unwrap(); + + let res = validate_proposal(proposal_args.into()).await; + assert!(matches!(res, Err(ValidateProposalError::InvalidProposal))); +} diff --git a/crates/apollo_dashboard/resources/dev_grafana.json b/crates/apollo_dashboard/resources/dev_grafana.json index fc33d0daad7..61811ec2c2d 100644 --- a/crates/apollo_dashboard/resources/dev_grafana.json +++ b/crates/apollo_dashboard/resources/dev_grafana.json @@ -402,6 +402,16 @@ "http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}" ], "extra_params": {} + }, + { + "title": "http_server_add_tx_latency", + "description": "Latency of HTTP add_tx endpoint in secs", + "type": "timeseries", + "exprs": [ + "histogram_quantile(0.50, sum(rate(http_server_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", + "histogram_quantile(0.95, sum(rate(http_server_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))" + ], + "extra_params": {} } ], "State Sync": [ diff --git a/crates/apollo_dashboard/resources/dev_grafana_alerts.json b/crates/apollo_dashboard/resources/dev_grafana_alerts.json index 08297ec11f4..37e62d9581a 100644 --- a/crates/apollo_dashboard/resources/dev_grafana_alerts.json +++ b/crates/apollo_dashboard/resources/dev_grafana_alerts.json @@ -540,6 +540,60 @@ "intervalSec": 30, "severity": "p5" }, + { + "name": "eth_to_strk_error_count", + "title": "Eth to Strk error count", + "ruleGroup": "l1_gas_price", + "expr": "increase(eth_to_strk_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": 
[ + 10.0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "1m", + "intervalSec": 20, + "severity": "p5" + }, + { + "name": "eth_to_strk_success_count", + "title": "Eth to Strk success count", + "ruleGroup": "l1_gas_price", + "expr": "increase(eth_to_strk_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p3" + }, { "name": "gateway_add_tx_idle", "title": "Gateway add_tx idle", @@ -568,15 +622,15 @@ "severity": "p2" }, { - "name": "http_server_idle", - "title": "http server idle", + "name": "http_server_add_tx_idle", + "title": "HTTP Server add_tx idle", "ruleGroup": "http_server", "expr": "increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) or vector(0)", "conditions": [ { "evaluator": { "params": [ - 1.0 + 0.1 ], "type": "lt" }, @@ -595,17 +649,17 @@ "severity": "p2" }, { - "name": "http_server_add_tx_idle", - "title": "HTTP Server add_tx idle", + "name": "http_server_avg_add_tx_latency", + "title": "High HTTP server average add_tx latency", "ruleGroup": "http_server", - "expr": "increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) or vector(0)", + "expr": "rate(http_server_add_tx_latency_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) / rate(http_server_add_tx_latency_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", "conditions": [ { "evaluator": { "params": [ - 0.1 + 2.0 ], - "type": "lt" + "type": "gt" }, "operator": { "type": "and" @@ -649,17 +703,17 @@ "severity": "p3" }, { - "name": "http_server_internal_error_ratio", - "title": "http server internal error ratio", + 
"name": "http_server_idle", + "title": "http server idle", "ruleGroup": "http_server", - "expr": "increase(http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) / clamp_min(increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]), 1)", + "expr": "increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) or vector(0)", "conditions": [ { "evaluator": { "params": [ - 0.2 + 1.0 ], - "type": "gt" + "type": "lt" }, "operator": { "type": "and" @@ -676,15 +730,15 @@ "severity": "p2" }, { - "name": "http_server_internal_error_once", - "title": "http server internal error once", + "name": "http_server_internal_error_ratio", + "title": "http server internal error ratio", "ruleGroup": "http_server", - "expr": "increase(http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) or vector(0)", + "expr": "increase(http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) / clamp_min(increase(http_server_added_transactions_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]), 1)", "conditions": [ { "evaluator": { "params": [ - 0.0 + 0.2 ], "type": "gt" }, @@ -700,20 +754,20 @@ ], "for": "30s", "intervalSec": 30, - "severity": "p4" + "severity": "p2" }, { - "name": "http_server_no_successful_transactions", - "title": "http server no successful transactions", + "name": "http_server_internal_error_once", + "title": "http server internal error once", "ruleGroup": "http_server", - "expr": "increase(http_server_added_transactions_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) or vector(0)", + "expr": "increase(http_server_added_transactions_internal_error{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m]) or vector(0)", "conditions": [ { "evaluator": { "params": [ - 1.0 + 0.0 ], - "type": "lt" + "type": "gt" }, "operator": { "type": "and" @@ 
-727,20 +781,20 @@ ], "for": "30s", "intervalSec": 30, - "severity": "p2" + "severity": "p4" }, { - "name": "l1_gas_price_provider_insufficient_history", - "title": "L1 gas price provider insufficient history", - "ruleGroup": "l1_gas_price", - "expr": "increase(l1_gas_price_provider_insufficient_history{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", + "name": "http_server_low_successful_transaction_rate", + "title": "http server low successful transaction rate", + "ruleGroup": "http_server", + "expr": "rate(http_server_added_transactions_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m]) or vector(0)", "conditions": [ { "evaluator": { "params": [ - 0.0 + 0.05 ], - "type": "gt" + "type": "lt" }, "operator": { "type": "and" @@ -754,20 +808,20 @@ ], "for": "30s", "intervalSec": 30, - "severity": "p5" + "severity": "p3" }, { - "name": "l1_gas_price_scraper_reorg_detected", - "title": "L1 gas price scraper reorg detected", - "ruleGroup": "l1_gas_price", - "expr": "increase(l1_gas_price_scraper_reorg_detected{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", + "name": "http_server_no_successful_transactions", + "title": "http server no successful transactions", + "ruleGroup": "http_server", + "expr": "increase(http_server_added_transactions_success{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]) or vector(0)", "conditions": [ { "evaluator": { "params": [ - 0.0 + 1.0 ], - "type": "gt" + "type": "lt" }, "operator": { "type": "and" @@ -781,20 +835,20 @@ ], "for": "30s", "intervalSec": 30, - "severity": "p5" + "severity": "p2" }, { - "name": "l1_gas_price_scraper_success_count", - "title": "L1 gas price scraper success count", - "ruleGroup": "l1_gas_price", - "expr": "increase(l1_gas_price_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "name": "http_server_p95_add_tx_latency", + "title": "High HTTP server P95 add_tx latency", + "ruleGroup": "http_server", + "expr": "histogram_quantile(0.95, 
sum(rate(http_server_add_tx_latency_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])) by (le))", "conditions": [ { "evaluator": { "params": [ - 1.0 + 2.0 ], - "type": "lt" + "type": "gt" }, "operator": { "type": "and" @@ -808,13 +862,13 @@ ], "for": "30s", "intervalSec": 30, - "severity": "p3" + "severity": "p4" }, { - "name": "l1_gas_price_scraper_baselayer_error_count", - "title": "L1 gas price scraper baselayer error count", + "name": "l1_gas_price_provider_insufficient_history", + "title": "L1 gas price provider insufficient history", "ruleGroup": "l1_gas_price", - "expr": "increase(l1_gas_price_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", + "expr": "increase(l1_gas_price_provider_insufficient_history{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", "conditions": [ { "evaluator": { @@ -838,15 +892,15 @@ "severity": "p5" }, { - "name": "eth_to_strk_error_count", - "title": "Eth to Strk error count", + "name": "l1_gas_price_scraper_reorg_detected", + "title": "L1 gas price scraper reorg detected", "ruleGroup": "l1_gas_price", - "expr": "increase(eth_to_strk_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "expr": "increase(l1_gas_price_scraper_reorg_detected{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1m])", "conditions": [ { "evaluator": { "params": [ - 10.0 + 0.0 ], "type": "gt" }, @@ -860,15 +914,15 @@ "type": "query" } ], - "for": "1m", - "intervalSec": 20, + "for": "30s", + "intervalSec": 30, "severity": "p5" }, { - "name": "eth_to_strk_success_count", - "title": "Eth to Strk success count", + "name": "l1_gas_price_scraper_success_count", + "title": "L1 gas price scraper success count", "ruleGroup": "l1_gas_price", - "expr": "increase(eth_to_strk_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", + "expr": "increase(l1_gas_price_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h])", "conditions": [ { "evaluator": { @@ -892,17 
+946,17 @@ "severity": "p3" }, { - "name": "l1_message_no_successes", - "title": "L1 message no successes", + "name": "l1_gas_price_scraper_baselayer_error_count", + "title": "L1 gas price scraper baselayer error count", "ruleGroup": "l1_gas_price", - "expr": "increase(l1_message_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])", + "expr": "increase(l1_gas_price_scraper_baselayer_error_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[5m])", "conditions": [ { "evaluator": { "params": [ - 1.0 + 0.0 ], - "type": "lt" + "type": "gt" }, "operator": { "type": "and" @@ -916,7 +970,7 @@ ], "for": "30s", "intervalSec": 30, - "severity": "p2" + "severity": "p5" }, { "name": "l1_message_scraper_baselayer_error_count", @@ -945,6 +999,33 @@ "intervalSec": 30, "severity": "p5" }, + { + "name": "l1_message_no_successes", + "title": "L1 message no successes", + "ruleGroup": "l1_gas_price", + "expr": "increase(l1_message_scraper_success_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[20m])", + "conditions": [ + { + "evaluator": { + "params": [ + 1.0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "for": "30s", + "intervalSec": 30, + "severity": "p2" + }, { "name": "l1_message_scraper_reorg_detected", "title": "L1 message scraper reorg detected", diff --git a/crates/apollo_dashboard/src/alert_definitions.rs b/crates/apollo_dashboard/src/alert_definitions.rs index 2c6b428c716..8e9d24bd103 100644 --- a/crates/apollo_dashboard/src/alert_definitions.rs +++ b/crates/apollo_dashboard/src/alert_definitions.rs @@ -26,6 +26,7 @@ use apollo_http_server::metrics::{ ADDED_TRANSACTIONS_INTERNAL_ERROR, ADDED_TRANSACTIONS_SUCCESS, ADDED_TRANSACTIONS_TOTAL, + HTTP_SERVER_ADD_TX_LATENCY, }; use apollo_l1_gas_price::metrics::{ ETH_TO_STRK_ERROR_COUNT, @@ -523,6 +524,26 @@ fn get_http_server_no_successful_transactions() -> Alert { } } +fn 
get_http_server_low_successful_transaction_rate() -> Alert { + Alert { + name: "http_server_low_successful_transaction_rate", + title: "http server low successful transaction rate", + alert_group: AlertGroup::HttpServer, + expr: format!( + "rate({}[5m]) or vector(0)", + ADDED_TRANSACTIONS_SUCCESS.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::LessThan, + comparison_value: 0.05, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::DayOnly, + } +} + fn get_http_server_high_transaction_failure_ratio() -> Alert { Alert { name: "http_server_high_transaction_failure_ratio", @@ -544,6 +565,50 @@ fn get_http_server_high_transaction_failure_ratio() -> Alert { } } +/// Triggers if the average latency of `add_tx` calls, across all HTTP servers, exceeds 2 seconds +/// over a 5-minute window. +fn get_http_server_avg_add_tx_latency_alert() -> Alert { + let sum_metric = HTTP_SERVER_ADD_TX_LATENCY.get_name_sum_with_filter(); + let count_metric = HTTP_SERVER_ADD_TX_LATENCY.get_name_count_with_filter(); + + Alert { + name: "http_server_avg_add_tx_latency", + title: "High HTTP server average add_tx latency", + alert_group: AlertGroup::HttpServer, + expr: format!("rate({sum_metric}[5m]) / rate({count_metric}[5m])"), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 2.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::Regular, + } +} + +/// Triggers when the slowest 5% of transactions for a specific HTTP server are taking longer than 2 +/// seconds over a 5-minute window. 
+fn get_http_server_p95_add_tx_latency_alert() -> Alert { + Alert { + name: "http_server_p95_add_tx_latency", + title: "High HTTP server P95 add_tx latency", + alert_group: AlertGroup::HttpServer, + expr: format!( + "histogram_quantile(0.95, sum(rate({}[5m])) by (le))", + HTTP_SERVER_ADD_TX_LATENCY.get_name_with_filter() + ), + conditions: &[AlertCondition { + comparison_op: AlertComparisonOp::GreaterThan, + comparison_value: 2.0, + logical_op: AlertLogicalOp::And, + }], + pending_duration: PENDING_DURATION_DEFAULT, + evaluation_interval_sec: EVALUATION_INTERVAL_SEC_DEFAULT, + severity: AlertSeverity::WorkingHours, + } +} + fn get_l1_gas_price_scraper_baselayer_error_count_alert() -> Alert { Alert { name: "l1_gas_price_scraper_baselayer_error_count", @@ -983,21 +1048,24 @@ pub fn get_apollo_alerts() -> Alerts { get_consensus_round_high(), get_consensus_validate_proposal_failed_alert(), get_consensus_votes_num_sent_messages_alert(), + get_eth_to_strk_error_count_alert(), + get_eth_to_strk_success_count_alert(), get_gateway_add_tx_idle(), - get_http_server_idle(), get_http_server_add_tx_idle(), + get_http_server_avg_add_tx_latency_alert(), get_http_server_high_transaction_failure_ratio(), + get_http_server_idle(), get_http_server_internal_error_ratio(), get_http_server_internal_error_once(), + get_http_server_low_successful_transaction_rate(), get_http_server_no_successful_transactions(), + get_http_server_p95_add_tx_latency_alert(), get_l1_gas_price_provider_insufficient_history_alert(), get_l1_gas_price_reorg_detected_alert(), get_l1_gas_price_scraper_success_count_alert(), get_l1_gas_price_scraper_baselayer_error_count_alert(), - get_eth_to_strk_error_count_alert(), - get_eth_to_strk_success_count_alert(), - get_l1_message_scraper_no_successes_alert(), get_l1_message_scraper_baselayer_error_count_alert(), + get_l1_message_scraper_no_successes_alert(), get_l1_message_scraper_reorg_detected_alert(), get_mempool_add_tx_idle(), get_mempool_evictions_count_alert(), 
diff --git a/crates/apollo_dashboard/src/panels/http_server.rs b/crates/apollo_dashboard/src/panels/http_server.rs index 4607c888574..08170d4a97e 100644 --- a/crates/apollo_dashboard/src/panels/http_server.rs +++ b/crates/apollo_dashboard/src/panels/http_server.rs @@ -3,6 +3,7 @@ use apollo_http_server::metrics::{ ADDED_TRANSACTIONS_INTERNAL_ERROR, ADDED_TRANSACTIONS_SUCCESS, ADDED_TRANSACTIONS_TOTAL, + HTTP_SERVER_ADD_TX_LATENCY, }; use crate::dashboard::{Panel, PanelType, Row}; @@ -35,6 +36,10 @@ fn get_panel_http_server_transactions_received_rate() -> Panel { ) } +fn get_panel_http_add_tx_latency() -> Panel { + Panel::from_hist(HTTP_SERVER_ADD_TX_LATENCY, PanelType::TimeSeries) +} + pub(crate) fn get_http_server_row() -> Row { Row::new( "Http Server", @@ -44,6 +49,7 @@ pub(crate) fn get_http_server_row() -> Row { get_panel_added_transactions_success(), get_panel_added_transactions_failure(), get_panel_added_transactions_internal_error(), + get_panel_http_add_tx_latency(), ], ) } diff --git a/crates/apollo_deployments/Cargo.toml b/crates/apollo_deployments/Cargo.toml index 2a6c6c864a8..c61da960748 100644 --- a/crates/apollo_deployments/Cargo.toml +++ b/crates/apollo_deployments/Cargo.toml @@ -13,7 +13,6 @@ apollo_config.workspace = true apollo_infra_utils.workspace = true apollo_node.workspace = true apollo_protobuf.workspace = true -const_format.workspace = true hex.workspace = true indexmap.workspace = true libp2p = { workspace = true, features = ["identify"] } @@ -27,3 +26,5 @@ strum_macros.workspace = true [dev-dependencies] apollo_infra_utils = { workspace = true, features = ["testing"] } apollo_node = { workspace = true, features = ["testing"] } +tempfile.workspace = true +url = { workspace = true, features = ["serde"] } diff --git a/crates/apollo_deployments/resources/base_app_config.json b/crates/apollo_deployments/resources/base_app_config.json index a13a24bfd6c..50b0ae31c46 100644 --- a/crates/apollo_deployments/resources/base_app_config.json +++ 
b/crates/apollo_deployments/resources/base_app_config.json @@ -1,5 +1,4 @@ { - "base_layer_config.node_url": "http://localhost:53260/", "base_layer_config.prague_blob_gas_calc": true, "base_layer_config.timeout_millis": 1000, "batcher_config.block_builder_config.bouncer_config.block_max_capacity.l1_gas": 4400000, @@ -7,22 +6,22 @@ "batcher_config.block_builder_config.bouncer_config.block_max_capacity.n_events": 5000, "batcher_config.block_builder_config.bouncer_config.block_max_capacity.n_txs": 2000, "batcher_config.block_builder_config.bouncer_config.block_max_capacity.sierra_gas": 4000000000, - "batcher_config.block_builder_config.bouncer_config.block_max_capacity.proving_gas": 4000000000, + "batcher_config.block_builder_config.bouncer_config.block_max_capacity.proving_gas": 5000000000, "batcher_config.block_builder_config.bouncer_config.block_max_capacity.state_diff_size": 4000, - "batcher_config.block_builder_config.bouncer_config.builtin_weights.pedersen": 8100, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.pedersen": 10125, "batcher_config.block_builder_config.bouncer_config.builtin_weights.range_check": 70, - "batcher_config.block_builder_config.bouncer_config.builtin_weights.ecdsa": 1333333, - "batcher_config.block_builder_config.bouncer_config.builtin_weights.ec_op": 571900, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.ecdsa": 1666666, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.ec_op": 714875, "batcher_config.block_builder_config.bouncer_config.builtin_weights.bitwise": 583, - "batcher_config.block_builder_config.bouncer_config.builtin_weights.keccak": 408566, - "batcher_config.block_builder_config.bouncer_config.builtin_weights.poseidon": 8334, - "batcher_config.block_builder_config.bouncer_config.builtin_weights.add_mod": 250, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.keccak": 510707, + 
"batcher_config.block_builder_config.bouncer_config.builtin_weights.poseidon": 6250, + "batcher_config.block_builder_config.bouncer_config.builtin_weights.add_mod": 312, "batcher_config.block_builder_config.bouncer_config.builtin_weights.mul_mod": 604, "batcher_config.block_builder_config.bouncer_config.builtin_weights.range_check96": 56, - "batcher_config.block_builder_config.execute_config.n_workers": 4, + "batcher_config.block_builder_config.execute_config.n_workers": 28, "batcher_config.block_builder_config.execute_config.stack_size": 62914560, - "batcher_config.block_builder_config.n_concurrent_txs": 10, - "batcher_config.block_builder_config.tx_polling_interval_millis": 100, + "batcher_config.block_builder_config.n_concurrent_txs": 100, + "batcher_config.block_builder_config.tx_polling_interval_millis": 1, "batcher_config.contract_class_manager_config.cairo_native_run_config.channel_size": 2000, "batcher_config.contract_class_manager_config.cairo_native_run_config.native_classes_whitelist": "All", "batcher_config.contract_class_manager_config.cairo_native_run_config.panic_on_compilation_failure": false, @@ -85,14 +84,12 @@ "consensus_manager_config.context_config.l1_da_mode": true, "consensus_manager_config.context_config.l1_data_gas_price_multiplier_ppt": 135, "consensus_manager_config.context_config.l1_gas_tip_wei": 1000000000, - "consensus_manager_config.context_config.num_validators": 3, "consensus_manager_config.context_config.proposal_buffer_size": 512, "consensus_manager_config.context_config.validate_proposal_margin_millis": 10000, "consensus_manager_config.context_config.min_l1_gas_price_wei": 1000000000, "consensus_manager_config.context_config.max_l1_gas_price_wei": 1000000000000, "consensus_manager_config.context_config.min_l1_data_gas_price_wei": 1, "consensus_manager_config.context_config.max_l1_data_gas_price_wei": 1000000000000, - "consensus_manager_config.eth_to_strk_oracle_config.headers": "", 
"consensus_manager_config.eth_to_strk_oracle_config.lag_interval_seconds": 900, "consensus_manager_config.eth_to_strk_oracle_config.max_cache_size": 100, "consensus_manager_config.eth_to_strk_oracle_config.query_timeout_sec": 3, @@ -118,6 +115,7 @@ "gateway_config.block_declare": false, "gateway_config.stateful_tx_validator_config.max_allowed_nonce_gap": 50, "gateway_config.stateful_tx_validator_config.max_nonce_for_validation_skip": "0x1", + "gateway_config.stateful_tx_validator_config.min_gas_price_percentage": 100, "gateway_config.stateful_tx_validator_config.reject_future_declare_txs": true, "gateway_config.stateless_tx_validator_config.max_calldata_length": 5000, "gateway_config.stateless_tx_validator_config.max_contract_bytecode_size": 81920, @@ -126,14 +124,13 @@ "gateway_config.stateless_tx_validator_config.max_sierra_version.minor": 7, "gateway_config.stateless_tx_validator_config.max_sierra_version.patch": 0, "gateway_config.stateless_tx_validator_config.max_signature_length": 4000, - "gateway_config.stateless_tx_validator_config.min_gas_price": 100000000, + "gateway_config.stateless_tx_validator_config.min_gas_price": 3000000000, "gateway_config.stateless_tx_validator_config.min_sierra_version.major": 1, "gateway_config.stateless_tx_validator_config.min_sierra_version.minor": 1, "gateway_config.stateless_tx_validator_config.min_sierra_version.patch": 0, "gateway_config.stateless_tx_validator_config.validate_non_zero_resource_bounds": true, "http_server_config.ip": "0.0.0.0", "http_server_config.port": 8080, - "l1_endpoint_monitor_config.ordered_l1_endpoint_urls": "http://localhost:53260/", "l1_gas_price_provider_config.lag_margin_seconds": 60, "l1_gas_price_provider_config.number_of_blocks_for_mean": 300, "l1_gas_price_provider_config.storage_limit": 3000, @@ -181,13 +178,10 @@ "monitoring_endpoint_config.collect_profiling_metrics": true, "monitoring_endpoint_config.ip": "0.0.0.0", "monitoring_endpoint_config.port": 8082, - "recorder_url": 
"http://127.0.0.1:53261/", "revert_config.revert_up_to_and_including": 18446744073709551615, "revert_config.should_revert": false, - "state_sync_config.central_sync_client_config.#is_none": false, "state_sync_config.central_sync_client_config.central_source_config.class_cache_size": 128, "state_sync_config.central_sync_client_config.central_source_config.concurrent_requests": 10, - "state_sync_config.central_sync_client_config.central_source_config.http_headers": "", "state_sync_config.central_sync_client_config.central_source_config.max_classes_to_download": 20, "state_sync_config.central_sync_client_config.central_source_config.max_state_updates_to_download": 20, "state_sync_config.central_sync_client_config.central_source_config.max_state_updates_to_store_in_memory": 20, @@ -202,7 +196,6 @@ "state_sync_config.central_sync_client_config.sync_config.state_updates_max_stream_size": 1000, "state_sync_config.central_sync_client_config.sync_config.store_sierras_and_casms": false, "state_sync_config.central_sync_client_config.sync_config.verify_blocks": false, - "state_sync_config.network_config.#is_none": true, "state_sync_config.network_config.advertised_multiaddr": "", "state_sync_config.network_config.advertised_multiaddr.#is_none": true, "state_sync_config.network_config.bootstrap_peer_multiaddr": "", @@ -217,9 +210,7 @@ "state_sync_config.network_config.peer_manager_config.unstable_timeout_millis": 1000, "state_sync_config.network_config.port": 53140, "state_sync_config.network_config.reported_peer_ids_buffer_size": 100000, - "state_sync_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "state_sync_config.network_config.session_timeout": 120, - "state_sync_config.p2p_sync_client_config.#is_none": true, "state_sync_config.p2p_sync_client_config.buffer_size": 100000, "state_sync_config.p2p_sync_client_config.num_block_classes_per_query": 100, 
"state_sync_config.p2p_sync_client_config.num_block_state_diffs_per_query": 100, diff --git a/crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_0.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_0.json similarity index 76% rename from crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_0.json rename to crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_0.json index 480104fc784..fbcfef22609 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_0.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_0.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", "services/hybrid/core.json" ], "ingress": null, @@ -36,8 +36,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", "services/hybrid/http_server.json" ], "ingress": { @@ -79,8 +79,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - 
"sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -109,8 +109,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -132,15 +132,15 @@ "external_secret": { "gcsm_key": "apollo-sepolia-integration-0" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_0.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_1.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_1.json similarity index 76% rename from crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_1.json rename to crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_1.json index ceaee28fb96..38eb18c4369 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_1.json +++ 
b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_1.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", "services/hybrid/core.json" ], "ingress": null, @@ -36,8 +36,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", "services/hybrid/http_server.json" ], "ingress": { @@ -79,8 +79,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -109,8 +109,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -132,15 +132,15 @@ "external_secret": { "gcsm_key": 
"apollo-sepolia-integration-1" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_1.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_2.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_2.json similarity index 76% rename from crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_2.json rename to crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_2.json index 325c35c41c4..e3578a6dd10 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/deployment_configs/integration_hybrid_node_2.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_hybrid_2.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", "services/hybrid/core.json" ], "ingress": null, @@ -36,8 +36,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - 
"sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", "services/hybrid/http_server.json" ], "ingress": { @@ -79,8 +79,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -109,8 +109,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -132,15 +132,15 @@ "external_secret": { "gcsm_key": "apollo-sepolia-integration-2" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/sepolia_integration/deployment_config_override.json", + "deployments/sepolia_integration/hybrid_2.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json 
b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_override.json similarity index 73% rename from crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json rename to crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_override.json index 07293ad91bb..751605bf6d0 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/deployment_config_override.json @@ -1,10 +1,14 @@ { "base_layer_config.starknet_contract_address": "0x4737c0c1B4D5b1A687B42610DdabEE781152359c", "chain_id": "SN_INTEGRATION_SEPOLIA", + "consensus_manager_config.context_config.num_validators": 3, "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", "eth_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", "l1_provider_config.provider_startup_height_override": 0, "l1_provider_config.provider_startup_height_override.#is_none": true, "starknet_url": "https://feeder.integration-sepolia.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, "strk_fee_token_address": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" } diff --git a/crates/apollo_deployments/resources/testing/app_configs/consolidated/deployment_test_consolidated/instance_config_override.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_0.json similarity index 71% rename from crates/apollo_deployments/resources/testing/app_configs/consolidated/deployment_test_consolidated/instance_config_override.json rename to 
crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_0.json index 3ad0c80875a..979d8ad5661 100644 --- a/crates/apollo_deployments/resources/testing/app_configs/consolidated/deployment_test_consolidated/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_0.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "validator_id": "0x64" } diff --git a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_1.json similarity index 78% rename from crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_1.json index b523a2b8246..80708430f9d 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_1.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, 
"consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "validator_id": "0x65" } diff --git a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_2.json similarity index 78% rename from crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_2.json index 2054388aa31..2f1bbfaa8ea 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_integration/hybrid_2.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": 
"/dns/sequencer-core-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-integration-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "validator_id": "0x66" } diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_0.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_0.json new file mode 100644 index 00000000000..5f9decfbc9d --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_0.json @@ -0,0 +1,176 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-sepolia-alpha-0.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2-d56", + "resources": { + "requests": { + "cpu": 50, + "memory": 200 + 
}, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": true + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io", + "internal": true + }, 
+ "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": true + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_0.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-0" + }, + "anti_affinity": false + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_1.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_1.json new file mode 100644 index 00000000000..986808fc9fc --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_1.json @@ -0,0 +1,176 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-sepolia-alpha-1.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2-d56", + "resources": { + "requests": { + "cpu": 50, + 
"memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": true + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-mempool-service.apollo-sepolia-alpha-1.starknet.io", + 
"internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": true + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_1.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-1" + }, + "anti_affinity": false + } + ] +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_2.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_2.json new file mode 100644 index 00000000000..6581158ccad --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_hybrid_2.json @@ -0,0 +1,176 @@ +{ + "application_config_subdir": "crates/apollo_deployments/resources/", + "services": [ + { + "name": "Core", + "controller": "StatefulSet", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/core.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": "sequencer-core-service.apollo-sepolia-alpha-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": 1000, + "toleration": "apollo-core-service-c2-d56", + "resources": { + 
"requests": { + "cpu": 50, + "memory": 200 + }, + "limits": { + "cpu": 50, + "memory": 220 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": true + }, + { + "name": "HttpServer", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/http_server.json" + ], + "ingress": { + "domain": "starknet.io", + "alternative_names": [ + "alpha-sepolia.starknet.io" + ], + "internal": false, + "rules": [ + { + "path": "/gateway", + "port": 8080, + "backend": null + } + ] + }, + "k8s_service_config": null, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 4, + "memory": 8 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false + }, + { + "name": "Gateway", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/gateway.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false + }, + { + "name": "Mempool", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/mempool.json" + ], + "ingress": null, + "k8s_service_config": { + "type": "LoadBalancer", + "external_dns_name": 
"sequencer-mempool-service.apollo-sepolia-alpha-2.starknet.io", + "internal": true + }, + "autoscale": false, + "replicas": 1, + "storage": null, + "toleration": "apollo-core-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": true + }, + { + "name": "SierraCompiler", + "controller": "Deployment", + "config_paths": [ + "base_app_config.json", + "deployments/sepolia_testnet/deployment_config_override.json", + "deployments/sepolia_testnet/hybrid_2.json", + "services/hybrid/sierra_compiler.json" + ], + "ingress": null, + "k8s_service_config": null, + "autoscale": true, + "replicas": 2, + "storage": null, + "toleration": "apollo-general-service", + "resources": { + "requests": { + "cpu": 1, + "memory": 2 + }, + "limits": { + "cpu": 2, + "memory": 4 + } + }, + "external_secret": { + "gcsm_key": "apollo-sepolia-alpha-2" + }, + "anti_affinity": false + } + ] +} diff --git a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_override.json similarity index 53% rename from crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json rename to crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_override.json index 07293ad91bb..b1d601770e6 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/deployment_config_override.json @@ -1,10 +1,14 @@ { - "base_layer_config.starknet_contract_address": "0x4737c0c1B4D5b1A687B42610DdabEE781152359c", - "chain_id": "SN_INTEGRATION_SEPOLIA", + 
"base_layer_config.starknet_contract_address": "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057", + "chain_id": "SN_SEPOLIA", + "consensus_manager_config.context_config.num_validators": 3, "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", "eth_fee_token_address": "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", "l1_provider_config.provider_startup_height_override": 0, "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "https://feeder.integration-sepolia.starknet.io/", + "starknet_url": "https://feeder.alpha-sepolia.starknet.io", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, "strk_fee_token_address": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" } diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_0.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_0.json new file mode 100644 index 00000000000..7eeca042f06 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + 
"mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_1.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_1.json new file mode 100644 index 00000000000..6aa420d5344 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_1.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-1.starknet.io/tcp/53080/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-1.starknet.io/tcp/53200/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x65" +} diff --git a/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_2.json b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_2.json new file mode 100644 index 00000000000..c0583c68e16 --- /dev/null +++ 
b/crates/apollo_deployments/resources/deployments/sepolia_testnet/hybrid_2.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-2.starknet.io/tcp/53080/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-sepolia-alpha-0.starknet.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-2.starknet.io/tcp/53200/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-sepolia-alpha-0.starknet.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, + "validator_id": "0x66" +} diff --git a/crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_0.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_0.json similarity index 77% rename from crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_0.json rename to crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_0.json index 76f039b569f..7c38da375ec 100644 --- a/crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_0.json +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_0.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ 
"base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", "services/hybrid/core.json" ], "ingress": null, @@ -36,8 +36,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", "services/hybrid/http_server.json" ], "ingress": { @@ -79,8 +79,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -109,8 +109,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -132,15 +132,15 @@ "external_secret": { "gcsm_key": "apollo-stresstest-dev-0" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - 
"stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_0.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_1.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_1.json similarity index 77% rename from crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_1.json rename to crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_1.json index d143f12fe15..36f566cbbdf 100644 --- a/crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_1.json +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_1.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", "services/hybrid/core.json" ], "ingress": null, @@ -36,8 +36,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", "services/hybrid/http_server.json" ], "ingress": { @@ -79,8 +79,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - 
"stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -109,8 +109,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -132,15 +132,15 @@ "external_secret": { "gcsm_key": "apollo-stresstest-dev-1" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_1.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_2.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_2.json similarity index 77% rename from crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_2.json rename to crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_2.json index f1eb48c7b54..df6401b8d07 100644 --- a/crates/apollo_deployments/resources/stress_test/deployment_configs/integration_hybrid_node_2.json +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_hybrid_2.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ 
"base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", "services/hybrid/core.json" ], "ingress": null, @@ -36,8 +36,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", "services/hybrid/http_server.json" ], "ingress": { @@ -79,8 +79,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -109,8 +109,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -132,15 +132,15 @@ "external_secret": { "gcsm_key": "apollo-stresstest-dev-2" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - 
"stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/stress_test/deployment_config_override.json", + "deployments/stress_test/hybrid_2.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_override.json new file mode 100644 index 00000000000..f8f7937cc5e --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/stress_test/deployment_config_override.json @@ -0,0 +1,14 @@ +{ + "base_layer_config.starknet_contract_address": "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f", + "chain_id": "INTERNAL_STRESS_TEST", + "consensus_manager_config.context_config.num_validators": 3, + "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", + "eth_fee_token_address": "0x7e813ecf3e7b3e14f07bd2f68cb4a3d12110e3c75ec5a63de3d2dacf1852904", + "l1_provider_config.provider_startup_height_override": 0, + "l1_provider_config.provider_startup_height_override.#is_none": true, + "starknet_url": "http://feeder-gateway.starknet-0-14-0-stress-test-03:9713/", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, + "strk_fee_token_address": "0x2208cce4221df1f35943958340abc812aa79a8f6a533bff4ee00416d3d06cd6" +} diff --git a/crates/apollo_deployments/resources/testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_0.json similarity index 71% rename from crates/apollo_deployments/resources/testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json rename to 
crates/apollo_deployments/resources/deployments/stress_test/hybrid_0.json index 3ad0c80875a..979d8ad5661 100644 --- a/crates/apollo_deployments/resources/testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_0.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "validator_id": "0x64" } diff --git a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_1.json similarity index 78% rename from crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/stress_test/hybrid_1.json index 9a8a9895dfe..c86a3528944 100644 --- a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_1.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": 
"/dns/sequencer-core-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "validator_id": "0x65" } diff --git a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_2.json similarity index 78% rename from crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/stress_test/hybrid_2.json index 613ee6f20e5..7958055b92b 100644 --- a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/stress_test/hybrid_2.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, 
- "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-stresstest-dev-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "validator_id": "0x66" } diff --git a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json b/crates/apollo_deployments/resources/deployments/testing/consolidated.json similarity index 71% rename from crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/testing/consolidated.json index 3ad0c80875a..979d8ad5661 100644 --- a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/testing/consolidated.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", 
"mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "validator_id": "0x64" } diff --git a/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_consolidated.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json similarity index 75% rename from crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_consolidated.json rename to crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json index 7cf9270cc19..3d792b529e2 100644 --- a/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_consolidated.json +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_consolidated.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing/app_configs/consolidated/deployment_test_consolidated/deployment_config_override.json", - "testing/app_configs/consolidated/deployment_test_consolidated/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/consolidated.json", "services/consolidated/node.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_distributed.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_distributed.json similarity index 75% rename from crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_distributed.json rename to crates/apollo_deployments/resources/deployments/testing/deployment_config_distributed.json index 93545857b50..693a329be6f 100644 --- a/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_distributed.json +++ 
b/crates/apollo_deployments/resources/deployments/testing/deployment_config_distributed.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/batcher.json" ], "ingress": null, @@ -34,8 +34,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/class_manager.json" ], "ingress": null, @@ -62,8 +62,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/consensus_manager.json" ], "ingress": null, @@ -90,8 +90,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/http_server.json" ], "ingress": { @@ -129,8 +129,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - 
"testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/gateway.json" ], "ingress": null, @@ -157,8 +157,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/l1.json" ], "ingress": null, @@ -185,8 +185,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/mempool.json" ], "ingress": null, @@ -213,8 +213,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/distributed.json", "services/distributed/sierra_compiler.json" ], "ingress": null, @@ -241,8 +241,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json", - "testing/app_configs/distributed/deployment_test_distributed/instance_config_override.json", + "deployments/testing/deployment_config_override.json", 
+ "deployments/testing/distributed.json", "services/distributed/state_sync.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_hybrid.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_hybrid.json similarity index 77% rename from crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_hybrid.json rename to crates/apollo_deployments/resources/deployments/testing/deployment_config_hybrid.json index f16ca22c38c..803686bc49a 100644 --- a/crates/apollo_deployments/resources/testing/deployment_configs/deployment_test_hybrid.json +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_hybrid.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json", - "testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", "services/hybrid/core.json" ], "ingress": null, @@ -34,8 +34,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json", - "testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", "services/hybrid/http_server.json" ], "ingress": { @@ -73,8 +73,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json", - "testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -101,8 +101,8 @@ "controller": "Deployment", 
"config_paths": [ "base_app_config.json", - "testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json", - "testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -129,8 +129,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json", - "testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json", + "deployments/testing/deployment_config_override.json", + "deployments/testing/hybrid.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/testing/app_configs/consolidated/deployment_test_consolidated/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/testing/deployment_config_override.json similarity index 69% rename from crates/apollo_deployments/resources/testing/app_configs/consolidated/deployment_test_consolidated/deployment_config_override.json rename to crates/apollo_deployments/resources/deployments/testing/deployment_config_override.json index 9d5f4ac373a..e783b696e12 100644 --- a/crates/apollo_deployments/resources/testing/app_configs/consolidated/deployment_test_consolidated/deployment_config_override.json +++ b/crates/apollo_deployments/resources/deployments/testing/deployment_config_override.json @@ -1,10 +1,14 @@ { "base_layer_config.starknet_contract_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", "chain_id": "CHAIN_ID_SUBDIR", + "consensus_manager_config.context_config.num_validators": 1, "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", "eth_fee_token_address": "0x1001", "l1_provider_config.provider_startup_height_override": 1, 
"l1_provider_config.provider_startup_height_override.#is_none": false, "starknet_url": "https://integration-sepolia.starknet.io/", + "state_sync_config.central_sync_client_config.#is_none": true, + "state_sync_config.network_config.#is_none": false, + "state_sync_config.p2p_sync_client_config.#is_none": false, "strk_fee_token_address": "0x1002" } diff --git a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json b/crates/apollo_deployments/resources/deployments/testing/distributed.json similarity index 71% rename from crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/testing/distributed.json index 3ad0c80875a..979d8ad5661 100644 --- a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/testing/distributed.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "validator_id": "0x64" } diff --git a/crates/apollo_deployments/resources/deployments/testing/hybrid.json b/crates/apollo_deployments/resources/deployments/testing/hybrid.json new 
file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing/hybrid.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_0.json b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_0.json similarity index 78% rename from crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_0.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_0.json index 4ad05a972e7..7299e1d8c19 100644 --- a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_0.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_0.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_0.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - 
"testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_0.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_0.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_0.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "sequencer-test-3-node-0" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_0.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_1.json 
b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_1.json similarity index 78% rename from crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_1.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_1.json index b9124a12ff1..a109bf962ae 100644 --- a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_1.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_1.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_1.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_1.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_1.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - 
"testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_1.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "sequencer-test-3-node-1" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_1.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_2.json b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_2.json similarity index 78% rename from crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_2.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_2.json index 5fc9e15a534..471b014e73d 100644 --- a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_2.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_2.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + 
"deployments/testing_env_3/hybrid_2.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_2.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_2.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_2.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "sequencer-test-3-node-2" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_2.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff 
--git a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_3.json b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_3.json similarity index 78% rename from crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_3.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_3.json index cff68c334cc..118ab73f57a 100644 --- a/crates/apollo_deployments/resources/testing_env_3/deployment_configs/integration_hybrid_node_3.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_hybrid_3.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_3.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_3.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_3.json", "services/hybrid/gateway.json" ], "ingress": null, @@ 
-113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_3.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "sequencer-test-3-node-3" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json", - "testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json", + "deployments/testing_env_3/deployment_config_override.json", + "deployments/testing_env_3/hybrid_3.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_override.json similarity index 73% rename from crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_override.json index 68abefe3c85..b5c6ec58daa 100644 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/deployment_config_override.json @@ -1,10 +1,14 @@ { "base_layer_config.starknet_contract_address": "0xa23a6BA7DA61988D2420dAE9F10eE964552459d5", "chain_id": "SN_GOERLI", + "consensus_manager_config.context_config.num_validators": 3, 
"consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", "eth_fee_token_address": "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", "l1_provider_config.provider_startup_height_override": 0, "l1_provider_config.provider_startup_height_override.#is_none": true, "starknet_url": "https://fgw-sn-test-sepolia-3-sepolia.gateway-proxy.sw-dev.io", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" } diff --git a/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_0.json b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_0.json new file mode 100644 index 00000000000..979d8ad5661 --- /dev/null +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_0.json @@ -0,0 +1,11 @@ +{ + "consensus_manager_config.network_config.advertised_multiaddr": "", + "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, + "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", + "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.advertised_multiaddr": "", + "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", + "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, + "validator_id": "0x64" +} diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_1.json similarity index 78% rename from 
crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_1.json index b24c2dddbe6..31f6ca7b17d 100644 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_1/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_1.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.sequencer-test-3-node-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.sequencer-test-3-node-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "validator_id": "0x65" } diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_2.json similarity index 78% rename from crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_2.json 
index d67e32fa548..0817c9d7e32 100644 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_2/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_2.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.sequencer-test-3-node-0.svc.cluster.local/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "mempool_p2p_config.network_config.advertised_multiaddr": "", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.sequencer-test-3-node-0.svc.cluster.local/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "validator_id": "0x66" } diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_3.json similarity index 82% rename from crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_3.json index 829305a80b1..e81f2d2d515 100644 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_3/instance_config_override.json +++ 
b/crates/apollo_deployments/resources/deployments/testing_env_3/hybrid_3.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.sequencer-test-3-node-0.sw-dev.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010104", "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.sequencer-test-3-node-3.sw-dev.io/tcp/53200/p2p/12D3KooWFdTjV6DXVJfQFisTXadCsqGzCbEnJJWzc6mXSPwy9g54", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.sequencer-test-3-node-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010104", "validator_id": "0x67" } diff --git a/crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_0.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_0.json similarity index 80% rename from crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_0.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_0.json index 83ba80a6db3..938cc515aa8 100644 --- a/crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_0.json +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_0.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ 
"base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "apollo-alpha-test-0" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json", + 
"deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_0.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_1.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_1.json similarity index 80% rename from crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_1.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_1.json index f2b17b7d65a..edb649933f3 100644 --- a/crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_1.json +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_1.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", 
"services/hybrid/gateway.json" ], "ingress": null, @@ -113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "apollo-alpha-test-1" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_1.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git a/crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_2.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_2.json similarity index 80% rename from crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_2.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_2.json index f8591bcfa8a..d4688e8e130 100644 --- a/crates/apollo_deployments/resources/upgrade_test/deployment_configs/hybrid_node_2.json +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_hybrid_2.json @@ -6,8 +6,8 @@ "controller": "StatefulSet", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + 
"deployments/upgrade_test/hybrid_2.json", "services/hybrid/core.json" ], "ingress": null, @@ -40,8 +40,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", "services/hybrid/http_server.json" ], "ingress": { @@ -83,8 +83,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", "services/hybrid/gateway.json" ], "ingress": null, @@ -113,8 +113,8 @@ "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", "services/hybrid/mempool.json" ], "ingress": null, @@ -140,15 +140,15 @@ "external_secret": { "gcsm_key": "apollo-alpha-test-2" }, - "anti_affinity": false + "anti_affinity": true }, { "name": "SierraCompiler", "controller": "Deployment", "config_paths": [ "base_app_config.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json", - "upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json", + "deployments/upgrade_test/deployment_config_override.json", + "deployments/upgrade_test/hybrid_2.json", "services/hybrid/sierra_compiler.json" ], "ingress": null, diff --git 
a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_override.json similarity index 65% rename from crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_override.json index 8df9aebc121..a8a2d18e8dd 100644 --- a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_0/deployment_config_override.json +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/deployment_config_override.json @@ -1,10 +1,14 @@ { "base_layer_config.starknet_contract_address": "0x9b8A6361d204a0C1F93d5194763538057444d958", "chain_id": "SN_GOERLI", + "consensus_manager_config.context_config.num_validators": 3, "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", "eth_fee_token_address": "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", "l1_provider_config.provider_startup_height_override": 0, "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io", + "starknet_url": "https://feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io", + "state_sync_config.central_sync_client_config.#is_none": false, + "state_sync_config.network_config.#is_none": true, + "state_sync_config.p2p_sync_client_config.#is_none": true, "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" } diff --git a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_0.json similarity index 78% rename from 
crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_0.json index bc40ea6066d..cff8cf4a87d 100644 --- a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_0/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_0.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", "validator_id": "0x64" } diff --git a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_1.json similarity index 82% rename from crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_1.json index 8440669c83b..36f7cadc228 100644 --- a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_1/instance_config_override.json +++ 
b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_1.json @@ -3,11 +3,9 @@ "consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-0.sw-dev.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-1.sw-dev.io/tcp/53200/p2p/12D3KooWCPzcTZ4ymgyveYaFfZ4bfWsBEh2KxuxM3Rmy7MunqHwe", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010102", "validator_id": "0x65" } diff --git a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_2.json similarity index 82% rename from crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json rename to crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_2.json index 88c31176ef8..8ee85a63ad6 100644 --- a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_2/instance_config_override.json +++ b/crates/apollo_deployments/resources/deployments/upgrade_test/hybrid_2.json @@ -3,11 +3,9 @@ 
"consensus_manager_config.network_config.advertised_multiaddr.#is_none": false, "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-core-service.apollo-alpha-test-0.sw-dev.io/tcp/53080/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "mempool_p2p_config.network_config.advertised_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-2.sw-dev.io/tcp/53200/p2p/12D3KooWT3eoCYeMPrSNnF1eQHimWFDiqPkna7FUD6XKBw8oPiMp", "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": false, "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "/dns/sequencer-mempool-service.apollo-alpha-test-0.sw-dev.io/tcp/53200/p2p/12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": false, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010103", "validator_id": "0x66" } diff --git a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json b/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json deleted file mode 100644 index 07293ad91bb..00000000000 --- a/crates/apollo_deployments/resources/sepolia_integration/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x4737c0c1B4D5b1A687B42610DdabEE781152359c", - "chain_id": "SN_INTEGRATION_SEPOLIA", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": 
"0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "https://feeder.integration-sepolia.starknet.io/", - "strk_fee_token_address": "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" -} diff --git a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json b/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json deleted file mode 100644 index 6fd1c7d62c5..00000000000 --- a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x497d1c054cec40f64454b45deecdc83e0c7f7b961c63531eae03748abd95350", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "http://feeder-gateway.starknet-0-14-0-stress-test:9713/", - "strk_fee_token_address": "0x4fa9355c504fa2de263bd7920644b5e48794fe1450ec2a6526518ad77d6a567" -} diff --git a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json b/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json deleted file mode 100644 index 6fd1c7d62c5..00000000000 --- a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_1/deployment_config_override.json +++ 
/dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x497d1c054cec40f64454b45deecdc83e0c7f7b961c63531eae03748abd95350", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "http://feeder-gateway.starknet-0-14-0-stress-test:9713/", - "strk_fee_token_address": "0x4fa9355c504fa2de263bd7920644b5e48794fe1450ec2a6526518ad77d6a567" -} diff --git a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json b/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json deleted file mode 100644 index 6fd1c7d62c5..00000000000 --- a/crates/apollo_deployments/resources/stress_test/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x497d1c054cec40f64454b45deecdc83e0c7f7b961c63531eae03748abd95350", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "http://feeder-gateway.starknet-0-14-0-stress-test:9713/", - "strk_fee_token_address": "0x4fa9355c504fa2de263bd7920644b5e48794fe1450ec2a6526518ad77d6a567" -} diff --git 
a/crates/apollo_deployments/resources/testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json b/crates/apollo_deployments/resources/testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json deleted file mode 100644 index 9d5f4ac373a..00000000000 --- a/crates/apollo_deployments/resources/testing/app_configs/distributed/deployment_test_distributed/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", - "chain_id": "CHAIN_ID_SUBDIR", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x1001", - "l1_provider_config.provider_startup_height_override": 1, - "l1_provider_config.provider_startup_height_override.#is_none": false, - "starknet_url": "https://integration-sepolia.starknet.io/", - "strk_fee_token_address": "0x1002" -} diff --git a/crates/apollo_deployments/resources/testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json b/crates/apollo_deployments/resources/testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json deleted file mode 100644 index 9d5f4ac373a..00000000000 --- a/crates/apollo_deployments/resources/testing/app_configs/hybrid/deployment_test_hybrid/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x5FbDB2315678afecb367f032d93F642f64180aa3", - "chain_id": "CHAIN_ID_SUBDIR", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x1001", - "l1_provider_config.provider_startup_height_override": 1, - "l1_provider_config.provider_startup_height_override.#is_none": false, - "starknet_url": 
"https://integration-sepolia.starknet.io/", - "strk_fee_token_address": "0x1002" -} diff --git a/crates/apollo_deployments/resources/testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json b/crates/apollo_deployments/resources/testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json deleted file mode 100644 index 3ad0c80875a..00000000000 --- a/crates/apollo_deployments/resources/testing/app_configs/hybrid/deployment_test_hybrid/instance_config_override.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "consensus_manager_config.network_config.advertised_multiaddr": "", - "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, - "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", - "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", - "mempool_p2p_config.network_config.advertised_multiaddr": "", - "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", - "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", - "validator_id": "0x64" -} diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json b/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json deleted file mode 100644 index 68abefe3c85..00000000000 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_0/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0xa23a6BA7DA61988D2420dAE9F10eE964552459d5", 
- "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "https://fgw-sn-test-sepolia-3-sepolia.gateway-proxy.sw-dev.io", - "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" -} diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json b/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json deleted file mode 100644 index 3ad0c80875a..00000000000 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_0/instance_config_override.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "consensus_manager_config.network_config.advertised_multiaddr": "", - "consensus_manager_config.network_config.advertised_multiaddr.#is_none": true, - "consensus_manager_config.network_config.bootstrap_peer_multiaddr": "", - "consensus_manager_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", - "mempool_p2p_config.network_config.advertised_multiaddr": "", - "mempool_p2p_config.network_config.advertised_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.bootstrap_peer_multiaddr": "", - "mempool_p2p_config.network_config.bootstrap_peer_multiaddr.#is_none": true, - "mempool_p2p_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", - "validator_id": "0x64" -} diff --git 
a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json b/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json deleted file mode 100644 index 68abefe3c85..00000000000 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_2/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0xa23a6BA7DA61988D2420dAE9F10eE964552459d5", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "https://fgw-sn-test-sepolia-3-sepolia.gateway-proxy.sw-dev.io", - "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" -} diff --git a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json b/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json deleted file mode 100644 index 68abefe3c85..00000000000 --- a/crates/apollo_deployments/resources/testing_env_3/app_configs/hybrid/integration_hybrid_node_3/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0xa23a6BA7DA61988D2420dAE9F10eE964552459d5", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": 
"0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "https://fgw-sn-test-sepolia-3-sepolia.gateway-proxy.sw-dev.io", - "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" -} diff --git a/crates/apollo_deployments/resources/testing_secrets.json b/crates/apollo_deployments/resources/testing_secrets.json index 80a8941a4af..f33d7e9b736 100644 --- a/crates/apollo_deployments/resources/testing_secrets.json +++ b/crates/apollo_deployments/resources/testing_secrets.json @@ -1,6 +1,14 @@ { - "recorder_url": "http://dummy-recorder-service.dummy-recorder.svc.cluster.local", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "http://dummy-eth2strk-oracle-service.dummy-eth2strk-oracle.svc.cluster.local/eth_to_strk_oracle?timestamp=", "base_layer_config.node_url": "http://anvil-service.anvil.svc.cluster.local:8545", - "l1_endpoint_monitor_config.ordered_l1_endpoint_urls": "http://anvil-service.anvil.svc.cluster.local:8545" + "consensus_manager_config.eth_to_strk_oracle_config.base_url": "http://dummy-eth2strk-oracle-service.dummy-eth2strk-oracle.svc.cluster.local/eth_to_strk_oracle?timestamp=:9000", + "consensus_manager_config.eth_to_strk_oracle_config.headers": "", + "consensus_manager_config.network_config.secret_key": "0x0101010101010101010101010101010101010101010101010101010101010101", + "l1_endpoint_monitor_config.ordered_l1_endpoint_urls": "http://anvil-service.anvil.svc.cluster.local:8545", + "mempool_p2p_config.network_config.secret_key" : "0x0101010101010101010101010101010101010101010101010101010101010101", + "recorder_url": "http://dummy-recorder-service.dummy-recorder.svc.cluster.local:8080", + "state_sync_config.central_sync_client_config.central_source_config.http_headers": "", + "state_sync_config.network_config.secret_key" : 
"0x0101010101010101010101010101010101010101010101010101010101010101" } + + + diff --git a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json b/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json deleted file mode 100644 index 8df9aebc121..00000000000 --- a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_1/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x9b8A6361d204a0C1F93d5194763538057444d958", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io", - "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" -} diff --git a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json b/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json deleted file mode 100644 index 8df9aebc121..00000000000 --- a/crates/apollo_deployments/resources/upgrade_test/app_configs/hybrid/hybrid_node_2/deployment_config_override.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "base_layer_config.starknet_contract_address": "0x9b8A6361d204a0C1F93d5194763538057444d958", - "chain_id": "SN_GOERLI", - "consensus_manager_config.eth_to_strk_oracle_config.base_url": "https://api.devnet.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median", - "eth_fee_token_address": 
"0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", - "l1_provider_config.provider_startup_height_override": 0, - "l1_provider_config.provider_startup_height_override.#is_none": true, - "starknet_url": "feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io", - "strk_fee_token_address": "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4" -} diff --git a/crates/apollo_deployments/src/addresses.rs b/crates/apollo_deployments/src/addresses.rs index d963a5c43c7..3ede84f40c4 100644 --- a/crates/apollo_deployments/src/addresses.rs +++ b/crates/apollo_deployments/src/addresses.rs @@ -3,6 +3,9 @@ use std::fmt; use libp2p::identity::Keypair; +// TODO(Tsabary): Get rid of the secret key type and its usage; there should be a precomputed map +// from node index to its peer id. + #[derive(Debug, Clone, PartialEq, Eq)] pub struct SecretKey([u8; 32]); diff --git a/crates/apollo_deployments/src/bin/deployment_generator.rs b/crates/apollo_deployments/src/bin/deployment_generator.rs index 9fa37cbcc7a..f099378c3ac 100644 --- a/crates/apollo_deployments/src/bin/deployment_generator.rs +++ b/crates/apollo_deployments/src/bin/deployment_generator.rs @@ -1,12 +1,12 @@ use apollo_deployments::deployment_definitions::DEPLOYMENTS; -use apollo_deployments::service::DeploymentName; +use apollo_deployments::service::NodeType; use apollo_infra_utils::dumping::serialize_to_file; use strum::IntoEnumIterator; /// Creates the deployment json file. 
fn main() { - for deployment_name in DeploymentName::iter() { - deployment_name.dump_service_component_configs(None); + for node_type in NodeType::iter() { + node_type.dump_service_component_configs(None); } for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { serialize_to_file(&deployment, deployment.deployment_file_path().to_str().unwrap()); diff --git a/crates/apollo_deployments/src/config_override.rs b/crates/apollo_deployments/src/config_override.rs index e8d3f30f6e0..ec956335aac 100644 --- a/crates/apollo_deployments/src/config_override.rs +++ b/crates/apollo_deployments/src/config_override.rs @@ -3,20 +3,21 @@ use std::path::Path; use apollo_infra_utils::dumping::serialize_to_file; #[cfg(test)] use apollo_infra_utils::dumping::serialize_to_file_test; +use apollo_infra_utils::template::Template; use serde::Serialize; use serde_json::to_value; use serde_with::with_prefix; use starknet_api::block::BlockNumber; use crate::deployment::PragmaDomain; +use crate::deployment_definitions::{StateSyncConfig, StateSyncType}; #[cfg(test)] -use crate::deployment::FIX_BINARY_NAME; +use crate::test_utils::FIX_BINARY_NAME; const DEPLOYMENT_FILE_NAME: &str = "deployment_config_override.json"; -const INSTANCE_FILE_NAME: &str = "instance_config_override.json"; -const PRAGMA_URL_TEMPLATE: &str = - "https://api.{}.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median"; +const PRAGMA_URL_TEMPLATE: Template = + Template("https://api.{}.pragma.build/node/v1/data/eth/strk?interval=15min&aggregation=median"); #[derive(Clone, Debug, Serialize, PartialEq)] pub struct ConfigOverride { @@ -34,11 +35,12 @@ impl ConfigOverride { fn config_files( &self, - application_config_subdir: &Path, + deployment_config_override_dir: &Path, + instance_name: &str, create: bool, ) -> ConfigOverrideWithPaths { - let deployment_path = application_config_subdir.join(DEPLOYMENT_FILE_NAME); - let instance_path = application_config_subdir.join(INSTANCE_FILE_NAME); + let deployment_path = 
deployment_config_override_dir.join(DEPLOYMENT_FILE_NAME); + let instance_path = deployment_config_override_dir.join(format!("{instance_name}.json")); if create { serialize_to_file( @@ -62,19 +64,34 @@ impl ConfigOverride { } } - pub fn get_config_file_paths(&self, application_config_subdir: &Path) -> Vec { - let config_override_with_paths = self.config_files(application_config_subdir, false); + pub fn get_config_file_paths( + &self, + deployment_config_override_dir: &Path, + instance_name: &str, + ) -> Vec { + let config_override_with_paths = + self.config_files(deployment_config_override_dir, instance_name, false); vec![config_override_with_paths.deployment_path, config_override_with_paths.instance_path] } - pub fn dump_config_files(&self, application_config_subdir: &Path) -> Vec { - let config_override_with_paths = self.config_files(application_config_subdir, true); + pub fn dump_config_files( + &self, + deployment_config_override_dir: &Path, + instance_name: &str, + ) -> Vec { + let config_override_with_paths = + self.config_files(deployment_config_override_dir, instance_name, true); vec![config_override_with_paths.deployment_path, config_override_with_paths.instance_path] } #[cfg(test)] - pub fn test_dump_config_files(&self, application_config_subdir: &Path) { - let config_override_with_paths = self.config_files(application_config_subdir, false); + pub fn test_dump_config_files( + &self, + deployment_config_override_dir: &Path, + instance_name: &str, + ) { + let config_override_with_paths = + self.config_files(deployment_config_override_dir, instance_name, false); serialize_to_file_test( to_value(config_override_with_paths.deployment_config_override).unwrap(), @@ -113,9 +130,14 @@ pub struct DeploymentConfigOverride { l1_provider_config_provider_startup_height_override: u64, #[serde(rename = "l1_provider_config.provider_startup_height_override.#is_none")] l1_provider_config_provider_startup_height_override_is_none: bool, + #[serde(rename = 
"consensus_manager_config.context_config.num_validators")] + consensus_manager_config_context_config_num_validators: usize, + #[serde(flatten)] + state_sync_config: StateSyncConfig, } impl DeploymentConfigOverride { + #[allow(clippy::too_many_arguments)] pub fn new( starknet_contract_address: impl ToString, chain_id: impl ToString, @@ -124,6 +146,8 @@ impl DeploymentConfigOverride { strk_fee_token_address: impl ToString, pragma_domain: PragmaDomain, l1_startup_height_override: Option, + consensus_manager_config_context_config_num_validators: usize, + state_sync_type: StateSyncType, ) -> Self { let ( l1_provider_config_provider_startup_height_override, @@ -140,9 +164,11 @@ impl DeploymentConfigOverride { starknet_url: starknet_url.to_string(), strk_fee_token_address: strk_fee_token_address.to_string(), consensus_manager_config_eth_to_strk_oracle_config_base_url: PRAGMA_URL_TEMPLATE - .replace("{}", &pragma_domain.to_string()), + .format(&[&pragma_domain]), l1_provider_config_provider_startup_height_override, l1_provider_config_provider_startup_height_override_is_none, + consensus_manager_config_context_config_num_validators, + state_sync_config: state_sync_type.get_state_sync_config(), } } } @@ -160,16 +186,12 @@ pub struct NetworkConfigOverride { advertised_multiaddr: String, #[serde(rename = "advertised_multiaddr.#is_none")] advertised_multiaddr_is_none: bool, - - // TODO(Tsabary): network secret keys should be defined as secrets. 
- secret_key: String, } impl NetworkConfigOverride { pub fn new( bootstrap_peer_multiaddr: Option, advertised_multiaddr: Option, - secret_key: impl ToString, ) -> Self { let (bootstrap_peer_multiaddr, bootstrap_peer_multiaddr_is_none) = match bootstrap_peer_multiaddr { @@ -185,7 +207,6 @@ impl NetworkConfigOverride { bootstrap_peer_multiaddr_is_none, advertised_multiaddr, advertised_multiaddr_is_none, - secret_key: secret_key.to_string(), } } } diff --git a/crates/apollo_deployments/src/deployment.rs b/crates/apollo_deployments/src/deployment.rs index 5f5b90c39a3..49c17c7df8e 100644 --- a/crates/apollo_deployments/src/deployment.rs +++ b/crates/apollo_deployments/src/deployment.rs @@ -12,14 +12,9 @@ use serde::Serialize; use serde_json::{json, Value}; use crate::config_override::ConfigOverride; -use crate::deployment_definitions::{Environment, CONFIG_BASE_DIR}; +use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH, CONFIG_BASE_DIR}; use crate::k8s::{ExternalSecret, IngressParams, K8SServiceType, K8sServiceConfigParams}; -use crate::service::{DeploymentName, Service, ServiceName}; - -#[cfg(test)] -pub(crate) const FIX_BINARY_NAME: &str = "deployment_generator"; - -const DEPLOYMENT_CONFIG_DIR_NAME: &str = "deployment_configs/"; +use crate::service::{NodeService, NodeType, Service}; #[derive(Clone, Debug, Serialize)] pub struct Deployment { @@ -30,32 +25,26 @@ pub struct Deployment { } impl Deployment { - #[allow(clippy::too_many_arguments)] pub fn new( - deployment_name: DeploymentName, + node_type: NodeType, environment: Environment, instance_name: &str, external_secret: Option, - base_app_config_file_path: PathBuf, config_override: ConfigOverride, ingress_params: IngressParams, k8s_service_config_params: Option, ) -> Self { - let service_names = deployment_name.all_service_names(); + let node_services = node_type.all_service_names(); - let config_override_dir = deployment_name - .add_path_suffix(environment.application_config_dir_path(), 
instance_name); - - let config_override_files = config_override.get_config_file_paths(&config_override_dir); + let config_override_files = + config_override.get_config_file_paths(&environment.env_dir_path(), instance_name); let config_filenames: Vec = - once(base_app_config_file_path.to_string_lossy().to_string()) - .chain(config_override_files) - .collect(); + once(BASE_APP_CONFIG_PATH.to_string()).chain(config_override_files).collect(); - let services = service_names + let services = node_services .iter() - .map(|service_name| { - service_name.create_service( + .map(|node_service| { + node_service.create_service( &environment, &external_secret, config_filenames.clone(), @@ -68,27 +57,20 @@ impl Deployment { application_config_subdir: CONFIG_BASE_DIR.into(), services, deployment_aux_data: DeploymentAuxData { - deployment_name, + node_type, environment, instance_name: instance_name.to_string(), - base_app_config_file_path, config_override, - config_override_dir, }, } } - pub fn get_deployment_name(&self) -> &DeploymentName { - &self.deployment_aux_data.deployment_name - } - - pub fn get_base_app_config_file_path(&self) -> PathBuf { - self.deployment_aux_data.base_app_config_file_path.clone() + pub fn get_node_type(&self) -> &NodeType { + &self.deployment_aux_data.node_type } - pub fn application_config_values(&self) -> IndexMap { - let component_configs = - self.deployment_aux_data.deployment_name.get_component_configs(None); + pub fn application_config_values(&self) -> IndexMap { + let component_configs = self.deployment_aux_data.node_type.get_component_configs(None); let mut result = IndexMap::new(); for (service, component_config) in component_configs.into_iter() { @@ -118,34 +100,34 @@ impl Deployment { } pub fn deployment_file_path(&self) -> PathBuf { - PathBuf::from(CONFIG_BASE_DIR) - .join(self.deployment_aux_data.environment.to_string()) - .join(DEPLOYMENT_CONFIG_DIR_NAME) - .join(format!("{}.json", self.deployment_aux_data.instance_name)) + 
self.deployment_aux_data + .environment + .env_dir_path() + .join(format!("deployment_config_{}.json", self.deployment_aux_data.instance_name)) } pub fn dump_config_override_files(&self) { - self.deployment_aux_data - .config_override - .dump_config_files(&self.deployment_aux_data.config_override_dir); + self.deployment_aux_data.config_override.dump_config_files( + &self.deployment_aux_data.environment.env_dir_path(), + &self.deployment_aux_data.instance_name, + ); } #[cfg(test)] pub fn test_dump_config_override_files(&self) { - self.deployment_aux_data - .config_override - .test_dump_config_files(&self.deployment_aux_data.config_override_dir); + self.deployment_aux_data.config_override.test_dump_config_files( + &self.deployment_aux_data.environment.env_dir_path(), + &self.deployment_aux_data.instance_name, + ); } } #[derive(Clone, Debug)] struct DeploymentAuxData { - deployment_name: DeploymentName, + node_type: NodeType, environment: Environment, instance_name: String, - base_app_config_file_path: PathBuf, config_override: ConfigOverride, - config_override_dir: PathBuf, } // TODO(Tsabary): test no conflicts between config entries defined in each of the override types. @@ -168,13 +150,13 @@ impl Display for PragmaDomain { } } -// Creates the service name in the format: .. +// Creates the service name in the format: .. 
pub(crate) fn build_service_namespace_domain_address( - service_name: &str, + node_service: &str, namespace: &str, domain: &str, ) -> String { - format!("{service_name}.{namespace}.{domain}") + format!("{node_service}.{namespace}.{domain}") } // TODO(Tsabary): when transitioning runnings nodes in different clusters, this enum should be diff --git a/crates/apollo_deployments/src/deployment_definitions.rs b/crates/apollo_deployments/src/deployment_definitions.rs index 9ba0cb2997e..6db31ccabcc 100644 --- a/crates/apollo_deployments/src/deployment_definitions.rs +++ b/crates/apollo_deployments/src/deployment_definitions.rs @@ -1,10 +1,11 @@ use std::path::PathBuf; -use const_format::formatcp; +use serde::Serialize; use strum_macros::{Display, EnumString}; use crate::deployment::Deployment; use crate::deployment_definitions::sepolia_integration::sepolia_integration_hybrid_deployments; +use crate::deployment_definitions::sepolia_testnet::sepolia_testnet_hybrid_deployments; use crate::deployment_definitions::stress_test::stress_test_hybrid_deployments; use crate::deployment_definitions::testing::system_test_deployments; use crate::deployment_definitions::testing_env_3::testing_env_3_hybrid_deployments; @@ -15,15 +16,16 @@ use crate::deployment_definitions::upgrade_test::upgrade_test_hybrid_deployments mod deployment_definitions_test; mod sepolia_integration; +mod sepolia_testnet; mod stress_test; mod testing; mod testing_env_3; mod upgrade_test; pub(crate) const CONFIG_BASE_DIR: &str = "crates/apollo_deployments/resources/"; +pub(crate) const DEPLOYMENT_CONFIG_DIR_NAME: &str = "deployments/"; pub(crate) const BASE_APP_CONFIG_PATH: &str = - formatcp!("{}{}", CONFIG_BASE_DIR, "base_app_config.json"); -const APP_CONFIGS_DIR_NAME: &str = "app_configs/"; + "crates/apollo_deployments/resources/base_app_config.json"; type DeploymentFn = fn() -> Vec; @@ -33,6 +35,7 @@ pub const DEPLOYMENTS: &[DeploymentFn] = &[ upgrade_test_hybrid_deployments, testing_env_3_hybrid_deployments, 
stress_test_hybrid_deployments, + sepolia_testnet_hybrid_deployments, ]; #[derive(EnumString, Clone, Display, PartialEq, Debug)] @@ -51,7 +54,39 @@ pub enum Environment { } impl Environment { - pub fn application_config_dir_path(&self) -> PathBuf { - PathBuf::from(CONFIG_BASE_DIR).join(self.to_string()).join(APP_CONFIGS_DIR_NAME) + pub(crate) fn env_dir_path(&self) -> PathBuf { + PathBuf::from(CONFIG_BASE_DIR).join(DEPLOYMENT_CONFIG_DIR_NAME).join(self.to_string()) + } +} + +#[derive(Clone, Debug, Serialize, PartialEq)] +pub struct StateSyncConfig { + #[serde(rename = "state_sync_config.central_sync_client_config.#is_none")] + state_sync_config_central_sync_client_config_is_none: bool, + #[serde(rename = "state_sync_config.p2p_sync_client_config.#is_none")] + state_sync_config_p2p_sync_client_config_is_none: bool, + #[serde(rename = "state_sync_config.network_config.#is_none")] + state_sync_config_network_config_is_none: bool, +} + +pub enum StateSyncType { + Central, + P2P, +} + +impl StateSyncType { + pub fn get_state_sync_config(&self) -> StateSyncConfig { + match self { + StateSyncType::Central => StateSyncConfig { + state_sync_config_central_sync_client_config_is_none: false, + state_sync_config_p2p_sync_client_config_is_none: true, + state_sync_config_network_config_is_none: true, + }, + StateSyncType::P2P => StateSyncConfig { + state_sync_config_central_sync_client_config_is_none: true, + state_sync_config_p2p_sync_client_config_is_none: false, + state_sync_config_network_config_is_none: false, + }, + } } } diff --git a/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs b/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs index b7186fb6150..5e924d5dce6 100644 --- a/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs +++ b/crates/apollo_deployments/src/deployment_definitions/sepolia_integration.rs @@ -1,20 +1,18 @@ -use std::path::PathBuf; +use apollo_infra_utils::template::Template; 
use crate::config_override::{ConfigOverride, DeploymentConfigOverride}; use crate::deployment::{Deployment, P2PCommunicationType, PragmaDomain}; -use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH}; -use crate::deployments::hybrid::create_hybrid_instance_config_override; +use crate::deployment_definitions::{Environment, StateSyncType}; +use crate::deployments::hybrid::{create_hybrid_instance_config_override, INSTANCE_NAME_FORMAT}; use crate::k8s::{ExternalSecret, IngressParams}; -use crate::service::DeploymentName; -use crate::utils::format_node_id; +use crate::service::NodeType; const SEPOLIA_INTEGRATION_NODE_IDS: [usize; 3] = [0, 1, 2]; const SEPOLIA_INTEGRATION_HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "integration-sepolia.starknet.io"; const SEPOLIA_INTEGRATION_INGRESS_DOMAIN: &str = "starknet.io"; -const INSTANCE_NAME_FORMAT: &str = "integration_hybrid_node_{}"; -const SECRET_NAME_FORMAT: &str = "apollo-sepolia-integration-{}"; -const NODE_NAMESPACE_FORMAT: &str = "apollo-sepolia-integration-{}"; +const SECRET_NAME_FORMAT: Template = Template("apollo-sepolia-integration-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-sepolia-integration-{}"); pub(crate) fn sepolia_integration_hybrid_deployments() -> Vec { SEPOLIA_INTEGRATION_NODE_IDS @@ -31,6 +29,8 @@ fn sepolia_integration_deployment_config_override() -> DeploymentConfigOverride "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d", PragmaDomain::Dev, None, + SEPOLIA_INTEGRATION_NODE_IDS.len(), + StateSyncType::Central, ) } @@ -39,11 +39,10 @@ fn sepolia_integration_hybrid_deployment_node( p2p_communication_type: P2PCommunicationType, ) -> Deployment { Deployment::new( - DeploymentName::HybridNode, + NodeType::Hybrid, Environment::SepoliaIntegration, - &format_node_id(INSTANCE_NAME_FORMAT, id), - Some(ExternalSecret::new(format_node_id(SECRET_NAME_FORMAT, id))), - PathBuf::from(BASE_APP_CONFIG_PATH), + &INSTANCE_NAME_FORMAT.format(&[&id]), + 
Some(ExternalSecret::new(SECRET_NAME_FORMAT.format(&[&id]))), ConfigOverride::new( sepolia_integration_deployment_config_override(), create_hybrid_instance_config_override( diff --git a/crates/apollo_deployments/src/deployment_definitions/sepolia_testnet.rs b/crates/apollo_deployments/src/deployment_definitions/sepolia_testnet.rs new file mode 100644 index 00000000000..afe691fc99c --- /dev/null +++ b/crates/apollo_deployments/src/deployment_definitions/sepolia_testnet.rs @@ -0,0 +1,61 @@ +use apollo_infra_utils::template::Template; + +use crate::config_override::{ConfigOverride, DeploymentConfigOverride}; +use crate::deployment::{Deployment, P2PCommunicationType, PragmaDomain}; +use crate::deployment_definitions::{Environment, StateSyncType}; +use crate::deployments::hybrid::{create_hybrid_instance_config_override, INSTANCE_NAME_FORMAT}; +use crate::k8s::{ExternalSecret, IngressParams, K8sServiceConfigParams}; +use crate::service::NodeType; + +const NODE_IDS: [usize; 3] = [0, 1, 2]; +const HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "alpha-sepolia.starknet.io"; +const INGRESS_DOMAIN: &str = "starknet.io"; +const SECRET_NAME_FORMAT: Template = Template("apollo-sepolia-alpha-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-sepolia-alpha-{}"); + +pub(crate) fn sepolia_testnet_hybrid_deployments() -> Vec { + NODE_IDS.map(|i| hybrid_deployments(i, P2PCommunicationType::External)).to_vec() +} + +// TODO(Tsabary): for all envs, define the values as constants at the top of the module, and cancel +// the inner function calls. 
+fn deployment_config_override() -> DeploymentConfigOverride { + DeploymentConfigOverride::new( + "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057", + "SN_SEPOLIA", + "0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7", + "https://feeder.alpha-sepolia.starknet.io", + "0x4718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d", + PragmaDomain::Dev, + None, + NODE_IDS.len(), + StateSyncType::Central, + ) +} + +fn hybrid_deployments(id: usize, p2p_communication_type: P2PCommunicationType) -> Deployment { + Deployment::new( + NodeType::Hybrid, + Environment::SepoliaTestnet, + &INSTANCE_NAME_FORMAT.format(&[&id]), + Some(ExternalSecret::new(SECRET_NAME_FORMAT.format(&[&id]))), + ConfigOverride::new( + deployment_config_override(), + create_hybrid_instance_config_override( + id, + NODE_NAMESPACE_FORMAT, + p2p_communication_type, + INGRESS_DOMAIN, + ), + ), + IngressParams::new( + INGRESS_DOMAIN.to_string(), + Some(vec![HTTP_SERVER_INGRESS_ALTERNATIVE_NAME.into()]), + ), + Some(K8sServiceConfigParams::new( + NODE_NAMESPACE_FORMAT.format(&[&id]), + INGRESS_DOMAIN.to_string(), + P2PCommunicationType::External, + )), + ) +} diff --git a/crates/apollo_deployments/src/deployment_definitions/stress_test.rs b/crates/apollo_deployments/src/deployment_definitions/stress_test.rs index 94bb5f4ac2f..a8501fd9eb4 100644 --- a/crates/apollo_deployments/src/deployment_definitions/stress_test.rs +++ b/crates/apollo_deployments/src/deployment_definitions/stress_test.rs @@ -1,19 +1,17 @@ -use std::path::PathBuf; +use apollo_infra_utils::template::Template; use crate::config_override::{ConfigOverride, DeploymentConfigOverride}; use crate::deployment::{Deployment, P2PCommunicationType, PragmaDomain}; -use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH}; -use crate::deployments::hybrid::create_hybrid_instance_config_override; +use crate::deployment_definitions::{Environment, StateSyncType}; +use 
crate::deployments::hybrid::{create_hybrid_instance_config_override, INSTANCE_NAME_FORMAT}; use crate::k8s::{ExternalSecret, IngressParams}; -use crate::service::DeploymentName; -use crate::utils::format_node_id; +use crate::service::NodeType; const STRESS_TEST_NODE_IDS: [usize; 3] = [0, 1, 2]; const STRESS_TEST_HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "apollo-stresstest-dev.sw-dev.io"; const STRESS_TEST_INGRESS_DOMAIN: &str = "sw-dev.io"; -const INSTANCE_NAME_FORMAT: &str = "integration_hybrid_node_{}"; -const SECRET_NAME_FORMAT: &str = "apollo-stresstest-dev-{}"; -const NODE_NAMESPACE_FORMAT: &str = "apollo-stresstest-dev-{}"; +const SECRET_NAME_FORMAT: Template = Template("apollo-stresstest-dev-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-stresstest-dev-{}"); pub(crate) fn stress_test_hybrid_deployments() -> Vec { STRESS_TEST_NODE_IDS @@ -24,12 +22,14 @@ pub(crate) fn stress_test_hybrid_deployments() -> Vec { fn stress_test_deployment_config_override() -> DeploymentConfigOverride { DeploymentConfigOverride::new( "0x4fA369fEBf0C574ea05EC12bC0e1Bc9Cd461Dd0f", - "SN_GOERLI", - "0x497d1c054cec40f64454b45deecdc83e0c7f7b961c63531eae03748abd95350", - "http://feeder-gateway.starknet-0-14-0-stress-test:9713/", - "0x4fa9355c504fa2de263bd7920644b5e48794fe1450ec2a6526518ad77d6a567", + "INTERNAL_STRESS_TEST", + "0x7e813ecf3e7b3e14f07bd2f68cb4a3d12110e3c75ec5a63de3d2dacf1852904", + "http://feeder-gateway.starknet-0-14-0-stress-test-03:9713/", + "0x2208cce4221df1f35943958340abc812aa79a8f6a533bff4ee00416d3d06cd6", PragmaDomain::Dev, None, + STRESS_TEST_NODE_IDS.len(), + StateSyncType::Central, ) } @@ -38,11 +38,10 @@ fn stress_test_hybrid_deployment_node( p2p_communication_type: P2PCommunicationType, ) -> Deployment { Deployment::new( - DeploymentName::HybridNode, + NodeType::Hybrid, Environment::StressTest, - &format_node_id(INSTANCE_NAME_FORMAT, id), - Some(ExternalSecret::new(format_node_id(SECRET_NAME_FORMAT, id))), - 
PathBuf::from(BASE_APP_CONFIG_PATH), + &INSTANCE_NAME_FORMAT.format(&[&id]), + Some(ExternalSecret::new(SECRET_NAME_FORMAT.format(&[&id]))), ConfigOverride::new( stress_test_deployment_config_override(), create_hybrid_instance_config_override( diff --git a/crates/apollo_deployments/src/deployment_definitions/testing.rs b/crates/apollo_deployments/src/deployment_definitions/testing.rs index 310d31381e5..6195ba370f5 100644 --- a/crates/apollo_deployments/src/deployment_definitions/testing.rs +++ b/crates/apollo_deployments/src/deployment_definitions/testing.rs @@ -1,5 +1,3 @@ -use std::path::PathBuf; - use starknet_api::block::BlockNumber; use crate::config_override::{ @@ -9,11 +7,12 @@ use crate::config_override::{ NetworkConfigOverride, }; use crate::deployment::{Deployment, PragmaDomain}; -use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH}; +use crate::deployment_definitions::{Environment, StateSyncType}; use crate::k8s::IngressParams; -use crate::service::DeploymentName; +use crate::service::NodeType; const TESTING_INGRESS_DOMAIN: &str = "sw-dev.io"; +const TESTING_NODE_IDS: [usize; 1] = [0]; pub(crate) fn system_test_deployments() -> Vec { vec![ @@ -32,15 +31,15 @@ fn testing_deployment_config_override() -> DeploymentConfigOverride { "0x1002", PragmaDomain::Dev, Some(BlockNumber(1)), + TESTING_NODE_IDS.len(), + StateSyncType::P2P, ) } fn testing_instance_config_override() -> InstanceConfigOverride { - const SECRET_KEY: &str = "0x0101010101010101010101010101010101010101010101010101010101010101"; - InstanceConfigOverride::new( - NetworkConfigOverride::new(None, None, SECRET_KEY), - NetworkConfigOverride::new(None, None, SECRET_KEY), + NetworkConfigOverride::new(None, None), + NetworkConfigOverride::new(None, None), "0x64", ) } @@ -55,11 +54,10 @@ fn get_ingress_params() -> IngressParams { fn system_test_distributed_deployment() -> Deployment { Deployment::new( - DeploymentName::DistributedNode, + NodeType::Distributed, Environment::Testing, - 
"deployment_test_distributed", + "distributed", None, - PathBuf::from(BASE_APP_CONFIG_PATH), testing_config_override(), get_ingress_params(), None, @@ -68,11 +66,10 @@ fn system_test_distributed_deployment() -> Deployment { fn system_test_hybrid_deployment() -> Deployment { Deployment::new( - DeploymentName::HybridNode, + NodeType::Hybrid, Environment::Testing, - "deployment_test_hybrid", + "hybrid", None, - PathBuf::from(BASE_APP_CONFIG_PATH), testing_config_override(), get_ingress_params(), None, @@ -81,11 +78,10 @@ fn system_test_hybrid_deployment() -> Deployment { fn system_test_consolidated_deployment() -> Deployment { Deployment::new( - DeploymentName::ConsolidatedNode, + NodeType::Consolidated, Environment::Testing, - "deployment_test_consolidated", + "consolidated", None, - PathBuf::from(BASE_APP_CONFIG_PATH), testing_config_override(), get_ingress_params(), None, diff --git a/crates/apollo_deployments/src/deployment_definitions/testing_env_3.rs b/crates/apollo_deployments/src/deployment_definitions/testing_env_3.rs index bfd0b1dcdaa..fc67407096b 100644 --- a/crates/apollo_deployments/src/deployment_definitions/testing_env_3.rs +++ b/crates/apollo_deployments/src/deployment_definitions/testing_env_3.rs @@ -1,12 +1,11 @@ -use std::path::PathBuf; +use apollo_infra_utils::template::Template; use crate::config_override::{ConfigOverride, DeploymentConfigOverride}; use crate::deployment::{Deployment, P2PCommunicationType, PragmaDomain}; -use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH}; -use crate::deployments::hybrid::create_hybrid_instance_config_override; +use crate::deployment_definitions::{Environment, StateSyncType}; +use crate::deployments::hybrid::{create_hybrid_instance_config_override, INSTANCE_NAME_FORMAT}; use crate::k8s::{ExternalSecret, IngressParams, K8sServiceConfigParams}; -use crate::service::DeploymentName; -use crate::utils::format_node_id; +use crate::service::NodeType; // TODO(Tsabary): note this env has configs for 4 
despite needing only 3. Delete when we're done // with it. @@ -19,9 +18,8 @@ const TESTING_ENV_3_NODE_IDS: [(usize, P2PCommunicationType); 4] = [ const TESTING_ENV_3_HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "sn-test-sepolia-3-sepolia.gateway-proxy.sw-dev.io"; const TESTING_ENV_3_INGRESS_DOMAIN: &str = "sw-dev.io"; -const INSTANCE_NAME_FORMAT: &str = "integration_hybrid_node_{}"; -const SECRET_NAME_FORMAT: &str = "sequencer-test-3-node-{}"; -const NODE_NAMESPACE_FORMAT: &str = "sequencer-test-3-node-{}"; +const SECRET_NAME_FORMAT: Template = Template("sequencer-test-3-node-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("sequencer-test-3-node-{}"); pub(crate) fn testing_env_3_hybrid_deployments() -> Vec { TESTING_ENV_3_NODE_IDS @@ -40,6 +38,8 @@ fn testing_env_3_deployment_config_override() -> DeploymentConfigOverride { "0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4", PragmaDomain::Dev, None, + 3, + StateSyncType::Central, ) } @@ -49,11 +49,10 @@ fn testing_env_3_hybrid_deployment_node( p2p_communication_type: P2PCommunicationType, ) -> Deployment { Deployment::new( - DeploymentName::HybridNode, + NodeType::Hybrid, Environment::TestingEnvThree, - &format_node_id(INSTANCE_NAME_FORMAT, id), - Some(ExternalSecret::new(format_node_id(SECRET_NAME_FORMAT, id))), - PathBuf::from(BASE_APP_CONFIG_PATH), + &INSTANCE_NAME_FORMAT.format(&[&id]), + Some(ExternalSecret::new(SECRET_NAME_FORMAT.format(&[&id]))), ConfigOverride::new( testing_env_3_deployment_config_override(), create_hybrid_instance_config_override( @@ -68,7 +67,7 @@ fn testing_env_3_hybrid_deployment_node( Some(vec![TESTING_ENV_3_HTTP_SERVER_INGRESS_ALTERNATIVE_NAME.into()]), ), Some(K8sServiceConfigParams::new( - format_node_id(NODE_NAMESPACE_FORMAT, id), + NODE_NAMESPACE_FORMAT.format(&[&id]), TESTING_ENV_3_INGRESS_DOMAIN.to_string(), p2p_communication_type, )), diff --git a/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs 
b/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs index d1d04438706..df3344a35dd 100644 --- a/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs +++ b/crates/apollo_deployments/src/deployment_definitions/upgrade_test.rs @@ -1,20 +1,18 @@ -use std::path::PathBuf; +use apollo_infra_utils::template::Template; use crate::config_override::{ConfigOverride, DeploymentConfigOverride}; use crate::deployment::{Deployment, P2PCommunicationType, PragmaDomain}; -use crate::deployment_definitions::{Environment, BASE_APP_CONFIG_PATH}; -use crate::deployments::hybrid::create_hybrid_instance_config_override; +use crate::deployment_definitions::{Environment, StateSyncType}; +use crate::deployments::hybrid::{create_hybrid_instance_config_override, INSTANCE_NAME_FORMAT}; use crate::k8s::{ExternalSecret, IngressParams, K8sServiceConfigParams}; -use crate::service::DeploymentName; -use crate::utils::format_node_id; +use crate::service::NodeType; const UPGRADE_TEST_NODE_IDS: [usize; 3] = [0, 1, 2]; const UPGRADE_TEST_HTTP_SERVER_INGRESS_ALTERNATIVE_NAME: &str = "sn-alpha-test-upgrade.gateway-proxy.sw-dev.io"; const UPGRADE_TEST_INGRESS_DOMAIN: &str = "sw-dev.io"; -const INSTANCE_NAME_FORMAT: &str = "hybrid_node_{}"; -const SECRET_NAME_FORMAT: &str = "apollo-alpha-test-{}"; -const NODE_NAMESPACE_FORMAT: &str = "apollo-alpha-test-{}"; +const SECRET_NAME_FORMAT: Template = Template("apollo-alpha-test-{}"); +const NODE_NAMESPACE_FORMAT: Template = Template("apollo-alpha-test-{}"); pub(crate) fn upgrade_test_hybrid_deployments() -> Vec { UPGRADE_TEST_NODE_IDS @@ -29,10 +27,12 @@ fn upgrade_test_deployment_config_override() -> DeploymentConfigOverride { "0x9b8A6361d204a0C1F93d5194763538057444d958", "SN_GOERLI", "0x7c07a3eec8ff611328722c3fc3e5d2e4ef2f60740c0bf86c756606036b74c16", - "feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io", + "https://feeder.sn-alpha-test-upgrade.gateway-proxy.sw-dev.io", 
"0x54a93d918d62b2fb62b25e77d9cb693bd277ab7e6fa236e53af263f1adb40e4", PragmaDomain::Dev, None, + UPGRADE_TEST_NODE_IDS.len(), + StateSyncType::Central, ) } @@ -41,11 +41,10 @@ fn upgrade_test_hybrid_deployment_node( p2p_communication_type: P2PCommunicationType, ) -> Deployment { Deployment::new( - DeploymentName::HybridNode, + NodeType::Hybrid, Environment::UpgradeTest, - &format_node_id(INSTANCE_NAME_FORMAT, id), - Some(ExternalSecret::new(format_node_id(SECRET_NAME_FORMAT, id))), - PathBuf::from(BASE_APP_CONFIG_PATH), + &INSTANCE_NAME_FORMAT.format(&[&id]), + Some(ExternalSecret::new(SECRET_NAME_FORMAT.format(&[&id]))), ConfigOverride::new( upgrade_test_deployment_config_override(), create_hybrid_instance_config_override( @@ -60,7 +59,7 @@ fn upgrade_test_hybrid_deployment_node( Some(vec![UPGRADE_TEST_HTTP_SERVER_INGRESS_ALTERNATIVE_NAME.into()]), ), Some(K8sServiceConfigParams::new( - format_node_id(NODE_NAMESPACE_FORMAT, id), + NODE_NAMESPACE_FORMAT.format(&[&id]), UPGRADE_TEST_INGRESS_DOMAIN.to_string(), P2PCommunicationType::External, )), diff --git a/crates/apollo_deployments/src/deployment_definitions_test.rs b/crates/apollo_deployments/src/deployment_definitions_test.rs index f509d320df9..46ccbce907e 100644 --- a/crates/apollo_deployments/src/deployment_definitions_test.rs +++ b/crates/apollo_deployments/src/deployment_definitions_test.rs @@ -1,31 +1,31 @@ +use std::collections::HashSet; use std::env; use apollo_config::CONFIG_FILE_ARG; -use apollo_infra_utils::dumping::serialize_to_file_test; +use apollo_infra_utils::dumping::{serialize_to_file, serialize_to_file_test}; use apollo_infra_utils::path::resolve_project_relative_path; use apollo_node::config::component_execution_config::{ ActiveComponentExecutionMode, ReactiveComponentExecutionMode, }; use apollo_node::config::node_config::SequencerNodeConfig; +use apollo_node::config::test_utils::private_parameters; +use serde_json::to_value; use strum::IntoEnumIterator; +use tempfile::NamedTempFile; -use 
crate::deployment::FIX_BINARY_NAME; use crate::deployment_definitions::DEPLOYMENTS; -use crate::service::DeploymentName; +use crate::service::NodeType; +use crate::test_utils::{SecretsConfigOverride, FIX_BINARY_NAME}; -/// Test that the deployment file is up to date. To update it run: -/// cargo run --bin deployment_generator -q +/// Test that the deployment file is up to date. #[test] fn deployment_files_are_up_to_date() { env::set_current_dir(resolve_project_relative_path("").unwrap()) .expect("Couldn't set working dir."); - // TODO(Tsabary): The word "deployment" is overloaded. On one hand it means the "node - // configuration" (e.g. hybrid), on the other it means the "k8s setups" (e.g. upgrade_test). - // Need to fix that. - for deployment_name in DeploymentName::iter() { - deployment_name.test_dump_service_component_configs(None); + for node_type in NodeType::iter() { + node_type.test_dump_service_component_configs(None); } for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { serialize_to_file_test( @@ -42,15 +42,25 @@ fn deployment_files_are_up_to_date() { fn load_and_process_service_config_files() { env::set_current_dir(resolve_project_relative_path("").unwrap()) .expect("Couldn't set working dir."); + + // Create a dummy secrets value to the config file paths. + let temp_file = NamedTempFile::new().unwrap(); + let temp_file_path = temp_file.path().to_str().unwrap(); + let secrets_config_override = SecretsConfigOverride::default(); + serialize_to_file(to_value(&secrets_config_override).unwrap(), temp_file_path); + for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { - for service_config_paths in deployment.get_config_file_paths().into_iter() { + for mut service_config_paths in deployment.get_config_file_paths().into_iter() { println!( "Loading deployment {} in path {:?} with application files {:?} ... 
", - deployment.get_deployment_name(), + deployment.get_node_type(), deployment.deployment_file_path(), service_config_paths ); + // Add the secrets config file path to the config load command. + service_config_paths.push(temp_file_path.to_string()); + let config_file_args: Vec = service_config_paths .clone() .into_iter() @@ -74,11 +84,40 @@ fn load_and_process_service_config_files() { } } +/// Test that the private values in the apollo node config schema match the secrets config override +/// schema. +#[test] +fn secrets_config_and_private_parameters_config_schema_compatibility() { + let secrets_config_override = SecretsConfigOverride::default(); + let secrets_provided_by_config = to_value(&secrets_config_override) + .unwrap() + .as_object() + .unwrap() + .keys() + .cloned() + .collect::>(); + let secrets_required_by_schema = private_parameters(); + + let only_in_config: HashSet<_> = + secrets_provided_by_config.difference(&secrets_required_by_schema).collect(); + let only_in_schema: HashSet<_> = + secrets_required_by_schema.difference(&secrets_provided_by_config).collect(); + + if !(only_in_config.is_empty() && only_in_schema.is_empty()) { + panic!( + "Secrets config override schema mismatch:\nSecrets provided by config: \ + {secrets_provided_by_config:?}\nSecrets required by schema: \ + {secrets_required_by_schema:?}\nOnly in config: {only_in_config:?}\nOnly in schema: \ + {only_in_schema:?}" + ); + } +} + #[test] fn l1_components_state_consistency() { for deployment in DEPLOYMENTS.iter().flat_map(|f| f()) { - let deployment_name = deployment.get_deployment_name(); - let component_configs = deployment_name.get_component_configs(None); + let node_type = deployment.get_node_type(); + let component_configs = node_type.get_component_configs(None); let l1_gas_price_provider_indicator = component_configs.values().any(|component_config| { component_config.l1_gas_price_provider.execution_mode diff --git a/crates/apollo_deployments/src/deployments/consolidated.rs 
b/crates/apollo_deployments/src/deployments/consolidated.rs index ec73edc0e74..27c22d60567 100644 --- a/crates/apollo_deployments/src/deployments/consolidated.rs +++ b/crates/apollo_deployments/src/deployments/consolidated.rs @@ -18,7 +18,7 @@ use crate::k8s::{ Resources, Toleration, }; -use crate::service::{GetComponentConfigs, ServiceName, ServiceNameInner}; +use crate::service::{GetComponentConfigs, NodeService, ServiceNameInner}; const NODE_STORAGE: usize = 1000; const TESTING_NODE_STORAGE: usize = 1; @@ -29,17 +29,17 @@ pub enum ConsolidatedNodeServiceName { Node, } -impl From for ServiceName { +impl From for NodeService { fn from(service: ConsolidatedNodeServiceName) -> Self { - ServiceName::ConsolidatedNode(service) + NodeService::Consolidated(service) } } impl GetComponentConfigs for ConsolidatedNodeServiceName { - fn get_component_configs(_ports: Option>) -> IndexMap { + fn get_component_configs(_ports: Option>) -> IndexMap { let mut component_config_map = IndexMap::new(); component_config_map.insert( - ServiceName::ConsolidatedNode(ConsolidatedNodeServiceName::Node), + NodeService::Consolidated(ConsolidatedNodeServiceName::Node), get_consolidated_config(), ); component_config_map @@ -63,6 +63,7 @@ impl ServiceNameInner for ConsolidatedNodeServiceName { match environment { Environment::Testing => None, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => match self { @@ -80,6 +81,7 @@ impl ServiceNameInner for ConsolidatedNodeServiceName { match environment { Environment::Testing => None, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => get_ingress(ingress_params, false), @@ -95,6 +97,7 @@ impl ServiceNameInner for ConsolidatedNodeServiceName { match environment { Environment::Testing => Some(TESTING_NODE_STORAGE), Environment::SepoliaIntegration + | 
Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => Some(NODE_STORAGE), @@ -106,6 +109,7 @@ impl ServiceNameInner for ConsolidatedNodeServiceName { match environment { Environment::Testing => Resources::new(Resource::new(1, 2), Resource::new(4, 8)), Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => Resources::new(Resource::new(2, 4), Resource::new(4, 8)), @@ -121,6 +125,7 @@ impl ServiceNameInner for ConsolidatedNodeServiceName { match environment { Environment::Testing => false, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => true, diff --git a/crates/apollo_deployments/src/deployments/distributed.rs b/crates/apollo_deployments/src/deployments/distributed.rs index 717e7f2c9a9..261a825a978 100644 --- a/crates/apollo_deployments/src/deployments/distributed.rs +++ b/crates/apollo_deployments/src/deployments/distributed.rs @@ -22,7 +22,7 @@ use crate::k8s::{ Resources, Toleration, }; -use crate::service::{GetComponentConfigs, ServiceName, ServiceNameInner}; +use crate::service::{GetComponentConfigs, NodeService, ServiceNameInner}; use crate::utils::determine_port_numbers; pub const DISTRIBUTED_NODE_REQUIRED_PORTS_NUM: usize = 10; @@ -48,15 +48,15 @@ pub enum DistributedNodeServiceName { StateSync, } -// Implement conversion from `DistributedNodeServiceName` to `ServiceName` -impl From for ServiceName { +// Implement conversion from `DistributedNodeServiceName` to `NodeService` +impl From for NodeService { fn from(service: DistributedNodeServiceName) -> Self { - ServiceName::DistributedNode(service) + NodeService::Distributed(service) } } impl GetComponentConfigs for DistributedNodeServiceName { - fn get_component_configs(ports: Option>) -> IndexMap { + fn get_component_configs(ports: Option>) -> IndexMap { 
let ports = determine_port_numbers(ports, DISTRIBUTED_NODE_REQUIRED_PORTS_NUM, BASE_PORT); let batcher = DistributedNodeServiceName::Batcher.component_config_pair(ports[0]); @@ -73,7 +73,7 @@ impl GetComponentConfigs for DistributedNodeServiceName { let signature_manager = DistributedNodeServiceName::ConsensusManager.component_config_pair(ports[9]); - let mut component_config_map = IndexMap::::new(); + let mut component_config_map = IndexMap::::new(); for inner_service_name in DistributedNodeServiceName::iter() { let component_config = match inner_service_name { DistributedNodeServiceName::Batcher => get_batcher_component_config( @@ -123,8 +123,8 @@ impl GetComponentConfigs for DistributedNodeServiceName { get_state_sync_component_config(state_sync.local(), class_manager.remote()) } }; - let service_name = inner_service_name.into(); - component_config_map.insert(service_name, component_config); + let node_service = inner_service_name.into(); + component_config_map.insert(node_service, component_config); } component_config_map } @@ -164,6 +164,7 @@ impl ServiceNameInner for DistributedNodeServiceName { match environment { Environment::Testing => None, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => match self { @@ -221,6 +222,7 @@ impl ServiceNameInner for DistributedNodeServiceName { match environment { Environment::Testing => None, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => match self { @@ -250,6 +252,7 @@ impl ServiceNameInner for DistributedNodeServiceName { match environment { Environment::Testing => false, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => match self { @@ -259,7 +262,7 @@ impl ServiceNameInner for DistributedNodeServiceName { 
DistributedNodeServiceName::HttpServer => false, DistributedNodeServiceName::Gateway => false, DistributedNodeServiceName::L1 => false, - DistributedNodeServiceName::Mempool => false, + DistributedNodeServiceName::Mempool => true, DistributedNodeServiceName::SierraCompiler => false, DistributedNodeServiceName::StateSync => false, }, diff --git a/crates/apollo_deployments/src/deployments/hybrid.rs b/crates/apollo_deployments/src/deployments/hybrid.rs index 2410d23478c..0be62edd05b 100644 --- a/crates/apollo_deployments/src/deployments/hybrid.rs +++ b/crates/apollo_deployments/src/deployments/hybrid.rs @@ -1,5 +1,6 @@ use std::net::{IpAddr, Ipv4Addr}; +use apollo_infra_utils::template::Template; use apollo_node::config::component_config::ComponentConfig; use apollo_node::config::component_execution_config::{ ActiveComponentExecutionConfig, @@ -25,10 +26,11 @@ use crate::k8s::{ Resources, Toleration, }; -use crate::service::{GetComponentConfigs, ServiceName, ServiceNameInner}; -use crate::utils::{determine_port_numbers, format_node_id, get_secret_key, get_validator_id}; +use crate::service::{GetComponentConfigs, NodeService, ServiceNameInner}; +use crate::utils::{determine_port_numbers, get_secret_key, get_validator_id}; pub const HYBRID_NODE_REQUIRED_PORTS_NUM: usize = 10; +pub(crate) const INSTANCE_NAME_FORMAT: Template = Template("hybrid_{}"); const BASE_PORT: u16 = 55000; // TODO(Tsabary): arbitrary port, need to resolve. 
const CORE_STORAGE: usize = 1000; @@ -45,16 +47,16 @@ pub enum HybridNodeServiceName { SierraCompiler, } -// Implement conversion from `HybridNodeServiceName` to `ServiceName` -impl From for ServiceName { +// Implement conversion from `HybridNodeServiceName` to `NodeService` +impl From for NodeService { fn from(service: HybridNodeServiceName) -> Self { - ServiceName::HybridNode(service) + NodeService::Hybrid(service) } } impl GetComponentConfigs for HybridNodeServiceName { - fn get_component_configs(ports: Option>) -> IndexMap { - let mut component_config_map = IndexMap::::new(); + fn get_component_configs(ports: Option>) -> IndexMap { + let mut component_config_map = IndexMap::::new(); let ports = determine_port_numbers(ports, HYBRID_NODE_REQUIRED_PORTS_NUM, BASE_PORT); @@ -100,8 +102,8 @@ impl GetComponentConfigs for HybridNodeServiceName { get_sierra_compiler_component_config(sierra_compiler.local()) } }; - let service_name = inner_service_name.into(); - component_config_map.insert(service_name, component_config); + let node_service = inner_service_name.into(); + component_config_map.insert(node_service, component_config); } component_config_map } @@ -141,7 +143,7 @@ impl ServiceNameInner for HybridNodeServiceName { HybridNodeServiceName::Mempool => Some(Toleration::ApolloCoreService), HybridNodeServiceName::SierraCompiler => Some(Toleration::ApolloGeneralService), }, - Environment::StressTest => match self { + Environment::StressTest | Environment::SepoliaTestnet => match self { HybridNodeServiceName::Core => Some(Toleration::ApolloCoreServiceC2D56), HybridNodeServiceName::HttpServer => Some(Toleration::ApolloGeneralService), HybridNodeServiceName::Gateway => Some(Toleration::ApolloGeneralService), @@ -181,6 +183,7 @@ impl ServiceNameInner for HybridNodeServiceName { match environment { Environment::Testing => None, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest 
=> match self { @@ -216,7 +219,7 @@ impl ServiceNameInner for HybridNodeServiceName { Resources::new(Resource::new(1, 2), Resource::new(2, 4)) } }, - Environment::StressTest => match self { + Environment::StressTest | Environment::SepoliaTestnet => match self { HybridNodeServiceName::Core => { Resources::new(Resource::new(50, 200), Resource::new(50, 220)) } @@ -241,6 +244,7 @@ impl ServiceNameInner for HybridNodeServiceName { match environment { Environment::Testing => 1, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => match self { @@ -258,13 +262,14 @@ impl ServiceNameInner for HybridNodeServiceName { match environment { Environment::Testing => false, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => match self { HybridNodeServiceName::Core => true, HybridNodeServiceName::HttpServer => false, HybridNodeServiceName::Gateway => false, - HybridNodeServiceName::Mempool => false, + HybridNodeServiceName::Mempool => true, HybridNodeServiceName::SierraCompiler => false, }, _ => unimplemented!(), @@ -407,8 +412,7 @@ fn get_http_server_component_config( pub(crate) fn create_hybrid_instance_config_override( node_id: usize, - // TODO(Tsabary): change `node_namespace_format` to be of its own type with dedicated fns - node_namespace_format: &str, + node_namespace_format: Template, p2p_communication_type: P2PCommunicationType, domain: &str, ) -> InstanceConfigOverride { @@ -433,10 +437,10 @@ pub(crate) fn create_hybrid_instance_config_override( let sanitized_domain = p2p_communication_type.get_p2p_domain(domain); let build_peer_address = - |service_name: HybridNodeServiceName, port: u16, node_id: usize, peer_id: &str| { + |node_service: HybridNodeServiceName, port: u16, node_id: usize, peer_id: &str| { let domain = build_service_namespace_domain_address( - 
&service_name.k8s_service_name(), - &format_node_id(node_namespace_format, node_id), + &node_service.k8s_service_name(), + &node_namespace_format.format(&[&node_id]), &sanitized_domain, ); Some(get_p2p_address(&domain, port, peer_id)) @@ -497,13 +501,8 @@ pub(crate) fn create_hybrid_instance_config_override( NetworkConfigOverride::new( consensus_bootstrap_peer_multiaddr, consensus_advertised_multiaddr, - &node_secret_key, - ), - NetworkConfigOverride::new( - mempool_bootstrap_peer_multiaddr, - mempool_advertised_multiaddr, - &node_secret_key, ), + NetworkConfigOverride::new(mempool_bootstrap_peer_multiaddr, mempool_advertised_multiaddr), get_validator_id(node_id), ) } diff --git a/crates/apollo_deployments/src/k8s.rs b/crates/apollo_deployments/src/k8s.rs index 587392ca049..8e50ad83d6e 100644 --- a/crates/apollo_deployments/src/k8s.rs +++ b/crates/apollo_deployments/src/k8s.rs @@ -113,6 +113,7 @@ pub(crate) fn get_environment_ingress_internal(environment: &Environment) -> boo match environment { Environment::Testing => true, Environment::SepoliaIntegration + | Environment::SepoliaTestnet | Environment::UpgradeTest | Environment::TestingEnvThree | Environment::StressTest => false, diff --git a/crates/apollo_deployments/src/lib.rs b/crates/apollo_deployments/src/lib.rs index ad3752e0e8a..0ee89be78e1 100644 --- a/crates/apollo_deployments/src/lib.rs +++ b/crates/apollo_deployments/src/lib.rs @@ -6,4 +6,6 @@ pub mod deployment_definitions; pub mod deployments; pub mod k8s; pub mod service; +#[cfg(test)] +pub mod test_utils; pub mod utils; diff --git a/crates/apollo_deployments/src/service.rs b/crates/apollo_deployments/src/service.rs index e52c17de386..a8b0cf237ba 100644 --- a/crates/apollo_deployments/src/service.rs +++ b/crates/apollo_deployments/src/service.rs @@ -14,8 +14,6 @@ use serde_json::json; use strum::{Display, EnumVariantNames, IntoEnumIterator}; use strum_macros::{EnumDiscriminants, EnumIter, IntoStaticStr}; -#[cfg(test)] -use 
crate::deployment::FIX_BINARY_NAME; use crate::deployment::{ build_service_namespace_domain_address, ComponentConfigsSerializationWrapper, @@ -34,13 +32,15 @@ use crate::k8s::{ Resources, Toleration, }; +#[cfg(test)] +use crate::test_utils::FIX_BINARY_NAME; const SERVICES_DIR_NAME: &str = "services/"; #[derive(Clone, Debug, PartialEq, Serialize)] pub struct Service { #[serde(rename = "name")] - service_name: ServiceName, + node_service: NodeService, // TODO(Tsabary): change config path to PathBuf type. controller: Controller, config_paths: Vec, @@ -59,7 +59,7 @@ pub struct Service { impl Service { pub fn new( - service_name: ServiceName, + node_service: NodeService, external_secret: Option, config_filenames: Vec, ingress_params: IngressParams, @@ -74,7 +74,7 @@ impl Service { // TODO(Tsabary): delete redundant directories in the path. // TODO(Tsabary): reduce visibility of relevant functions and consts. - let service_file_path = service_name.get_service_file_path(); + let service_file_path = node_service.get_service_file_path(); let config_paths = config_filenames .iter() @@ -89,17 +89,17 @@ impl Service { }) .collect(); - let controller = service_name.get_controller(); - let autoscale = service_name.get_autoscale(); - let toleration = service_name.get_toleration(&environment); - let ingress = service_name.get_ingress(&environment, ingress_params); - let k8s_service_config = service_name.get_k8s_service_config(k8s_service_config_params); - let storage = service_name.get_storage(&environment); - let resources = service_name.get_resources(&environment); - let replicas = service_name.get_replicas(&environment); - let anti_affinity = service_name.get_anti_affinity(&environment); + let controller = node_service.get_controller(); + let autoscale = node_service.get_autoscale(); + let toleration = node_service.get_toleration(&environment); + let ingress = node_service.get_ingress(&environment, ingress_params); + let k8s_service_config = 
node_service.get_k8s_service_config(k8s_service_config_params); + let storage = node_service.get_storage(&environment); + let resources = node_service.get_resources(&environment); + let replicas = node_service.get_replicas(&environment); + let anti_affinity = node_service.get_anti_affinity(&environment); Self { - service_name, + node_service, config_paths, controller, ingress, @@ -123,17 +123,17 @@ impl Service { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, EnumDiscriminants)] #[strum_discriminants( - name(DeploymentName), + name(NodeType), derive(IntoStaticStr, EnumIter, EnumVariantNames, Serialize, Display), strum(serialize_all = "snake_case") )] -pub enum ServiceName { - ConsolidatedNode(ConsolidatedNodeServiceName), - HybridNode(HybridNodeServiceName), - DistributedNode(DistributedNodeServiceName), +pub enum NodeService { + Consolidated(ConsolidatedNodeServiceName), + Hybrid(HybridNodeServiceName), + Distributed(DistributedNodeServiceName), } -impl ServiceName { +impl NodeService { fn get_config_file_path(&self) -> String { let mut name = self.as_inner().to_string(); name.push_str(".json"); @@ -149,7 +149,7 @@ impl ServiceName { k8s_service_config_params: Option, ) -> Service { Service::new( - Into::::into(*self), + Into::::into(*self), external_secret.clone(), config_filenames, ingress_params.clone(), @@ -160,9 +160,9 @@ impl ServiceName { fn as_inner(&self) -> &dyn ServiceNameInner { match self { - ServiceName::ConsolidatedNode(inner) => inner, - ServiceName::HybridNode(inner) => inner, - ServiceName::DistributedNode(inner) => inner, + NodeService::Consolidated(inner) => inner, + NodeService::Hybrid(inner) => inner, + NodeService::Distributed(inner) => inner, } } @@ -218,7 +218,7 @@ impl ServiceName { pub fn get_service_file_path(&self) -> String { PathBuf::from(CONFIG_BASE_DIR) .join(SERVICES_DIR_NAME) - .join(DeploymentName::from(self).get_folder_name()) + .join(NodeType::from(self).get_folder_name()) .join(self.get_config_file_path()) 
.to_string_lossy() .to_string() @@ -277,35 +277,20 @@ pub(crate) trait ServiceNameInner: Display { } } -impl DeploymentName { - pub fn get_folder_name(&self) -> &'static str { - match self { - Self::ConsolidatedNode => "consolidated/", - Self::HybridNode => "hybrid/", - Self::DistributedNode => "distributed/", - } - } - - pub fn add_path_suffix(&self, path: PathBuf, instance_name: &str) -> PathBuf { - let deployment_name_dir = path.join(self.get_folder_name()); - let deployment_with_instance = deployment_name_dir.join(instance_name); - - let s = deployment_with_instance.to_string_lossy(); - let modified = if s.ends_with('/') { s.into_owned() } else { format!("{s}/") }; - modified.into() +impl NodeType { + fn get_folder_name(&self) -> String { + self.to_string() } - pub fn all_service_names(&self) -> Vec { + pub fn all_service_names(&self) -> Vec { match self { // TODO(Tsabary): find a way to avoid this code duplication. - Self::ConsolidatedNode => { - ConsolidatedNodeServiceName::iter().map(ServiceName::ConsolidatedNode).collect() - } - Self::HybridNode => { - HybridNodeServiceName::iter().map(ServiceName::HybridNode).collect() + Self::Consolidated => { + ConsolidatedNodeServiceName::iter().map(NodeService::Consolidated).collect() } - Self::DistributedNode => { - DistributedNodeServiceName::iter().map(ServiceName::DistributedNode).collect() + Self::Hybrid => HybridNodeServiceName::iter().map(NodeService::Hybrid).collect(), + Self::Distributed => { + DistributedNodeServiceName::iter().map(NodeService::Distributed).collect() } } } @@ -313,12 +298,12 @@ impl DeploymentName { pub fn get_component_configs( &self, ports: Option>, - ) -> IndexMap { + ) -> IndexMap { match self { // TODO(Tsabary): avoid this code duplication. 
- Self::ConsolidatedNode => ConsolidatedNodeServiceName::get_component_configs(ports), - Self::HybridNode => HybridNodeServiceName::get_component_configs(ports), - Self::DistributedNode => DistributedNodeServiceName::get_component_configs(ports), + Self::Consolidated => ConsolidatedNodeServiceName::get_component_configs(ports), + Self::Hybrid => HybridNodeServiceName::get_component_configs(ports), + Self::Distributed => DistributedNodeServiceName::get_component_configs(ports), } } @@ -327,10 +312,10 @@ impl DeploymentName { SerdeFn: Fn(&serde_json::Value, &str), { let component_configs = self.get_component_configs(ports); - for (service_name, config) in component_configs { + for (node_service, config) in component_configs { let wrapper = ComponentConfigsSerializationWrapper::from(config); let flattened = config_to_preset(&json!(wrapper.dump())); - let file_path = service_name.get_service_file_path(); + let file_path = node_service.get_service_file_path(); writer(&flattened, &file_path); } } @@ -352,19 +337,19 @@ impl DeploymentName { pub trait GetComponentConfigs { // TODO(Tsabary): replace IndexMap with regular HashMap. Currently using IndexMap as the // integration test relies on indices rather than service names. - fn get_component_configs(ports: Option>) -> IndexMap; + fn get_component_configs(ports: Option>) -> IndexMap; } -impl Serialize for ServiceName { +impl Serialize for NodeService { fn serialize(&self, serializer: S) -> Result where S: Serializer, { // Serialize only the inner value. 
match self { - ServiceName::ConsolidatedNode(inner) => inner.serialize(serializer), - ServiceName::HybridNode(inner) => inner.serialize(serializer), - ServiceName::DistributedNode(inner) => inner.serialize(serializer), + NodeService::Consolidated(inner) => inner.serialize(serializer), + NodeService::Hybrid(inner) => inner.serialize(serializer), + NodeService::Distributed(inner) => inner.serialize(serializer), } } } diff --git a/crates/apollo_deployments/src/test_utils.rs b/crates/apollo_deployments/src/test_utils.rs new file mode 100644 index 00000000000..73dff1248e5 --- /dev/null +++ b/crates/apollo_deployments/src/test_utils.rs @@ -0,0 +1,92 @@ +use apollo_config::converters::{serialize_optional_vec_u8, serialize_slice_url}; +use serde::{Serialize, Serializer}; +use url::Url; + +pub(crate) const FIX_BINARY_NAME: &str = "deployment_generator"; + +#[derive(Serialize)] +pub struct SecretsConfigOverride { + #[serde(rename = "base_layer_config.node_url")] + base_layer_config_node_url: Url, + #[serde(rename = "consensus_manager_config.eth_to_strk_oracle_config.base_url")] + consensus_manager_config_eth_to_strk_oracle_config_base_url: Url, + #[serde(rename = "consensus_manager_config.eth_to_strk_oracle_config.headers")] + consensus_manager_config_eth_to_strk_oracle_config_headers: String, + #[serde( + rename = "consensus_manager_config.network_config.secret_key", + serialize_with = "serialize_optional_vec_u8_wrapper" + )] + consensus_manager_config_network_config_secret_key: Option>, + #[serde( + rename = "l1_endpoint_monitor_config.ordered_l1_endpoint_urls", + serialize_with = "serialize_slice_url_wrapper" + )] + l1_endpoint_monitor_config_ordered_l1_endpoint_urls: Vec, + #[serde( + rename = "mempool_p2p_config.network_config.secret_key", + serialize_with = "serialize_optional_vec_u8_wrapper" + )] + mempool_p2p_config_network_config_secret_key: Option>, + recorder_url: Url, + #[serde( + rename = 
"state_sync_config.central_sync_client_config.central_source_config.http_headers" + )] + state_sync_config_central_sync_client_config_central_source_config_http_headers: String, + #[serde( + rename = "state_sync_config.network_config.secret_key", + serialize_with = "serialize_optional_vec_u8_wrapper" + )] + state_sync_config_network_config_secret_key: Option>, +} + +impl Default for SecretsConfigOverride { + fn default() -> Self { + Self { + base_layer_config_node_url: Url::parse("https://arbitrary.url.com").unwrap(), + consensus_manager_config_eth_to_strk_oracle_config_base_url: Url::parse( + "https://arbitrary.eth_to_strk_oracle.url", + ) + .unwrap(), + consensus_manager_config_eth_to_strk_oracle_config_headers: "".to_string(), + consensus_manager_config_network_config_secret_key: None, + l1_endpoint_monitor_config_ordered_l1_endpoint_urls: vec![ + Url::parse("https://arbitrary.ordered_l1_endpoint_1.url").unwrap(), + Url::parse("https://arbitrary.ordered_l1_endpoint_2.url").unwrap(), + ], + mempool_p2p_config_network_config_secret_key: None, + recorder_url: Url::parse("https://arbitrary.recorder.url").unwrap(), + state_sync_config_central_sync_client_config_central_source_config_http_headers: "" + .to_string(), + state_sync_config_network_config_secret_key: None, + } + } +} + +// Wrapper function for the custom `serialize_slice_url` function, to be compatible with serde's +// `serialize_with` attribute. It first applies the custom serialization logic to convert the slice +// of `Url` into a `String`, and then serializes that string. +fn serialize_slice_url_wrapper(urls: &[Url], serializer: S) -> Result +where + S: Serializer, +{ + // Call the implemented custom serialization function + let s = serialize_slice_url(urls); + // Serialize the returned String + serializer.serialize_str(&s) +} + +// Wrapper function for the custom `serialize_optional_vec_u8` function, to be compatible with +// serde's `serialize_with` attribute. 
It first applies the custom serialization logic to convert +// the optional u8 vector into a `String`, and then serializes that string. +pub fn serialize_optional_vec_u8_wrapper( + value: &Option>, + serializer: S, +) -> Result +where + S: Serializer, +{ + // Call the implemented custom serialization function + let s = serialize_optional_vec_u8(value); + // Serialize the returned String + serializer.serialize_str(&s) +} diff --git a/crates/apollo_deployments/src/utils.rs b/crates/apollo_deployments/src/utils.rs index edfa536f9cd..d3d47023a5b 100644 --- a/crates/apollo_deployments/src/utils.rs +++ b/crates/apollo_deployments/src/utils.rs @@ -6,10 +6,6 @@ use apollo_protobuf::consensus::DEFAULT_VALIDATOR_ID; // TODO(Tsabary): delete duplicates from the base app config, and add a test that there are no // conflicts between all the override config entries and the values in the base app config. -pub(crate) fn format_node_id(base_format: &str, id: usize) -> String { - base_format.replace("{}", &id.to_string()) -} - pub(crate) fn get_secret_key(id: usize) -> String { format!("0x010101010101010101010101010101010101010101010101010101010101010{}", id + 1) } diff --git a/crates/apollo_gateway/Cargo.toml b/crates/apollo_gateway/Cargo.toml index ed820440a19..1549074defd 100644 --- a/crates/apollo_gateway/Cargo.toml +++ b/crates/apollo_gateway/Cargo.toml @@ -31,6 +31,7 @@ cairo-lang-starknet-classes.workspace = true futures.workspace = true lazy_static.workspace = true mempool_test_utils.workspace = true +num-rational.workspace = true reqwest.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/crates/apollo_gateway/src/config.rs b/crates/apollo_gateway/src/config.rs index 6f82ff83cd9..19032b3eca7 100644 --- a/crates/apollo_gateway/src/config.rs +++ b/crates/apollo_gateway/src/config.rs @@ -44,6 +44,8 @@ impl SerializeConfig for GatewayConfig { #[derive(Clone, Debug, Serialize, Deserialize, Validate, PartialEq)] pub struct 
StatelessTransactionValidatorConfig { + // TODO(Arni): Align the name of this field with the mempool config, and all other places where + // validation is skipped during the systems bootstrap phase. // If true, validates that the resource bounds are not zero. pub validate_non_zero_resource_bounds: bool, // TODO(AlonH): Remove this field and use the one from the versioned constants. @@ -62,7 +64,7 @@ impl Default for StatelessTransactionValidatorConfig { fn default() -> Self { StatelessTransactionValidatorConfig { validate_non_zero_resource_bounds: true, - min_gas_price: 100_000_000, + min_gas_price: 3_000_000_000, max_calldata_length: 4000, max_signature_length: 4000, max_contract_bytecode_size: 81920, @@ -170,6 +172,8 @@ pub struct StatefulTransactionValidatorConfig { pub reject_future_declare_txs: bool, pub max_nonce_for_validation_skip: Nonce, pub versioned_constants_overrides: VersionedConstantsOverrides, + // Minimum gas price as percentage of threshold to accept transactions. + pub min_gas_price_percentage: u8, // E.g., 80 to require 80% of threshold. 
} impl Default for StatefulTransactionValidatorConfig { @@ -178,6 +182,7 @@ impl Default for StatefulTransactionValidatorConfig { max_allowed_nonce_gap: 50, reject_future_declare_txs: true, max_nonce_for_validation_skip: Nonce(Felt::ONE), + min_gas_price_percentage: 100, versioned_constants_overrides: VersionedConstantsOverrides::default(), } } @@ -204,6 +209,12 @@ impl SerializeConfig for StatefulTransactionValidatorConfig { "If true, rejects declare transactions with future nonces.", ParamPrivacyInput::Public, ), + ser_param( + "min_gas_price_percentage", + &self.min_gas_price_percentage, + "Minimum gas price as percentage of threshold to accept transactions.", + ParamPrivacyInput::Public, + ), ]); dump.append(&mut prepend_sub_config_name( self.versioned_constants_overrides.dump(), diff --git a/crates/apollo_gateway/src/gateway.rs b/crates/apollo_gateway/src/gateway.rs index d25ffd13b15..25b4f2eb5f7 100644 --- a/crates/apollo_gateway/src/gateway.rs +++ b/crates/apollo_gateway/src/gateway.rs @@ -26,12 +26,15 @@ use apollo_proc_macros::sequencer_latency_histogram; use apollo_state_sync_types::communication::SharedStateSyncClient; use axum::async_trait; use blockifier::context::ChainInfo; +use num_rational::Ratio; +use starknet_api::block::NonzeroGasPrice; use starknet_api::executable_transaction::ValidateCompiledClassHashError; use starknet_api::rpc_transaction::{ InternalRpcTransaction, InternalRpcTransactionWithoutTxHash, RpcTransaction, }; +use starknet_api::transaction::fields::ValidResourceBounds; use tracing::{debug, error, info, instrument, warn, Span}; use crate::config::GatewayConfig; @@ -203,6 +206,19 @@ impl ProcessTxBlockingTask { let mut validator = self .stateful_tx_validator .instantiate_validator(self.state_reader_factory.as_ref(), &self.chain_info)?; + + // Skip this validation during the systems bootstrap phase. + if self.stateless_tx_validator.config.validate_non_zero_resource_bounds { + // TODO(Arni): get next_l2_gas_price from the block header. 
+ let previous_block_l2_gas_price = + validator.block_context().block_info().gas_prices.strk_gas_prices.l2_gas_price; + validate_tx_l2_gas_price_within_threshold( + executable_tx.resource_bounds(), + previous_block_l2_gas_price, + self.stateful_tx_validator.config.min_gas_price_percentage, + )?; + } + let address = executable_tx.contract_address(); let nonce = validator.get_nonce(address).map_err(|e| { error!("Failed to get nonce for sender address {}: {}", address, e); @@ -222,6 +238,40 @@ impl ProcessTxBlockingTask { } } +// TODO(Arni): Consider running this validation for all gas prices. +fn validate_tx_l2_gas_price_within_threshold( + tx_resource_bounds: ValidResourceBounds, + previous_block_l2_gas_price: NonzeroGasPrice, + min_gas_price_percentage: u8, +) -> GatewayResult<()> { + match tx_resource_bounds { + ValidResourceBounds::AllResources(tx_resource_bounds) => { + let tx_l2_gas_price = tx_resource_bounds.l2_gas.max_price_per_unit; + let gas_price_threshold_multiplier = + Ratio::new(min_gas_price_percentage.into(), 100_u128); + let threshold = + (gas_price_threshold_multiplier * previous_block_l2_gas_price.get().0).to_integer(); + if tx_l2_gas_price.0 < threshold { + return Err(StarknetError { + // We didn't have this kind of an error. + code: StarknetErrorCode::UnknownErrorCode( + "StarknetErrorCode.GAS_PRICE_TOO_LOW".to_string(), + ), + message: format!( + "Transaction L2 gas price {tx_l2_gas_price} is below the required \ + threshold {threshold}." + ), + }); + } + } + ValidResourceBounds::L1Gas(_) => { + // No validation required for legacy transactions. 
+ } + } + + Ok(()) +} + fn convert_compiled_class_hash_error(error: ValidateCompiledClassHashError) -> StarknetError { let ValidateCompiledClassHashError::CompiledClassHashMismatch { computed_class_hash, diff --git a/crates/apollo_gateway/src/stateless_transaction_validator_test.rs b/crates/apollo_gateway/src/stateless_transaction_validator_test.rs index 103e94e730b..96dc3e9c0e7 100644 --- a/crates/apollo_gateway/src/stateless_transaction_validator_test.rs +++ b/crates/apollo_gateway/src/stateless_transaction_validator_test.rs @@ -34,7 +34,8 @@ use crate::test_utils::{ NON_EMPTY_RESOURCE_BOUNDS, }; -const MAX_GAS_PRICE: u128 = 100_000_000_u128; +static DEFAULT_VALIDATOR_CONFIG: LazyLock = + LazyLock::new(StatelessTransactionValidatorConfig::default); static MIN_SIERRA_VERSION: LazyLock = LazyLock::new(|| VersionId::new(1, 1, 0)); static MAX_SIERRA_VERSION: LazyLock = LazyLock::new(|| VersionId::new(1, 5, usize::MAX)); @@ -140,7 +141,7 @@ fn test_positive_flow( RpcTransactionArgs { resource_bounds: AllResourceBounds { l2_gas: ResourceBounds { - max_price_per_unit: GasPrice(MAX_GAS_PRICE - 1), + max_price_per_unit: GasPrice(DEFAULT_VALIDATOR_CONFIG.min_gas_price - 1), ..NON_EMPTY_RESOURCE_BOUNDS }, ..Default::default() @@ -148,8 +149,8 @@ fn test_positive_flow( ..Default::default() }, StatelessTransactionValidatorError::MaxGasPriceTooLow { - gas_price: GasPrice(MAX_GAS_PRICE - 1), - min_gas_price: MAX_GAS_PRICE + gas_price: GasPrice(DEFAULT_VALIDATOR_CONFIG.min_gas_price - 1), + min_gas_price: DEFAULT_VALIDATOR_CONFIG.min_gas_price }, )] fn test_invalid_resource_bounds( @@ -158,12 +159,8 @@ fn test_invalid_resource_bounds( #[values(TransactionType::Declare, TransactionType::DeployAccount, TransactionType::Invoke)] tx_type: TransactionType, ) { - let config = StatelessTransactionValidatorConfig { - validate_non_zero_resource_bounds: true, - min_gas_price: MAX_GAS_PRICE, - ..*DEFAULT_VALIDATOR_CONFIG_FOR_TESTING - }; - let tx_validator = StatelessTransactionValidator { 
config }; + let tx_validator = + StatelessTransactionValidator { config: DEFAULT_VALIDATOR_CONFIG.to_owned() }; let tx = rpc_tx_for_testing(tx_type, rpc_tx_args); diff --git a/crates/apollo_http_server/Cargo.toml b/crates/apollo_http_server/Cargo.toml index 54f1c0d7b05..b8e032c0fe1 100644 --- a/crates/apollo_http_server/Cargo.toml +++ b/crates/apollo_http_server/Cargo.toml @@ -23,6 +23,7 @@ apollo_gateway_types.workspace = true apollo_infra.workspace = true apollo_infra_utils.workspace = true apollo_metrics.workspace = true +apollo_proc_macros.workspace = true axum.workspace = true blockifier_test_utils = { workspace = true, optional = true } futures.workspace = true diff --git a/crates/apollo_http_server/src/errors.rs b/crates/apollo_http_server/src/errors.rs index af4336e58a3..6c30ac9e6b7 100644 --- a/crates/apollo_http_server/src/errors.rs +++ b/crates/apollo_http_server/src/errors.rs @@ -90,14 +90,16 @@ fn gw_client_err_into_response(err: GatewayClientError) -> Response { } /// Serializes a `StarknetError` into an HTTP response, encode the error message -/// to defend potential Cross-Site risks. We replace all non-alphanumeric except some punctuation -/// characters with `?`. +/// to defend potential Cross-Site risks. 
fn serialize_error(error: &StarknetError) -> Response { - let re = Regex::new(r"[^a-zA-Z0-9 :.,\[\]]").unwrap(); - let sanitized_error = StarknetError { - code: error.code.clone(), - message: format!("{}", re.replace_all(&error.message, "?")), - }; + let quote_re = Regex::new(r#"[\"`]"#).unwrap(); // " and ` => ' (single quote) + let sanitize_re = Regex::new(r#"[^a-zA-Z0-9 :.,\[\]\(\)\{\}'_]"#).unwrap(); // All other non-alphanumeric characters except [:.,[](){}]_ => ' ' (space) + + let mut message = error.message.clone(); + message = quote_re.replace_all(&message, "'").to_string(); + message = sanitize_re.replace_all(&message, " ").to_string(); + + let sanitized_error = StarknetError { code: error.code.clone(), message }; serde_json::to_vec(&sanitized_error) .expect("Expecting a serializable StarknetError.") diff --git a/crates/apollo_http_server/src/http_server.rs b/crates/apollo_http_server/src/http_server.rs index 019da06b4f6..a46908d087f 100644 --- a/crates/apollo_http_server/src/http_server.rs +++ b/crates/apollo_http_server/src/http_server.rs @@ -16,6 +16,7 @@ use apollo_gateway_types::gateway_types::{ }; use apollo_infra::component_definitions::ComponentStarter; use apollo_infra_utils::type_name::short_type_name; +use apollo_proc_macros::sequencer_latency_histogram; use axum::extract::State; use axum::http::HeaderMap; use axum::routing::{get, post}; @@ -34,6 +35,7 @@ use crate::metrics::{ ADDED_TRANSACTIONS_INTERNAL_ERROR, ADDED_TRANSACTIONS_SUCCESS, ADDED_TRANSACTIONS_TOTAL, + HTTP_SERVER_ADD_TX_LATENCY, }; #[cfg(test)] @@ -108,6 +110,7 @@ async fn add_rpc_tx( } #[instrument(skip(app_state))] +#[sequencer_latency_histogram(HTTP_SERVER_ADD_TX_LATENCY, true)] async fn add_tx( State(app_state): State, headers: HeaderMap, diff --git a/crates/apollo_http_server/src/http_server_test.rs b/crates/apollo_http_server/src/http_server_test.rs index c23011c2a1d..5f965dd3ec4 100644 --- a/crates/apollo_http_server/src/http_server_test.rs +++ 
b/crates/apollo_http_server/src/http_server_test.rs @@ -282,7 +282,7 @@ async fn sanitizing_error_message() { let mut tx_json = TransactionSerialization(serde_json::to_value(deprecated_gateway_invoke_tx()).unwrap()); let tx_object = tx_json.0.as_object_mut().unwrap(); - let malicious_version: &'static str = ""; + let malicious_version: &'static str = "\"'`[](){}_!@#$%^&*+=~"; tx_object.insert("version".to_string(), Value::String(malicious_version.to_string())).unwrap(); let mock_gateway_client = MockGatewayClient::new(); @@ -303,9 +303,9 @@ async fn sanitizing_error_message() { "Message should not contain unescaped script tag" ); - // Make sure it is escaped. + // Make sure it is escaped correctly. assert!( - starknet_error.message.contains("?script?alert?1???script?"), + starknet_error.message.contains(" script alert(1) script '''[](){}_ "), "Escaped message not found. This is the returned error message: {}", starknet_error.message ); diff --git a/crates/apollo_http_server/src/metrics.rs b/crates/apollo_http_server/src/metrics.rs index 057ee9ea8c9..a6393d1357d 100644 --- a/crates/apollo_http_server/src/metrics.rs +++ b/crates/apollo_http_server/src/metrics.rs @@ -12,6 +12,7 @@ define_metrics!( MetricCounter { ADDED_TRANSACTIONS_SUCCESS, "http_server_added_transactions_success", "Number of successfully added transactions", init = 0 }, MetricCounter { ADDED_TRANSACTIONS_FAILURE, "http_server_added_transactions_failure", "Number of faulty added transactions", init = 0 }, MetricCounter { ADDED_TRANSACTIONS_INTERNAL_ERROR, "http_server_added_transactions_internal_error", "Number of faulty added transactions failing on internal error", init = 0 }, + MetricHistogram { HTTP_SERVER_ADD_TX_LATENCY, "http_server_add_tx_latency", "Latency of HTTP add_tx endpoint in secs" }, }, ); @@ -21,4 +22,5 @@ pub(crate) fn init_metrics() { ADDED_TRANSACTIONS_SUCCESS.register(); ADDED_TRANSACTIONS_FAILURE.register(); ADDED_TRANSACTIONS_INTERNAL_ERROR.register(); + 
HTTP_SERVER_ADD_TX_LATENCY.register(); } diff --git a/crates/apollo_infra_utils/Cargo.toml b/crates/apollo_infra_utils/Cargo.toml index 5f2fbf6b8d5..4b08d7f5310 100644 --- a/crates/apollo_infra_utils/Cargo.toml +++ b/crates/apollo_infra_utils/Cargo.toml @@ -13,6 +13,7 @@ testing = ["colored", "dep:assert-json-diff", "socket2", "tempfile"] workspace = true [dependencies] +apollo_proc_macros.workspace = true assert-json-diff = { workspace = true, optional = true } colored = { workspace = true, optional = true } num_enum.workspace = true diff --git a/crates/apollo_infra_utils/src/lib.rs b/crates/apollo_infra_utils/src/lib.rs index c6f49e1067d..79c0420dac0 100644 --- a/crates/apollo_infra_utils/src/lib.rs +++ b/crates/apollo_infra_utils/src/lib.rs @@ -6,7 +6,10 @@ pub mod global_allocator; pub mod path; pub mod run_until; pub mod tasks; +pub mod template; #[cfg(any(feature = "testing", test))] pub mod test_utils; pub mod tracing; pub mod type_name; + +pub extern crate apollo_proc_macros as _apollo_proc_macros; diff --git a/crates/apollo_infra_utils/src/template.rs b/crates/apollo_infra_utils/src/template.rs new file mode 100644 index 00000000000..c74493ba914 --- /dev/null +++ b/crates/apollo_infra_utils/src/template.rs @@ -0,0 +1,46 @@ +use std::fmt::{Display, Write}; + +#[cfg(test)] +#[path = "template_test.rs"] +mod template_test; + +/// A simple positional template with `{}` placeholders. +pub struct Template(pub &'static str); + +impl Template { + /// Renders the template by substituting `{}` placeholders with the provided args. Panics if the + /// number of `{}` in the template doesn't match the number of args provided. + pub fn format(&self, args: &[&dyn Display]) -> String { + // Count how many `{}` placeholders are in the template string, and ensure the number of + // args matches the number of placeholders. 
+ let placeholder_count = self.0.matches("{}").count(); + assert_eq!( + placeholder_count, + args.len(), + "Template {} expects {} placeholders, but got {} args", + self.0, + placeholder_count, + args.len() + ); + + // Allocate the output buffer once, with some extra capacity for each argument. This avoids + // reallocations as we append to the string. In case of insufficient capacity, the string + // will indeed reallocate, but this is a trade-off for performance in the common case. + const SIZE_PER_ARG: usize = 16; // Estimated size for each argument, for initial allocation. + let mut out = String::with_capacity(self.0.len() + SIZE_PER_ARG * args.len()); + + // Walk through the template, streaming chunks + args into `out` + let mut rest = self.0; + for value in args { + if let Some(i) = rest.find("{}") { + // Write the prefix before the placeholder + out.push_str(&rest[..i]); + write!(out, "{value}").unwrap(); + rest = &rest[i + 2..]; + } + } + // Append whatever is left after the last placeholder + out.push_str(rest); + out + } +} diff --git a/crates/apollo_infra_utils/src/template_test.rs b/crates/apollo_infra_utils/src/template_test.rs new file mode 100644 index 00000000000..17630fbbe4e --- /dev/null +++ b/crates/apollo_infra_utils/src/template_test.rs @@ -0,0 +1,48 @@ +use std::fmt::Display; + +use pretty_assertions::assert_eq; + +use crate::template::Template; + +#[test] +fn templates() { + // Test with a simple template and two arguments + let template = Template("Hello, {}! Welcome to {}."); + let args: Vec<&dyn Display> = vec![&"Alice", &"Wonderland"]; + let formatted = template.format(&args); + assert_eq!(formatted, "Hello, Alice! 
Welcome to Wonderland."); + + // Test with a simple template and two arguments + let template = Template("My two favorite numbers are {} and {}."); + let args: Vec<&dyn Display> = vec![&1913, &1312]; + let formatted = template.format(&args); + assert_eq!(formatted, "My two favorite numbers are 1913 and 1312."); + + // Test with an empty template + let empty_template = Template(""); + let empty_args: Vec<&dyn Display> = vec![]; + let empty_formatted = empty_template.format(&empty_args); + assert_eq!(empty_formatted, ""); + + // Test with a template that is a single placeholder + let placeholder_template = Template("{}"); + let placeholder_template_args: Vec<&dyn Display> = vec![&"MHFC"]; + let placeholder_template_formatted = placeholder_template.format(&placeholder_template_args); + assert_eq!(placeholder_template_formatted, "MHFC"); +} + +#[test] +#[should_panic] +fn template_too_many_args() { + let template = Template("{}"); + let args: Vec<&dyn Display> = vec![&"1", &"2"]; + template.format(&args); +} + +#[test] +#[should_panic] +fn template_too_few_args() { + let template = Template("{}{}{}"); + let args: Vec<&dyn Display> = vec![&"1", &"2"]; + template.format(&args); +} diff --git a/crates/apollo_infra_utils/src/tracing.rs b/crates/apollo_infra_utils/src/tracing.rs index f91c0d72448..3daa20439da 100644 --- a/crates/apollo_infra_utils/src/tracing.rs +++ b/crates/apollo_infra_utils/src/tracing.rs @@ -1,3 +1,4 @@ +pub use apollo_proc_macros::log_every_n; use tracing::{debug, error, info, trace, warn}; #[cfg(test)] @@ -48,3 +49,84 @@ pub trait LogCompatibleToStringExt: std::fmt::Display { self.to_string().replace('\n', "\t") } } + +/// Logs an INFO message once every `n` calls. +/// +/// Each call site of this macro maintains its own independent counter. +/// The message will be logged on calls: 1, N+1, 2N+1, 3N+1, etc., for each invocation **from that +/// specific call site**. 
+/// +/// # Arguments +/// +/// * `$n`: The integer frequency (e.g., `2` for every second call). +/// * `$($arg:tt)*`: The arguments to pass to `tracing::info!`, e.g., a format string and its +/// corresponding values. +/// +/// # Example +/// ```rust +/// use apollo_infra_utils::info_every_n; +/// +/// for i in 0..5 { +/// info_every_n!(2, "Processing item: {}", i); +/// // Output: +/// // Processing item: 0 (on 1st call) +/// // Processing item: 2 (on 3rd call) +/// // Processing item: 4 (on 5th call) +/// } +/// +/// // This will log twice since these are two different call sites. +/// info_every_n!(2, "call site"); +/// info_every_n!(2, "call site"); +/// ``` +#[macro_export] +macro_rules! info_every_n { + ($n:expr, $($arg:tt)*) => { + { + $crate::_apollo_proc_macros::log_every_n!(::tracing::info, $n, $($arg)*); + } + }; +} + +/// Logs a WARN message once every `n` calls. +/// See `info_every_n!` for detailed usage and behavior. +#[macro_export] +macro_rules! warn_every_n { + ($n:expr, $($arg:tt)*) => { + { + $crate::_apollo_proc_macros::log_every_n!(::tracing::warn, $n, $($arg)*); + } + }; +} + +/// Logs an ERROR message once every `n` calls. +/// See `info_every_n!` for detailed usage and behavior. +#[macro_export] +macro_rules! error_every_n { + ($n:expr, $($arg:tt)*) => { + { + $crate::_apollo_proc_macros::log_every_n!(::tracing::error, $n, $($arg)*); + } + }; +} + +/// Logs a DEBUG message once every `n` calls. +/// See `info_every_n!` for detailed usage and behavior. +#[macro_export] +macro_rules! debug_every_n { + ($n:expr, $($arg:tt)*) => { + { + $crate::_apollo_proc_macros::log_every_n!(::tracing::debug, $n, $($arg)*); + } + }; +} + +/// Logs a TRACE message once every `n` calls. +/// See `info_every_n!` for detailed usage and behavior. +#[macro_export] +macro_rules! 
trace_every_n { + ($n:expr, $($arg:tt)*) => { + { + $crate::_apollo_proc_macros::log_every_n!(::tracing::trace, $n, $($arg)*); + } + }; +} diff --git a/crates/apollo_infra_utils/src/tracing_test.rs b/crates/apollo_infra_utils/src/tracing_test.rs index 50060452387..1ba3da1ec78 100644 --- a/crates/apollo_infra_utils/src/tracing_test.rs +++ b/crates/apollo_infra_utils/src/tracing_test.rs @@ -1,12 +1,15 @@ use std::fmt::Debug; +use std::io::Write; use std::sync::{Arc, Mutex}; use tracing::field::{Field, Visit}; use tracing::span::{Attributes, Id, Record}; -use tracing::subscriber::with_default; -use tracing::{Event, Metadata, Subscriber}; +use tracing::subscriber::{with_default, DefaultGuard}; +use tracing::{Event, Level, Metadata, Subscriber}; +use tracing_subscriber::fmt::SubscriberBuilder; use crate::tracing::{CustomLogger, TraceLevel}; +use crate::{debug_every_n, error_every_n, info_every_n, trace_every_n, warn_every_n}; #[test] fn test_dynamic_logger_without_base_message() { @@ -157,3 +160,186 @@ impl Subscriber for TestSubscriber { fn record_follows_from(&self, _span: &Id, _follows: &Id) {} } + +// Tests for the `log_every_n!` macros. + +/// A struct used for capturing log outputs. +#[derive(Clone)] +struct SharedBuffer { + inner: Arc>>, +} + +impl SharedBuffer { + fn new() -> Self { + SharedBuffer { inner: Arc::new(Mutex::new(Vec::new())) } + } + + fn content(&self) -> String { + let buffer = self.inner.lock().unwrap(); + String::from_utf8_lossy(&buffer).to_string() + } +} + +impl Write for SharedBuffer { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.inner.lock().unwrap().write(buf) + } + + fn flush(&mut self) -> std::io::Result<()> { + self.inner.lock().unwrap().flush() + } +} + +/// Sends logs to `SharedBuffer` (instead of stderr) for testing purposes. +/// Logs will be sent to the `buffer` until the returned `DefaultGuard` is dropped. 
+fn redirect_logs_to_buffer() -> (SharedBuffer, DefaultGuard) { + let buffer = SharedBuffer::new(); + let buffer_clone = buffer.clone(); + + let subscriber = SubscriberBuilder::default() + .with_writer(move || buffer_clone.clone()) + .with_max_level(Level::TRACE) + .with_ansi(false) + .finish(); + + let guard = tracing::subscriber::set_default(subscriber); + + assert!(buffer.content().is_empty(), "Buffer should be empty before logging"); + (buffer, guard) +} + +const LOG_MESSAGE: &str = "Got logged"; + +// We test all the behaviors on one specific log level and then separately test that each macro logs +// at the correct level. + +#[test] +fn test_log_every_n_logs_first_time() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + warn_every_n!(1000, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches(LOG_MESSAGE).count(), + 1, + "Log did not contain expected content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_log_every_n_does_not_log_more_than_every_n() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + for _ in 0..2 { + warn_every_n!(3, LOG_MESSAGE); + } + + assert_eq!( + buffer.content().matches(LOG_MESSAGE).count(), + 1, + "Log did not contain expected content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_log_every_n_logs_every_n() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + for _ in 0..5 { + warn_every_n!(3, LOG_MESSAGE); + } + + assert_eq!( + buffer.content().matches(LOG_MESSAGE).count(), + 2, + "Log did not contain expected content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_log_every_n_different_lines_count_separately() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + warn_every_n!(1000, LOG_MESSAGE); + warn_every_n!(1000, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches(LOG_MESSAGE).count(), + 2, + "Log did not contain expected content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_trace_every_n_logs_to_trace() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + 
trace_every_n!(2, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches("TRACE").count(), + 1, + "Log did not contain expected TRACE content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_debug_every_n_logs_to_debug() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + debug_every_n!(2, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches("DEBUG").count(), + 1, + "Log did not contain expected DEBUG content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_info_every_n_logs_to_info() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + info_every_n!(2, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches("INFO").count(), + 1, + "Log did not contain expected INFO content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_warn_every_n_logs_to_warn() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + warn_every_n!(2, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches("WARN").count(), + 1, + "Log did not contain expected WARN content:\n{}", + buffer.content() + ); +} + +#[test] +fn test_error_every_n_logs_to_error() { + let (buffer, _guard) = redirect_logs_to_buffer(); + + error_every_n!(2, LOG_MESSAGE); + + assert_eq!( + buffer.content().matches("ERROR").count(), + 1, + "Log did not contain expected ERROR content:\n{}", + buffer.content() + ); +} diff --git a/crates/apollo_integration_tests/src/bin/sequencer_simulator.rs b/crates/apollo_integration_tests/src/bin/sequencer_simulator.rs index 4e5e4bc8d0e..3600ce12976 100644 --- a/crates/apollo_integration_tests/src/bin/sequencer_simulator.rs +++ b/crates/apollo_integration_tests/src/bin/sequencer_simulator.rs @@ -1,5 +1,6 @@ use std::fs::read_to_string; +use alloy::primitives::{Address as EthereumContractAddress, Address}; use apollo_infra::trace_util::configure_tracing; use apollo_integration_tests::integration_test_manager::{HTTP_PORT_ARG, MONITORING_PORT_ARG}; use apollo_integration_tests::sequencer_simulator_utils::SequencerSimulator; @@ -12,9 +13,46 @@ use 
apollo_integration_tests::utils::{ }; use clap::Parser; use mempool_test_utils::starknet_api_test_utils::MultiAccountTransactionGenerator; +use papyrus_base_layer::ethereum_base_layer_contract::EthereumBaseLayerConfig; +use papyrus_base_layer::test_utils::{ + deploy_starknet_l1_contract, + make_block_history_on_anvil, + DEFAULT_ANVIL_L1_DEPLOYED_ADDRESS, +}; use serde_json::Value; use tokio::time::{sleep, Duration}; use tracing::info; +use url::Url; + +const ANVIL_NODE_URL: &str = "http://localhost:8545"; +const NUM_BLOCKS_NEEDED_ON_L1: usize = 310; + +#[derive(Debug)] +struct AnvilAddresses { + sender: Address, + receiver: Address, +} + +impl AnvilAddresses { + fn from_optional_strings( + sender: &Option, + receiver: &Option, + ) -> anyhow::Result> { + match (sender, receiver) { + (Some(s), Some(r)) => Ok(Some(Self { + sender: s.parse().map_err(|e| anyhow::anyhow!("Invalid sender address: {}", e))?, + receiver: r + .parse() + .map_err(|e| anyhow::anyhow!("Invalid receiver address: {}", e))?, + })), + (None, None) => Ok(None), + _ => anyhow::bail!( + "Both --sender-address and --receiver-address must be provided together, or \ + neither." 
+ ), + } + } +} fn read_ports_from_file(path: &str) -> (u16, u16) { // Read the file content @@ -89,6 +127,38 @@ async fn run_simulation( } } +async fn initialize_anvil_state(sender_address: Address, receiver_address: Address) { + info!( + "Initializing Anvil state with sender: {} and receiver: {}", + sender_address, receiver_address + ); + + let base_layer_config = build_base_layer_config_for_testing(); + + deploy_starknet_l1_contract(base_layer_config.clone()).await; + + make_block_history_on_anvil( + sender_address, + receiver_address, + base_layer_config, + NUM_BLOCKS_NEEDED_ON_L1, + ) + .await; +} + +fn build_base_layer_config_for_testing() -> EthereumBaseLayerConfig { + let starknet_contract_address: EthereumContractAddress = + DEFAULT_ANVIL_L1_DEPLOYED_ADDRESS.parse().expect("Invalid contract address"); + let node_url = Url::parse(ANVIL_NODE_URL).expect("Failed to parse Anvil URL"); + + EthereumBaseLayerConfig { + node_url, + starknet_contract_address, + prague_blob_gas_calc: true, + ..Default::default() + } +} + #[derive(Parser, Debug)] #[command(name = "sequencer_simulator", about = "Run sequencer simulator.")] struct Args { @@ -109,6 +179,12 @@ struct Args { #[arg(long, help = "Run the simulator in an infinite loop")] run_forever: bool, + + #[arg(long, help = "Anvil sender address (0x...)")] + sender_address: Option, + + #[arg(long, help = "Anvil receiver address (0x...)")] + receiver_address: Option, } #[tokio::main] @@ -117,6 +193,15 @@ async fn main() -> anyhow::Result<()> { let args = Args::parse(); + let anvil_addresses = + AnvilAddresses::from_optional_strings(&args.sender_address, &args.receiver_address)?; + + if let Some(addresses) = anvil_addresses { + initialize_anvil_state(addresses.sender, addresses.receiver).await; + } else { + info!("Skipping Anvil state initialization (no sender/receiver provided)."); + } + let mut tx_generator = create_integration_test_tx_generator(); let (http_port, monitoring_port) = get_ports(&args); diff --git 
a/crates/apollo_integration_tests/src/node_component_configs.rs b/crates/apollo_integration_tests/src/node_component_configs.rs index 2d63a5ee6ab..cd359672dd2 100644 --- a/crates/apollo_integration_tests/src/node_component_configs.rs +++ b/crates/apollo_integration_tests/src/node_component_configs.rs @@ -6,7 +6,7 @@ use apollo_deployments::deployments::hybrid::{ HybridNodeServiceName, HYBRID_NODE_REQUIRED_PORTS_NUM, }; -use apollo_deployments::service::{DeploymentName, ServiceName}; +use apollo_deployments::service::{NodeService, NodeType}; use apollo_infra_utils::test_utils::AvailablePortsGenerator; use apollo_node::config::component_config::{set_urls_to_localhost, ComponentConfig}; @@ -73,7 +73,7 @@ impl IntoIterator for NodeComponentConfigs { pub fn create_consolidated_component_configs() -> NodeComponentConfigs { // All components are in executable index 0. NodeComponentConfigs::new( - DeploymentName::ConsolidatedNode.get_component_configs(None).into_values().collect(), + NodeType::Consolidated.get_component_configs(None).into_values().collect(), 0, 0, 0, @@ -89,8 +89,7 @@ pub fn create_distributed_component_configs( .expect("Failed to get an AvailablePorts instance for distributed node configs"); let ports = available_ports.get_next_ports(DISTRIBUTED_NODE_REQUIRED_PORTS_NUM); - let services_component_config = - DeploymentName::DistributedNode.get_component_configs(Some(ports)); + let services_component_config = NodeType::Distributed.get_component_configs(Some(ports)); let mut component_configs: Vec = services_component_config.values().cloned().collect(); @@ -101,16 +100,16 @@ pub fn create_distributed_component_configs( NodeComponentConfigs::new( component_configs, services_component_config - .get_index_of::(&DistributedNodeServiceName::Batcher.into()) + .get_index_of::(&DistributedNodeServiceName::Batcher.into()) .unwrap(), services_component_config - .get_index_of::(&DistributedNodeServiceName::HttpServer.into()) + 
.get_index_of::(&DistributedNodeServiceName::HttpServer.into()) .unwrap(), services_component_config - .get_index_of::(&DistributedNodeServiceName::StateSync.into()) + .get_index_of::(&DistributedNodeServiceName::StateSync.into()) .unwrap(), services_component_config - .get_index_of::(&DistributedNodeServiceName::ClassManager.into()) + .get_index_of::(&DistributedNodeServiceName::ClassManager.into()) .unwrap(), ) } @@ -123,7 +122,7 @@ pub fn create_hybrid_component_configs( .expect("Failed to get an AvailablePorts instance for distributed node configs"); let ports = available_ports.get_next_ports(HYBRID_NODE_REQUIRED_PORTS_NUM); - let services_component_config = DeploymentName::HybridNode.get_component_configs(Some(ports)); + let services_component_config = NodeType::Hybrid.get_component_configs(Some(ports)); let mut component_configs: Vec = services_component_config.values().cloned().collect(); @@ -134,16 +133,16 @@ pub fn create_hybrid_component_configs( NodeComponentConfigs::new( component_configs, services_component_config - .get_index_of::(&HybridNodeServiceName::Core.into()) + .get_index_of::(&HybridNodeServiceName::Core.into()) .unwrap(), services_component_config - .get_index_of::(&HybridNodeServiceName::HttpServer.into()) + .get_index_of::(&HybridNodeServiceName::HttpServer.into()) .unwrap(), services_component_config - .get_index_of::(&HybridNodeServiceName::Core.into()) + .get_index_of::(&HybridNodeServiceName::Core.into()) .unwrap(), services_component_config - .get_index_of::(&HybridNodeServiceName::Core.into()) + .get_index_of::(&HybridNodeServiceName::Core.into()) .unwrap(), ) } diff --git a/crates/apollo_integration_tests/src/utils.rs b/crates/apollo_integration_tests/src/utils.rs index 40a8ee3857f..2b4456aee14 100644 --- a/crates/apollo_integration_tests/src/utils.rs +++ b/crates/apollo_integration_tests/src/utils.rs @@ -4,6 +4,7 @@ use std::time::Duration; use apollo_batcher::block_builder::BlockBuilderConfig; use 
apollo_batcher::config::BatcherConfig; +use apollo_batcher::pre_confirmed_cende_client::RECORDER_WRITE_PRE_CONFIRMED_BLOCK_PATH; use apollo_class_manager::class_storage::CachedClassStorageConfig; use apollo_class_manager::config::{ ClassManagerConfig, @@ -310,16 +311,27 @@ pub(crate) fn create_consensus_manager_configs_from_network_configs( // Creates a local recorder server that always returns a success status. pub fn spawn_success_recorder(socket_address: SocketAddr) -> JoinHandle<()> { tokio::spawn(async move { - let router = Router::new().route( - RECORDER_WRITE_BLOB_PATH, - post(move || { - async { - debug!("Received a request to write a blob."); - StatusCode::OK.to_string() - } - .instrument(tracing::debug_span!("success recorder write_blob")) - }), - ); + let router = Router::new() + .route( + RECORDER_WRITE_BLOB_PATH, + post(move || { + async { + debug!("Received a request to write a blob."); + StatusCode::OK.to_string() + } + .instrument(tracing::debug_span!("success recorder write_blob")) + }), + ) + .route( + RECORDER_WRITE_PRE_CONFIRMED_BLOCK_PATH, + post(move || { + async { + debug!("Received a request to write a pre-confirmed block."); + StatusCode::OK.to_string() + } + .instrument(tracing::debug_span!("success recorder write_pre_confirmed_block")) + }), + ); axum::Server::bind(&socket_address).serve(router.into_make_service()).await.unwrap(); }) } diff --git a/crates/apollo_l1_endpoint_monitor/src/monitor.rs b/crates/apollo_l1_endpoint_monitor/src/monitor.rs index 4a6be1a8d7f..85350241eb1 100644 --- a/crates/apollo_l1_endpoint_monitor/src/monitor.rs +++ b/crates/apollo_l1_endpoint_monitor/src/monitor.rs @@ -2,16 +2,15 @@ use std::collections::BTreeMap; use alloy::primitives::U64; use alloy::providers::{Provider, ProviderBuilder}; +use apollo_config::converters::{deserialize_vec_url, serialize_slice_url}; use apollo_config::dumping::{ser_param, SerializeConfig}; use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; use 
apollo_infra::component_definitions::ComponentStarter; use apollo_l1_endpoint_monitor_types::{L1EndpointMonitorError, L1EndpointMonitorResult}; -use serde::de::Error; -use serde::{Deserialize, Deserializer, Serialize}; +use serde::{Deserialize, Serialize}; use tracing::{error, warn}; use url::Url; use validator::Validate; - #[cfg(test)] #[path = "l1_endpoint_monitor_tests.rs"] pub mod l1_endpoint_monitor_tests; @@ -117,27 +116,3 @@ impl SerializeConfig for L1EndpointMonitorConfig { )]) } } - -// TODO(Tsabary): generalize these for Vec serde. - -/// Serializes a `&[Url]` into a single space-separated string. -fn serialize_slice_url(vector: &[Url]) -> String { - vector.iter().map(Url::as_str).collect::>().join(" ") -} - -/// Deserializes a space-separated string into a `Vec`. -/// Returns an error if any of the substrings cannot be parsed into a valid URL. -fn deserialize_vec_url<'de, D>(de: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - let raw: String = ::deserialize(de)?; - - if raw.trim().is_empty() { - return Ok(Vec::new()); - } - - raw.split_whitespace() - .map(|s| Url::parse(s).map_err(|e| D::Error::custom(format!("Invalid URL '{s}': {e}")))) - .collect() -} diff --git a/crates/apollo_l1_gas_price/Cargo.toml b/crates/apollo_l1_gas_price/Cargo.toml index 90b69d39f9e..197c0b1cb61 100644 --- a/crates/apollo_l1_gas_price/Cargo.toml +++ b/crates/apollo_l1_gas_price/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] apollo_config.workspace = true apollo_infra.workspace = true +apollo_infra_utils.workspace = true apollo_l1_gas_price_types.workspace = true apollo_metrics.workspace = true async-trait.workspace = true diff --git a/crates/apollo_l1_gas_price/src/eth_to_strk_oracle.rs b/crates/apollo_l1_gas_price/src/eth_to_strk_oracle.rs index 94698244d7b..d3d98abb63e 100644 --- a/crates/apollo_l1_gas_price/src/eth_to_strk_oracle.rs +++ b/crates/apollo_l1_gas_price/src/eth_to_strk_oracle.rs @@ -30,11 +30,6 @@ pub mod eth_to_strk_oracle_test; 
pub const ETH_TO_STRK_QUANTIZATION: u64 = 18; -pub enum Query { - Resolved(u128), - Unresolved(AbortOnDropHandle>), -} - fn hashmap_to_headermap(hash_map: Option>) -> HeaderMap { let mut header_map = HeaderMap::new(); if let Some(map) = hash_map { @@ -82,19 +77,19 @@ impl SerializeConfig for EthToStrkOracleConfig { lag refers to the fact that the interval `[T, T+k)` contains the conversion rate \ for queries in the interval `[T+k, T+2k)`. Should be configured in alignment \ with relevant query parameters in `base_url`, if required.", - ParamPrivacyInput::Private, + ParamPrivacyInput::Public, ), ser_param( "max_cache_size", &self.max_cache_size, "The maximum number of cached conversion rates.", - ParamPrivacyInput::Private, + ParamPrivacyInput::Public, ), ser_param( "query_timeout_sec", &self.query_timeout_sec, "The timeout (seconds) for the query to the eth to strk oracle.", - ParamPrivacyInput::Private, + ParamPrivacyInput::Public, ), ]) } @@ -122,7 +117,8 @@ pub struct EthToStrkOracleClient { /// HTTP headers required for requests. 
headers: HeaderMap, client: reqwest::Client, - cached_prices: Mutex>, + cached_prices: Mutex>, + queries: Mutex>>>, } impl EthToStrkOracleClient { @@ -141,13 +137,16 @@ impl EthToStrkOracleClient { cached_prices: Mutex::new(LruCache::new( NonZeroUsize::new(config.max_cache_size).expect("Invalid cache size"), )), + queries: Mutex::new(LruCache::new( + NonZeroUsize::new(config.max_cache_size).expect("Invalid cache size"), + )), } } fn spawn_query( &self, quantized_timestamp: u64, - ) -> AbortOnDropHandle> { + ) -> AbortOnDropHandle> { let adjusted_timestamp = quantized_timestamp * self.config.lag_interval_seconds; let client = self.client.clone(); let base_url = self.base_url.clone(); @@ -155,7 +154,7 @@ impl EthToStrkOracleClient { let query_timeout_sec = self.config.query_timeout_sec; let future = async move { - loop { + let response_body = loop { let mut url = base_url.clone(); url.query_pairs_mut().append_pair("timestamp", &adjusted_timestamp.to_string()); @@ -167,51 +166,45 @@ impl EthToStrkOracleClient { .await; match result { - Ok(inner_result) => return inner_result, + Ok(inner_result) => { + break inner_result?; + } Err(_) => { ETH_TO_STRK_ERROR_COUNT.increment(1); - warn!("Timeout when resolving query for timestamp {adjusted_timestamp}") + warn!("Timeout when resolving query for timestamp {adjusted_timestamp}"); + continue; } } - } + }; + resolve_query(response_body) }; AbortOnDropHandle::new(tokio::spawn(future)) } +} - fn resolve_query(&self, quantized_timestamp: u64) -> Result { - let Some(Query::Unresolved(handle)) = self - .cached_prices - .lock() - .expect("Lock on cached prices was poisoned due to a previous panic") - .pop(&quantized_timestamp) - else { - panic!("Entry must exist") - }; - let body = - handle.now_or_never().expect("Should only be called once the query completes")??; - let json: serde_json::Value = serde_json::from_str(&body)?; - let price = json - .get("price") - .and_then(|v| v.as_str()) - 
.ok_or(EthToStrkOracleClientError::MissingFieldError("price"))?; - // Convert hex to u128 - let rate = u128::from_str_radix(price.trim_start_matches("0x"), 16) - .expect("Failed to parse price as u128"); - // Extract decimals from API response - let decimals = json - .get("decimals") - .and_then(|v| v.as_u64()) - .ok_or(EthToStrkOracleClientError::MissingFieldError("decimals"))?; - if decimals != ETH_TO_STRK_QUANTIZATION { - return Err(EthToStrkOracleClientError::InvalidDecimalsError( - ETH_TO_STRK_QUANTIZATION, - decimals, - )); - } - ETH_TO_STRK_SUCCESS_COUNT.increment(1); - Ok(rate) +fn resolve_query(body: String) -> Result { + let json: serde_json::Value = serde_json::from_str(&body)?; + let price = json + .get("price") + .and_then(|v| v.as_str()) + .ok_or(EthToStrkOracleClientError::MissingFieldError("price"))?; + // Convert hex to u128 + let rate = u128::from_str_radix(price.trim_start_matches("0x"), 16) + .expect("Failed to parse price as u128"); + // Extract decimals from API response + let decimals = json + .get("decimals") + .and_then(|v| v.as_u64()) + .ok_or(EthToStrkOracleClientError::MissingFieldError("decimals"))?; + if decimals != ETH_TO_STRK_QUANTIZATION { + return Err(EthToStrkOracleClientError::InvalidDecimalsError( + ETH_TO_STRK_QUANTIZATION, + decimals, + )); } + ETH_TO_STRK_SUCCESS_COUNT.increment(1); + Ok(rate) } #[async_trait] @@ -225,43 +218,46 @@ impl EthToStrkOracleClientTrait for EthToStrkOracleClient { .checked_div(self.config.lag_interval_seconds) .expect("lag_interval_seconds should be non-zero"); - // Scope is to make sure the MutexGuard is dropped before the await. 
- { - let mut cached_prices = self.cached_prices.lock().expect("Lock poisoned"); - let Some(query) = cached_prices.get_mut(&quantized_timestamp) else { - cached_prices.push( - quantized_timestamp, - Query::Unresolved(self.spawn_query(quantized_timestamp)), - ); - warn!("Query not yet resolved: timestamp={timestamp}"); - return Err(EthToStrkOracleClientError::QueryNotReadyError(timestamp)); - }; + let mut cache = self.cached_prices.lock().unwrap(); - match query { - Query::Resolved(rate) => { - debug!("Cached conversion rate for timestamp {timestamp} is {rate}"); - return Ok(*rate); - } - Query::Unresolved(handle) => { - if !handle.is_finished() { - warn!("Query not yet resolved: timestamp={timestamp}"); - return Err(EthToStrkOracleClientError::QueryNotReadyError(timestamp)); - } - } - }; + if let Some(rate) = cache.get(&quantized_timestamp) { + debug!("Cached conversion rate for timestamp {timestamp} is {rate}"); + return Ok(*rate); + } + + // Check if there is a query already sent out for this timestamp, if not, start one. + let mut queries = self.queries.lock().unwrap(); + let handle = queries + .get_or_insert_mut(quantized_timestamp, || self.spawn_query(quantized_timestamp)); + + // If the query is not finished, return an error. 
+ if !handle.is_finished() { + warn!("Query not yet resolved: timestamp={timestamp}"); + return Err(EthToStrkOracleClientError::QueryNotReadyError(timestamp)); } - let rate = match self.resolve_query(quantized_timestamp) { + + let task_result = handle.now_or_never().expect("Handle must be finished if we got here"); + let query_result = match task_result { + Ok(query_result) => query_result, + Err(e) => { + queries.pop(&quantized_timestamp); + warn!("Query failed to join handle for timestamp {timestamp}: {e:?}"); + ETH_TO_STRK_ERROR_COUNT.increment(1); + return Err(EthToStrkOracleClientError::JoinError(e)); + } + }; + let rate = match query_result { Ok(rate) => rate, Err(e) => { - warn!("Query failed for timestamp {timestamp}: {e:?}"); + queries.pop(&quantized_timestamp); + warn!("Query failed to reach oracle for timestamp {timestamp}: {e:?}"); ETH_TO_STRK_ERROR_COUNT.increment(1); return Err(e); } }; - self.cached_prices - .lock() - .expect("Lock on cached prices was poisoned due to a previous panic") - .push(quantized_timestamp, Query::Resolved(rate)); + + // Make sure to cache the result. 
+ cache.put(quantized_timestamp, rate); debug!("Conversion rate for timestamp {timestamp} is {rate}"); Ok(rate) } diff --git a/crates/apollo_l1_gas_price/src/l1_gas_price_provider.rs b/crates/apollo_l1_gas_price/src/l1_gas_price_provider.rs index 6e774636daf..0bd44d3641b 100644 --- a/crates/apollo_l1_gas_price/src/l1_gas_price_provider.rs +++ b/crates/apollo_l1_gas_price/src/l1_gas_price_provider.rs @@ -4,12 +4,13 @@ use std::collections::{BTreeMap, VecDeque}; use apollo_config::dumping::{ser_param, SerializeConfig}; use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; use apollo_infra::component_definitions::ComponentStarter; +use apollo_infra_utils::info_every_n; use apollo_l1_gas_price_types::errors::L1GasPriceProviderError; use apollo_l1_gas_price_types::{GasPriceData, L1GasPriceProviderResult, PriceInfo}; use async_trait::async_trait; use serde::{Deserialize, Serialize}; use starknet_api::block::BlockTimestamp; -use tracing::{debug, info, warn}; +use tracing::{info, trace, warn}; use validator::Validate; use crate::metrics::{register_provider_metrics, L1_GAS_PRICE_PROVIDER_INSUFFICIENT_HISTORY}; @@ -133,7 +134,9 @@ impl L1GasPriceProvider { }); } } - debug!("Received price sample for L2 block: {:?}", new_data); + trace!("Received price sample for L1 block: {:?}", new_data); + // TODO(guy.f): Replace with info_every_n_sec once implemented. 
+ info_every_n!(100, "Received price sample for L1 block: {:?}", new_data); samples.push(new_data); Ok(()) } diff --git a/crates/apollo_l1_gas_price/src/l1_gas_price_scraper.rs b/crates/apollo_l1_gas_price/src/l1_gas_price_scraper.rs index 8be4297715d..a38b7a1ae3e 100644 --- a/crates/apollo_l1_gas_price/src/l1_gas_price_scraper.rs +++ b/crates/apollo_l1_gas_price/src/l1_gas_price_scraper.rs @@ -10,6 +10,7 @@ use apollo_config::validators::validate_ascii; use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; use apollo_infra::component_client::ClientError; use apollo_infra::component_definitions::ComponentStarter; +use apollo_infra_utils::info_every_n; use apollo_l1_gas_price_types::errors::L1GasPriceClientError; use apollo_l1_gas_price_types::{GasPriceData, L1GasPriceProviderClient, PriceInfo}; use async_trait::async_trait; @@ -18,7 +19,7 @@ use serde::{Deserialize, Serialize}; use starknet_api::block::GasPrice; use starknet_api::core::ChainId; use thiserror::Error; -use tracing::{debug, error, info, warn}; +use tracing::{error, info, trace, warn}; use validator::Validate; use crate::metrics::{ @@ -165,7 +166,12 @@ impl L1GasPriceScraper { // Not enough blocks under current finality. Try again later. return Ok(start_block_number); }; - debug!( + trace!( + "Scraping gas prices starting from block {start_block_number} to {last_block_number}." + ); + // TODO(guy.f): Replace with info_every_n_sec once implemented. + info_every_n!( + 100, "Scraping gas prices starting from block {start_block_number} to {last_block_number}." 
); for block_number in start_block_number..=last_block_number { diff --git a/crates/apollo_l1_provider/Cargo.toml b/crates/apollo_l1_provider/Cargo.toml index e2b81197804..e092496a88b 100644 --- a/crates/apollo_l1_provider/Cargo.toml +++ b/crates/apollo_l1_provider/Cargo.toml @@ -17,6 +17,7 @@ testing = [ apollo_batcher_types.workspace = true apollo_config.workspace = true apollo_infra.workspace = true +apollo_infra_utils.workspace = true apollo_l1_provider_types.workspace = true apollo_metrics.workspace = true apollo_state_sync_types.workspace = true diff --git a/crates/apollo_l1_provider/src/l1_provider.rs b/crates/apollo_l1_provider/src/l1_provider.rs index 3887e8eb7cb..2071a5e7cbf 100644 --- a/crates/apollo_l1_provider/src/l1_provider.rs +++ b/crates/apollo_l1_provider/src/l1_provider.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use apollo_batcher_types::communication::SharedBatcherClient; use apollo_infra::component_definitions::ComponentStarter; +use apollo_infra_utils::info_every_n; use apollo_l1_provider_types::errors::L1ProviderError; use apollo_l1_provider_types::{ Event, @@ -146,7 +147,8 @@ impl L1Provider { return Err(L1ProviderError::Uninitialized); } - info!("Adding {} l1 events", events.len()); + // TODO(guy.f): Replace with info_every_n_sec once implemented. 
+ info_every_n!(100, "Adding {} l1 events", events.len()); trace!("Adding events: {events:?}"); for event in events { diff --git a/crates/apollo_l1_provider/src/l1_scraper.rs b/crates/apollo_l1_provider/src/l1_scraper.rs index 02eca586704..da80ffc9073 100644 --- a/crates/apollo_l1_provider/src/l1_scraper.rs +++ b/crates/apollo_l1_provider/src/l1_scraper.rs @@ -8,6 +8,7 @@ use apollo_config::validators::validate_ascii; use apollo_config::{ParamPath, ParamPrivacyInput, SerializedParam}; use apollo_infra::component_client::ClientError; use apollo_infra::component_definitions::ComponentStarter; +use apollo_infra_utils::info_every_n; use apollo_l1_provider_types::errors::{L1ProviderClientError, L1ProviderError}; use apollo_l1_provider_types::{Event, SharedL1ProviderClient}; use async_trait::async_trait; @@ -19,7 +20,7 @@ use starknet_api::core::ChainId; use starknet_api::StarknetApiError; use thiserror::Error; use tokio::time::sleep; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, error, info, instrument, trace, warn}; use validator::Validate; use crate::metrics::{ @@ -80,6 +81,9 @@ impl L1Scraper { self.assert_no_l1_reorgs().await?; let (latest_l1_block, events) = self.fetch_events().await?; + trace!("scraped up to {latest_l1_block:?}"); + // TODO(guy.f): Replace with info_every_n_sec once implemented. + info_every_n!(100, "scraped up to {latest_l1_block:?}"); // Sending even if there are no events, to keep the flow as simple/debuggable as possible. // Perf hit is minimal, since the scraper is on the same machine as the provider (no net). 
diff --git a/crates/apollo_l1_provider_types/src/lib.rs b/crates/apollo_l1_provider_types/src/lib.rs index e0cb1025cb4..3744a54e2cf 100644 --- a/crates/apollo_l1_provider_types/src/lib.rs +++ b/crates/apollo_l1_provider_types/src/lib.rs @@ -237,7 +237,7 @@ pub enum Event { tx_hash: TransactionHash, cancellation_request_timestamp: BlockTimestamp, }, - TransactionConsumed(EventData), + TransactionConsumed(TransactionHash), } impl Event { @@ -257,7 +257,13 @@ impl Event { Self::TransactionCancellationStarted { tx_hash, cancellation_request_timestamp } } L1Event::MessageToL2Canceled(event_data) => Self::TransactionCanceled(event_data), - L1Event::ConsumedMessageToL2(event_data) => Self::TransactionConsumed(event_data), + L1Event::ConsumedMessageToL2(tx) => { + let tx_hash = tx.calculate_transaction_hash( + chain_id, + &starknet_api::transaction::L1HandlerTransaction::VERSION, + )?; + Self::TransactionConsumed(tx_hash) + } }) } } diff --git a/crates/apollo_mempool/src/mempool.rs b/crates/apollo_mempool/src/mempool.rs index cf0015496fc..096c00ad972 100644 --- a/crates/apollo_mempool/src/mempool.rs +++ b/crates/apollo_mempool/src/mempool.rs @@ -293,16 +293,16 @@ impl Mempool { self.state.stage(tx_reference)?; } - info!( - "Returned {} out of {n_txs} transactions, ready for sequencing.", - eligible_tx_references.len() - ); - debug!( - "Returned mempool txs: {:?}", - eligible_tx_references.iter().map(|tx| tx.tx_hash).collect::>() - ); + let n_returned_txs = eligible_tx_references.len(); + if n_returned_txs != 0 { + info!("Returned {n_returned_txs} out of {n_txs} transactions, ready for sequencing."); + debug!( + "Returned mempool txs: {:?}", + eligible_tx_references.iter().map(|tx| tx.tx_hash).collect::>() + ); + } - metric_set_get_txs_size(eligible_tx_references.len()); + metric_set_get_txs_size(n_returned_txs); self.update_state_metrics(); self.update_accounts_with_gap(account_nonce_updates); @@ -337,14 +337,14 @@ impl Mempool { let mut account_nonce_updates = 
self.remove_expired_txs(); self.add_ready_declares(); - if self.exceeds_capacity(&args.tx) { - self.handle_capacity_overflow(&args.tx, args.account_state.nonce)?; - } - let tx_reference = TransactionReference::new(&args.tx); self.validate_incoming_tx(tx_reference, args.account_state.nonce)?; self.handle_fee_escalation(&args.tx)?; + if self.exceeds_capacity(&args.tx) { + self.handle_capacity_overflow(&args.tx, args.account_state.nonce)?; + } + metric_handle.transaction_inserted(); // May override a removed queued nonce with the received account nonce or the account's diff --git a/crates/apollo_network/src/config_test.rs b/crates/apollo_network/src/config_test.rs index 5be71857cf3..164249bbdc2 100644 --- a/crates/apollo_network/src/config_test.rs +++ b/crates/apollo_network/src/config_test.rs @@ -1,25 +1,54 @@ use std::net::Ipv4Addr; -use libp2p::identity::Keypair; -use libp2p::PeerId; +use libp2p::{identity, multiaddr, Multiaddr, PeerId}; use validator::Validate; +use crate::test_utils::DUMMY_MULTI_ADDRESS; use crate::utils::make_tcp_multiaddr; use crate::NetworkConfig; #[test] -fn test_network_config_bootstrap_peer_multiaddr_validation() { - let mut config = NetworkConfig::default(); +fn test_bootstrap_peer_multiaddr_empty_is_valid() { + let config = NetworkConfig::default(); config.validate().unwrap(); +} - let key = [0u8; 32]; - let keypair = Keypair::ed25519_from_bytes(key).unwrap(); - let mutliaddr = +#[test] +fn test_bootstrap_peer_multiaddr_unique_addresses_is_valid() { + let key = [1u8; 32]; + let keypair = identity::Keypair::ed25519_from_bytes(key).unwrap(); + let second_addr = make_tcp_multiaddr(Ipv4Addr::LOCALHOST, 12345, PeerId::from_public_key(&keypair.public())); - config.bootstrap_peer_multiaddr = Some(vec![mutliaddr.clone()]); + let config = NetworkConfig { + bootstrap_peer_multiaddr: Some(vec![DUMMY_MULTI_ADDRESS.clone(), second_addr]), + ..NetworkConfig::default() + }; + config.validate().unwrap(); +} + +#[test] +fn 
test_bootstrap_peer_multiaddr_duplicates_are_invalid() { + let config = NetworkConfig { + bootstrap_peer_multiaddr: Some(vec![ + DUMMY_MULTI_ADDRESS.clone(), + DUMMY_MULTI_ADDRESS.clone(), + ]), + ..NetworkConfig::default() + }; - config.bootstrap_peer_multiaddr = Some(vec![mutliaddr.clone(), mutliaddr]); + config.validate().unwrap_err(); +} + +#[test] +fn test_bootstrap_peer_multiaddr_missing_peer_id_is_invalid() { + let mut config = NetworkConfig::default(); + + let mutliaddr = Multiaddr::empty() + .with(multiaddr::Protocol::Ip4(std::net::Ipv4Addr::LOCALHOST)) + .with(multiaddr::Protocol::Tcp(12345)); + + config.bootstrap_peer_multiaddr = Some(vec![mutliaddr.clone()]); config.validate().unwrap_err(); } diff --git a/crates/apollo_network/src/test_utils/mod.rs b/crates/apollo_network/src/test_utils/mod.rs index 113b14751ee..ac4006bd56f 100644 --- a/crates/apollo_network/src/test_utils/mod.rs +++ b/crates/apollo_network/src/test_utils/mod.rs @@ -5,9 +5,10 @@ use std::fmt::Debug; use std::time::Duration; use futures::future::Either; +use lazy_static::lazy_static; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::{ConnectionId, NetworkBehaviour, Swarm, SwarmEvent}; -use libp2p::{PeerId, Stream, StreamProtocol}; +use libp2p::{identity, multiaddr, Multiaddr, PeerId, Stream, StreamProtocol}; use libp2p_swarm_test::SwarmExt; use tokio::task::JoinHandle; use tokio_stream::StreamExt; @@ -15,6 +16,19 @@ use tokio_stream::StreamExt; use crate::sqmr::Bytes; use crate::utils::StreamMap; +lazy_static! { + #[cfg(test)] + pub static ref DUMMY_PEER_ID: PeerId = { + let key = [0u8; 32]; + let keypair = identity::Keypair::ed25519_from_bytes(key).unwrap(); + PeerId::from_public_key(&keypair.public()) + }; + #[cfg(test)] + pub static ref DUMMY_MULTI_ADDRESS: Multiaddr = { + Multiaddr::empty().with(multiaddr::Protocol::P2p(*DUMMY_PEER_ID)) + }; +} + /// Create two streams that are connected to each other. 
Return them and a join handle for a thread /// that will perform the sends between the streams (this thread will run forever so it shouldn't /// be joined). diff --git a/crates/apollo_node/Cargo.toml b/crates/apollo_node/Cargo.toml index 4161e059493..e67004c1a72 100644 --- a/crates/apollo_node/Cargo.toml +++ b/crates/apollo_node/Cargo.toml @@ -59,8 +59,6 @@ validator.workspace = true [dev-dependencies] apollo_config = { workspace = true, features = ["testing"] } apollo_infra_utils = { workspace = true, features = ["testing"] } -assert_matches.workspace = true -mempool_test_utils.workspace = true pretty_assertions.workspace = true tokio-util = { workspace = true, features = ["rt"] } diff --git a/config/sequencer/default_config.json b/crates/apollo_node/resources/config_schema.json similarity index 99% rename from config/sequencer/default_config.json rename to crates/apollo_node/resources/config_schema.json index ba8dca373eb..3d11f827890 100644 --- a/config/sequencer/default_config.json +++ b/crates/apollo_node/resources/config_schema.json @@ -42,7 +42,7 @@ "batcher_config.block_builder_config.bouncer_config.block_max_capacity.proving_gas": { "description": "An upper bound on the total builtins and steps gas usage used in a block.", "privacy": "Public", - "value": 4000000000 + "value": 5000000000 }, "batcher_config.block_builder_config.bouncer_config.block_max_capacity.sierra_gas": { "description": "An upper bound on the total sierra_gas used in a block.", @@ -57,7 +57,7 @@ "batcher_config.block_builder_config.bouncer_config.builtin_weights.add_mod": { "description": "Add_mod gas weight.", "privacy": "Public", - "value": 250 + "value": 312 }, "batcher_config.block_builder_config.bouncer_config.builtin_weights.bitwise": { "description": "Bitwise gas weight.", @@ -67,17 +67,17 @@ "batcher_config.block_builder_config.bouncer_config.builtin_weights.ec_op": { "description": "Ec_op gas weight.", "privacy": "Public", - "value": 571900 + "value": 714875 }, 
"batcher_config.block_builder_config.bouncer_config.builtin_weights.ecdsa": { "description": "Ecdsa gas weight.", "privacy": "Public", - "value": 1333333 + "value": 1666666 }, "batcher_config.block_builder_config.bouncer_config.builtin_weights.keccak": { "description": "Keccak gas weight.", "privacy": "Public", - "value": 408566 + "value": 510707 }, "batcher_config.block_builder_config.bouncer_config.builtin_weights.mul_mod": { "description": "Mul_mod gas weight.", @@ -87,12 +87,12 @@ "batcher_config.block_builder_config.bouncer_config.builtin_weights.pedersen": { "description": "Pedersen gas weight.", "privacy": "Public", - "value": 8100 + "value": 10125 }, "batcher_config.block_builder_config.bouncer_config.builtin_weights.poseidon": { "description": "Poseidon gas weight.", "privacy": "Public", - "value": 8334 + "value": 6250 }, "batcher_config.block_builder_config.bouncer_config.builtin_weights.range_check": { "description": "Range_check gas weight.", @@ -137,7 +137,7 @@ "batcher_config.block_builder_config.tx_polling_interval_millis": { "description": "Time to wait (in milliseconds) between transaction requests when the previous request returned no transactions.", "privacy": "Public", - "value": 100 + "value": 1 }, "batcher_config.block_builder_config.versioned_constants_overrides.invoke_tx_max_n_steps": { "description": "Maximum number of steps the invoke function is allowed to run.", @@ -991,7 +991,7 @@ }, "consensus_manager_config.cende_config.skip_write_height": { "description": "A height that the consensus can skip writing to Aerospike. 
Needed for booting up (no previous height blob to write) or to handle extreme cases (all the nodes failed).", - "privacy": "Private", + "privacy": "Public", "value": 0 }, "consensus_manager_config.cende_config.skip_write_height.#is_none": { @@ -1126,17 +1126,17 @@ }, "consensus_manager_config.eth_to_strk_oracle_config.lag_interval_seconds": { "description": "The size of the interval (seconds) that the eth to strk rate is taken on. The lag refers to the fact that the interval `[T, T+k)` contains the conversion rate for queries in the interval `[T+k, T+2k)`. Should be configured in alignment with relevant query parameters in `base_url`, if required.", - "privacy": "Private", + "privacy": "Public", "value": 1 }, "consensus_manager_config.eth_to_strk_oracle_config.max_cache_size": { "description": "The maximum number of cached conversion rates.", - "privacy": "Private", + "privacy": "Public", "value": 100 }, "consensus_manager_config.eth_to_strk_oracle_config.query_timeout_sec": { "description": "The timeout (seconds) for the query to the eth to strk oracle.", - "privacy": "Private", + "privacy": "Public", "value": 3 }, "consensus_manager_config.immediate_active_height": { @@ -1294,6 +1294,11 @@ "privacy": "Public", "value": "0x1" }, + "gateway_config.stateful_tx_validator_config.min_gas_price_percentage": { + "description": "Minimum gas price as percentage of threshold to accept transactions.", + "privacy": "Public", + "value": 100 + }, "gateway_config.stateful_tx_validator_config.reject_future_declare_txs": { "description": "If true, rejects declare transactions with future nonces.", "privacy": "Public", @@ -1357,7 +1362,7 @@ "gateway_config.stateless_tx_validator_config.min_gas_price": { "description": "Minimum gas price for transactions.", "privacy": "Public", - "value": 100000000 + "value": 3000000000 }, "gateway_config.stateless_tx_validator_config.min_sierra_version.major": { "description": "The major version of the configuration.", diff --git 
a/crates/apollo_node/src/bin/sequencer_dump_config.rs b/crates/apollo_node/src/bin/update_apollo_node_config_schema.rs similarity index 69% rename from crates/apollo_node/src/bin/sequencer_dump_config.rs rename to crates/apollo_node/src/bin/update_apollo_node_config_schema.rs index 33721209a0b..feb4821e0e6 100644 --- a/crates/apollo_node/src/bin/sequencer_dump_config.rs +++ b/crates/apollo_node/src/bin/update_apollo_node_config_schema.rs @@ -3,13 +3,12 @@ use apollo_node::config::node_config::{ SequencerNodeConfig, CONFIG_NON_POINTERS_WHITELIST, CONFIG_POINTERS, - DEFAULT_CONFIG_PATH, + CONFIG_SCHEMA_PATH, }; -/// Updates the default config file by: -/// cargo run --bin sequencer_dump_config -q +/// Updates the apollo node config schema. fn main() { SequencerNodeConfig::default() - .dump_to_file(&CONFIG_POINTERS, &CONFIG_NON_POINTERS_WHITELIST, DEFAULT_CONFIG_PATH) + .dump_to_file(&CONFIG_POINTERS, &CONFIG_NON_POINTERS_WHITELIST, CONFIG_SCHEMA_PATH) .expect("dump to file error"); } diff --git a/crates/apollo_node/src/components.rs b/crates/apollo_node/src/components.rs index b7c4f785e17..c04f5021b8c 100644 --- a/crates/apollo_node/src/components.rs +++ b/crates/apollo_node/src/components.rs @@ -1,5 +1,5 @@ use apollo_batcher::batcher::{create_batcher, Batcher}; -use apollo_batcher::pre_confirmed_cende_client::PreConfirmedCendeClient; +use apollo_batcher::pre_confirmed_cende_client::PreconfirmedCendeClient; use apollo_class_manager::class_manager::create_class_manager; use apollo_class_manager::ClassManager; use apollo_compile_to_casm::{create_sierra_compiler, SierraCompiler}; @@ -73,7 +73,7 @@ pub async fn create_node_components( let class_manager_client = clients .get_class_manager_shared_client() .expect("Class Manager Client should be available"); - let pre_confirmed_cende_client = std::sync::Arc::new(PreConfirmedCendeClient::new( + let pre_confirmed_cende_client = std::sync::Arc::new(PreconfirmedCendeClient::new( 
config.batcher_config.pre_confirmed_cende_config.clone(), )); Some(create_batcher( diff --git a/crates/apollo_node/src/config/config_test.rs b/crates/apollo_node/src/config/config_test.rs index bfbbe7cc6e7..e3b95f85bfe 100644 --- a/crates/apollo_node/src/config/config_test.rs +++ b/crates/apollo_node/src/config/config_test.rs @@ -1,8 +1,9 @@ use std::net::{IpAddr, Ipv4Addr}; -use apollo_config::test_utils::assert_default_config_file_is_up_to_date; +use apollo_config::dumping::{combine_config_map_and_pointers, SerializeConfig}; use apollo_infra::component_client::RemoteClientConfig; use apollo_infra::component_server::LocalServerConfig; +use apollo_infra_utils::dumping::serialize_to_file_test; use rstest::rstest; use validator::Validate; @@ -15,9 +16,11 @@ use crate::config::node_config::{ SequencerNodeConfig, CONFIG_NON_POINTERS_WHITELIST, CONFIG_POINTERS, - DEFAULT_CONFIG_PATH, + CONFIG_SCHEMA_PATH, }; +const FIX_BINARY_NAME: &str = "update_apollo_node_config_schema"; + const LOCAL_EXECUTION_MODE: ReactiveComponentExecutionMode = ReactiveComponentExecutionMode::LocalExecutionWithRemoteDisabled; const ENABLE_REMOTE_CONNECTION_MODE: ReactiveComponentExecutionMode = @@ -83,16 +86,16 @@ fn valid_component_execution_config( } /// Test the validation of the struct SequencerNodeConfig and that the default config file is up to -/// date. To update the default config file, run: -/// cargo run --bin sequencer_dump_config -q +/// date. To update the default config file, run `cargo run --bin `. 
#[test] fn default_config_file_is_up_to_date() { - assert_default_config_file_is_up_to_date::( - "sequencer_dump_config", - DEFAULT_CONFIG_PATH, + let combined_map = combine_config_map_and_pointers( + SequencerNodeConfig::default().dump(), &CONFIG_POINTERS, &CONFIG_NON_POINTERS_WHITELIST, - ); + ) + .unwrap(); + serialize_to_file_test(&combined_map, CONFIG_SCHEMA_PATH, FIX_BINARY_NAME); } #[test] diff --git a/crates/apollo_node/src/config/mod.rs b/crates/apollo_node/src/config/mod.rs index f657ca57544..5de0016ce8f 100644 --- a/crates/apollo_node/src/config/mod.rs +++ b/crates/apollo_node/src/config/mod.rs @@ -9,3 +9,6 @@ pub mod config_utils; pub mod definitions; pub mod monitoring; pub mod node_config; + +#[cfg(any(feature = "testing", test))] +pub mod test_utils; diff --git a/crates/apollo_node/src/config/node_config.rs b/crates/apollo_node/src/config/node_config.rs index 3911396396e..b12f9237fe6 100644 --- a/crates/apollo_node/src/config/node_config.rs +++ b/crates/apollo_node/src/config/node_config.rs @@ -42,8 +42,8 @@ use crate::config::monitoring::MonitoringConfig; use crate::version::VERSION_FULL; // The path of the default configuration file, provided as part of the crate. -pub const DEFAULT_CONFIG_PATH: &str = "config/sequencer/default_config.json"; -pub const POINTER_TARGET_VALUE: &str = "PointerTarget"; +pub const CONFIG_SCHEMA_PATH: &str = "crates/apollo_node/resources/config_schema.json"; +pub(crate) const POINTER_TARGET_VALUE: &str = "PointerTarget"; // Configuration parameters that share the same value across multiple components. pub static CONFIG_POINTERS: LazyLock = LazyLock::new(|| { @@ -239,10 +239,9 @@ impl SerializeConfig for SequencerNodeConfig { } impl SequencerNodeConfig { - /// Creates a config object. Selects the values from the default file and from resources with - /// higher priority. + /// Creates a config object, using the config schema and provided resources. 
pub fn load_and_process(args: Vec) -> Result { - let config_file_name = &resolve_project_relative_path(DEFAULT_CONFIG_PATH)?; + let config_file_name = &resolve_project_relative_path(CONFIG_SCHEMA_PATH)?; let default_config_file = File::open(config_file_name)?; load_and_process_config(default_config_file, node_command(), args, true) } diff --git a/crates/apollo_node/src/config/test_utils.rs b/crates/apollo_node/src/config/test_utils.rs new file mode 100644 index 00000000000..6b499dd5d4c --- /dev/null +++ b/crates/apollo_node/src/config/test_utils.rs @@ -0,0 +1,40 @@ +use std::collections::HashSet; +use std::fs::File; + +use apollo_config::{ParamPath, SerializedParam}; +use apollo_infra_utils::path::resolve_project_relative_path; +use serde_json::{Map, Value}; + +use crate::config::node_config::{CONFIG_POINTERS, CONFIG_SCHEMA_PATH}; + +/// Returns the set of all non-pointer private parameters and all pointer target parameters pointed +/// by private parameters. +pub fn private_parameters() -> HashSet { + let config_file_name = &resolve_project_relative_path(CONFIG_SCHEMA_PATH).unwrap(); + let config_schema_file = File::open(config_file_name).unwrap(); + let deserialized_config_schema: Map = + serde_json::from_reader(config_schema_file).unwrap(); + + let mut private_values = HashSet::new(); + for (param_path, stored_param) in deserialized_config_schema.into_iter() { + let ser_param = serde_json::from_value::(stored_param).unwrap(); + // Find all private parameters. + if ser_param.is_private() { + let mut included_as_a_pointer = false; + for ((pointer_target_param_path, _ser_param), pointing_params) in CONFIG_POINTERS.iter() + { + // If the parameter is a pointer, add its pointer target value. + if pointing_params.contains(¶m_path) { + private_values.insert(pointer_target_param_path.clone()); + included_as_a_pointer = true; + continue; + } + } + if !included_as_a_pointer { + // If the parameter is not a pointer, add it directly. 
+ private_values.insert(param_path); + } + } + } + private_values +} diff --git a/crates/apollo_proc_macros/Cargo.toml b/crates/apollo_proc_macros/Cargo.toml index f3b6d865596..086a7e7d6f3 100644 --- a/crates/apollo_proc_macros/Cargo.toml +++ b/crates/apollo_proc_macros/Cargo.toml @@ -7,18 +7,10 @@ license-file.workspace = true description = "Procedural macros for the Papyrus node" [dependencies] +proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = ["full"] } -[dev-dependencies] -apollo_metrics.workspace = true -apollo_test_utils.workspace = true -metrics.workspace = true -metrics-exporter-prometheus.workspace = true -papyrus_common.workspace = true -prometheus-parse.workspace = true -rstest.workspace = true - [lib] proc-macro = true diff --git a/crates/apollo_proc_macros/src/lib.rs b/crates/apollo_proc_macros/src/lib.rs index 958893b6a5d..7c8c5506fc9 100644 --- a/crates/apollo_proc_macros/src/lib.rs +++ b/crates/apollo_proc_macros/src/lib.rs @@ -1,11 +1,15 @@ +use std::hash::{DefaultHasher, Hash, Hasher}; + use proc_macro::TokenStream; use quote::{quote, ToTokens}; use syn::parse::{Parse, ParseStream}; +use syn::punctuated::Punctuated; use syn::{ parse, parse2, parse_macro_input, parse_str, + Expr, ExprLit, Ident, ItemFn, @@ -390,3 +394,56 @@ pub fn handle_all_response_variants(input: TokenStream) -> TokenStream { TokenStream::from(expanded) } + +struct LogEveryNMacroInput { + log_macro: syn::Path, + n: Expr, + args: Punctuated, +} + +impl Parse for LogEveryNMacroInput { + fn parse(input: ParseStream<'_>) -> syn::Result { + let log_macro: syn::Path = input.parse()?; + input.parse::()?; + let n: Expr = input.parse()?; + input.parse::()?; + let args: Punctuated = Punctuated::parse_terminated(input)?; + + Ok(LogEveryNMacroInput { log_macro, n, args }) + } +} + +/// An internal helper macro for logging a message every `n` calls to the macro. +/// Do not use this directly. Instead use the `info_every_n!`, `debug_every_n!`, etc. 
macros. +#[proc_macro] +pub fn log_every_n(input: TokenStream) -> TokenStream { + let LogEveryNMacroInput { log_macro, n, args, .. } = + parse_macro_input!(input as LogEveryNMacroInput); + + // Use call site span for uniqueness + let span = proc_macro::Span::call_site(); + let span_str = format!("{span:?}"); + + let mut hasher = DefaultHasher::new(); + span_str.hash(&mut hasher); + + let hash_id = format!("{:x}", hasher.finish()); // short identifier + let ident_str = format!("__TRACING_COUNT_{hash_id}"); + let ident = Ident::new(&ident_str, proc_macro2::Span::call_site()); + + let args = args.into_iter().collect::>(); + + let expanded = quote! { + { + static #ident: ::std::sync::OnceLock<::std::sync::atomic::AtomicUsize> = ::std::sync::OnceLock::new(); + let counter = #ident.get_or_init(|| ::std::sync::atomic::AtomicUsize::new(0)); + let current_count = counter.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed); + + if current_count % (#n) == 0 { + #log_macro!(#(#args),*); + } + } + }; + + TokenStream::from(expanded) +} diff --git a/crates/apollo_proc_macros_tests/Cargo.toml b/crates/apollo_proc_macros_tests/Cargo.toml new file mode 100644 index 00000000000..33e025bc837 --- /dev/null +++ b/crates/apollo_proc_macros_tests/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "apollo_proc_macros_tests" +version.workspace = true +edition.workspace = true +repository.workspace = true +license-file.workspace = true + +[dependencies] +apollo_proc_macros.workspace = true + +[dev-dependencies] +apollo_metrics.workspace = true +apollo_test_utils.workspace = true +metrics.workspace = true +metrics-exporter-prometheus.workspace = true +papyrus_common.workspace = true +prometheus-parse.workspace = true +rstest.workspace = true + +[lints] +workspace = true diff --git a/crates/apollo_proc_macros_tests/src/lib.rs b/crates/apollo_proc_macros_tests/src/lib.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/crates/apollo_proc_macros_tests/src/lib.rs @@ -0,0 +1 @@ 
+ diff --git a/crates/apollo_proc_macros/tests/latency_histogram.rs b/crates/apollo_proc_macros_tests/tests/latency_histogram.rs similarity index 100% rename from crates/apollo_proc_macros/tests/latency_histogram.rs rename to crates/apollo_proc_macros_tests/tests/latency_histogram.rs diff --git a/crates/apollo_protobuf/src/consensus_test.rs b/crates/apollo_protobuf/src/consensus_test.rs index 635089f7896..f473a725842 100644 --- a/crates/apollo_protobuf/src/consensus_test.rs +++ b/crates/apollo_protobuf/src/consensus_test.rs @@ -1,6 +1,7 @@ // ConsensusBlockInfo tests. use starknet_api::block::GasPrice; +use starknet_api::StarknetApiError; #[test] fn wei_to_fri_converts_correctly() { @@ -8,12 +9,33 @@ fn wei_to_fri_converts_correctly() { let conversion_rate = 8 * u128::pow(10, 20); let price_in_wei = GasPrice(5); let price_in_fri = GasPrice(4000); - assert_eq!(price_in_wei.wei_to_fri(conversion_rate), price_in_fri); - assert_eq!(price_in_fri.fri_to_wei(conversion_rate), price_in_wei); + assert_eq!(price_in_wei.wei_to_fri(conversion_rate).unwrap(), price_in_fri); + assert_eq!(price_in_fri.fri_to_wei(conversion_rate).unwrap(), price_in_wei); } #[test] -#[should_panic] -fn wei_to_fri_panics_on_gas_too_high() { - GasPrice(u128::pow(2, 127)).wei_to_fri(4); +fn wei_to_fri_errors_on_gas_too_high() { + assert!( + GasPrice(u128::pow(2, 127)).wei_to_fri(4) + == Err(StarknetApiError::GasPriceConversionError("Gas price is too high".to_string())) + ); +} + +#[test] +fn fri_to_wei_errors_on_gas_too_high() { + // Note this fails even if rate is 1, since we first multiply by WEI_PER_ETH=10^9 + assert!( + GasPrice(u128::pow(2, 127)).fri_to_wei(1) + == Err(StarknetApiError::GasPriceConversionError("Gas price is too high".to_string())) + ); +} + +#[test] +fn fri_to_wei_errors_on_conversion_rate_zero() { + assert!( + GasPrice(5).fri_to_wei(0) + == Err(StarknetApiError::GasPriceConversionError( + "FRI to ETH rate must be non-zero".to_string() + )) + ); } diff --git 
a/crates/blockifier/src/blockifier/stateful_validator.rs b/crates/blockifier/src/blockifier/stateful_validator.rs index 15c24c5f017..ef747e821bf 100644 --- a/crates/blockifier/src/blockifier/stateful_validator.rs +++ b/crates/blockifier/src/blockifier/stateful_validator.rs @@ -78,6 +78,10 @@ impl StatefulValidator { Ok(()) } + pub fn block_context(&self) -> &BlockContext { + self.tx_executor.block_context.as_ref() + } + fn state(&mut self) -> &mut CachedState { self.tx_executor.block_state.as_mut().expect(BLOCK_STATE_ACCESS_ERR) } diff --git a/crates/blockifier/src/blockifier/transaction_executor.rs b/crates/blockifier/src/blockifier/transaction_executor.rs index b5b64cccf15..8e2f7840608 100644 --- a/crates/blockifier/src/blockifier/transaction_executor.rs +++ b/crates/blockifier/src/blockifier/transaction_executor.rs @@ -160,6 +160,7 @@ impl TransactionExecutor { &transactional_state, &tx_state_changes_keys, &tx_execution_info.summarize(&self.block_context.versioned_constants), + &tx_execution_info.summarize_builtins(), &tx_execution_info.receipt.resources, &self.block_context.versioned_constants, )?; @@ -245,17 +246,30 @@ pub(crate) fn finalize_block( None }; + // Take CasmHashComputationData from bouncer, + // and verify that class hashes are the same. 
let mut bouncer = lock_bouncer(bouncer); + let casm_hash_computation_data_sierra_gas = + mem::take(&mut bouncer.casm_hash_computation_data_sierra_gas); + let casm_hash_computation_data_proving_gas = + mem::take(&mut bouncer.casm_hash_computation_data_proving_gas); + assert_eq!( + casm_hash_computation_data_sierra_gas + .class_hash_to_casm_hash_computation_gas + .keys() + .collect::>(), + casm_hash_computation_data_proving_gas + .class_hash_to_casm_hash_computation_gas + .keys() + .collect::>() + ); + Ok(BlockExecutionSummary { state_diff: state_diff.into(), compressed_state_diff, bouncer_weights: *bouncer.get_accumulated_weights(), - casm_hash_computation_data_sierra_gas: mem::take( - &mut bouncer.casm_hash_computation_data_sierra_gas, - ), - casm_hash_computation_data_proving_gas: mem::take( - &mut bouncer.casm_hash_computation_data_proving_gas, - ), + casm_hash_computation_data_sierra_gas, + casm_hash_computation_data_proving_gas, }) } diff --git a/crates/blockifier/src/blockifier/transfers_flow_test.rs b/crates/blockifier/src/blockifier/transfers_flow_test.rs index e6f1ce1980e..2b8ec6ee3f2 100644 --- a/crates/blockifier/src/blockifier/transfers_flow_test.rs +++ b/crates/blockifier/src/blockifier/transfers_flow_test.rs @@ -31,24 +31,28 @@ pub fn transfers_flow_test( // sure the results are the same. 
for concurrency_enabled in [false, true] { for cairo1_version in RunnableCairo1::iter() { - let result = transfers_flow_test_body( + let mut result = transfers_flow_test_body( timeout, concurrency_enabled, cairo1_version, recipient_generator_type, ); - let Some((_expected_tx_execution_infos, expected_block_summary)) = &expected_result + for execution_info in &mut result.0 { + execution_info.clear_call_infos_nonessential_fields_for_comparison(); + } + let Some((expected_tx_execution_infos, expected_block_summary)) = &expected_result else { expected_result = Some(result); continue; }; - let (_tx_execution_infos, block_summary) = result; + let (tx_execution_infos, block_summary) = result; - // TODO(Meshi): Add assertion for `tx_execution_infos` that skips target-dependent - // fields: - // - `run_cairo`: `true` only when `cairo_native` is enabled - // - `builtin_counters`: unsupported in native mode + assert_eq!( + &tx_execution_infos, expected_tx_execution_infos, + "Transaction Results differ for concurrency_enabled: {concurrency_enabled}; \ + cairo1_version: {cairo1_version:?}" + ); assert_eq!( &block_summary, expected_block_summary, diff --git a/crates/blockifier/src/bouncer.rs b/crates/blockifier/src/bouncer.rs index bba2add363c..2531378a565 100644 --- a/crates/blockifier/src/bouncer.rs +++ b/crates/blockifier/src/bouncer.rs @@ -162,7 +162,7 @@ impl Default for BouncerWeights { n_txs: 600, state_diff_size: 4000, sierra_gas: GasAmount(4000000000), - proving_gas: GasAmount(4000000000), + proving_gas: GasAmount(5000000000), } } } @@ -362,14 +362,14 @@ impl BuiltinWeights { impl Default for BuiltinWeights { fn default() -> Self { Self { - pedersen: 8100, + pedersen: 10125, range_check: 70, - ecdsa: 1333333, - ec_op: 571900, + ecdsa: 1666666, + ec_op: 714875, bitwise: 583, - keccak: 408566, - poseidon: 8334, - add_mod: 250, + keccak: 510707, + poseidon: 6250, + add_mod: 312, mul_mod: 604, range_check96: 56, } @@ -491,6 +491,7 @@ impl Bouncer { state_reader: &S, 
tx_state_changes_keys: &StateChangesKeys, tx_execution_summary: &ExecutionSummary, + tx_builtin_counters: &BuiltinCounterMap, tx_resources: &TransactionResources, versioned_constants: &VersionedConstants, ) -> TransactionExecutorResult<()> { @@ -514,9 +515,10 @@ impl Bouncer { tx_resources, &marginal_state_changes_keys, versioned_constants, - &tx_execution_summary.builtin_counters, + tx_builtin_counters, &self.bouncer_config.builtin_weights, )?; + let tx_bouncer_weights = tx_weights.bouncer_weights; // Check if the transaction can fit the current block available capacity. @@ -612,15 +614,16 @@ fn proving_gas_from_builtins_and_sierra_gas( steps_proving_gas.checked_add_panic_on_overflow(builtins_proving_gas) } -// TODO(AvivY): Share code with `vm_resources_to_sierra_gas`. -/// Converts vm resources to proving gas using the builtin weights. -fn vm_resources_to_proving_gas( +/// Generic function to convert VM resources to gas with configurable builtin gas calculation +fn vm_resources_to_gas( resources: &ExecutionResources, - builtin_weights: &BuiltinWeights, versioned_constants: &VersionedConstants, -) -> GasAmount { - let builtins_gas_cost = - builtin_weights.calc_proving_gas_from_builtin_counter(&resources.prover_builtins()); + builtin_gas_calculator: F, +) -> GasAmount +where + F: FnOnce(&BuiltinCounterMap) -> GasAmount, +{ + let builtins_gas_cost = builtin_gas_calculator(&resources.prover_builtins()); let n_steps_gas_cost = n_steps_to_gas(resources.total_n_steps(), versioned_constants); let n_memory_holes_gas_cost = memory_holes_to_gas(resources.n_memory_holes, versioned_constants); @@ -630,19 +633,24 @@ fn vm_resources_to_proving_gas( .checked_add_panic_on_overflow(builtins_gas_cost) } -pub fn vm_resources_to_sierra_gas( +/// Converts vm resources to proving gas using the builtin weights. 
+fn vm_resources_to_proving_gas( resources: &ExecutionResources, + builtin_weights: &BuiltinWeights, versioned_constants: &VersionedConstants, ) -> GasAmount { - let builtins_gas_cost = - builtins_to_sierra_gas(&resources.prover_builtins(), versioned_constants); - let n_steps_gas_cost = n_steps_to_gas(resources.total_n_steps(), versioned_constants); - let n_memory_holes_gas_cost = - memory_holes_to_gas(resources.n_memory_holes, versioned_constants); + vm_resources_to_gas(resources, versioned_constants, |builtin_counters| { + builtin_weights.calc_proving_gas_from_builtin_counter(builtin_counters) + }) +} - n_steps_gas_cost - .checked_add_panic_on_overflow(n_memory_holes_gas_cost) - .checked_add_panic_on_overflow(builtins_gas_cost) +pub fn vm_resources_to_sierra_gas( + resources: &ExecutionResources, + versioned_constants: &VersionedConstants, +) -> GasAmount { + vm_resources_to_gas(resources, versioned_constants, |builtin_counters| { + builtins_to_sierra_gas(builtin_counters, versioned_constants) + }) } /// Computes the steps gas by subtracting the builtins' contribution from the Sierra gas. @@ -730,8 +738,6 @@ pub fn get_tx_weights( add_maps(&mut builtin_counters_without_casm_hash_computation, tx_builtin_counters); // The transaction builtin counters does not include the transaction overhead ('additional') // resources. - // TODO(AvivG): Builtins from `fee_transfer_call_info` are counted twice - in `os_vm_resources` - // and again in `tx_builtin_counters`. Remove the duplication. 
add_maps( &mut builtin_counters_without_casm_hash_computation, &tx_resources.computation.os_vm_resources.prover_builtins(), @@ -805,6 +811,7 @@ pub fn get_particia_update_resources(n_visited_storage_entries: usize) -> Execut pub fn verify_tx_weights_within_max_capacity( state_reader: &S, tx_execution_summary: &ExecutionSummary, + tx_builtin_counters: &BuiltinCounterMap, tx_resources: &TransactionResources, tx_state_changes_keys: &StateChangesKeys, bouncer_config: &BouncerConfig, @@ -817,7 +824,7 @@ pub fn verify_tx_weights_within_max_capacity( tx_resources, tx_state_changes_keys, versioned_constants, - &tx_execution_summary.builtin_counters, + tx_builtin_counters, &bouncer_config.builtin_weights, )? .bouncer_weights; diff --git a/crates/blockifier/src/bouncer_test.rs b/crates/blockifier/src/bouncer_test.rs index 4e287c8106f..ffd3eab1010 100644 --- a/crates/blockifier/src/bouncer_test.rs +++ b/crates/blockifier/src/bouncer_test.rs @@ -1,6 +1,9 @@ use std::collections::{HashMap, HashSet}; use assert_matches::assert_matches; +use blockifier_test_utils::cairo_versions::{CairoVersion, RunnableCairo1}; +use blockifier_test_utils::contracts::FeatureContract; +use cairo_vm::types::builtin_name::BuiltinName; use rstest::{fixture, rstest}; use starknet_api::execution_resources::GasAmount; use starknet_api::transaction::fields::Fee; @@ -9,6 +12,7 @@ use starknet_api::{class_hash, contract_address, storage_key}; use super::BouncerConfig; use crate::blockifier::transaction_executor::TransactionExecutorError; use crate::bouncer::{ + get_tx_weights, verify_tx_weights_within_max_capacity, Bouncer, BouncerWeights, @@ -17,9 +21,10 @@ use crate::bouncer::{ TxWeights, }; use crate::context::BlockContext; -use crate::execution::call_info::ExecutionSummary; +use crate::execution::call_info::{BuiltinCounterMap, ExecutionSummary}; use crate::fee::resources::{ComputationResources, TransactionResources}; -use crate::state::cached_state::{CachedState, StateChangesKeys, TransactionalState}; 
+use crate::state::cached_state::{CachedState, StateChangesKeys, StateMaps, TransactionalState}; +use crate::test_utils::contracts::FeatureContractData; use crate::test_utils::dict_state_reader::DictStateReader; use crate::test_utils::initial_test_state::test_state; use crate::transaction::errors::TransactionExecutionError; @@ -186,19 +191,49 @@ fn test_bouncer_update(#[case] initial_bouncer: Bouncer) { assert_eq!(updated_bouncer, expected_bouncer); } -/// This parameterized test verifies `Bouncer::try_update` behavior when varying only `sierra_gas`. #[rstest] -#[case::positive_flow(GasAmount(1), "ok")] -#[case::block_full(GasAmount(11), "block_full")] -#[case::transaction_too_large(GasAmount(21), "too_large")] -fn test_bouncer_try_update_sierra_gas( - #[case] added_gas: GasAmount, - #[case] scenario: &'static str, - block_context: BlockContext, - block_max_capacity: BouncerWeights, - bouncer_config: BouncerConfig, - mut state: CachedState, -) { +#[case::sierra_gas_positive_flow("ok")] +#[case::sierra_gas_block_full("sierra_gas_block_full")] +#[case::proving_gas_positive_flow("ok")] +#[case::proving_gas_block_full("proving_gas_block_full")] +fn test_bouncer_try_update_gas_based(#[case] scenario: &'static str, block_context: BlockContext) { + let state = &mut test_state(&block_context.chain_info, Fee(0), &[]); + let mut transactional_state = TransactionalState::create_transactional(state); + let builtin_weights = BuiltinWeights::default(); + + let range_check_count = 2; + let max_capacity_builtin_counters = + HashMap::from([(BuiltinName::range_check, range_check_count)]); + let builtin_counters = match scenario { + "proving_gas_block_full" => max_capacity_builtin_counters.clone(), + // Use a minimal or empty map. 
+ "ok" | "sierra_gas_block_full" => { + HashMap::from([(BuiltinName::range_check, range_check_count - 1)]) + } + _ => panic!("Unexpected scenario: {scenario}"), + }; + + // Derive sierra_gas from scenario + let sierra_gas = match scenario { + "sierra_gas_block_full" => GasAmount(11), // Exceeds capacity + "ok" | "proving_gas_block_full" => GasAmount(1), // Within capacity + _ => panic!("Unexpected scenario: {scenario}"), + }; + + let proving_gas_max_capacity = + builtin_weights.calc_proving_gas_from_builtin_counter(&max_capacity_builtin_counters); + + let block_max_capacity = BouncerWeights { + l1_gas: 20, + message_segment_length: 20, + n_events: 20, + state_diff_size: 20, + n_txs: 20, + sierra_gas: GasAmount(20), + proving_gas: proving_gas_max_capacity, + }; + let bouncer_config = BouncerConfig { block_max_capacity, builtin_weights }; + let accumulated_weights = BouncerWeights { l1_gas: 10, message_segment_length: 10, @@ -214,53 +249,70 @@ fn test_bouncer_try_update_sierra_gas( // Prepare the resources to be added to the bouncer. let execution_summary = ExecutionSummary::default(); let tx_resources = TransactionResources { - // Only the `sierra_gas` field is varied. - computation: ComputationResources { sierra_gas: added_gas, ..Default::default() }, + computation: ComputationResources { sierra_gas, ..Default::default() }, ..Default::default() }; - let mut transactional_state = TransactionalState::create_transactional(&mut state); let tx_state_changes_keys = transactional_state.to_state_diff().unwrap().state_maps.keys(); - // TODO(Yoni, 1/10/2024): simplify this test and move tx-too-large cases out. 
+ let result = bouncer.try_update( + &transactional_state, + &tx_state_changes_keys, + &execution_summary, + &builtin_counters, + &tx_resources, + &block_context.versioned_constants, + ); + + match scenario { + "ok" => assert_matches!(result, Ok(())), + "proving_gas_block_full" | "sierra_gas_block_full" => { + assert_matches!(result, Err(TransactionExecutorError::BlockFull)) + } + _ => panic!("Unexpected scenario: {scenario}"), + } +} + +#[rstest] +fn test_transaction_too_large_sierra_gas_based(block_context: BlockContext) { + let mut state = test_state(&block_context.chain_info, Fee(0), &[]); + let mut transactional_state = TransactionalState::create_transactional(&mut state); + let block_max_capacity = BouncerWeights { sierra_gas: GasAmount(20), ..Default::default() }; + let bouncer_config = + BouncerConfig { block_max_capacity, builtin_weights: BuiltinWeights::default() }; + + // Use gas amount > block_max_capacity's. + let exceeding_gas = GasAmount(30); + let execution_summary = ExecutionSummary::default(); + let builtin_counters = BuiltinCounterMap::default(); + let tx_resources = TransactionResources { + computation: ComputationResources { sierra_gas: exceeding_gas, ..Default::default() }, + ..Default::default() + }; + let tx_state_changes_keys = transactional_state.to_state_diff().unwrap().state_maps.keys(); - // Check that the transaction is not too large. - let mut result = verify_tx_weights_within_max_capacity( + let result = verify_tx_weights_within_max_capacity( &transactional_state, &execution_summary, + &builtin_counters, &tx_resources, &tx_state_changes_keys, - &bouncer.bouncer_config, + &bouncer_config, &block_context.versioned_constants, ) .map_err(TransactionExecutorError::TransactionExecutionError); + let expected_weights = BouncerWeights { - sierra_gas: added_gas, + sierra_gas: exceeding_gas, n_txs: 1, - proving_gas: added_gas, + proving_gas: exceeding_gas, ..BouncerWeights::empty() }; - if result.is_ok() { - // Try to update the bouncer. 
- result = bouncer.try_update( - &transactional_state, - &tx_state_changes_keys, - &execution_summary, - &tx_resources, - &block_context.versioned_constants, - ); - } - - match scenario { - "ok" => assert_matches!(result, Ok(())), - "block_full" => assert_matches!(result, Err(TransactionExecutorError::BlockFull)), - "too_large" => assert_matches!(result, Err( - TransactionExecutorError::TransactionExecutionError( - TransactionExecutionError::TransactionTooLarge { max_capacity, tx_size } - ) - ) if *max_capacity == block_max_capacity && *tx_size == expected_weights), - _ => panic!("Unexpected scenario: {scenario}"), - } + assert_matches!(result, Err( + TransactionExecutorError::TransactionExecutionError( + TransactionExecutionError::TransactionTooLarge { max_capacity, tx_size } + ) + ) if *max_capacity == bouncer_config.block_max_capacity && *tx_size == expected_weights); } #[rstest] @@ -291,6 +343,7 @@ fn test_bouncer_try_update_n_txs( &first_transactional_state, &first_tx_state_changes_keys, &ExecutionSummary::default(), + &BuiltinCounterMap::default(), &TransactionResources::default(), &block_context.versioned_constants, ); @@ -306,9 +359,101 @@ fn test_bouncer_try_update_n_txs( &second_transactional_state, &second_tx_state_changes_keys, &ExecutionSummary::default(), + &BuiltinCounterMap::default(), &TransactionResources::default(), &block_context.versioned_constants, ); assert_matches!(result, Err(TransactionExecutorError::BlockFull)); } + +/// This test verifies that `get_tx_weights` returns a reasonable casm hash computation data. +#[rstest] +fn test_get_tx_weights_with_casm_hash_computation(block_context: BlockContext) { + // Set up state with declared contracts. 
+ let mut state_reader = DictStateReader::default(); + let test_contract_v0 = FeatureContract::TestContract(CairoVersion::Cairo0); + let test_contract_v1 = + FeatureContract::TestContract(CairoVersion::Cairo1(RunnableCairo1::Casm)); + + state_reader.add_class(&FeatureContractData::from(test_contract_v0)); + state_reader.add_class(&FeatureContractData::from(test_contract_v1)); + let state = CachedState::new(state_reader); + + let executed_class_hashes = + HashSet::from([test_contract_v0.get_class_hash(), test_contract_v1.get_class_hash()]); + + // Call get_tx_weights. + let result = get_tx_weights( + &state, + &executed_class_hashes, + 10, // n_visited_storage_entries + &TransactionResources::default(), + &StateMaps::default().keys(), + &block_context.versioned_constants, + &BuiltinCounterMap::default(), + &BuiltinWeights::default(), + ); + + let tx_weights = result.unwrap(); + + // Test that casm hash computation data keys equal executed class hashes + let sierra_keys: HashSet<_> = tx_weights + .casm_hash_computation_data_sierra_gas + .class_hash_to_casm_hash_computation_gas + .keys() + .cloned() + .collect(); + let proving_keys: HashSet<_> = tx_weights + .casm_hash_computation_data_proving_gas + .class_hash_to_casm_hash_computation_gas + .keys() + .cloned() + .collect(); + + assert_eq!( + sierra_keys, executed_class_hashes, + "Sierra gas keys should match executed class hashes" + ); + assert_eq!( + proving_keys, executed_class_hashes, + "Proving gas keys should match executed class hashes" + ); + + // Verify gas amounts of casm hash computation data are positive. + assert!( + tx_weights + .casm_hash_computation_data_sierra_gas + .class_hash_to_casm_hash_computation_gas + .values() + .all(|&gas| gas > GasAmount::ZERO) + ); + assert!( + tx_weights + .casm_hash_computation_data_proving_gas + .class_hash_to_casm_hash_computation_gas + .values() + .all(|&gas| gas > GasAmount::ZERO) + ); + + // Test gas without casm hash computation is positive. 
+ assert!( + tx_weights.casm_hash_computation_data_sierra_gas.gas_without_casm_hash_computation + > GasAmount::ZERO + ); + assert!( + tx_weights.casm_hash_computation_data_proving_gas.gas_without_casm_hash_computation + > GasAmount::ZERO + ); + + // Test that bouncer weights are equal to casm hash computation data total gas. + let bouncer_weights = tx_weights.bouncer_weights; + assert_eq!( + bouncer_weights.sierra_gas, + tx_weights.casm_hash_computation_data_sierra_gas.total_gas() + ); + assert_eq!( + bouncer_weights.proving_gas, + tx_weights.casm_hash_computation_data_proving_gas.total_gas() + ); +} diff --git a/crates/blockifier/src/concurrency/worker_logic.rs b/crates/blockifier/src/concurrency/worker_logic.rs index e1178d57e19..826600f0328 100644 --- a/crates/blockifier/src/concurrency/worker_logic.rs +++ b/crates/blockifier/src/concurrency/worker_logic.rs @@ -336,6 +336,7 @@ impl WorkerExecutor { &tx_versioned_state, &tx_state_changes_keys, &tx_execution_info.summarize(&self.block_context.versioned_constants), + &tx_execution_info.summarize_builtins(), &tx_execution_info.receipt.resources, &self.block_context.versioned_constants, ); diff --git a/crates/blockifier/src/execution/call_info.rs b/crates/blockifier/src/execution/call_info.rs index 3200edf4888..124549634bb 100644 --- a/crates/blockifier/src/execution/call_info.rs +++ b/crates/blockifier/src/execution/call_info.rs @@ -17,7 +17,7 @@ use crate::blockifier_versioned_constants::VersionedConstants; use crate::execution::contract_class::TrackedResource; use crate::execution::entry_point::CallEntryPoint; use crate::state::cached_state::StorageEntry; -use crate::utils::{add_maps, u64_from_usize}; +use crate::utils::u64_from_usize; #[cfg_attr(feature = "transaction_serde", derive(serde::Deserialize))] #[derive(Clone, Debug, Default, Eq, PartialEq, Serialize)] @@ -104,7 +104,6 @@ pub struct ExecutionSummary { pub visited_storage_entries: HashSet, pub l2_to_l1_payload_lengths: Vec, pub event_summary: 
EventSummary, - pub builtin_counters: BuiltinCounterMap, } impl Add for ExecutionSummary { @@ -115,7 +114,6 @@ impl Add for ExecutionSummary { self.executed_class_hashes.extend(other.executed_class_hashes); self.visited_storage_entries.extend(other.visited_storage_entries); self.l2_to_l1_payload_lengths.extend(other.l2_to_l1_payload_lengths); - add_maps(&mut self.builtin_counters, &other.builtin_counters); self.event_summary += other.event_summary; self } @@ -257,7 +255,6 @@ impl CallInfo { let mut visited_storage_entries: HashSet = HashSet::new(); let mut event_summary = EventSummary::default(); let mut l2_to_l1_payload_lengths = Vec::new(); - let mut builtin_counters = BuiltinCounterMap::new(); for call_info in self.iter() { // Class hashes. @@ -282,8 +279,6 @@ impl CallInfo { .map(|message| message.message.payload.0.len()), ); - add_maps(&mut builtin_counters, &call_info.builtin_counters); - // Events: all event resources in the execution tree, unless executing a 0.13.1 block. if !versioned_constants.ignore_inner_event_resources { event_summary += call_info.specific_event_summary(); @@ -307,7 +302,6 @@ impl CallInfo { visited_storage_entries, l2_to_l1_payload_lengths, event_summary, - builtin_counters, } } diff --git a/crates/blockifier/src/execution/entry_point_execution.rs b/crates/blockifier/src/execution/entry_point_execution.rs index ad7da50afe8..88c08965ae4 100644 --- a/crates/blockifier/src/execution/entry_point_execution.rs +++ b/crates/blockifier/src/execution/entry_point_execution.rs @@ -1,3 +1,4 @@ +use cairo_vm::hint_processor::hint_processor_definition::HintProcessor; use cairo_vm::types::builtin_name::BuiltinName; use cairo_vm::types::layout::CairoLayoutParams; use cairo_vm::types::layout_name::LayoutName; @@ -160,7 +161,7 @@ pub fn initialize_execution_context_with_runner_mode<'a>( let mut read_only_segments = ReadOnlySegments::default(); let program_extra_data_length = prepare_program_extra_data( &mut runner, - compiled_class, + 
compiled_class.bytecode_length(), &mut read_only_segments, &context.versioned_constants().os_constants.gas_costs, )?; @@ -200,9 +201,9 @@ pub fn initialize_execution_context<'a>( ) } -fn prepare_program_extra_data( +pub fn prepare_program_extra_data( runner: &mut CairoRunner, - contract_class: &CompiledClassV1, + bytecode_length: usize, read_only_segments: &mut ReadOnlySegments, gas_costs: &GasCosts, ) -> Result { @@ -225,7 +226,7 @@ fn prepare_program_extra_data( // Put a pointer to the builtin cost segment at the end of the program (after the // additional `ret` statement). - let mut ptr = (runner.vm.get_pc() + contract_class.bytecode_length())?; + let mut ptr = (runner.vm.get_pc() + bytecode_length)?; // Push a `ret` opcode. write_felt(&mut runner.vm, &mut ptr, Felt::from(0x208b7fff7fff7ffe_u128))?; // Push a pointer to the builtin cost segment. @@ -294,10 +295,11 @@ pub fn prepare_call_arguments( Ok(args) } + /// Runs the runner from the given PC. -pub fn run_entry_point( +pub fn run_entry_point( runner: &mut CairoRunner, - hint_processor: &mut SyscallHintProcessor<'_>, + hint_processor: &mut HP, entry_point: EntryPointV1, args: Args, program_segment_size: usize, @@ -385,13 +387,11 @@ fn maybe_fill_holes( Ok(()) } -pub fn finalize_execution( - mut runner: CairoRunner, - mut syscall_handler: SyscallHintProcessor<'_>, +pub fn finalize_runner( + runner: &mut CairoRunner, n_total_args: usize, program_extra_data_length: usize, - tracked_resource: TrackedResource, -) -> Result { +) -> Result<(), PostExecutionError> { // Close memory holes in segments (OS code touches those memory cells, we simulate it). let program_start_ptr = runner .program_base @@ -405,10 +405,13 @@ pub fn finalize_execution( // When execution starts the stack holds the EP arguments + [ret_fp, ret_pc]. 
let args_ptr = (initial_fp - (n_total_args + 2))?; runner.vm.mark_address_range_as_accessed(args_ptr, n_total_args)?; - syscall_handler.read_only_segments.mark_as_accessed(&mut runner)?; - - let call_result = get_call_result(&runner, &syscall_handler, &tracked_resource)?; + Ok(()) +} +pub fn extract_vm_resources( + runner: &CairoRunner, + syscall_handler: &SyscallHintProcessor<'_>, +) -> Result { // Take into account the resources of the current call, without inner calls. // Has to happen after marking holes in segments as accessed. let mut vm_resources_without_inner_calls = runner @@ -425,6 +428,31 @@ pub fn finalize_execution( // Take into account the syscall resources of the current call. vm_resources_without_inner_calls += &versioned_constants.get_additional_os_syscall_resources(&syscall_handler.syscalls_usage); + Ok(vm_resources_without_inner_calls) +} + +pub fn total_vm_resources( + tracked_vm_resources_without_inner_calls: &ExecutionResources, + inner_calls: &[CallInfo], +) -> ExecutionResources { + tracked_vm_resources_without_inner_calls + &CallInfo::summarize_vm_resources(inner_calls.iter()) +} + +pub fn finalize_execution( + mut runner: CairoRunner, + mut syscall_handler: SyscallHintProcessor<'_>, + n_total_args: usize, + program_extra_data_length: usize, + tracked_resource: TrackedResource, +) -> Result { + finalize_runner(&mut runner, n_total_args, program_extra_data_length)?; + syscall_handler.read_only_segments.mark_as_accessed(&mut runner)?; + + let call_result = get_call_result(&runner, &syscall_handler, &tracked_resource)?; + + // Take into account the resources of the current call, without inner calls. + // Has to happen after marking holes in segments as accessed. 
+ let vm_resources_without_inner_calls = extract_vm_resources(&runner, &syscall_handler)?; let tracked_vm_resources_without_inner_calls = match tracked_resource { TrackedResource::CairoSteps => &vm_resources_without_inner_calls, @@ -433,9 +461,13 @@ pub fn finalize_execution( syscall_handler.finalize(); - let vm_resources = tracked_vm_resources_without_inner_calls - + &CallInfo::summarize_vm_resources(syscall_handler.base.inner_calls.iter()); + let vm_resources = total_vm_resources( + tracked_vm_resources_without_inner_calls, + &syscall_handler.base.inner_calls, + ); + let syscall_handler_base = syscall_handler.base; + Ok(CallInfo { call: syscall_handler_base.call.into(), execution: CallExecution { diff --git a/crates/blockifier/src/execution/syscalls/vm_syscall_utils.rs b/crates/blockifier/src/execution/syscalls/vm_syscall_utils.rs index 4e5fb7d4be7..f0196b3af99 100644 --- a/crates/blockifier/src/execution/syscalls/vm_syscall_utils.rs +++ b/crates/blockifier/src/execution/syscalls/vm_syscall_utils.rs @@ -8,6 +8,7 @@ use cairo_vm::vm::errors::memory_errors::MemoryError; use cairo_vm::vm::errors::vm_errors::VirtualMachineError; use cairo_vm::vm::vm_core::VirtualMachine; use num_traits::ToPrimitive; +use serde::Serialize; use starknet_api::block::{BlockHash, BlockNumber}; use starknet_api::core::{ClassHash, ContractAddress, EntryPointSelector, EthAddress}; use starknet_api::execution_resources::GasAmount; @@ -47,7 +48,7 @@ pub type SyscallSelector = DeprecatedSyscallSelector; pub type SyscallUsageMap = HashMap; -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, Serialize)] pub struct SyscallUsage { pub call_count: usize, pub linear_factor: usize, diff --git a/crates/blockifier/src/fee/resources.rs b/crates/blockifier/src/fee/resources.rs index 853b986b7b9..9ea71e22859 100644 --- a/crates/blockifier/src/fee/resources.rs +++ b/crates/blockifier/src/fee/resources.rs @@ -57,6 +57,9 @@ impl TransactionResources { #[cfg_attr(feature = "transaction_serde", 
derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Default, PartialEq)] pub struct ComputationResources { + /// Execution resources split between the transaction itself (`tx_vm_resources`) and OS + /// overhead (`os_vm_resources`). This enables clean proving gas calculation. See usage in + /// `get_tx_weights`. pub tx_vm_resources: ExecutionResources, pub os_vm_resources: ExecutionResources, pub n_reverted_steps: usize, diff --git a/crates/blockifier/src/state/stateful_compression_test.rs b/crates/blockifier/src/state/stateful_compression_test.rs index c6bc7ea4875..1a27e403ccb 100644 --- a/crates/blockifier/src/state/stateful_compression_test.rs +++ b/crates/blockifier/src/state/stateful_compression_test.rs @@ -1,9 +1,8 @@ use std::collections::{HashMap, HashSet}; -use std::sync::LazyLock; use assert_matches::assert_matches; use rstest::rstest; -use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce, PatriciaKey}; +use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce}; use starknet_api::felt; use starknet_api::state::StorageKey; use starknet_types_core::felt::Felt; @@ -23,9 +22,7 @@ use crate::state::cached_state::{CachedState, StateMaps, StorageEntry}; use crate::state::state_api::{State, StateReader}; use crate::state::stateful_compression::{AliasCompressor, CompressionError}; use crate::test_utils::dict_state_reader::DictStateReader; - -static ALIAS_CONTRACT_ADDRESS: LazyLock = - LazyLock::new(|| ContractAddress(PatriciaKey::try_from(Felt::TWO).unwrap())); +use crate::test_utils::ALIAS_CONTRACT_ADDRESS; /// Decompresses the state diff by replacing the aliases with addresses and storage keys. 
fn decompress( diff --git a/crates/blockifier/src/test_utils.rs b/crates/blockifier/src/test_utils.rs index 593df004d36..6f9e89b0e22 100644 --- a/crates/blockifier/src/test_utils.rs +++ b/crates/blockifier/src/test_utils.rs @@ -10,6 +10,7 @@ pub mod test_templates; pub mod transfers_generator; use std::collections::HashMap; use std::slice::Iter; +use std::sync::LazyLock; use blockifier_test_utils::cairo_versions::{CairoVersion, RunnableCairo1}; use blockifier_test_utils::contracts::FeatureContract; @@ -17,7 +18,7 @@ use cairo_vm::types::builtin_name::BuiltinName; use cairo_vm::vm::runners::cairo_runner::ExecutionResources; use starknet_api::abi::abi_utils::{get_fee_token_var_address, selector_from_name}; use starknet_api::block::{BlockHash, BlockHashAndNumber, BlockNumber}; -use starknet_api::core::{ClassHash, ContractAddress}; +use starknet_api::core::{ClassHash, ContractAddress, PatriciaKey}; use starknet_api::executable_transaction::TransactionType; use starknet_api::execution_resources::{GasAmount, GasVector}; use starknet_api::hash::StarkHash; @@ -65,6 +66,9 @@ pub const ERC20_CONTRACT_PATH: &str = "../blockifier_test_utils/resources/ERC20/ ERC20_without_some_syscalls/ERC20/\ erc20_contract_without_some_syscalls_compiled.json"; +pub static ALIAS_CONTRACT_ADDRESS: LazyLock = + LazyLock::new(|| ContractAddress(PatriciaKey::try_from(Felt::TWO).unwrap())); + #[derive(Clone, Copy, EnumCountMacro, PartialEq, Eq, Debug)] pub enum CompilerBasedVersion { CairoVersion(CairoVersion), diff --git a/crates/blockifier/src/test_utils/struct_impls.rs b/crates/blockifier/src/test_utils/struct_impls.rs index 0c50001a2e8..9ee95ad0323 100644 --- a/crates/blockifier/src/test_utils/struct_impls.rs +++ b/crates/blockifier/src/test_utils/struct_impls.rs @@ -27,7 +27,7 @@ use crate::blockifier::config::{CairoNativeRunConfig, ContractClassManagerConfig use crate::blockifier_versioned_constants::VersionedConstants; use crate::bouncer::{BouncerConfig, BouncerWeights}; use 
crate::context::{BlockContext, ChainInfo, FeeTokenAddresses, TransactionContext}; -use crate::execution::call_info::{CallExecution, CallInfo, Retdata}; +use crate::execution::call_info::{BuiltinCounterMap, CallExecution, CallInfo, Retdata}; use crate::execution::common_hints::ExecutionMode; #[cfg(feature = "cairo_native")] use crate::execution::contract_class::CompiledClassV1; @@ -44,6 +44,7 @@ use crate::state::state_api::State; use crate::transaction::objects::{ CurrentTransactionInfo, DeprecatedTransactionInfo, + TransactionExecutionInfo, TransactionInfo, }; @@ -128,6 +129,29 @@ impl CallInfo { self.call.class_hash = Some(ClassHash::default()); self } + + pub fn clear_nonessential_fields_for_comparison(&mut self) { + for inner_call in self.inner_calls.iter_mut() { + inner_call.clear_nonessential_fields_for_comparison(); + } + self.builtin_counters = BuiltinCounterMap::new(); + self.execution.cairo_native = false; + } +} + +impl TransactionExecutionInfo { + pub fn clear_call_infos_nonessential_fields_for_comparison(&mut self) { + // Clear non-essential fields for comparison. + if let Some(call_info) = &mut self.validate_call_info { + call_info.clear_nonessential_fields_for_comparison(); + } + if let Some(call_info) = &mut self.execute_call_info { + call_info.clear_nonessential_fields_for_comparison(); + } + if let Some(call_info) = &mut self.fee_transfer_call_info { + call_info.clear_nonessential_fields_for_comparison(); + } + } } impl VersionedConstants { @@ -174,7 +198,6 @@ impl BlockContext { n_events: max_n_events_in_block, ..BouncerWeights::max() }, - // TODO(Meshi): Check what should be the values here. 
..BouncerConfig::max() }, ..Self::create_for_account_testing() diff --git a/crates/blockifier/src/test_utils/transfers_generator.rs b/crates/blockifier/src/test_utils/transfers_generator.rs index a5ede7c8707..b08ed062945 100644 --- a/crates/blockifier/src/test_utils/transfers_generator.rs +++ b/crates/blockifier/src/test_utils/transfers_generator.rs @@ -229,9 +229,27 @@ impl TransfersGenerator { // Execution infos of transactions that were executed. let mut collected_execution_infos = Vec::::new(); for result in results { - let execution_info = result.unwrap().0; + let execution_info = &result.unwrap().0; + assert!(!execution_info.is_reverted()); - collected_execution_infos.push(execution_info); + + let expected_cairo_native = self.config.cairo_version.is_cairo_native(); + + assert_eq!( + execution_info.validate_call_info.as_ref().unwrap().execution.cairo_native, + expected_cairo_native + ); + assert_eq!( + execution_info.execute_call_info.as_ref().unwrap().execution.cairo_native, + expected_cairo_native + ); + // TODO(YonatanK): after updating the sierra version the the ERC20 contract uses, change + // this asserts to check the actual value of `inner_call.execution.cairo_native`. + for inner_call in execution_info.execute_call_info.as_ref().unwrap().inner_calls.iter() + { + assert!(!inner_call.execution.cairo_native); + } + collected_execution_infos.push(execution_info.clone()); } (block_summary, collected_execution_infos) diff --git a/crates/blockifier/src/transaction/account_transaction.rs b/crates/blockifier/src/transaction/account_transaction.rs index 491dd66f271..03b69e71ccb 100644 --- a/crates/blockifier/src/transaction/account_transaction.rs +++ b/crates/blockifier/src/transaction/account_transaction.rs @@ -780,6 +780,14 @@ impl ExecutableTransaction for AccountTransaction { // Do not run validate or perform any account-related actions for declare transactions that // meet the following conditions. 
// This flow is used for the sequencer to bootstrap a new system. + // Note: The absence of any account-related action leads to some unintuitive but expected + // behavior: + // - After the transaction is executed successfully, the batcher does not notify the mempool + // about its inclusion in a block. As a result, the transaction remains in the mempool. + // - When the next block is produced, the mempool will propose the same transaction again. + // - This time, execution will fail because the contract has already been declared. + // - The transaction will then be marked as rejected, the mempool will be notified, and the + // transaction will be removed from the mempool. if let Transaction::Declare(tx) = &self.tx { if tx.is_bootstrap_declare(self.execution_flags.charge_fee) { let mut context = EntryPointExecutionContext::new_invoke( diff --git a/crates/blockifier/src/transaction/objects.rs b/crates/blockifier/src/transaction/objects.rs index e5359d09f10..ed4593dc317 100644 --- a/crates/blockifier/src/transaction/objects.rs +++ b/crates/blockifier/src/transaction/objects.rs @@ -28,6 +28,7 @@ use crate::fee::fee_checks::FeeCheckError; use crate::fee::fee_utils::get_fee_by_gas_vector; use crate::fee::receipt::TransactionReceipt; use crate::transaction::errors::{TransactionExecutionError, TransactionPreValidationError}; +use crate::utils::add_maps; #[cfg(test)] #[path = "objects_test.rs"] @@ -208,6 +209,12 @@ impl TransactionExecutionInfo { .chain(self.fee_transfer_call_info.iter()) } + /// Returns call infos excluding fee transfer (to avoid double-counting in bouncer + /// calculations). 
+ pub fn non_optional_call_infos_without_fee_transfer(&self) -> impl Iterator { + self.validate_call_info.iter().chain(self.execute_call_info.iter()) + } + pub fn is_reverted(&self) -> bool { self.revert_error.is_some() } @@ -217,6 +224,18 @@ impl TransactionExecutionInfo { pub fn summarize(&self, versioned_constants: &VersionedConstants) -> ExecutionSummary { CallInfo::summarize_many(self.non_optional_call_infos(), versioned_constants) } + + pub fn summarize_builtins(&self) -> BuiltinCounterMap { + let mut builtin_counters = BuiltinCounterMap::new(); + // Remove fee transfer builtins to avoid double-counting in `get_tx_weights` + // in bouncer.rs (already included in os_vm_resources). + for call_info_iter in self.non_optional_call_infos_without_fee_transfer() { + for call_info in call_info_iter.iter() { + add_maps(&mut builtin_counters, &call_info.builtin_counters); + } + } + builtin_counters + } } pub trait ExecutionResourcesTraits { fn total_n_steps(&self) -> usize; diff --git a/crates/blockifier/src/transaction/objects_test.rs b/crates/blockifier/src/transaction/objects_test.rs index dec02a0f6e8..4a0c7382d1a 100644 --- a/crates/blockifier/src/transaction/objects_test.rs +++ b/crates/blockifier/src/transaction/objects_test.rs @@ -1,5 +1,4 @@ -use std::collections::HashMap; - +use cairo_vm::types::builtin_name::BuiltinName; use rstest::rstest; use starknet_api::core::{ClassHash, ContractAddress, EthAddress}; use starknet_api::execution_resources::GasAmount; @@ -10,6 +9,7 @@ use starknet_types_core::felt::Felt; use crate::blockifier_versioned_constants::VersionedConstants; use crate::execution::call_info::{ + BuiltinCounterMap, CallExecution, CallInfo, ChargedResources, @@ -31,6 +31,8 @@ pub struct TestExecutionSummary { pub class_hash: ClassHash, pub storage_address: ContractAddress, pub storage_key: StorageKey, + pub builtin_counters: BuiltinCounterMap, + pub inner_builtin_counters: BuiltinCounterMap, } impl TestExecutionSummary { @@ -49,9 +51,19 @@ impl 
TestExecutionSummary { class_hash, storage_address: contract_address!(storage_address), storage_key: storage_key!(storage_key), + builtin_counters: BuiltinCounterMap::new(), + inner_builtin_counters: BuiltinCounterMap::new(), } } + pub fn update_builtin_counters(&mut self, builtin_counters: &BuiltinCounterMap) { + self.builtin_counters.extend(builtin_counters); + } + + pub fn update_inner_builtin_counters(&mut self, inner_builtin_counters: &BuiltinCounterMap) { + self.inner_builtin_counters.extend(inner_builtin_counters); + } + pub fn to_call_info(&self) -> CallInfo { CallInfo { call: CallEntryPoint { @@ -77,6 +89,8 @@ impl TestExecutionSummary { accessed_storage_keys: vec![self.storage_key].into_iter().collect(), ..Default::default() }, + builtin_counters: self.builtin_counters.clone(), + inner_calls: vec![inner_call_info(&self.inner_builtin_counters)], ..Default::default() } } @@ -89,6 +103,14 @@ fn shared_call_info() -> CallInfo { } } +fn inner_call_info(builtin_counters: &BuiltinCounterMap) -> CallInfo { + CallInfo { + call: CallEntryPoint { class_hash: Some(class_hash!("0x1")), ..Default::default() }, + builtin_counters: builtin_counters.clone(), + ..Default::default() + } +} + fn call_info_with_x_events(n_events: usize, n_inner_calls: usize) -> CallInfo { CallInfo { execution: CallExecution { @@ -180,17 +202,47 @@ fn test_events_counter_in_tx_execution_info_with_inner_call_info(#[case] n_execu ); } +// This function gets a set of builtins for the outer and inner calls, updates the +// param builtin counter and returns the expected values for the summary test. 
+fn update_builtin_counters_for_summary_test( + params: &mut TestExecutionSummary, + outer_poseidon: usize, + outer_bitwise: usize, + inner_pedersen: usize, + inner_bitwise: usize, +) -> (usize, usize, usize) { + params.update_builtin_counters(&BuiltinCounterMap::from_iter([ + (BuiltinName::poseidon, outer_poseidon), + (BuiltinName::bitwise, outer_bitwise), + ])); + + params.update_inner_builtin_counters(&BuiltinCounterMap::from_iter([ + (BuiltinName::pedersen, inner_pedersen), + (BuiltinName::bitwise, inner_bitwise), + ])); + (outer_poseidon, inner_pedersen, outer_bitwise + inner_bitwise) +} + #[rstest] #[case( - TestExecutionSummary::new(10, 1, 2, class_hash!("0x1"), "0x1", "0x1"), - TestExecutionSummary::new(20, 2, 3, class_hash!("0x2"), "0x2", "0x2"), - TestExecutionSummary::new(30, 3, 4, class_hash!("0x3"), "0x3", "0x3") + &mut TestExecutionSummary::new(10, 1, 2, class_hash!("0x1"), "0x1", "0x1"), + &mut TestExecutionSummary::new(20, 2, 3, class_hash!("0x2"), "0x2", "0x2"), + &mut TestExecutionSummary::new(30, 3, 4, class_hash!("0x3"), "0x3", "0x3") )] fn test_summarize( - #[case] validate_params: TestExecutionSummary, - #[case] execute_params: TestExecutionSummary, - #[case] fee_transfer_params: TestExecutionSummary, + #[case] validate_params: &mut TestExecutionSummary, + #[case] execute_params: &mut TestExecutionSummary, + #[case] fee_transfer_params: &mut TestExecutionSummary, ) { + let (validate_poseidon, validate_pedersen, validate_bitwise) = + update_builtin_counters_for_summary_test(validate_params, 1, 5, 2, 6); + + let (execute_poseidon, execute_pedersen, execute_bitwise) = + update_builtin_counters_for_summary_test(execute_params, 1, 4, 2, 1); + + let (_fee_transfer_poseidon, _fee_transfer_pedersen, _fee_transfer_bitwise) = + update_builtin_counters_for_summary_test(fee_transfer_params, 1, 2, 3, 4); + let validate_call_info = validate_params.to_call_info(); let execute_call_info = execute_params.to_call_info(); let fee_transfer_call_info = 
fee_transfer_params.to_call_info(); @@ -236,13 +288,20 @@ fn test_summarize( total_event_keys: 0, total_event_data_size: 0, }, - // TODO(Meshi): Change it to a relevant value for this test. - builtin_counters: HashMap::new(), }; + // Omit the fee transfer builtin counters as done in `summarize_builtins`. + let expected_builtins = BuiltinCounterMap::from_iter([ + (BuiltinName::pedersen, validate_pedersen + execute_pedersen), + (BuiltinName::poseidon, validate_poseidon + execute_poseidon), + (BuiltinName::bitwise, validate_bitwise + execute_bitwise), + ]); + // Call the summarize method. let actual_summary = tx_execution_info.summarize(VersionedConstants::latest_constants()); + let actual_builtins = tx_execution_info.summarize_builtins(); // Compare the actual result with the expected result. assert_eq!(actual_summary, expected_summary); + assert_eq!(actual_builtins, expected_builtins); } diff --git a/crates/blockifier/src/transaction/transaction_execution.rs b/crates/blockifier/src/transaction/transaction_execution.rs index b67b0445d38..d8341b772e0 100644 --- a/crates/blockifier/src/transaction/transaction_execution.rs +++ b/crates/blockifier/src/transaction/transaction_execution.rs @@ -147,6 +147,7 @@ impl ExecutableTransaction for Transaction { // Check if the transaction is too large to fit any block. // TODO(Yoni, 1/8/2024): consider caching these two. 
let tx_execution_summary = tx_execution_info.summarize(&block_context.versioned_constants); + let tx_builtin_counters = tx_execution_info.summarize_builtins(); let mut tx_state_changes_keys = state.to_state_diff()?.state_maps.keys(); tx_state_changes_keys.update_sequencer_key_in_storage( &block_context.to_tx_context(self), @@ -156,6 +157,7 @@ impl ExecutableTransaction for Transaction { verify_tx_weights_within_max_capacity( state, &tx_execution_summary, + &tx_builtin_counters, &tx_execution_info.receipt.resources, &tx_state_changes_keys, &block_context.bouncer_config, diff --git a/crates/native_blockifier/src/py_block_executor.rs b/crates/native_blockifier/src/py_block_executor.rs index 2695ad2bc2f..e03837e8857 100644 --- a/crates/native_blockifier/src/py_block_executor.rs +++ b/crates/native_blockifier/src/py_block_executor.rs @@ -352,7 +352,6 @@ impl PyBlockExecutor { state_diff_size: max_state_diff_size, ..BouncerWeights::max() }, - // TODO(Meshi): Check what should be the values here. 
..BouncerConfig::max() }, tx_executor_config: TransactionExecutorConfig { diff --git a/crates/native_blockifier/src/py_objects.rs b/crates/native_blockifier/src/py_objects.rs index d2cecf1ebb5..a5a809c5ad9 100644 --- a/crates/native_blockifier/src/py_objects.rs +++ b/crates/native_blockifier/src/py_objects.rs @@ -168,7 +168,6 @@ fn hash_map_into_bouncer_weights( }) } -#[allow(clippy::result_large_err)] fn hash_map_into_builtin_weights( mut data: HashMap, ) -> NativeBlockifierResult { diff --git a/crates/papyrus_base_layer/src/eth_events.rs b/crates/papyrus_base_layer/src/eth_events.rs index e0c9ee869a7..83cc1c33436 100644 --- a/crates/papyrus_base_layer/src/eth_events.rs +++ b/crates/papyrus_base_layer/src/eth_events.rs @@ -32,7 +32,9 @@ pub fn parse_event(log: Log, block_timestamp: BlockTimestamp) -> EthereumBaseLay Ok(L1Event::LogMessageToL2 { tx, fee, l1_tx_hash, timestamp: block_timestamp }) } Starknet::StarknetEvents::ConsumedMessageToL2(event) => { - Ok(L1Event::ConsumedMessageToL2(event.try_into()?)) + let event_data = EventData::try_from(event)?; + let tx = L1HandlerTransaction::from(event_data); + Ok(L1Event::ConsumedMessageToL2(tx)) } Starknet::StarknetEvents::MessageToL2Canceled(event) => { Ok(L1Event::MessageToL2Canceled(event.try_into()?)) diff --git a/crates/papyrus_base_layer/src/lib.rs b/crates/papyrus_base_layer/src/lib.rs index ee5595c8c33..0af9c644ce0 100644 --- a/crates/papyrus_base_layer/src/lib.rs +++ b/crates/papyrus_base_layer/src/lib.rs @@ -102,7 +102,7 @@ pub struct L1BlockHeader { /// Wraps Starknet L1 events with Starknet API types. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub enum L1Event { - ConsumedMessageToL2(EventData), + ConsumedMessageToL2(L1HandlerTransaction), // TODO(Arni): Consider adding the l1_tx_hash to all variants of L1 Event. 
LogMessageToL2 { tx: L1HandlerTransaction, diff --git a/crates/papyrus_node/src/run_test.rs b/crates/papyrus_node/src/run_test.rs index 03d0472c231..9ca8ced72a6 100644 --- a/crates/papyrus_node/src/run_test.rs +++ b/crates/papyrus_node/src/run_test.rs @@ -14,20 +14,24 @@ use crate::run::{ }; // The mission of this test is to ensure that if an error is returned from one of the spawned tasks, -// the node will stop, and this error will be returned. This is done by checking the case of an -// illegal central URL, which will cause the sync task to return an error. +// the node will stop, and this error will be returned. This is done by checking the case of a +// network handler that returns an error, which will cause the sync task to return an error. #[tokio::test] async fn run_threads_stop() { let mut config = NodeConfig::default(); let temp_dir = TempDir::new().unwrap(); config.storage.db_config.path_prefix = temp_dir.path().into(); - // Error when not supplying legal central URL. - config.central.starknet_url = "_not_legal_url".to_string(); let resources = PapyrusResources::new(&config).unwrap(); - let tasks = PapyrusTaskHandles::default(); + let tasks = PapyrusTaskHandles { + network_handle: Some(tokio::task::spawn(async { + tokio::time::sleep(Duration::from_secs(1)).await; + Err(anyhow::Error::msg("Network task stopped")) + })), + ..Default::default() + }; let error = run_threads(config, resources, tasks).await.expect_err("Should be an error."); - assert_eq!("relative URL without a base", error.to_string()); + assert_eq!("Network task stopped", error.to_string()); } // TODO(dvir): use here metrics names from the storage instead of hard-coded ones. 
This will be done diff --git a/crates/starknet_api/src/block.rs b/crates/starknet_api/src/block.rs index 1a1fc906882..9e082d6845c 100644 --- a/crates/starknet_api/src/block.rs +++ b/crates/starknet_api/src/block.rs @@ -352,19 +352,28 @@ pub struct GasPricePerToken { pub struct GasPrice(pub u128); impl GasPrice { - pub fn wei_to_fri(self, eth_to_fri_rate: u128) -> GasPrice { + pub fn wei_to_fri(self, eth_to_fri_rate: u128) -> Result { // We use integer division since wei * eth_to_fri_rate is expected to be high enough to not // cause too much precision loss. - self.checked_mul_u128(eth_to_fri_rate) - .expect("Gas price is too high.") + Ok(self + .checked_mul_u128(eth_to_fri_rate) + .ok_or_else(|| { + StarknetApiError::GasPriceConversionError("Gas price is too high".to_string()) + })? .checked_div(WEI_PER_ETH) - .expect("ETH to FRI rate must be non-zero") + .expect("WEI_PER_ETH must be non-zero")) } - pub fn fri_to_wei(self, eth_to_fri_rate: u128) -> GasPrice { + pub fn fri_to_wei(self, eth_to_fri_rate: u128) -> Result { self.checked_mul_u128(WEI_PER_ETH) - .expect("Gas price is too high") + .ok_or_else(|| { + StarknetApiError::GasPriceConversionError("Gas price is too high".to_string()) + })? 
.checked_div(eth_to_fri_rate) - .expect("FRI to ETH rate must be non-zero") + .ok_or_else(|| { + StarknetApiError::GasPriceConversionError( + "FRI to ETH rate must be non-zero".to_string(), + ) + }) } } diff --git a/crates/starknet_api/src/executable_transaction.rs b/crates/starknet_api/src/executable_transaction.rs index e8edeed851a..04e25c52451 100644 --- a/crates/starknet_api/src/executable_transaction.rs +++ b/crates/starknet_api/src/executable_transaction.rs @@ -205,7 +205,7 @@ impl DeclareTransaction { pub fn is_bootstrap_declare(&self, charge_fee: bool) -> bool { if let crate::transaction::DeclareTransaction::V3(tx) = &self.tx { return tx.sender_address == Self::bootstrap_address() - && tx.nonce == Nonce::default() + && tx.nonce == Nonce(Felt::ZERO) && !charge_fee; } false diff --git a/crates/starknet_api/src/lib.rs b/crates/starknet_api/src/lib.rs index 2471acbe6df..69b5685457c 100644 --- a/crates/starknet_api/src/lib.rs +++ b/crates/starknet_api/src/lib.rs @@ -58,6 +58,8 @@ pub enum StarknetApiError { InvalidStarknetVersion(Vec), #[error("NonzeroGasPrice cannot be zero.")] ZeroGasPrice, + #[error("Gas price conversion error: {0}")] + GasPriceConversionError(String), #[error( "Sierra program length must be > 0 for Cairo1, and == 0 for Cairo0. Got: \ {sierra_program_length:?} for contract class version {contract_class_version:?}" diff --git a/crates/starknet_committer_and_os_cli/Cargo.toml b/crates/starknet_committer_and_os_cli/Cargo.toml index f9d71211989..bb13b86ff90 100644 --- a/crates/starknet_committer_and_os_cli/Cargo.toml +++ b/crates/starknet_committer_and_os_cli/Cargo.toml @@ -22,15 +22,12 @@ tempfile.workspace = true # it exists. 
apollo_starknet_os_program = { workspace = true, features = ["dump_source_files", "test_programs"] } blake2s.workspace = true +blockifier.workspace = true cairo-lang-starknet-classes.workspace = true cairo-vm = { workspace = true, features = [ "cairo-0-data-availability-hints", "cairo-0-secp-hints", ] } -# Should be moved under `testing` feature, when it exists. -num-bigint = { workspace = true, features = ["rand"] } -# Should be moved under `testing` feature, when it exists. -num-integer.workspace = true clap = { workspace = true, features = ["cargo", "derive"] } derive_more.workspace = true ethnum.workspace = true diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/commands.rs b/crates/starknet_committer_and_os_cli/src/os_cli/commands.rs index 05d1b252d18..3760b8143a3 100644 --- a/crates/starknet_committer_and_os_cli/src/os_cli/commands.rs +++ b/crates/starknet_committer_and_os_cli/src/os_cli/commands.rs @@ -85,11 +85,11 @@ pub(crate) fn parse_and_run_os(input_path: String, output_path: String) { let OsCliInput { layout, os_hints, cairo_pie_zip_path } = load_input(input_path); validate_os_input(&os_hints.os_input); - let StarknetOsRunnerOutput { os_output, cairo_pie, unused_hints } = + let StarknetOsRunnerOutput { cairo_pie, da_segment, metrics, unused_hints, .. } = run_os_stateless(layout, os_hints) .unwrap_or_else(|err| panic!("OS run failed. Error: {err}")); serialize_runner_output( - &OsCliOutput { os_output, unused_hints }, + &OsCliOutput { da_segment, metrics: metrics.into(), unused_hints }, output_path, &cairo_pie, cairo_pie_zip_path, @@ -102,11 +102,11 @@ pub(crate) fn parse_and_run_aggregator(input_path: String, output_path: String) load_input(input_path); // TODO(Aner): Validate the aggregator input. - let StarknetAggregatorRunnerOutput { aggregator_output, cairo_pie, unused_hints } = + let StarknetAggregatorRunnerOutput { cairo_pie, unused_hints, .. 
} = run_aggregator(layout, aggregator_input) .unwrap_or_else(|err| panic!("Aggregator run failed. Error: {err}")); serialize_runner_output( - &AggregatorCliOutput { aggregator_output, unused_hints }, + &AggregatorCliOutput { unused_hints }, output_path, &cairo_pie, cairo_pie_zip_path, diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/run_os_cli.rs b/crates/starknet_committer_and_os_cli/src/os_cli/run_os_cli.rs index 52aeba43076..228b009f967 100644 --- a/crates/starknet_committer_and_os_cli/src/os_cli/run_os_cli.rs +++ b/crates/starknet_committer_and_os_cli/src/os_cli/run_os_cli.rs @@ -1,8 +1,12 @@ use std::collections::HashSet; +use blockifier::execution::syscalls::vm_syscall_utils::SyscallUsageMap; +use cairo_vm::types::relocatable::MaybeRelocatable; +use cairo_vm::vm::runners::cairo_runner::ExecutionResources; use clap::{Parser, Subcommand}; use serde::Serialize; use starknet_os::hints::enum_definition::AllHints; +use starknet_os::metrics::OsMetrics; use starknet_types_core::felt::Felt; use tracing::info; use tracing::level_filters::LevelFilter; @@ -86,14 +90,60 @@ pub async fn run_os_cli( } } +/// Intermediate metrics struct to properly serialize to a python-deserializable format. +#[derive(Serialize)] +pub struct OsCliRunInfo { + // Represent `MaybeRelocatable` values as `Vec` for serialization. + pub pc: Vec, + pub ap: Vec, + pub fp: Vec, + pub used_memory_cells: usize, +} + +/// Intermediate metrics struct to properly serialize to a python-deserializable format. 
+#[derive(Serialize)] +pub(crate) struct OsCliMetrics { + pub syscall_usages: Vec, + pub deprecated_syscall_usages: Vec, + pub run_info: OsCliRunInfo, + pub execution_resources: ExecutionResources, +} + +fn maybe_relocatable_to_vec(maybe_relocatable: &MaybeRelocatable) -> Vec { + match maybe_relocatable { + MaybeRelocatable::RelocatableValue(relocatable) => { + vec![relocatable.segment_index.into(), relocatable.offset.into()] + } + MaybeRelocatable::Int(int_value) => { + vec![*int_value] + } + } +} + +impl From for OsCliMetrics { + fn from(metrics: OsMetrics) -> Self { + Self { + syscall_usages: metrics.syscall_usages, + deprecated_syscall_usages: metrics.deprecated_syscall_usages, + run_info: OsCliRunInfo { + pc: maybe_relocatable_to_vec(&metrics.run_info.pc), + ap: maybe_relocatable_to_vec(&metrics.run_info.ap), + fp: maybe_relocatable_to_vec(&metrics.run_info.fp), + used_memory_cells: metrics.run_info.used_memory_cells, + }, + execution_resources: metrics.execution_resources, + } + } +} + #[derive(Serialize)] pub(crate) struct OsCliOutput { - pub(crate) os_output: Vec, + pub(crate) da_segment: Option>, + pub(crate) metrics: OsCliMetrics, pub unused_hints: HashSet, } #[derive(Serialize)] pub(crate) struct AggregatorCliOutput { - pub(crate) aggregator_output: Vec, pub unused_hints: HashSet, } diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/tests.rs b/crates/starknet_committer_and_os_cli/src/os_cli/tests.rs index c84899a8a06..4ed9c7728af 100644 --- a/crates/starknet_committer_and_os_cli/src/os_cli/tests.rs +++ b/crates/starknet_committer_and_os_cli/src/os_cli/tests.rs @@ -1,5 +1,2 @@ -pub mod aliases; -pub mod bls_field; pub mod python_tests; pub mod types; -pub mod utils; diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/tests/aliases.rs b/crates/starknet_committer_and_os_cli/src/os_cli/tests/aliases.rs deleted file mode 100644 index efa8527f0dd..00000000000 --- a/crates/starknet_committer_and_os_cli/src/os_cli/tests/aliases.rs +++ 
/dev/null @@ -1,36 +0,0 @@ -use std::collections::HashMap; - -use starknet_os::test_utils::cairo_runner::EntryPointRunnerConfig; -use tracing::info; - -use crate::os_cli::tests::types::OsPythonTestResult; -use crate::os_cli::tests::utils::test_cairo_function; - -// TODO(Amos): This test is incomplete. Add the rest of the test cases and remove this todo. -pub(crate) fn aliases_test(input: &str) -> OsPythonTestResult { - info!("Testing `test_constants`..."); - test_constants(input)?; - Ok("".to_string()) -} - -fn test_constants(input: &str) -> OsPythonTestResult { - let max_non_compressed_contract_address = 15; - let alias_counter_storage_key = 0; - let initial_available_alias = 128; - let alias_contract_address = 2; - test_cairo_function( - &EntryPointRunnerConfig::default(), - input, - "test_constants", - &[ - max_non_compressed_contract_address.into(), - alias_counter_storage_key.into(), - initial_available_alias.into(), - alias_contract_address.into(), - ], - &[], - &[], - &[], - HashMap::new(), - ) -} diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/tests/python_tests.rs b/crates/starknet_committer_and_os_cli/src/os_cli/tests/python_tests.rs index ab302874af4..d0a2f45b894 100644 --- a/crates/starknet_committer_and_os_cli/src/os_cli/tests/python_tests.rs +++ b/crates/starknet_committer_and_os_cli/src/os_cli/tests/python_tests.rs @@ -2,17 +2,14 @@ use blake2s::encode_felts_to_u32s; use starknet_os::test_utils::errors::OsSpecificTestError; use starknet_types_core::felt::Felt; -use crate::os_cli::commands::{validate_os_input, OsCliInput}; -use crate::os_cli::tests::aliases::aliases_test; -use crate::os_cli::tests::bls_field::test_bls_field; +use crate::os_cli::commands::{validate_os_input, AggregatorCliInput, OsCliInput}; use crate::os_cli::tests::types::{OsPythonTestError, OsPythonTestResult}; use crate::shared_utils::types::{PythonTestError, PythonTestRunner}; // Enum representing different Python tests. 
pub enum OsPythonTestRunner { - AliasesTest, - BlsFieldTest, - InputDeserialization, + AggregatorInputDeserialization, + OsInputDeserialization, EncodeFelts, } @@ -22,9 +19,8 @@ impl TryFrom for OsPythonTestRunner { fn try_from(value: String) -> Result { match value.as_str() { - "aliases_test" => Ok(Self::AliasesTest), - "bls_field_test" => Ok(Self::BlsFieldTest), - "input_deserialization" => Ok(Self::InputDeserialization), + "aggregator_input_deserialization" => Ok(Self::AggregatorInputDeserialization), + "os_input_deserialization" => Ok(Self::OsInputDeserialization), "encode_felts" => Ok(Self::EncodeFelts), _ => Err(PythonTestError::UnknownTestName(value)), } @@ -35,9 +31,12 @@ impl PythonTestRunner for OsPythonTestRunner { type SpecificError = OsSpecificTestError; async fn run(&self, input: Option<&str>) -> OsPythonTestResult { match self { - Self::AliasesTest => aliases_test(Self::non_optional_input(input)?), - Self::BlsFieldTest => test_bls_field(Self::non_optional_input(input)?), - Self::InputDeserialization => input_deserialization(Self::non_optional_input(input)?), + Self::AggregatorInputDeserialization => { + aggregator_input_deserialization(Self::non_optional_input(input)?) + } + Self::OsInputDeserialization => { + os_input_deserialization(Self::non_optional_input(input)?) + } Self::EncodeFelts => { let felts: Vec = serde_json::from_str(Self::non_optional_input(input)?)?; Ok(format!("{:?}", encode_felts_to_u32s(felts))) @@ -46,9 +45,16 @@ impl PythonTestRunner for OsPythonTestRunner { } } -/// Deserialize the input string into an `Input` struct. -fn input_deserialization(input_str: &str) -> OsPythonTestResult { +/// Deserialize the OS input string into an `OsInput` struct. 
+fn os_input_deserialization(input_str: &str) -> OsPythonTestResult { let input = serde_json::from_str::(input_str)?; validate_os_input(&input.os_hints.os_input); Ok("Deserialization successful".to_string()) } + +/// Deserialize the aggregator input string into an `AggregatorInput` struct. +fn aggregator_input_deserialization(input_str: &str) -> OsPythonTestResult { + let _input = serde_json::from_str::(input_str)?; + // TODO(Aner): Validate the aggregator input. + Ok("Deserialization successful".to_string()) +} diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/tests/utils.rs b/crates/starknet_committer_and_os_cli/src/os_cli/tests/utils.rs deleted file mode 100644 index 09abb8a838e..00000000000 --- a/crates/starknet_committer_and_os_cli/src/os_cli/tests/utils.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::any::Any; -use std::collections::HashMap; -use std::sync::LazyLock; - -use ethnum::U256; -use num_bigint::{BigInt, Sign}; -use rand::rngs::StdRng; -use rand::SeedableRng; -use starknet_os::hints::hint_implementation::kzg::utils::BASE; -use starknet_os::test_utils::cairo_runner::{EndpointArg, EntryPointRunnerConfig, ImplicitArg}; -use starknet_os::test_utils::errors::OsSpecificTestError; -use starknet_os::test_utils::utils::run_cairo_function_and_check_result; -use starknet_types_core::felt::Felt; - -use crate::os_cli::tests::types::OsPythonTestResult; -use crate::shared_utils::types::PythonTestError; - -// 2**251 + 17 * 2**192 + 1 -pub static DEFAULT_PRIME: LazyLock = LazyLock::new(|| { - BigInt::from_bytes_be( - Sign::Plus, - &(U256::from(2_u32).pow(251) + 17 * U256::from(2_u32).pow(192) + 1).to_be_bytes(), - ) -}); - -#[allow(clippy::too_many_arguments)] -pub(crate) fn test_cairo_function( - runner_config: &EntryPointRunnerConfig, - program_str: &str, - function_name: &str, - explicit_args: &[EndpointArg], - implicit_args: &[ImplicitArg], - expected_explicit_retdata: &[EndpointArg], - expected_implicit_retdata: &[EndpointArg], - hint_locals: HashMap>, -) 
-> OsPythonTestResult { - run_cairo_function_and_check_result( - runner_config, - program_str, - function_name, - explicit_args, - implicit_args, - expected_explicit_retdata, - expected_implicit_retdata, - hint_locals, - ) - .map_err(|error| { - PythonTestError::SpecificError(OsSpecificTestError::Cairo0EntryPointRunner(error)) - })?; - Ok("".to_string()) -} - -pub(crate) fn seeded_random_prng() -> StdRng { - StdRng::seed_from_u64(42) -} - -/// Returns the lift of the given field element, val, as a `BigInt` in the range -/// (-prime/2, prime/2). -// TODO(Amos): Use cairo VM version if it is made public: -// https://github.com/lambdaclass/cairo-vm/blob/052e7cef977b336305c869fccbf24e1794b116ff/vm/src/hint_processor/builtin_hint_processor/kzg_da/mod.rs#L90 -fn as_int(val: &Felt, prime: &BigInt) -> BigInt { - let val = val.to_bigint(); - if val < (prime / BigInt::from(2)) { - return val.clone(); - } - val - prime -} - -/// Takes a BigInt3 struct represented by the limbs (d0, d1, d2) of -/// and reconstructs the corresponding integer (see split_bigint3()). -/// Note that the limbs do not have to be in the range [0, BASE). -/// Prime is used to handle negative values of the limbs. 
-// TODO(Amos): Use cairo VM version if it is made public: -// https://github.com/lambdaclass/cairo-vm/blob/052e7cef977b336305c869fccbf24e1794b116ff/vm/src/hint_processor/builtin_hint_processor/kzg_da/mod.rs#L99 -pub fn pack_bigint3(limbs: &[Felt]) -> BigInt { - assert!(limbs.len() == 3, "Expected 3 limbs, got {}", limbs.len()); - limbs.iter().enumerate().fold(BigInt::ZERO, |acc, (i, &limb)| { - acc + as_int(&limb, &DEFAULT_PRIME) * BASE.pow(i.try_into().unwrap()) - }) -} diff --git a/crates/starknet_os/Cargo.toml b/crates/starknet_os/Cargo.toml index 1519d492f38..286b72dac04 100644 --- a/crates/starknet_os/Cargo.toml +++ b/crates/starknet_os/Cargo.toml @@ -13,6 +13,7 @@ deserialize = [ "starknet-types-core/serde", "starknet_patricia/deserialize", ] +include_program_output = [] testing = ["blockifier/testing", "starknet_patricia/testing"] [dependencies] @@ -37,6 +38,7 @@ indexmap.workspace = true indoc.workspace = true log.workspace = true num-bigint.workspace = true +num-integer.workspace = true num-traits.workspace = true papyrus_common.workspace = true paste.workspace = true @@ -53,12 +55,14 @@ strum_macros.workspace = true thiserror.workspace = true [dev-dependencies] +apollo_starknet_os_program = { workspace = true, features = ["test_programs"] } assert_matches.workspace = true blockifier = { workspace = true, features = ["testing"] } blockifier_test_utils.workspace = true -num-integer.workspace = true +ethnum.workspace = true rand.workspace = true rstest.workspace = true +starknet_committer.workspace = true starknet_patricia = { workspace = true, features = ["testing"] } [lints] diff --git a/crates/starknet_os/src/constants_test.rs b/crates/starknet_os/src/constants_test.rs new file mode 100644 index 00000000000..b831813f939 --- /dev/null +++ b/crates/starknet_os/src/constants_test.rs @@ -0,0 +1,64 @@ +use apollo_starknet_os_program::OS_PROGRAM; +use blockifier::abi::constants::{L1_TO_L2_MSG_HEADER_SIZE, L2_TO_L1_MSG_HEADER_SIZE}; +use 
cairo_vm::types::program::Program; +use starknet_api::core::L2_ADDRESS_UPPER_BOUND; +use starknet_committer::hash_function::hash::TreeHashFunctionImpl; +use starknet_types_core::felt::Felt; + +use crate::hints::hint_implementation::kzg::utils::FIELD_ELEMENTS_PER_BLOB; +use crate::hints::vars::CairoStruct; +use crate::vm_utils::get_size_of_cairo_struct; + +fn get_from_program(program: &Program, const_path: &str) -> Felt { + program + .constants + .get(const_path) + .cloned() + .unwrap_or_else(|| panic!("Constant {const_path} not found in the program.")) +} + +#[test] +fn test_l2_address_bound() { + assert_eq!( + get_from_program(&OS_PROGRAM, "starkware.starknet.common.storage.ADDR_BOUND"), + (*L2_ADDRESS_UPPER_BOUND).into() + ); +} + +#[test] +fn test_blob_constants() { + assert_eq!( + get_from_program( + &OS_PROGRAM, + "starkware.starknet.core.os.data_availability.commitment.BLOB_LENGTH" + ), + FIELD_ELEMENTS_PER_BLOB.into() + ); +} + +#[test] +fn test_contract_class_hash_version() { + assert_eq!( + get_from_program( + &OS_PROGRAM, + "starkware.starknet.core.os.state.commitment.CONTRACT_CLASS_LEAF_VERSION" + ), + Felt::from_hex(TreeHashFunctionImpl::CONTRACT_CLASS_LEAF_V0).unwrap() + ); +} + +#[test] +fn test_l1_to_l2_message_header_size() { + assert_eq!( + get_size_of_cairo_struct(CairoStruct::L1ToL2MessageHeader, &*OS_PROGRAM).unwrap(), + L1_TO_L2_MSG_HEADER_SIZE + ); +} + +#[test] +fn test_l2_to_l1_message_header_size() { + assert_eq!( + get_size_of_cairo_struct(CairoStruct::L2ToL1MessageHeader, &*OS_PROGRAM).unwrap(), + L2_TO_L1_MSG_HEADER_SIZE + ); +} diff --git a/crates/starknet_os/src/errors.rs b/crates/starknet_os/src/errors.rs index 3d16f68a7c6..44aa2a9b1ce 100644 --- a/crates/starknet_os/src/errors.rs +++ b/crates/starknet_os/src/errors.rs @@ -4,6 +4,7 @@ use cairo_vm::vm::errors::vm_errors::VirtualMachineError; use cairo_vm::vm::errors::vm_exception::VmException; use crate::io::os_input::OsInputError; +use crate::io::os_output::OsOutputError; 
#[derive(Debug, thiserror::Error)] pub enum StarknetOsError { @@ -12,6 +13,8 @@ pub enum StarknetOsError { #[error(transparent)] OsInput(#[from] OsInputError), #[error(transparent)] + OsOutput(#[from] OsOutputError), + #[error(transparent)] RunnerError(#[from] RunnerError), #[error(transparent)] VmException(#[from] Box), diff --git a/crates/starknet_os/src/hint_processor/snos_hint_processor.rs b/crates/starknet_os/src/hint_processor/snos_hint_processor.rs index 23d82132c33..a9788b54684 100644 --- a/crates/starknet_os/src/hint_processor/snos_hint_processor.rs +++ b/crates/starknet_os/src/hint_processor/snos_hint_processor.rs @@ -48,6 +48,7 @@ use crate::hints::vars::CairoStruct; use crate::io::os_input::{ CachedStateInput, CommitmentInfo, + HintedClassHash, OsBlockInput, OsHintsConfig, OsInputError, @@ -104,6 +105,20 @@ impl<'a, S: StateReader> ExecutionHelpersManager<'a, S> { pub fn n_helpers(&self) -> usize { self.execution_helpers.len() } + + pub(crate) fn get_syscall_usages(&self) -> Vec { + self.execution_helpers + .iter() + .map(|helper| helper.syscall_hint_processor.syscall_usage.clone()) + .collect() + } + + pub(crate) fn get_deprecated_syscall_usages(&self) -> Vec { + self.execution_helpers + .iter() + .map(|helper| helper.deprecated_syscall_hint_processor.syscalls_usage.clone()) + .collect() + } } pub struct SnosHintProcessor<'a, S: StateReader> { @@ -111,7 +126,8 @@ pub struct SnosHintProcessor<'a, S: StateReader> { pub(crate) program: &'a Program, pub(crate) execution_helpers_manager: ExecutionHelpersManager<'a, S>, pub(crate) os_hints_config: OsHintsConfig, - pub(crate) deprecated_compiled_classes_iter: IntoIter, + pub(crate) deprecated_compiled_classes_iter: + IntoIter, pub(crate) deprecated_class_hashes: HashSet, pub(crate) compiled_classes: BTreeMap, pub(crate) state_update_pointers: Option, @@ -135,7 +151,7 @@ impl<'a, S: StateReader> SnosHintProcessor<'a, S> { os_hints_config: OsHintsConfig, os_block_inputs: Vec<&'a OsBlockInput>, 
cached_state_inputs: Vec, - deprecated_compiled_classes: BTreeMap, + deprecated_compiled_classes: BTreeMap, compiled_classes: BTreeMap, state_readers: Vec, ) -> Result { @@ -327,7 +343,7 @@ impl<'a> SnosHintProcessor<'a, DictStateReader> { let state_inputs = vec![os_state_input.unwrap_or_default()]; let os_hints_config = os_hints_config.unwrap_or_default(); - SnosHintProcessor::new( + let mut hint_processor = SnosHintProcessor::new( os_program, os_hints_config, block_inputs, @@ -335,7 +351,9 @@ impl<'a> SnosHintProcessor<'a, DictStateReader> { BTreeMap::new(), BTreeMap::new(), vec![state_reader], - ) + )?; + hint_processor.execution_helpers_manager.increment_current_helper_index(); + Ok(hint_processor) } } diff --git a/crates/starknet_os/src/hints/enum_definition_test.rs b/crates/starknet_os/src/hints/enum_definition_test.rs index 978d59d8c07..127e5498e26 100644 --- a/crates/starknet_os/src/hints/enum_definition_test.rs +++ b/crates/starknet_os/src/hints/enum_definition_test.rs @@ -11,13 +11,18 @@ use cairo_vm::hint_processor::builtin_hint_processor::hint_code::HINT_CODES; use cairo_vm::hint_processor::builtin_hint_processor::kzg_da::WRITE_DIVMOD_SEGMENT; use cairo_vm::hint_processor::builtin_hint_processor::secp::cairo0_hints::CAIRO0_HINT_CODES; use cairo_vm::types::program::Program; +use rstest::{fixture, rstest}; use starknet_api::deprecated_contract_class::ContractClass; use strum::IntoEnumIterator; use crate::hints::enum_definition::{ AggregatorHint, AllHints, + CommonHint, DeprecatedSyscallHint, + HintExtension, + OsHint, + StatelessHint, TEST_HINT_PREFIX, }; use crate::hints::types::HintEnum; @@ -74,14 +79,62 @@ fn unknown_hints_for_program(program: &Program) -> HashSet { .collect() } -#[test] +#[fixture] +fn vm_hints() -> HashSet { + VM_HINTS.iter().map(|s| s.to_string()).collect() +} + +#[fixture] +fn common_hints() -> HashSet { + CommonHint::iter().map(|hint| hint.to_str().to_string()).collect() +} + +#[fixture] +fn stateless_hints() -> HashSet { + 
StatelessHint::iter().map(|hint| hint.to_str().to_string()).collect() +} + +#[fixture] +fn os_hints() -> HashSet { + OsHint::iter().map(|hint| hint.to_str().to_string()).collect() +} + +#[fixture] +fn aggregator_hints() -> HashSet { + AggregatorHint::iter().map(|hint| hint.to_str().to_string()).collect() +} + +#[fixture] +fn hint_extension() -> HashSet { + HintExtension::iter().map(|hint| hint.to_str().to_string()).collect() +} + +#[fixture] +fn os_program_hints() -> HashSet { + program_hints(&OS_PROGRAM) +} + +#[fixture] +fn aggregator_program_hints() -> HashSet { + program_hints(&AGGREGATOR_PROGRAM) +} + +#[fixture] +fn vm_union_stateless( + vm_hints: HashSet, + stateless_hints: HashSet, +) -> HashSet { + vm_hints.union(&stateless_hints).cloned().collect() +} + +#[rstest] fn test_hint_strings_are_unique() { let all_hints = AllHints::all_iter().map(|hint| hint.to_str()).collect::>(); let all_hints_set: HashSet<&&str> = HashSet::from_iter(all_hints.iter()); assert_eq!(all_hints.len(), all_hints_set.len(), "Duplicate hint strings."); } -#[test] +#[rstest] fn test_from_str_for_all_hints() { for hint in AllHints::all_iter() { let hint_str = hint.to_str(); @@ -90,7 +143,7 @@ fn test_from_str_for_all_hints() { } } -#[test] +#[rstest] fn test_syscall_compatibility_with_blockifier() { let syscall_hint_strings = DeprecatedSyscallHint::iter().map(|hint| hint.to_str()).collect::>(); @@ -103,7 +156,7 @@ fn test_syscall_compatibility_with_blockifier() { ); } -#[test] +#[rstest] fn test_all_hints_are_known() { let unknown_os_hints = unknown_hints_for_program(&OS_PROGRAM); let unknown_aggregator_hints = unknown_hints_for_program(&AGGREGATOR_PROGRAM); @@ -118,11 +171,13 @@ fn test_all_hints_are_known() { /// Tests that we do not keep any hint including the TEST_HINT_PREFIX as a prefix in the OS or /// aggregator code. 
-#[test] -fn test_the_debug_hint_isnt_merged() { - let os_hints = program_hints(&OS_PROGRAM); - let aggregator_hints = program_hints(&AGGREGATOR_PROGRAM); - let all_program_hints: HashSet<&String> = os_hints.union(&aggregator_hints).collect(); +#[rstest] +fn test_the_debug_hint_isnt_merged( + os_program_hints: HashSet, + aggregator_program_hints: HashSet, +) { + let all_program_hints: HashSet<&String> = + os_program_hints.union(&aggregator_program_hints).collect(); let debug_hints: HashSet<_> = all_program_hints.iter().filter(|hint| hint.trim().starts_with(TEST_HINT_PREFIX)).collect(); @@ -134,11 +189,13 @@ fn test_the_debug_hint_isnt_merged() { ); } -#[test] -fn test_all_hints_are_used() { - let os_hints = program_hints(&OS_PROGRAM); - let aggregator_hints = program_hints(&AGGREGATOR_PROGRAM); - let all_program_hints: HashSet<&String> = os_hints.union(&aggregator_hints).collect(); +#[rstest] +fn test_all_hints_are_used( + os_program_hints: HashSet, + aggregator_program_hints: HashSet, +) { + let all_program_hints: HashSet<&String> = + os_program_hints.union(&aggregator_program_hints).collect(); let redundant_hints: HashSet<_> = AllHints::all_iter() .filter(|hint| { // Skip syscalls; they do not appear in the OS code. @@ -153,21 +210,9 @@ fn test_all_hints_are_used() { ); } -#[test] -fn test_no_aggregator_hints_in_os() { - let aggregator_hints = - AggregatorHint::iter().map(|hint| hint.to_str().to_owned()).collect::>(); - let os_program_hints = program_hints(&OS_PROGRAM); - let intersection = aggregator_hints.intersection(&os_program_hints).collect::>(); - assert!( - intersection.is_empty(), - "The following Aggregator hints are found in the OS program: {intersection:#?}." - ); -} - /// Tests that the set of deprecated syscall hints is consistent with the enum of deprecated /// syscalls. 
-#[test] +#[rstest] fn test_deprecated_syscall_hint_consistency() { let deprecated_syscall_hints: Vec = DeprecatedSyscallHint::iter().collect(); @@ -220,10 +265,124 @@ fn test_deprecated_syscall_hint_consistency() { ); } +#[rstest] +/// If OP = OS program hints, AP = aggregator program hints, VM = VM hints, +/// S = `StatelessHint`, C = `CommonHint`, then we verify that: +/// C = (OP ∩ AP) \ VM \ S +fn test_common_hints_in_both_os_and_aggregator_programs( + common_hints: HashSet, + vm_union_stateless: HashSet, + os_program_hints: HashSet, + aggregator_program_hints: HashSet, +) { + let common_program_hints: HashSet = os_program_hints + .intersection(&aggregator_program_hints) + .filter(|hint| !vm_union_stateless.contains(hint.as_str())) + .cloned() + .collect(); + + if common_program_hints != common_hints { + let missing_in_common_hints: HashSet<_> = + common_program_hints.difference(&common_hints).cloned().collect(); + let extra_in_common_hints: HashSet<_> = + common_hints.difference(&common_program_hints).cloned().collect(); + panic!( + "The Common hints should contain exactly the common program hints, excluding VM and \ + stateless hints. 
Missing in Common hints: {missing_in_common_hints:#?}, Extra in \ + Common hints: {extra_in_common_hints:#?}" + ); + } +} + +#[rstest] +/// If OP = OS program hints, AP = aggregator program hints, VM = VM hints, +/// S = `StatelessHint`, then we verify that: +/// S ⊆ (OP ∪ AP) \ VM +fn test_stateless_hints_in_os_or_aggregator_programs( + stateless_hints: HashSet, + os_program_hints: HashSet, + aggregator_program_hints: HashSet, + vm_hints: HashSet, +) { + let all_program_hints_excluding_vm: HashSet = os_program_hints + .union(&aggregator_program_hints) + .filter(|hint| !vm_hints.contains(hint.as_str())) + .cloned() + .collect(); + let difference = stateless_hints + .difference(&all_program_hints_excluding_vm) + .cloned() + .collect::>(); + + assert!( + difference.is_empty(), + "The following stateless hints are not present in the OS or Aggregator programs: \ + {difference:#?}." + ); +} + +#[rstest] +/// If A = `AggregatorHint` enum hints, OP = OS program hints, AP = aggregator program hints, VM = +/// VM hints, S = `StatelessHint`, then we verify that: +/// A = AP \ (VM ∪ OP ∪ S) +fn test_aggregator_hints_are_unique_aggregator_program_hints( + aggregator_hints: HashSet, + os_program_hints: HashSet, + aggregator_program_hints: HashSet, + vm_union_stateless: HashSet, +) { + let union_os_program_vm_stateless: HashSet = + vm_union_stateless.union(&os_program_hints).cloned().collect(); + let unique_aggregator_program_hints: HashSet = + aggregator_program_hints.difference(&union_os_program_vm_stateless).cloned().collect(); + + if unique_aggregator_program_hints != aggregator_hints { + let missing_in_aggregator_hints: HashSet<_> = + unique_aggregator_program_hints.difference(&aggregator_hints).cloned().collect(); + let extra_in_aggregator_hints: HashSet<_> = + aggregator_hints.difference(&unique_aggregator_program_hints).cloned().collect(); + panic!( + "The Aggregator hints should contain exactly the unique Aggregator program hints. 
\ + Missing in Aggregator hints: {missing_in_aggregator_hints:#?}, Extra in Aggregator \ + hints: {extra_in_aggregator_hints:#?}" + ); + } +} + +#[rstest] +/// If O = `OsHint` enum hints, OP = OS program hints, A = `AggregatorHint` enum hints, VM = VM +/// hints, S = `StatelessHint`, E = `ExtensionHint`, then we verify that: +/// O ∪ E = OP \ (AP ∪ VM ∪ S) +fn test_os_hints_are_unique_os_program_hints( + os_hints: HashSet, + hint_extension: HashSet, + os_program_hints: HashSet, + aggregator_program_hints: HashSet, + vm_union_stateless: HashSet, +) { + let union_aggregator_program_vm_stateless: HashSet = + vm_union_stateless.union(&aggregator_program_hints).cloned().collect(); + let os_union_extension: HashSet = os_hints.union(&hint_extension).cloned().collect(); + let unique_os_program_hints: HashSet = + os_program_hints.difference(&union_aggregator_program_vm_stateless).cloned().collect(); + + if unique_os_program_hints != os_union_extension { + let missing_in_os_or_extension: HashSet<_> = + unique_os_program_hints.difference(&os_union_extension).cloned().collect(); + let extra_in_os_or_extension: HashSet<_> = + os_union_extension.difference(&unique_os_program_hints).cloned().collect(); + panic!( + "The OS & Extension hints should contain exactly the unique OS program hints. Missing \ + in OS or Extension hints: {missing_in_os_or_extension:#?}, Extra in OS or Extension \ + hints: {extra_in_os_or_extension:#?}" + ); + } +} + /// Tests that the deprecated syscall hint strings match the strings in compiled Cairo0 contracts. /// If a new deprecated syscall was added, it should be added to the `other_syscalls` function of /// the Cairo0 test contract. 
-#[test] +#[rstest] fn test_deprecated_syscall_hint_strings() { let test_contract: ContractClass = serde_json::from_str(&FeatureContract::TestContract(CairoVersion::Cairo0).get_raw_class()) diff --git a/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/implementation.rs b/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/implementation.rs index 3a6cbd56c4b..5fc56e02dab 100644 --- a/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/implementation.rs +++ b/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/implementation.rs @@ -15,6 +15,7 @@ use starknet_api::deprecated_contract_class::ContractClass; use crate::hint_processor::snos_hint_processor::SnosHintProcessor; use crate::hints::error::{OsHintError, OsHintExtensionResult, OsHintResult}; +use crate::hints::hint_implementation::deprecated_compiled_class::utils::ContractClassWithHintedHash; use crate::hints::types::HintArgs; use crate::hints::vars::{CairoStruct, Ids, Scope}; use crate::vm_utils::{get_address_of_nested_fields, LoadCairoObject}; @@ -38,13 +39,23 @@ pub(crate) fn load_deprecated_class_inner( hint_processor: &mut SnosHintProcessor<'_, S>, HintArgs { vm, exec_scopes, ids_data, ap_tracking, constants }: HintArgs<'_>, ) -> OsHintResult { - let (class_hash, deprecated_class) = + let (class_hash, (hinted_class_hash, deprecated_class)) = hint_processor.deprecated_compiled_classes_iter.next().ok_or_else(|| { OsHintError::EndOfIterator { item_type: "deprecated_compiled_classes".to_string() } })?; let dep_class_base = vm.add_memory_segment(); - deprecated_class.load_into(vm, hint_processor.program, dep_class_base, constants)?; + let deprecated_class_with_hinted_hash = ContractClassWithHintedHash { + contract_class: &deprecated_class, + hinted_class_hash, + class_hash, + }; + deprecated_class_with_hinted_hash.load_into( + vm, + hint_processor.program, + dep_class_base, + constants, + )?; 
exec_scopes.insert_value(Scope::CompiledClassHash.into(), class_hash); exec_scopes.insert_value(Scope::CompiledClass.into(), deprecated_class); diff --git a/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/utils.rs b/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/utils.rs index 283afc01061..92a257bbf9c 100644 --- a/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/utils.rs +++ b/crates/starknet_os/src/hints/hint_implementation/deprecated_compiled_class/utils.rs @@ -4,6 +4,7 @@ use cairo_vm::serde::deserialize_program::deserialize_array_of_bigint_hex; use cairo_vm::types::relocatable::{MaybeRelocatable, Relocatable}; use cairo_vm::vm::vm_core::VirtualMachine; use starknet_api::contract_class::EntryPointType; +use starknet_api::core::ClassHash; use starknet_api::deprecated_contract_class::{ContractClass, EntryPointV0}; use starknet_types_core::felt::Felt; @@ -12,6 +13,7 @@ use crate::hints::class_hash::hinted_class_hash::{ CairoContractDefinition, }; use crate::hints::vars::{CairoStruct, Const}; +use crate::io::os_input::HintedClassHash; use crate::vm_utils::{ insert_values_to_fields, CairoSized, @@ -21,7 +23,13 @@ use crate::vm_utils::{ VmUtilsResult, }; -impl LoadCairoObject for ContractClass { +pub(crate) struct ContractClassWithHintedHash<'a> { + pub(crate) contract_class: &'a ContractClass, + pub(crate) hinted_class_hash: HintedClassHash, + pub(crate) class_hash: ClassHash, +} + +impl LoadCairoObject for ContractClassWithHintedHash<'_> { fn load_into( &self, vm: &mut VirtualMachine, @@ -29,16 +37,23 @@ impl LoadCairoObject for ContractClass { address: Relocatable, constants: &HashMap, ) -> VmUtilsResult<()> { + let ContractClassWithHintedHash { contract_class, hinted_class_hash, class_hash } = self; + // Insert compiled class version field. let compiled_class_version = Const::DeprecatedCompiledClassVersion.fetch(constants)?; // Insert external entry points. 
- let (externals_list_base, externals_len) = - insert_entry_points(self, vm, identifier_getter, constants, &EntryPointType::External)?; + let (externals_list_base, externals_len) = insert_entry_points( + contract_class, + vm, + identifier_getter, + constants, + &EntryPointType::External, + )?; // Insert l1 handler entry points. let (l1_handlers_list_base, l1_handlers_len) = insert_entry_points( - self, + contract_class, vm, identifier_getter, constants, @@ -47,7 +62,7 @@ impl LoadCairoObject for ContractClass { // Insert constructor entry points. let (constructors_list_base, constructors_len) = insert_entry_points( - self, + contract_class, vm, identifier_getter, constants, @@ -55,12 +70,10 @@ impl LoadCairoObject for ContractClass { )?; // Insert builtins. - let builtins: Vec = - serde_json::from_value(self.program.builtins.clone()).map_err(|e| { - VmUtilsError::SerdeJsonDeserialize { - error: e, - value: self.program.builtins.clone(), - } + let builtins: Vec = serde_json::from_value(contract_class.program.builtins.clone()) + .map_err(|e| VmUtilsError::SerdeJsonDeserialize { + error: e, + value: contract_class.program.builtins.clone(), })?; let builtins: Vec = builtins .into_iter() @@ -71,14 +84,20 @@ impl LoadCairoObject for ContractClass { vm.load_data(builtin_list_base, &builtins)?; // Insert hinted class hash. - let contract_definition_vec = serde_json::to_vec(&self)?; + let contract_definition_vec = serde_json::to_vec(&contract_class)?; let contract_definition: CairoContractDefinition<'_> = serde_json::from_slice(&contract_definition_vec).map_err(VmUtilsError::SerdeJson)?; - let hinted_class_hash = compute_cairo_hinted_class_hash(&contract_definition)?; + let computed_hinted_class_hash = compute_cairo_hinted_class_hash(&contract_definition)?; + if hinted_class_hash != &computed_hinted_class_hash { + log::warn!( + "Hinted class hash mismatch for class {class_hash}: expected {hinted_class_hash}, \ + computed {computed_hinted_class_hash}." 
+ ); + } // Insert bytecode_ptr. - let bytecode_ptr = deserialize_array_of_bigint_hex(&self.program.data)?; + let bytecode_ptr = deserialize_array_of_bigint_hex(&contract_class.program.data)?; let bytecode_ptr_base = vm.add_memory_segment(); vm.load_data(bytecode_ptr_base, &bytecode_ptr)?; diff --git a/crates/starknet_os/src/hints/hint_implementation/stateless_compression/tests.rs b/crates/starknet_os/src/hints/hint_implementation/stateless_compression/tests.rs index 4e04e0d1b2f..722378a4eeb 100644 --- a/crates/starknet_os/src/hints/hint_implementation/stateless_compression/tests.rs +++ b/crates/starknet_os/src/hints/hint_implementation/stateless_compression/tests.rs @@ -1,20 +1,15 @@ -use std::cmp::min; use std::collections::HashSet; use assert_matches::assert_matches; use num_bigint::BigUint; -use num_integer::Integer; -use num_traits::ToPrimitive; use rstest::rstest; use starknet_types_core::felt::Felt; use super::utils::{ compress, - felt_from_bits_le, get_bucket_offsets, get_n_elms_per_felt, pack_usize_in_felts, - BitLength, BitsArray, BucketElement, BucketElement125, @@ -23,127 +18,15 @@ use super::utils::{ BucketElementTrait, Buckets, CompressionSet, - COMPRESSION_VERSION, - HEADER_ELM_BOUND, N_UNIQUE_BUCKETS, TOTAL_N_BUCKETS, }; use crate::hints::error::OsHintError; - -const HEADER_LEN: usize = 1 + 1 + TOTAL_N_BUCKETS; -// Utils - -pub fn unpack_felts( - compressed: &[Felt], - n_elms: usize, -) -> Vec> { - let n_elms_per_felt = BitLength::min_bit_length(LENGTH).unwrap().n_elems_in_felt(); - let mut result = Vec::with_capacity(n_elms); - - for felt in compressed { - let n_packed_elms = min(n_elms_per_felt, n_elms - result.len()); - for chunk in felt.to_bits_le()[0..n_packed_elms * LENGTH].chunks_exact(LENGTH) { - result.push(BitsArray(chunk.try_into().unwrap())); - } - } - - result -} - -pub fn unpack_felts_to_usize(compressed: &[Felt], n_elms: usize, elm_bound: u32) -> Vec { - let n_elms_per_felt = get_n_elms_per_felt(elm_bound); - let elm_bound_as_big = 
BigUint::from(elm_bound); - let mut result = Vec::with_capacity(n_elms); - - for felt in compressed { - let mut remaining = felt.to_biguint(); - let n_packed_elms = min(n_elms_per_felt, n_elms - result.len()); - for _ in 0..n_packed_elms { - let (new_remaining, value) = remaining.div_rem(&elm_bound_as_big); - result.push(value.to_usize().unwrap()); - remaining = new_remaining; - } - } - - result -} - -/// Decompresses the given compressed data. -pub fn decompress(compressed: &mut impl Iterator) -> Vec { - fn unpack_chunk( - compressed: &mut impl Iterator, - n_elms: usize, - ) -> Vec { - let n_elms_per_felt = BitLength::min_bit_length(LENGTH).unwrap().n_elems_in_felt(); - let n_packed_felts = n_elms.div_ceil(n_elms_per_felt); - let compressed_chunk: Vec<_> = compressed.take(n_packed_felts).collect(); - unpack_felts(&compressed_chunk, n_elms) - .into_iter() - .map(|bits: BitsArray| felt_from_bits_le(&bits.0).unwrap()) - .collect() - } - - fn unpack_chunk_to_usize( - compressed: &mut impl Iterator, - n_elms: usize, - elm_bound: u32, - ) -> Vec { - let n_elms_per_felt = get_n_elms_per_felt(elm_bound); - let n_packed_felts = n_elms.div_ceil(n_elms_per_felt); - - let compressed_chunk: Vec<_> = compressed.take(n_packed_felts).collect(); - unpack_felts_to_usize(&compressed_chunk, n_elms, elm_bound) - } - - let header = unpack_chunk_to_usize(compressed, HEADER_LEN, HEADER_ELM_BOUND); - let version = &header[0]; - assert!(version == &usize::from(COMPRESSION_VERSION), "Unsupported compression version."); - - let data_len = &header[1]; - let unique_value_bucket_lengths: Vec = header[2..2 + N_UNIQUE_BUCKETS].to_vec(); - let n_repeating_values = &header[2 + N_UNIQUE_BUCKETS]; - - let mut unique_values = Vec::new(); - unique_values.extend(compressed.take(unique_value_bucket_lengths[0])); // 252 bucket. 
- unique_values.extend(unpack_chunk::<125>(compressed, unique_value_bucket_lengths[1])); - unique_values.extend(unpack_chunk::<83>(compressed, unique_value_bucket_lengths[2])); - unique_values.extend(unpack_chunk::<62>(compressed, unique_value_bucket_lengths[3])); - unique_values.extend(unpack_chunk::<31>(compressed, unique_value_bucket_lengths[4])); - unique_values.extend(unpack_chunk::<15>(compressed, unique_value_bucket_lengths[5])); - - let repeating_value_pointers = unpack_chunk_to_usize( - compressed, - *n_repeating_values, - unique_values.len().try_into().unwrap(), - ); - - let repeating_values: Vec<_> = - repeating_value_pointers.iter().map(|ptr| unique_values[*ptr]).collect(); - - let mut all_values = unique_values; - all_values.extend(repeating_values); - - let bucket_index_per_elm: Vec = - unpack_chunk_to_usize(compressed, *data_len, TOTAL_N_BUCKETS.try_into().unwrap()); - - let all_bucket_lengths: Vec = - unique_value_bucket_lengths.into_iter().chain([*n_repeating_values]).collect(); - - let bucket_offsets = get_bucket_offsets(&all_bucket_lengths); - - let mut bucket_offset_trackers: Vec<_> = bucket_offsets; - - let mut result = Vec::new(); - for bucket_index in bucket_index_per_elm { - let offset = &mut bucket_offset_trackers[bucket_index]; - let value = all_values[*offset]; - *offset += 1; - result.push(value); - } - result -} - -// Tests +use crate::hints::hint_implementation::stateless_compression::utils::{ + decompress, + unpack_felts, + unpack_felts_to_usize, +}; #[rstest] #[case::zero([false; 10], Felt::ZERO)] diff --git a/crates/starknet_os/src/hints/hint_implementation/stateless_compression/utils.rs b/crates/starknet_os/src/hints/hint_implementation/stateless_compression/utils.rs index b7a3994fc13..bc6f34d5a29 100644 --- a/crates/starknet_os/src/hints/hint_implementation/stateless_compression/utils.rs +++ b/crates/starknet_os/src/hints/hint_implementation/stateless_compression/utils.rs @@ -1,9 +1,10 @@ use std::any::type_name; -use 
std::cmp::max; +use std::cmp::{max, min}; use std::hash::Hash; use indexmap::IndexMap; use num_bigint::BigUint; +use num_integer::Integer; use num_traits::{ToPrimitive, Zero}; use starknet_types_core::felt::Felt; use strum::EnumCount; @@ -20,6 +21,7 @@ pub(crate) const N_UNIQUE_BUCKETS: usize = BitLength::COUNT; pub(crate) const TOTAL_N_BUCKETS: usize = N_UNIQUE_BUCKETS + 1; pub(crate) const MAX_N_BITS: usize = 251; +const HEADER_LEN: usize = 1 + 1 + TOTAL_N_BUCKETS; #[derive(Debug, Display, strum_macros::EnumCount)] pub(crate) enum BitLength { @@ -480,3 +482,114 @@ pub(crate) fn get_bucket_offsets(bucket_lengths: &[usize]) -> Vec { offsets } + +pub fn unpack_felts( + compressed: &[Felt], + n_elms: usize, +) -> Vec> { + let n_elms_per_felt = BitLength::min_bit_length(LENGTH).unwrap().n_elems_in_felt(); + let mut result = Vec::with_capacity(n_elms); + + for felt in compressed { + let n_packed_elms = min(n_elms_per_felt, n_elms - result.len()); + for chunk in felt.to_bits_le()[0..n_packed_elms * LENGTH].chunks_exact(LENGTH) { + result.push(BitsArray(chunk.try_into().unwrap())); + } + } + + result +} + +pub fn unpack_felts_to_usize(compressed: &[Felt], n_elms: usize, elm_bound: u32) -> Vec { + let n_elms_per_felt = get_n_elms_per_felt(elm_bound); + let elm_bound_as_big = BigUint::from(elm_bound); + let mut result = Vec::with_capacity(n_elms); + + for felt in compressed { + let mut remaining = felt.to_biguint(); + let n_packed_elms = min(n_elms_per_felt, n_elms - result.len()); + for _ in 0..n_packed_elms { + let (new_remaining, value) = remaining.div_rem(&elm_bound_as_big); + result.push(value.to_usize().unwrap()); + remaining = new_remaining; + } + } + + result +} + +/// Decompresses the given compressed data. 
+#[allow(dead_code)] +pub fn decompress(compressed: &mut impl Iterator) -> Vec { + fn unpack_chunk( + compressed: &mut impl Iterator, + n_elms: usize, + ) -> Vec { + let n_elms_per_felt = BitLength::min_bit_length(LENGTH).unwrap().n_elems_in_felt(); + let n_packed_felts = n_elms.div_ceil(n_elms_per_felt); + let compressed_chunk: Vec<_> = compressed.take(n_packed_felts).collect(); + unpack_felts(&compressed_chunk, n_elms) + .into_iter() + .map(|bits: BitsArray| felt_from_bits_le(&bits.0).unwrap()) + .collect() + } + + fn unpack_chunk_to_usize( + compressed: &mut impl Iterator, + n_elms: usize, + elm_bound: u32, + ) -> Vec { + let n_elms_per_felt = get_n_elms_per_felt(elm_bound); + let n_packed_felts = n_elms.div_ceil(n_elms_per_felt); + + let compressed_chunk: Vec<_> = compressed.take(n_packed_felts).collect(); + unpack_felts_to_usize(&compressed_chunk, n_elms, elm_bound) + } + + let header = unpack_chunk_to_usize(compressed, HEADER_LEN, HEADER_ELM_BOUND); + let version = &header[0]; + assert!(version == &usize::from(COMPRESSION_VERSION), "Unsupported compression version."); + + let data_len = &header[1]; + let unique_value_bucket_lengths: Vec = header[2..2 + N_UNIQUE_BUCKETS].to_vec(); + let n_repeating_values = &header[2 + N_UNIQUE_BUCKETS]; + + let mut unique_values = Vec::new(); + unique_values.extend(compressed.take(unique_value_bucket_lengths[0])); // 252 bucket. 
+ unique_values.extend(unpack_chunk::<125>(compressed, unique_value_bucket_lengths[1])); + unique_values.extend(unpack_chunk::<83>(compressed, unique_value_bucket_lengths[2])); + unique_values.extend(unpack_chunk::<62>(compressed, unique_value_bucket_lengths[3])); + unique_values.extend(unpack_chunk::<31>(compressed, unique_value_bucket_lengths[4])); + unique_values.extend(unpack_chunk::<15>(compressed, unique_value_bucket_lengths[5])); + + let repeating_value_pointers = unpack_chunk_to_usize( + compressed, + *n_repeating_values, + unique_values.len().try_into().unwrap(), + ); + + let repeating_values: Vec<_> = + repeating_value_pointers.iter().map(|ptr| unique_values[*ptr]).collect(); + + let mut all_values = unique_values; + all_values.extend(repeating_values); + + let bucket_index_per_elm: Vec = + unpack_chunk_to_usize(compressed, *data_len, TOTAL_N_BUCKETS.try_into().unwrap()); + + let all_bucket_lengths: Vec = + unique_value_bucket_lengths.into_iter().chain([*n_repeating_values]).collect(); + + let bucket_offsets = get_bucket_offsets(&all_bucket_lengths); + + let mut bucket_offset_trackers: Vec<_> = bucket_offsets; + + let mut result = Vec::new(); + for bucket_index in bucket_index_per_elm { + let offset = &mut bucket_offset_trackers[bucket_index]; + let value = all_values[*offset]; + *offset += 1; + result.push(value); + } + result +} diff --git a/crates/starknet_os/src/hints/vars.rs b/crates/starknet_os/src/hints/vars.rs index 2d8a16b57d4..608a28a2317 100644 --- a/crates/starknet_os/src/hints/vars.rs +++ b/crates/starknet_os/src/hints/vars.rs @@ -407,6 +407,8 @@ define_string_enum! 
{ ), (HashBuiltin, "starkware.cairo.common.cairo_builtins.HashBuiltin"), (HashBuiltinPtr, "starkware.cairo.common.cairo_builtins.HashBuiltin*"), + (L1ToL2MessageHeader,"starkware.starknet.core.os.output.MessageToL2Header"), + (L2ToL1MessageHeader, "starkware.starknet.core.os.output.MessageToL1Header"), (NodeEdge, "starkware.cairo.common.patricia_utils.NodeEdge"), (NonSelectableBuiltins, "starkware.starknet.core.os.builtins.NonSelectableBuiltins"), (OsStateUpdate, "starkware.starknet.core.os.state.state.OsStateUpdate"), diff --git a/crates/starknet_os/src/io/os_input.rs b/crates/starknet_os/src/io/os_input.rs index f5ed62a033d..23a457c07b6 100644 --- a/crates/starknet_os/src/io/os_input.rs +++ b/crates/starknet_os/src/io/os_input.rs @@ -72,13 +72,18 @@ pub struct OsHints { pub os_hints_config: OsHintsConfig, } +// TODO(Dori): Once computation of the hinted class hash is fully functional, delete this type. +pub(crate) type HintedClassHash = Felt; + #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] #[cfg_attr(any(test, feature = "testing"), derive(Default))] #[derive(Debug)] pub struct StarknetOsInput { pub os_block_inputs: Vec, pub cached_state_inputs: Vec, - pub(crate) deprecated_compiled_classes: BTreeMap, + // TODO(Dori): Once computation of the hinted class hash is fully functional, the extra Felt + // value in the tuple should be removed. 
+ pub(crate) deprecated_compiled_classes: BTreeMap, pub(crate) compiled_classes: BTreeMap, } diff --git a/crates/starknet_os/src/io/os_output.rs b/crates/starknet_os/src/io/os_output.rs index e7a72448fe6..973bd3f45ca 100644 --- a/crates/starknet_os/src/io/os_output.rs +++ b/crates/starknet_os/src/io/os_output.rs @@ -6,20 +6,399 @@ use cairo_vm::vm::runners::builtin_runner::BuiltinRunner; use cairo_vm::vm::runners::cairo_pie::CairoPie; use cairo_vm::vm::vm_core::VirtualMachine; use num_traits::ToPrimitive; -use starknet_types_core::felt::Felt; +use starknet_api::block::BlockNumber; +use starknet_api::core::{ + ClassHash, + CompiledClassHash, + ContractAddress, + EntryPointSelector, + EthAddress, + Nonce, +}; +use starknet_api::hash::StarkHash; +use starknet_api::state::StorageKey; +use starknet_api::transaction::{L1ToL2Payload, L2ToL1Payload, MessageToL1}; +use starknet_types_core::felt::{Felt, NonZeroFelt}; use crate::errors::StarknetOsError; +use crate::hints::hint_implementation::stateless_compression::utils::decompress; +use crate::metrics::OsMetrics; + +#[cfg(test)] +#[path = "os_output_test.rs"] +mod os_output_test; + +// Cairo DictAccess types for concrete objects. +type ContractStorageUpdate = (StorageKey, (Option, Felt)); +type CompiledClassHashUpdate = (ClassHash, (Option, CompiledClassHash)); + +// Defined in output.cairo +const N_UPDATES_BOUND: NonZeroFelt = + NonZeroFelt::from_felt_unchecked(Felt::from_hex_unchecked("10000000000000000")); // 2^64. +const N_UPDATES_SMALL_PACKING_BOUND: NonZeroFelt = + NonZeroFelt::from_felt_unchecked(Felt::from_hex_unchecked("100")); // 2^8. +const FLAG_BOUND: NonZeroFelt = NonZeroFelt::TWO; + +const MESSAGE_TO_L1_CONST_FIELD_SIZE: usize = 3; // from_address, to_address, payload_size. +// from_address, to_address, nonce, selector, payload_size. 
+const MESSAGE_TO_L2_CONST_FIELD_SIZE: usize = 5; +#[derive(Debug, thiserror::Error)] +pub enum OsOutputError { + #[error("Missing expected field: {0}.")] + MissingFieldInOutput(String), + #[error("Invalid output in field: {0}. Error: {1}")] + InvalidOsOutputField(String, String), +} + +fn wrap_missing(val: Option, val_name: &str) -> Result { + val.ok_or_else(|| OsOutputError::MissingFieldInOutput(val_name.to_string())) +} + +fn try_into_custom_error>(val: Felt, val_name: &str) -> Result +where + >::Error: std::fmt::Display, +{ + val.try_into().map_err(|e: >::Error| { + OsOutputError::InvalidOsOutputField(val_name.to_string(), e.to_string()) + }) +} + +fn wrap_missing_as>(val: Option, val_name: &str) -> Result +where + >::Error: std::fmt::Display, +{ + try_into_custom_error(wrap_missing(val, val_name)?, val_name) +} + +fn felt_as_bool(felt_val: Felt, val_name: &str) -> Result { + if felt_val == Felt::ZERO || felt_val == Felt::ONE { + return Ok(felt_val == Felt::ONE); + } + Err(OsOutputError::InvalidOsOutputField( + val_name.to_string(), + format!("Expected a bool felt, got {felt_val}"), + )) +} +fn wrap_missing_as_bool(val: Option, val_name: &str) -> Result { + let felt_val = wrap_missing(val, val_name)?; + if felt_val == Felt::ZERO || felt_val == Felt::ONE { + return Ok(felt_val == Felt::ONE); + } + Err(OsOutputError::InvalidOsOutputField( + val_name.to_string(), + format!("Expected a bool felt, got {felt_val}"), + )) +} + +pub fn message_l1_from_output_iter>( + iter: &mut It, +) -> Result { + let from_address = wrap_missing_as(iter.next(), "from_address")?; + let to_address = wrap_missing_as(iter.next(), "to_address")?; + let payload_size = wrap_missing_as(iter.next(), "payload_size")?; + let payload = L2ToL1Payload(iter.take(payload_size).collect()); + + Ok(MessageToL1 { from_address, to_address, payload }) +} + +// TODO(Tzahi): Replace with starknet_api struct after it is updated. 
+#[cfg_attr(feature = "deserialize", derive(serde::Deserialize, serde::Serialize))] +#[derive(Debug)] +// An L1 to L2 message header, the message payload is concatenated to the end of the header. +pub struct MessageToL2 { + // The L1 address of the contract sending the message. + from_address: EthAddress, + // The L2 address of the contract receiving the message. + to_address: ContractAddress, + nonce: Nonce, + selector: EntryPointSelector, + payload: L1ToL2Payload, +} + +impl MessageToL2 { + pub fn from_output_iter>( + iter: &mut It, + ) -> Result { + let from_address = wrap_missing_as(iter.next(), "from_address")?; + let to_address = wrap_missing_as(iter.next(), "to_address")?; + let nonce = Nonce(wrap_missing(iter.next(), "nonce")?); + let selector = EntryPointSelector(wrap_missing(iter.next(), "selector")?); + let payload_size = wrap_missing_as(iter.next(), "payload_size")?; + let payload = L1ToL2Payload(iter.take(payload_size).collect()); + + Ok(Self { from_address, to_address, nonce, selector, payload }) + } +} + +fn parse_storage_changes + ?Sized>( + n_changes: usize, + iter: &mut It, + full_output: bool, +) -> Result, OsOutputError> { + (0..n_changes) + .map(|_| { + let key = wrap_missing_as(iter.next(), "storage key")?; + let prev_value = if full_output { + Some(wrap_missing(iter.next(), "previous storage value")?) + } else { + None + }; + let new_value = wrap_missing(iter.next(), "storage value")?; + // Wrapped in Ok to be able to use ? operator in the closure. + Ok((key, (prev_value, new_value))) + }) + .collect() +} + +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize, serde::Serialize))] +#[derive(Debug)] +/// Represents the changes in a contract instance. +pub struct ContractChanges { + // The address of the contract. + addr: ContractAddress, + // The previous nonce of the contract (for account contracts, if full output). + prev_nonce: Option, + // The new nonce of the contract (for account contracts, if changed or full output). 
+ new_nonce: Option, + // The previous class hash (if full output). + prev_class_hash: Option, + // The new class hash (if changed or full output). + new_class_hash: Option, + // A map from storage key to its prev value (optional) and new value. + storage_changes: Vec, +} + +impl ContractChanges { + pub fn from_iter + ?Sized>( + iter: &mut It, + full_output: bool, + ) -> Result { + let addr = wrap_missing_as(iter.next(), "addr")?; + if full_output { + return Ok(Self { + addr, + prev_nonce: Some(Nonce(wrap_missing(iter.next(), "prev_nonce")?)), + new_nonce: Some(Nonce(wrap_missing_as(iter.next(), "new_nonce")?)), + prev_class_hash: Some(ClassHash(wrap_missing_as(iter.next(), "prev_class_hash")?)), + new_class_hash: Some(ClassHash(wrap_missing_as(iter.next(), "new_class_hash")?)), + storage_changes: parse_storage_changes( + wrap_missing_as(iter.next(), "storage_changes")?, + iter, + full_output, + )?, + }); + } + // Parse packed info. + let nonce_n_changes_two_flags = wrap_missing(iter.next(), "nonce_n_changes_two_flags")?; + + // Parse flags. + let (nonce_n_changes_one_flag, class_updated_felt) = + nonce_n_changes_two_flags.div_rem(&FLAG_BOUND); + let class_updated = felt_as_bool(class_updated_felt, "class_updated")?; + let (nonce_n_changes, is_n_updates_small_felt) = + nonce_n_changes_one_flag.div_rem(&FLAG_BOUND); + let is_n_updates_small = felt_as_bool(is_n_updates_small_felt, "is_n_updates_small")?; + + // Parse n_changes. + let n_updates_bound = + if is_n_updates_small { N_UPDATES_SMALL_PACKING_BOUND } else { N_UPDATES_BOUND }; + let (nonce, n_changes) = nonce_n_changes.div_rem(&n_updates_bound); + + // Parse nonce. 
+ let new_nonce = if nonce == Felt::ZERO { None } else { Some(Nonce(nonce)) }; + + let new_class_hash = if class_updated { + Some(ClassHash(wrap_missing(iter.next(), "new_class_hash")?)) + } else { + None + }; + Ok(Self { + addr, + prev_nonce: None, + new_nonce, + prev_class_hash: None, + new_class_hash, + storage_changes: parse_storage_changes( + try_into_custom_error(n_changes, "n_changes")?, + iter, + full_output, + )?, + }) + } +} + +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize, serde::Serialize))] +#[derive(Debug)] +pub struct OsStateDiff { + // Contracts that were changed. + pub contracts: Vec, + // Classes that were declared. Represents the updates of a mapping from class hash to previous + // (optional) and new compiled class hash. + pub classes: Vec, +} + +impl OsStateDiff { + pub fn from_iter>( + output_iter: &mut It, + full_output: bool, + ) -> Result { + let state_diff; + let iter: &mut dyn Iterator = if !full_output { + state_diff = decompress(output_iter); + &mut state_diff.into_iter().chain(output_iter) + } else { + output_iter + }; + // Contracts changes. + let n_contracts = wrap_missing_as(iter.next(), "OsStateDiff.n_contracts")?; + let mut contracts = Vec::with_capacity(n_contracts); + for _ in 0..n_contracts { + contracts.push(ContractChanges::from_iter(iter, full_output)?); + } + + // Classes changes. 
+ let n_classes = wrap_missing_as(iter.next(), "OsStateDiff.n_classes")?; + let mut classes = Vec::with_capacity(n_classes); + for _ in 0..n_classes { + let class_hash = ClassHash(wrap_missing(iter.next(), "class_hash")?); + let prev_compiled_class_hash = if full_output { + Some(CompiledClassHash(wrap_missing(iter.next(), "prev_compiled_class_hash")?)) + } else { + None + }; + let new_compiled_class_hash = + CompiledClassHash(wrap_missing(iter.next(), "new_compiled_class_hash")?); + classes.push((class_hash, (prev_compiled_class_hash, new_compiled_class_hash))); + } + Ok(Self { contracts, classes }) + } +} + +#[cfg_attr(feature = "deserialize", derive(serde::Deserialize, serde::Serialize))] +#[derive(Debug)] +pub struct OsOutput { + // The root before. + pub initial_root: StarkHash, + // The root after. + pub final_root: StarkHash, + // The previous block number. + pub prev_block_number: BlockNumber, + // The new block number. + pub new_block_number: BlockNumber, + // The previous block hash. + pub prev_block_hash: StarkHash, + // The new block hash. + pub new_block_hash: StarkHash, + // The hash of the OS program, if the aggregator was used. Zero if the OS was used directly. + pub os_program_hash: StarkHash, + // The hash of the OS config. + pub starknet_os_config_hash: StarkHash, + // Indicates whether KZG data availability was used. + pub use_kzg_da: bool, + // Indicates whether previous state values are included in the state update information. + pub full_output: bool, + // Messages from L2 to L1. + pub messages_to_l1: Vec, + // Messages from L1 to L2. + pub messages_to_l2: Vec, + // The state diff. 
+ pub state_diff: Option, +} + +impl OsOutput { + pub fn from_raw_output_iter>( + mut output_iter: It, + ) -> Result { + let initial_root = wrap_missing(output_iter.next(), "initial_root")?; + let final_root = wrap_missing(output_iter.next(), "final_root")?; + let prev_block_number = + BlockNumber(wrap_missing_as(output_iter.next(), "prev_block_number")?); + let new_block_number = + BlockNumber(wrap_missing_as(output_iter.next(), "new_block_number")?); + let prev_block_hash = wrap_missing(output_iter.next(), "prev_block_hash")?; + let new_block_hash = wrap_missing(output_iter.next(), "new_block_hash")?; + let os_program_hash = wrap_missing(output_iter.next(), "os_program_hash")?; + let starknet_os_config_hash = wrap_missing(output_iter.next(), "starknet_os_config_hash")?; + let use_kzg_da = wrap_missing_as_bool(output_iter.next(), "use_kzg_da")?; + let full_output = wrap_missing_as_bool(output_iter.next(), "full_output")?; + + if use_kzg_da { + // Skip KZG data. + + let _kzg_z = wrap_missing(output_iter.next(), "kzg_z")?; + let n_blobs: usize = wrap_missing_as(output_iter.next(), "n_blobs")?; + // Skip 'n_blobs' commitments and evaluations. + output_iter.nth((2 * 2 * n_blobs) - 1); + } + + // Messages to L1 and L2. + let mut messages_to_l1_segment_size = + wrap_missing_as(output_iter.next(), "messages_to_l1_segment_size")?; + let mut messages_to_l1_iter = + output_iter.by_ref().take(messages_to_l1_segment_size).peekable(); + let mut messages_to_l1 = Vec::::new(); + + while messages_to_l1_iter.peek().is_some() { + let message = message_l1_from_output_iter(&mut messages_to_l1_iter)?; + messages_to_l1_segment_size -= message.payload.0.len() + MESSAGE_TO_L1_CONST_FIELD_SIZE; + messages_to_l1.push(message); + } + assert_eq!( + messages_to_l1_segment_size, 0, + "Expected messages to L1 segment to be consumed, but {messages_to_l1_segment_size} \ + felts were left." 
+ ); + + let mut messages_to_l2_segment_size = + wrap_missing_as(output_iter.next(), "messages_to_l2_segment_size")?; + let mut messages_to_l2_iter = + output_iter.by_ref().take(messages_to_l2_segment_size).peekable(); + let mut messages_to_l2 = Vec::::new(); + + while messages_to_l2_iter.peek().is_some() { + let message = MessageToL2::from_output_iter(&mut messages_to_l2_iter)?; + messages_to_l2_segment_size -= message.payload.0.len() + MESSAGE_TO_L2_CONST_FIELD_SIZE; + messages_to_l2.push(message); + } + + // State diff. + let state_diff = if use_kzg_da { + None + } else { + Some(OsStateDiff::from_iter(&mut output_iter, full_output)?) + }; + + Ok(Self { + initial_root, + final_root, + prev_block_number, + new_block_number, + prev_block_hash, + new_block_hash, + os_program_hash, + starknet_os_config_hash, + use_kzg_da, + full_output, + messages_to_l1, + messages_to_l2, + state_diff, + }) + } +} pub struct StarknetOsRunnerOutput { - // TODO(Tzahi): Define a struct for the output. - pub os_output: Vec, + #[cfg(feature = "include_program_output")] + pub os_output: OsOutput, pub cairo_pie: CairoPie, + pub da_segment: Option>, + pub metrics: OsMetrics, #[cfg(any(test, feature = "testing"))] pub unused_hints: std::collections::HashSet, } pub struct StarknetAggregatorRunnerOutput { // TODO(Aner): Define a struct for the output. 
+ #[cfg(feature = "include_program_output")] pub aggregator_output: Vec, pub cairo_pie: CairoPie, #[cfg(any(test, feature = "testing"))] diff --git a/crates/starknet_os/src/io/os_output_test.rs b/crates/starknet_os/src/io/os_output_test.rs new file mode 100644 index 00000000000..e0a7eb59c1f --- /dev/null +++ b/crates/starknet_os/src/io/os_output_test.rs @@ -0,0 +1,10 @@ +use num_traits::ToPrimitive; +use starknet_types_core::felt::Felt; + +use super::{N_UPDATES_BOUND, N_UPDATES_SMALL_PACKING_BOUND}; + +#[test] +fn assert_const_felts() { + assert_eq!(Into::::into(N_UPDATES_BOUND).to_u128().unwrap(), 1 << 64); + assert_eq!(Into::::into(N_UPDATES_SMALL_PACKING_BOUND).to_u64().unwrap(), 1 << 8); +} diff --git a/crates/starknet_os/src/lib.rs b/crates/starknet_os/src/lib.rs index aaa73750468..057282ac04e 100644 --- a/crates/starknet_os/src/lib.rs +++ b/crates/starknet_os/src/lib.rs @@ -1,9 +1,14 @@ +#[cfg(test)] +pub mod constants_test; pub mod errors; pub mod hint_processor; pub mod hints; pub mod io; +pub mod metrics; pub mod runner; pub mod syscall_handler_utils; #[cfg(any(test, feature = "testing"))] pub mod test_utils; +#[cfg(test)] +pub(crate) mod tests; pub mod vm_utils; diff --git a/crates/starknet_os/src/metrics.rs b/crates/starknet_os/src/metrics.rs new file mode 100644 index 00000000000..22bc7cf09f0 --- /dev/null +++ b/crates/starknet_os/src/metrics.rs @@ -0,0 +1,51 @@ +use blockifier::execution::syscalls::vm_syscall_utils::SyscallUsageMap; +use blockifier::state::state_api::StateReader; +use cairo_vm::types::relocatable::MaybeRelocatable; +use cairo_vm::vm::errors::runner_errors::RunnerError; +use cairo_vm::vm::runners::cairo_runner::{CairoRunner, ExecutionResources}; +use serde::Serialize; + +use crate::hint_processor::snos_hint_processor::SnosHintProcessor; + +#[derive(Serialize)] +pub struct OsRunInfo { + pub pc: MaybeRelocatable, + pub ap: MaybeRelocatable, + pub fp: MaybeRelocatable, + pub used_memory_cells: usize, +} + +impl OsRunInfo { + pub fn 
new(runner: &mut CairoRunner) -> Self { + Self { + pc: runner.vm.get_pc().into(), + ap: runner.vm.get_ap().into(), + fp: runner.vm.get_fp().into(), + used_memory_cells: runner.vm.segments.compute_effective_sizes().iter().sum(), + } + } +} + +#[derive(Serialize)] +pub struct OsMetrics { + pub syscall_usages: Vec, + pub deprecated_syscall_usages: Vec, + pub run_info: OsRunInfo, + pub execution_resources: ExecutionResources, +} + +impl OsMetrics { + pub fn new( + runner: &mut CairoRunner, + hint_processor: &SnosHintProcessor<'_, S>, + ) -> Result { + Ok(Self { + syscall_usages: hint_processor.execution_helpers_manager.get_syscall_usages(), + deprecated_syscall_usages: hint_processor + .execution_helpers_manager + .get_deprecated_syscall_usages(), + run_info: OsRunInfo::new(runner), + execution_resources: runner.get_execution_resources()?, + }) + } +} diff --git a/crates/starknet_os/src/runner.rs b/crates/starknet_os/src/runner.rs index 368970c4c99..eb4beda685a 100644 --- a/crates/starknet_os/src/runner.rs +++ b/crates/starknet_os/src/runner.rs @@ -7,14 +7,12 @@ use cairo_vm::vm::runners::cairo_runner::CairoRunner; use crate::errors::StarknetOsError; use crate::hint_processor::aggregator_hint_processor::AggregatorInput; +use crate::hint_processor::common_hint_processor::CommonHintProcessor; use crate::hint_processor::panicking_state_reader::PanickingStateReader; use crate::hint_processor::snos_hint_processor::SnosHintProcessor; use crate::io::os_input::{OsHints, StarknetOsInput}; -use crate::io::os_output::{ - get_run_output, - StarknetAggregatorRunnerOutput, - StarknetOsRunnerOutput, -}; +use crate::io::os_output::{StarknetAggregatorRunnerOutput, StarknetOsRunnerOutput}; +use crate::metrics::OsMetrics; pub fn run_os( layout: LayoutName, @@ -76,9 +74,16 @@ pub fn run_os( cairo_runner.finalize_segments()?; } - // Prepare and check expected output. - let os_output = get_run_output(&cairo_runner.vm)?; - // TODO(Tzahi): log the output once it will have a proper struct. 
+ #[cfg(feature = "include_program_output")] + let os_output = { + // Prepare and check expected output. + let os_raw_output = crate::io::os_output::get_run_output(&cairo_runner.vm)?; + let os_output = + crate::io::os_output::OsOutput::from_raw_output_iter(os_raw_output.into_iter())?; + log::debug!("OsOutput for block number={}: {os_output:?}", os_output.new_block_number); + os_output + }; + cairo_runner.vm.verify_auto_deductions().map_err(StarknetOsError::VirtualMachineError)?; cairo_runner .read_return_values(allow_missing_builtins) @@ -91,8 +96,11 @@ pub fn run_os( let cairo_pie = cairo_runner.get_cairo_pie().map_err(StarknetOsError::RunnerError)?; Ok(StarknetOsRunnerOutput { + #[cfg(feature = "include_program_output")] os_output, cairo_pie, + da_segment: snos_hint_processor.get_da_segment().take(), + metrics: OsMetrics::new(&mut cairo_runner, &snos_hint_processor)?, #[cfg(any(test, feature = "testing"))] unused_hints: snos_hint_processor.unused_hints, }) @@ -109,7 +117,6 @@ pub fn run_os_stateless( } /// Run the Aggregator. 
-#[allow(clippy::result_large_err)] pub fn run_aggregator( _layout: LayoutName, _aggregator_input: AggregatorInput, diff --git a/crates/starknet_os/src/test_utils.rs b/crates/starknet_os/src/test_utils.rs index 248ea4a4336..431c5944d65 100644 --- a/crates/starknet_os/src/test_utils.rs +++ b/crates/starknet_os/src/test_utils.rs @@ -1,3 +1,4 @@ pub mod cairo_runner; pub mod errors; +#[cfg(test)] pub mod utils; diff --git a/crates/starknet_os/src/test_utils/cairo_runner.rs b/crates/starknet_os/src/test_utils/cairo_runner.rs index ae54ad145a8..4491580b587 100644 --- a/crates/starknet_os/src/test_utils/cairo_runner.rs +++ b/crates/starknet_os/src/test_utils/cairo_runner.rs @@ -2,6 +2,7 @@ use std::any::Any; use std::collections::{HashMap, HashSet}; use blockifier::blockifier_versioned_constants::VersionedConstants; +use blockifier::test_utils::dict_state_reader::DictStateReader; use cairo_vm::serde::deserialize_program::Member; use cairo_vm::types::builtin_name::BuiltinName; use cairo_vm::types::layout_name::LayoutName; @@ -297,10 +298,11 @@ fn extract_builtins_from_implicit_args( // TODO(Amos): Add builtins properly once the VM allows loading an entrypoint's builtins. // In addition, pass program as struct and add hint processor as param. fn inject_builtins( - program_str: &str, + program_bytes: &[u8], implicit_args: &[ImplicitArg], ) -> Cairo0EntryPointRunnerResult { let program_builtins = extract_builtins_from_implicit_args(implicit_args)?; + let program_str = std::str::from_utf8(program_bytes).unwrap(); let mut program_dict: HashMap = serde_json::from_str(program_str).map_err(Cairo0EntryPointRunnerError::ProgramSerde)?; program_dict.insert( @@ -547,14 +549,16 @@ fn get_return_values( /// Hint locals are added to the outermost exec scope. /// If the endpoint used builtins, the respective returned (implicit) arg is the builtin instance /// usage, unless the builtin is the output builtin, in which case the arg is the output. 
+#[allow(clippy::too_many_arguments)] pub fn run_cairo_0_entry_point( runner_config: &EntryPointRunnerConfig, - program_str: &str, + program_bytes: &[u8], entrypoint: &str, explicit_args: &[EndpointArg], implicit_args: &[ImplicitArg], expected_explicit_return_values: &[EndpointArg], hint_locals: HashMap>, + state_reader: Option, ) -> Cairo0EntryPointRunnerResult<(Vec, Vec, CairoRunner)> { let mut entrypoint = entrypoint.to_string(); if runner_config.add_main_prefix_to_entrypoint { @@ -562,10 +566,10 @@ pub fn run_cairo_0_entry_point( entrypoint = format!("__main__.{entrypoint}"); } - let program = inject_builtins(program_str, implicit_args)?; + let program = inject_builtins(program_bytes, implicit_args)?; info!("Successfully injected builtins into program."); - let (state_reader, os_hints_config, os_state_input) = (None, None, None); + let (os_hints_config, os_state_input) = (None, None); let os_block_input = OsBlockInput::default(); let mut hint_processor = SnosHintProcessor::new_for_testing( state_reader, diff --git a/crates/starknet_os/src/test_utils/cairo_runner_test.rs b/crates/starknet_os/src/test_utils/cairo_runner_test.rs index b81e56874a2..952466ccecf 100644 --- a/crates/starknet_os/src/test_utils/cairo_runner_test.rs +++ b/crates/starknet_os/src/test_utils/cairo_runner_test.rs @@ -70,7 +70,7 @@ use crate::test_utils::utils::run_cairo_function_and_check_result; /// let sum = number_1 + number_2; /// return (res=sum); /// } -const COMPILED_DUMMY_FUNCTION: &str = include_str!("compiled_dummy_function.json"); +const COMPILED_DUMMY_FUNCTION_BYTES: &[u8] = include_bytes!("compiled_dummy_function.json"); #[test] fn test_felt_and_pointers() -> Cairo0EntryPointRunnerResult<()> { @@ -117,7 +117,7 @@ fn test_felt_and_pointers() -> Cairo0EntryPointRunnerResult<()> { ])); run_cairo_function_and_check_result( &EntryPointRunnerConfig::default(), - COMPILED_DUMMY_FUNCTION, + COMPILED_DUMMY_FUNCTION_BYTES, "pass_felt_and_pointers", &[number.into(), array, tuple, 
simple_struct, compound_struct], &[], @@ -172,7 +172,7 @@ fn test_tuples_and_structs() -> Cairo0EntryPointRunnerResult<()> { ])); run_cairo_function_and_check_result( &EntryPointRunnerConfig::default(), - COMPILED_DUMMY_FUNCTION, + COMPILED_DUMMY_FUNCTION_BYTES, "pass_structs_and_tuples", &[tuple, named_tuple, simple_struct, compound_struct], &[], @@ -204,7 +204,7 @@ fn test_implicit_args() -> Cairo0EntryPointRunnerResult<()> { EntryPointRunnerConfig { layout: LayoutName::all_cairo, ..Default::default() }; run_cairo_function_and_check_result( &entrypoint_runner_config, - COMPILED_DUMMY_FUNCTION, + COMPILED_DUMMY_FUNCTION_BYTES, "pass_implicit_args", &[number_1.into(), number_2.into()], &[ diff --git a/crates/starknet_os/src/test_utils/utils.rs b/crates/starknet_os/src/test_utils/utils.rs index 775f9d6aaa5..3e4b64ba93a 100644 --- a/crates/starknet_os/src/test_utils/utils.rs +++ b/crates/starknet_os/src/test_utils/utils.rs @@ -1,8 +1,16 @@ use std::any::Any; use std::collections::HashMap; +use std::sync::LazyLock; +use cairo_vm::hint_processor::builtin_hint_processor::dict_hint_utils::DICT_ACCESS_SIZE; +use cairo_vm::types::layout_name::LayoutName; +use ethnum::U256; +use num_bigint::{BigInt, Sign}; +use rand::rngs::StdRng; +use rand::SeedableRng; use starknet_types_core::felt::Felt; +use crate::hints::hint_implementation::kzg::utils::BASE; use crate::test_utils::cairo_runner::{ run_cairo_0_entry_point, Cairo0EntryPointRunnerResult, @@ -16,7 +24,7 @@ use crate::test_utils::cairo_runner::{ #[allow(clippy::too_many_arguments)] pub fn run_cairo_function_and_check_result( runner_config: &EntryPointRunnerConfig, - program_str: &str, + program_bytes: &[u8], function_name: &str, explicit_args: &[EndpointArg], implicit_args: &[ImplicitArg], @@ -24,14 +32,16 @@ pub fn run_cairo_function_and_check_result( expected_implicit_retdata: &[EndpointArg], hint_locals: HashMap>, ) -> Cairo0EntryPointRunnerResult<()> { + let state_reader = None; let (actual_implicit_retdata, 
actual_explicit_retdata, _) = run_cairo_0_entry_point( runner_config, - program_str, + program_bytes, function_name, explicit_args, implicit_args, expected_explicit_retdata, hint_locals, + state_reader, )?; assert_eq!(expected_explicit_retdata, &actual_explicit_retdata); assert_eq!(expected_implicit_retdata, &actual_implicit_retdata); @@ -55,3 +65,83 @@ pub fn create_squashed_cairo_dict( } PointerArg::Composed(squashed_dict) } + +pub fn parse_squashed_cairo_dict(squashed_dict: &[Felt]) -> HashMap { + assert!(squashed_dict.len() % DICT_ACCESS_SIZE == 0, "Invalid squashed dict length"); + let key_offset = 0; + let new_val_offset = 2; + squashed_dict + .chunks(DICT_ACCESS_SIZE) + .map(|chunk| (chunk[key_offset], chunk[new_val_offset])) + .collect() +} + +// 2**251 + 17 * 2**192 + 1 +pub static DEFAULT_PRIME: LazyLock = LazyLock::new(|| { + BigInt::from_bytes_be( + Sign::Plus, + &(U256::from(2_u32).pow(251) + 17 * U256::from(2_u32).pow(192) + 1).to_be_bytes(), + ) +}); + +#[allow(clippy::too_many_arguments, dead_code)] +pub(crate) fn test_cairo_function( + runner_config: &EntryPointRunnerConfig, + program_bytes: &[u8], + function_name: &str, + explicit_args: &[EndpointArg], + implicit_args: &[ImplicitArg], + expected_explicit_retdata: &[EndpointArg], + expected_implicit_retdata: &[EndpointArg], + hint_locals: HashMap>, +) { + run_cairo_function_and_check_result( + runner_config, + program_bytes, + function_name, + explicit_args, + implicit_args, + expected_explicit_retdata, + expected_implicit_retdata, + hint_locals, + ) + .unwrap(); +} + +#[allow(dead_code)] +pub(crate) fn seeded_random_prng() -> StdRng { + StdRng::seed_from_u64(42) +} + +/// Returns the lift of the given field element, val, as a `BigInt` in the range +/// (-prime/2, prime/2). 
+// TODO(Amos): Use cairo VM version if it is made public: +// https://github.com/lambdaclass/cairo-vm/blob/052e7cef977b336305c869fccbf24e1794b116ff/vm/src/hint_processor/builtin_hint_processor/kzg_da/mod.rs#L90 +fn as_int(val: &Felt, prime: &BigInt) -> BigInt { + let val = val.to_bigint(); + if val < (prime / BigInt::from(2)) { + return val.clone(); + } + val - prime +} + +/// Takes a BigInt3 struct represented by the limbs (d0, d1, d2) of +/// and reconstructs the corresponding integer (see split_bigint3()). +/// Note that the limbs do not have to be in the range [0, BASE). +/// Prime is used to handle negative values of the limbs. +// TODO(Amos): Use cairo VM version if it is made public: +// https://github.com/lambdaclass/cairo-vm/blob/052e7cef977b336305c869fccbf24e1794b116ff/vm/src/hint_processor/builtin_hint_processor/kzg_da/mod.rs#L99 +pub fn pack_bigint3(limbs: &[Felt]) -> BigInt { + assert!(limbs.len() == 3, "Expected 3 limbs, got {}", limbs.len()); + limbs.iter().enumerate().fold(BigInt::ZERO, |acc, (i, &limb)| { + acc + as_int(&limb, &DEFAULT_PRIME) * BASE.pow(i.try_into().unwrap()) + }) +} + +pub(crate) fn get_entrypoint_runner_config() -> EntryPointRunnerConfig { + EntryPointRunnerConfig { + layout: LayoutName::small, + add_main_prefix_to_entrypoint: false, + ..Default::default() + } +} diff --git a/crates/starknet_os/src/tests.rs b/crates/starknet_os/src/tests.rs new file mode 100644 index 00000000000..9a5818c98df --- /dev/null +++ b/crates/starknet_os/src/tests.rs @@ -0,0 +1,2 @@ +pub(crate) mod aliases; +pub(crate) mod bls_field; diff --git a/crates/starknet_os/src/tests/aliases.rs b/crates/starknet_os/src/tests/aliases.rs new file mode 100644 index 00000000000..f3d2b3c3bba --- /dev/null +++ b/crates/starknet_os/src/tests/aliases.rs @@ -0,0 +1,302 @@ +use std::collections::{HashMap, HashSet}; + +use apollo_starknet_os_program::test_programs::ALIASES_TEST_BYTES; +use blockifier::state::stateful_compression::{ALIAS_COUNTER_STORAGE_KEY, 
INITIAL_AVAILABLE_ALIAS}; +use blockifier::test_utils::dict_state_reader::DictStateReader; +use blockifier::test_utils::ALIAS_CONTRACT_ADDRESS; +use cairo_vm::hint_processor::builtin_hint_processor::dict_hint_utils::DICT_ACCESS_SIZE; +use cairo_vm::hint_processor::hint_processor_utils::felt_to_usize; +use cairo_vm::types::builtin_name::BuiltinName; +use rstest::rstest; +use starknet_api::core::L2_ADDRESS_UPPER_BOUND; +use starknet_api::state::StorageKey; +use starknet_types_core::felt::Felt; + +use crate::test_utils::cairo_runner::{ + run_cairo_0_entry_point, + EndpointArg, + EntryPointRunnerConfig, + ImplicitArg, + PointerArg, + ValueArg, +}; +use crate::test_utils::utils::{ + get_entrypoint_runner_config, + parse_squashed_cairo_dict, + test_cairo_function, +}; + +// TODO(Nimrod): Move this next to the stateful compression hints implementation. +// TODO(Amos): This test is incomplete. Add the rest of the test cases and remove this todo. + +#[test] +fn test_constants() { + let max_non_compressed_contract_address = 15; + let alias_counter_storage_key = 0; + let initial_available_alias = 128; + let alias_contract_address = 2; + test_cairo_function( + &EntryPointRunnerConfig::default(), + ALIASES_TEST_BYTES, + "test_constants", + &[ + max_non_compressed_contract_address.into(), + alias_counter_storage_key.into(), + initial_available_alias.into(), + alias_contract_address.into(), + ], + &[], + &[], + &[], + HashMap::new(), + ) +} + +#[rstest] +#[case( + Vec::new(), + Vec::new(), + HashMap::from([(0.into(), 128.into())]) +)] +#[case( + vec![Felt::from(&*L2_ADDRESS_UPPER_BOUND)], + vec![128], + HashMap::from([ + (0.into(), 129.into()), + (Felt::from(&*L2_ADDRESS_UPPER_BOUND), 128.into()) + ]) +)] +#[case( + vec![2000.into(), 1999999999.into(), 3000.into(), 2000.into()], + vec![128, 129, 130, 128], + HashMap::from([ + (0.into(), 131.into()), + (2000.into(), 128.into()), + (3000.into(), 130.into()), + (1999999999.into(), 129.into()) + ]) +)] +#[case( + 
Vec::from_iter((0..128).map(Felt::from)), + (0..128).collect::>(), + HashMap::from_iter([(0.into(), 128.into())]) +)] +#[case( + Vec::from_iter((0..129).map(Felt::from)), + (0..129).collect::>(), + HashMap::from_iter([ + (0.into(), 129.into()), + (128.into(), 128.into()) + ]) +)] +#[case( + vec![ + 13.into(), + 500.into(), + 11.into(), + 2000.into(), + 2001.into(), + 13.into(), + 501.into(), + 98.into(), + 222.into(), + 2000.into(), + 127.into(), + 128.into() + ], + vec![13, 128, 11, 129, 130, 13, 131, 98, 132, 129, 127, 133], + HashMap::from([ + (0.into(), 134.into()), + (128.into(), 133.into()), + (222.into(), 132.into()), + (500.into(), 128.into()), + (501.into(), 131.into()), + (2000.into(), 129.into()), + (2001.into(), 130.into()) + ]) +)] +#[case( + (0..150_u8) + .map(|i| Felt::from(128) + Felt::TWO.pow(i)) + .chain((0..150_u8).map(|i| Felt::from(128) + Felt::TWO.pow(i))) + .collect::>(), + (0..150_u128) + .map(|i| i + 128) + .chain((0..150_u128).map(|i| i + 128)) + .collect::>(), + HashMap::from_iter( + (0..150_u128) + .map(|i| (Felt::from(128) + Felt::TWO.pow(i), Felt::from(i + 128))) + .chain([(0.into(), (128 + 150).into())]) + ) +)] +fn allocate_and_replace_keys_from_empty_storage( + #[case] keys: Vec, + #[case] expected_alias_per_key: Vec, + #[case] expected_alias_storage: HashMap, +) { + let expected_alias_per_key: Vec<_> = + expected_alias_per_key.into_iter().map(Felt::from).collect(); + let (actual_alias_storage, actual_alias_per_key) = + allocate_aliases_for_keys_and_replace(keys, HashMap::new()); + assert_eq!(actual_alias_storage, expected_alias_storage); + assert_eq!(actual_alias_per_key, expected_alias_per_key); +} + +#[rstest] +#[case( + vec![], + vec![], + HashMap::from([(0.into(), 128.into())]), + HashMap::from([(0, 128)]) +)] +#[case( + vec![2000.into()], + vec![128], + HashMap::from([ + (0.into(), 131.into()), + (2000.into(), 128.into()) + ]), + HashMap::from([ + (0, 131), + (2000, 128), + (1999999999, 129), + (3000, 130) + ]) +)] +#[case( + 
vec![2001.into()], + vec![131], + HashMap::from([ + (0.into(), 132.into()), + (2001.into(), 131.into()) + ]), + HashMap::from([ + (0, 131), + (2000, 128), + (1999999999, 129), + (3000, 130) + ]) +)] +#[case( + vec![2001.into(), 2000.into(), 2005.into()], + vec![131, 128, 132], + HashMap::from([ + (0.into(), 133.into()), + (2000.into(), 128.into()), + (2001.into(), 131.into()), + (2005.into(), 132.into()) + ]), + HashMap::from([ + (0, 131), + (2000, 128), + (1999999999, 129), + (3000, 130) + ]) +)] +#[case( + vec![ + 13.into(), + 500.into(), + 11.into(), + 2000.into(), + 89999.into(), + 13.into(), + 501.into(), + 98.into(), + 222.into(), + 501.into() + ], + vec![13, 128, 11, 129, 131, 13, 132, 98, 133, 132], + HashMap::from([ + (0.into(), 134.into()), + (222.into(), 133.into()), + (500.into(), 128.into()), + (501.into(), 132.into()), + (2000.into(), 129.into()), + (89999.into(), 131.into()) + ]), + HashMap::from([ + (0, 131), + (500, 128), + (2000, 129), + (2001, 130) + ]) +)] +fn allocate_and_replace_keys_from_non_empty_storage( + #[case] keys: Vec, + #[case] expected_alias_per_key: Vec, + #[case] expected_alias_storage: HashMap, + #[case] initial_storage: HashMap, +) { + let initial_storage = initial_storage + .into_iter() + .map(|(key, value)| (StorageKey::from(key), Felt::from(value))) + .collect::>(); + let expected_alias_per_key: Vec<_> = + expected_alias_per_key.into_iter().map(Felt::from).collect(); + let (actual_alias_storage, actual_alias_per_key) = + allocate_aliases_for_keys_and_replace(keys, initial_storage); + + assert_eq!(actual_alias_storage, expected_alias_storage); + assert_eq!(actual_alias_per_key, expected_alias_per_key); +} + +fn allocate_aliases_for_keys_and_replace( + keys: Vec, + initial_storage: HashMap, +) -> (HashMap, Vec) { + let runner_config = get_entrypoint_runner_config(); + let entrypoint = "__main__.allocate_alias_for_keys_and_replace"; + let implicit_args = [ImplicitArg::Builtin(BuiltinName::range_check)]; + let unique_keys: 
HashSet = HashSet::from_iter( + keys.iter() + .filter(|key| key >= &&INITIAL_AVAILABLE_ALIAS) + .copied() + .chain([*ALIAS_COUNTER_STORAGE_KEY.key()]), + ); + let expected_explicit_return_values = vec![ + EndpointArg::Value(ValueArg::Single(Felt::ZERO)), // Aliases.len + EndpointArg::Pointer(PointerArg::Array(vec![ // Aliases.ptr + Felt::ZERO; + (unique_keys.len()) * DICT_ACCESS_SIZE + ])), + // Aliases per-key ptr. + EndpointArg::Pointer(PointerArg::Array(vec![Felt::ZERO; keys.len()])), + ]; + let n_keys_arg = EndpointArg::Value(ValueArg::Single(keys.len().into())); + let keys_arg = EndpointArg::Pointer(PointerArg::Array(keys)); + let explicit_args = vec![n_keys_arg, keys_arg]; + let storage_view = initial_storage + .into_iter() + .map(|(key, value)| ((*ALIAS_CONTRACT_ADDRESS, key), value)) + .collect(); + + let state_reader = DictStateReader { storage_view, ..Default::default() }; + let (_, explicit_return_values, _) = run_cairo_0_entry_point( + &runner_config, + ALIASES_TEST_BYTES, + entrypoint, + &explicit_args, + &implicit_args, + &expected_explicit_return_values, + HashMap::new(), + Some(state_reader), + ) + .unwrap(); + if let [ + EndpointArg::Value(ValueArg::Single(n_aliases)), + EndpointArg::Pointer(PointerArg::Array(aliases_storage_updates)), + EndpointArg::Pointer(PointerArg::Array(alias_per_key)), + ] = explicit_return_values.as_slice() + { + let n_aliases = felt_to_usize(n_aliases).unwrap(); + assert_eq!(n_aliases, aliases_storage_updates.len() / DICT_ACCESS_SIZE); + let actual_alias_storage = parse_squashed_cairo_dict(aliases_storage_updates); + (actual_alias_storage, alias_per_key.clone().to_vec()) + } else { + panic!( + "The return value doesn't match the given format.\n Got: {explicit_return_values:?}" + ); + } +} diff --git a/crates/starknet_committer_and_os_cli/src/os_cli/tests/bls_field.rs b/crates/starknet_os/src/tests/bls_field.rs similarity index 67% rename from crates/starknet_committer_and_os_cli/src/os_cli/tests/bls_field.rs rename to 
crates/starknet_os/src/tests/bls_field.rs index 8a1ba5b45cf..e7fb0a6de11 100644 --- a/crates/starknet_committer_and_os_cli/src/os_cli/tests/bls_field.rs +++ b/crates/starknet_os/src/tests/bls_field.rs @@ -1,62 +1,37 @@ use std::array; use std::collections::HashMap; +use apollo_starknet_os_program::OS_PROGRAM_BYTES; use cairo_vm::types::builtin_name::BuiltinName; -use cairo_vm::types::layout_name::LayoutName; use cairo_vm::types::program::Program; use ethnum::U256; use num_bigint::{BigInt, BigUint, RandBigInt, RandomBits, Sign, ToBigInt}; use num_integer::Integer; use rand::Rng; -use starknet_os::hints::hint_implementation::kzg::utils::{split_bigint3, BASE, BLS_PRIME}; -use starknet_os::test_utils::cairo_runner::{ +use rstest::rstest; +use starknet_types_core::felt::Felt; + +use crate::hints::hint_implementation::kzg::utils::{split_bigint3, BASE, BLS_PRIME}; +use crate::test_utils::cairo_runner::{ run_cairo_0_entry_point, EndpointArg, - EntryPointRunnerConfig, ImplicitArg, PointerArg, ValueArg, }; -use starknet_os::test_utils::errors::OsSpecificTestError; -use starknet_types_core::felt::Felt; -use tracing::info; - -use crate::os_cli::tests::types::OsPythonTestResult; -use crate::os_cli::tests::utils::{ +use crate::test_utils::utils::{ + get_entrypoint_runner_config, pack_bigint3, seeded_random_prng, test_cairo_function, DEFAULT_PRIME, }; -use crate::shared_utils::types::PythonTestError; const REDUCED_MUL_LIMB_BOUND: i128 = 2_i128.pow(104); -pub(crate) fn test_bls_field(input: &str) -> OsPythonTestResult { - info!("Testing `test_bigint3_to_uint256`..."); - test_bigint3_to_uint256(input)?; - info!("Testing `test_felt_to_bigint3`..."); - test_felt_to_bigint3(input)?; - info!("Testing `test_horner_eval`..."); - test_horner_eval(input)?; - info!("Testing `test_reduced_mul_random`..."); - test_reduced_mul_random(input)?; - info!("Testing `test_reduced_mul_parameterized`..."); - test_reduced_mul_parameterized(input)?; - info!("Testing `test_bls_prime_value`..."); - 
test_bls_prime_value(input)?; - Ok("".to_string()) -} - -fn get_entrypoint_runner_config() -> EntryPointRunnerConfig { - EntryPointRunnerConfig { - layout: LayoutName::small, - add_main_prefix_to_entrypoint: false, - ..Default::default() - } -} +// TODO(Nimrod): Move this next to the BLS hints implementation. -fn run_reduced_mul_test(input: &str, a_split: &[Felt], b_split: &[Felt]) -> OsPythonTestResult { +fn run_reduced_mul_test(a_split: &[Felt], b_split: &[Felt]) { let explicit_args = [ EndpointArg::Value(ValueArg::Array(a_split.to_vec())), EndpointArg::Value(ValueArg::Array(b_split.to_vec())), @@ -70,22 +45,21 @@ fn run_reduced_mul_test(input: &str, a_split: &[Felt], b_split: &[Felt]) -> OsPy let expected_explicit_args = [EndpointArg::Value(ValueArg::Array(expected_result.to_vec()))]; test_cairo_function( &get_entrypoint_runner_config(), - input, + OS_PROGRAM_BYTES, "starkware.starknet.core.os.data_availability.bls_field.reduced_mul", &explicit_args, &implicit_args, &expected_explicit_args, &expected_implicit_args, HashMap::new(), - )?; - Ok("".to_string()) + ); } -fn test_bigint3_to_uint256(input: &str) -> OsPythonTestResult { +#[test] +fn test_bigint3_to_uint256() { let mut rng = seeded_random_prng(); let random_u256_big_uint: BigUint = rng.sample(RandomBits::new(256)); let random_u256_bigint = BigInt::from_biguint(Sign::Plus, random_u256_big_uint); - info!("random 256 bit bigint in `test_bigint3_to_uint256`: {random_u256_bigint}"); let cairo_bigin3 = EndpointArg::Value(ValueArg::Array( split_bigint3(random_u256_bigint.clone()).unwrap().to_vec(), )); @@ -101,19 +75,19 @@ fn test_bigint3_to_uint256(input: &str) -> OsPythonTestResult { let entrypoint_runner_config = get_entrypoint_runner_config(); test_cairo_function( &entrypoint_runner_config, - input, + OS_PROGRAM_BYTES, "starkware.starknet.core.os.data_availability.bls_field.bigint3_to_uint256", &explicit_args, &implicit_args, &expected_explicit_args, &expected_implicit_args, HashMap::new(), - )?; - 
Ok("".to_string()) + ); } -fn test_felt_to_bigint3(input: &str) -> OsPythonTestResult { - let values: [BigInt; 9] = [ +#[rstest] +fn test_felt_to_bigint3( + #[values( 0.into(), 1.into(), DEFAULT_PRIME.clone() - 1, @@ -122,38 +96,38 @@ fn test_felt_to_bigint3(input: &str) -> OsPythonTestResult { BASE.clone(), BASE.pow(2_u32) - 1, BASE.pow(2_u32), - DEFAULT_PRIME.clone() / 2, - ]; + DEFAULT_PRIME.clone() / 2 + )] + value: BigInt, +) { let entrypoint_runner_config = get_entrypoint_runner_config(); - for value in values { - let explicit_args: [EndpointArg; 1] = [Felt::from(value.clone()).into()]; - let implicit_args = [ImplicitArg::Builtin(BuiltinName::range_check)]; - let split_value = split_bigint3(value.clone()).unwrap(); - let expected_explicit_args = [EndpointArg::Value(ValueArg::Array(split_value.to_vec()))]; - let n_range_checks = if value == DEFAULT_PRIME.clone() - 1 { 0 } else { 6 }; - let expected_implicit_args: [EndpointArg; 1] = [n_range_checks.into()]; + let explicit_args: [EndpointArg; 1] = [Felt::from(value.clone()).into()]; + let implicit_args = [ImplicitArg::Builtin(BuiltinName::range_check)]; - test_cairo_function( - &entrypoint_runner_config, - input, - "starkware.starknet.core.os.data_availability.bls_field.felt_to_bigint3", - &explicit_args, - &implicit_args, - &expected_explicit_args, - &expected_implicit_args, - HashMap::new(), - )?; - } - Ok("".to_string()) + let split_value = split_bigint3(value.clone()).unwrap(); + let expected_explicit_args = [EndpointArg::Value(ValueArg::Array(split_value.to_vec()))]; + let n_range_checks = if value == DEFAULT_PRIME.clone() - 1 { 0 } else { 6 }; + let expected_implicit_args: [EndpointArg; 1] = [n_range_checks.into()]; + + test_cairo_function( + &entrypoint_runner_config, + OS_PROGRAM_BYTES, + "starkware.starknet.core.os.data_availability.bls_field.felt_to_bigint3", + &explicit_args, + &implicit_args, + &expected_explicit_args, + &expected_implicit_args, + HashMap::new(), + ); } -fn test_horner_eval(input: 
&str) -> OsPythonTestResult { +#[test] +fn test_horner_eval() { let mut rng = seeded_random_prng(); let entrypoint_runner_config = get_entrypoint_runner_config(); for n_coefficients in [0, 100, 4096] { - info!("Testing horner_eval with {n_coefficients} coefficients."); let mut explicit_args: Vec = vec![]; explicit_args.push(n_coefficients.into()); let coefficients: Vec = (0..n_coefficients) @@ -168,18 +142,18 @@ fn test_horner_eval(input: &str) -> OsPythonTestResult { ))); let implicit_args = [ImplicitArg::Builtin(BuiltinName::range_check)]; + let state_reader = None; let (_, explicit_retdata, _) = run_cairo_0_entry_point( &entrypoint_runner_config, - input, + OS_PROGRAM_BYTES, "starkware.starknet.core.os.data_availability.bls_field.horner_eval", &explicit_args, &implicit_args, &[EndpointArg::Value(ValueArg::Array(vec![Felt::ZERO, Felt::ZERO, Felt::ZERO]))], HashMap::new(), + state_reader, ) - .map_err(|error| { - PythonTestError::SpecificError(OsSpecificTestError::Cairo0EntryPointRunner(error)) - })?; + .unwrap(); // Get actual result. assert_eq!( @@ -213,7 +187,6 @@ fn test_horner_eval(input: &str) -> OsPythonTestResult { .mod_floor(&BLS_PRIME.clone()); // Calculate expected result. - info!("Calculating expected result."); let expected_result = coefficients.iter().enumerate().fold(BigUint::ZERO, |acc, (i, coefficient)| { acc + BigUint::from_bytes_be(&coefficient.to_bytes_be()) @@ -226,12 +199,10 @@ fn test_horner_eval(input: &str) -> OsPythonTestResult { Expected result: {expected_result}" ); } - - Ok("".to_string()) } -#[allow(dead_code)] -fn test_reduced_mul_random(input: &str) -> OsPythonTestResult { +#[test] +fn test_reduced_mul_random() { // Generate a,b in (-REDUCED_MUL_LIMB_LIMIT, REDUCED_MUL_LIMB_LIMIT). 
let mut rng = seeded_random_prng(); let a_split = (0..3) @@ -241,11 +212,11 @@ fn test_reduced_mul_random(input: &str) -> OsPythonTestResult { .map(|_| rng.gen_range(-REDUCED_MUL_LIMB_BOUND + 1..REDUCED_MUL_LIMB_BOUND).into()) .collect::>(); - run_reduced_mul_test(input, &a_split, &b_split) + run_reduced_mul_test(&a_split, &b_split) } -#[allow(dead_code)] -fn test_reduced_mul_parameterized(input: &str) -> OsPythonTestResult { +#[test] +fn test_reduced_mul_parameterized() { let max_value = Felt::from(REDUCED_MUL_LIMB_BOUND - 1); let min_value = Felt::from(-REDUCED_MUL_LIMB_BOUND + 1); let values: [([Felt; 3], [Felt; 3]); 4] = [ @@ -255,16 +226,14 @@ fn test_reduced_mul_parameterized(input: &str) -> OsPythonTestResult { ([Felt::ONE, Felt::from(2), Felt::from(3)], [Felt::ZERO, Felt::ZERO, Felt::ZERO]), ]; for (a_split, b_split) in values { - info!("Testing `reduced_mul` with a = {a_split:?}, b = {b_split:?}"); - run_reduced_mul_test(input, &a_split, &b_split)?; + run_reduced_mul_test(&a_split, &b_split); } - - Ok("".to_string()) } -fn test_bls_prime_value(input: &str) -> OsPythonTestResult { +#[test] +fn test_bls_prime_value() { let entrypoint = None; - let program = Program::from_bytes(input.as_bytes(), entrypoint).unwrap(); + let program = Program::from_bytes(OS_PROGRAM_BYTES, entrypoint).unwrap(); let actual_split_bls_prime: [Felt; 3] = array::from_fn(|i| { *program .constants @@ -277,5 +246,4 @@ fn test_bls_prime_value(input: &str) -> OsPythonTestResult { "Expected BLS prime value to be {expected_split_bls_prime:?}, got \ {actual_split_bls_prime:?}" ); - Ok("".to_string()) } diff --git a/deployments/images/sequencer/Dockerfile b/deployments/images/sequencer/Dockerfile index 666de60d806..8dedf244436 100644 --- a/deployments/images/sequencer/Dockerfile +++ b/deployments/images/sequencer/Dockerfile @@ -51,8 +51,8 @@ COPY --from=builder /app/target/${BUILD_MODE}/apollo_node ./target/${BUILD_MODE} COPY --from=builder 
/app/target/${BUILD_MODE}/shared_executables/starknet-sierra-compile ./target/${BUILD_MODE}/shared_executables/starknet-sierra-compile COPY --from=builder /usr/bin/tini /usr/bin/tini -# Copy sequencer config -COPY config/sequencer config/sequencer +# Copy apollo config schema, used when it loads its configuration. +COPY crates/apollo_node/resources/config_schema.json crates/apollo_node/resources/config_schema.json # Create a new user "sequencer". RUN set -ex; \ diff --git a/deployments/sequencer/app/service.py b/deployments/sequencer/app/service.py index 6440d9ccd1c..88f1574e2bb 100644 --- a/deployments/sequencer/app/service.py +++ b/deployments/sequencer/app/service.py @@ -366,7 +366,12 @@ def _get_service_anotations(self) -> typing.Dict[str, str]: self.service_topology.k8s_service_config.get("internal") is True and self._get_service_type() == const.K8SServiceType.LOAD_BALANCER ): - annotations.update({"cloud.google.com/load-balancer-type": "Internal"}) + annotations.update( + { + "cloud.google.com/load-balancer-type": "Internal", + "networking.gke.io/internal-load-balancer-allow-global-access": "true", + } + ) if self.service_topology.k8s_service_config.get("external_dns_name"): annotations.update( { diff --git a/scripts/merge_branches.py b/scripts/merge_branches.py index 08c6446af1b..1022fb82653 100755 --- a/scripts/merge_branches.py +++ b/scripts/merge_branches.py @@ -17,7 +17,7 @@ FINAL_BRANCH = "main" MERGE_PATHS_FILE = "scripts/merge_paths.json" -FILES_TO_PRESERVE = {"rust-toolchain.toml", "scripts/parent_branch.txt"} +FILES_TO_PRESERVE = {"scripts/parent_branch.txt"} def load_merge_paths() -> Dict[str, str]: diff --git a/scripts/system_tests/copy_state_and_restart.py b/scripts/system_tests/copy_state_and_restart.py index aaedcda0e22..2e9dfe92294 100644 --- a/scripts/system_tests/copy_state_and_restart.py +++ b/scripts/system_tests/copy_state_and_restart.py @@ -27,7 +27,7 @@ def copy_state(pod_name: str, data_dir: str) -> None: [ "kubectl", "cp", - data_dir, 
+ f"{data_dir}/.", f"{pod_name}:/data", "--retries=3", ] diff --git a/scripts/system_tests/sequencer_simulator.py b/scripts/system_tests/sequencer_simulator.py new file mode 100644 index 00000000000..9e8ca2e5e8e --- /dev/null +++ b/scripts/system_tests/sequencer_simulator.py @@ -0,0 +1,197 @@ +import os +import json +import subprocess +import argparse +from enum import Enum + + +class NodeType(Enum): + DISTRIBUTED = "distributed" + CONSOLIDATED = "consolidated" + HYBRID = "hybrid" + + +# TODO(Nadin): Add support for hybrid nodes. +def get_service_label(node_type: NodeType, service: str) -> str: + if node_type == NodeType.DISTRIBUTED: + return f"sequencer-{service.lower()}" + elif node_type == NodeType.CONSOLIDATED: + return "sequencer-node" + else: + raise ValueError(f"Unknown node type: {node_type}") + + +def get_config_ports(service_name, deployment_config_path, config_dir, key): + with open(deployment_config_path, "r", encoding="utf-8") as f: + deployment_config = json.load(f) + + ports = [] + for service in deployment_config.get("services", []): + if service.get("name") == service_name: + for path in service.get("config_paths", []): + full_path = os.path.join(config_dir, path) + try: + with open(full_path, "r", encoding="utf-8") as cfg_file: + config_data = json.load(cfg_file) + port = config_data.get(key) + print(f"🔍 Found port: {port}") + if port: + ports.append(port) + except Exception: + continue + return ports + + +def get_pod_name(service_label): + cmd = [ + "kubectl", + "get", + "pods", + "-l", + f"service={service_label}", + "-o", + "jsonpath={.items[0].metadata.name}", + ] + return subprocess.run( + cmd, capture_output=True, check=True, text=True + ).stdout.strip() + + +def port_forward(pod_name, local_port, remote_port): + cmd = ["kubectl", "port-forward", pod_name, f"{local_port}:{remote_port}"] + subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + +def run_simulator(http_port, monitoring_port, sender_address, 
receiver_address): + cmd = [ + "./target/debug/sequencer_simulator", + "--http-port", + str(http_port), + "--monitoring-port", + str(monitoring_port), + "--sender-address", + sender_address, + "--receiver-address", + receiver_address, + ] + proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True + ) + with open("sequencer_simulator.log", "w", encoding="utf-8") as log_file: + for line in proc.stdout: + print(line, end="") + log_file.write(line) + return proc.wait() + + +def setup_port_forwarding( + service_name, deployment_config_path, config_dir, config_key, node_type +): + ports = get_config_ports( + service_name, + deployment_config_path, + config_dir, + config_key, + ) + if not ports: + print(f"❌ No port found for {service_name}! Aborting.") + exit(1) + + port = ports[-1] + pod_name = get_pod_name(get_service_label(node_type, service_name)) + print(f"📡 Port-forwarding {pod_name} on local port {port}...") + port_forward(pod_name, port, port) + + return port + + +def main( + deployment_config_path, config_dir, node_type_str, sender_address, receiver_address +): + print("🚀 Running sequencer simulator....") + + try: + node_type = NodeType(node_type_str) + except ValueError: + print(f"❌ Unknown node type: {node_type_str}.") + exit(1) + + if node_type == NodeType.DISTRIBUTED: + state_sync_service = "StateSync" + http_server_service = "HttpServer" + elif node_type == NodeType.CONSOLIDATED: + state_sync_service = "Node" + http_server_service = "Node" + else: + print(f"❌ {node_type} node type is not supported for the sequencer simulator.") + exit(1) + + # Port-forward services + state_sync_port = setup_port_forwarding( + state_sync_service, + deployment_config_path, + config_dir, + "monitoring_endpoint_config.port", + node_type, + ) + + http_server_port = setup_port_forwarding( + http_server_service, + deployment_config_path, + config_dir, + "http_server_config.port", + node_type, + ) + + print( + f"Running the simulator with http 
port: {http_server_port} and monitoring port: {state_sync_port}" + ) + exit_code = run_simulator( + http_server_port, state_sync_port, sender_address, receiver_address + ) + + if exit_code != 0: + print("❌ Sequencer simulator failed!") + exit(exit_code) + else: + print("✅ Sequencer simulator completed successfully!") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Run the Sequencer Simulator with port forwarding." + ) + parser.add_argument( + "--deployment_config_path", + required=True, + help="Path to the deployment config JSON file.", + ) + parser.add_argument( + "--config_dir", required=True, help="Directory containing service config files." + ) + parser.add_argument( + "--node_type", + choices=[node_type.value for node_type in NodeType], + required=True, + help="Type of node to deploy: 'distributed' or 'consolidated'.", + ) + parser.add_argument( + "--sender_address", + required=True, + help="Ethereum sender address (e.g., 0xabc...).", + ) + parser.add_argument( + "--receiver_address", + required=True, + help="Ethereum receiver address (e.g., 0xdef...).", + ) + + args = parser.parse_args() + + main( + args.deployment_config_path, + args.config_dir, + args.node_type, + args.sender_address, + args.receiver_address, + )