From 90778df2a56abb01a74429cc0fc2ccd8bca5c903 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 19 Sep 2025 12:27:58 -0600 Subject: [PATCH 001/146] Add service constructor --- crates/services/block_aggregator_api/src/lib.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index abe6914c715..3466c41ce7a 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -20,6 +20,17 @@ pub mod result; pub mod block_range_response; +pub mod integration { + use fuel_core_services::ServiceRunner; + use crate::api::protobuf_adapter::ProtobufAPI; + use crate::BlockAggregator; + use crate::blocks::importer_and_db_source::ImporterAndDbSource; + use crate::db::storage_db::StorageDB; + + pub fn new_service() -> ServiceRunner, ImporterAndDbSource<(), (), ()>>> { + todo!() + } +} #[cfg(test)] mod tests; From 48f1ee31b88850c99163bbe536e00ea804137b3a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 23 Sep 2025 13:58:12 -0600 Subject: [PATCH 002/146] Fmt --- crates/services/block_aggregator_api/src/lib.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 3466c41ce7a..1573d34c5cb 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -21,15 +21,19 @@ pub mod result; pub mod block_range_response; pub mod integration { + use crate::{ + BlockAggregator, + api::protobuf_adapter::ProtobufAPI, + blocks::importer_and_db_source::ImporterAndDbSource, + db::storage_db::StorageDB, + }; use fuel_core_services::ServiceRunner; - use crate::api::protobuf_adapter::ProtobufAPI; - use crate::BlockAggregator; - use crate::blocks::importer_and_db_source::ImporterAndDbSource; - use crate::db::storage_db::StorageDB; - pub fn 
new_service() -> ServiceRunner, ImporterAndDbSource<(), (), ()>>> { + pub fn new_service() -> ServiceRunner< + BlockAggregator, ImporterAndDbSource<(), (), ()>>, + > { todo!() - } + } } #[cfg(test)] mod tests; From b25b3a96b3018ef5989a780c2902d0275dd2f48a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 25 Sep 2025 15:48:12 -0600 Subject: [PATCH 003/146] Add constructor --- .../services/block_aggregator_api/src/lib.rs | 98 +++++++++++++++++-- 1 file changed, 90 insertions(+), 8 deletions(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 62ab40655d6..2c27f2b5fa5 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -7,6 +7,7 @@ use crate::{ db::BlockAggregatorDB, }; use fuel_core_services::{ + RunnableService, RunnableTask, StateWatcher, TaskNextAction, @@ -23,16 +24,73 @@ pub mod block_range_response; pub mod integration { use crate::{ BlockAggregator, - api::protobuf_adapter::ProtobufAPI, - blocks::importer_and_db_source::ImporterAndDbSource, - db::storage_db::StorageDB, + api::{ + BlockAggregatorApi, + protobuf_adapter::ProtobufAPI, + }, + blocks::importer_and_db_source::{ + BlockSerializer, + ImporterAndDbSource, + }, + db::BlockAggregatorDB, }; - use fuel_core_services::ServiceRunner; + use fuel_core_services::{ + ServiceRunner, + stream::BoxStream, + }; + use fuel_core_storage::{ + StorageInspect, + tables::{ + FuelBlocks, + Transactions, + }, + }; + use fuel_core_types::{ + fuel_types::BlockHeight, + services::block_importer::SharedImportResult, + }; + + pub struct Config { + pub addr: String, + } - pub fn new_service() -> ServiceRunner< - BlockAggregator, ImporterAndDbSource<(), (), ()>>, - > { - todo!() + pub fn new_service( + config: &Config, + db: DB, + serializer: S, + onchain_db: OnchainDB, + importer: BoxStream, + ) -> ServiceRunner< + BlockAggregator>, + > + where + DB: BlockAggregatorDB< + BlockRangeResponse = 
::BlockRangeResponse, + >, + S: BlockSerializer + Clone + Send + Sync + 'static, + OnchainDB: Send + Sync, + OnchainDB: StorageInspect, + OnchainDB: StorageInspect, + E: std::fmt::Debug + Send + Sync, + { + let url = config.addr.to_string(); + let api = ProtobufAPI::new(url); + let db_starting_height = BlockHeight::from(0); + let db_ending_height = None; + let block_source = ImporterAndDbSource::new( + importer, + serializer, + onchain_db, + db_starting_height, + db_ending_height, + ); + let block_aggregator = BlockAggregator { + query: api, + database: db, + block_source, + new_block_subscriptions: Vec::new(), + }; + ServiceRunner::new(block_aggregator) } } #[cfg(test)] @@ -89,3 +147,27 @@ where Ok(()) } } + +#[async_trait::async_trait] +impl RunnableService for BlockAggregator +where + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, + Blocks: BlockSource, + BlockRange: Send, +{ + const NAME: &'static str = "BlockAggregatorService"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData {} + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} From 4c910b4ebdab6e5e5ea3fe760d4fb116bbba3a70 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 26 Sep 2025 11:17:25 -0600 Subject: [PATCH 004/146] Get compiling --- Cargo.lock | 59 ++++++++++--------- Cargo.toml | 1 + benches/benches/block_target_gas.rs | 1 + crates/fuel-core/Cargo.toml | 1 + crates/fuel-core/src/combined_database.rs | 20 +++++++ crates/fuel-core/src/database.rs | 7 +++ .../src/database/database_description.rs | 2 + .../database_description/block_aggregator.rs | 27 +++++++++ crates/fuel-core/src/service.rs | 1 + crates/fuel-core/src/service/sub_services.rs | 26 +++++++- .../services/block_aggregator_api/Cargo.toml | 15 +++-- .../src/blocks/importer_and_db_source.rs | 2 + .../serializer_adapter.rs | 23 ++++++++ .../block_aggregator_api/src/db/storage_db.rs | 2 +- 
14 files changed, 150 insertions(+), 37 deletions(-) create mode 100644 crates/fuel-core/src/database/database_description/block_aggregator.rs create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs diff --git a/Cargo.lock b/Cargo.lock index f5bfed84de2..f55b3d4bfba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1438,35 +1438,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block_aggregator_api" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "bytes", - "enum-iterator", - "fuel-core-services", - "fuel-core-storage", - "fuel-core-types 0.46.0", - "futures", - "num_enum", - "postcard", - "prost 0.14.1", - "rand 0.8.5", - "serde", - "strum 0.25.0", - "strum_macros 0.25.3", - "thiserror 2.0.12", - "tokio", - "tokio-stream", - "tonic 0.14.2", - "tonic-prost", - "tonic-prost-build", - "tracing", - "tracing-subscriber", -] - [[package]] name = "blocking" version = "1.6.1" @@ -3507,6 +3478,35 @@ dependencies = [ "strum 0.24.1", ] +[[package]] +name = "fuel-block-aggregator-api" +version = "0.46.0" +dependencies = [ + "anyhow", + "async-trait", + "bytes", + "enum-iterator", + "fuel-core-services", + "fuel-core-storage", + "fuel-core-types 0.46.0", + "futures", + "num_enum", + "postcard", + "prost 0.14.1", + "rand 0.8.5", + "serde", + "strum 0.25.0", + "strum_macros 0.25.3", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tonic 0.14.2", + "tonic-prost", + "tonic-prost-build", + "tracing", + "tracing-subscriber", +] + [[package]] name = "fuel-compression" version = "0.63.0" @@ -3532,6 +3532,7 @@ dependencies = [ "cosmrs", "derive_more 0.99.20", "enum-iterator", + "fuel-block-aggregator-api", "fuel-core", "fuel-core-chain-config", "fuel-core-compression-service", diff --git a/Cargo.toml b/Cargo.toml index 4eb930cf390..b40fe32ba21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,6 +82,7 @@ educe = { version = "0.6", default-features = false, features = [ enum-iterator = "1.2" 
enum_dispatch = "0.3.13" # Workspace members +fuel-block-aggregator-api = { version = "0.46.0", path = "crates/services/block_aggregator_api" } fuel-core = { version = "0.46.0", path = "./crates/fuel-core", default-features = false } fuel-core-bin = { version = "0.46.0", path = "./bin/fuel-core" } fuel-core-chain-config = { version = "0.46.0", path = "./crates/chain-config", default-features = false } diff --git a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index 43952cbe26e..f4ced344580 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -361,6 +361,7 @@ fn service_with_many_contracts( Default::default(), Default::default(), Default::default(), + Default::default(), ), config.clone(), ) diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 63701daf609..b326af1f558 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -86,6 +86,7 @@ fuel-core-tx-status-manager = { workspace = true } fuel-core-txpool = { workspace = true } fuel-core-types = { workspace = true, features = ["alloc", "serde"] } fuel-core-upgradable-executor = { workspace = true } +fuel-block-aggregator-api = { workspace = true } futures = { workspace = true } hex = { workspace = true } hyper = { workspace = true } diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index 30d4d76e069..0e54b49c379 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -10,6 +10,7 @@ use crate::{ GenesisDatabase, Result as DatabaseResult, database_description::{ + block_aggregator::BlockAggregatorDatabase, compression::CompressionDatabase, gas_price::GasPriceDatabase, off_chain::OffChain, @@ -60,6 +61,7 @@ pub struct CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, + block_aggregation: Database, } impl CombinedDatabase { @@ -69,6 +71,7 @@ impl CombinedDatabase { relayer: 
Database, gas_price: Database, compression: Database, + block_aggregation: Database, ) -> Self { Self { on_chain, @@ -76,6 +79,7 @@ impl CombinedDatabase { relayer, gas_price, compression, + block_aggregation, } } @@ -240,12 +244,22 @@ impl CombinedDatabase { ..database_config }, )?; + let block_aggregation = Database::open_rocksdb( + path, + state_rewind_policy, + DatabaseConfig { + max_fds, + ..database_config + }, + )?; + Ok(Self { on_chain, off_chain, relayer, gas_price, compression, + block_aggregation, }) } @@ -261,6 +275,7 @@ impl CombinedDatabase { relayer: Default::default(), gas_price: Default::default(), compression: Default::default(), + block_aggregation: Default::default(), }) } @@ -306,6 +321,7 @@ impl CombinedDatabase { Database::in_memory(), Database::in_memory(), Database::in_memory(), + Database::in_memory(), ) } @@ -326,6 +342,10 @@ impl CombinedDatabase { &self.compression } + pub fn block_aggregation(&self) -> &Database { + &self.block_aggregation + } + #[cfg(any(feature = "test-helpers", test))] pub fn on_chain_mut(&mut self) -> &mut Database { &mut self.on_chain diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index a75871ad3aa..0a65e35f60d 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -84,6 +84,7 @@ use crate::state::{ }; use crate::{ database::database_description::{ + block_aggregator::BlockAggregatorDatabase, gas_price::GasPriceDatabase, indexation_availability, }, @@ -441,6 +442,12 @@ impl Modifiable for Database { } } +impl Modifiable for Database { + fn commit_changes(&mut self, _changes: Changes) -> StorageResult<()> { + todo!() + } +} + #[cfg(feature = "relayer")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { diff --git a/crates/fuel-core/src/database/database_description.rs b/crates/fuel-core/src/database/database_description.rs index f7eebb96762..e991c2bc7f1 100644 --- 
a/crates/fuel-core/src/database/database_description.rs +++ b/crates/fuel-core/src/database/database_description.rs @@ -13,6 +13,8 @@ pub mod off_chain; pub mod on_chain; pub mod relayer; +pub mod block_aggregator; + pub trait DatabaseHeight: PartialEq + Default + Debug + Copy + Send + Sync { fn as_u64(&self) -> u64; diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs new file mode 100644 index 00000000000..42dde184136 --- /dev/null +++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs @@ -0,0 +1,27 @@ +use crate::database::database_description::DatabaseDescription; +use fuel_block_aggregator_api::db::storage_db::table::Column; +use fuel_core_types::fuel_types::BlockHeight; + +#[derive(Clone, Copy, Debug)] +pub struct BlockAggregatorDatabase; + +impl DatabaseDescription for BlockAggregatorDatabase { + type Column = Column; + type Height = BlockHeight; + + fn version() -> u32 { + 0 + } + + fn name() -> String { + "block_aggregator".to_string() + } + + fn metadata_column() -> Self::Column { + Column::Metadata + } + + fn prefix(_column: &Self::Column) -> Option { + None + } +} diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 15b87a7f88e..f3703a7955f 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -194,6 +194,7 @@ impl FuelService { Default::default(), Default::default(), Default::default(), + Default::default(), ); Self::from_combined_database(combined_database, config).await } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 59cd02a1b81..d9e119e668f 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -2,8 +2,10 @@ use std::sync::Arc; -use tokio::sync::Mutex; - +use fuel_block_aggregator_api::{ + 
blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, + db::storage_db::StorageDB, +}; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, da_source_service::block_committer_costs::{ @@ -14,7 +16,6 @@ use fuel_core_gas_price_service::v1::{ service::SharedData, uninitialized_task::new_gas_price_service_v1, }; - use fuel_core_poa::Trigger; use fuel_core_storage::{ self, @@ -23,6 +24,7 @@ use fuel_core_storage::{ #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; use fuel_core_types::signer::SignMode; +use tokio::sync::Mutex; use fuel_core_compression_service::service::new_service as new_compression_service; @@ -459,6 +461,23 @@ pub fn init_sub_services( chain_name, }; + let block_aggregator_config = fuel_block_aggregator_api::integration::Config { + addr: String::new(), + }; + let db = database.block_aggregation().clone(); + let db_adapter = StorageDB::new(db); + let serializer = SerializerAdapter; + let onchain_db = database.on_chain().clone(); + let importer = importer_adapter.events_shared_result(); + + let block_aggregator_rpc = fuel_block_aggregator_api::integration::new_service( + &block_aggregator_config, + db_adapter, + serializer, + onchain_db, + importer, + ); + let graph_ql = fuel_core_graphql_api::api_service::new_service( *genesis_block.header().height(), graphql_config, @@ -523,6 +542,7 @@ pub fn init_sub_services( services.push(Box::new(graph_ql)); services.push(Box::new(graphql_worker)); services.push(Box::new(tx_status_manager)); + services.push(Box::new(block_aggregator_rpc)); if let Some(compression_service) = compression_service { services.push(Box::new(compression_service)); diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 01c8d9be2e2..2254199e323 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -1,7 +1,14 @@ [package] -name = 
"block_aggregator_api" -version = "0.1.0" -edition = "2024" +name = "fuel-block-aggregator-api" +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } +description = "Block Aggregator API Service for Fuel Core" +build = "build.rs" [dependencies] anyhow = { workspace = true } @@ -13,6 +20,7 @@ fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } num_enum = { workspace = true } +postcard = { workspace = true } prost = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } @@ -32,6 +40,5 @@ tonic-prost-build = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } -postcard = { workspace = true } tokio-stream = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 7e100575bd6..343559a8cf0 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -34,6 +34,8 @@ pub mod sync_service; #[cfg(test)] mod tests; +pub mod serializer_adapter; + pub trait BlockSerializer { fn serialize_block(&self, block: &FuelBlock) -> Result; } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs new file mode 100644 index 00000000000..028c66081bb --- /dev/null +++ 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -0,0 +1,23 @@ +use crate::{ + blocks::{ + Block, + importer_and_db_source::BlockSerializer, + }, + result::Error, +}; + +use anyhow::anyhow; +use fuel_core_types::blockchain::block::Block as FuelBlock; +use postcard::to_allocvec; + +#[derive(Clone)] +pub struct SerializerAdapter; + +impl BlockSerializer for SerializerAdapter { + fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { + let bytes_vec = to_allocvec(block).map_err(|e| { + Error::BlockSource(anyhow!("failed to serialize block: {}", e)) + })?; + Ok(crate::blocks::Block::from(bytes_vec)) + } +} diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index ce7b731f790..cac501b2ddf 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -103,7 +103,7 @@ where for<'b> StorageTransaction<&'b mut S>: StorageMutate, S: AtomicView, T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, - StorageTransaction: AtomicView + StorageInspect, + StorageTransaction: StorageInspect, { type BlockRangeResponse = BlockRangeResponse; From 19100e351ee9e91cb622b57d8715a6da9d4e3676 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 26 Sep 2025 12:16:47 -0600 Subject: [PATCH 005/146] Add config, WIP test --- Cargo.lock | 1 + bin/fuel-core/Cargo.toml | 1 + bin/fuel-core/src/cli/run.rs | 9 ++ bin/fuel-core/src/cli/run/rpc.rs | 20 +++ crates/fuel-core/src/service/config.rs | 9 ++ crates/fuel-core/src/service/sub_services.rs | 4 +- .../services/block_aggregator_api/src/lib.rs | 4 +- tests/tests/lib.rs | 3 + tests/tests/rpc.rs | 130 ++++++++++++++++++ 9 files changed, 177 insertions(+), 4 deletions(-) create mode 100644 bin/fuel-core/src/cli/run/rpc.rs create mode 100644 tests/tests/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index 
f55b3d4bfba..3821848b1f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3646,6 +3646,7 @@ dependencies = [ "const_format", "dirs 4.0.0", "dotenvy", + "fuel-block-aggregator-api", "fuel-core", "fuel-core-chain-config", "fuel-core-metrics", diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index 3025e322f75..4e72094451a 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -54,6 +54,7 @@ clap = { workspace = true, features = ["derive", "env", "string"] } const_format = { version = "0.2", optional = true } dirs = "4.0" dotenvy = { version = "0.15", optional = true } +fuel-block-aggregator-api = { workspace = true } fuel-core = { workspace = true, features = ["wasm-executor"] } fuel-core-chain-config = { workspace = true } fuel-core-metrics = { workspace = true } diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index 81034cd7dc7..9be0f8f95e0 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -107,6 +107,8 @@ use std::num::NonZeroUsize; #[cfg(feature = "p2p")] mod p2p; +mod rpc; + #[cfg(feature = "shared-sequencer")] mod shared_sequencer; @@ -290,6 +292,9 @@ pub struct Command { #[cfg(feature = "p2p")] pub p2p_args: p2p::P2PArgs, + #[clap(flatten)] + pub rpc_args: rpc::RpcArgs, + #[cfg_attr(feature = "p2p", clap(flatten))] #[cfg(feature = "p2p")] pub sync_args: p2p::SyncArgs, @@ -369,6 +374,7 @@ impl Command { relayer_args, #[cfg(feature = "p2p")] p2p_args, + rpc_args, #[cfg(feature = "p2p")] sync_args, #[cfg(feature = "p2p")] @@ -451,6 +457,8 @@ impl Command { .echo_delegation_interval, }; + let rpc_config = rpc_args.into_config(); + let trigger: Trigger = poa_trigger.into(); if trigger != Trigger::Never { @@ -776,6 +784,7 @@ impl Command { status_cache_ttl: status_cache_ttl.into(), metrics: metrics.is_enabled(Module::TxStatusManager), }, + rpc_config, }; Ok(config) } diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs new file mode 100644 index 
00000000000..9de8007d4b3 --- /dev/null +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -0,0 +1,20 @@ +use clap::Args; +use std::net; + +#[derive(Debug, Clone, Args)] +pub struct RpcArgs { + #[clap(long = "ip", default_value = "127.0.0.1", value_parser, env)] + pub ip: net::IpAddr, + + /// The port to bind the GraphQL service to. + #[clap(long = "port", default_value = "4000", env)] + pub port: u16, +} + +impl RpcArgs { + pub fn into_config(self) -> fuel_block_aggregator_api::integration::Config { + fuel_block_aggregator_api::integration::Config { + addr: net::SocketAddr::new(self.ip, self.port), + } + } +} diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 55fc610e40d..46a5cf4a27b 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -76,6 +76,7 @@ pub struct Config { pub tx_status_manager: TxStatusManagerConfig, pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, + pub rpc_config: fuel_block_aggregator_api::integration::Config, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] @@ -156,6 +157,13 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); + let rpc_config = fuel_block_aggregator_api::integration::Config { + addr: std::net::SocketAddr::new( + std::net::Ipv4Addr::new(127, 0, 0, 1).into(), + 1, + ), + }; + Self { graphql_config: GraphQLConfig { addr: std::net::SocketAddr::new( @@ -229,6 +237,7 @@ impl Config { time_until_synced: Duration::ZERO, production_timeout: Duration::from_secs(20), memory_pool_size: 4, + rpc_config, } } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index d9e119e668f..6d9eec3613f 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -461,9 +461,7 @@ pub fn init_sub_services( chain_name, }; - let 
block_aggregator_config = fuel_block_aggregator_api::integration::Config { - addr: String::new(), - }; + let block_aggregator_config = config.rpc_config.clone(); let db = database.block_aggregation().clone(); let db_adapter = StorageDB::new(db); let serializer = SerializerAdapter; diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 2c27f2b5fa5..fd6498d9fb1 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -49,9 +49,11 @@ pub mod integration { fuel_types::BlockHeight, services::block_importer::SharedImportResult, }; + use std::net::SocketAddr; + #[derive(Clone, Debug)] pub struct Config { - pub addr: String, + pub addr: SocketAddr, } pub fn new_service( diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index 462742e5073..4dfb5a1fd9a 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -58,6 +58,9 @@ mod regenesis; mod relayer; #[cfg(not(feature = "only-p2p"))] mod required_fuel_block_height_extension; +#[cfg(not(feature = "only-p2p"))] +mod rpc; + #[cfg(not(feature = "only-p2p"))] mod snapshot; #[cfg(not(feature = "only-p2p"))] diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs new file mode 100644 index 00000000000..ca6c68d5ba4 --- /dev/null +++ b/tests/tests/rpc.rs @@ -0,0 +1,130 @@ +#![allow(non_snake_case)] + +use fuel_core::{ + chain_config::{ + LastBlockConfig, + StateConfig, + }, + database::Database, + service::{ + Config, + FuelService, + }, +}; +use fuel_core_client::client::{ + FuelClient, + pagination::{ + PageDirection, + PaginationRequest, + }, + types::TransactionStatus, +}; +use fuel_core_poa::Trigger; +use fuel_core_storage::{ + StorageAsMut, + tables::{ + FuelBlocks, + SealedBlockConsensus, + }, + transactional::WriteTransaction, + vm_storage::VmStorageRequirements, +}; +use fuel_core_types::{ + blockchain::{ + block::CompressedBlock, + consensus::Consensus, + }, + fuel_tx::*, + secrecy::ExposeSecret, + 
signer::SignMode, + tai64::Tai64, +}; +use futures::StreamExt; +use itertools::{ + Itertools, + rev, +}; +use rstest::rstest; +use std::{ + ops::Deref, + time::Duration, +}; +use test_helpers::send_graph_ql_query; + +use rand::{ + SeedableRng, + rngs::StdRng, +}; +// // given +// let path = free_local_addr(); +// let mut api = ProtobufAPI::new(path.to_string()); +// tokio::time::sleep(std::time::Duration::from_millis(100)).await; +// +// // call get current height endpoint with client +// let url = format!("http://{}", path); +// let mut client = BlockAggregatorClient::connect(url.to_string()) +// .await +// .expect("could not connect to server"); +// let request = BlockRangeRequest { start: 0, end: 1 }; +// let handle = tokio::spawn(async move { +// tracing::info!("querying with client"); +// client +// .get_block_range(request) +// .await +// .expect("could not get height") +// }); +// +// // when +// tracing::info!("awaiting query"); +// let query = api.await_query().await.unwrap(); +// +// // then +// let block1 = Block::new(Bytes::from(vec![0u8; 100])); +// let block2 = Block::new(Bytes::from(vec![1u8; 100])); +// let list = vec![block1, block2]; +// // return response through query's channel +// if let BlockAggregatorQuery::GetBlockRange { +// first, +// last, +// response, +// } = query +// { +// assert_eq!(first, BlockHeight::new(0)); +// assert_eq!(last, BlockHeight::new(1)); +// tracing::info!("correct query received, sending response"); +// let stream = tokio_stream::iter(list.clone()).boxed(); +// let range = BlockRangeResponse::Literal(stream); +// response.send(range).unwrap(); +// } else { +// panic!("expected GetBlockRange query"); +// } +// tracing::info!("awaiting query"); +// let response = handle.await.unwrap(); +// let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); +// let actual: Vec> = response +// .into_inner() +// .try_collect::>() +// .await +// .unwrap() +// .into_iter() +// .map(|b| b.data.to_vec()) +// .collect(); 
+// +// assert_eq!(expected, actual); +#[tokio::test] +async fn get_block_range__can_get_serialized_block_from_rpc() { + let config = Config::local_node(); + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let status = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let mut rpc_client = BlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); +} From d1c00310afee2376b5cee6e0787494b79cf526c4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 26 Sep 2025 16:58:48 -0600 Subject: [PATCH 006/146] WIP get first integ test working --- Cargo.lock | 2 + crates/fuel-core/src/service.rs | 11 +- .../service/adapters/consensus_module/poa.rs | 8 +- .../services/block_aggregator_api/Cargo.toml | 1 + .../src/api/protobuf_adapter.rs | 10 +- .../src/block_aggregator.rs | 3 + .../services/block_aggregator_api/src/lib.rs | 6 +- crates/services/importer/src/importer.rs | 9 +- tests/Cargo.toml | 1 + tests/tests/blocks.rs | 2 +- tests/tests/rpc.rs | 101 ++++++++++-------- 11 files changed, 98 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3821848b1f9..038667fe59d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,6 +3490,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-types 0.46.0", "futures", + "log", "num_enum", "postcard", "prost 0.14.1", @@ -4154,6 +4155,7 @@ dependencies = [ "clap", "cynic", "ethers", + "fuel-block-aggregator-api", "fuel-core", "fuel-core-benches", "fuel-core-bin", diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index f3703a7955f..f4470542884 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -145,7 +145,6 @@ impl FuelService { )?; // initialize sub services - tracing::info!("Initializing sub services"); database.sync_aux_db_heights(shutdown_listener)?; 
let block_production_ready_signal = ReadySignal::new(); @@ -374,6 +373,7 @@ impl FuelService { .await?; self.shared.block_importer.commit_result(result).await?; + tracing::error!("mouse mouse mouse"); } } @@ -393,7 +393,9 @@ impl FuelService { /// Start all sub services and await for them to start. pub async fn start_and_await(&self) -> anyhow::Result { let watcher = self.runner.state_watcher(); + tracing::error!("preparing genesis"); self.prepare_genesis(&watcher).await?; + tracing::info!("starting fuel service"); self.runner.start_and_await().await } @@ -468,18 +470,23 @@ impl RunnableService for Task { watcher: &StateWatcher, params: Self::TaskParams, ) -> anyhow::Result { + tracing::error!("Starting FuelService sub-services"); let mut watcher = watcher.clone(); for service in self.services.iter() { + tracing::error!("Starting FuelService sub-service"); tokio::select! { _ = watcher.wait_stopping_or_stopped() => { + tracing::error!("FuelService stopped"); break; } result = service.start_and_await() => { + tracing::error!("FuelService sub-services result: {:?}", result); result?; } } } + tracing::error!("All FuelService sub-services started"); params.block_production_ready_signal.send_ready_signal(); @@ -575,7 +582,7 @@ mod tests { // - gas price service // - chain info provider #[allow(unused_mut)] - let mut expected_services = 7; + let mut expected_services = 8; // Relayer service is disabled with `Config::local_node`. 
// #[cfg(feature = "relayer")] diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index 9155b66879c..43c5eb3a279 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -126,10 +126,14 @@ impl BlockImporter for BlockImporterAdapter { &self, result: UncommittedImporterResult, ) -> anyhow::Result<()> { - self.block_importer + tracing::error!("dog dog dog dog dog dog"); + let m = self + .block_importer .commit_result(result) .await - .map_err(Into::into) + .map_err(Into::into); + tracing::error!("moo moo moo moo moo moo moo"); + m } fn block_stream(&self) -> BoxStream { diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 2254199e323..f18e081f00e 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -32,6 +32,7 @@ tokio-stream = { workspace = true } tonic = { workspace = true } tonic-prost = { workspace = true } tracing = { workspace = true } +log = "0.4.27" [build-dependencies] tonic-prost-build = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 585d726af3c..fe5dde9e747 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -148,11 +148,15 @@ impl ProtobufAPI { let server = Server::new(query_sender); let addr = url.parse().unwrap(); let _server_task_handle = tokio::spawn(async move { - tonic::transport::Server::builder() + // TODO: Handle error + let res = tonic::transport::Server::builder() .add_service(block_aggregator_server::BlockAggregatorServer::new(server)) .serve(addr) - .await - .unwrap(); + .await; + match res { + Ok(_) => 
tracing::error!("ProtobufAPI server stopped"), + Err(e) => tracing::error!("ProtobufAPI server error: {}", e), + } }); Self { _server_task_handle, diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index a271c129b8e..a33dc2f9214 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -35,6 +35,7 @@ where } pub fn stop(&self) -> TaskNextAction { + tracing::error!("stopping"); TaskNextAction::Stop } @@ -43,6 +44,7 @@ where res: crate::result::Result>, ) -> TaskNextAction { tracing::debug!("Handling query: {res:?}"); + tracing::error!("Handling query: {res:?}"); let query = try_or_stop!(res, |e| { tracing::error!("Error receiving query: {e:?}"); }); @@ -109,6 +111,7 @@ where res: crate::result::Result, ) -> TaskNextAction { tracing::debug!("Handling block: {res:?}"); + tracing::error!("Handling block: {res:?}"); let event = try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); }); diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index fd6498d9fb1..1c309bc33e8 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -75,8 +75,8 @@ pub mod integration { OnchainDB: StorageInspect, E: std::fmt::Debug + Send + Sync, { - let url = config.addr.to_string(); - let api = ProtobufAPI::new(url); + let addr = config.addr.to_string(); + let api = ProtobufAPI::new(addr); let db_starting_height = BlockHeight::from(0); let db_ending_height = None; let block_source = ImporterAndDbSource::new( @@ -135,6 +135,7 @@ where { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tracing::debug!("BlockAggregator running"); + tracing::error!("BlockAggregator running"); tokio::select! 
{ query_res = self.query.await_query() => self.handle_query(query_res).await, block_res = self.block_source.next_block() => self.handle_block(block_res).await, @@ -170,6 +171,7 @@ where _state_watcher: &StateWatcher, _params: Self::TaskParams, ) -> anyhow::Result { + tracing::error!("BlockAggregator into task"); Ok(self) } } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index 7d95c24270c..5a19d4bd331 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -260,13 +260,20 @@ impl Importer { result: CommitInput, ) -> Result<(), Error> { let (sender, receiver) = oneshot::channel(); + let command = Commands::CommitResult { result, permit, callback: sender, }; self.commands.send(command)?; - receiver.await? + tracing::error!("aaaa"); + let res = tokio::time::timeout(Duration::from_secs(5), receiver) + .await + .expect("why this take so long?")?; + tracing::error!("kkkkkkk"); + // receiver.await? 
+ res } #[cfg(test)] diff --git a/tests/Cargo.toml b/tests/Cargo.toml index af95e0a0f46..77a19a2da89 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -37,6 +37,7 @@ aws-sdk-kms = { version = "1.37.0", optional = true } clap = { workspace = true } cynic = { workspace = true } ethers = "2" +fuel-block-aggregator-api = { version = "0.46.0", path = "../crates/services/block_aggregator_api" } fuel-core = { path = "../crates/fuel-core", default-features = false, features = [ "smt", "p2p", diff --git a/tests/tests/blocks.rs b/tests/tests/blocks.rs index d1df25968e3..a818cf4de58 100644 --- a/tests/tests/blocks.rs +++ b/tests/tests/blocks.rs @@ -371,7 +371,7 @@ async fn missing_first_and_last_parameters_returns_an_error() { assert!(result.contains("The queries for the whole range is not supported")); } -mod full_block { +pub mod full_block { use super::*; use cynic::QueryBuilder; use fuel_core_client::client::{ diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index ca6c68d5ba4..f45e8e47e54 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,60 +1,26 @@ #![allow(non_snake_case)] +use crate::blocks::full_block::ClientExt; +use fuel_block_aggregator_api::api::protobuf_adapter::block_aggregator_client::BlockAggregatorClient; use fuel_core::{ - chain_config::{ - LastBlockConfig, - StateConfig, - }, database::Database, service::{ Config, FuelService, }, }; -use fuel_core_client::client::{ - FuelClient, - pagination::{ - PageDirection, - PaginationRequest, - }, - types::TransactionStatus, -}; -use fuel_core_poa::Trigger; -use fuel_core_storage::{ - StorageAsMut, - tables::{ - FuelBlocks, - SealedBlockConsensus, - }, - transactional::WriteTransaction, - vm_storage::VmStorageRequirements, -}; +use fuel_core_client::client::FuelClient; use fuel_core_types::{ - blockchain::{ - block::CompressedBlock, - consensus::Consensus, - }, + blockchain::block::Block, fuel_tx::*, - secrecy::ExposeSecret, - signer::SignMode, - tai64::Tai64, + fuel_types::BlockHeight, }; use 
futures::StreamExt; -use itertools::{ - Itertools, - rev, -}; -use rstest::rstest; -use std::{ - ops::Deref, - time::Duration, +use std::net::{ + SocketAddr, + TcpListener, }; -use test_helpers::send_graph_ql_query; -use rand::{ - SeedableRng, - rngs::StdRng, -}; // // given // let path = free_local_addr(); // let mut api = ProtobufAPI::new(path.to_string()); @@ -111,20 +77,65 @@ use rand::{ // .collect(); // // assert_eq!(expected, actual); + +fn free_local_addr() -> SocketAddr { + let listener = TcpListener::bind("[::1]:0").unwrap(); + listener.local_addr().unwrap() // OS picks a free port +} + #[tokio::test] async fn get_block_range__can_get_serialized_block_from_rpc() { - let config = Config::local_node(); + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::ERROR) + .try_init(); + let mut config = Config::local_node(); + config.rpc_config.addr = free_local_addr(); + let rpc_url = config.rpc_config.addr.clone(); let srv = FuelService::from_database(Database::default(), config.clone()) .await .unwrap(); + tracing::error!("starting graphql client"); let graphql_client = FuelClient::from(srv.bound_address); let tx = Transaction::default_test_tx(); - let status = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + tracing::error!("submitting transaction to create block"); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); - let mut rpc_client = BlockAggregatorClient::connect(url.to_string()) + let mut rpc_client = BlockAggregatorClient::connect(rpc_url.to_string()) .await .expect("could not connect to server"); + + tracing::error!("fetching expected block from graphql"); + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let header = expected_block.header; + + // when + let request = fuel_block_aggregator_api::api::protobuf_adapter::BlockRangeRequest { + start: 1, + end: 1, + }; + tracing::error!("sending request: {:?}", request); + let actual_bytes = rpc_client + 
.get_block_range(request.clone()) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .data; + let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); + + // then + assert_eq!( + *actual_block.header().height(), + BlockHeight::from(header.height.0) + ); } From 59e36c45e5d6a4655724fef83837f224b673efe4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 09:23:30 -0600 Subject: [PATCH 007/146] Fix client url --- crates/fuel-core/src/database.rs | 4 +-- crates/fuel-core/src/service/sub_services.rs | 26 +++++++++----------- tests/tests/rpc.rs | 7 +++--- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 0a65e35f60d..e5fa30a9dca 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -443,8 +443,8 @@ impl Modifiable for Database { } impl Modifiable for Database { - fn commit_changes(&mut self, _changes: Changes) -> StorageResult<()> { - todo!() + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) } } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 6d9eec3613f..0c42bc93f03 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -1,11 +1,18 @@ #![allow(clippy::let_unit_value)] -use std::sync::Arc; - +#[cfg(feature = "relayer")] +use crate::relayer::Config as RelayerConfig; +#[cfg(feature = "p2p")] +use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ + key_generator::Ed25519KeyGenerator, + trigger::TimeBasedTrigger, + tx_receiver::PreconfirmationsReceiver, +}; use fuel_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_db::StorageDB, }; +use fuel_core_compression_service::service::new_service as 
new_compression_service; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, da_source_service::block_committer_costs::{ @@ -24,20 +31,9 @@ use fuel_core_storage::{ #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; use fuel_core_types::signer::SignMode; +use std::sync::Arc; use tokio::sync::Mutex; -use fuel_core_compression_service::service::new_service as new_compression_service; - -#[cfg(feature = "relayer")] -use crate::relayer::Config as RelayerConfig; - -#[cfg(feature = "p2p")] -use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ - key_generator::Ed25519KeyGenerator, - trigger::TimeBasedTrigger, - tx_receiver::PreconfirmationsReceiver, -}; - use super::{ DbType, adapters::{ @@ -467,6 +463,8 @@ pub fn init_sub_services( let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); + // let fake_importer = + // fuel_core_services::stream::IntoBoxStream::into_boxed(tokio_stream::pending()); let block_aggregator_rpc = fuel_block_aggregator_api::integration::new_service( &block_aggregator_config, diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index f45e8e47e54..e7001af9b83 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -83,7 +83,7 @@ fn free_local_addr() -> SocketAddr { listener.local_addr().unwrap() // OS picks a free port } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc() { let _ = tracing_subscriber::fmt() .with_max_level(tracing::Level::ERROR) @@ -96,14 +96,15 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .unwrap(); - tracing::error!("starting graphql client"); + tracing::error!("starting graphql client at {:?}", srv.bound_address); let graphql_client = FuelClient::from(srv.bound_address); let tx = Transaction::default_test_tx(); tracing::error!("submitting transaction to create block"); let _ 
= graphql_client.submit_and_await_commit(&tx).await.unwrap(); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url.to_string()) + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = BlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); From 21bcf1a5b1195d5efb1aef02fd13cb8709692608 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 10:04:49 -0600 Subject: [PATCH 008/146] Cleanup --- crates/fuel-core/src/service.rs | 7 -- .../service/adapters/consensus_module/poa.rs | 2 - crates/fuel-core/src/service/sub_services.rs | 3 - .../src/api/protobuf_adapter.rs | 9 +-- .../src/block_aggregator.rs | 3 - .../importer_and_db_source/sync_service.rs | 2 + .../services/block_aggregator_api/src/lib.rs | 2 - crates/services/importer/src/importer.rs | 2 - tests/tests/rpc.rs | 73 ++----------------- 9 files changed, 12 insertions(+), 91 deletions(-) diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index f4470542884..3c9bcd189db 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -373,7 +373,6 @@ impl FuelService { .await?; self.shared.block_importer.commit_result(result).await?; - tracing::error!("mouse mouse mouse"); } } @@ -393,9 +392,7 @@ impl FuelService { /// Start all sub services and await for them to start. pub async fn start_and_await(&self) -> anyhow::Result { let watcher = self.runner.state_watcher(); - tracing::error!("preparing genesis"); self.prepare_genesis(&watcher).await?; - tracing::info!("starting fuel service"); self.runner.start_and_await().await } @@ -470,18 +467,14 @@ impl RunnableService for Task { watcher: &StateWatcher, params: Self::TaskParams, ) -> anyhow::Result { - tracing::error!("Starting FuelService sub-services"); let mut watcher = watcher.clone(); for service in self.services.iter() { - tracing::error!("Starting FuelService sub-service"); tokio::select! 
{ _ = watcher.wait_stopping_or_stopped() => { - tracing::error!("FuelService stopped"); break; } result = service.start_and_await() => { - tracing::error!("FuelService sub-services result: {:?}", result); result?; } } diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index 43c5eb3a279..e3a56023a7d 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -126,13 +126,11 @@ impl BlockImporter for BlockImporterAdapter { &self, result: UncommittedImporterResult, ) -> anyhow::Result<()> { - tracing::error!("dog dog dog dog dog dog"); let m = self .block_importer .commit_result(result) .await .map_err(Into::into); - tracing::error!("moo moo moo moo moo moo moo"); m } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 0c42bc93f03..28a8ef79c02 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -463,9 +463,6 @@ pub fn init_sub_services( let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); - // let fake_importer = - // fuel_core_services::stream::IntoBoxStream::into_boxed(tokio_stream::pending()); - let block_aggregator_rpc = fuel_block_aggregator_api::integration::new_service( &block_aggregator_config, db_adapter, diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index fe5dde9e747..84ce7656a56 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -149,14 +149,11 @@ impl ProtobufAPI { let addr = url.parse().unwrap(); let _server_task_handle = tokio::spawn(async move { // TODO: Handle error - let res = 
tonic::transport::Server::builder() + tonic::transport::Server::builder() .add_service(block_aggregator_server::BlockAggregatorServer::new(server)) .serve(addr) - .await; - match res { - Ok(_) => tracing::error!("ProtobufAPI server stopped"), - Err(e) => tracing::error!("ProtobufAPI server error: {}", e), - } + .await + .unwrap() }); Self { _server_task_handle, diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index a33dc2f9214..a271c129b8e 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -35,7 +35,6 @@ where } pub fn stop(&self) -> TaskNextAction { - tracing::error!("stopping"); TaskNextAction::Stop } @@ -44,7 +43,6 @@ where res: crate::result::Result>, ) -> TaskNextAction { tracing::debug!("Handling query: {res:?}"); - tracing::error!("Handling query: {res:?}"); let query = try_or_stop!(res, |e| { tracing::error!("Error receiving query: {e:?}"); }); @@ -111,7 +109,6 @@ where res: crate::result::Result, ) -> TaskNextAction { tracing::debug!("Handling block: {res:?}"); - tracing::error!("Handling block: {res:?}"); let event = try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); }); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 683e36b4ba5..83b271f44b9 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -25,6 +25,7 @@ use fuel_core_types::{ }, fuel_types::BlockHeight, }; +use std::time::Duration; use tokio::sync::mpsc::Sender; pub struct SyncTask { @@ -136,6 +137,7 @@ where self.next_height = BlockHeight::from((*next_height).saturating_add(1)); } else { 
tracing::warn!("no block found at height {:?}, retrying", next_height); + tokio::time::sleep(Duration::from_millis(10)).await; } TaskNextAction::Continue } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 1c309bc33e8..900e1c56087 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -135,7 +135,6 @@ where { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tracing::debug!("BlockAggregator running"); - tracing::error!("BlockAggregator running"); tokio::select! { query_res = self.query.await_query() => self.handle_query(query_res).await, block_res = self.block_source.next_block() => self.handle_block(block_res).await, @@ -171,7 +170,6 @@ where _state_watcher: &StateWatcher, _params: Self::TaskParams, ) -> anyhow::Result { - tracing::error!("BlockAggregator into task"); Ok(self) } } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index 5a19d4bd331..904aea0f09d 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -267,11 +267,9 @@ impl Importer { callback: sender, }; self.commands.send(command)?; - tracing::error!("aaaa"); let res = tokio::time::timeout(Duration::from_secs(5), receiver) .await .expect("why this take so long?")?; - tracing::error!("kkkkkkk"); // receiver.await? 
res } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index e7001af9b83..8d67e360302 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -21,63 +21,6 @@ use std::net::{ TcpListener, }; -// // given -// let path = free_local_addr(); -// let mut api = ProtobufAPI::new(path.to_string()); -// tokio::time::sleep(std::time::Duration::from_millis(100)).await; -// -// // call get current height endpoint with client -// let url = format!("http://{}", path); -// let mut client = BlockAggregatorClient::connect(url.to_string()) -// .await -// .expect("could not connect to server"); -// let request = BlockRangeRequest { start: 0, end: 1 }; -// let handle = tokio::spawn(async move { -// tracing::info!("querying with client"); -// client -// .get_block_range(request) -// .await -// .expect("could not get height") -// }); -// -// // when -// tracing::info!("awaiting query"); -// let query = api.await_query().await.unwrap(); -// -// // then -// let block1 = Block::new(Bytes::from(vec![0u8; 100])); -// let block2 = Block::new(Bytes::from(vec![1u8; 100])); -// let list = vec![block1, block2]; -// // return response through query's channel -// if let BlockAggregatorQuery::GetBlockRange { -// first, -// last, -// response, -// } = query -// { -// assert_eq!(first, BlockHeight::new(0)); -// assert_eq!(last, BlockHeight::new(1)); -// tracing::info!("correct query received, sending response"); -// let stream = tokio_stream::iter(list.clone()).boxed(); -// let range = BlockRangeResponse::Literal(stream); -// response.send(range).unwrap(); -// } else { -// panic!("expected GetBlockRange query"); -// } -// tracing::info!("awaiting query"); -// let response = handle.await.unwrap(); -// let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); -// let actual: Vec> = response -// .into_inner() -// .try_collect::>() -// .await -// .unwrap() -// .into_iter() -// .map(|b| b.data.to_vec()) -// .collect(); -// -// assert_eq!(expected, actual); - fn free_local_addr() -> 
SocketAddr { let listener = TcpListener::bind("[::1]:0").unwrap(); listener.local_addr().unwrap() // OS picks a free port @@ -85,9 +28,9 @@ fn free_local_addr() -> SocketAddr { #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::ERROR) - .try_init(); + // let _ = tracing_subscriber::fmt() + // .with_max_level(tracing::Level::ERROR) + // .try_init(); let mut config = Config::local_node(); config.rpc_config.addr = free_local_addr(); let rpc_url = config.rpc_config.addr.clone(); @@ -96,11 +39,9 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .unwrap(); - tracing::error!("starting graphql client at {:?}", srv.bound_address); let graphql_client = FuelClient::from(srv.bound_address); let tx = Transaction::default_test_tx(); - tracing::error!("submitting transaction to create block"); let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); @@ -108,7 +49,6 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .expect("could not connect to server"); - tracing::error!("fetching expected block from graphql"); let expected_block = graphql_client .full_block_by_height(1) .await @@ -121,7 +61,6 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { start: 1, end: 1, }; - tracing::error!("sending request: {:?}", request); let actual_bytes = rpc_client .get_block_range(request.clone()) .await @@ -136,7 +75,9 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { // then assert_eq!( - *actual_block.header().height(), - BlockHeight::from(header.height.0) + BlockHeight::from(header.height.0), + *actual_block.header().height() ); + // check txs + assert_eq!(2, actual_block.transactions().len()); } From 8b0a1cb64f818346d01a4e8ff1cdd8398ce25d2d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 10:24:29 -0600 Subject: [PATCH 
009/146] Compare txs directly --- tests/tests/rpc.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 8d67e360302..fa38ab0fef3 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -79,5 +79,9 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { *actual_block.header().height() ); // check txs - assert_eq!(2, actual_block.transactions().len()); + let actual_tx = actual_block.transactions().first().unwrap(); + let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); + let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); + + assert_eq!(&expected_tx, actual_tx); } From 71e21639cc2231880dd8d88c7126be9c49f081cb Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 10:31:56 -0600 Subject: [PATCH 010/146] Update CHANGELOG --- .changes/added/3101.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changes/added/3101.md diff --git a/.changes/added/3101.md b/.changes/added/3101.md new file mode 100644 index 00000000000..551e11a2cc9 --- /dev/null +++ b/.changes/added/3101.md @@ -0,0 +1 @@ +Integrate new block aggregation RPC into Fuel Core \ No newline at end of file From 0ce24fee84f5d0e1f480a1743c9b05be342840c4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 10:32:32 -0600 Subject: [PATCH 011/146] Lint toml --- crates/fuel-core/Cargo.toml | 2 +- crates/services/block_aggregator_api/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index b326af1f558..91b9e443f3e 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -64,6 +64,7 @@ clap = { workspace = true, features = ["derive"] } cosmrs = { version = "0.21", optional = true } derive_more = { version = "0.99" } enum-iterator = { workspace = true } +fuel-block-aggregator-api = { workspace = true } fuel-core-chain-config = { workspace = true, features 
= ["std"] } fuel-core-compression-service = { workspace = true } fuel-core-consensus-module = { workspace = true } @@ -86,7 +87,6 @@ fuel-core-tx-status-manager = { workspace = true } fuel-core-txpool = { workspace = true } fuel-core-types = { workspace = true, features = ["alloc", "serde"] } fuel-core-upgradable-executor = { workspace = true } -fuel-block-aggregator-api = { workspace = true } futures = { workspace = true } hex = { workspace = true } hyper = { workspace = true } diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index f18e081f00e..29acbc9ee4c 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -19,6 +19,7 @@ fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } +log = "0.4.27" num_enum = { workspace = true } postcard = { workspace = true } prost = { workspace = true } @@ -32,7 +33,6 @@ tokio-stream = { workspace = true } tonic = { workspace = true } tonic-prost = { workspace = true } tracing = { workspace = true } -log = "0.4.27" [build-dependencies] tonic-prost-build = { workspace = true } From dd998811bdfe21cc477a95965a11597e936fb52e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 10:43:28 -0600 Subject: [PATCH 012/146] improve comments on cli args --- bin/fuel-core/src/cli/run/rpc.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index 9de8007d4b3..b55053708a2 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -3,10 +3,11 @@ use std::net; #[derive(Debug, Clone, Args)] pub struct RpcArgs { + /// The IP address to bind the RPC service to #[clap(long = "ip", default_value = "127.0.0.1", value_parser, env)] pub ip: net::IpAddr, - /// The port to 
bind the GraphQL service to. + /// The port to bind the RPC service to #[clap(long = "port", default_value = "4000", env)] pub port: u16, } From 052bbd1e28ce6f7405aa808f2fdec346ad57f0d1 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 17:11:03 -0600 Subject: [PATCH 013/146] Move socket selection to test helpers and use by default --- crates/fuel-core/src/service/config.rs | 15 +++++++++++---- tests/tests/rpc.rs | 15 +-------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 46a5cf4a27b..5a6862efe8b 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -1,5 +1,9 @@ use clap::ValueEnum; use std::{ + net::{ + SocketAddr, + TcpListener, + }, num::{ NonZeroU32, NonZeroU64, @@ -103,6 +107,12 @@ pub struct Config { pub memory_pool_size: usize, } +#[cfg(feature = "test-helpers")] +fn free_local_addr() -> SocketAddr { + let listener = TcpListener::bind("[::1]:0").unwrap(); + listener.local_addr().unwrap() // OS picks a free port +} + impl Config { #[cfg(feature = "test-helpers")] pub fn local_node() -> Self { @@ -158,10 +168,7 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); let rpc_config = fuel_block_aggregator_api::integration::Config { - addr: std::net::SocketAddr::new( - std::net::Ipv4Addr::new(127, 0, 0, 1).into(), - 1, - ), + addr: free_local_addr(), }; Self { diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index fa38ab0fef3..a539c7612d6 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -16,23 +16,10 @@ use fuel_core_types::{ fuel_types::BlockHeight, }; use futures::StreamExt; -use std::net::{ - SocketAddr, - TcpListener, -}; - -fn free_local_addr() -> SocketAddr { - let listener = TcpListener::bind("[::1]:0").unwrap(); - listener.local_addr().unwrap() // OS picks a free port -} #[tokio::test(flavor = "multi_thread")] async fn 
get_block_range__can_get_serialized_block_from_rpc() { - // let _ = tracing_subscriber::fmt() - // .with_max_level(tracing::Level::ERROR) - // .try_init(); - let mut config = Config::local_node(); - config.rpc_config.addr = free_local_addr(); + let config = Config::local_node(); let rpc_url = config.rpc_config.addr.clone(); let srv = FuelService::from_database(Database::default(), config.clone()) From ae579d181313bed2a5e228a31041875e1c5d4723 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 17:40:33 -0600 Subject: [PATCH 014/146] Appease Clippy-sama --- .../fuel-core/src/service/adapters/consensus_module/poa.rs | 6 ++---- crates/services/importer/src/importer.rs | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index e3a56023a7d..9155b66879c 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -126,12 +126,10 @@ impl BlockImporter for BlockImporterAdapter { &self, result: UncommittedImporterResult, ) -> anyhow::Result<()> { - let m = self - .block_importer + self.block_importer .commit_result(result) .await - .map_err(Into::into); - m + .map_err(Into::into) } fn block_stream(&self) -> BoxStream { diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index 904aea0f09d..d9063188933 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -267,11 +267,9 @@ impl Importer { callback: sender, }; self.commands.send(command)?; - let res = tokio::time::timeout(Duration::from_secs(5), receiver) + tokio::time::timeout(Duration::from_secs(5), receiver) .await - .expect("why this take so long?")?; - // receiver.await? - res + .expect("why this take so long?")? 
} #[cfg(test)] From d4e2b48d8c2beb109a30aa2d17959a52b930a216 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 29 Sep 2025 23:28:43 -0600 Subject: [PATCH 015/146] Remove unused --- crates/fuel-core/src/service/config.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 5a6862efe8b..184a1a9127c 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -1,9 +1,5 @@ use clap::ValueEnum; use std::{ - net::{ - SocketAddr, - TcpListener, - }, num::{ NonZeroU32, NonZeroU64, From d945fbe1a9171631fd5419d71bd825d290b0ad24 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 30 Sep 2025 11:20:31 -0600 Subject: [PATCH 016/146] Re-add import --- crates/fuel-core/src/service/config.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 184a1a9127c..95d5638d30b 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -1,4 +1,9 @@ use clap::ValueEnum; +#[cfg(feature = "test-helpers")] +use std::net::{ + SocketAddr, + TcpListener, +}; use std::{ num::{ NonZeroU32, From e43cc0e23f02cfa5ef987ce5c001dcd663de9c2f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 30 Sep 2025 11:38:25 -0600 Subject: [PATCH 017/146] Rename trait, add new adapter template --- .../src/block_aggregator.rs | 4 ++-- .../services/block_aggregator_api/src/db.rs | 3 ++- .../src/db/remote_cache.rs | 22 +++++++++++++++++++ .../block_aggregator_api/src/db/storage_db.rs | 4 ++-- .../services/block_aggregator_api/src/lib.rs | 10 ++++----- .../block_aggregator_api/src/tests.rs | 2 +- 6 files changed, 34 insertions(+), 11 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/db/remote_cache.rs diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs 
index a271c129b8e..01777f5470b 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -9,7 +9,7 @@ use crate::{ BlockSource, BlockSourceEvent, }, - db::BlockAggregatorDB, + db::BlockStorage, }; use fuel_core_services::{ TaskNextAction, @@ -20,7 +20,7 @@ use fuel_core_types::fuel_types::BlockHeight; impl BlockAggregator where Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + DB: BlockStorage, Blocks: BlockSource, BlockRangeResponse: Send, { diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 13a0bcc8489..933440f7e94 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -4,10 +4,11 @@ use crate::{ }; use fuel_core_types::fuel_types::BlockHeight; +pub mod remote_cache; pub mod storage_db; /// The definition of the block aggregator database. -pub trait BlockAggregatorDB: Send + Sync { +pub trait BlockStorage: Send + Sync { /// The type used to report a range of blocks type BlockRangeResponse; diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs new file mode 100644 index 00000000000..ae51dac2d37 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -0,0 +1,22 @@ +use fuel_core_types::fuel_types::BlockHeight; +use crate::block_range_response::BlockRangeResponse; +use crate::blocks::Block; +use crate::db::BlockStorage; + +pub struct RemoteCache; + +impl BlockStorage for RemoteCache { + type BlockRangeResponse = BlockRangeResponse; + + fn store_block(&mut self, height: BlockHeight, block: Block) -> impl Future> + Send { + todo!() + } + + fn get_block_range(&self, first: BlockHeight, last: BlockHeight) -> impl Future> + Send { + todo!() + } + + fn get_current_height(&self) -> impl Future> + Send { + todo!() + } +} \ No newline at end of file diff --git 
a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index cac501b2ddf..3114401474e 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -2,7 +2,7 @@ use crate::{ block_range_response::BlockRangeResponse, blocks::Block, db::{ - BlockAggregatorDB, + BlockStorage, storage_db::table::Column, }, result::{ @@ -96,7 +96,7 @@ impl StorageDB { } } -impl BlockAggregatorDB for StorageDB +impl BlockStorage for StorageDB where S: Modifiable + std::fmt::Debug, S: KeyValueInspect, diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 900e1c56087..c31c142aed3 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -4,7 +4,7 @@ use crate::{ Block, BlockSource, }, - db::BlockAggregatorDB, + db::BlockStorage, }; use fuel_core_services::{ RunnableService, @@ -32,7 +32,7 @@ pub mod integration { BlockSerializer, ImporterAndDbSource, }, - db::BlockAggregatorDB, + db::BlockStorage, }; use fuel_core_services::{ ServiceRunner, @@ -66,7 +66,7 @@ pub mod integration { BlockAggregator>, > where - DB: BlockAggregatorDB< + DB: BlockStorage< BlockRangeResponse = ::BlockRangeResponse, >, S: BlockSerializer + Clone + Send + Sync + 'static, @@ -129,7 +129,7 @@ impl NewBlock { impl RunnableTask for BlockAggregator where Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + DB: BlockStorage, Blocks: BlockSource, BlockRange: Send, { @@ -154,7 +154,7 @@ where impl RunnableService for BlockAggregator where Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + DB: BlockStorage, Blocks: BlockSource, BlockRange: Send, { diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index ac069687760..8c29f1967ab 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ 
b/crates/services/block_aggregator_api/src/tests.rs @@ -74,7 +74,7 @@ impl FakeDB { } } -impl BlockAggregatorDB for FakeDB { +impl BlockStorage for FakeDB { type BlockRangeResponse = BlockRangeResponse; async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { From a5e11f3c4016c2c2ca316bc0c2dd7e59b137e8fb Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 30 Sep 2025 11:40:28 -0600 Subject: [PATCH 018/146] fmt --- .../src/db/remote_cache.rs | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index ae51dac2d37..9327d16f02c 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -1,22 +1,35 @@ +use crate::{ + block_range_response::BlockRangeResponse, + blocks::Block, + db::BlockStorage, +}; use fuel_core_types::fuel_types::BlockHeight; -use crate::block_range_response::BlockRangeResponse; -use crate::blocks::Block; -use crate::db::BlockStorage; pub struct RemoteCache; impl BlockStorage for RemoteCache { type BlockRangeResponse = BlockRangeResponse; - fn store_block(&mut self, height: BlockHeight, block: Block) -> impl Future> + Send { + fn store_block( + &mut self, + height: BlockHeight, + block: Block, + ) -> impl Future> + Send { todo!() } - fn get_block_range(&self, first: BlockHeight, last: BlockHeight) -> impl Future> + Send { + fn get_block_range( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> impl Future> + Send + { todo!() } - fn get_current_height(&self) -> impl Future> + Send { + fn get_current_height( + &self, + ) -> impl Future> + Send { todo!() } -} \ No newline at end of file +} From d7e75e169cd71967d1a278a9de8d38672e7d2516 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 30 Sep 2025 11:49:44 -0600 Subject: [PATCH 019/146] Make clap args unique --- 
bin/fuel-core/src/cli/run/rpc.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index b55053708a2..f54297ba0cc 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -4,18 +4,18 @@ use std::net; #[derive(Debug, Clone, Args)] pub struct RpcArgs { /// The IP address to bind the RPC service to - #[clap(long = "ip", default_value = "127.0.0.1", value_parser, env)] - pub ip: net::IpAddr, + #[clap(long = "rpc_ip", default_value = "127.0.0.1", value_parser, env)] + pub rpc_ip: net::IpAddr, /// The port to bind the RPC service to - #[clap(long = "port", default_value = "4000", env)] - pub port: u16, + #[clap(long = "rpc_port", default_value = "4001", env)] + pub rpc_port: u16, } impl RpcArgs { pub fn into_config(self) -> fuel_block_aggregator_api::integration::Config { fuel_block_aggregator_api::integration::Config { - addr: net::SocketAddr::new(self.ip, self.port), + addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), } } } From 91c0e3961b6b544608253afb328103967f9d135b Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 30 Sep 2025 11:58:35 -0600 Subject: [PATCH 020/146] Include protoc installation in CI --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ba34a257092..256a9942a5d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -239,6 +239,7 @@ jobs: - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} + - uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: From 45389178f87a6e8d1a88e64590cfd0a9cad8ca79 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 09:39:14 -0600 Subject: [PATCH 021/146] WIP debugging failing test --- Cargo.lock | 3 +++ bin/e2e-test-client/Cargo.toml | 2 ++ bin/e2e-test-client/src/test_context.rs | 10 ++++++++++ 
bin/e2e-test-client/tests/integration_tests.rs | 11 +++++++++-- tests/test-helpers/Cargo.toml | 1 + 5 files changed, 25 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b7547de90a..298fa0f8ffd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3840,6 +3840,8 @@ dependencies = [ "tikv-jemallocator", "tokio", "toml 0.5.11", + "tracing", + "tracing-subscriber", ] [[package]] @@ -10067,6 +10069,7 @@ dependencies = [ "serde_json", "tempfile", "tokio", + "tracing", ] [[package]] diff --git a/bin/e2e-test-client/Cargo.toml b/bin/e2e-test-client/Cargo.toml index 877c941c7c0..a55d90460c3 100644 --- a/bin/e2e-test-client/Cargo.toml +++ b/bin/e2e-test-client/Cargo.toml @@ -32,6 +32,8 @@ test-helpers = { path = "../../tests/test-helpers" } tikv-jemallocator = { workspace = true } tokio = { workspace = true } toml = { version = "0.5" } +tracing-subscriber = "0.3.20" +tracing = "0.1.41" [dev-dependencies] assert_cmd = "2.0" diff --git a/bin/e2e-test-client/src/test_context.rs b/bin/e2e-test-client/src/test_context.rs index 24c5a879f09..6ca2e444a6e 100644 --- a/bin/e2e-test-client/src/test_context.rs +++ b/bin/e2e-test-client/src/test_context.rs @@ -62,7 +62,17 @@ pub struct TestContext { impl TestContext { pub async fn new(config: SuiteConfig) -> Self { + tracing::error!( + "Alice Client Endpoint: {:?} or {:?}", + config.endpoint.clone(), + config.wallet_a.endpoint.clone(), + ); let alice_client = Self::new_client(config.endpoint.clone(), &config.wallet_a); + tracing::error!( + "Bob Client Endpoint: {:?} or {:?}", + config.endpoint.clone(), + config.wallet_b.endpoint.clone(), + ); let bob_client = Self::new_client(config.endpoint.clone(), &config.wallet_b); Self { alice: Wallet::new(config.wallet_a.secret, alice_client).await, diff --git a/bin/e2e-test-client/tests/integration_tests.rs b/bin/e2e-test-client/tests/integration_tests.rs index 972d4c5da65..b4edcb55117 100644 --- a/bin/e2e-test-client/tests/integration_tests.rs +++ 
b/bin/e2e-test-client/tests/integration_tests.rs @@ -50,6 +50,10 @@ async fn works_in_multinode_local_env() { fuel_tx::Input, }; + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::ERROR) + .try_init(); + let config = dev_config(); let mut rng = StdRng::seed_from_u64(line!() as u64); let secret = SecretKey::random(&mut rng); @@ -78,8 +82,11 @@ async fn works_in_multinode_local_env() { ..Default::default() }; - config.wallet_a.endpoint = Some(producer.node.bound_address.to_string()); - config.wallet_b.endpoint = Some(validator.node.bound_address.to_string()); + let producer_bound_addr = producer.node.bound_address.to_string(); + let validator_bound_addr = validator.node.bound_address.to_string(); + + config.wallet_a.endpoint = Some(producer_bound_addr); + config.wallet_b.endpoint = Some(validator_bound_addr); // save config file let config = save_config_file(config); diff --git a/tests/test-helpers/Cargo.toml b/tests/test-helpers/Cargo.toml index 41d270c7691..a6b204e6bb3 100644 --- a/tests/test-helpers/Cargo.toml +++ b/tests/test-helpers/Cargo.toml @@ -38,3 +38,4 @@ reqwest = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tempfile = { workspace = true } +tracing = "0.1.41" From 9d9d54b9afcf63c71d0010e8b029c4d5c568760a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 10:52:16 -0600 Subject: [PATCH 022/146] Fix integ tests by using different rpc addrs --- bin/e2e-test-client/tests/integration_tests.rs | 3 ++- crates/fuel-core/src/p2p_test_helpers.rs | 7 +++++-- crates/fuel-core/src/service/config.rs | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/bin/e2e-test-client/tests/integration_tests.rs b/bin/e2e-test-client/tests/integration_tests.rs index b4edcb55117..ab3fccb5e4b 100644 --- a/bin/e2e-test-client/tests/integration_tests.rs +++ b/bin/e2e-test-client/tests/integration_tests.rs @@ -85,8 +85,9 @@ async fn works_in_multinode_local_env() { let producer_bound_addr = 
producer.node.bound_address.to_string(); let validator_bound_addr = validator.node.bound_address.to_string(); - config.wallet_a.endpoint = Some(producer_bound_addr); + config.wallet_a.endpoint = Some(producer_bound_addr.clone()); config.wallet_b.endpoint = Some(validator_bound_addr); + // config.wallet_b.endpoint = Some(producer_bound_addr); // save config file let config = save_config_file(config); diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index d1868ba14db..fc5e1c05a58 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -15,6 +15,7 @@ use crate::{ service::{ Config, FuelService, + config::free_local_addr, }, }; use fuel_core_chain_config::{ @@ -357,7 +358,8 @@ pub async fn make_nodes( let mut producers = Vec::with_capacity(producers_with_txs.len()); for (i, s) in producers_with_txs.into_iter().enumerate() { - let config = config.clone(); + let mut config = config.clone(); + config.rpc_config.addr = free_local_addr(); let name = s.as_ref().map_or(String::new(), |s| s.0.name.clone()); let overrides = s .clone() @@ -422,7 +424,8 @@ pub async fn make_nodes( let mut validators = vec![]; for (i, s) in validators_setup.into_iter().enumerate() { - let config = config.clone(); + let mut config = config.clone(); + config.rpc_config.addr = free_local_addr(); let name = s.as_ref().map_or(String::new(), |s| s.name.clone()); let overrides = s .clone() diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 95d5638d30b..dbe45e353e7 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -109,7 +109,7 @@ pub struct Config { } #[cfg(feature = "test-helpers")] -fn free_local_addr() -> SocketAddr { +pub fn free_local_addr() -> SocketAddr { let listener = TcpListener::bind("[::1]:0").unwrap(); listener.local_addr().unwrap() // OS picks a free port } From 
7c5e52b7da53551ace6699da243053bbced46a22 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 10:53:33 -0600 Subject: [PATCH 023/146] Remove traces --- bin/e2e-test-client/src/test_context.rs | 10 ---------- crates/fuel-core/src/service.rs | 1 - 2 files changed, 11 deletions(-) diff --git a/bin/e2e-test-client/src/test_context.rs b/bin/e2e-test-client/src/test_context.rs index 6ca2e444a6e..24c5a879f09 100644 --- a/bin/e2e-test-client/src/test_context.rs +++ b/bin/e2e-test-client/src/test_context.rs @@ -62,17 +62,7 @@ pub struct TestContext { impl TestContext { pub async fn new(config: SuiteConfig) -> Self { - tracing::error!( - "Alice Client Endpoint: {:?} or {:?}", - config.endpoint.clone(), - config.wallet_a.endpoint.clone(), - ); let alice_client = Self::new_client(config.endpoint.clone(), &config.wallet_a); - tracing::error!( - "Bob Client Endpoint: {:?} or {:?}", - config.endpoint.clone(), - config.wallet_b.endpoint.clone(), - ); let bob_client = Self::new_client(config.endpoint.clone(), &config.wallet_b); Self { alice: Wallet::new(config.wallet_a.secret, alice_client).await, diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 3c9bcd189db..541657d7c30 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -479,7 +479,6 @@ impl RunnableService for Task { } } } - tracing::error!("All FuelService sub-services started"); params.block_production_ready_signal.send_ready_signal(); From 05516e356d92cf16b42a6854fb7cf1a92e68d1f2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 11:03:29 -0600 Subject: [PATCH 024/146] Remove tracing from deps --- Cargo.lock | 3 --- bin/e2e-test-client/Cargo.toml | 2 -- bin/e2e-test-client/tests/integration_tests.rs | 5 ----- tests/test-helpers/Cargo.toml | 1 - 4 files changed, 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 298fa0f8ffd..6b7547de90a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3840,8 +3840,6 @@ dependencies = [ 
"tikv-jemallocator", "tokio", "toml 0.5.11", - "tracing", - "tracing-subscriber", ] [[package]] @@ -10069,7 +10067,6 @@ dependencies = [ "serde_json", "tempfile", "tokio", - "tracing", ] [[package]] diff --git a/bin/e2e-test-client/Cargo.toml b/bin/e2e-test-client/Cargo.toml index a55d90460c3..877c941c7c0 100644 --- a/bin/e2e-test-client/Cargo.toml +++ b/bin/e2e-test-client/Cargo.toml @@ -32,8 +32,6 @@ test-helpers = { path = "../../tests/test-helpers" } tikv-jemallocator = { workspace = true } tokio = { workspace = true } toml = { version = "0.5" } -tracing-subscriber = "0.3.20" -tracing = "0.1.41" [dev-dependencies] assert_cmd = "2.0" diff --git a/bin/e2e-test-client/tests/integration_tests.rs b/bin/e2e-test-client/tests/integration_tests.rs index ab3fccb5e4b..d150a490b19 100644 --- a/bin/e2e-test-client/tests/integration_tests.rs +++ b/bin/e2e-test-client/tests/integration_tests.rs @@ -50,10 +50,6 @@ async fn works_in_multinode_local_env() { fuel_tx::Input, }; - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::ERROR) - .try_init(); - let config = dev_config(); let mut rng = StdRng::seed_from_u64(line!() as u64); let secret = SecretKey::random(&mut rng); @@ -87,7 +83,6 @@ async fn works_in_multinode_local_env() { config.wallet_a.endpoint = Some(producer_bound_addr.clone()); config.wallet_b.endpoint = Some(validator_bound_addr); - // config.wallet_b.endpoint = Some(producer_bound_addr); // save config file let config = save_config_file(config); diff --git a/tests/test-helpers/Cargo.toml b/tests/test-helpers/Cargo.toml index a6b204e6bb3..41d270c7691 100644 --- a/tests/test-helpers/Cargo.toml +++ b/tests/test-helpers/Cargo.toml @@ -38,4 +38,3 @@ reqwest = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tempfile = { workspace = true } -tracing = "0.1.41" From ef3671a44e0eb5396e7626f2998ab8e84aa4015f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 15:28:51 -0600 Subject: [PATCH 025/146] Use new 
proto type in integ test --- tests/tests/rpc.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index a539c7612d6..bc687653802 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,7 +1,10 @@ #![allow(non_snake_case)] use crate::blocks::full_block::ClientExt; -use fuel_block_aggregator_api::api::protobuf_adapter::block_aggregator_client::BlockAggregatorClient; +use fuel_block_aggregator_api::api::protobuf_adapter::{ + block_aggregator_client::BlockAggregatorClient, + block_response::Payload, +}; use fuel_core::{ database::Database, service::{ @@ -48,7 +51,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { start: 1, end: 1, }; - let actual_bytes = rpc_client + let actual_bytes = if let Some(Payload::Literal(block)) = rpc_client .get_block_range(request.clone()) .await .unwrap() @@ -57,7 +60,12 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .unwrap() .unwrap() - .data; + .payload + { + block.data + } else { + panic!("expected literal block payload"); + }; let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); // then From 8f2cda17e75d8739eb1c884ba9147ba92606741c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 16:05:22 -0600 Subject: [PATCH 026/146] add integ tests for other endpoints --- tests/tests/rpc.rs | 92 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index bc687653802..4248e767b2b 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -80,3 +80,95 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { assert_eq!(&expected_tx, actual_tx); } + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_height__can_get_value_from_rpc() { + let config = Config::local_node(); + let rpc_url = config.rpc_config.addr.clone(); + + // given + let srv = FuelService::from_database(Database::default(), 
config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + let request = fuel_block_aggregator_api::api::protobuf_adapter::BlockHeightRequest {}; + let expected_height = 1; + let actual_height = rpc_client + .get_block_height(request) + .await + .unwrap() + .into_inner() + .height; + + // then + assert_eq!(expected_height, actual_height); +} + +#[tokio::test(flavor = "multi_thread")] +async fn new_block_subscription__can_get_expect_block() { + let config = Config::local_node(); + let rpc_url = config.rpc_config.addr.clone(); + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let request = + fuel_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; + let mut stream = rpc_client + .new_block_subscription(request.clone()) + .await + .unwrap() + .into_inner(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + // when + let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) + .await + .unwrap(); + let actual_bytes = + if let Some(Payload::Literal(block)) = next.unwrap().unwrap().payload { + block.data + } else { + panic!("expected literal block payload"); + }; + + // then + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let header = expected_block.header; + let actual_block: Block = 
postcard::from_bytes(&actual_bytes).unwrap(); + assert_eq!( + BlockHeight::from(header.height.0), + *actual_block.header().height() + ); + // check txs + let actual_tx = actual_block.transactions().first().unwrap(); + let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); + let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); + + assert_eq!(&expected_tx, actual_tx); +} From 910761ceb5b0d8ea63576d39a5beeff46fbd0c3a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 10:09:43 -0600 Subject: [PATCH 027/146] Hide rpc behind feature --- bin/fuel-core/Cargo.toml | 1 + bin/fuel-core/src/cli/run.rs | 1 + crates/fuel-core/Cargo.toml | 1 + crates/fuel-core/src/p2p_test_helpers.rs | 15 ++++++---- crates/fuel-core/src/service/config.rs | 3 ++ crates/fuel-core/src/service/sub_services.rs | 31 ++++++++++++-------- tests/Cargo.toml | 1 + tests/tests/lib.rs | 1 + 8 files changed, 36 insertions(+), 18 deletions(-) diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index 4e72094451a..a62b009e408 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -26,6 +26,7 @@ relayer = ["fuel-core/relayer", "dep:url"] parquet = ["fuel-core-chain-config/parquet", "fuel-core-types/serde"] rocksdb = ["fuel-core/rocksdb"] rocksdb-production = ["fuel-core/rocksdb-production", "rocksdb"] +rpc = ["fuel-core/rpc"] # features to enable in production, but increase build times production = [ "env", diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index 9be0f8f95e0..dbd8eb5d9cf 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -784,6 +784,7 @@ impl Command { status_cache_ttl: status_cache_ttl.into(), metrics: metrics.is_enabled(Module::TxStatusManager), }, + #[cfg(feature = "rpc")] rpc_config, }; Ok(config) diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 91b9e443f3e..cda3c499b49 100644 --- a/crates/fuel-core/Cargo.toml +++ 
b/crates/fuel-core/Cargo.toml @@ -21,6 +21,7 @@ smt = [ ] p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"] relayer = ["dep:fuel-core-relayer"] +rpc = [] shared-sequencer = ["dep:fuel-core-shared-sequencer", "dep:cosmrs"] rocksdb = ["dep:rocksdb", "dep:tempfile", "dep:num_cpus"] backup = ["rocksdb", "fuel-core-database/backup"] diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index fc5e1c05a58..c093b128d85 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -1,5 +1,7 @@ //! # Helpers for creating networks of nodes +#[cfg(feature = "rpc")] +use crate::service::config::free_local_addr; use crate::{ chain_config::{ CoinConfig, @@ -15,7 +17,6 @@ use crate::{ service::{ Config, FuelService, - config::free_local_addr, }, }; use fuel_core_chain_config::{ @@ -358,8 +359,10 @@ pub async fn make_nodes( let mut producers = Vec::with_capacity(producers_with_txs.len()); for (i, s) in producers_with_txs.into_iter().enumerate() { - let mut config = config.clone(); - config.rpc_config.addr = free_local_addr(); + #[cfg(feature = "rpc")] + { + config.rpc_config.addr = free_local_addr(); + } let name = s.as_ref().map_or(String::new(), |s| s.0.name.clone()); let overrides = s .clone() @@ -424,8 +427,10 @@ pub async fn make_nodes( let mut validators = vec![]; for (i, s) in validators_setup.into_iter().enumerate() { - let mut config = config.clone(); - config.rpc_config.addr = free_local_addr(); + #[cfg(feature = "rpc")] + { + config.rpc_config.addr = crate::service::config::free_local_addr(); + } let name = s.as_ref().map_or(String::new(), |s| s.name.clone()); let overrides = s .clone() diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index dbe45e353e7..c310e6cf2c7 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -81,6 +81,7 @@ pub struct Config { pub tx_status_manager: TxStatusManagerConfig, 
pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, + #[cfg(feature = "rpc")] pub rpc_config: fuel_block_aggregator_api::integration::Config, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, @@ -168,6 +169,7 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); + #[cfg(feature = "rpc")] let rpc_config = fuel_block_aggregator_api::integration::Config { addr: free_local_addr(), }; @@ -245,6 +247,7 @@ impl Config { time_until_synced: Duration::ZERO, production_timeout: Duration::from_secs(20), memory_pool_size: 4, + #[cfg(feature = "rpc")] rpc_config, } } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 28a8ef79c02..d7f5d3863d4 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -8,6 +8,7 @@ use crate::service::adapters::consensus_module::poa::pre_confirmation_signature: trigger::TimeBasedTrigger, tx_receiver::PreconfirmationsReceiver, }; +#[cfg(feature = "rpc")] use fuel_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_db::StorageDB, @@ -457,19 +458,22 @@ pub fn init_sub_services( chain_name, }; - let block_aggregator_config = config.rpc_config.clone(); - let db = database.block_aggregation().clone(); - let db_adapter = StorageDB::new(db); - let serializer = SerializerAdapter; - let onchain_db = database.on_chain().clone(); - let importer = importer_adapter.events_shared_result(); - let block_aggregator_rpc = fuel_block_aggregator_api::integration::new_service( - &block_aggregator_config, - db_adapter, - serializer, - onchain_db, - importer, - ); + #[cfg(feature = "rpc")] + let block_aggregator_rpc = { + let block_aggregator_config = config.rpc_config.clone(); + let db = database.block_aggregation().clone(); + let db_adapter = StorageDB::new(db); + let serializer = SerializerAdapter; + let 
onchain_db = database.on_chain().clone(); + let importer = importer_adapter.events_shared_result(); + fuel_block_aggregator_api::integration::new_service( + &block_aggregator_config, + db_adapter, + serializer, + onchain_db, + importer, + ) + }; let graph_ql = fuel_core_graphql_api::api_service::new_service( *genesis_block.header().height(), @@ -535,6 +539,7 @@ pub fn init_sub_services( services.push(Box::new(graph_ql)); services.push(Box::new(graphql_worker)); services.push(Box::new(tx_status_manager)); + #[cfg(feature = "rpc")] services.push(Box::new(block_aggregator_rpc)); if let Some(compression_service) = compression_service { diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 77a19a2da89..ccae4eaba2e 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -26,6 +26,7 @@ fault-proving = [ "fuel-core-compression-service/fault-proving", "fuel-core-benches/fault-proving", ] +rpc = ["fuel-core/rpc", "fuel-core-bin/rpc"] [dependencies] anyhow = { workspace = true } diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index 4dfb5a1fd9a..c4683ada2b3 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -59,6 +59,7 @@ mod relayer; #[cfg(not(feature = "only-p2p"))] mod required_fuel_block_height_extension; #[cfg(not(feature = "only-p2p"))] +#[cfg(feature = "rpc")] mod rpc; #[cfg(not(feature = "only-p2p"))] From 154abca9d30952ce981629b63eb9f771c6eaf8e1 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 10:16:03 -0600 Subject: [PATCH 028/146] Fix test --- crates/fuel-core/src/service.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 541657d7c30..471ead07ff4 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -574,6 +574,9 @@ mod tests { // - gas price service // - chain info provider #[allow(unused_mut)] + #[cfg(not(feature = "rpc"))] + let mut expected_services = 7; + #[cfg(feature = "rpc")] let mut expected_services = 8; // Relayer 
service is disabled with `Config::local_node`. From 7b482ffd8f56680074af3f6e4a658317d65ae520 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 11:38:25 -0600 Subject: [PATCH 029/146] Refactor test helpers --- Cargo.lock | 3 + tests/test-helpers/Cargo.toml | 4 + tests/test-helpers/src/client_ext.rs | 71 ++++++++ tests/test-helpers/src/lib.rs | 2 + tests/tests/blocks.rs | 255 ++++++++++----------------- tests/tests/lib.rs | 1 - tests/tests/rpc.rs | 2 +- 7 files changed, 175 insertions(+), 163 deletions(-) create mode 100644 tests/test-helpers/src/client_ext.rs diff --git a/Cargo.lock b/Cargo.lock index 6b7547de90a..85b01e3a477 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10049,10 +10049,13 @@ name = "test-helpers" version = "0.0.0" dependencies = [ "anyhow", + "async-trait", "clap", + "cynic", "fuel-core", "fuel-core-bin", "fuel-core-client", + "fuel-core-executor", "fuel-core-p2p", "fuel-core-poa", "fuel-core-relayer", diff --git a/tests/test-helpers/Cargo.toml b/tests/test-helpers/Cargo.toml index 41d270c7691..cb0f644d839 100644 --- a/tests/test-helpers/Cargo.toml +++ b/tests/test-helpers/Cargo.toml @@ -11,6 +11,7 @@ publish = false [dependencies] anyhow = { workspace = true } clap = { workspace = true } +cynic = { workspace = true } fuel-core = { path = "../../crates/fuel-core", default-features = false, features = [ "test-helpers", ] } @@ -27,6 +28,8 @@ fuel-core-storage = { path = "../../crates/storage", features = [ "test-helpers", ] } fuel-core-trace = { path = "../../crates/trace" } + +fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } fuel-core-txpool = { path = "../../crates/services/txpool_v2", features = [ "test-helpers", ] } @@ -38,3 +41,4 @@ reqwest = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tempfile = { workspace = true } +async-trait = {workspace = true} diff --git a/tests/test-helpers/src/client_ext.rs b/tests/test-helpers/src/client_ext.rs new file mode 100644 index 
00000000000..24745795702 --- /dev/null +++ b/tests/test-helpers/src/client_ext.rs @@ -0,0 +1,71 @@ +use super::*; +use cynic::QueryBuilder; +use fuel_core::upgradable_executor::native_executor::executor::max_tx_count; +use fuel_core_client::client::{ + FuelClient, + schema::{ + BlockId, + U32, + block::{ + BlockByHeightArgs, + BlockByHeightArgsFields, + Consensus, + Header, + }, + schema, + tx::OpaqueTransaction, + }, +}; +use fuel_core_txpool::config::{ + HeavyWorkConfig, + PoolLimits, +}; +use fuel_core_types::fuel_types::BlockHeight; + +#[derive(cynic::QueryFragment, Debug)] +#[cynic( + schema_path = "../../crates/client/assets/schema.sdl", + graphql_type = "Query", + variables = "BlockByHeightArgs" +)] +pub struct FullBlockByHeightQuery { + #[arguments(height: $height)] + pub block: Option, +} + +#[derive(cynic::QueryFragment, Debug)] +#[cynic( + schema_path = "../../crates/client/assets/schema.sdl", + graphql_type = "Block" +)] +#[allow(dead_code)] +pub struct FullBlock { + pub id: BlockId, + pub header: Header, + pub consensus: Consensus, + pub transactions: Vec, +} + +#[async_trait::async_trait] +pub trait ClientExt { + async fn full_block_by_height( + &self, + height: u32, + ) -> std::io::Result>; +} + +#[async_trait::async_trait] +impl ClientExt for FuelClient { + async fn full_block_by_height( + &self, + height: u32, + ) -> std::io::Result> { + let query = FullBlockByHeightQuery::build(BlockByHeightArgs { + height: Some(U32(height)), + }); + + let block = self.query(query).await?.block; + + Ok(block) + } +} diff --git a/tests/test-helpers/src/lib.rs b/tests/test-helpers/src/lib.rs index 025765d15bd..71309f77756 100644 --- a/tests/test-helpers/src/lib.rs +++ b/tests/test-helpers/src/lib.rs @@ -37,6 +37,8 @@ pub mod counter_contract; pub mod fuel_core_driver; pub mod mint_contract; +pub mod client_ext; + pub fn predicate() -> Vec { vec![op::ret(1)].into_iter().collect::>() } diff --git a/tests/tests/blocks.rs b/tests/tests/blocks.rs index 
a818cf4de58..d18fa735083 100644 --- a/tests/tests/blocks.rs +++ b/tests/tests/blocks.rs @@ -17,6 +17,7 @@ use fuel_core_client::client::{ }, types::TransactionStatus, }; +use fuel_core_executor::executor::max_tx_count; use fuel_core_poa::Trigger; use fuel_core_storage::{ StorageAsMut, @@ -27,12 +28,17 @@ use fuel_core_storage::{ transactional::WriteTransaction, vm_storage::VmStorageRequirements, }; +use fuel_core_txpool::config::{ + HeavyWorkConfig, + PoolLimits, +}; use fuel_core_types::{ blockchain::{ block::CompressedBlock, consensus::Consensus, }, fuel_tx::*, + fuel_types::BlockHeight, secrecy::ExposeSecret, signer::SignMode, tai64::Tai64, @@ -42,16 +48,19 @@ use itertools::{ Itertools, rev, }; +use rand::{ + SeedableRng, + prelude::StdRng, +}; use rstest::rstest; use std::{ ops::Deref, time::Duration, }; -use test_helpers::send_graph_ql_query; - -use rand::{ - SeedableRng, - rngs::StdRng, +use test_helpers::{ + client_ext::ClientExt, + make_tx, + send_graph_ql_query, }; #[tokio::test] @@ -370,170 +379,94 @@ async fn missing_first_and_last_parameters_returns_an_error() { let result = send_graph_ql_query(&url, query).await; assert!(result.contains("The queries for the whole range is not supported")); } +#[tokio::test] +async fn get_full_block_with_tx() { + let srv = FuelService::from_database(Database::default(), Config::local_node()) + .await + .unwrap(); -pub mod full_block { - use super::*; - use cynic::QueryBuilder; - use fuel_core_client::client::{ - FuelClient, - schema::{ - BlockId, - U32, - block::{ - BlockByHeightArgs, - BlockByHeightArgsFields, - Consensus, - Header, - }, - schema, - tx::OpaqueTransaction, - }, - }; - use fuel_core_executor::executor::max_tx_count; - use fuel_core_txpool::config::{ - HeavyWorkConfig, - PoolLimits, - }; - use fuel_core_types::fuel_types::BlockHeight; - - #[derive(cynic::QueryFragment, Debug)] - #[cynic( - schema_path = "../crates/client/assets/schema.sdl", - graphql_type = "Query", - variables = "BlockByHeightArgs" - )] 
- pub struct FullBlockByHeightQuery { - #[arguments(height: $height)] - pub block: Option, - } - - #[derive(cynic::QueryFragment, Debug)] - #[cynic( - schema_path = "../crates/client/assets/schema.sdl", - graphql_type = "Block" - )] - #[allow(dead_code)] - pub struct FullBlock { - pub id: BlockId, - pub header: Header, - pub consensus: Consensus, - pub transactions: Vec, - } - - #[async_trait::async_trait] - pub trait ClientExt { - async fn full_block_by_height( - &self, - height: u32, - ) -> std::io::Result>; - } + let client = FuelClient::from(srv.bound_address); + let tx = Transaction::default_test_tx(); + client.submit_and_await_commit(&tx).await.unwrap(); - #[async_trait::async_trait] - impl ClientExt for FuelClient { - async fn full_block_by_height( - &self, - height: u32, - ) -> std::io::Result> { - let query = FullBlockByHeightQuery::build(BlockByHeightArgs { - height: Some(U32(height)), - }); + let block = client.full_block_by_height(1).await.unwrap().unwrap(); + assert_eq!(block.header.height.0, 1); + assert_eq!(block.transactions.len(), 2 /* mint + our tx */); +} - let block = self.query(query).await?.block; +#[tokio::test] +async fn too_many_transactions_are_split_in_blocks() { + // Given + let max_gas_limit = 50_000_000; + let mut rng = StdRng::seed_from_u64(2322); + + let local_node_config = Config::local_node(); + let txpool = fuel_core_txpool::config::Config { + pool_limits: PoolLimits { + max_txs: 2_000_000, + max_gas: u64::MAX, + max_bytes_size: usize::MAX, + }, + heavy_work: HeavyWorkConfig { + number_threads_to_verify_transactions: 4, + number_threads_p2p_sync: 0, + size_of_verification_queue: u16::MAX as usize, + size_of_p2p_sync_queue: 1, + }, + ..local_node_config.txpool + }; + let chain_config = local_node_config.snapshot_reader.chain_config().clone(); + let mut consensus_parameters = chain_config.consensus_parameters; + consensus_parameters.set_block_gas_limit(u64::MAX); + consensus_parameters + .set_block_transaction_size_limit(u64::MAX) + 
.expect("should be able to set the limit"); + let snapshot_reader = local_node_config.snapshot_reader.with_chain_config( + fuel_core::chain_config::ChainConfig { + consensus_parameters, + ..chain_config + }, + ); - Ok(block) - } - } + let patched_node_config = Config { + block_production: Trigger::Never, + txpool, + snapshot_reader, + ..local_node_config + }; - #[tokio::test] - async fn get_full_block_with_tx() { - let srv = FuelService::from_database(Database::default(), Config::local_node()) - .await - .unwrap(); + let srv = FuelService::new_node(patched_node_config).await.unwrap(); + let client = FuelClient::from(srv.bound_address); - let client = FuelClient::from(srv.bound_address); - let tx = Transaction::default_test_tx(); - client.submit_and_await_commit(&tx).await.unwrap(); + let tx_count: u64 = max_tx_count() as u64 + 100; + let txs = (1..=tx_count) + .map(|i| make_tx(&mut rng, i, max_gas_limit)) + .collect_vec(); - let block = client.full_block_by_height(1).await.unwrap().unwrap(); - assert_eq!(block.header.height.0, 1); - assert_eq!(block.transactions.len(), 2 /* mint + our tx */); + // When + for tx in txs.iter() { + let _tx_id = client.submit(tx).await.unwrap(); } - #[tokio::test] - async fn too_many_transactions_are_split_in_blocks() { - // Given - let max_gas_limit = 50_000_000; - let mut rng = StdRng::seed_from_u64(2322); - - let local_node_config = Config::local_node(); - let txpool = fuel_core_txpool::config::Config { - pool_limits: PoolLimits { - max_txs: 2_000_000, - max_gas: u64::MAX, - max_bytes_size: usize::MAX, - }, - heavy_work: HeavyWorkConfig { - number_threads_to_verify_transactions: 4, - number_threads_p2p_sync: 0, - size_of_verification_queue: u16::MAX as usize, - size_of_p2p_sync_queue: 1, - }, - ..local_node_config.txpool - }; - let chain_config = local_node_config.snapshot_reader.chain_config().clone(); - let mut consensus_parameters = chain_config.consensus_parameters; - consensus_parameters.set_block_gas_limit(u64::MAX); - 
consensus_parameters - .set_block_transaction_size_limit(u64::MAX) - .expect("should be able to set the limit"); - let snapshot_reader = local_node_config.snapshot_reader.with_chain_config( - fuel_core::chain_config::ChainConfig { - consensus_parameters, - ..chain_config - }, - ); - - let patched_node_config = Config { - block_production: Trigger::Never, - txpool, - snapshot_reader, - ..local_node_config - }; - - let srv = FuelService::new_node(patched_node_config).await.unwrap(); - let client = FuelClient::from(srv.bound_address); - - let tx_count: u64 = max_tx_count() as u64 + 100; - let txs = (1..=tx_count) - .map(|i| test_helpers::make_tx(&mut rng, i, max_gas_limit)) - .collect_vec(); - - // When - for tx in txs.iter() { - let _tx_id = client.submit(tx).await.unwrap(); - } + // Then + let _last_block_height: u32 = client.produce_blocks(2, None).await.unwrap().into(); + let second_last_block = client + .block_by_height(BlockHeight::from(1)) + .await + .unwrap() + .expect("Second last block should be defined"); + let last_block = client + .block_by_height(BlockHeight::from(2)) + .await + .unwrap() + .expect("Last Block should be defined"); - // Then - let _last_block_height: u32 = - client.produce_blocks(2, None).await.unwrap().into(); - let second_last_block = client - .block_by_height(BlockHeight::from(1)) - .await - .unwrap() - .expect("Second last block should be defined"); - let last_block = client - .block_by_height(BlockHeight::from(2)) - .await - .unwrap() - .expect("Last Block should be defined"); - - assert_eq!( - second_last_block.transactions.len(), - max_tx_count() as usize + 1 // Mint transaction for one block - ); - assert_eq!( - last_block.transactions.len(), - (tx_count as usize - (max_tx_count() as usize)) + 1 /* Mint transaction for second block */ - ); - } + assert_eq!( + second_last_block.transactions.len(), + max_tx_count() as usize + 1 // Mint transaction for one block + ); + assert_eq!( + last_block.transactions.len(), + (tx_count as usize 
- (max_tx_count() as usize)) + 1 /* Mint transaction for second block */ + ); } diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index c4683ada2b3..5e6b7458d9f 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -58,7 +58,6 @@ mod regenesis; mod relayer; #[cfg(not(feature = "only-p2p"))] mod required_fuel_block_height_extension; -#[cfg(not(feature = "only-p2p"))] #[cfg(feature = "rpc")] mod rpc; diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 4248e767b2b..32027aef781 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,6 +1,5 @@ #![allow(non_snake_case)] -use crate::blocks::full_block::ClientExt; use fuel_block_aggregator_api::api::protobuf_adapter::{ block_aggregator_client::BlockAggregatorClient, block_response::Payload, @@ -19,6 +18,7 @@ use fuel_core_types::{ fuel_types::BlockHeight, }; use futures::StreamExt; +use test_helpers::client_ext::ClientExt; #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc() { From d17c20c0564f515e5ff7fa90653ddac81b021fc4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 11:57:12 -0600 Subject: [PATCH 030/146] Remove unused --- tests/test-helpers/src/client_ext.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/test-helpers/src/client_ext.rs b/tests/test-helpers/src/client_ext.rs index 24745795702..9feb4309579 100644 --- a/tests/test-helpers/src/client_ext.rs +++ b/tests/test-helpers/src/client_ext.rs @@ -1,6 +1,4 @@ -use super::*; use cynic::QueryBuilder; -use fuel_core::upgradable_executor::native_executor::executor::max_tx_count; use fuel_core_client::client::{ FuelClient, schema::{ @@ -16,11 +14,6 @@ use fuel_core_client::client::{ tx::OpaqueTransaction, }, }; -use fuel_core_txpool::config::{ - HeavyWorkConfig, - PoolLimits, -}; -use fuel_core_types::fuel_types::BlockHeight; #[derive(cynic::QueryFragment, Debug)] #[cynic( From a2f0393430b5574373644d4aafe4e9ba56b00707 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: 
Thu, 2 Oct 2025 12:05:41 -0600 Subject: [PATCH 031/146] Appease Clippy-sama --- tests/tests/rpc.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 32027aef781..31d23706cf9 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -23,7 +23,7 @@ use test_helpers::client_ext::ClientExt; #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc() { let config = Config::local_node(); - let rpc_url = config.rpc_config.addr.clone(); + let rpc_url = config.rpc_config.addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -52,7 +52,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { end: 1, }; let actual_bytes = if let Some(Payload::Literal(block)) = rpc_client - .get_block_range(request.clone()) + .get_block_range(request) .await .unwrap() .into_inner() @@ -84,7 +84,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn get_block_height__can_get_value_from_rpc() { let config = Config::local_node(); - let rpc_url = config.rpc_config.addr.clone(); + let rpc_url = config.rpc_config.addr; // given let srv = FuelService::from_database(Database::default(), config.clone()) @@ -118,7 +118,7 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { let config = Config::local_node(); - let rpc_url = config.rpc_config.addr.clone(); + let rpc_url = config.rpc_config.addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -136,7 +136,7 @@ async fn new_block_subscription__can_get_expect_block() { let request = fuel_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; let mut stream = rpc_client - .new_block_subscription(request.clone()) + .new_block_subscription(request) .await .unwrap() .into_inner(); 
From 7f917bab8876f14f9ea2a0cb11d9f23d0089a928 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 17:37:04 -0600 Subject: [PATCH 032/146] Fix orphaned task that was causing collisions in socket addrs --- crates/fuel-core/src/p2p_test_helpers.rs | 20 ++++++------ crates/fuel-core/src/service/sub_services.rs | 1 + .../src/api/protobuf_adapter.rs | 31 ++++++++++++++----- 3 files changed, 36 insertions(+), 16 deletions(-) diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index c093b128d85..c0c7d82b916 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -359,10 +359,6 @@ pub async fn make_nodes( let mut producers = Vec::with_capacity(producers_with_txs.len()); for (i, s) in producers_with_txs.into_iter().enumerate() { - #[cfg(feature = "rpc")] - { - config.rpc_config.addr = free_local_addr(); - } let name = s.as_ref().map_or(String::new(), |s| s.0.name.clone()); let overrides = s .clone() @@ -427,10 +423,6 @@ pub async fn make_nodes( let mut validators = vec![]; for (i, s) in validators_setup.into_iter().enumerate() { - #[cfg(feature = "rpc")] - { - config.rpc_config.addr = crate::service::config::free_local_addr(); - } let name = s.as_ref().map_or(String::new(), |s| s.name.clone()); let overrides = s .clone() @@ -506,7 +498,17 @@ pub fn make_config( ) -> Config { node_config.p2p = Config::local_node().p2p; node_config.utxo_validation = true; - node_config.name = name; + node_config.name = name.clone(); + #[cfg(feature = "rpc")] + { + node_config.rpc_config.addr = free_local_addr(); + } + + tracing::error!( + "Creating node config for node: {}, {}", + name, + node_config.rpc_config.addr + ); if let Some(min_gas_price) = config_overrides.min_exec_gas_price { node_config.gas_price_config.min_exec_gas_price = min_gas_price; } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index d7f5d3863d4..f79be0f894c 
100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -460,6 +460,7 @@ pub fn init_sub_services( #[cfg(feature = "rpc")] let block_aggregator_rpc = { + tracing::error!("rpc addr: {}", config.rpc_config.addr); let block_aggregator_config = config.rpc_config.clone(); let db = database.block_aggregation().clone(); let db_adapter = StorageDB::new(db); diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 71075d675ab..03d5c468655 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -144,6 +144,7 @@ impl BlockAggregator for Server { pub struct ProtobufAPI { _server_task_handle: tokio::task::JoinHandle<()>, + shutdown_sender: Option>, query_receiver: tokio::sync::mpsc::Receiver>, } @@ -153,16 +154,26 @@ impl ProtobufAPI { tokio::sync::mpsc::channel::>(100); let server = Server::new(query_sender); let addr = url.parse().unwrap(); + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::<()>(); let _server_task_handle = tokio::spawn(async move { - // TODO: Handle error - tonic::transport::Server::builder() - .add_service(block_aggregator_server::BlockAggregatorServer::new(server)) - .serve(addr) - .await - .unwrap() + let service = tonic::transport::Server::builder() + .add_service(block_aggregator_server::BlockAggregatorServer::new(server)); + tokio::select! 
{ + res = service.serve(addr) => { + if let Err(e) = res { + tracing::error!("BlockAggregator tonic server error: {}", e); + } else { + tracing::info!("BlockAggregator tonic server stopped"); + } + }, + _ = shutdown_receiver => { + tracing::info!("Shutting down BlockAggregator tonic server"); + }, + } }); Self { _server_task_handle, + shutdown_sender: Some(shutdown_sender), query_receiver, } } @@ -183,4 +194,10 @@ impl BlockAggregatorApi for ProtobufAPI { } } -pub struct ProtobufClient; +impl Drop for ProtobufAPI { + fn drop(&mut self) { + if let Some(shutdown_sender) = self.shutdown_sender.take() { + let _ = shutdown_sender.send(()); + } + } +} From 051d38cac5a90bccb9cab50aeb6c1e904b845a30 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 18:27:27 -0600 Subject: [PATCH 033/146] Remove spurious error traces --- crates/fuel-core/src/p2p_test_helpers.rs | 5 ----- crates/fuel-core/src/service/sub_services.rs | 1 - 2 files changed, 6 deletions(-) diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index c0c7d82b916..4e8167c7d90 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -504,11 +504,6 @@ pub fn make_config( node_config.rpc_config.addr = free_local_addr(); } - tracing::error!( - "Creating node config for node: {}, {}", - name, - node_config.rpc_config.addr - ); if let Some(min_gas_price) = config_overrides.min_exec_gas_price { node_config.gas_price_config.min_exec_gas_price = min_gas_price; } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index f79be0f894c..d7f5d3863d4 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -460,7 +460,6 @@ pub fn init_sub_services( #[cfg(feature = "rpc")] let block_aggregator_rpc = { - tracing::error!("rpc addr: {}", config.rpc_config.addr); let block_aggregator_config = config.rpc_config.clone(); let db 
= database.block_aggregation().clone(); let db_adapter = StorageDB::new(db); From eeee63515633ebb3688f2eb75c739f5a53881670 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 18:31:05 -0600 Subject: [PATCH 034/146] Add missing feature contraint --- crates/fuel-core/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index cda3c499b49..44fd407fdb9 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -21,7 +21,7 @@ smt = [ ] p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"] relayer = ["dep:fuel-core-relayer"] -rpc = [] +rpc = ["fuel-core/rpc"] shared-sequencer = ["dep:fuel-core-shared-sequencer", "dep:cosmrs"] rocksdb = ["dep:rocksdb", "dep:tempfile", "dep:num_cpus"] backup = ["rocksdb", "fuel-core-database/backup"] From 772dfbd8917b322f1e8f372a1dc881d7b87f8604 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 3 Oct 2025 10:47:10 -0600 Subject: [PATCH 035/146] remove timeout --- crates/services/importer/src/importer.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index d9063188933..b8a24ed16e2 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -267,9 +267,7 @@ impl Importer { callback: sender, }; self.commands.send(command)?; - tokio::time::timeout(Duration::from_secs(5), receiver) - .await - .expect("why this take so long?")? + receiver.await? } #[cfg(test)] @@ -522,6 +520,8 @@ where } } } + let a = 100; + let _ = a; } /// Prepares the block for committing. 
It includes the execution of the block, From 369acdcac218bc6686a95ccd42529410ff1c9506 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 6 Oct 2025 12:04:58 -0600 Subject: [PATCH 036/146] WIP use proto types in storage --- .../services/block_aggregator_api/Cargo.toml | 2 +- .../services/block_aggregator_api/src/api.rs | 15 ++-- .../src/api/protobuf_adapter.rs | 80 +++++++++++-------- .../src/api/protobuf_adapter/tests.rs | 16 ++-- .../src/block_aggregator.rs | 22 +++-- .../src/block_range_response.rs | 4 +- .../block_aggregator_api/src/blocks.rs | 11 ++- .../src/blocks/importer_and_db_source.rs | 13 +-- .../importer_service.rs | 14 ++-- .../importer_and_db_source/sync_service.rs | 13 +-- .../services/block_aggregator_api/src/db.rs | 3 +- .../block_aggregator_api/src/db/storage_db.rs | 12 ++- .../src/db/storage_db/table.rs | 7 +- .../services/block_aggregator_api/src/lib.rs | 35 +++++--- .../src/protobuf_types.rs | 1 + .../block_aggregator_api/src/tests.rs | 22 ++--- 16 files changed, 165 insertions(+), 105 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/protobuf_types.rs diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 29acbc9ee4c..1d5657b44b8 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -22,7 +22,7 @@ futures = { workspace = true } log = "0.4.27" num_enum = { workspace = true } postcard = { workspace = true } -prost = { workspace = true } +prost = { workspace = true , features = ["derive"]} rand = { workspace = true } serde = { workspace = true, features = ["derive"] } strum = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 3cc652bdd09..08d5511b53f 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -11,14 +11,17 @@ pub mod protobuf_adapter; 
pub trait BlockAggregatorApi: Send + Sync { /// The type of the block range response. type BlockRangeResponse; + type Block; /// Awaits the next query to the block aggregator service. fn await_query( &mut self, - ) -> impl Future>> + Send; + ) -> impl Future< + Output = Result>, + > + Send; } -pub enum BlockAggregatorQuery { +pub enum BlockAggregatorQuery { GetBlockRange { first: BlockHeight, last: BlockHeight, @@ -29,11 +32,11 @@ pub enum BlockAggregatorQuery { }, // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? NewBlockSubscription { - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender, }, } -impl fmt::Debug for BlockAggregatorQuery { +impl fmt::Debug for BlockAggregatorQuery { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { BlockAggregatorQuery::GetBlockRange { first, last, .. } => f @@ -52,7 +55,7 @@ impl fmt::Debug for BlockAggregatorQuery { } #[cfg(test)] -impl BlockAggregatorQuery { +impl BlockAggregatorQuery { pub fn get_block_range>( first: H, last: H, @@ -74,7 +77,7 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { + pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { const ARBITRARY_CHANNEL_SIZE: usize = 10; let (sender, receiver) = tokio::sync::mpsc::channel(ARBITRARY_CHANNEL_SIZE); let query = Self::NewBlockSubscription { response: sender }; diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 03d5c468655..a2ff3530d45 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -4,39 +4,53 @@ use crate::{ BlockAggregatorQuery, }, block_range_response::BlockRangeResponse, - result::Result, + protobuf_types::{ + Block as ProtoBlock, + BlockHeightRequest as ProtoBlockHeightRequest, + 
BlockHeightResponse as ProtoBlockHeightResponse, + BlockRangeRequest as ProtoBlockRangeRequest, + BlockResponse as ProtoBlockResponse, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block_aggregator_server::{ + BlockAggregator as ProtoBlockAggregator, + BlockAggregatorServer as ProtoBlockAggregatorServer, + }, + block_response as proto_block_response, + }, + result::{ + Error, + Result, + }, }; use async_trait::async_trait; use futures::StreamExt; use tokio_stream::wrappers::ReceiverStream; use tonic::Status; -tonic::include_proto!("blockaggregator"); - -use crate::result::Error; -use block_aggregator_server::BlockAggregator; - #[cfg(test)] mod tests; pub struct Server { - query_sender: tokio::sync::mpsc::Sender>, + query_sender: + tokio::sync::mpsc::Sender>, } impl Server { pub fn new( - query_sender: tokio::sync::mpsc::Sender>, + query_sender: tokio::sync::mpsc::Sender< + BlockAggregatorQuery, + >, ) -> Self { Self { query_sender } } } #[async_trait] -impl BlockAggregator for Server { +impl ProtoBlockAggregator for Server { async fn get_block_height( &self, - request: tonic::Request, - ) -> Result, tonic::Status> { + request: tonic::Request, + ) -> Result, tonic::Status> { tracing::debug!("get_block_height: {:?}", request); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetCurrentHeight { response }; @@ -45,7 +59,7 @@ impl BlockAggregator for Server { })?; let res = receiver.await; match res { - Ok(height) => Ok(tonic::Response::new(BlockHeightResponse { + Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { height: *height, })), Err(e) => Err(tonic::Status::internal(format!( @@ -54,11 +68,11 @@ impl BlockAggregator for Server { ))), } } - type GetBlockRangeStream = ReceiverStream>; + type GetBlockRangeStream = ReceiverStream>; async fn get_block_range( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, tonic::Status> { tracing::debug!("get_block_range: {:?}", 
request); let req = request.into_inner(); @@ -76,17 +90,15 @@ impl BlockAggregator for Server { match res { Ok(block_range_response) => match block_range_response { BlockRangeResponse::Literal(inner) => { - let (tx, rx) = - tokio::sync::mpsc::channel::>(16); + let (tx, rx) = tokio::sync::mpsc::channel::< + Result, + >(16); tokio::spawn(async move { let mut s = inner; - while let Some(block) = s.next().await { - let pb = Block { - data: block.bytes().to_vec(), - }; - let response = BlockResponse { - payload: Some(block_response::Payload::Literal(pb)), + while let Some(pb) = s.next().await { + let response = ProtoBlockResponse { + payload: Some(proto_block_response::Payload::Literal(pb)), }; if tx.send(Ok(response)).await.is_err() { break; @@ -108,11 +120,11 @@ impl BlockAggregator for Server { } } - type NewBlockSubscriptionStream = ReceiverStream>; + type NewBlockSubscriptionStream = ReceiverStream>; async fn new_block_subscription( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, tonic::Status> { const ARB_CHANNEL_SIZE: usize = 100; tracing::warn!("get_block_range: {:?}", request); @@ -126,11 +138,8 @@ impl BlockAggregator for Server { let (task_sender, task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); tokio::spawn(async move { while let Some(nb) = receiver.recv().await { - let block = Block { - data: nb.block.bytes().to_vec(), - }; - let response = BlockResponse { - payload: Some(block_response::Payload::Literal(block)), + let response = ProtoBlockResponse { + payload: Some(proto_block_response::Payload::Literal(nb)), }; if task_sender.send(Ok(response)).await.is_err() { break; @@ -145,19 +154,21 @@ impl BlockAggregator for Server { pub struct ProtobufAPI { _server_task_handle: tokio::task::JoinHandle<()>, shutdown_sender: Option>, - query_receiver: tokio::sync::mpsc::Receiver>, + query_receiver: + tokio::sync::mpsc::Receiver>, } impl ProtobufAPI { pub fn new(url: String) -> Self { - let (query_sender, query_receiver) = - 
tokio::sync::mpsc::channel::>(100); + let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< + BlockAggregatorQuery, + >(100); let server = Server::new(query_sender); let addr = url.parse().unwrap(); let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::<()>(); let _server_task_handle = tokio::spawn(async move { let service = tonic::transport::Server::builder() - .add_service(block_aggregator_server::BlockAggregatorServer::new(server)); + .add_service(ProtoBlockAggregatorServer::new(server)); tokio::select! { res = service.serve(addr) => { if let Err(e) = res { @@ -181,10 +192,11 @@ impl ProtobufAPI { impl BlockAggregatorApi for ProtobufAPI { type BlockRangeResponse = BlockRangeResponse; + type Block = ProtoBlock; async fn await_query( &mut self, - ) -> Result> { + ) -> Result> { let query = self .query_receiver .recv() diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index 1617090a7dd..5887ba7b4ec 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -5,17 +5,17 @@ use crate::{ api::{ BlockAggregatorApi, BlockAggregatorQuery, - protobuf_adapter::{ - BlockHeightRequest, - BlockRangeRequest, - NewBlockSubscriptionRequest, - ProtobufAPI, - block_aggregator_client::BlockAggregatorClient, - block_response::Payload, - }, + protobuf_adapter::ProtobufAPI, }, block_range_response::BlockRangeResponse, blocks::Block, + protobuf_types::{ + BlockHeightRequest, + BlockRangeRequest, + NewBlockSubscriptionRequest, + block_aggregator_client::BlockAggregatorClient, + block_response::Payload, + }, }; use bytes::Bytes; use fuel_core_types::fuel_types::BlockHeight; diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index a271c129b8e..34061af7bcf 100644 
--- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -8,6 +8,7 @@ use crate::{ blocks::{ BlockSource, BlockSourceEvent, + importer_and_db_source::importer_service::ImporterTask, }, db::BlockAggregatorDB, }; @@ -16,12 +17,14 @@ use fuel_core_services::{ try_or_stop, }; use fuel_core_types::fuel_types::BlockHeight; +use postcard::Serializer; -impl BlockAggregator +impl BlockAggregator where Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + DB: BlockAggregatorDB, Blocks: BlockSource, + ::Block: Clone + std::fmt::Debug, BlockRangeResponse: Send, { pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { @@ -40,7 +43,9 @@ where pub async fn handle_query( &mut self, - res: crate::result::Result>, + res: crate::result::Result< + BlockAggregatorQuery, + >, ) -> TaskNextAction { tracing::debug!("Handling query: {res:?}"); let query = try_or_stop!(res, |e| { @@ -98,7 +103,7 @@ where async fn handle_new_block_subscription( &mut self, - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender, ) -> TaskNextAction { self.new_block_subscriptions.push(response); TaskNextAction::Continue @@ -106,8 +111,11 @@ where pub async fn handle_block( &mut self, - res: crate::result::Result, - ) -> TaskNextAction { + res: crate::result::Result::Block>>, + ) -> TaskNextAction + where + ::Block: std::fmt::Debug, + { tracing::debug!("Handling block: {res:?}"); let event = try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); @@ -115,7 +123,7 @@ where let (id, block) = match event { BlockSourceEvent::NewBlock(id, block) => { self.new_block_subscriptions.retain_mut(|sub| { - let send_res = sub.try_send(NewBlock::new(id, block.clone())); + let send_res = sub.try_send(block.clone()); match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs 
b/crates/services/block_aggregator_api/src/block_range_response.rs index 5e071bc3328..24e78af6ff4 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,4 +1,4 @@ -use crate::blocks::Block; +use crate::protobuf_types::Block as ProtoBlock; use fuel_core_services::stream::Stream; pub type BoxStream = core::pin::Pin + Send + 'static>>; @@ -6,7 +6,7 @@ pub type BoxStream = core::pin::Pin + Send + 'static /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { /// A literal stream of blocks - Literal(BoxStream), + Literal(BoxStream), /// A remote URL where the blocks can be fetched Remote(String), } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index de56f280975..fb8dc76a9c1 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -7,17 +7,20 @@ pub mod importer_and_db_source; /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { + type Block; /// Asynchronously fetch the next block and its height - fn next_block(&mut self) -> impl Future> + Send; + fn next_block( + &mut self, + ) -> impl Future>> + Send; /// Drain any remaining blocks from the source fn drain(&mut self) -> impl Future> + Send; } #[derive(Debug, Eq, PartialEq, Hash)] -pub enum BlockSourceEvent { - NewBlock(BlockHeight, Block), - OldBlock(BlockHeight, Block), +pub enum BlockSourceEvent { + NewBlock(BlockHeight, B), + OldBlock(BlockHeight, B), } #[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 48450b118f1..c35e95e9104 100644 --- 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -37,7 +37,8 @@ mod tests; pub mod serializer_adapter; pub trait BlockSerializer { - fn serialize_block(&self, block: &FuelBlock) -> Result; + type Block; + fn serialize_block(&self, block: &FuelBlock) -> Result; } pub struct ImporterAndDbSource @@ -48,10 +49,10 @@ where DB: StorageInspect, E: std::fmt::Debug + Send, { - importer_task: ServiceRunner>, - sync_task: ServiceRunner>, + importer_task: ServiceRunner>, + sync_task: ServiceRunner>, /// Receive blocks from the importer and sync tasks - receiver: tokio::sync::mpsc::Receiver, + receiver: tokio::sync::mpsc::Receiver>, _error_marker: std::marker::PhantomData, } @@ -108,7 +109,9 @@ where DB: StorageInspect, E: std::fmt::Debug + Send + Sync, { - async fn next_block(&mut self) -> Result { + type Block = Serializer::Block; + + async fn next_block(&mut self) -> Result> { tracing::debug!("awaiting next block"); tokio::select! 
{ block_res = self.receiver.recv() => { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 500d7d0de08..2d33d5aa08c 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -18,21 +18,21 @@ use fuel_core_types::{ use futures::StreamExt; use tokio::sync::mpsc::Sender; -pub struct ImporterTask { +pub struct ImporterTask { importer: BoxStream, serializer: Serializer, - block_return_sender: Sender, + block_return_sender: Sender>, new_end_sender: Option>, } -impl ImporterTask +impl ImporterTask where Serializer: BlockSerializer + Send, { pub fn new( importer: BoxStream, serializer: Serializer, - block_return: Sender, + block_return: Sender>, new_end_sender: Option>, ) -> Self { Self { @@ -43,7 +43,7 @@ where } } } -impl RunnableTask for ImporterTask +impl RunnableTask for ImporterTask where Serializer: BlockSerializer + Send + Sync, { @@ -61,7 +61,7 @@ where } } -impl ImporterTask +impl ImporterTask where Serializer: BlockSerializer + Send + Sync, { @@ -110,7 +110,7 @@ where } #[async_trait::async_trait] -impl RunnableService for ImporterTask +impl RunnableService for ImporterTask where Serializer: BlockSerializer + Send + Sync + 'static, { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 8fd6cf7ec1f..ea966333638 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -28,16 +28,16 @@ use fuel_core_types::{ use std::time::Duration; use tokio::sync::mpsc::Sender; -pub struct SyncTask { +pub struct 
SyncTask { serializer: Serializer, - block_return_sender: Sender, + block_return_sender: Sender>, db: DB, next_height: BlockHeight, maybe_stop_height: Option, new_ending_height: tokio::sync::oneshot::Receiver, } -impl SyncTask +impl SyncTask where Serializer: BlockSerializer + Send, DB: StorageInspect + Send + 'static, @@ -46,7 +46,7 @@ where { pub fn new( serializer: Serializer, - block_return: Sender, + block_return: Sender>, db: DB, db_starting_height: BlockHeight, db_ending_height: Option, @@ -101,9 +101,10 @@ where } } -impl RunnableTask for SyncTask +impl RunnableTask for SyncTask where Serializer: BlockSerializer + Send + Sync, + Serializer::Block: Send + Sync + 'static, DB: Send + Sync + 'static, DB: StorageInspect + Send + 'static, DB: StorageInspect + Send + 'static, @@ -146,7 +147,7 @@ where } #[async_trait::async_trait] -impl RunnableService for SyncTask +impl RunnableService for SyncTask where Serializer: BlockSerializer + Send + Sync + 'static, DB: Send + Sync + 'static, diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 13a0bcc8489..0e503c0c9cd 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -8,6 +8,7 @@ pub mod storage_db; /// The definition of the block aggregator database. 
pub trait BlockAggregatorDB: Send + Sync { + type Block; /// The type used to report a range of blocks type BlockRangeResponse; @@ -15,7 +16,7 @@ pub trait BlockAggregatorDB: Send + Sync { fn store_block( &mut self, height: BlockHeight, - block: Block, + block: Self::Block, ) -> impl Future> + Send; /// Retrieves a range of blocks from the database diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index cac501b2ddf..a32c5f3a172 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -5,6 +5,7 @@ use crate::{ BlockAggregatorDB, storage_db::table::Column, }, + protobuf_types::Block as ProtoBlock, result::{ Error, Result, @@ -96,7 +97,7 @@ impl StorageDB { } } -impl BlockAggregatorDB for StorageDB +impl BlockAggregatorDB for StorageDB where S: Modifiable + std::fmt::Debug, S: KeyValueInspect, @@ -105,9 +106,14 @@ where T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, StorageTransaction: StorageInspect, { + type Block = ProtoBlock; type BlockRangeResponse = BlockRangeResponse; - async fn store_block(&mut self, height: BlockHeight, block: Block) -> Result<()> { + async fn store_block( + &mut self, + height: BlockHeight, + block: ProtoBlock, + ) -> Result<()> { self.update_highest_contiguous_block(height); let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() @@ -156,7 +162,7 @@ where S: Unpin + ReadTransaction + std::fmt::Debug, for<'a> StorageTransaction<&'a S>: StorageInspect, { - type Item = Block; + type Item = ProtoBlock; fn poll_next( self: Pin<&mut Self>, diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index 525645100e8..0c57cba9fa7 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ 
b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -1,4 +1,7 @@ -use crate::blocks::Block; +use crate::{ + blocks::Block, + protobuf_types::Block as ProtoBlock, +}; use fuel_core_storage::{ Mappable, blueprint::plain::Plain, @@ -51,7 +54,7 @@ impl Mappable for Blocks { type Key = Self::OwnedKey; type OwnedKey = BlockHeight; type Value = Self::OwnedValue; - type OwnedValue = Block; + type OwnedValue = ProtoBlock; } impl TableWithBlueprint for Blocks { diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 900e1c56087..7582d16f15c 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -13,6 +13,8 @@ use fuel_core_services::{ TaskNextAction, }; use fuel_core_types::fuel_types::BlockHeight; +use protobuf_types::Block as ProtoBlock; +use std::fmt::Debug; pub mod api; pub mod blocks; @@ -21,6 +23,8 @@ pub mod result; pub mod block_range_response; +pub mod protobuf_types; + pub mod integration { use crate::{ BlockAggregator, @@ -33,6 +37,7 @@ pub mod integration { ImporterAndDbSource, }, db::BlockAggregatorDB, + protobuf_types::Block as ProtoBlock, }; use fuel_core_services::{ ServiceRunner, @@ -63,7 +68,12 @@ pub mod integration { onchain_db: OnchainDB, importer: BoxStream, ) -> ServiceRunner< - BlockAggregator>, + BlockAggregator< + ProtobufAPI, + DB, + ImporterAndDbSource, + ProtoBlock, + >, > where DB: BlockAggregatorDB< @@ -104,16 +114,16 @@ pub mod block_aggregator; // but we can change the name later /// The Block Aggregator service, which aggregates blocks from a source and stores them in a database /// Queries can be made to the service to retrieve data from the `DB` -pub struct BlockAggregator { +pub struct BlockAggregator { query: Api, database: DB, block_source: Blocks, - new_block_subscriptions: Vec>, + new_block_subscriptions: Vec>, } pub struct NewBlock { height: BlockHeight, - block: Block, + block: ProtoBlock, } 
impl NewBlock { @@ -126,11 +136,13 @@ impl NewBlock { } } -impl RunnableTask for BlockAggregator +impl RunnableTask + for BlockAggregator where - Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, Blocks: BlockSource, + ::Block: Clone + std::fmt::Debug + Send, BlockRange: Send, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { @@ -151,12 +163,15 @@ where } #[async_trait::async_trait] -impl RunnableService for BlockAggregator +impl RunnableService + for BlockAggregator where - Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + Api: + BlockAggregatorApi + Send, + DB: BlockAggregatorDB + Send, Blocks: BlockSource, BlockRange: Send, + ::Block: Clone + Debug + Send, { const NAME: &'static str = "BlockAggregatorService"; type SharedData = (); diff --git a/crates/services/block_aggregator_api/src/protobuf_types.rs b/crates/services/block_aggregator_api/src/protobuf_types.rs new file mode 100644 index 00000000000..648ac0e278d --- /dev/null +++ b/crates/services/block_aggregator_api/src/protobuf_types.rs @@ -0,0 +1 @@ +tonic::include_proto!("blockaggregator"); diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index ac069687760..b1545df18ed 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -36,21 +36,22 @@ use tokio::{ type BlockRangeResponse = BoxStream; -struct FakeApi { - receiver: Receiver>, +struct FakeApi { + receiver: Receiver>, } -impl FakeApi { - fn new() -> (Self, Sender>) { +impl FakeApi { + fn new() -> (Self, Sender>) { let (sender, receiver) = tokio::sync::mpsc::channel(1); let api = Self { receiver }; (api, sender) } } -impl BlockAggregatorApi for FakeApi { +impl BlockAggregatorApi for FakeApi { type BlockRangeResponse = T; - async fn await_query(&mut self) -> Result> { + type Block = B; + async fn await_query(&mut self) -> Result> { 
Ok(self.receiver.recv().await.unwrap()) } } @@ -75,6 +76,7 @@ impl FakeDB { } impl BlockAggregatorDB for FakeDB { + type Block = Block; type BlockRangeResponse = BlockRangeResponse; async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { @@ -111,11 +113,11 @@ impl BlockAggregatorDB for FakeDB { } struct FakeBlockSource { - blocks: Receiver, + blocks: Receiver>, } impl FakeBlockSource { - fn new() -> (Self, Sender) { + fn new() -> (Self, Sender>) { let (_sender, receiver) = tokio::sync::mpsc::channel(1); let _self = Self { blocks: receiver }; (_self, _sender) @@ -123,7 +125,9 @@ impl FakeBlockSource { } impl BlockSource for FakeBlockSource { - async fn next_block(&mut self) -> Result { + type Block = Block; + + async fn next_block(&mut self) -> Result> { self.blocks .recv() .await From 3bc2c22d7f30a1ef73b7d260339a528ab0bd9c79 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 6 Oct 2025 17:05:38 -0600 Subject: [PATCH 037/146] Maybe fix the Serde issue --- crates/services/block_aggregator_api/build.rs | 4 +++- .../block_aggregator_api/src/api/protobuf_adapter.rs | 11 ++++++----- .../src/api/protobuf_adapter/tests.rs | 10 +++++++--- .../src/blocks/importer_and_db_source.rs | 1 + .../blocks/importer_and_db_source/importer_service.rs | 2 ++ 5 files changed, 19 insertions(+), 9 deletions(-) diff --git a/crates/services/block_aggregator_api/build.rs b/crates/services/block_aggregator_api/build.rs index c438a06453f..3ccf204eb70 100644 --- a/crates/services/block_aggregator_api/build.rs +++ b/crates/services/block_aggregator_api/build.rs @@ -1,4 +1,6 @@ fn main() -> Result<(), Box> { - tonic_prost_build::compile_protos("proto/api.proto")?; + tonic_prost_build::configure() + .type_attribute(".", "#[derive(serde::Serialize,serde::Deserialize)]") + .compile_protos(&["proto/api.proto"], &["proto/"])?; Ok(()) } diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs 
b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index a2ff3530d45..3e50b2f5866 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -1,3 +1,4 @@ +use crate::protobuf_types::block_aggregator_server::BlockAggregator; use crate::{ api::{ BlockAggregatorApi, @@ -11,10 +12,10 @@ use crate::{ BlockRangeRequest as ProtoBlockRangeRequest, BlockResponse as ProtoBlockResponse, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - block_aggregator_server::{ - BlockAggregator as ProtoBlockAggregator, - BlockAggregatorServer as ProtoBlockAggregatorServer, - }, + // block_aggregator_server::{ + // BlockAggregator as ProtoBlockAggregator, + // BlockAggregatorServer as ProtoBlockAggregatorServer, + // }, block_response as proto_block_response, }, result::{ @@ -46,7 +47,7 @@ impl Server { } #[async_trait] -impl ProtoBlockAggregator for Server { +impl BlockAggregator for Server { async fn get_block_height( &self, request: tonic::Request, diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index 5887ba7b4ec..c289d6e004a 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -1,5 +1,9 @@ #![allow(non_snake_case)] +use crate::protobuf_types::block_aggregator_client::{ + BlockAggregatorClient as ProtoBlockAggregatorClient, + BlockAggregatorClient, +}; use crate::{ NewBlock, api::{ @@ -13,7 +17,7 @@ use crate::{ BlockHeightRequest, BlockRangeRequest, NewBlockSubscriptionRequest, - block_aggregator_client::BlockAggregatorClient, + // block_aggregator_client::BlockAggregatorClient, block_response::Payload, }, }; @@ -40,7 +44,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { // call get current height endpoint with 
client let url = format!("http://{}", path); - let mut client = BlockAggregatorClient::connect(url.to_string()) + let mut client = ProtoBlockAggregatorClient::connect(url.to_string()) .await .expect("could not connect to server"); let handle = tokio::spawn(async move { @@ -77,7 +81,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { // call get current height endpoint with client let url = format!("http://{}", path); - let mut client = BlockAggregatorClient::connect(url.to_string()) + let mut client = ProtoBlockAggregatorClient::connect(url.to_string()) .await .expect("could not connect to server"); let request = BlockRangeRequest { start: 0, end: 1 }; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index c35e95e9104..e25ab6fcd20 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -104,6 +104,7 @@ where impl BlockSource for ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: Send + Sync, DB: StorageInspect, DB: StorageInspect, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 2d33d5aa08c..a22f1eb15f8 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -28,6 +28,7 @@ pub struct ImporterTask { impl ImporterTask where Serializer: BlockSerializer + Send, + ::Block: Send, { pub fn new( importer: BoxStream, @@ -46,6 +47,7 @@ where impl RunnableTask for ImporterTask where Serializer: BlockSerializer + Send + Sync, + ::Block: Send, { async fn run(&mut 
self, watcher: &mut StateWatcher) -> TaskNextAction { tokio::select! { From 06d72208c01043e1aa685f80b9068d5326c19242 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 7 Oct 2025 10:51:28 -0600 Subject: [PATCH 038/146] Get compiling with new type constraints --- .../src/api/protobuf_adapter.rs | 5 ++- .../src/api/protobuf_adapter/tests.rs | 35 ++++++++++----- .../src/blocks/importer_and_db_source.rs | 2 + .../importer_service.rs | 1 + .../serializer_adapter.rs | 14 +++--- .../importer_and_db_source/sync_service.rs | 1 + .../blocks/importer_and_db_source/tests.rs | 2 + .../block_aggregator_api/src/db/storage_db.rs | 2 +- .../src/db/storage_db/tests.rs | 43 ++++++++++++++----- .../services/block_aggregator_api/src/lib.rs | 7 +-- .../block_aggregator_api/src/tests.rs | 8 +--- tests/tests/rpc.rs | 29 ++++++------- 12 files changed, 98 insertions(+), 51 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 3e50b2f5866..1a239d27917 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -1,4 +1,7 @@ -use crate::protobuf_types::block_aggregator_server::BlockAggregator; +use crate::protobuf_types::block_aggregator_server::{ + BlockAggregator, + BlockAggregatorServer as ProtoBlockAggregatorServer, +}; use crate::{ api::{ BlockAggregatorApi, diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index c289d6e004a..c5cebcb177e 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -1,8 +1,11 @@ #![allow(non_snake_case)] -use crate::protobuf_types::block_aggregator_client::{ - BlockAggregatorClient as ProtoBlockAggregatorClient, - BlockAggregatorClient, +use 
crate::protobuf_types::{ + Block as ProtoBlock, + block_aggregator_client::{ + BlockAggregatorClient as ProtoBlockAggregatorClient, + BlockAggregatorClient, + }, }; use crate::{ NewBlock, @@ -98,8 +101,14 @@ async fn await_query__get_block_range__client_receives_expected_value() { let query = api.await_query().await.unwrap(); // then - let block1 = Block::new(Bytes::from(vec![0u8; 100])); - let block2 = Block::new(Bytes::from(vec![1u8; 100])); + // let block1 = Block::new(Bytes::from(vec![0u8; 100])); + // let block2 = Block::new(Bytes::from(vec![1u8; 100])); + let block1 = ProtoBlock { + data: vec![0u8; 100], + }; + let block2 = ProtoBlock { + data: vec![1u8; 100], + }; let list = vec![block1, block2]; // return response through query's channel if let BlockAggregatorQuery::GetBlockRange { @@ -119,7 +128,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { } tracing::info!("awaiting query"); let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); + let expected: Vec> = list.iter().map(|b| b.data.to_vec()).collect(); let actual: Vec> = response .into_inner() .try_collect::>() @@ -166,13 +175,19 @@ async fn await_query__new_block_stream__client_receives_expected_value() { // then let height1 = BlockHeight::new(0); let height2 = BlockHeight::new(1); - let block1 = Block::new(Bytes::from(vec![0u8; 100])); - let block2 = Block::new(Bytes::from(vec![1u8; 100])); + // let block1 = Block::new(Bytes::from(vec![0u8; 100])); + // let block2 = Block::new(Bytes::from(vec![1u8; 100])); + let block1 = ProtoBlock { + data: vec![0u8; 100], + }; + let block2 = ProtoBlock { + data: vec![1u8; 100], + }; let list = vec![(height1, block1), (height2, block2)]; if let BlockAggregatorQuery::NewBlockSubscription { response } = query { tracing::info!("correct query received, sending response"); for (height, block) in list.clone() { - let new_block = NewBlock::new(height, block); + let new_block = block; 
response.send(new_block).await.unwrap(); } } else { @@ -180,7 +195,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { } tracing::info!("awaiting query"); let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|(_, b)| b.bytes().to_vec()).collect(); + let expected: Vec> = list.iter().map(|(_, b)| b.data.to_vec()).collect(); let actual: Vec> = response .into_inner() .try_collect::>() diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index e25ab6fcd20..ecdf8898d59 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -44,6 +44,7 @@ pub trait BlockSerializer { pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, DB: StorageInspect, DB: StorageInspect, @@ -60,6 +61,7 @@ where impl ImporterAndDbSource where Serializer: BlockSerializer + Clone + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: StorageInspect + Send + Sync, DB: StorageInspect + Send + 'static, E: std::fmt::Debug + Send, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index a22f1eb15f8..74151e2a0c7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -115,6 +115,7 @@ where impl RunnableService for ImporterTask where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + 'static, { const NAME: &'static str = "BlockSourceImporterTask"; type SharedData = (); diff --git 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 028c66081bb..05a32eeb262 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -6,6 +6,7 @@ use crate::{ result::Error, }; +use crate::protobuf_types::Block as ProtoBlock; use anyhow::anyhow; use fuel_core_types::blockchain::block::Block as FuelBlock; use postcard::to_allocvec; @@ -14,10 +15,13 @@ use postcard::to_allocvec; pub struct SerializerAdapter; impl BlockSerializer for SerializerAdapter { - fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { - let bytes_vec = to_allocvec(block).map_err(|e| { - Error::BlockSource(anyhow!("failed to serialize block: {}", e)) - })?; - Ok(crate::blocks::Block::from(bytes_vec)) + type Block = ProtoBlock; + + fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { + // let bytes_vec = to_allocvec(block).map_err(|e| { + // Error::BlockSource(anyhow!("failed to serialize block: {}", e)) + // })?; + // Ok(crate::blocks::Block::from(bytes_vec)) + todo!() } } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index ea966333638..2dad4fb54aa 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -150,6 +150,7 @@ where impl RunnableService for SyncTask where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, DB: StorageInspect + Send + 'static, DB: StorageInspect + Send + 'static, diff --git 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 92e04d69e5f..8893e08577f 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -34,6 +34,8 @@ use std::sync::Arc; pub struct MockSerializer; impl BlockSerializer for MockSerializer { + type Block = Block; + fn serialize_block(&self, block: &FuelBlock) -> Result { let bytes_vec = to_allocvec(block).map_err(|e| { Error::BlockSource(anyhow!("failed to serialize block: {}", e)) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index a32c5f3a172..05bac5fbd3e 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -97,7 +97,7 @@ impl StorageDB { } } -impl BlockAggregatorDB for StorageDB +impl BlockAggregatorDB for StorageDB where S: Modifiable + std::fmt::Debug, S: KeyValueInspect, diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index f09cdaafc2b..43f2a5110d1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -12,12 +12,22 @@ use fuel_core_types::{ fuel_types::BlockHeight, }; use futures::StreamExt; -use rand::rngs::StdRng; +use rand::{ + Rng, + rngs::StdRng, +}; fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() } +fn random_proto_block(rng: &mut StdRng) -> ProtoBlock { + const ARB_SIZE: usize = 1000; + let mut data = vec![0u8; ARB_SIZE]; + rng.fill(&mut data[..]); + ProtoBlock { data } +} + #[tokio::test] async fn store_block__adds_to_storage() { let mut rng = StdRng::seed_from_u64(666); @@ 
-25,7 +35,9 @@ async fn store_block__adds_to_storage() { let db = database(); let mut adapter = StorageDB::new(db); let height = BlockHeight::from(1u32); - let expected = Block::random(&mut rng); + let mut data = vec![0u8; 1000]; + rng.fill(&mut data[..]); + let expected = ProtoBlock { data }; // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -49,9 +61,10 @@ async fn get_block__can_get_expected_range() { let height_1 = BlockHeight::from(1u32); let height_2 = BlockHeight::from(2u32); let height_3 = BlockHeight::from(3u32); - let expected_1 = Block::random(&mut rng); - let expected_2 = Block::random(&mut rng); - let expected_3 = Block::random(&mut rng); + + let expected_1 = random_proto_block(&mut rng); + let expected_2 = random_proto_block(&mut rng); + let expected_3 = random_proto_block(&mut rng); let mut tx = db.write_transaction(); tx.storage_as_mut::() @@ -87,7 +100,11 @@ async fn store_block__updates_the_highest_continuous_block_if_contiguous() { let db = database(); let mut adapter = StorageDB::new_with_height(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); - let expected = Block::random(&mut rng); + // // let expected = Block::random(&mut rng); + // let mut data = vec![0u8; 1000]; + // rng.fill(&mut data[..]); + // let expected = ProtoBlock { data }; + let expected = random_proto_block(&mut rng); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -106,7 +123,8 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let starting_height = BlockHeight::from(0u32); let mut adapter = StorageDB::new_with_height(db, starting_height); let height = BlockHeight::from(2u32); - let expected = Block::random(&mut rng); + // let expected = Block::random(&mut rng); + let expected = random_proto_block(&mut rng); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -129,7 +147,8 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { for 
height in 2..=10u32 { let height = BlockHeight::from(height); orphaned_height = Some(height); - let block = Block::random(&mut rng); + // let block = Block::random(&mut rng); + let block = random_proto_block(&mut rng); adapter.store_block(height, block).await.unwrap(); } let expected = starting_height; @@ -138,8 +157,12 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // when let height = BlockHeight::from(1u32); - let expected = Block::random(&mut rng); - adapter.store_block(height, expected.clone()).await.unwrap(); + // let expected = Block::random(&mut rng); + let some_block = random_proto_block(&mut rng); + adapter + .store_block(height, some_block.clone()) + .await + .unwrap(); // then let expected = orphaned_height.unwrap(); diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 7582d16f15c..a056d3d45f7 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -78,8 +78,9 @@ pub mod integration { where DB: BlockAggregatorDB< BlockRangeResponse = ::BlockRangeResponse, + Block = ProtoBlock, >, - S: BlockSerializer + Clone + Send + Sync + 'static, + S: BlockSerializer + Clone + Send + Sync + 'static, OnchainDB: Send + Sync, OnchainDB: StorageInspect, OnchainDB: StorageInspect, @@ -127,11 +128,11 @@ pub struct NewBlock { } impl NewBlock { - pub fn new(height: BlockHeight, block: Block) -> Self { + pub fn new(height: BlockHeight, block: ProtoBlock) -> Self { Self { height, block } } - pub fn into_inner(self) -> (BlockHeight, Block) { + pub fn into_inner(self) -> (BlockHeight, ProtoBlock) { (self.height, self.block) } } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index b1545df18ed..d8b9a8744e5 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -48,7 +48,7 @@ impl FakeApi { 
} } -impl BlockAggregatorApi for FakeApi { +impl BlockAggregatorApi for FakeApi { type BlockRangeResponse = T; type Block = B; async fn await_query(&mut self) -> Result> { @@ -247,12 +247,8 @@ async fn run__new_block_subscription__sends_new_block() { let _ = srv.run(&mut watcher).await; // then - let (actual_height, actual_block) = await_response_with_timeout(response) - .await - .unwrap() - .into_inner(); + let actual_block = await_response_with_timeout(response).await.unwrap(); assert_eq!(expected_block, actual_block); - assert_eq!(expected_height, actual_height); // cleanup drop(source_sender); diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 31d23706cf9..b2a8f6fd650 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,8 +1,11 @@ #![allow(non_snake_case)] -use fuel_block_aggregator_api::api::protobuf_adapter::{ - block_aggregator_client::BlockAggregatorClient, - block_response::Payload, +use fuel_block_aggregator_api::protobuf_types::{ + BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, }; use fuel_core::{ database::Database, @@ -35,7 +38,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); @@ -47,11 +50,8 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { let header = expected_block.header; // when - let request = fuel_block_aggregator_api::api::protobuf_adapter::BlockRangeRequest { - start: 1, - end: 1, - }; - let actual_bytes = if let Some(Payload::Literal(block)) = rpc_client + let request = 
ProtoBlockRangeRequest { start: 1, end: 1 }; + let actual_bytes = if let Some(ProtoPayload::Literal(block)) = rpc_client .get_block_range(request) .await .unwrap() @@ -97,12 +97,12 @@ async fn get_block_height__can_get_value_from_rpc() { let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); // when - let request = fuel_block_aggregator_api::api::protobuf_adapter::BlockHeightRequest {}; + let request = ProtoBlockHeightRequest {}; let expected_height = 1; let actual_height = rpc_client .get_block_height(request) @@ -129,12 +129,11 @@ async fn new_block_subscription__can_get_expect_block() { let tx = Transaction::default_test_tx(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); - let request = - fuel_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; + let request = ProtoNewBlockSubscriptionRequest {}; let mut stream = rpc_client .new_block_subscription(request) .await @@ -147,7 +146,7 @@ async fn new_block_subscription__can_get_expect_block() { .await .unwrap(); let actual_bytes = - if let Some(Payload::Literal(block)) = next.unwrap().unwrap().payload { + if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { block.data } else { panic!("expected literal block payload"); From 13a7e84ed7b252155e0ac23920f2fff83273253b Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 7 Oct 2025 11:12:31 -0600 Subject: [PATCH 039/146] Fix tests --- .../importer_and_db_source/serializer_adapter.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 05a32eeb262..01aed01e886 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -18,10 +18,10 @@ impl BlockSerializer for SerializerAdapter { type Block = ProtoBlock; fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { - // let bytes_vec = to_allocvec(block).map_err(|e| { - // Error::BlockSource(anyhow!("failed to serialize block: {}", e)) - // })?; - // Ok(crate::blocks::Block::from(bytes_vec)) - todo!() + let data = to_allocvec(block).map_err(|e| { + Error::BlockSource(anyhow!("failed to serialize block: {}", e)) + })?; + let pb = ProtoBlock { data }; + Ok(pb) } } From 0bbe62a0b21cc156a91d6cfcfc38c9d773630e71 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 7 Oct 2025 12:41:39 -0600 Subject: [PATCH 040/146] WIP add proto types for block w/header & txs --- .../block_aggregator_api/proto/api.proto | 428 +++++++++++++++++- .../serializer_adapter.rs | 51 ++- 2 files changed, 467 insertions(+), 12 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 1e34a8fa8de..38f87dae2a2 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -14,9 +14,435 @@ message BlockRangeRequest { } message Block { - bytes data = 1; + oneof versioned_block { + V1Block v1 = 1; + } +} + +message V1Block { + Header header = 1; + repeated Transaction transactions = 2; +} + +message Header { + oneof versioned_header { + V1Header v1 = 1; + } +} + +// pub struct BlockHeaderV1 { +// /// The application header. 
+// pub(crate) application: ApplicationHeader, +// /// The consensus header. +// pub(crate) consensus: ConsensusHeader, +// /// The header metadata calculated during creation. +// /// The field is pub(crate) to enforce the use of the [`PartialBlockHeader::generate`] method. +// #[cfg_attr(feature = "serde", serde(skip))] +// #[educe(PartialEq(ignore))] +// pub(crate) metadata: Option, +//} +message V1Header { + uint32 version = 1; + bytes previous_block_hash = 2; + bytes merkle_root = 3; + uint32 timestamp = 4; + uint32 bits = 5; + uint32 nonce = 6; +} + +// pub enum Transaction { +// Script(Script), +// Create(Create), +// Mint(Mint), +// Upgrade(Upgrade), +// Upload(Upload), +// Blob(Blob), +//} +message Transaction { + oneof variant { + ScriptTx script = 1; +// CreateTx create = 2; +// MintTx mint = 3; +// UpgradeTx upgrade = 4; +// UploadTx upload = 5; +// BlobTx blob = 6; + } +} + +// pub struct ChargeableTransaction +//where +// Body: BodyConstraints, +//{ +// pub(crate) body: Body, +// pub(crate) policies: Policies, +// pub(crate) inputs: Vec, +// pub(crate) outputs: Vec, +// pub(crate) witnesses: Vec, +// #[serde(skip)] +// #[cfg_attr(feature = "da-compression", compress(skip))] +// #[educe(PartialEq(ignore))] +// #[educe(Hash(ignore))] +// #[canonical(skip)] +// pub(crate) metadata: Option>, +//} +// pub struct ScriptBody { +// pub(crate) script_gas_limit: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub(crate) receipts_root: Bytes32, +// pub(crate) script: ScriptCode, +// #[educe(Debug(method(fmt_truncated_hex::<16>)))] +// pub(crate) script_data: Vec, +//} +// #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] +//pub struct ScriptMetadata { +// pub script_data_offset: usize, +//} +message ScriptTx { + uint32 script_gas_limit = 1; + bytes receipts_root = 2; + bytes script = 3; + bytes script_data = 4; + Policies policies = 5; + repeated Input inputs = 6; + repeated Output outputs = 7; + repeated bytes witnesses = 8; + 
ScriptMetadata metadata = 9; +} + +// pub struct Policies { +// /// A bitmask that indicates what policies are set. +// bits: PoliciesBits, +// /// The array of policy values. +// values: [Word; POLICIES_NUMBER], +//} +message Policies { + uint32 bits = 1; + repeated uint32 values = 2; +} + +// pub enum Input { +// CoinSigned(CoinSigned), +// CoinPredicate(CoinPredicate), +// Contract(Contract), +// MessageCoinSigned(MessageCoinSigned), +// MessageCoinPredicate(MessageCoinPredicate), +// MessageDataSigned(MessageDataSigned), +// MessageDataPredicate(MessageDataPredicate), +//} +message Input { + oneof variant { + CoinSignedInput coin_signed = 1; + CoinPredicateInput coin_predicate = 2; + ContractInput contract = 3; + MessageCoinSignedInput message_coin_signed = 4; + MessageCoinPredicateInput message_coin_predicate = 5; + MessageDataSignedInput message_data_signed = 6; + MessageDataPredicateInput message_data_predicate = 7; + } +} + +// pub struct Coin +//where +// Specification: CoinSpecification, +//{ +// pub utxo_id: UtxoId, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub owner: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub amount: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub asset_id: AssetId, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub tx_pointer: TxPointer, +// #[educe(Debug(method(fmt_as_field)))] +// pub witness_index: Specification::Witness, +// /// Exact amount of gas used by the predicate. +// /// If the predicate consumes different amount of gas, +// /// it's considered to be false. 
+// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_gas_used: Specification::PredicateGasUsed, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate: Specification::Predicate, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_data: Specification::PredicateData, +//} +// impl CoinSpecification for Signed { +// type Predicate = Empty; +// type PredicateData = Empty>; +// type PredicateGasUsed = Empty; +// type Witness = u16; +//} +message CoinSignedInput { + UtxoId utxo_id = 1; + bytes owner = 2; + uint32 amount = 3; + bytes asset_id = 4; + bytes tx_pointer = 5; + uint32 witness_index = 6; + uint32 predicate_gas_used = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +//impl CoinSpecification for Predicate { +// type Predicate = PredicateCode; +// type PredicateData = Vec; +// type PredicateGasUsed = Word; +// type Witness = Empty; +//} +message CoinPredicateInput { + UtxoId utxo_id = 1; + bytes owner = 2; + uint32 amount = 3; + bytes asset_id = 4; + bytes tx_pointer = 5; + uint32 witness_index = 6; + uint32 predicate_gas_used = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub struct Contract { +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub utxo_id: UtxoId, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub balance_root: Bytes32, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub state_root: Bytes32, +// /// Pointer to transaction that last modified the contract state. +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub tx_pointer: TxPointer, +// pub contract_id: ContractId, +//} +message ContractInput { + UtxoId utxo_id = 1; + bytes balance_root = 2; + bytes state_root = 3; + bytes tx_pointer = 4; + bytes contract_id = 5; +} + +// pub struct Message +//where +// Specification: MessageSpecification, +//{ +// /// The sender from the L1 chain. 
+// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub sender: Address, +// /// The receiver on the `Fuel` chain. +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub recipient: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub amount: Word, +// // Unique identifier of the message +// pub nonce: Nonce, +// #[educe(Debug(method(fmt_as_field)))] +// pub witness_index: Specification::Witness, +// /// Exact amount of gas used by the predicate. +// /// If the predicate consumes different amount of gas, +// /// it's considered to be false. +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_gas_used: Specification::PredicateGasUsed, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// #[educe(Debug(method(fmt_as_field)))] +// pub data: Specification::Data, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate: Specification::Predicate, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_data: Specification::PredicateData, +//} +// pub struct MessageCoin(core::marker::PhantomData); +// +// impl MessageSpecification for MessageCoin { +// type Data = Empty>; +// type Predicate = Empty; +// type PredicateData = Empty>; +// type PredicateGasUsed = Empty; +// type Witness = u16; +// } +message MessageCoinSignedInput { + bytes sender = 1; + bytes recipient = 2; + uint32 amount = 3; + uint32 nonce = 4; + uint32 witness_index = 5; + uint32 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; } +// impl MessageSpecification for MessageCoin { +// type Data = Empty>; +// type Predicate = PredicateCode; +// type PredicateData = Vec; +// type PredicateGasUsed = Word; +// type Witness = Empty; +// } +message MessageCoinPredicateInput { + bytes sender = 1; + bytes recipient = 2; + uint32 amount = 3; + uint32 nonce = 4; + uint32 witness_index = 5; + uint32 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub 
type MessageDataSigned = Message>; +message MessageDataSignedInput { + bytes sender = 1; + bytes recipient = 2; + uint32 amount = 3; + uint32 nonce = 4; + uint32 witness_index = 5; + uint32 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub type MessageDataPredicate = +// Message>; +message MessageDataPredicateInput { + bytes sender = 1; + bytes recipient = 2; + uint32 amount = 3; + uint32 nonce = 4; + uint32 witness_index = 5; + uint32 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub enum Output { +// Coin { +// to: Address, +// amount: Word, +// asset_id: AssetId, +// }, +// +// Contract(Contract), +// +// Change { +// to: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// amount: Word, +// asset_id: AssetId, +// }, +// +// Variable { +// #[cfg_attr(feature = "da-compression", compress(skip))] +// to: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// amount: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// asset_id: AssetId, +// }, +// +// ContractCreated { +// contract_id: ContractId, +// state_root: Bytes32, +// }, +//} +message Output { + oneof variant { + CoinOutput coin = 1; + ContractOutput contract = 2; + ChangeOutput change = 3; + VariableOutput variable = 4; + ContractCreatedOutput contract_created = 5; + } +} +message CoinOutput { + bytes to = 1; + uint32 amount = 2; + bytes asset_id = 3; +} +message ContractOutput { + bytes contract_id = 1; + bytes state_root = 2; +} +message ChangeOutput { + bytes to = 1; + uint32 amount = 2; + bytes asset_id = 3; +} +message VariableOutput { + bytes to = 1; + uint32 amount = 2; + bytes asset_id = 3; +} +message ContractCreatedOutput { + bytes contract_id = 1; + bytes state_root = 2; +} + +// pub struct UtxoId { +// /// transaction id +// tx_id: TxId, +// /// output index +// output_index: u16, +//} +message UtxoId { + bytes tx_id = 1; + 
uint32 output_index = 2; +} + + +// #[derive(Debug, Clone, PartialEq, Eq, Hash)] +//pub struct ChargeableMetadata { +// pub common: CommonMetadata, +// pub body: Body, +//} +// pub struct ScriptBody { +// pub(crate) script_gas_limit: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub(crate) receipts_root: Bytes32, +// pub(crate) script: ScriptCode, +// #[educe(Debug(method(fmt_truncated_hex::<16>)))] +// pub(crate) script_data: Vec, +//} +// #[derive(Debug, Clone, PartialEq, Eq, Hash)] +//pub struct CommonMetadata { +// pub id: Bytes32, +// pub inputs_offset: usize, +// pub inputs_offset_at: Vec, +// pub inputs_predicate_offset_at: Vec>, +// pub outputs_offset: usize, +// pub outputs_offset_at: Vec, +// pub witnesses_offset: usize, +// pub witnesses_offset_at: Vec, +//} + +message ScriptMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; + uint32 script_gas_limit = 9; + bytes receipts_root = 10; + bytes script = 11; + bytes script_data = 12; +} + +message PredicateOffset { + optional InnerPredicateOffset offset = 1; +} + +message InnerPredicateOffset { + uint32 offset = 1; + uint32 length = 2; +} + + message BlockResponse { oneof payload { Block literal = 1; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 01aed01e886..e1998c372df 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,14 +1,26 @@ use crate::{ - blocks::{ - Block, - importer_and_db_source::BlockSerializer, - }, + 
blocks::importer_and_db_source::BlockSerializer, result::Error, }; -use crate::protobuf_types::Block as ProtoBlock; +use crate::protobuf_types::{ + Block as ProtoBlock, + Header as ProtoHeader, + Transaction as ProtoTransaction, + V1Block as ProtoV1Block, + block::VersionedBlock as ProtoVersionedBlock, +}; use anyhow::anyhow; -use fuel_core_types::blockchain::block::Block as FuelBlock; +use fuel_core_types::{ + blockchain::{ + block::{ + Block as FuelBlock, + BlockV1, + }, + header::BlockHeader, + }, + fuel_tx::Transaction as FuelTransaction, +}; use postcard::to_allocvec; #[derive(Clone)] @@ -18,10 +30,27 @@ impl BlockSerializer for SerializerAdapter { type Block = ProtoBlock; fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { - let data = to_allocvec(block).map_err(|e| { - Error::BlockSource(anyhow!("failed to serialize block: {}", e)) - })?; - let pb = ProtoBlock { data }; - Ok(pb) + // TODO: Should this be owned to begin with? + let (header, txs) = block.clone().into_inner(); + let proto_header = proto_header_from_header(header); + match &block { + FuelBlock::V1(_) => { + let proto_v1_block = ProtoV1Block { + header: Some(proto_header), + transactions: txs.into_iter().map(proto_tx_from_tx).collect(), + }; + Ok(ProtoBlock { + versioned_block: Some(ProtoVersionedBlock::V1(proto_v1_block)), + }) + } + } } } + +fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { + todo!() +} + +fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { + todo!() +} From 0061ca31424225b5abbc7063ead7247cb6f094b7 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 7 Oct 2025 14:17:51 -0600 Subject: [PATCH 041/146] WIP integrate proto block type more into tests --- .../block_aggregator_api/proto/api.proto | 64 +++++++++++++++++-- tests/tests/rpc.rs | 35 ++++++---- 2 files changed, 80 insertions(+), 19 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 
38f87dae2a2..8b0476f0fe6 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -41,13 +41,65 @@ message Header { // #[educe(PartialEq(ignore))] // pub(crate) metadata: Option, //} +// pub struct ApplicationHeader { +// /// The layer 1 height of messages and events to include since the last layer 1 block number. +// /// This is not meant to represent the layer 1 block this was committed to. Validators will need +// /// to have some rules in place to ensure the block number was chosen in a reasonable way. For +// /// example, they should verify that the block number satisfies the finality requirements of the +// /// layer 1 chain. They should also verify that the block number isn't too stale and is increasing. +// /// Some similar concerns are noted in this issue: +// pub da_height: DaBlockHeight, +// /// The version of the consensus parameters used to execute this block. +// pub consensus_parameters_version: ConsensusParametersVersion, +// /// The version of the state transition bytecode used to execute this block. +// pub state_transition_bytecode_version: StateTransitionBytecodeVersion, +// /// Generated application fields. +// pub generated: Generated, +//} +// pub struct GeneratedApplicationFieldsV1 { +// /// Number of transactions in this block. +// pub transactions_count: u16, +// /// Number of message receipts in this block. +// pub message_receipt_count: u32, +// /// Merkle root of transactions. +// pub transactions_root: Bytes32, +// /// Merkle root of message receipts in this block. +// pub message_outbox_root: Bytes32, +// /// Root hash of all imported events from L1 +// pub event_inbox_root: Bytes32, +//} +// pub struct ConsensusHeader { +// /// Merkle root of all previous block header hashes. +// pub prev_root: Bytes32, +// /// Fuel block height. +// pub height: BlockHeight, +// /// The block producer time. +// pub time: Tai64, +// /// generated consensus fields. 
+// pub generated: Generated, +//} +// pub struct GeneratedConsensusFields { +// /// Hash of the application header. +// pub application_hash: Bytes32, +//} +// pub struct BlockHeaderMetadata { +// /// Hash of the header. +// id: BlockId, +//} message V1Header { - uint32 version = 1; - bytes previous_block_hash = 2; - bytes merkle_root = 3; - uint32 timestamp = 4; - uint32 bits = 5; - uint32 nonce = 6; + uint32 da_height = 1; + uint32 consensus_parameters_version = 2; + uint32 state_transition_bytecode_version = 3; + uint32 transactions_count = 4; + uint32 message_receipt_count = 5; + bytes transactions_root = 6; + bytes message_outbox_root = 7; + bytes event_inbox_root = 8; + bytes prev_root = 9; + uint32 height = 10; + bytes time = 11; + bytes application_hash = 12; + optional bytes block_id = 13; } // pub enum Transaction { diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index b2a8f6fd650..4fd21aa7940 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -4,8 +4,10 @@ use fuel_block_aggregator_api::protobuf_types::{ BlockHeightRequest as ProtoBlockHeightRequest, BlockRangeRequest as ProtoBlockRangeRequest, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block::VersionedBlock as ProtoVersionedBlock, block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, block_response::Payload as ProtoPayload, + header::VersionedHeader as ProtoVersionedHeader, }; use fuel_core::{ database::Database, @@ -47,11 +49,11 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .unwrap() .unwrap(); - let header = expected_block.header; + let expected_header = expected_block.header; // when let request = ProtoBlockRangeRequest { start: 1, end: 1 }; - let actual_bytes = if let Some(ProtoPayload::Literal(block)) = rpc_client + let actual_block = if let Some(ProtoPayload::Literal(block)) = rpc_client .get_block_range(request) .await .unwrap() @@ -62,23 +64,30 @@ async fn 
get_block_range__can_get_serialized_block_from_rpc() { .unwrap() .payload { - block.data + block } else { panic!("expected literal block payload"); }; - let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); + let actual_height = if let ProtoVersionedBlock::V1(v1_block) = + actual_block.versioned_block.unwrap() + { + if let ProtoVersionedHeader::V1(v1_header) = + v1_block.header.unwrap().versioned_header.unwrap() + { + v1_header.height + } else { + panic!("expected V1 header"); + } + } else { + panic!("expected V1 block"); + }; // then - assert_eq!( - BlockHeight::from(header.height.0), - *actual_block.header().height() - ); + assert_eq!(expected_header.height.0, actual_height); // check txs - let actual_tx = actual_block.transactions().first().unwrap(); - let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); - let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); - - assert_eq!(&expected_tx, actual_tx); + // let actual_tx = actual_block.transactions().first().unwrap(); + // let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); + // let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); } #[tokio::test(flavor = "multi_thread")] From 8ff249787a5355309075a7024a7e0f89536bc89f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 7 Oct 2025 16:25:00 -0600 Subject: [PATCH 042/146] WIP conversions --- .../block_aggregator_api/proto/api.proto | 38 +++--- .../serializer_adapter.rs | 128 +++++++++++++++--- tests/tests/rpc.rs | 55 ++------ 3 files changed, 142 insertions(+), 79 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 8b0476f0fe6..94af0cec4a5 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -150,7 +150,7 @@ message Transaction { // pub script_data_offset: usize, //} message ScriptTx { - uint32 
script_gas_limit = 1; + uint64 script_gas_limit = 1; bytes receipts_root = 2; bytes script = 3; bytes script_data = 4; @@ -169,7 +169,7 @@ message ScriptTx { //} message Policies { uint32 bits = 1; - repeated uint32 values = 2; + repeated uint64 values = 2; } // pub enum Input { @@ -227,11 +227,11 @@ message Input { message CoinSignedInput { UtxoId utxo_id = 1; bytes owner = 2; - uint32 amount = 3; + uint64 amount = 3; bytes asset_id = 4; bytes tx_pointer = 5; uint32 witness_index = 6; - uint32 predicate_gas_used = 7; + uint64 predicate_gas_used = 7; bytes predicate = 8; bytes predicate_data = 9; } @@ -245,11 +245,11 @@ message CoinSignedInput { message CoinPredicateInput { UtxoId utxo_id = 1; bytes owner = 2; - uint32 amount = 3; + uint64 amount = 3; bytes asset_id = 4; bytes tx_pointer = 5; uint32 witness_index = 6; - uint32 predicate_gas_used = 7; + uint64 predicate_gas_used = 7; bytes predicate = 8; bytes predicate_data = 9; } @@ -315,10 +315,10 @@ message ContractInput { message MessageCoinSignedInput { bytes sender = 1; bytes recipient = 2; - uint32 amount = 3; + uint64 amount = 3; uint32 nonce = 4; uint32 witness_index = 5; - uint32 predicate_gas_used = 6; + uint64 predicate_gas_used = 6; bytes data = 7; bytes predicate = 8; bytes predicate_data = 9; @@ -334,10 +334,10 @@ message MessageCoinSignedInput { message MessageCoinPredicateInput { bytes sender = 1; bytes recipient = 2; - uint32 amount = 3; + uint64 amount = 3; uint32 nonce = 4; uint32 witness_index = 5; - uint32 predicate_gas_used = 6; + uint64 predicate_gas_used = 6; bytes data = 7; bytes predicate = 8; bytes predicate_data = 9; @@ -347,10 +347,10 @@ message MessageCoinPredicateInput { message MessageDataSignedInput { bytes sender = 1; bytes recipient = 2; - uint32 amount = 3; + uint64 amount = 3; uint32 nonce = 4; uint32 witness_index = 5; - uint32 predicate_gas_used = 6; + uint64 predicate_gas_used = 6; bytes data = 7; bytes predicate = 8; bytes predicate_data = 9; @@ -361,10 +361,10 @@ message 
MessageDataSignedInput { message MessageDataPredicateInput { bytes sender = 1; bytes recipient = 2; - uint32 amount = 3; + uint64 amount = 3; uint32 nonce = 4; uint32 witness_index = 5; - uint32 predicate_gas_used = 6; + uint64 predicate_gas_used = 6; bytes data = 7; bytes predicate = 8; bytes predicate_data = 9; @@ -411,7 +411,7 @@ message Output { } message CoinOutput { bytes to = 1; - uint32 amount = 2; + uint64 amount = 2; bytes asset_id = 3; } message ContractOutput { @@ -420,12 +420,12 @@ message ContractOutput { } message ChangeOutput { bytes to = 1; - uint32 amount = 2; + uint64 amount = 2; bytes asset_id = 3; } message VariableOutput { bytes to = 1; - uint32 amount = 2; + uint64 amount = 2; bytes asset_id = 3; } message ContractCreatedOutput { @@ -479,7 +479,7 @@ message ScriptMetadata { repeated uint32 outputs_offset_at = 6; uint32 witnesses_offset = 7; repeated uint32 witnesses_offset_at = 8; - uint32 script_gas_limit = 9; + uint64 script_gas_limit = 9; bytes receipts_root = 10; bytes script = 11; bytes script_data = 12; @@ -508,4 +508,4 @@ service BlockAggregator { rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); rpc GetBlockRange (BlockRangeRequest) returns (stream BlockResponse); rpc NewBlockSubscription (NewBlockSubscriptionRequest) returns (stream BlockResponse); -} \ No newline at end of file +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index e1998c372df..d69c29401a1 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,27 +1,36 @@ use crate::{ blocks::importer_and_db_source::BlockSerializer, - result::Error, -}; - -use crate::protobuf_types::{ - Block as ProtoBlock, - Header as ProtoHeader, - Transaction as 
ProtoTransaction, - V1Block as ProtoV1Block, - block::VersionedBlock as ProtoVersionedBlock, + protobuf_types::{ + Block as ProtoBlock, + Header as ProtoHeader, + Policies as ProtoPolicies, + ScriptTx as ProtoScriptTx, + Transaction as ProtoTransaction, + V1Block as ProtoV1Block, + V1Header as ProtoV1Header, + block::VersionedBlock as ProtoVersionedBlock, + header::VersionedHeader as ProtoVersionedHeader, + transaction::Variant as ProtoTransactionVariant, + }, }; -use anyhow::anyhow; use fuel_core_types::{ blockchain::{ - block::{ - Block as FuelBlock, - BlockV1, - }, + block::Block as FuelBlock, header::BlockHeader, }, - fuel_tx::Transaction as FuelTransaction, + fuel_tx::{ + Transaction as FuelTransaction, + field::{ + Policies as _, + ReceiptsRoot as _, + Script as _, + ScriptData as _, + ScriptGasLimit as _, + Witnesses as _, + }, + policies::PolicyType, + }, }; -use postcard::to_allocvec; #[derive(Clone)] pub struct SerializerAdapter; @@ -48,9 +57,92 @@ impl BlockSerializer for SerializerAdapter { } fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { - todo!() + let block_id = header.id(); + let consensus = *header.consensus(); + let versioned_header = match header { + BlockHeader::V1(v1) => { + let application = *v1.application(); + let generated = application.generated; + + let proto_v1_header = ProtoV1Header { + da_height: saturating_u64_to_u32(application.da_height.0), + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application + .state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: 
consensus.time.0.to_be_bytes().to_vec(), + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + }; + + ProtoVersionedHeader::V1(proto_v1_header) + } + }; + + ProtoHeader { + versioned_header: Some(versioned_header), + } } fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { - todo!() + match tx { + FuelTransaction::Script(script) => { + let proto_script = ProtoScriptTx { + script_gas_limit: *script.script_gas_limit(), + receipts_root: bytes32_to_vec(script.receipts_root()), + script: script.script().clone(), + script_data: script.script_data().clone(), + policies: Some(proto_policies_from_policies(script.policies())), + inputs: Vec::new(), + outputs: Vec::new(), + witnesses: script + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Script(proto_script)), + } + } + _ => ProtoTransaction { variant: None }, + } +} + +fn proto_policies_from_policies( + policies: &fuel_core_types::fuel_tx::policies::Policies, +) -> ProtoPolicies { + const POLICY_ORDER: [PolicyType; 5] = [ + PolicyType::Tip, + PolicyType::WitnessLimit, + PolicyType::Maturity, + PolicyType::MaxFee, + PolicyType::Expiration, + ]; + + let values = POLICY_ORDER + .iter() + .map(|policy_type| policies.get(*policy_type).unwrap_or_default()) + .collect(); + + ProtoPolicies { + bits: policies.bits(), + values, + } +} + +fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { + bytes.as_ref().to_vec() +} + +fn saturating_u64_to_u32(value: u64) -> u32 { + value.min(u32::MAX as u64) as u32 } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 4fd21aa7940..456cd0f1049 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -17,11 +17,7 @@ use fuel_core::{ }, }; use fuel_core_client::client::FuelClient; -use fuel_core_types::{ - blockchain::block::Block, - fuel_tx::*, - fuel_types::BlockHeight, 
-}; +use fuel_core_types::fuel_tx::*; use futures::StreamExt; use test_helpers::client_ext::ClientExt; @@ -68,26 +64,12 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { } else { panic!("expected literal block payload"); }; - - let actual_height = if let ProtoVersionedBlock::V1(v1_block) = - actual_block.versioned_block.unwrap() - { - if let ProtoVersionedHeader::V1(v1_header) = - v1_block.header.unwrap().versioned_header.unwrap() - { - v1_header.height - } else { - panic!("expected V1 header"); - } - } else { - panic!("expected V1 block"); - }; + let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); + let ProtoVersionedHeader::V1(v1_header) = + v1_block.header.unwrap().versioned_header.unwrap(); + let actual_height = v1_header.height; // then assert_eq!(expected_header.height.0, actual_height); - // check txs - // let actual_tx = actual_block.transactions().first().unwrap(); - // let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); - // let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); } #[tokio::test(flavor = "multi_thread")] @@ -154,29 +136,18 @@ async fn new_block_subscription__can_get_expect_block() { let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) .await .unwrap(); - let actual_bytes = + let actual_block = if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { - block.data + block } else { panic!("expected literal block payload"); }; + let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); + let ProtoVersionedHeader::V1(v1_header) = + v1_block.header.unwrap().versioned_header.unwrap(); + let actual_height = v1_header.height; // then - let expected_block = graphql_client - .full_block_by_height(1) - .await - .unwrap() - .unwrap(); - let header = expected_block.header; - let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); - assert_eq!( - BlockHeight::from(header.height.0), 
- *actual_block.header().height() - ); - // check txs - let actual_tx = actual_block.transactions().first().unwrap(); - let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); - let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); - - assert_eq!(&expected_tx, actual_tx); + let expected_height = 1; + assert_eq!(expected_height, actual_height); } From 9a083ec77d8e44823bf47435c7a39ec4b4bb92f0 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 09:23:32 -0600 Subject: [PATCH 043/146] get tests passing --- .../block_aggregator_api/proto/api.proto | 32 +++++++++++++++++++ .../serializer_adapter.rs | 25 +++++++++++++++ tests/tests/rpc.rs | 14 ++++---- 3 files changed, 65 insertions(+), 6 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 94af0cec4a5..9b9199678b4 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -27,6 +27,7 @@ message V1Block { message Header { oneof versioned_header { V1Header v1 = 1; + V2Header v2 = 2; } } @@ -102,6 +103,37 @@ message V1Header { optional bytes block_id = 13; } +// pub struct GeneratedApplicationFieldsV2 { +// /// Number of transactions in this block. +// pub transactions_count: u16, +// /// Number of message receipts in this block. +// pub message_receipt_count: u32, +// /// Merkle root of transactions. +// pub transactions_root: Bytes32, +// /// Merkle root of message receipts in this block. 
+// pub message_outbox_root: Bytes32, +// /// Root hash of all imported events from L1 +// pub event_inbox_root: Bytes32, +// /// TxID commitment +// pub tx_id_commitment: Bytes32, +//} +message V2Header { + uint32 da_height = 1; + uint32 consensus_parameters_version = 2; + uint32 state_transition_bytecode_version = 3; + uint32 transactions_count = 4; + uint32 message_receipt_count = 5; + bytes transactions_root = 6; + bytes message_outbox_root = 7; + bytes event_inbox_root = 8; + bytes tx_id_commitment = 9; + bytes prev_root = 10; + uint32 height = 11; + bytes time = 12; + bytes application_hash = 13; + optional bytes block_id = 14; +} + // pub enum Transaction { // Script(Script), // Create(Create), diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index d69c29401a1..b770b80fd98 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -8,6 +8,7 @@ use crate::{ Transaction as ProtoTransaction, V1Block as ProtoV1Block, V1Header as ProtoV1Header, + V2Header as ProtoV2Header, block::VersionedBlock as ProtoVersionedBlock, header::VersionedHeader as ProtoVersionedHeader, transaction::Variant as ProtoTransactionVariant, @@ -83,6 +84,30 @@ fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { ProtoVersionedHeader::V1(proto_v1_header) } + BlockHeader::V2(header) => { + let application = *header.application(); + let generated = application.generated; + + let proto_v2_header = ProtoV2Header { + da_height: saturating_u64_to_u32(application.da_height.0), + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application + .state_transition_bytecode_version, + transactions_count: 
u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0.to_be_bytes().to_vec(), + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + }; + + ProtoVersionedHeader::V2(proto_v2_header) + } }; ProtoHeader { diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 456cd0f1049..c232afeecc1 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -65,9 +65,10 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { panic!("expected literal block payload"); }; let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let ProtoVersionedHeader::V1(v1_header) = - v1_block.header.unwrap().versioned_header.unwrap(); - let actual_height = v1_header.height; + let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { + ProtoVersionedHeader::V1(v1_header) => v1_header.height, + ProtoVersionedHeader::V2(v2_header) => v2_header.height, + }; // then assert_eq!(expected_header.height.0, actual_height); } @@ -144,9 +145,10 @@ async fn new_block_subscription__can_get_expect_block() { }; let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let ProtoVersionedHeader::V1(v1_header) = - v1_block.header.unwrap().versioned_header.unwrap(); - let actual_height = v1_header.height; + let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { + ProtoVersionedHeader::V1(v1_header) => v1_header.height, + ProtoVersionedHeader::V2(v2_header) => v2_header.height, + }; // then let expected_height = 1; 
assert_eq!(expected_height, actual_height); From 5505ba01059a78a184a511647c6e6e8c3be48ae5 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 09:25:45 -0600 Subject: [PATCH 044/146] lint tomls --- crates/services/block_aggregator_api/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 1d5657b44b8..d6516c7187f 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -22,7 +22,7 @@ futures = { workspace = true } log = "0.4.27" num_enum = { workspace = true } postcard = { workspace = true } -prost = { workspace = true , features = ["derive"]} +prost = { workspace = true, features = ["derive"] } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } strum = { workspace = true } From 3e76d068da1007dfda848244f9b5af4146d5e673 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 09:27:02 -0600 Subject: [PATCH 045/146] Add changelog --- .changes/fixed/3112.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changes/fixed/3112.md diff --git a/.changes/fixed/3112.md b/.changes/fixed/3112.md new file mode 100644 index 00000000000..7efa291b31c --- /dev/null +++ b/.changes/fixed/3112.md @@ -0,0 +1 @@ +Use Protobuf types in serialization rather than opaque bytes \ No newline at end of file From 3a61d5959b0127fc50db87e28eaff822864def0b Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 09:27:54 -0600 Subject: [PATCH 046/146] spellcheck --- crates/services/block_aggregator_api/proto/api.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 9b9199678b4..1679220ee3e 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ 
-293,7 +293,7 @@ message CoinPredicateInput { // pub balance_root: Bytes32, // #[cfg_attr(feature = "da-compression", compress(skip))] // pub state_root: Bytes32, -// /// Pointer to transction that last modified the contract state. +// /// Pointer to transaction that last modified the contract state. // #[cfg_attr(feature = "da-compression", compress(skip))] // pub tx_pointer: TxPointer, // pub contract_id: ContractId, From 9659d11524603889e44b03dadb166443a449d135 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 10:06:41 -0600 Subject: [PATCH 047/146] fix feature issues --- .../services/block_aggregator_api/Cargo.toml | 3 + .../serializer_adapter.rs | 110 +++++++++++------- tests/Cargo.toml | 1 + 3 files changed, 71 insertions(+), 43 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index d6516c7187f..82d882745a4 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -10,6 +10,9 @@ rust-version = { workspace = true } description = "Block Aggregator API Service for Fuel Core" build = "build.rs" +[features] +fault-proving = ["fuel-core-types/fault-proving"] + [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index b770b80fd98..939ce1b1c79 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -14,10 +14,18 @@ use crate::{ transaction::Variant as ProtoTransactionVariant, }, }; +#[cfg(feature = "fault-proving")] +use fuel_core_types::blockchain::header::BlockHeaderV2; use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, - 
header::BlockHeader, + header::{ + BlockHeader, + BlockHeaderV1, + ConsensusHeader, + GeneratedConsensusFields, + }, + primitives::BlockId, }, fuel_tx::{ Transaction as FuelTransaction, @@ -61,51 +69,15 @@ fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { let block_id = header.id(); let consensus = *header.consensus(); let versioned_header = match header { - BlockHeader::V1(v1) => { - let application = *v1.application(); - let generated = application.generated; - - let proto_v1_header = ProtoV1Header { - da_height: saturating_u64_to_u32(application.da_height.0), - consensus_parameters_version: application.consensus_parameters_version, - state_transition_bytecode_version: application - .state_transition_bytecode_version, - transactions_count: u32::from(generated.transactions_count), - message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), - message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - prev_root: bytes32_to_vec(&consensus.prev_root), - height: u32::from(consensus.height), - time: consensus.time.0.to_be_bytes().to_vec(), - application_hash: bytes32_to_vec(&consensus.generated.application_hash), - block_id: Some(block_id.as_slice().to_vec()), - }; - + BlockHeader::V1(header) => { + let proto_v1_header = + proto_v1_header_from_v1_header(consensus, block_id, header); ProtoVersionedHeader::V1(proto_v1_header) } + #[cfg(feature = "fault-proving")] BlockHeader::V2(header) => { - let application = *header.application(); - let generated = application.generated; - - let proto_v2_header = ProtoV2Header { - da_height: saturating_u64_to_u32(application.da_height.0), - consensus_parameters_version: application.consensus_parameters_version, - state_transition_bytecode_version: application - .state_transition_bytecode_version, - transactions_count: u32::from(generated.transactions_count), - 
message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), - message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), - prev_root: bytes32_to_vec(&consensus.prev_root), - height: u32::from(consensus.height), - time: consensus.time.0.to_be_bytes().to_vec(), - application_hash: bytes32_to_vec(&consensus.generated.application_hash), - block_id: Some(block_id.as_slice().to_vec()), - }; - + let proto_v2_header = + proto_v2_header_from_v2_header(consensus, block_id, header); ProtoVersionedHeader::V2(proto_v2_header) } }; @@ -115,6 +87,58 @@ fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { } } +fn proto_v1_header_from_v1_header( + consensus: ConsensusHeader, + block_id: BlockId, + header: BlockHeaderV1, +) -> ProtoV1Header { + let application = header.application(); + let generated = application.generated; + + ProtoV1Header { + da_height: saturating_u64_to_u32(application.da_height.0), + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0.to_be_bytes().to_vec(), + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + +#[cfg(feature = "fault-proving")] +fn proto_v2_header_from_v2_header( + consensus: ConsensusHeader, + block_id: BlockId, + header: 
BlockHeaderV2, +) -> ProtoV2Header { + let application = *header.application(); + let generated = application.generated; + + ProtoV2Header { + da_height: saturating_u64_to_u32(application.da_height.0), + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0.to_be_bytes().to_vec(), + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { match tx { FuelTransaction::Script(script) => { diff --git a/tests/Cargo.toml b/tests/Cargo.toml index ccae4eaba2e..a8f7431ede6 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -19,6 +19,7 @@ aws-kms = ["dep:aws-config", "dep:aws-sdk-kms", "fuel-core-bin/aws-kms"] fault-proving = [ "fuel-core/fault-proving", "fuel-core-types/fault-proving", + "fuel-block-aggregator-api/fault-proving", "fuel-core-storage/fault-proving", "fuel-core-upgradable-executor/fault-proving", "fuel-core-poa/fault-proving", From edecb7eaf197ba1cb5b59855a11ca160d1f5d9fc Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 10:50:58 -0600 Subject: [PATCH 048/146] Fix more tests, appease clippy-sama, remove warnings --- crates/services/block_aggregator_api/build.rs | 1 + .../services/block_aggregator_api/src/api.rs | 5 +- .../src/api/protobuf_adapter/tests.rs | 89 +++++++++++-------- .../src/block_aggregator.rs | 3 
- .../src/blocks/importer_and_db_source.rs | 1 - .../blocks/importer_and_db_source/tests.rs | 1 + .../services/block_aggregator_api/src/db.rs | 5 +- .../block_aggregator_api/src/db/storage_db.rs | 1 - .../src/db/storage_db/table.rs | 5 +- .../src/db/storage_db/tests.rs | 56 +++++------- .../services/block_aggregator_api/src/lib.rs | 6 +- 11 files changed, 85 insertions(+), 88 deletions(-) diff --git a/crates/services/block_aggregator_api/build.rs b/crates/services/block_aggregator_api/build.rs index 3ccf204eb70..190a1538000 100644 --- a/crates/services/block_aggregator_api/build.rs +++ b/crates/services/block_aggregator_api/build.rs @@ -1,6 +1,7 @@ fn main() -> Result<(), Box> { tonic_prost_build::configure() .type_attribute(".", "#[derive(serde::Serialize,serde::Deserialize)]") + .type_attribute(".", "#[allow(clippy::large_enum_variant)]") .compile_protos(&["proto/api.proto"], &["proto/"])?; Ok(()) } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 08d5511b53f..4beb51c47f3 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,7 +1,4 @@ -use crate::{ - NewBlock, - result::Result, -}; +use crate::result::Result; use fuel_core_types::fuel_types::BlockHeight; use std::fmt; diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index c5cebcb177e..b6ecc8e01a3 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -1,21 +1,12 @@ #![allow(non_snake_case)] -use crate::protobuf_types::{ - Block as ProtoBlock, - block_aggregator_client::{ - BlockAggregatorClient as ProtoBlockAggregatorClient, - BlockAggregatorClient, - }, -}; use crate::{ - NewBlock, api::{ BlockAggregatorApi, BlockAggregatorQuery, protobuf_adapter::ProtobufAPI, }, 
block_range_response::BlockRangeResponse, - blocks::Block, protobuf_types::{ BlockHeightRequest, BlockRangeRequest, @@ -24,8 +15,23 @@ use crate::{ block_response::Payload, }, }; -use bytes::Bytes; -use fuel_core_types::fuel_types::BlockHeight; +use crate::{ + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, + protobuf_types::{ + Block as ProtoBlock, + block_aggregator_client::{ + BlockAggregatorClient as ProtoBlockAggregatorClient, + BlockAggregatorClient, + }, + }, +}; +use fuel_core_types::{ + blockchain::block::Block as FuelBlock, + fuel_types::BlockHeight, +}; use futures::{ StreamExt, TryStreamExt, @@ -101,14 +107,25 @@ async fn await_query__get_block_range__client_receives_expected_value() { let query = api.await_query().await.unwrap(); // then + let serializer_adapter = SerializerAdapter; // let block1 = Block::new(Bytes::from(vec![0u8; 100])); // let block2 = Block::new(Bytes::from(vec![1u8; 100])); - let block1 = ProtoBlock { - data: vec![0u8; 100], - }; - let block2 = ProtoBlock { - data: vec![1u8; 100], - }; + // let block1 = ProtoBlock { + // data: vec![0u8; 100], + // }; + // let block2 = ProtoBlock { + // data: vec![1u8; 100], + // }; + let fuel_block_1 = FuelBlock::default(); + let mut fuel_block_2 = FuelBlock::default(); + let block_height_2 = fuel_block_1.header().height().succ().unwrap(); + fuel_block_2.header_mut().set_block_height(block_height_2); + let block1 = serializer_adapter + .serialize_block(&fuel_block_1) + .expect("could not serialize block"); + let block2 = serializer_adapter + .serialize_block(&fuel_block_2) + .expect("could not serialize block"); let list = vec![block1, block2]; // return response through query's channel if let BlockAggregatorQuery::GetBlockRange { @@ -128,8 +145,8 @@ async fn await_query__get_block_range__client_receives_expected_value() { } tracing::info!("awaiting query"); let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|b| 
b.data.to_vec()).collect(); - let actual: Vec> = response + let expected = list; + let actual: Vec = response .into_inner() .try_collect::>() .await @@ -137,7 +154,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner.data.to_vec() + inner } else { panic!("unexpected response type") } @@ -175,28 +192,30 @@ async fn await_query__new_block_stream__client_receives_expected_value() { // then let height1 = BlockHeight::new(0); let height2 = BlockHeight::new(1); - // let block1 = Block::new(Bytes::from(vec![0u8; 100])); - // let block2 = Block::new(Bytes::from(vec![1u8; 100])); - let block1 = ProtoBlock { - data: vec![0u8; 100], - }; - let block2 = ProtoBlock { - data: vec![1u8; 100], - }; - let list = vec![(height1, block1), (height2, block2)]; + let serializer_adapter = SerializerAdapter; + let mut fuel_block_1 = FuelBlock::default(); + fuel_block_1.header_mut().set_block_height(height1); + let mut fuel_block_2 = FuelBlock::default(); + fuel_block_2.header_mut().set_block_height(height2); + let block1 = serializer_adapter + .serialize_block(&fuel_block_1) + .expect("could not serialize block"); + let block2 = serializer_adapter + .serialize_block(&fuel_block_2) + .expect("could not serialize block"); + let list = vec![block1, block2]; if let BlockAggregatorQuery::NewBlockSubscription { response } = query { tracing::info!("correct query received, sending response"); - for (height, block) in list.clone() { - let new_block = block; - response.send(new_block).await.unwrap(); + for block in list.clone() { + response.send(block).await.unwrap(); } } else { panic!("expected GetBlockRange query"); } tracing::info!("awaiting query"); let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|(_, b)| b.data.to_vec()).collect(); - let actual: Vec> = response + let expected = list; + let actual: Vec = response .into_inner() .try_collect::>() .await @@ 
-204,7 +223,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner.data.to_vec() + inner } else { panic!("unexpected response type") } diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index 34061af7bcf..4fde80d22b7 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -1,6 +1,5 @@ use crate::{ BlockAggregator, - NewBlock, api::{ BlockAggregatorApi, BlockAggregatorQuery, @@ -8,7 +7,6 @@ use crate::{ blocks::{ BlockSource, BlockSourceEvent, - importer_and_db_source::importer_service::ImporterTask, }, db::BlockAggregatorDB, }; @@ -17,7 +15,6 @@ use fuel_core_services::{ try_or_stop, }; use fuel_core_types::fuel_types::BlockHeight; -use postcard::Serializer; impl BlockAggregator where diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index ecdf8898d59..892b2b40120 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -1,6 +1,5 @@ use crate::{ blocks::{ - Block, BlockSource, BlockSourceEvent, importer_and_db_source::importer_service::ImporterTask, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 8893e08577f..c0e46921cc3 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,6 +1,7 @@ #![allow(non_snake_case)] use super::*; +use crate::blocks::Block; use ::postcard::to_allocvec; use 
fuel_core_services::stream::{ IntoBoxStream, diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 0e503c0c9cd..d664bd13932 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -1,7 +1,4 @@ -use crate::{ - blocks::Block, - result::Result, -}; +use crate::result::Result; use fuel_core_types::fuel_types::BlockHeight; pub mod storage_db; diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 05bac5fbd3e..7aeac0a91d1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,6 +1,5 @@ use crate::{ block_range_response::BlockRangeResponse, - blocks::Block, db::{ BlockAggregatorDB, storage_db::table::Column, diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index 0c57cba9fa7..be11785c7af 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -1,7 +1,4 @@ -use crate::{ - blocks::Block, - protobuf_types::Block as ProtoBlock, -}; +use crate::protobuf_types::Block as ProtoBlock; use fuel_core_storage::{ Mappable, blueprint::plain::Plain, diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 43f2a5110d1..258ba8d7154 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -1,43 +1,45 @@ #![allow(non_snake_case)] use super::*; -use crate::db::storage_db::table::Column; +use crate::{ + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, + db::storage_db::table::Column, +}; use 
fuel_core_storage::{ StorageAsRef, structured_storage::test::InMemoryStorage, transactional::IntoTransaction, }; use fuel_core_types::{ - ed25519::signature::rand_core::SeedableRng, + blockchain::block::Block as FuelBlock, + fuel_tx::Transaction, fuel_types::BlockHeight, }; use futures::StreamExt; -use rand::{ - Rng, - rngs::StdRng, -}; fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() } -fn random_proto_block(rng: &mut StdRng) -> ProtoBlock { - const ARB_SIZE: usize = 1000; - let mut data = vec![0u8; ARB_SIZE]; - rng.fill(&mut data[..]); - ProtoBlock { data } +fn proto_block_with_height(height: BlockHeight) -> ProtoBlock { + let serializer_adapter = SerializerAdapter; + let mut default_block = FuelBlock::::default(); + default_block.header_mut().set_block_height(height); + serializer_adapter + .serialize_block(&FuelBlock::default()) + .unwrap() } #[tokio::test] async fn store_block__adds_to_storage() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let mut adapter = StorageDB::new(db); let height = BlockHeight::from(1u32); - let mut data = vec![0u8; 1000]; - rng.fill(&mut data[..]); - let expected = ProtoBlock { data }; + let expected = proto_block_with_height(height); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -55,16 +57,15 @@ async fn store_block__adds_to_storage() { #[tokio::test] async fn get_block__can_get_expected_range() { - let mut rng = StdRng::seed_from_u64(666); // given let mut db = database(); let height_1 = BlockHeight::from(1u32); let height_2 = BlockHeight::from(2u32); let height_3 = BlockHeight::from(3u32); - let expected_1 = random_proto_block(&mut rng); - let expected_2 = random_proto_block(&mut rng); - let expected_3 = random_proto_block(&mut rng); + let expected_1 = proto_block_with_height(height_1); + let expected_2 = proto_block_with_height(height_2); + let expected_3 = proto_block_with_height(height_3); let mut tx = db.write_transaction(); 
tx.storage_as_mut::() @@ -95,16 +96,11 @@ async fn get_block__can_get_expected_range() { #[tokio::test] async fn store_block__updates_the_highest_continuous_block_if_contiguous() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let mut adapter = StorageDB::new_with_height(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); - // // let expected = Block::random(&mut rng); - // let mut data = vec![0u8; 1000]; - // rng.fill(&mut data[..]); - // let expected = ProtoBlock { data }; - let expected = random_proto_block(&mut rng); + let expected = proto_block_with_height(height); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -117,14 +113,12 @@ async fn store_block__updates_the_highest_continuous_block_if_contiguous() { #[tokio::test] async fn store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let starting_height = BlockHeight::from(0u32); let mut adapter = StorageDB::new_with_height(db, starting_height); let height = BlockHeight::from(2u32); - // let expected = Block::random(&mut rng); - let expected = random_proto_block(&mut rng); + let expected = proto_block_with_height(height); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -137,7 +131,6 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig #[tokio::test] async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let starting_height = BlockHeight::from(0u32); @@ -148,7 +141,7 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let height = BlockHeight::from(height); orphaned_height = Some(height); // let block = Block::random(&mut rng); - let block = random_proto_block(&mut rng); + let block = proto_block_with_height(height); adapter.store_block(height, 
block).await.unwrap(); } let expected = starting_height; @@ -157,8 +150,7 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // when let height = BlockHeight::from(1u32); - // let expected = Block::random(&mut rng); - let some_block = random_proto_block(&mut rng); + let some_block = proto_block_with_height(height); adapter .store_block(height, some_block.clone()) .await diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index a056d3d45f7..e3e9057d7d7 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -1,9 +1,6 @@ use crate::{ api::BlockAggregatorApi, - blocks::{ - Block, - BlockSource, - }, + blocks::BlockSource, db::BlockAggregatorDB, }; use fuel_core_services::{ @@ -61,6 +58,7 @@ pub mod integration { pub addr: SocketAddr, } + #[allow(clippy::type_complexity)] pub fn new_service( config: &Config, db: DB, From 5bd649c1f18e70d2c432fce7c70707aac73c2614 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 10:56:40 -0600 Subject: [PATCH 049/146] Fix import --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 939ce1b1c79..725eef253ef 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "fault-proving")] +use crate::protobuf_types::V2Header as ProtoV2Header; use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ @@ -8,7 +10,6 @@ use crate::{ Transaction as ProtoTransaction, V1Block as ProtoV1Block, V1Header as ProtoV1Header, - 
V2Header as ProtoV2Header, block::VersionedBlock as ProtoVersionedBlock, header::VersionedHeader as ProtoVersionedHeader, transaction::Variant as ProtoTransactionVariant, From e2c18c3aa8c9ff824ec4108e57b6fb739727d5a3 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 8 Oct 2025 11:29:02 -0600 Subject: [PATCH 050/146] Fix bug in test helper, remove commented code --- .../src/api/protobuf_adapter.rs | 12 ++++------- .../src/api/protobuf_adapter/tests.rs | 21 ++++--------------- .../blocks/importer_and_db_source/tests.rs | 1 - .../src/db/storage_db/tests.rs | 5 +---- 4 files changed, 9 insertions(+), 30 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 1a239d27917..cfc4b0bd33e 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -1,7 +1,3 @@ -use crate::protobuf_types::block_aggregator_server::{ - BlockAggregator, - BlockAggregatorServer as ProtoBlockAggregatorServer, -}; use crate::{ api::{ BlockAggregatorApi, @@ -15,10 +11,10 @@ use crate::{ BlockRangeRequest as ProtoBlockRangeRequest, BlockResponse as ProtoBlockResponse, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - // block_aggregator_server::{ - // BlockAggregator as ProtoBlockAggregator, - // BlockAggregatorServer as ProtoBlockAggregatorServer, - // }, + block_aggregator_server::{ + BlockAggregator, + BlockAggregatorServer as ProtoBlockAggregatorServer, + }, block_response as proto_block_response, }, result::{ diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index b6ecc8e01a3..7807ac02180 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -7,25 +7,20 @@ use 
crate::{ protobuf_adapter::ProtobufAPI, }, block_range_response::BlockRangeResponse, - protobuf_types::{ - BlockHeightRequest, - BlockRangeRequest, - NewBlockSubscriptionRequest, - // block_aggregator_client::BlockAggregatorClient, - block_response::Payload, - }, -}; -use crate::{ blocks::importer_and_db_source::{ BlockSerializer, serializer_adapter::SerializerAdapter, }, protobuf_types::{ Block as ProtoBlock, + BlockHeightRequest, + BlockRangeRequest, + NewBlockSubscriptionRequest, block_aggregator_client::{ BlockAggregatorClient as ProtoBlockAggregatorClient, BlockAggregatorClient, }, + block_response::Payload, }, }; use fuel_core_types::{ @@ -108,14 +103,6 @@ async fn await_query__get_block_range__client_receives_expected_value() { // then let serializer_adapter = SerializerAdapter; - // let block1 = Block::new(Bytes::from(vec![0u8; 100])); - // let block2 = Block::new(Bytes::from(vec![1u8; 100])); - // let block1 = ProtoBlock { - // data: vec![0u8; 100], - // }; - // let block2 = ProtoBlock { - // data: vec![1u8; 100], - // }; let fuel_block_1 = FuelBlock::default(); let mut fuel_block_2 = FuelBlock::default(); let block_height_2 = fuel_block_1.header().height().succ().unwrap(); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index c0e46921cc3..64d0256dbae 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -49,7 +49,6 @@ fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() } -// let block_stream = tokio_stream::iter(blocks).chain(pending()).into_boxed(); fn stream_with_pending(items: Vec) -> BoxStream { tokio_stream::iter(items).chain(pending()).into_boxed() } diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs 
b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 258ba8d7154..593839e406a 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -28,9 +28,7 @@ fn proto_block_with_height(height: BlockHeight) -> ProtoBlock { let serializer_adapter = SerializerAdapter; let mut default_block = FuelBlock::::default(); default_block.header_mut().set_block_height(height); - serializer_adapter - .serialize_block(&FuelBlock::default()) - .unwrap() + serializer_adapter.serialize_block(&default_block).unwrap() } #[tokio::test] @@ -140,7 +138,6 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { for height in 2..=10u32 { let height = BlockHeight::from(height); orphaned_height = Some(height); - // let block = Block::random(&mut rng); let block = proto_block_with_height(height); adapter.store_block(height, block).await.unwrap(); } From 141f88673078e0e9a660a06f68304c9925f99c7f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 13 Oct 2025 12:23:12 -0600 Subject: [PATCH 051/146] Start adding deserialization --- .../serializer_adapter.rs | 123 +++++++++++++++++- .../block_aggregator_api/src/result.rs | 2 + crates/types/src/blockchain/header.rs | 19 +++ 3 files changed, 143 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 725eef253ef..c8ff0e4aca0 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -10,11 +10,19 @@ use crate::{ Transaction as ProtoTransaction, V1Block as ProtoV1Block, V1Header as ProtoV1Header, - block::VersionedBlock as ProtoVersionedBlock, + block::{ + VersionedBlock as ProtoVersionedBlock, + 
VersionedBlock, + }, header::VersionedHeader as ProtoVersionedHeader, transaction::Variant as ProtoTransactionVariant, }, + result::{ + Error, + Result, + }, }; +use anyhow::anyhow; #[cfg(feature = "fault-proving")] use fuel_core_types::blockchain::header::BlockHeaderV2; use fuel_core_types::{ @@ -25,10 +33,12 @@ use fuel_core_types::{ BlockHeaderV1, ConsensusHeader, GeneratedConsensusFields, + PartialBlockHeader, }, primitives::BlockId, }, fuel_tx::{ + Bytes32, Transaction as FuelTransaction, field::{ Policies as _, @@ -40,6 +50,7 @@ use fuel_core_types::{ }, policies::PolicyType, }, + fuel_types::ChainId, }; #[derive(Clone)] @@ -196,3 +207,113 @@ fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { fn saturating_u64_to_u32(value: u64) -> u32 { value.min(u32::MAX as u64) as u32 } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serialize_block__roundtrip() { + // given + let serializer = SerializerAdapter; + let fuel_block = FuelBlock::default(); + + // when + let proto_block = serializer.serialize_block(&fuel_block).unwrap(); + + // then + let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); + assert_eq!(fuel_block, deserialized_block); + } +} + +pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { + let versioned_block = proto_block + .versioned_block + .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) + .map_err(Error::Serialization)?; + let partial_header = match versioned_block { + ProtoVersionedBlock::V1(v1_block) => { + let proto_header = v1_block + .header + .ok_or_else(|| anyhow::anyhow!("Missing protobuf header")) + .map_err(Error::Serialization)?; + partial_header_from_proto_header(proto_header)? 
+ } + }; + let txs = match versioned_block { + VersionedBlock::V1(v1_block) => v1_block + .transactions + .iter() + .map(tx_from_proto_tx) + .collect::>()?, + }; + // #[cfg(feature = "fault-proving")] + // Ok(FuelBlock::new(header, txs, &[], Bytes32::default(), ChainId::default())) + // #[cfg(not(feature = "fault-proving"))] + // Ok(FuelBlock::new(header, txs, &[], Bytes32::default())) + // if cfg!(feature = "fault-proving") { + // Ok(FuelBlock::new( + // partial_header, + // txs, + // &[], + // Bytes32::default(), + // ChainId::default(), + // )) + // } else { + // Ok(FuelBlock::new(partial_header, txs, &[], Bytes32::default())) + // } + FuelBlock::new( + partial_header, + txs, + &[], + Bytes32::default(), + #[cfg(feature = "fault-proving")] + &ChainId::default(), + ) + .map_err(|e| anyhow!(e)) + .map_err(Error::Serialization) +} + +pub fn partial_header_from_proto_header( + proto_header: ProtoHeader, +) -> Result { + let versioned_header = proto_header + .versioned_header + .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization)?; + match versioned_header { + ProtoVersionedHeader::V1(v1_header) => { + let header = block_header_from_proto_v1_header(v1_header)?; + Ok(PartialBlockHeader { + application: Default::default(), + consensus: Default::default(), + }) + } + #[cfg(feature = "fault-proving")] + ProtoVersionedHeader::V2(v2_header) => { + let header = block_header_from_proto_v2_header(v2_header)?; + Ok(PartialBlockHeader { + application: header.as_empty_application_header(), + consensus: header.as_empty_consensus_header(), + }) + } + } +} + +fn block_header_from_proto_v1_header( + proto_v1_header: ProtoV1Header, +) -> Result { + todo!() +} + +#[cfg(feature = "fault-proving")] +fn block_header_from_proto_v2_header( + proto_v2_header: ProtoV2Header, +) -> Result { + todo!() +} + +pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { + todo!() +} diff --git 
a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index b687f1ec6cc..ab91f71ece0 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -7,6 +7,8 @@ pub enum Error { BlockSource(anyhow::Error), #[error("Database error: {0}")] DB(anyhow::Error), + #[error("Serialization error: {0}")] + Serialization(anyhow::Error), } pub type Result = core::result::Result; diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 4969355a1ca..39bfe797ff9 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -195,6 +195,25 @@ impl BlockHeader { }, } } + + /// Alias the consensus header into an empty one. + pub fn as_empty_consensus_header(&self) -> ConsensusHeader { + match self { + BlockHeader::V1(header) => ConsensusHeader { + prev_root: header.consensus().prev_root, + height: header.consensus().height, + time: header.consensus().time, + generated: Empty {}, + }, + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => ConsensusHeader { + prev_root: header.consensus().prev_root, + height: header.consensus().height, + time: header.consensus().time, + generated: Empty {}, + }, + } + } } #[cfg(any(test, feature = "test-helpers"))] From c9e0708e4e91121d66e2e05f42bec8eb0bc31c08 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 17 Oct 2025 12:06:12 +0300 Subject: [PATCH 052/146] Finish round-trip test --- .../block_aggregator_api/proto/api.proto | 8 +- .../serializer_adapter.rs | 188 +++++++++++------- crates/types/src/blockchain/header.rs | 13 ++ crates/types/src/blockchain/header/v1.rs | 5 + 4 files changed, 140 insertions(+), 74 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 1679220ee3e..05394fe8735 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ 
b/crates/services/block_aggregator_api/proto/api.proto @@ -88,7 +88,7 @@ message Header { // id: BlockId, //} message V1Header { - uint32 da_height = 1; + uint64 da_height = 1; uint32 consensus_parameters_version = 2; uint32 state_transition_bytecode_version = 3; uint32 transactions_count = 4; @@ -98,7 +98,7 @@ message V1Header { bytes event_inbox_root = 8; bytes prev_root = 9; uint32 height = 10; - bytes time = 11; + uint64 time = 11; bytes application_hash = 12; optional bytes block_id = 13; } @@ -118,7 +118,7 @@ message V1Header { // pub tx_id_commitment: Bytes32, //} message V2Header { - uint32 da_height = 1; + uint64 da_height = 1; uint32 consensus_parameters_version = 2; uint32 state_transition_bytecode_version = 3; uint32 transactions_count = 4; @@ -129,7 +129,7 @@ message V2Header { bytes tx_id_commitment = 9; bytes prev_root = 10; uint32 height = 11; - bytes time = 12; + uint64 time = 12; bytes application_hash = 13; optional bytes block_id = 14; } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index c8ff0e4aca0..f6bae3c4887 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,5 +1,3 @@ -#[cfg(feature = "fault-proving")] -use crate::protobuf_types::V2Header as ProtoV2Header; use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ @@ -10,6 +8,7 @@ use crate::{ Transaction as ProtoTransaction, V1Block as ProtoV1Block, V1Header as ProtoV1Header, + V2Header as ProtoV2Header, block::{ VersionedBlock as ProtoVersionedBlock, VersionedBlock, @@ -28,14 +27,20 @@ use fuel_core_types::blockchain::header::BlockHeaderV2; use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, + consensus, header::{ + ApplicationHeader, BlockHeader, 
BlockHeaderV1, ConsensusHeader, GeneratedConsensusFields, PartialBlockHeader, }, - primitives::BlockId, + primitives::{ + BlockId, + DaBlockHeight, + Empty, + }, }, fuel_tx::{ Bytes32, @@ -51,6 +56,7 @@ use fuel_core_types::{ policies::PolicyType, }, fuel_types::ChainId, + tai64, }; #[derive(Clone)] @@ -108,7 +114,7 @@ fn proto_v1_header_from_v1_header( let generated = application.generated; ProtoV1Header { - da_height: saturating_u64_to_u32(application.da_height.0), + da_height: application.da_height.0, consensus_parameters_version: application.consensus_parameters_version, state_transition_bytecode_version: application.state_transition_bytecode_version, transactions_count: u32::from(generated.transactions_count), @@ -118,7 +124,7 @@ fn proto_v1_header_from_v1_header( event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), prev_root: bytes32_to_vec(&consensus.prev_root), height: u32::from(consensus.height), - time: consensus.time.0.to_be_bytes().to_vec(), + time: consensus.time.0, application_hash: bytes32_to_vec(&consensus.generated.application_hash), block_id: Some(block_id.as_slice().to_vec()), } @@ -209,60 +215,28 @@ fn saturating_u64_to_u32(value: u64) -> u32 { } #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serialize_block__roundtrip() { - // given - let serializer = SerializerAdapter; - let fuel_block = FuelBlock::default(); - - // when - let proto_block = serializer.serialize_block(&fuel_block).unwrap(); - - // then - let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); - assert_eq!(fuel_block, deserialized_block); - } -} - pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { let versioned_block = proto_block .versioned_block .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) .map_err(Error::Serialization)?; - let partial_header = match versioned_block { + let partial_header = match &versioned_block { ProtoVersionedBlock::V1(v1_block) => { let proto_header = v1_block .header + 
.clone() .ok_or_else(|| anyhow::anyhow!("Missing protobuf header")) .map_err(Error::Serialization)?; partial_header_from_proto_header(proto_header)? } }; let txs = match versioned_block { - VersionedBlock::V1(v1_block) => v1_block + VersionedBlock::V1(v1_inner) => v1_inner .transactions .iter() .map(tx_from_proto_tx) .collect::>()?, }; - // #[cfg(feature = "fault-proving")] - // Ok(FuelBlock::new(header, txs, &[], Bytes32::default(), ChainId::default())) - // #[cfg(not(feature = "fault-proving"))] - // Ok(FuelBlock::new(header, txs, &[], Bytes32::default())) - // if cfg!(feature = "fault-proving") { - // Ok(FuelBlock::new( - // partial_header, - // txs, - // &[], - // Bytes32::default(), - // ChainId::default(), - // )) - // } else { - // Ok(FuelBlock::new(partial_header, txs, &[], Bytes32::default())) - // } FuelBlock::new( partial_header, txs, @@ -278,42 +252,116 @@ pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { pub fn partial_header_from_proto_header( proto_header: ProtoHeader, ) -> Result { - let versioned_header = proto_header - .versioned_header - .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_header")) - .map_err(Error::Serialization)?; - match versioned_header { - ProtoVersionedHeader::V1(v1_header) => { - let header = block_header_from_proto_v1_header(v1_header)?; - Ok(PartialBlockHeader { - application: Default::default(), - consensus: Default::default(), - }) + let partial_header = PartialBlockHeader { + consensus: proto_header_to_empty_consensus_header(&proto_header)?, + application: proto_header_to_empty_application_header(&proto_header)?, + }; + Ok(partial_header) +} + +pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { + todo!() +} + +pub fn proto_header_to_empty_application_header( + proto_header: &ProtoHeader, +) -> Result> { + match proto_header.versioned_header.clone() { + Some(ProtoVersionedHeader::V1(header)) => { + let app_header = ApplicationHeader { + da_height: 
DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) } - #[cfg(feature = "fault-proving")] - ProtoVersionedHeader::V2(v2_header) => { - let header = block_header_from_proto_v2_header(v2_header)?; - Ok(PartialBlockHeader { - application: header.as_empty_application_header(), - consensus: header.as_empty_consensus_header(), - }) + Some(ProtoVersionedHeader::V2(header)) => { + cfg!(feature = "fault-proving"); + { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + return Ok(app_header); + } + cfg!(not(feature = "fault-proving")); + { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) + } } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), } } -fn block_header_from_proto_v1_header( - proto_v1_header: ProtoV1Header, -) -> Result { - todo!() +/// Alias the consensus header into an empty one. 
+pub fn proto_header_to_empty_consensus_header( + proto_header: &ProtoHeader, +) -> Result> { + match proto_header.versioned_header.clone() { + Some(ProtoVersionedHeader::V1(header)) => { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root).ok_or( + Error::Serialization(anyhow!("Could create `Bytes32` from bytes")), + )?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + cfg!(feature = "fault-proving"); + { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) + .ok_or(Error::Serialization(anyhow!( + "Could create `Bytes32` from bytes" + )))?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + return Ok(consensus_header); + } + cfg!(not(feature = "fault-proving")); + { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) + } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } } -#[cfg(feature = "fault-proving")] -fn block_header_from_proto_v2_header( - proto_v2_header: ProtoV2Header, -) -> Result { - todo!() -} +#[cfg(test)] +mod tests { + use super::*; -pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { - todo!() + #[test] + fn serialize_block__roundtrip() { + // given + let serializer = SerializerAdapter; + let mut fuel_block = FuelBlock::default(); + let transaction_tree = + fuel_core_types::fuel_merkle::binary::root_calculator::MerkleRootCalculator::new( + ); + let root = transaction_tree.root().into(); + fuel_block.header_mut().set_transaction_root(root); + fuel_block.header_mut().set_message_outbox_root(root); + + // when + let proto_block = serializer.serialize_block(&fuel_block).unwrap(); + + // then + let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); + 
assert_eq!(fuel_block, deserialized_block); + } } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 39bfe797ff9..9b5d19da3e1 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -288,6 +288,19 @@ impl BlockHeader { } } + /// Set the message outbox root for the header + pub fn set_message_outbox_root(&mut self, root: Bytes32) { + match self { + BlockHeader::V1(header) => { + header.set_message_outbox_root(root); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_message_outbox_root(root); + } + } + } + /// Set the consensus parameters version pub fn set_consensus_parameters_version( &mut self, diff --git a/crates/types/src/blockchain/header/v1.rs b/crates/types/src/blockchain/header/v1.rs index 0d450a664e2..316127410ca 100644 --- a/crates/types/src/blockchain/header/v1.rs +++ b/crates/types/src/blockchain/header/v1.rs @@ -132,6 +132,11 @@ impl BlockHeaderV1 { self.recalculate_metadata(); } + pub(crate) fn set_message_outbox_root(&mut self, root: crate::fuel_tx::Bytes32) { + self.application_mut().generated.message_outbox_root = root; + self.recalculate_metadata(); + } + pub(crate) fn set_da_height( &mut self, da_height: crate::blockchain::primitives::DaBlockHeight, From bdd40b1004889bc13ba2fac13b660183658fe910 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 17 Oct 2025 12:08:54 +0300 Subject: [PATCH 053/146] Update changelog, add ignore --- .changes/added/3116.md | 1 + .gitignore | 1 + 2 files changed, 2 insertions(+) create mode 100644 .changes/added/3116.md diff --git a/.changes/added/3116.md b/.changes/added/3116.md new file mode 100644 index 00000000000..96d67f653dc --- /dev/null +++ b/.changes/added/3116.md @@ -0,0 +1 @@ +Complete coverage of proto block types to cover all cases \ No newline at end of file diff --git a/.gitignore b/.gitignore index 42a58004912..5ec17be005d 100644 --- a/.gitignore +++ b/.gitignore @@ -18,5 +18,6 @@ 
package-lock.json package.json bin/fuel-core/chainspec/local-testnet/state_transition_bytecode.wasm .DS_Store +.fueldb/ local-testnet/ From dc1f5bb472e402b0d46600330841ea9150913207 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 17 Oct 2025 12:21:19 +0300 Subject: [PATCH 054/146] Improve documentation for buffer size --- .../services/block_aggregator_api/src/api/protobuf_adapter.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index cfc4b0bd33e..c944e199917 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -74,7 +74,7 @@ impl BlockAggregator for Server { &self, request: tonic::Request, ) -> Result, tonic::Status> { - tracing::debug!("get_block_range: {:?}", request); + const ARB_LITERAL_BLOCK_BUFFER_SIZE: usize = 100; let req = request.into_inner(); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetBlockRange { @@ -92,7 +92,7 @@ impl BlockAggregator for Server { BlockRangeResponse::Literal(inner) => { let (tx, rx) = tokio::sync::mpsc::channel::< Result, - >(16); + >(ARB_LITERAL_BLOCK_BUFFER_SIZE); tokio::spawn(async move { let mut s = inner; From 50cd95c511e24e50dbe78f3f8da365ca768b81fa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 17 Oct 2025 12:46:15 +0300 Subject: [PATCH 055/146] Make compilation happy --- .../serializer_adapter.rs | 41 +++++++------------ crates/types/src/blockchain/header/v2.rs | 5 +++ 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index f6bae3c4887..8ed99b99367 100644 --- 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -9,10 +9,7 @@ use crate::{ V1Block as ProtoV1Block, V1Header as ProtoV1Header, V2Header as ProtoV2Header, - block::{ - VersionedBlock as ProtoVersionedBlock, - VersionedBlock, - }, + block::VersionedBlock as ProtoVersionedBlock, header::VersionedHeader as ProtoVersionedHeader, transaction::Variant as ProtoTransactionVariant, }, @@ -24,10 +21,12 @@ use crate::{ use anyhow::anyhow; #[cfg(feature = "fault-proving")] use fuel_core_types::blockchain::header::BlockHeaderV2; +#[cfg(all(test, feature = "fault-proving"))] +use fuel_core_types::fuel_types::ChainId; + use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, - consensus, header::{ ApplicationHeader, BlockHeader, @@ -55,7 +54,6 @@ use fuel_core_types::{ }, policies::PolicyType, }, - fuel_types::ChainId, tai64, }; @@ -140,7 +138,7 @@ fn proto_v2_header_from_v2_header( let generated = application.generated; ProtoV2Header { - da_height: saturating_u64_to_u32(application.da_height.0), + da_height: application.da_height.0, consensus_parameters_version: application.consensus_parameters_version, state_transition_bytecode_version: application.state_transition_bytecode_version, transactions_count: u32::from(generated.transactions_count), @@ -151,7 +149,7 @@ fn proto_v2_header_from_v2_header( tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), prev_root: bytes32_to_vec(&consensus.prev_root), height: u32::from(consensus.height), - time: consensus.time.0.to_be_bytes().to_vec(), + time: consensus.time.0, application_hash: bytes32_to_vec(&consensus.generated.application_hash), block_id: Some(block_id.as_slice().to_vec()), } @@ -210,10 +208,6 @@ fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { bytes.as_ref().to_vec() } -fn saturating_u64_to_u32(value: u64) -> u32 { - value.min(u32::MAX as 
u64) as u32 -} - #[cfg(test)] pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { let versioned_block = proto_block @@ -231,7 +225,7 @@ pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { } }; let txs = match versioned_block { - VersionedBlock::V1(v1_inner) => v1_inner + ProtoVersionedBlock::V1(v1_inner) => v1_inner .transactions .iter() .map(tx_from_proto_tx) @@ -259,7 +253,7 @@ pub fn partial_header_from_proto_header( Ok(partial_header) } -pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { +pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result { todo!() } @@ -278,8 +272,7 @@ pub fn proto_header_to_empty_application_header( Ok(app_header) } Some(ProtoVersionedHeader::V2(header)) => { - cfg!(feature = "fault-proving"); - { + if cfg!(feature = "fault-proving") { let app_header = ApplicationHeader { da_height: DaBlockHeight::from(header.da_height), consensus_parameters_version: header.consensus_parameters_version, @@ -287,10 +280,8 @@ pub fn proto_header_to_empty_application_header( .state_transition_bytecode_version, generated: Empty {}, }; - return Ok(app_header); - } - cfg!(not(feature = "fault-proving")); - { + Ok(app_header) + } else { Err(anyhow!("V2 headers require the 'fault-proving' feature")) .map_err(Error::Serialization) } @@ -317,8 +308,7 @@ pub fn proto_header_to_empty_consensus_header( Ok(consensus_header) } Some(ProtoVersionedHeader::V2(header)) => { - cfg!(feature = "fault-proving"); - { + if cfg!(feature = "fault-proving") { let consensus_header = ConsensusHeader { prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) .ok_or(Error::Serialization(anyhow!( @@ -328,10 +318,8 @@ pub fn proto_header_to_empty_consensus_header( time: tai64::Tai64(header.time), generated: Empty {}, }; - return Ok(consensus_header); - } - cfg!(not(feature = "fault-proving")); - { + Ok(consensus_header) + } else { Err(anyhow!("V2 headers require the 'fault-proving' feature")) .map_err(Error::Serialization) } 
@@ -341,6 +329,7 @@ pub fn proto_header_to_empty_consensus_header( } } +#[allow(non_snake_case)] #[cfg(test)] mod tests { use super::*; diff --git a/crates/types/src/blockchain/header/v2.rs b/crates/types/src/blockchain/header/v2.rs index 9aea3a2a614..ae0f093d000 100644 --- a/crates/types/src/blockchain/header/v2.rs +++ b/crates/types/src/blockchain/header/v2.rs @@ -155,6 +155,11 @@ impl BlockHeaderV2 { self.recalculate_metadata(); } + pub(crate) fn set_message_outbox_root(&mut self, root: crate::fuel_tx::Bytes32) { + self.application_mut().generated.message_outbox_root = root; + self.recalculate_metadata(); + } + pub(crate) fn set_da_height( &mut self, da_height: crate::blockchain::primitives::DaBlockHeight, From 44b3955c0d82e264aa8f21a8d71f047eb00c6114 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 17 Oct 2025 12:57:48 +0300 Subject: [PATCH 056/146] Make compilation happy --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 8ed99b99367..0068f829622 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "fault-proving")] +use crate::protobuf_types::V2Header as ProtoV2Header; use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ @@ -8,7 +10,6 @@ use crate::{ Transaction as ProtoTransaction, V1Block as ProtoV1Block, V1Header as ProtoV1Header, - V2Header as ProtoV2Header, block::VersionedBlock as ProtoVersionedBlock, header::VersionedHeader as ProtoVersionedHeader, transaction::Variant as ProtoTransactionVariant, From 3192e8ba2b48f89481297124b07190f19ad71c60 Mon Sep 17 00:00:00 2001 
From: Mitch Turner Date: Fri, 17 Oct 2025 13:08:01 +0300 Subject: [PATCH 057/146] Introduce prop test --- Cargo.lock | 1 + .../services/block_aggregator_api/Cargo.toml | 1 + .../serializer_adapter.rs | 46 ++++++++++++------- 3 files changed, 32 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17ef6eb3a99..dbeac38a2e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3505,6 +3505,7 @@ dependencies = [ "log", "num_enum", "postcard", + "proptest", "prost 0.14.1", "rand 0.8.5", "serde", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 82d882745a4..37d79ad60e4 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -41,6 +41,7 @@ tracing = { workspace = true } tonic-prost-build = { workspace = true } [dev-dependencies] +proptest = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 0068f829622..a68fce291d7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -334,24 +334,38 @@ pub fn proto_header_to_empty_consensus_header( #[cfg(test)] mod tests { use super::*; + use proptest::prelude::*; - #[test] - fn serialize_block__roundtrip() { - // given - let serializer = SerializerAdapter; - let mut fuel_block = FuelBlock::default(); - let transaction_tree = - fuel_core_types::fuel_merkle::binary::root_calculator::MerkleRootCalculator::new( - ); - let root = transaction_tree.root().into(); - 
fuel_block.header_mut().set_transaction_root(root); - fuel_block.header_mut().set_message_outbox_root(root); + prop_compose! { + fn arb_block()(_ in 1..100u32) -> FuelBlock { + let mut fuel_block = FuelBlock::default(); + let transaction_tree = + fuel_core_types::fuel_merkle::binary::root_calculator::MerkleRootCalculator::new( + ); + let root = transaction_tree.root().into(); + fuel_block.header_mut().set_transaction_root(root); + fuel_block.header_mut().set_message_outbox_root(root); + fuel_block + } + } + + proptest! { + #[test] + fn serialize_block__roundtrip(block in arb_block()) { + // given + let serializer = SerializerAdapter; - // when - let proto_block = serializer.serialize_block(&fuel_block).unwrap(); + // when + let proto_block = serializer.serialize_block(&block).unwrap(); - // then - let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); - assert_eq!(fuel_block, deserialized_block); + // then + let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); + assert_eq!(block, deserialized_block); + + } } + + #[test] + #[ignore] + fn _dummy() {} } From efcbca5e13b78dc3bd579b6e5a98aa82a2b5ee93 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 17 Oct 2025 17:50:36 +0300 Subject: [PATCH 058/146] WIP --- Cargo.lock | 1 + .../serializer_adapter.txt | 7 + .../serializer_adapter.rs | 222 ++++++++++++++---- crates/types/Cargo.toml | 1 + crates/types/src/blockchain/header.rs | 14 ++ crates/types/src/test_helpers.rs | 46 ++++ 6 files changed, 250 insertions(+), 41 deletions(-) create mode 100644 crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt diff --git a/Cargo.lock b/Cargo.lock index dbeac38a2e2..a7a72e988aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4307,6 +4307,7 @@ dependencies = [ "k256", "parking_lot", "postcard", + "proptest", "rand 0.8.5", "secrecy", "serde", diff --git 
a/crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt b/crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt new file mode 100644 index 00000000000..45867ce5dfb --- /dev/null +++ b/crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc 3d8a1dc0826956e2454ff1a3d6b8d75c5b5b0eebe2986c5668745ffb2bb9b0e4 # shrinks to block = V1(BlockV1 { header: V1(BlockHeaderV1 { application: ApplicationHeader { da_height: DaBlockHeight(0), consensus_parameters_version: 0, state_transition_bytecode_version: 31, generated: GeneratedApplicationFieldsV1 { transactions_count: 0, message_receipt_count: 0, transactions_root: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, message_outbox_root: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, event_inbox_root: 0000000000000000000000000000000000000000000000000000000000000000 } }, consensus: ConsensusHeader { prev_root: 0000000000000000000000000000000000000000000000000000000000000000, height: 00000000, time: Tai64(4611686018427387914), generated: GeneratedConsensusFields { application_hash: cda084575be17b88d98713807263d2f5b4ffbe79ba9a4fbf544bf6493a1d641a } }, metadata: Some(BlockHeaderMetadata { id: BlockId(c636fad695fad5e9211cd08b2cb66c024d7b972572cb1005c6ab56aeca4f34b4) }) }), transactions: [Script(ChargeableTransaction { body: ScriptBody { script_gas_limit: 0, receipts_root: 0000000000000000000000000000000000000000000000000000000000000000, script: ScriptCode { bytes: Bytes(24400000) }, script_data: Bytes() }, policies: 
Policies { bits: PoliciesBits(WitnessLimit | Maturity), values: [0, 10000, 0, 0, 0, 0] }, inputs: [], outputs: [], witnesses: [], metadata: None })] }) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index a68fce291d7..0ed08a21e2e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -25,6 +25,7 @@ use fuel_core_types::blockchain::header::BlockHeaderV2; #[cfg(all(test, feature = "fault-proving"))] use fuel_core_types::fuel_types::ChainId; +use crate::protobuf_types::Policies; use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, @@ -44,8 +45,10 @@ use fuel_core_types::{ }, fuel_tx::{ Bytes32, + Script, Transaction as FuelTransaction, field::{ + ChargeableBody, Policies as _, ReceiptsRoot as _, Script as _, @@ -53,7 +56,11 @@ use fuel_core_types::{ ScriptGasLimit as _, Witnesses as _, }, - policies::PolicyType, + policies::{ + Policies as FuelPolicies, + PoliciesBits, + PolicyType, + }, }, tai64, }; @@ -186,22 +193,28 @@ fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { fn proto_policies_from_policies( policies: &fuel_core_types::fuel_tx::policies::Policies, ) -> ProtoPolicies { - const POLICY_ORDER: [PolicyType; 5] = [ - PolicyType::Tip, - PolicyType::WitnessLimit, - PolicyType::Maturity, - PolicyType::MaxFee, - PolicyType::Expiration, - ]; - - let values = POLICY_ORDER - .iter() - .map(|policy_type| policies.get(*policy_type).unwrap_or_default()) - .collect(); - + let mut values = [0u64; 5]; + if policies.is_set(PolicyType::Tip) { + values[0] = policies.get(PolicyType::Tip).unwrap_or_default(); + } + if policies.is_set(PolicyType::WitnessLimit) { + let value = policies.get(PolicyType::WitnessLimit).unwrap_or_default(); + values[1] = value; 
+ } + if policies.is_set(PolicyType::Maturity) { + let value = policies.get(PolicyType::Maturity).unwrap_or_default(); + values[2] = value; + } + if policies.is_set(PolicyType::MaxFee) { + values[3] = policies.get(PolicyType::MaxFee).unwrap_or_default(); + } + if policies.is_set(PolicyType::Expiration) { + values[4] = policies.get(PolicyType::Expiration).unwrap_or_default(); + } + let bits = policies.bits(); ProtoPolicies { - bits: policies.bits(), - values, + bits, + values: values.to_vec(), } } @@ -255,7 +268,133 @@ pub fn partial_header_from_proto_header( } pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result { - todo!() + match &_proto_tx.variant { + Some(ProtoTransactionVariant::Script(_proto_script)) => { + let ProtoScriptTx { + script_gas_limit, + receipts_root, + script, + script_data, + policies, + inputs, + outputs, + witnesses, + metadata, + } = _proto_script.clone(); + // gas_limit: Word, + // script: Vec, + // script_data: Vec, + // policies: Policies, + // inputs: Vec, + // outputs: Vec, + // witnesses: Vec, + let fuel_policies = policies + .map(policies_from_proto_policies) + .unwrap_or_default(); + let script_tx = FuelTransaction::script( + script_gas_limit, + script, + script_data, + fuel_policies, + vec![], + vec![], + vec![], + ); + + Ok(FuelTransaction::Script(script_tx)) + } + _ => { + Err(anyhow!("Unsupported transaction variant")).map_err(Error::Serialization) + } + } +} + +// /// Sets the `gas_price` policy. +// pub fn with_tip(mut self, tip: Word) -> Self { +// self.set(PolicyType::Tip, Some(tip)); +// self +// } +// +// /// Sets the `witness_limit` policy. +// pub fn with_witness_limit(mut self, witness_limit: Word) -> Self { +// self.set(PolicyType::WitnessLimit, Some(witness_limit)); +// self +// } +// +// /// Sets the `maturity` policy. +// pub fn with_maturity(mut self, maturity: BlockHeight) -> Self { +// self.set(PolicyType::Maturity, Some(*maturity.deref() as u64)); +// self +// } +// +// /// Sets the `expiration` policy. 
+// pub fn with_expiration(mut self, expiration: BlockHeight) -> Self { +// self.set(PolicyType::Expiration, Some(*expiration.deref() as u64)); +// self +// } +// +// /// Sets the `max_fee` policy. +// pub fn with_max_fee(mut self, max_fee: Word) -> Self { +// self.set(PolicyType::MaxFee, Some(max_fee)); +// self +// } +// +// /// Sets the `owner` policy. +// pub fn with_owner(mut self, owner: Word) -> Self { +// self.set(PolicyType::Owner, Some(owner)); +// self +// } +// +// bitflags::bitflags! { +// /// See https://github.com/FuelLabs/fuel-specs/blob/master/src/tx-format/policy.md#policy +// #[derive(Clone, Copy, Default, Debug, PartialEq, Eq, Hash)] +// #[derive(serde::Serialize, serde::Deserialize)] +// pub struct PoliciesBits: u32 { +// /// If set, the gas price is present in the policies. +// const Tip = 1 << 0; +// /// If set, the witness limit is present in the policies. +// const WitnessLimit = 1 << 1; +// /// If set, the maturity is present in the policies. +// const Maturity = 1 << 2; +// /// If set, the max fee is present in the policies. +// const MaxFee = 1 << 3; +// /// If set, the expiration is present in the policies. +// const Expiration = 1 << 4; +// /// If set, the owner is present in the policies. 
+// const Owner = 1 << 5; +// } +// } +fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { + let ProtoPolicies { bits, values } = proto_policies; + let mut policies = FuelPolicies::default(); + let bits = + PoliciesBits::from_bits(bits).expect("Should be able to create from `u32`"); + if bits.contains(PoliciesBits::Tip) { + if let Some(tip) = values.get(0) { + policies.set(PolicyType::Tip, Some(*tip)); + } + } + if bits.contains(PoliciesBits::WitnessLimit) { + if let Some(witness_limit) = values.get(1) { + policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); + } + } + if bits.contains(PoliciesBits::Maturity) { + if let Some(maturity) = values.get(2) { + policies.set(PolicyType::Maturity, Some(*maturity)); + } + } + if bits.contains(PoliciesBits::MaxFee) { + if let Some(max_fee) = values.get(3) { + policies.set(PolicyType::MaxFee, Some(*max_fee)); + } + } + if bits.contains(PoliciesBits::Expiration) { + if let Some(expiration) = values.get(4) { + policies.set(PolicyType::Expiration, Some(*expiration)); + } + } + policies } pub fn proto_header_to_empty_application_header( @@ -334,36 +473,37 @@ pub fn proto_header_to_empty_consensus_header( #[cfg(test)] mod tests { use super::*; + use fuel_core_types::{ + fuel_tx::{ + Blob, + Create, + Mint, + Script, + Upgrade, + Upload, + }, + test_helpers::arb_block, + }; use proptest::prelude::*; - prop_compose! { - fn arb_block()(_ in 1..100u32) -> FuelBlock { - let mut fuel_block = FuelBlock::default(); - let transaction_tree = - fuel_core_types::fuel_merkle::binary::root_calculator::MerkleRootCalculator::new( - ); - let root = transaction_tree.root().into(); - fuel_block.header_mut().set_transaction_root(root); - fuel_block.header_mut().set_message_outbox_root(root); - fuel_block - } - } - proptest! { - #[test] - fn serialize_block__roundtrip(block in arb_block()) { - // given - let serializer = SerializerAdapter; + #![proptest_config(ProptestConfig { + cases: 1, .. 
ProptestConfig::default() + })] + #[test] + fn serialize_block__roundtrip(block in arb_block()) { + // given + let serializer = SerializerAdapter; - // when - let proto_block = serializer.serialize_block(&block).unwrap(); + // when + let proto_block = serializer.serialize_block(&block).unwrap(); - // then - let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); - assert_eq!(block, deserialized_block); + // then + let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); + assert_eq!(block, deserialized_block); - } - } + } + } #[test] #[ignore] diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index ae85771e9de..ca7f8adaa92 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -53,6 +53,7 @@ fuel-vm-private = { workspace = true, default-features = false, features = [ ] } k256 = { version = "0.13", default-features = false, features = ["ecdsa"] } parking_lot = { workspace = true } +proptest = { workspace = true } rand = { workspace = true, optional = true } secrecy = "0.8" serde = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 9b5d19da3e1..1a0689d87a2 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -133,6 +133,20 @@ impl BlockHeader { } } + /// Setter for the transactions count + #[cfg(feature = "test-helpers")] + pub fn set_transactions_count(&mut self, count: u16) { + match self { + BlockHeader::V1(header) => { + header.application_mut().generated.transactions_count = count + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.application_mut().generated.transactions_count = count + } + } + } + /// Getter for the message receipt count pub fn message_receipt_count(&self) -> u32 { match self { diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 3885007eb5d..ba827a961f1 100644 --- 
a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -1,9 +1,13 @@ use crate::{ + blockchain::block::Block, + fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ ContractId, Create, Finalizable, Output, + Script, + Transaction, TransactionBuilder, }, fuel_vm::{ @@ -11,6 +15,7 @@ use crate::{ Salt, }, }; +use proptest::prelude::*; use rand::Rng; /// Helper function to create a contract creation transaction @@ -36,3 +41,44 @@ pub fn create_contract( .finalize(); (tx, contract_id) } + +// pub enum Transaction { +// Script(Script), +// Create(Create), +// Mint(Mint), +// Upgrade(Upgrade), +// Upload(Upload), +// Blob(Blob), +// } +#[allow(unused)] +fn arb_txs() -> impl Strategy> { + let tx_strategy = prop_oneof![ + 1 => arb_script_tx(), + ]; + + prop::collection::vec(tx_strategy, 1..2) +} + +prop_compose! { + fn arb_script_tx()(_: u32) -> Transaction { + let script = Script::default(); + Transaction::Script(script) + } +} + +prop_compose! { + /// Generate arbitrary blocks with a variable number of transactions + pub fn arb_block()(_ in 1..100u32, txs in arb_txs()) -> Block { + let mut fuel_block = Block::default(); + *fuel_block.transactions_mut() = txs; + let count = fuel_block.transactions().len() as u16; + fuel_block.header_mut().set_transactions_count(count); + let transaction_tree = + MerkleRootCalculator::new( + ); + let root = transaction_tree.root().into(); + fuel_block.header_mut().set_transaction_root(root); + fuel_block.header_mut().set_message_outbox_root(root); + fuel_block + } +} From bb8e87799867e67ab9cc30d96748b46ce70cfe5e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Sat, 18 Oct 2025 10:31:53 +0300 Subject: [PATCH 059/146] Rename script tx to be more verbose --- crates/services/block_aggregator_api/proto/api.proto | 4 ++-- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto 
b/crates/services/block_aggregator_api/proto/api.proto index 1679220ee3e..b3c009bbdba 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -144,7 +144,7 @@ message V2Header { //} message Transaction { oneof variant { - ScriptTx script = 1; + ScriptTransaction script = 1; // CreateTx create = 2; // MintTx mint = 3; // UpgradeTx upgrade = 4; @@ -181,7 +181,7 @@ message Transaction { //pub struct ScriptMetadata { // pub script_data_offset: usize, //} -message ScriptTx { +message ScriptTransaction { uint64 script_gas_limit = 1; bytes receipts_root = 2; bytes script = 3; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 725eef253ef..c8e24282fc6 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -6,7 +6,7 @@ use crate::{ Block as ProtoBlock, Header as ProtoHeader, Policies as ProtoPolicies, - ScriptTx as ProtoScriptTx, + ScriptTransaction as ProtoScriptTx, Transaction as ProtoTransaction, V1Block as ProtoV1Block, V1Header as ProtoV1Header, From 99db222049ccfc2b39281de43e69bdcec32b3355 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Sat, 18 Oct 2025 11:15:38 +0300 Subject: [PATCH 060/146] Add txs to prop test blocks --- crates/types/src/blockchain/header.rs | 3 ++- crates/types/src/test_helpers.rs | 29 ++++++++++++++++++--------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 1a0689d87a2..fc4ffd0e533 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -720,7 +720,8 @@ impl PartialBlockHeader { } } -fn generate_txns_root(transactions: 
&[Transaction]) -> Bytes32 { +/// Generate the transactions root from the list of transactions. +pub fn generate_txns_root(transactions: &[Transaction]) -> Bytes32 { let transaction_ids = transactions.iter().map(|tx| tx.to_bytes()); // Generate the transaction merkle root. let mut transaction_tree = diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index ba827a961f1..3fb24baa187 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -1,10 +1,14 @@ use crate::{ - blockchain::block::Block, + blockchain::{ + block::Block, + header::generate_txns_root, + }, fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ ContractId, Create, Finalizable, + MessageId, Output, Script, Transaction, @@ -67,18 +71,25 @@ prop_compose! { } prop_compose! { - /// Generate arbitrary blocks with a variable number of transactions - pub fn arb_block()(_ in 1..100u32, txs in arb_txs()) -> Block { + /// Generate an arbitrary block with a variable number of transactions + pub fn arb_block()(txs in arb_txs()) -> Block { let mut fuel_block = Block::default(); *fuel_block.transactions_mut() = txs; let count = fuel_block.transactions().len() as u16; fuel_block.header_mut().set_transactions_count(count); - let transaction_tree = - MerkleRootCalculator::new( - ); - let root = transaction_tree.root().into(); - fuel_block.header_mut().set_transaction_root(root); - fuel_block.header_mut().set_message_outbox_root(root); + let tx_root = generate_txns_root(fuel_block.transactions()); + fuel_block.header_mut().set_transaction_root(tx_root); + let ids: Vec = Vec::new(); + let msg_root = ids + .iter() + .fold(MerkleRootCalculator::new(), |mut tree, id| { + tree.push(id.as_ref()); + tree + }) + .root() + .into(); + fuel_block.header_mut().set_message_outbox_root(msg_root); + fuel_block.header_mut().recalculate_metadata(); fuel_block } } From 56c648c223a9d774350b06f1c1cc8084d322e33f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: 
Sat, 18 Oct 2025 11:33:54 +0300 Subject: [PATCH 061/146] Add msg ids --- .../serializer_adapter.rs | 12 ++++++---- crates/types/src/blockchain/header.rs | 13 +++++++++++ crates/types/src/blockchain/header/v1.rs | 5 +++++ crates/types/src/test_helpers.rs | 22 +++++++++++++++---- 4 files changed, 44 insertions(+), 8 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index c9d908544ef..8416d0d308f 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -45,6 +45,7 @@ use fuel_core_types::{ }, fuel_tx::{ Bytes32, + MessageId, Script, Transaction as FuelTransaction, field::{ @@ -223,7 +224,10 @@ fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { } #[cfg(test)] -pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { +pub fn fuel_block_from_protobuf( + proto_block: ProtoBlock, + msg_ids: &[MessageId], +) -> Result { let versioned_block = proto_block .versioned_block .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) @@ -248,7 +252,7 @@ pub fn fuel_block_from_protobuf(proto_block: ProtoBlock) -> Result { FuelBlock::new( partial_header, txs, - &[], + msg_ids, Bytes32::default(), #[cfg(feature = "fault-proving")] &ChainId::default(), @@ -491,7 +495,7 @@ mod tests { cases: 1, .. 
ProptestConfig::default() })] #[test] - fn serialize_block__roundtrip(block in arb_block()) { + fn serialize_block__roundtrip((block, msg_ids) in arb_block()) { // given let serializer = SerializerAdapter; @@ -499,7 +503,7 @@ mod tests { let proto_block = serializer.serialize_block(&block).unwrap(); // then - let deserialized_block = fuel_block_from_protobuf(proto_block).unwrap(); + let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids).unwrap(); assert_eq!(block, deserialized_block); } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index fc4ffd0e533..3aafb6305dd 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -315,6 +315,19 @@ impl BlockHeader { } } + /// Set the message receipt count + pub fn set_message_receipt_count(&mut self, count: u32) { + match self { + BlockHeader::V1(header) => { + header.set_message_receipt_count(count); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_message_outbox_count(count); + } + } + } + /// Set the consensus parameters version pub fn set_consensus_parameters_version( &mut self, diff --git a/crates/types/src/blockchain/header/v1.rs b/crates/types/src/blockchain/header/v1.rs index 316127410ca..0ad5059a6d8 100644 --- a/crates/types/src/blockchain/header/v1.rs +++ b/crates/types/src/blockchain/header/v1.rs @@ -137,6 +137,11 @@ impl BlockHeaderV1 { self.recalculate_metadata(); } + pub(crate) fn set_message_receipt_count(&mut self, count: u32) { + self.application_mut().generated.message_receipt_count = count; + self.recalculate_metadata(); + } + pub(crate) fn set_da_height( &mut self, da_height: crate::blockchain::primitives::DaBlockHeight, diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 3fb24baa187..26394a2cf0f 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -70,17 +70,30 @@ prop_compose! { } } +prop_compose! 
{ + fn arb_msg_id()(inner in any::<[u8; 32]>()) -> MessageId { + MessageId::new(inner) + } +} + +#[allow(unused)] +fn arb_msg_ids() -> impl Strategy> { + prop::collection::vec(arb_msg_id(), 0..10usize) +} + prop_compose! { /// Generate an arbitrary block with a variable number of transactions - pub fn arb_block()(txs in arb_txs()) -> Block { + pub fn arb_block()( + txs in arb_txs(), + msg_ids in arb_msg_ids(), + ) -> (Block, Vec) { let mut fuel_block = Block::default(); *fuel_block.transactions_mut() = txs; let count = fuel_block.transactions().len() as u16; fuel_block.header_mut().set_transactions_count(count); let tx_root = generate_txns_root(fuel_block.transactions()); fuel_block.header_mut().set_transaction_root(tx_root); - let ids: Vec = Vec::new(); - let msg_root = ids + let msg_root = msg_ids .iter() .fold(MerkleRootCalculator::new(), |mut tree, id| { tree.push(id.as_ref()); @@ -89,7 +102,8 @@ prop_compose! { .root() .into(); fuel_block.header_mut().set_message_outbox_root(msg_root); + fuel_block.header_mut().set_message_receipt_count(msg_ids.len() as u32); fuel_block.header_mut().recalculate_metadata(); - fuel_block + (fuel_block, msg_ids) } } From 709f6b85914d5067e10e7933fe77cd022c87751f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Sat, 18 Oct 2025 13:25:41 +0300 Subject: [PATCH 062/146] Fill in more of script --- .../serializer_adapter.rs | 33 ++++++------ crates/types/src/test_helpers.rs | 54 ++++++++++++++++++- 2 files changed, 69 insertions(+), 18 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 8416d0d308f..8ea40fb69cf 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -46,6 +46,7 @@ use fuel_core_types::{ fuel_tx::{ 
Bytes32, MessageId, + Receipt, Script, Transaction as FuelTransaction, field::{ @@ -152,14 +153,14 @@ fn proto_v2_header_from_v2_header( state_transition_bytecode_version: application.state_transition_bytecode_version, transactions_count: u32::from(generated.transactions_count), message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), - message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), - prev_root: bytes32_to_vec(&consensus.prev_root), + transactions_root: bytes32_to_bytes(&generated.transactions_root), + message_outbox_root: bytes32_to_bytes(&generated.message_outbox_root), + event_inbox_root: bytes32_to_bytes(&generated.event_inbox_root), + tx_id_commitment: bytes32_to_bytes(&generated.tx_id_commitment), + prev_root: bytes32_to_bytes(&consensus.prev_root), height: u32::from(consensus.height), time: consensus.time.0, - application_hash: bytes32_to_vec(&consensus.generated.application_hash), + application_hash: bytes32_to_bytes(&consensus.generated.application_hash), block_id: Some(block_id.as_slice().to_vec()), } } @@ -285,25 +286,25 @@ pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result witnesses, metadata, } = _proto_script.clone(); - // gas_limit: Word, - // script: Vec, - // script_data: Vec, - // policies: Policies, - // inputs: Vec, - // outputs: Vec, - // witnesses: Vec, let fuel_policies = policies .map(policies_from_proto_policies) .unwrap_or_default(); - let script_tx = FuelTransaction::script( + let mut script_tx = FuelTransaction::script( script_gas_limit, - script, - script_data, + script.to_vec(), + script_data.to_vec(), fuel_policies, vec![], vec![], vec![], ); + *script_tx.receipts_root_mut() = Bytes32::try_from(receipts_root.as_ref()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert receipts_root to 
Bytes32: {}", + e + )) + })?; Ok(FuelTransaction::Script(script_tx)) } diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 26394a2cf0f..9c68cc56559 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -13,7 +13,16 @@ use crate::{ Script, Transaction, TransactionBuilder, + field::{ + Policies as _, + ReceiptsRoot, + Script as _, + ScriptData as _, + ScriptGasLimit, + }, + policies::Policies, }, + fuel_types::BlockHeight, fuel_vm::{ Contract, Salt, @@ -63,13 +72,54 @@ fn arb_txs() -> impl Strategy> { prop::collection::vec(tx_strategy, 1..2) } +// pub(crate) body: Body, +// pub(crate) policies: Policies, +// pub(crate) inputs: Vec, +// pub(crate) outputs: Vec, +// pub(crate) witnesses: Vec, +// pub(crate) metadata: Option>, +// body +// pub(crate) script_gas_limit: Word, +// pub(crate) receipts_root: Bytes32, +// pub(crate) script: ScriptCode, +// pub(crate) script_data: Bytes, prop_compose! { - fn arb_script_tx()(_: u32) -> Transaction { - let script = Script::default(); + fn arb_script_tx()( + script_gas_limit in 1..10000u64, + recipts_root in any::<[u8; 32]>(), + script_bytes in prop::collection::vec(any::(), 0..100), + script_data in prop::collection::vec(any::(), 0..100), + policies in arb_policies(), + // inputs in arb_inputs(), + // outputs in arb_outputs(), + // witnesses in arb_witnesses(), + ) -> Transaction { + let mut script = Script::default(); + *script.script_gas_limit_mut() = script_gas_limit; + *script.receipts_root_mut() = recipts_root.into(); + *script.script_mut() = script_bytes; + *script.script_data_mut() = script_data.into(); + *script.policies_mut() = policies; + // *script.inputs_mut() = inputs; + // *script.outputs_mut() = outputs; + // *script.witnesses_mut() = witnesses; + Transaction::Script(script) } } +prop_compose! 
{ + fn arb_policies()( + maturity in prop::option::of(0..100u32), + ) -> Policies { + let mut policies = Policies::new(); + if let Some(inner) = maturity { + policies = policies.with_maturity(BlockHeight::new(inner)); + } + policies + } +} + prop_compose! { fn arb_msg_id()(inner in any::<[u8; 32]>()) -> MessageId { MessageId::new(inner) From b1cb80b283b11fa3507fc1cdfaccec1989a8afec Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Sat, 18 Oct 2025 16:45:05 +0300 Subject: [PATCH 063/146] Add Input :) --- .../block_aggregator_api/proto/api.proto | 7 +- .../serializer_adapter.rs | 119 +++++++++++++++++- crates/types/src/test_helpers.rs | 93 +++++++++++++- 3 files changed, 213 insertions(+), 6 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 13f6f2722ae..be9633421c3 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -261,7 +261,7 @@ message CoinSignedInput { bytes owner = 2; uint64 amount = 3; bytes asset_id = 4; - bytes tx_pointer = 5; + TxPointer tx_pointer = 5; uint32 witness_index = 6; uint64 predicate_gas_used = 7; bytes predicate = 8; @@ -476,6 +476,11 @@ message UtxoId { uint32 output_index = 2; } +message TxPointer { + uint32 block_height = 1; + uint32 tx_index = 2; +} + // #[derive(Debug, Clone, PartialEq, Eq, Hash)] //pub struct ChargeableMetadata { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 8ea40fb69cf..5649c3952e4 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -4,10 +4,13 @@ use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ Block as ProtoBlock, 
+ CoinSignedInput as ProtoCoinSignedInput, Header as ProtoHeader, + Input as ProtoInput, Policies as ProtoPolicies, ScriptTransaction as ProtoScriptTx, Transaction as ProtoTransaction, + UtxoId as ProtoUtxoId, V1Block as ProtoV1Block, V1Header as ProtoV1Header, block::VersionedBlock as ProtoVersionedBlock, @@ -25,7 +28,6 @@ use fuel_core_types::blockchain::header::BlockHeaderV2; #[cfg(all(test, feature = "fault-proving"))] use fuel_core_types::fuel_types::ChainId; -use crate::protobuf_types::Policies; use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, @@ -44,13 +46,19 @@ use fuel_core_types::{ }, }, fuel_tx::{ + Address, Bytes32, + Input, MessageId, Receipt, Script, Transaction as FuelTransaction, + UtxoId, field::{ ChargeableBody, + Inputs, + Maturity, + Owner, Policies as _, ReceiptsRoot as _, Script as _, @@ -174,7 +182,12 @@ fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { script: script.script().clone(), script_data: script.script_data().clone(), policies: Some(proto_policies_from_policies(script.policies())), - inputs: Vec::new(), + inputs: script + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), outputs: Vec::new(), witnesses: script .witnesses() @@ -192,6 +205,33 @@ fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { } } +fn proto_input_from_input(input: Input) -> ProtoInput { + match input { + Input::CoinSigned(coin_signed) => ProtoInput { + variant: Some(crate::protobuf_types::input::Variant::CoinSigned( + ProtoCoinSignedInput { + utxo_id: Some(ProtoUtxoId { + tx_id: coin_signed.utxo_id.tx_id().as_ref().to_vec(), + output_index: coin_signed.utxo_id.output_index().into(), + }), + owner: coin_signed.owner.as_ref().to_vec(), + amount: coin_signed.amount, + asset_id: coin_signed.asset_id.as_ref().to_vec(), + tx_pointer: Some(crate::protobuf_types::TxPointer { + block_height: coin_signed.tx_pointer.block_height().into(), + tx_index: coin_signed.tx_pointer.tx_index().into(), + }), + 
witness_index: coin_signed.witness_index.into(), + predicate_gas_used: 0, + predicate: vec![], + predicate_data: vec![], + }, + )), + }, + _ => ProtoInput { variant: None }, + } +} + fn proto_policies_from_policies( policies: &fuel_core_types::fuel_tx::policies::Policies, ) -> ProtoPolicies { @@ -294,7 +334,10 @@ pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result script.to_vec(), script_data.to_vec(), fuel_policies, - vec![], + inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?, vec![], vec![], ); @@ -314,6 +357,76 @@ pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result } } +fn input_from_proto_input(proto_input: &ProtoInput) -> Result { + match &proto_input.variant { + Some(crate::protobuf_types::input::Variant::CoinSigned(proto_coin_signed)) => { + let proto_utxo_id = proto_coin_signed + .utxo_id + .as_ref() + .ok_or(Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = UtxoId::new( + Bytes32::try_from(proto_utxo_id.tx_id.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert tx_id to Bytes32: {}", + e + )) + })?, + proto_utxo_id.output_index.try_into().map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert output_index to u8: {}", + e + )) + })?, + ); + let owner = + Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let amount = proto_coin_signed.amount; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_signed.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_index: u16 = u16::try_from( + proto_coin_signed + .tx_pointer + .ok_or(Error::Serialization(anyhow!("Missing tx_pointer")))? 
+ .tx_index, + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer = fuel_core_types::fuel_tx::TxPointer::new( + proto_coin_signed + .tx_pointer + .as_ref() + .map(|tp| tp.block_height.into()) + .unwrap_or_default(), + tx_index, + ); + let witness_index = + proto_coin_signed.witness_index.try_into().map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to Specification::Witness: {}", + e + )) + })?; + + let input = Input::coin_signed( + utxo_id, + owner, + amount, + asset_id, + tx_pointer, + witness_index, + ); + Ok(input) + } + _ => Err(anyhow!("Unsupported input variant")).map_err(Error::Serialization), + } +} + // /// Sets the `gas_price` policy. // pub fn with_tip(mut self, tip: Word) -> Self { // self.set(PolicyType::Tip, Some(tip)); diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 9c68cc56559..16d96c34a89 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -8,18 +8,23 @@ use crate::{ ContractId, Create, Finalizable, + Input, MessageId, Output, Script, Transaction, TransactionBuilder, + TxPointer, + UtxoId, field::{ + Inputs, Policies as _, ReceiptsRoot, Script as _, ScriptData as _, ScriptGasLimit, }, + input::coin::CoinSigned, policies::Policies, }, fuel_types::BlockHeight, @@ -90,7 +95,7 @@ prop_compose! { script_bytes in prop::collection::vec(any::(), 0..100), script_data in prop::collection::vec(any::(), 0..100), policies in arb_policies(), - // inputs in arb_inputs(), + inputs in arb_inputs(), // outputs in arb_outputs(), // witnesses in arb_witnesses(), ) -> Transaction { @@ -100,7 +105,7 @@ prop_compose! { *script.script_mut() = script_bytes; *script.script_data_mut() = script_data.into(); *script.policies_mut() = policies; - // *script.inputs_mut() = inputs; + *script.inputs_mut() = inputs; // *script.outputs_mut() = outputs; // *script.witnesses_mut() = witnesses; @@ -126,6 +131,90 @@ prop_compose! 
{ } } +#[allow(unused)] +fn arb_inputs() -> impl Strategy> { + // pub enum Input { + // CoinSigned(CoinSigned), + // CoinPredicate(CoinPredicate), + // Contract(Contract), + // MessageCoinSigned(MessageCoinSigned), + // MessageCoinPredicate(MessageCoinPredicate), + // MessageDataSigned(MessageDataSigned), + // MessageDataPredicate(MessageDataPredicate), + // } + let strategy = prop_oneof![arb_coin_signed(),]; + prop::collection::vec(strategy, 0..10) +} + +prop_compose! { + // pub utxo_id: UtxoId, + // pub owner: Address, + // pub amount: Word, + // pub asset_id: AssetId, + // pub tx_pointer: TxPointer, + // pub witness_index: Specification::Witness, + // pub predicate_gas_used: Specification::PredicateGasUsed, + // pub predicate: Specification::Predicate, + // pub predicate_data: Specification::PredicateData, + // type Predicate = Empty; + // type PredicateData = Empty; + // type PredicateGasUsed = Empty; + // type Witness = u16; + fn arb_coin_signed()( + utxo_id in arb_utxo_id(), + owner in arb_address(), + amount in 1..1_000_000u64, + asset_id in arb_asset_id(), + tx_pointer in arb_tx_pointer(), + witness_index in 0..1000u16, + ) -> Input { + let inner = CoinSigned { + utxo_id, + owner, + amount, + asset_id, + tx_pointer, + witness_index, + predicate_gas_used: Default::default(), + predicate: Default::default(), + predicate_data: Default::default(), + }; + Input::CoinSigned(inner) + } +} + +prop_compose! { + fn arb_utxo_id()( + inner in any::<[u8; 32]>(), + index in any::(), + ) -> UtxoId { + let tx_id = inner.into(); + UtxoId::new(tx_id, index) + } +} + +prop_compose! { + fn arb_address()(inner in any::<[u8; 32]>()) -> crate::fuel_types::Address { + crate::fuel_types::Address::new(inner) + } +} + +prop_compose! { + fn arb_asset_id()(inner in any::<[u8; 32]>()) -> crate::fuel_types::AssetId { + crate::fuel_types::AssetId::new(inner) + } +} + +prop_compose! 
{ + fn arb_tx_pointer()( + block_height in 0..1_000_000u32, + tx_index in 0..1_000u16, + ) -> TxPointer { + let block_height = block_height.into(); + TxPointer::new(block_height, tx_index) + } +} + #[allow(unused)] fn arb_msg_ids() -> impl Strategy> { prop::collection::vec(arb_msg_id(), 0..10usize) From 36451c4dd986dc12846723acdf937d3ea074c4a9 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 20 Oct 2025 10:55:23 +0300 Subject: [PATCH 064/146] Add todo and issue for missing types --- crates/services/block_aggregator_api/proto/api.proto | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index b3c009bbdba..e95f1399a91 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -142,6 +142,8 @@ message V2Header { // Upload(Upload), // Blob(Blob), //} +// TODO: implement other transaction types +// https://github.com/FuelLabs/fuel-core/issues/3122 message Transaction { oneof variant { ScriptTransaction script = 1; From caa3e331b987456ff8c856feb0d5aacb4ef139ce Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 20 Oct 2025 12:23:36 +0300 Subject: [PATCH 065/146] Add more header fields --- .../serializer_adapter.rs | 7 +- crates/types/src/blockchain/header.rs | 29 ++++++ crates/types/src/blockchain/header/v1.rs | 5 ++ crates/types/src/test_helpers.rs | 90 +++++++++++++++++-- 4 files changed, 123 insertions(+), 8 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 5649c3952e4..fa7442b532a 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -268,6 +268,7 @@ fn 
bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { pub fn fuel_block_from_protobuf( proto_block: ProtoBlock, msg_ids: &[MessageId], + event_inbox_root: Bytes32, ) -> Result { let versioned_block = proto_block .versioned_block @@ -294,7 +295,7 @@ pub fn fuel_block_from_protobuf( partial_header, txs, msg_ids, - Bytes32::default(), + event_inbox_root, #[cfg(feature = "fault-proving")] &ChainId::default(), ) @@ -609,7 +610,7 @@ mod tests { cases: 1, .. ProptestConfig::default() })] #[test] - fn serialize_block__roundtrip((block, msg_ids) in arb_block()) { + fn serialize_block__roundtrip((block, msg_ids, event_inbox_root) in arb_block()) { // given let serializer = SerializerAdapter; @@ -617,7 +618,7 @@ mod tests { let proto_block = serializer.serialize_block(&block).unwrap(); // then - let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids).unwrap(); + let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); assert_eq!(block, deserialized_block); } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 3aafb6305dd..107ce2ee6a5 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -328,6 +328,19 @@ impl BlockHeader { } } + /// Set the event inbox root for the header + pub fn set_event_inbox_root(&mut self, root: Bytes32) { + match self { + BlockHeader::V1(header) => { + header.set_event_inbox_root(root); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_event_inbox_root(root); + } + } + } + /// Set the consensus parameters version pub fn set_consensus_parameters_version( &mut self, @@ -344,6 +357,22 @@ impl BlockHeader { } } + /// Set the state transition bytecode version + pub fn set_state_transition_bytecode_version( + &mut self, + version: StateTransitionBytecodeVersion, + ) { + match self { + BlockHeader::V1(header) => { + header.set_stf_version(version); + } + 
#[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_stf_version(version); + } + } + } + /// Set the stf version pub fn set_stf_version(&mut self, version: StateTransitionBytecodeVersion) { match self { diff --git a/crates/types/src/blockchain/header/v1.rs b/crates/types/src/blockchain/header/v1.rs index 0ad5059a6d8..ac1e76cb7fd 100644 --- a/crates/types/src/blockchain/header/v1.rs +++ b/crates/types/src/blockchain/header/v1.rs @@ -142,6 +142,11 @@ impl BlockHeaderV1 { self.recalculate_metadata(); } + pub(crate) fn set_event_inbox_root(&mut self, event_inbox_root: Bytes32) { + self.application_mut().generated.event_inbox_root = event_inbox_root; + self.recalculate_metadata(); + } + pub(crate) fn set_da_height( &mut self, da_height: crate::blockchain::primitives::DaBlockHeight, diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 16d96c34a89..09cbe575c36 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -1,10 +1,15 @@ use crate::{ blockchain::{ block::Block, - header::generate_txns_root, + header::{ + GeneratedConsensusFields, + generate_txns_root, + }, + primitives::DaBlockHeight, }, fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ + Bytes32, ContractId, Create, Finalizable, @@ -35,6 +40,7 @@ use crate::{ }; use proptest::prelude::*; use rand::Rng; +use tai64::Tai64; /// Helper function to create a contract creation transaction /// from a given contract bytecode. @@ -220,16 +226,85 @@ fn arb_msg_ids() -> impl Strategy> { prop::collection::vec(arb_msg_id(), 0..10usize) } +prop_compose! 
{ + fn arb_consensus_header()( + prev_root in any::<[u8; 32]>(), + time in any::(), + application_hash in any::<[u8; 32]>(), + ) -> crate::blockchain::header::ConsensusHeader { + let mut consensus_header = crate::blockchain::header::ConsensusHeader::default(); + consensus_header.height = BlockHeight::new(0); + consensus_header.prev_root = prev_root.into(); + consensus_header.time = Tai64(time); + let generated = GeneratedConsensusFields { + application_hash: application_hash.into(), + }; + consensus_header.generated = generated; + consensus_header + } +} + +// message V1Header { +// uint64 da_height = 1; +// uint32 consensus_parameters_version = 2; +// uint32 state_transition_bytecode_version = 3; +// uint32 transactions_count = 4; +// uint32 message_receipt_count = 5; +// bytes transactions_root = 6; +// bytes message_outbox_root = 7; +// bytes event_inbox_root = 8; +// bytes prev_root = 9; +// uint32 height = 10; +// uint64 time = 11; +// bytes application_hash = 12; +// optional bytes block_id = 13; +// } prop_compose! 
{ /// Generate an arbitrary block with a variable number of transactions pub fn arb_block()( txs in arb_txs(), + // + da_height in any::(), + consensus_parameter_version in any::(), + state_transition_bytecode_version in any::(), + // msg_ids in arb_msg_ids(), - ) -> (Block, Vec) { + event_root in any::<[u8; 32]>(), + time in any::(), + height in any::(), + consensus_header in arb_consensus_header(), + ) -> (Block, Vec, Bytes32) { + // pub struct BlockV1 { + // header: BlockHeader, + // transactions: Vec, + // } let mut fuel_block = Block::default(); *fuel_block.transactions_mut() = txs; + // pub struct BlockHeaderV1 { + // pub(crate) application: ApplicationHeader, + // pub(crate) consensus: ConsensusHeader, + // pub(crate) metadata: Option, + // } + // pub struct ApplicationHeader { + // pub da_height: DaBlockHeight, + // pub consensus_parameters_version: ConsensusParametersVersion, + // pub state_transition_bytecode_version: StateTransitionBytecodeVersion, + // pub generated: Generated, + // } + fuel_block.header_mut().set_da_height(DaBlockHeight(da_height)); + fuel_block.header_mut().set_consensus_parameters_version(consensus_parameter_version); + fuel_block.header_mut().set_state_transition_bytecode_version(state_transition_bytecode_version); + + // pub struct GeneratedApplicationFieldsV1 { + // pub transactions_count: u16, + // pub message_receipt_count: u32, + // pub transactions_root: Bytes32, + // pub message_outbox_root: Bytes32, + // pub event_inbox_root: Bytes32, + // } let count = fuel_block.transactions().len() as u16; fuel_block.header_mut().set_transactions_count(count); + fuel_block.header_mut().set_message_receipt_count(msg_ids.len() as u32); let tx_root = generate_txns_root(fuel_block.transactions()); fuel_block.header_mut().set_transaction_root(tx_root); let msg_root = msg_ids @@ -240,9 +315,14 @@ prop_compose! 
{ }) .root() .into(); + let event_root = event_root.into(); + fuel_block.header_mut().set_event_inbox_root(event_root); + + // + fuel_block.header_mut().set_time(Tai64(time)); + fuel_block.header_mut().set_block_height(BlockHeight::new(height)); + fuel_block.header_mut().set_consensus_header(consensus_header); fuel_block.header_mut().set_message_outbox_root(msg_root); - fuel_block.header_mut().set_message_receipt_count(msg_ids.len() as u32); - fuel_block.header_mut().recalculate_metadata(); - (fuel_block, msg_ids) + (fuel_block, msg_ids, event_root) } } From 6009157d6e579d4168b1705a71bae18113f125e0 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 20 Oct 2025 13:00:04 +0300 Subject: [PATCH 066/146] Add full header stuff --- crates/types/src/blockchain/header.rs | 9 +++++++ crates/types/src/test_helpers.rs | 39 ++++++++++++++------------- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 107ce2ee6a5..727b28972a2 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -79,6 +79,15 @@ impl BlockHeader { } } + /// Get the application portion of the header. + pub fn application(&self) -> &ApplicationHeader { + match self { + BlockHeader::V1(header) => header.application(), + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => header.application(), + } + } + /// Get the consensus portion of the header. pub fn consensus(&self) -> &ConsensusHeader { match self { diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 09cbe575c36..ab4b5f2c867 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -227,19 +227,20 @@ fn arb_msg_ids() -> impl Strategy> { } prop_compose! 
{ + // pub struct ConsensusHeader { + // pub prev_root: Bytes32, + // pub height: BlockHeight, + // pub time: Tai64, + // pub generated: Generated, + // } fn arb_consensus_header()( prev_root in any::<[u8; 32]>(), time in any::(), - application_hash in any::<[u8; 32]>(), ) -> crate::blockchain::header::ConsensusHeader { let mut consensus_header = crate::blockchain::header::ConsensusHeader::default(); consensus_header.height = BlockHeight::new(0); consensus_header.prev_root = prev_root.into(); consensus_header.time = Tai64(time); - let generated = GeneratedConsensusFields { - application_hash: application_hash.into(), - }; - consensus_header.generated = generated; consensus_header } } @@ -263,28 +264,30 @@ prop_compose! { /// Generate an arbitrary block with a variable number of transactions pub fn arb_block()( txs in arb_txs(), - // da_height in any::(), consensus_parameter_version in any::(), state_transition_bytecode_version in any::(), - // msg_ids in arb_msg_ids(), event_root in any::<[u8; 32]>(), - time in any::(), - height in any::(), - consensus_header in arb_consensus_header(), + mut consensus_header in arb_consensus_header(), ) -> (Block, Vec, Bytes32) { // pub struct BlockV1 { // header: BlockHeader, // transactions: Vec, // } let mut fuel_block = Block::default(); + + // include txs first to be included in calculations *fuel_block.transactions_mut() = txs; + + // Header // pub struct BlockHeaderV1 { // pub(crate) application: ApplicationHeader, // pub(crate) consensus: ConsensusHeader, // pub(crate) metadata: Option, // } + + // Application // pub struct ApplicationHeader { // pub da_height: DaBlockHeight, // pub consensus_parameters_version: ConsensusParametersVersion, @@ -303,10 +306,6 @@ prop_compose! 
{ // pub event_inbox_root: Bytes32, // } let count = fuel_block.transactions().len() as u16; - fuel_block.header_mut().set_transactions_count(count); - fuel_block.header_mut().set_message_receipt_count(msg_ids.len() as u32); - let tx_root = generate_txns_root(fuel_block.transactions()); - fuel_block.header_mut().set_transaction_root(tx_root); let msg_root = msg_ids .iter() .fold(MerkleRootCalculator::new(), |mut tree, id| { @@ -315,14 +314,18 @@ prop_compose! { }) .root() .into(); + let tx_root = generate_txns_root(fuel_block.transactions()); let event_root = event_root.into(); + fuel_block.header_mut().set_transactions_count(count); + fuel_block.header_mut().set_message_receipt_count(msg_ids.len() as u32); + fuel_block.header_mut().set_transaction_root(tx_root); + fuel_block.header_mut().set_message_outbox_root(msg_root); fuel_block.header_mut().set_event_inbox_root(event_root); - // - fuel_block.header_mut().set_time(Tai64(time)); - fuel_block.header_mut().set_block_height(BlockHeight::new(height)); + // Consensus + let application_hash = fuel_block.header().application().hash(); + consensus_header.generated.application_hash = application_hash; fuel_block.header_mut().set_consensus_header(consensus_header); - fuel_block.header_mut().set_message_outbox_root(msg_root); (fuel_block, msg_ids, event_root) } } From 511026609fc9670b00ecf9253ede92d3328e8d7a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 20 Oct 2025 13:34:22 +0300 Subject: [PATCH 067/146] wip add more inputs --- .../serializer_adapter.rs | 20 ++++++++++ crates/types/src/test_helpers.rs | 38 ++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index fa7442b532a..a17626a6c58 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -360,6 +360,15 @@ pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result fn input_from_proto_input(proto_input: &ProtoInput) -> Result { match &proto_input.variant { + // pub enum Variant { + // CoinSigned(super::CoinSignedInput), + // CoinPredicate(super::CoinPredicateInput), + // Contract(super::ContractInput), + // MessageCoinSigned(super::MessageCoinSignedInput), + // MessageCoinPredicate(super::MessageCoinPredicateInput), + // MessageDataSigned(super::MessageDataSignedInput), + // MessageDataPredicate(super::MessageDataPredicateInput), + // } Some(crate::protobuf_types::input::Variant::CoinSigned(proto_coin_signed)) => { let proto_utxo_id = proto_coin_signed .utxo_id @@ -424,6 +433,17 @@ fn input_from_proto_input(proto_input: &ProtoInput) -> Result { ); Ok(input) } + Some(crate::protobuf_types::input::Variant::CoinPredicate( + proto_coin_predicate, + )) => Err(anyhow!( + "CoinPredicate input deserialization not implemented" + )) + .map_err(Error::Serialization), + + Some(crate::protobuf_types::input::Variant::Message(proto_message)) => { + Err(anyhow!("Message input deserialization not implemented")) + .map_err(Error::Serialization) + } _ => Err(anyhow!("Unsupported input variant")).map_err(Error::Serialization), } } diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index ab4b5f2c867..83fe919ce6a 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -119,6 +119,16 @@ prop_compose! { } } +// prop_compose! { +// fn arb_create_tx()( +// contract_code in prop::collection::vec(any::(), 0..100), +// ) -> Create { +// let mut create = Create::default(); +// *create.contract_code_mut() = contract_code.into(); +// create +// } +// } + prop_compose! 
{ fn arb_policies()( maturity in prop::option::of(0..100u32), @@ -148,7 +158,7 @@ fn arb_inputs() -> impl Strategy> { // MessageDataSigned(MessageDataSigned), // MessageDataPredicate(MessageDataPredicate), // } - let strategy = prop_oneof![arb_coin_signed(),]; + let strategy = prop_oneof![arb_coin_signed(), arb_coin_predicate(),]; prop::collection::vec(strategy, 0..10) } @@ -189,6 +199,32 @@ prop_compose! { } } +prop_compose! { + fn arb_coin_predicate()( + utxo_id in arb_utxo_id(), + owner in arb_address(), + amount in 1..1_000_000u64, + asset_id in arb_asset_id(), + tx_pointer in arb_tx_pointer(), + predicate_gas_used in any::(), + predicate in prop::collection::vec(any::(), 0..100), + predicate_data in prop::collection::vec(any::(), 0..100), + ) -> Input { + let inner = crate::fuel_tx::input::coin::CoinPredicate { + utxo_id, + owner, + amount, + asset_id, + tx_pointer, + witness_index: Default::default(), + predicate_gas_used, + predicate: predicate.into(), + predicate_data: predicate_data.into(), + }; + Input::CoinPredicate(inner) + } +} + prop_compose! 
{ fn arb_utxo_id()( inner in any::<[u8; 32]>(), From 2b4dd0bc18eacb2ae08424463ebe8e1fa215acb2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 11:28:58 +0300 Subject: [PATCH 068/146] fix merge conflicts --- Cargo.lock | 37 ++----------------- Cargo.toml | 2 +- bin/fuel-core/Cargo.toml | 2 +- bin/fuel-core/src/cli/run/rpc.rs | 4 +- crates/fuel-core/Cargo.toml | 2 +- .../database_description/block_aggregator.rs | 2 +- tests/Cargo.toml | 2 +- 7 files changed, 11 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42f65eb798d..2636573c7da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3557,36 +3557,6 @@ dependencies = [ "strum 0.24.1", ] -[[package]] -name = "fuel-block-aggregator-api" -version = "0.46.0" -dependencies = [ - "anyhow", - "async-trait", - "bytes", - "enum-iterator", - "fuel-core-services", - "fuel-core-storage", - "fuel-core-types 0.46.0", - "futures", - "log", - "num_enum", - "postcard", - "prost 0.14.1", - "rand 0.8.5", - "serde", - "strum 0.25.0", - "strum_macros 0.25.3", - "thiserror 2.0.12", - "tokio", - "tokio-stream", - "tonic 0.14.2", - "tonic-prost", - "tonic-prost-build", - "tracing", - "tracing-subscriber", -] - [[package]] name = "fuel-compression" version = "0.65.0" @@ -3612,8 +3582,8 @@ dependencies = [ "cosmrs", "derive_more 0.99.20", "enum-iterator", - "fuel-block-aggregator-api", "fuel-core", + "fuel-core-block-aggregator-api", "fuel-core-chain-config", "fuel-core-compression-service", "fuel-core-consensus-module", @@ -3727,8 +3697,8 @@ dependencies = [ "const_format", "dirs 4.0.0", "dotenvy", - "fuel-block-aggregator-api", "fuel-core", + "fuel-core-block-aggregator-api", "fuel-core-chain-config", "fuel-core-metrics", "fuel-core-poa", @@ -3767,6 +3737,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-types 0.47.1", "futures", + "log", "num_enum", "postcard", "prost 0.14.1", @@ -4265,10 +4236,10 @@ dependencies = [ "clap", "cynic", "ethers", - "fuel-block-aggregator-api", "fuel-core", 
"fuel-core-benches", "fuel-core-bin", + "fuel-core-block-aggregator-api", "fuel-core-client", "fuel-core-compression", "fuel-core-compression-service", diff --git a/Cargo.toml b/Cargo.toml index 4d1b3abc57f..16a8366d5e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ educe = { version = "0.6", default-features = false, features = [ enum-iterator = "1.2" enum_dispatch = "0.3.13" # Workspace members -fuel-block-aggregator-api = { version = "0.47.1", path = "crates/services/block_aggregator_api" } +fuel-core-block-aggregator-api = { version = "0.47.1", path = "crates/services/block_aggregator_api" } fuel-core = { version = "0.47.1", path = "./crates/fuel-core", default-features = false } fuel-core-bin = { version = "0.47.1", path = "./bin/fuel-core" } fuel-core-chain-config = { version = "0.47.1", path = "./crates/chain-config", default-features = false } diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index a62b009e408..5aa1be41574 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -55,7 +55,7 @@ clap = { workspace = true, features = ["derive", "env", "string"] } const_format = { version = "0.2", optional = true } dirs = "4.0" dotenvy = { version = "0.15", optional = true } -fuel-block-aggregator-api = { workspace = true } +fuel-core-block-aggregator-api = { workspace = true } fuel-core = { workspace = true, features = ["wasm-executor"] } fuel-core-chain-config = { workspace = true } fuel-core-metrics = { workspace = true } diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index f54297ba0cc..324cc8daee5 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -13,8 +13,8 @@ pub struct RpcArgs { } impl RpcArgs { - pub fn into_config(self) -> fuel_block_aggregator_api::integration::Config { - fuel_block_aggregator_api::integration::Config { + pub fn into_config(self) -> fuel_core_block_aggregator_api::integration::Config { + 
fuel_core_block_aggregator_api::integration::Config { addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), } } diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 92d781d3f99..0a3eaded254 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -60,7 +60,7 @@ clap = { workspace = true, features = ["derive"] } cosmrs = { version = "0.21", optional = true } derive_more = { version = "0.99" } enum-iterator = { workspace = true } -fuel-block-aggregator-api = { workspace = true } +fuel-core-block-aggregator-api = { workspace = true } fuel-core-chain-config = { workspace = true, features = ["std"] } fuel-core-compression-service = { workspace = true } fuel-core-consensus-module = { workspace = true } diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs index 42dde184136..2d55678552f 100644 --- a/crates/fuel-core/src/database/database_description/block_aggregator.rs +++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs @@ -1,5 +1,5 @@ use crate::database::database_description::DatabaseDescription; -use fuel_block_aggregator_api::db::storage_db::table::Column; +use fuel_core_block_aggregator_api::db::storage_db::table::Column; use fuel_core_types::fuel_types::BlockHeight; #[derive(Clone, Copy, Debug)] diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 82f4b1a4a70..c8be1e95735 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -38,7 +38,7 @@ aws-sdk-kms = { version = "1.37.0", optional = true } clap = { workspace = true } cynic = { workspace = true } ethers = "2" -fuel-block-aggregator-api = { version = "0.46.0", path = "../crates/services/block_aggregator_api" } +fuel-core-block-aggregator-api = { workspace = true } fuel-core = { path = "../crates/fuel-core", default-features = false, features = [ "p2p", "relayer", From f3429440a3fd83bef120a907644147eb591ccae7 Mon Sep 17 00:00:00 2001 From: 
Mitch Turner Date: Tue, 21 Oct 2025 11:33:41 +0300 Subject: [PATCH 069/146] Lint tomls --- Cargo.toml | 4 ++-- bin/fuel-core/Cargo.toml | 2 +- tests/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 16a8366d5e7..ddeec08f331 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,10 +81,10 @@ educe = { version = "0.6", default-features = false, features = [ ] } enum-iterator = "1.2" enum_dispatch = "0.3.13" -# Workspace members -fuel-core-block-aggregator-api = { version = "0.47.1", path = "crates/services/block_aggregator_api" } fuel-core = { version = "0.47.1", path = "./crates/fuel-core", default-features = false } fuel-core-bin = { version = "0.47.1", path = "./bin/fuel-core" } +# Workspace members +fuel-core-block-aggregator-api = { version = "0.47.1", path = "crates/services/block_aggregator_api" } fuel-core-chain-config = { version = "0.47.1", path = "./crates/chain-config", default-features = false } fuel-core-client = { version = "0.47.1", path = "./crates/client" } fuel-core-compression = { version = "0.47.1", path = "./crates/compression" } diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index 5aa1be41574..9bd49c25fea 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -55,8 +55,8 @@ clap = { workspace = true, features = ["derive", "env", "string"] } const_format = { version = "0.2", optional = true } dirs = "4.0" dotenvy = { version = "0.15", optional = true } -fuel-core-block-aggregator-api = { workspace = true } fuel-core = { workspace = true, features = ["wasm-executor"] } +fuel-core-block-aggregator-api = { workspace = true } fuel-core-chain-config = { workspace = true } fuel-core-metrics = { workspace = true } fuel-core-poa = { workspace = true, optional = true } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index c8be1e95735..d68dcf22481 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -38,7 +38,6 @@ aws-sdk-kms = { version = "1.37.0", optional = true 
} clap = { workspace = true } cynic = { workspace = true } ethers = "2" -fuel-core-block-aggregator-api = { workspace = true } fuel-core = { path = "../crates/fuel-core", default-features = false, features = [ "p2p", "relayer", @@ -48,6 +47,7 @@ fuel-core = { path = "../crates/fuel-core", default-features = false, features = ] } fuel-core-benches = { path = "../benches" } fuel-core-bin = { path = "../bin/fuel-core", features = ["parquet", "p2p"] } +fuel-core-block-aggregator-api = { workspace = true } fuel-core-client = { path = "../crates/client", features = ["test-helpers"] } fuel-core-compression = { path = "../crates/compression" } fuel-core-compression-service = { path = "../crates/services/compression", features = [ From b89a4e3c25d82a6eeedabd406860a9d3d07caf16 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 11:54:27 +0300 Subject: [PATCH 070/146] Fix broken imports --- crates/fuel-core/src/service/config.rs | 4 ++-- crates/fuel-core/src/service/sub_services.rs | 4 ++-- tests/tests/rpc.rs | 22 +++++++++++--------- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index c310e6cf2c7..e2d0299bd58 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -82,7 +82,7 @@ pub struct Config { pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, #[cfg(feature = "rpc")] - pub rpc_config: fuel_block_aggregator_api::integration::Config, + pub rpc_config: fuel_core_block_aggregator_api::integration::Config, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] @@ -170,7 +170,7 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); #[cfg(feature = "rpc")] - let rpc_config = fuel_block_aggregator_api::integration::Config { + let rpc_config = fuel_core_block_aggregator_api::integration::Config { addr: 
free_local_addr(), }; diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index d7f5d3863d4..412ba2b4b56 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -9,7 +9,7 @@ use crate::service::adapters::consensus_module::poa::pre_confirmation_signature: tx_receiver::PreconfirmationsReceiver, }; #[cfg(feature = "rpc")] -use fuel_block_aggregator_api::{ +use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_db::StorageDB, }; @@ -466,7 +466,7 @@ pub fn init_sub_services( let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); - fuel_block_aggregator_api::integration::new_service( + fuel_core_block_aggregator_api::integration::new_service( &block_aggregator_config, db_adapter, serializer, diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 31d23706cf9..c2aecedecc1 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,9 +1,5 @@ #![allow(non_snake_case)] -use fuel_block_aggregator_api::api::protobuf_adapter::{ - block_aggregator_client::BlockAggregatorClient, - block_response::Payload, -}; use fuel_core::{ database::Database, service::{ @@ -11,6 +7,10 @@ use fuel_core::{ FuelService, }, }; +use fuel_core_block_aggregator_api::api::protobuf_adapter::{ + block_aggregator_client::BlockAggregatorClient, + block_response::Payload, +}; use fuel_core_client::client::FuelClient; use fuel_core_types::{ blockchain::block::Block, @@ -47,10 +47,11 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { let header = expected_block.header; // when - let request = fuel_block_aggregator_api::api::protobuf_adapter::BlockRangeRequest { - start: 1, - end: 1, - }; + let request = + fuel_core_block_aggregator_api::api::protobuf_adapter::BlockRangeRequest { + start: 1, + end: 1, + }; let actual_bytes = if let 
Some(Payload::Literal(block)) = rpc_client .get_block_range(request) .await @@ -102,7 +103,8 @@ async fn get_block_height__can_get_value_from_rpc() { .expect("could not connect to server"); // when - let request = fuel_block_aggregator_api::api::protobuf_adapter::BlockHeightRequest {}; + let request = + fuel_core_block_aggregator_api::api::protobuf_adapter::BlockHeightRequest {}; let expected_height = 1; let actual_height = rpc_client .get_block_height(request) @@ -134,7 +136,7 @@ async fn new_block_subscription__can_get_expect_block() { .expect("could not connect to server"); let request = - fuel_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; + fuel_core_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; let mut stream = rpc_client .new_block_subscription(request) .await From 1ca534654760f023ae6115aaafcd626d5358cb43 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 11:54:27 +0300 Subject: [PATCH 071/146] Fix broken imports --- crates/fuel-core/src/service/config.rs | 4 +- crates/fuel-core/src/service/sub_services.rs | 4 +- tests/tests/rpc.rs | 91 ++++++++++++-------- 3 files changed, 60 insertions(+), 39 deletions(-) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index c310e6cf2c7..e2d0299bd58 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -82,7 +82,7 @@ pub struct Config { pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, #[cfg(feature = "rpc")] - pub rpc_config: fuel_block_aggregator_api::integration::Config, + pub rpc_config: fuel_core_block_aggregator_api::integration::Config, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] @@ -170,7 +170,7 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); #[cfg(feature = "rpc")] - let rpc_config = 
fuel_block_aggregator_api::integration::Config { + let rpc_config = fuel_core_block_aggregator_api::integration::Config { addr: free_local_addr(), }; diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index d7f5d3863d4..412ba2b4b56 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -9,7 +9,7 @@ use crate::service::adapters::consensus_module::poa::pre_confirmation_signature: tx_receiver::PreconfirmationsReceiver, }; #[cfg(feature = "rpc")] -use fuel_block_aggregator_api::{ +use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_db::StorageDB, }; @@ -466,7 +466,7 @@ pub fn init_sub_services( let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); - fuel_block_aggregator_api::integration::new_service( + fuel_core_block_aggregator_api::integration::new_service( &block_aggregator_config, db_adapter, serializer, diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index c232afeecc1..c2aecedecc1 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,14 +1,5 @@ #![allow(non_snake_case)] -use fuel_block_aggregator_api::protobuf_types::{ - BlockHeightRequest as ProtoBlockHeightRequest, - BlockRangeRequest as ProtoBlockRangeRequest, - NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - block::VersionedBlock as ProtoVersionedBlock, - block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, - block_response::Payload as ProtoPayload, - header::VersionedHeader as ProtoVersionedHeader, -}; use fuel_core::{ database::Database, service::{ @@ -16,8 +7,16 @@ use fuel_core::{ FuelService, }, }; +use fuel_core_block_aggregator_api::api::protobuf_adapter::{ + block_aggregator_client::BlockAggregatorClient, + block_response::Payload, +}; use fuel_core_client::client::FuelClient; -use 
fuel_core_types::fuel_tx::*; +use fuel_core_types::{ + blockchain::block::Block, + fuel_tx::*, + fuel_types::BlockHeight, +}; use futures::StreamExt; use test_helpers::client_ext::ClientExt; @@ -36,7 +35,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + let mut rpc_client = BlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); @@ -45,11 +44,15 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .unwrap() .unwrap(); - let expected_header = expected_block.header; + let header = expected_block.header; // when - let request = ProtoBlockRangeRequest { start: 1, end: 1 }; - let actual_block = if let Some(ProtoPayload::Literal(block)) = rpc_client + let request = + fuel_core_block_aggregator_api::api::protobuf_adapter::BlockRangeRequest { + start: 1, + end: 1, + }; + let actual_bytes = if let Some(Payload::Literal(block)) = rpc_client .get_block_range(request) .await .unwrap() @@ -60,17 +63,23 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .unwrap() .payload { - block + block.data } else { panic!("expected literal block payload"); }; - let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { - ProtoVersionedHeader::V1(v1_header) => v1_header.height, - ProtoVersionedHeader::V2(v2_header) => v2_header.height, - }; + let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); + // then - assert_eq!(expected_header.height.0, actual_height); + assert_eq!( + BlockHeight::from(header.height.0), + *actual_block.header().height() + ); + // check txs + let actual_tx = actual_block.transactions().first().unwrap(); + let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); + 
let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); + + assert_eq!(&expected_tx, actual_tx); } #[tokio::test(flavor = "multi_thread")] @@ -89,12 +98,13 @@ async fn get_block_height__can_get_value_from_rpc() { let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + let mut rpc_client = BlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); // when - let request = ProtoBlockHeightRequest {}; + let request = + fuel_core_block_aggregator_api::api::protobuf_adapter::BlockHeightRequest {}; let expected_height = 1; let actual_height = rpc_client .get_block_height(request) @@ -121,11 +131,12 @@ async fn new_block_subscription__can_get_expect_block() { let tx = Transaction::default_test_tx(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + let mut rpc_client = BlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); - let request = ProtoNewBlockSubscriptionRequest {}; + let request = + fuel_core_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; let mut stream = rpc_client .new_block_subscription(request) .await @@ -137,19 +148,29 @@ async fn new_block_subscription__can_get_expect_block() { let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) .await .unwrap(); - let actual_block = - if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { - block + let actual_bytes = + if let Some(Payload::Literal(block)) = next.unwrap().unwrap().payload { + block.data } else { panic!("expected literal block payload"); }; - let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { - ProtoVersionedHeader::V1(v1_header) => v1_header.height, - 
ProtoVersionedHeader::V2(v2_header) => v2_header.height, - }; // then - let expected_height = 1; - assert_eq!(expected_height, actual_height); + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let header = expected_block.header; + let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); + assert_eq!( + BlockHeight::from(header.height.0), + *actual_block.header().height() + ); + // check txs + let actual_tx = actual_block.transactions().first().unwrap(); + let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); + let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); + + assert_eq!(&expected_tx, actual_tx); } From 03788f66916664e9a6e0dffe0a5fc8afbe848a0e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 16:09:19 +0300 Subject: [PATCH 072/146] Fix integ tests --- tests/Cargo.toml | 2 +- tests/tests/rpc.rs | 90 ++++++++++++++++++---------------------------- 2 files changed, 36 insertions(+), 56 deletions(-) diff --git a/tests/Cargo.toml b/tests/Cargo.toml index e97a9d2dd91..377d92b1740 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -19,7 +19,7 @@ aws-kms = ["dep:aws-config", "dep:aws-sdk-kms", "fuel-core-bin/aws-kms"] fault-proving = [ "fuel-core/fault-proving", "fuel-core-types/fault-proving", - "fuel-block-aggregator-api/fault-proving", + "fuel-core-block-aggregator-api/fault-proving", "fuel-core-storage/fault-proving", "fuel-core-upgradable-executor/fault-proving", "fuel-core-poa/fault-proving", diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index c2aecedecc1..aa6c564834b 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,5 +1,4 @@ #![allow(non_snake_case)] - use fuel_core::{ database::Database, service::{ @@ -7,16 +6,17 @@ use fuel_core::{ FuelService, }, }; -use fuel_core_block_aggregator_api::api::protobuf_adapter::{ - block_aggregator_client::BlockAggregatorClient, - block_response::Payload, +use 
fuel_core_block_aggregator_api::protobuf_types::{ + BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block::VersionedBlock as ProtoVersionedBlock, + block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, + header::VersionedHeader as ProtoVersionedHeader, }; use fuel_core_client::client::FuelClient; -use fuel_core_types::{ - blockchain::block::Block, - fuel_tx::*, - fuel_types::BlockHeight, -}; +use fuel_core_types::fuel_tx::*; use futures::StreamExt; use test_helpers::client_ext::ClientExt; @@ -35,7 +35,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); @@ -44,15 +44,11 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .unwrap() .unwrap(); - let header = expected_block.header; + let expected_header = expected_block.header; // when - let request = - fuel_core_block_aggregator_api::api::protobuf_adapter::BlockRangeRequest { - start: 1, - end: 1, - }; - let actual_bytes = if let Some(Payload::Literal(block)) = rpc_client + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let actual_block = if let Some(ProtoPayload::Literal(block)) = rpc_client .get_block_range(request) .await .unwrap() @@ -63,23 +59,18 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .unwrap() .payload { - block.data + block } else { panic!("expected literal block payload"); }; - let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); + let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); + let actual_height = match 
v1_block.header.unwrap().versioned_header.unwrap() { + ProtoVersionedHeader::V1(v1_header) => v1_header.height, + ProtoVersionedHeader::V2(v2_header) => v2_header.height, + }; // then - assert_eq!( - BlockHeight::from(header.height.0), - *actual_block.header().height() - ); - // check txs - let actual_tx = actual_block.transactions().first().unwrap(); - let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); - let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); - - assert_eq!(&expected_tx, actual_tx); + assert_eq!(expected_header.height.0, actual_height); } #[tokio::test(flavor = "multi_thread")] @@ -98,13 +89,12 @@ async fn get_block_height__can_get_value_from_rpc() { let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); // when - let request = - fuel_core_block_aggregator_api::api::protobuf_adapter::BlockHeightRequest {}; + let request = ProtoBlockHeightRequest {}; let expected_height = 1; let actual_height = rpc_client .get_block_height(request) @@ -131,12 +121,11 @@ async fn new_block_subscription__can_get_expect_block() { let tx = Transaction::default_test_tx(); let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = BlockAggregatorClient::connect(rpc_url) + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) .await .expect("could not connect to server"); - let request = - fuel_core_block_aggregator_api::api::protobuf_adapter::NewBlockSubscriptionRequest {}; + let request = ProtoNewBlockSubscriptionRequest {}; let mut stream = rpc_client .new_block_subscription(request) .await @@ -148,29 +137,20 @@ async fn new_block_subscription__can_get_expect_block() { let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) .await .unwrap(); - 
let actual_bytes = - if let Some(Payload::Literal(block)) = next.unwrap().unwrap().payload { - block.data + let actual_block = + if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { + block } else { panic!("expected literal block payload"); }; + let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); + let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { + ProtoVersionedHeader::V1(v1_header) => v1_header.height, + ProtoVersionedHeader::V2(v2_header) => v2_header.height, + }; + // then - let expected_block = graphql_client - .full_block_by_height(1) - .await - .unwrap() - .unwrap(); - let header = expected_block.header; - let actual_block: Block = postcard::from_bytes(&actual_bytes).unwrap(); - assert_eq!( - BlockHeight::from(header.height.0), - *actual_block.header().height() - ); - // check txs - let actual_tx = actual_block.transactions().first().unwrap(); - let expected_opaque_tx = expected_block.transactions.first().unwrap().to_owned(); - let expected_tx: Transaction = expected_opaque_tx.try_into().unwrap(); - - assert_eq!(&expected_tx, actual_tx); + let expected_height = 1; + assert_eq!(expected_height, actual_height); } From 04ad81d4ed0e80e4924f49620e77da5a9b2b4517 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 16:55:50 +0300 Subject: [PATCH 073/146] Lint tomls --- crates/services/block_aggregator_api/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 6a4d391b059..03342654df9 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -41,9 +41,9 @@ tracing = { workspace = true } tonic-prost-build = { workspace = true } [dev-dependencies] -proptest = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, 
features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } +proptest = { workspace = true } tokio-stream = { workspace = true } tracing-subscriber = { workspace = true } From 86eddd9905c7579baf02add0e6783d98f03b0dce Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 16:56:37 +0300 Subject: [PATCH 074/146] Fix spelling --- crates/types/src/test_helpers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 1b701cabee2..dd1a99208e9 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -97,7 +97,7 @@ fn arb_txs() -> impl Strategy> { prop_compose! { fn arb_script_tx()( script_gas_limit in 1..10000u64, - recipts_root in any::<[u8; 32]>(), + receipts_root in any::<[u8; 32]>(), script_bytes in prop::collection::vec(any::(), 0..100), script_data in prop::collection::vec(any::(), 0..100), policies in arb_policies(), @@ -107,7 +107,7 @@ prop_compose! 
{ ) -> Transaction { let mut script = Script::default(); *script.script_gas_limit_mut() = script_gas_limit; - *script.receipts_root_mut() = recipts_root.into(); + *script.receipts_root_mut() = receipts_root.into(); *script.script_mut() = script_bytes; *script.script_data_mut() = script_data.into(); *script.policies_mut() = policies; From 30092796d850a131a953ab901a69060c6ae7ed6a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 17:23:40 +0300 Subject: [PATCH 075/146] fix compilation --- .../serializer_adapter.rs | 1 + crates/types/src/test_helpers.rs | 17 +++++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 602babc6833..3e77b6a70ad 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -49,6 +49,7 @@ use fuel_core_types::{ Address, Bytes32, Input, + MessageId, Transaction as FuelTransaction, UtxoId, field::{ diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index dd1a99208e9..d3768973269 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -109,7 +109,7 @@ prop_compose! { *script.script_gas_limit_mut() = script_gas_limit; *script.receipts_root_mut() = receipts_root.into(); *script.script_mut() = script_bytes; - *script.script_data_mut() = script_data.into(); + *script.script_data_mut() = script_data; *script.policies_mut() = policies; *script.inputs_mut() = inputs; // *script.outputs_mut() = outputs; @@ -273,11 +273,12 @@ prop_compose! 
{ prev_root in any::<[u8; 32]>(), time in any::(), ) -> crate::blockchain::header::ConsensusHeader { - let mut consensus_header = crate::blockchain::header::ConsensusHeader::default(); - consensus_header.height = BlockHeight::new(0); - consensus_header.prev_root = prev_root.into(); - consensus_header.time = Tai64(time); - consensus_header + crate::blockchain::header::ConsensusHeader { + prev_root: prev_root.into(), + height: BlockHeight::new(0), + time: Tai64(time), + generated: GeneratedConsensusFields::default(), + } } } @@ -341,7 +342,7 @@ prop_compose! { // pub message_outbox_root: Bytes32, // pub event_inbox_root: Bytes32, // } - let count = fuel_block.transactions().len() as u16; + let count = fuel_block.transactions().len().try_into().expect("we shouldn't have more than u16::MAX transactions"); let msg_root = msg_ids .iter() .fold(MerkleRootCalculator::new(), |mut tree, id| { @@ -353,7 +354,7 @@ prop_compose! { let tx_root = generate_txns_root(fuel_block.transactions()); let event_root = event_root.into(); fuel_block.header_mut().set_transactions_count(count); - fuel_block.header_mut().set_message_receipt_count(msg_ids.len() as u32); + fuel_block.header_mut().set_message_receipt_count(msg_ids.len().try_into().expect("we shouldn't have more than u32::MAX messages")); fuel_block.header_mut().set_transaction_root(tx_root); fuel_block.header_mut().set_message_outbox_root(msg_root); fuel_block.header_mut().set_event_inbox_root(event_root); From bd0f7c32368f30a01bed32408861dd4321554d2a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 17:35:13 +0300 Subject: [PATCH 076/146] Cleanup from review feedback --- .../src/blocks/importer_and_db_source/sync_service.rs | 8 +++++++- crates/services/importer/src/importer.rs | 2 -- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index eedf3ac5e6d..af0cbf801be 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -99,6 +99,12 @@ where } Ok(txs) } + + // For now just have arbitrary 10 ms sleep to avoid busy looping. + // This could be more complicated with increasing backoff times, etc. + async fn go_to_sleep_before_continuing(&self) { + tokio::time::sleep(Duration::from_millis(10)).await; + } } impl RunnableTask for SyncTask @@ -135,7 +141,7 @@ where self.next_height = BlockHeight::from((*next_height).saturating_add(1)); } else { tracing::warn!("no block found at height {:?}, retrying", next_height); - tokio::time::sleep(Duration::from_millis(10)).await; + self.go_to_sleep_before_continuing().await; } TaskNextAction::Continue } diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index 791ba387681..1a0b758d93c 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -520,8 +520,6 @@ where } } } - let a = 100; - let _ = a; } /// Prepares the block for committing. 
It includes the execution of the block, From 7d0a3cce143860ecdcdddfbee39061d094cef91d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 21 Oct 2025 17:44:11 +0300 Subject: [PATCH 077/146] Remove unused --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 3e77b6a70ad..602babc6833 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -49,7 +49,6 @@ use fuel_core_types::{ Address, Bytes32, Input, - MessageId, Transaction as FuelTransaction, UtxoId, field::{ From 5b332918e3c4d17dea1583b4d7953dddf48e1795 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:01:07 -0700 Subject: [PATCH 078/146] Add full round trip prop tests with all the block components --- .../block_aggregator_api/proto/api.proto | 173 ++- .../serializer_adapter.rs | 1176 +++++++++++++++-- crates/types/src/test_helpers.rs | 873 ++++++++++-- 3 files changed, 1954 insertions(+), 268 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 4bb235e82d6..b478c8b69b2 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -134,24 +134,14 @@ message V2Header { optional bytes block_id = 14; } -// pub enum Transaction { -// Script(Script), -// Create(Create), -// Mint(Mint), -// Upgrade(Upgrade), -// Upload(Upload), -// Blob(Blob), -//} -// TODO: implement other transaction types -// https://github.com/FuelLabs/fuel-core/issues/3122 message Transaction { oneof variant { ScriptTransaction script = 1; -// CreateTx create = 2; -// MintTx mint 
= 3; -// UpgradeTx upgrade = 4; -// UploadTx upload = 5; -// BlobTx blob = 6; + CreateTransaction create = 2; + MintTransaction mint = 3; + UpgradeTransaction upgrade = 4; + UploadTransaction upload = 5; + BlobTransaction blob = 6; } } @@ -195,6 +185,59 @@ message ScriptTransaction { ScriptMetadata metadata = 9; } +message CreateTransaction { + uint32 bytecode_witness_index = 1; + bytes salt = 2; + repeated StorageSlot storage_slots = 3; + Policies policies = 4; + repeated Input inputs = 5; + repeated Output outputs = 6; + repeated bytes witnesses = 7; + CreateMetadata metadata = 8; +} + +message MintTransaction { + TxPointer tx_pointer = 1; + ContractInput input_contract = 2; + ContractOutput output_contract = 3; + uint64 mint_amount = 4; + bytes mint_asset_id = 5; + uint64 gas_price = 6; + MintMetadata metadata = 7; +} + +message UpgradeTransaction { + UpgradePurpose purpose = 1; + Policies policies = 2; + repeated Input inputs = 3; + repeated Output outputs = 4; + repeated bytes witnesses = 5; + UpgradeMetadata metadata = 6; +} + +message UploadTransaction { + bytes root = 1; + uint32 witness_index = 2; + uint32 subsection_index = 3; + uint32 subsections_number = 4; + repeated bytes proof_set = 5; + Policies policies = 6; + repeated Input inputs = 7; + repeated Output outputs = 8; + repeated bytes witnesses = 9; + UploadMetadata metadata = 10; +} + +message BlobTransaction { + bytes blob_id = 1; + uint32 witness_index = 2; + Policies policies = 3; + repeated Input inputs = 4; + repeated Output outputs = 5; + repeated bytes witnesses = 6; + BlobMetadata metadata = 7; +} + // pub struct Policies { // /// A bitmask that indicates what policies are set. 
// bits: PoliciesBits, @@ -281,7 +324,7 @@ message CoinPredicateInput { bytes owner = 2; uint64 amount = 3; bytes asset_id = 4; - bytes tx_pointer = 5; + TxPointer tx_pointer = 5; uint32 witness_index = 6; uint64 predicate_gas_used = 7; bytes predicate = 8; @@ -304,7 +347,7 @@ message ContractInput { UtxoId utxo_id = 1; bytes balance_root = 2; bytes state_root = 3; - bytes tx_pointer = 4; + TxPointer tx_pointer = 4; bytes contract_id = 5; } @@ -350,7 +393,7 @@ message MessageCoinSignedInput { bytes sender = 1; bytes recipient = 2; uint64 amount = 3; - uint32 nonce = 4; + bytes nonce = 4; uint32 witness_index = 5; uint64 predicate_gas_used = 6; bytes data = 7; @@ -369,7 +412,7 @@ message MessageCoinPredicateInput { bytes sender = 1; bytes recipient = 2; uint64 amount = 3; - uint32 nonce = 4; + bytes nonce = 4; uint32 witness_index = 5; uint64 predicate_gas_used = 6; bytes data = 7; @@ -382,7 +425,7 @@ message MessageDataSignedInput { bytes sender = 1; bytes recipient = 2; uint64 amount = 3; - uint32 nonce = 4; + bytes nonce = 4; uint32 witness_index = 5; uint64 predicate_gas_used = 6; bytes data = 7; @@ -396,7 +439,7 @@ message MessageDataPredicateInput { bytes sender = 1; bytes recipient = 2; uint64 amount = 3; - uint32 nonce = 4; + bytes nonce = 4; uint32 witness_index = 5; uint64 predicate_gas_used = 6; bytes data = 7; @@ -449,8 +492,9 @@ message CoinOutput { bytes asset_id = 3; } message ContractOutput { - bytes contract_id = 1; - bytes state_root = 2; + uint32 input_index = 1; + bytes balance_root = 2; + bytes state_root = 3; } message ChangeOutput { bytes to = 1; @@ -483,6 +527,11 @@ message TxPointer { uint32 tx_index = 2; } +message StorageSlot { + bytes key = 1; + bytes value = 2; +} + // #[derive(Debug, Clone, PartialEq, Eq, Hash)] //pub struct ChargeableMetadata { @@ -524,6 +573,84 @@ message ScriptMetadata { bytes script_data = 12; } +message CreateMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated 
PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; + bytes contract_id = 9; + bytes contract_root = 10; + bytes state_root = 11; +} + +message MintMetadata { + bytes id = 1; +} + +message UpgradePurpose { + oneof variant { + UpgradeConsensusParameters consensus_parameters = 1; + UpgradeStateTransition state_transition = 2; + } +} + +message UpgradeConsensusParameters { + uint32 witness_index = 1; + bytes checksum = 2; +} + +message UpgradeStateTransition { + bytes root = 1; +} + +message UpgradeMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; + oneof variant { + UpgradeConsensusParametersMetadata consensus_parameters = 9; + UpgradeStateTransitionMetadata state_transition = 10; + } +} + +message UpgradeConsensusParametersMetadata { + bytes consensus_parameters = 1; + bytes calculated_checksum = 2; +} + +message UpgradeStateTransitionMetadata {} + +message UploadMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; +} + +message BlobMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; +} + message PredicateOffset { optional InnerPredicateOffset offset = 1; } diff --git 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 602babc6833..195ba3547c6 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -3,19 +3,44 @@ use crate::protobuf_types::V2Header as ProtoV2Header; use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ + BlobTransaction as ProtoBlobTx, Block as ProtoBlock, + ChangeOutput as ProtoChangeOutput, + CoinOutput as ProtoCoinOutput, + CoinPredicateInput as ProtoCoinPredicateInput, CoinSignedInput as ProtoCoinSignedInput, + ContractCreatedOutput as ProtoContractCreatedOutput, + ContractInput as ProtoContractInput, + ContractOutput as ProtoContractOutput, + CreateTransaction as ProtoCreateTx, Header as ProtoHeader, Input as ProtoInput, + MessageCoinPredicateInput as ProtoMessageCoinPredicateInput, + MessageCoinSignedInput as ProtoMessageCoinSignedInput, + MessageDataPredicateInput as ProtoMessageDataPredicateInput, + MessageDataSignedInput as ProtoMessageDataSignedInput, + MintTransaction as ProtoMintTx, + Output as ProtoOutput, Policies as ProtoPolicies, ScriptTransaction as ProtoScriptTx, + StorageSlot as ProtoStorageSlot, Transaction as ProtoTransaction, + TxPointer as ProtoTxPointer, + UpgradeConsensusParameters as ProtoUpgradeConsensusParameters, + UpgradePurpose as ProtoUpgradePurpose, + UpgradeStateTransition as ProtoUpgradeStateTransition, + UpgradeTransaction as ProtoUpgradeTx, + UploadTransaction as ProtoUploadTx, UtxoId as ProtoUtxoId, V1Block as ProtoV1Block, V1Header as ProtoV1Header, + VariableOutput as ProtoVariableOutput, block::VersionedBlock as ProtoVersionedBlock, header::VersionedHeader as ProtoVersionedHeader, + input::Variant as ProtoInputVariant, + output::Variant as 
ProtoOutputVariant, transaction::Variant as ProtoTransactionVariant, + upgrade_purpose::Variant as ProtoUpgradePurposeVariant, }, result::{ Error, @@ -47,17 +72,40 @@ use fuel_core_types::{ }, fuel_tx::{ Address, + BlobBody, Bytes32, Input, + Output, + StorageSlot, Transaction as FuelTransaction, + TxPointer, + UpgradePurpose, + UploadBody, UtxoId, + Witness, field::{ + BlobId as _, + BytecodeRoot as _, + BytecodeWitnessIndex as _, + InputContract as _, Inputs, + MintAmount as _, + MintAssetId as _, + MintGasPrice as _, + OutputContract as _, + Outputs, Policies as _, + ProofSet as _, ReceiptsRoot as _, + Salt as _, Script as _, ScriptData as _, ScriptGasLimit as _, + StorageSlots as _, + SubsectionIndex as _, + SubsectionsNumber as _, + TxPointer as TxPointerField, + UpgradePurpose as UpgradePurposeField, Witnesses as _, }, policies::{ @@ -182,7 +230,12 @@ fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { .cloned() .map(proto_input_from_input) .collect(), - outputs: Vec::new(), + outputs: script + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), witnesses: script .witnesses() .iter() @@ -195,41 +248,367 @@ fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { variant: Some(ProtoTransactionVariant::Script(proto_script)), } } - _ => ProtoTransaction { variant: None }, + FuelTransaction::Create(create) => { + let proto_create = ProtoCreateTx { + bytecode_witness_index: u32::from(*create.bytecode_witness_index()), + salt: create.salt().as_ref().to_vec(), + storage_slots: create + .storage_slots() + .iter() + .map(proto_storage_slot_from_storage_slot) + .collect(), + policies: Some(proto_policies_from_policies(create.policies())), + inputs: create + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: create + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: create + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + 
.collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Create(proto_create)), + } + } + FuelTransaction::Mint(mint) => { + let proto_mint = ProtoMintTx { + tx_pointer: Some(proto_tx_pointer(mint.tx_pointer())), + input_contract: Some(proto_contract_input_from_contract( + mint.input_contract(), + )), + output_contract: Some(proto_contract_output_from_contract( + mint.output_contract(), + )), + mint_amount: *mint.mint_amount(), + mint_asset_id: mint.mint_asset_id().as_ref().to_vec(), + gas_price: *mint.gas_price(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Mint(proto_mint)), + } + } + FuelTransaction::Upgrade(upgrade) => { + let proto_upgrade = ProtoUpgradeTx { + purpose: Some(proto_upgrade_purpose(upgrade.upgrade_purpose())), + policies: Some(proto_policies_from_policies(upgrade.policies())), + inputs: upgrade + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: upgrade + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: upgrade + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upgrade(proto_upgrade)), + } + } + FuelTransaction::Upload(upload) => { + let proto_upload = ProtoUploadTx { + root: bytes32_to_vec(upload.bytecode_root()), + witness_index: u32::from(*upload.bytecode_witness_index()), + subsection_index: u32::from(*upload.subsection_index()), + subsections_number: u32::from(*upload.subsections_number()), + proof_set: upload.proof_set().iter().map(bytes32_to_vec).collect(), + policies: Some(proto_policies_from_policies(upload.policies())), + inputs: upload + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: upload + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: upload + .witnesses() + .iter() 
+ .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upload(proto_upload)), + } + } + FuelTransaction::Blob(blob) => { + let proto_blob = ProtoBlobTx { + blob_id: blob.blob_id().as_ref().to_vec(), + witness_index: u32::from(*blob.bytecode_witness_index()), + policies: Some(proto_policies_from_policies(blob.policies())), + inputs: blob + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: blob + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: blob + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Blob(proto_blob)), + } + } } } fn proto_input_from_input(input: Input) -> ProtoInput { match input { Input::CoinSigned(coin_signed) => ProtoInput { - variant: Some(crate::protobuf_types::input::Variant::CoinSigned( - ProtoCoinSignedInput { - utxo_id: Some(ProtoUtxoId { - tx_id: coin_signed.utxo_id.tx_id().as_ref().to_vec(), - output_index: coin_signed.utxo_id.output_index().into(), - }), - owner: coin_signed.owner.as_ref().to_vec(), - amount: coin_signed.amount, - asset_id: coin_signed.asset_id.as_ref().to_vec(), - tx_pointer: Some(crate::protobuf_types::TxPointer { - block_height: coin_signed.tx_pointer.block_height().into(), - tx_index: coin_signed.tx_pointer.tx_index().into(), - }), - witness_index: coin_signed.witness_index.into(), + variant: Some(ProtoInputVariant::CoinSigned(ProtoCoinSignedInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_signed.utxo_id)), + owner: coin_signed.owner.as_ref().to_vec(), + amount: coin_signed.amount, + asset_id: coin_signed.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_signed.tx_pointer)), + witness_index: coin_signed.witness_index.into(), + predicate_gas_used: 0, + predicate: vec![], + predicate_data: vec![], + })), + 
}, + Input::CoinPredicate(coin_predicate) => ProtoInput { + variant: Some(ProtoInputVariant::CoinPredicate(ProtoCoinPredicateInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_predicate.utxo_id)), + owner: coin_predicate.owner.as_ref().to_vec(), + amount: coin_predicate.amount, + asset_id: coin_predicate.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_predicate.tx_pointer)), + witness_index: 0, + predicate_gas_used: coin_predicate.predicate_gas_used, + predicate: coin_predicate.predicate.as_ref().to_vec(), + predicate_data: coin_predicate.predicate_data.as_ref().to_vec(), + })), + }, + Input::Contract(contract) => ProtoInput { + variant: Some(ProtoInputVariant::Contract(ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + })), + }, + Input::MessageCoinSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinSigned( + ProtoMessageCoinSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), predicate_gas_used: 0, - predicate: vec![], - predicate_data: vec![], + data: Vec::new(), + predicate: Vec::new(), + predicate_data: Vec::new(), }, )), }, - _ => ProtoInput { variant: None }, + Input::MessageCoinPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinPredicate( + ProtoMessageCoinPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: Vec::new(), + predicate: 
message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + Input::MessageDataSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataSigned( + ProtoMessageDataSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), + predicate_gas_used: 0, + data: message.data.as_ref().to_vec(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageDataPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataPredicate( + ProtoMessageDataPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: message.data.as_ref().to_vec(), + predicate: message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + } +} + +fn proto_utxo_id_from_utxo_id(utxo_id: &UtxoId) -> ProtoUtxoId { + ProtoUtxoId { + tx_id: utxo_id.tx_id().as_ref().to_vec(), + output_index: utxo_id.output_index().into(), + } +} + +fn proto_tx_pointer(tx_pointer: &TxPointer) -> ProtoTxPointer { + ProtoTxPointer { + block_height: tx_pointer.block_height().into(), + tx_index: tx_pointer.tx_index().into(), + } +} + +fn proto_storage_slot_from_storage_slot(slot: &StorageSlot) -> ProtoStorageSlot { + ProtoStorageSlot { + key: slot.key().as_ref().to_vec(), + value: slot.value().as_ref().to_vec(), + } +} + +fn proto_contract_input_from_contract( + contract: &fuel_core_types::fuel_tx::input::contract::Contract, +) -> ProtoContractInput { + ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: 
bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + } +} + +fn proto_contract_output_from_contract( + contract: &fuel_core_types::fuel_tx::output::contract::Contract, +) -> ProtoContractOutput { + ProtoContractOutput { + input_index: u32::from(contract.input_index), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + } +} + +fn proto_output_from_output(output: Output) -> ProtoOutput { + let variant = match output { + Output::Coin { + to, + amount, + asset_id, + } => ProtoOutputVariant::Coin(ProtoCoinOutput { + to: to.as_ref().to_vec(), + amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Contract(contract) => { + ProtoOutputVariant::Contract(proto_contract_output_from_contract(&contract)) + } + Output::Change { + to, + amount, + asset_id, + } => ProtoOutputVariant::Change(ProtoChangeOutput { + to: to.as_ref().to_vec(), + amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Variable { + to, + amount, + asset_id, + } => ProtoOutputVariant::Variable(ProtoVariableOutput { + to: to.as_ref().to_vec(), + amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::ContractCreated { + contract_id, + state_root, + } => ProtoOutputVariant::ContractCreated(ProtoContractCreatedOutput { + contract_id: contract_id.as_ref().to_vec(), + state_root: bytes32_to_vec(&state_root), + }), + }; + + ProtoOutput { + variant: Some(variant), + } +} + +fn proto_upgrade_purpose(purpose: &UpgradePurpose) -> ProtoUpgradePurpose { + let variant = match purpose { + UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + } => ProtoUpgradePurposeVariant::ConsensusParameters( + ProtoUpgradeConsensusParameters { + witness_index: u32::from(*witness_index), + checksum: checksum.as_ref().to_vec(), + }, + ), + UpgradePurpose::StateTransition { root } => { + 
ProtoUpgradePurposeVariant::StateTransition(ProtoUpgradeStateTransition { + root: root.as_ref().to_vec(), + }) + } + }; + + ProtoUpgradePurpose { + variant: Some(variant), } } fn proto_policies_from_policies( policies: &fuel_core_types::fuel_tx::policies::Policies, ) -> ProtoPolicies { - let mut values = [0u64; 5]; + let mut values = [0u64; 6]; if policies.is_set(PolicyType::Tip) { values[0] = policies.get(PolicyType::Tip).unwrap_or_default(); } @@ -247,6 +626,9 @@ fn proto_policies_from_policies( if policies.is_set(PolicyType::Expiration) { values[4] = policies.get(PolicyType::Expiration).unwrap_or_default(); } + if policies.is_set(PolicyType::Owner) { + values[5] = policies.get(PolicyType::Owner).unwrap_or_default(); + } let bits = policies.bits(); ProtoPolicies { bits, @@ -254,6 +636,190 @@ fn proto_policies_from_policies( } } +fn tx_pointer_from_proto(proto: &ProtoTxPointer) -> Result { + let block_height = proto.block_height.into(); + #[allow(clippy::useless_conversion)] + let tx_index = proto.tx_index.try_into().map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_index to target type: {}", e)) + })?; + Ok(TxPointer::new(block_height, tx_index)) +} + +fn storage_slot_from_proto(proto: &ProtoStorageSlot) -> Result { + let key = Bytes32::try_from(proto.key.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot key to Bytes32: {}", + e + )) + })?; + let value = Bytes32::try_from(proto.value.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot value to Bytes32: {}", + e + )) + })?; + Ok(StorageSlot::new(key, value)) +} + +fn contract_input_from_proto( + proto: &ProtoContractInput, +) -> Result { + let utxo_proto = proto.utxo_id.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing utxo_id on contract input")) + })?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + 
Error::Serialization(anyhow!("Could not convert balance_root to Bytes32: {}", e)) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert state_root to Bytes32: {}", e)) + })?; + let tx_pointer_proto = proto.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on contract input")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let contract_id = + fuel_core_types::fuel_types::ContractId::try_from(proto.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(fuel_core_types::fuel_tx::input::contract::Contract { + utxo_id, + balance_root, + state_root, + tx_pointer, + contract_id, + }) +} + +fn contract_output_from_proto( + proto: &ProtoContractOutput, +) -> Result { + let input_index = u16::try_from(proto.input_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output input_index to u16: {}", + e + )) + })?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output balance_root to Bytes32: {}", + e + )) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output state_root to Bytes32: {}", + e + )) + })?; + + Ok(fuel_core_types::fuel_tx::output::contract::Contract { + input_index, + balance_root, + state_root, + }) +} + +fn output_from_proto_output(proto_output: &ProtoOutput) -> Result { + match proto_output + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing output variant")))? 
+ { + ProtoOutputVariant::Coin(coin) => { + let to = Address::try_from(coin.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = + fuel_core_types::fuel_types::AssetId::try_from(coin.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::coin(to, coin.amount, asset_id)) + } + ProtoOutputVariant::Contract(contract) => { + let contract = contract_output_from_proto(contract)?; + Ok(Output::Contract(contract)) + } + ProtoOutputVariant::Change(change) => { + let to = Address::try_from(change.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + change.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::change(to, change.amount, asset_id)) + } + ProtoOutputVariant::Variable(variable) => { + let to = Address::try_from(variable.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + variable.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::variable(to, variable.amount, asset_id)) + } + ProtoOutputVariant::ContractCreated(contract_created) => { + let contract_id = fuel_core_types::fuel_types::ContractId::try_from( + contract_created.contract_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let state_root = Bytes32::try_from(contract_created.state_root.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state_root to Bytes32: {}", + e + )) + })?; + Ok(Output::contract_created(contract_id, state_root)) + } + } +} + +fn upgrade_purpose_from_proto(proto: &ProtoUpgradePurpose) -> Result { + match proto + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing upgrade purpose variant")))? 
+ { + ProtoUpgradePurposeVariant::ConsensusParameters(consensus) => { + let witness_index = u16::try_from(consensus.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let checksum = + Bytes32::try_from(consensus.checksum.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert checksum to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + }) + } + ProtoUpgradePurposeVariant::StateTransition(state) => { + let root = Bytes32::try_from(state.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state transition root to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::StateTransition { root }) + } + } +} + +fn utxo_id_from_proto(proto_utxo: &ProtoUtxoId) -> Result { + let tx_id = Bytes32::try_from(proto_utxo.tx_id.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_id to Bytes32: {}", e)) + })?; + let output_index = u16::try_from(proto_utxo.output_index).map_err(|e| { + Error::Serialization(anyhow!("Could not convert output_index to u16: {}", e)) + })?; + Ok(UtxoId::new(tx_id, output_index)) +} + fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { bytes.as_ref().to_vec() } @@ -261,7 +827,7 @@ fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { #[cfg(test)] pub fn fuel_block_from_protobuf( proto_block: ProtoBlock, - msg_ids: &[MessageId], + msg_ids: &[fuel_core_types::fuel_tx::MessageId], event_inbox_root: Bytes32, ) -> Result { let versioned_block = proto_block @@ -307,138 +873,509 @@ pub fn partial_header_from_proto_header( Ok(partial_header) } -pub fn tx_from_proto_tx(_proto_tx: &ProtoTransaction) -> Result { - match &_proto_tx.variant { - Some(ProtoTransactionVariant::Script(_proto_script)) => { - let ProtoScriptTx { - script_gas_limit, - receipts_root, - script, - script_data, +pub fn tx_from_proto_tx(proto_tx: 
&ProtoTransaction) -> Result { + let variant = proto_tx + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing transaction variant")))?; + + match variant { + ProtoTransactionVariant::Script(proto_script) => { + let policies = proto_script + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_script + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_script + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_script + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let mut script_tx = FuelTransaction::script( + proto_script.script_gas_limit, + proto_script.script.clone(), + proto_script.script_data.clone(), policies, inputs, - outputs: _, - witnesses: _, - metadata: _, - } = _proto_script.clone(); - let fuel_policies = policies + outputs, + witnesses, + ); + *script_tx.receipts_root_mut() = Bytes32::try_from( + proto_script.receipts_root.as_slice(), + ) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert receipts_root to Bytes32: {}", + e + )) + })?; + + Ok(FuelTransaction::Script(script_tx)) + } + ProtoTransactionVariant::Create(proto_create) => { + let policies = proto_create + .policies + .clone() .map(policies_from_proto_policies) .unwrap_or_default(); - let mut script_tx = FuelTransaction::script( - script_gas_limit, - script.to_vec(), - script_data.to_vec(), - fuel_policies, - inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?, - vec![], - vec![], + let inputs = proto_create + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_create + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_create + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let storage_slots = proto_create + .storage_slots + .iter() + 
.map(storage_slot_from_proto) + .collect::>>()?; + let salt = + fuel_core_types::fuel_types::Salt::try_from(proto_create.salt.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let bytecode_witness_index = + u16::try_from(proto_create.bytecode_witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert bytecode_witness_index to u16: {}", + e + )) + })?; + + let create_tx = FuelTransaction::create( + bytecode_witness_index, + policies, + salt, + storage_slots, + inputs, + outputs, + witnesses, + ); + + Ok(FuelTransaction::Create(create_tx)) + } + ProtoTransactionVariant::Mint(proto_mint) => { + let tx_pointer_proto = proto_mint.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on mint transaction")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let input_contract_proto = + proto_mint.input_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing input_contract on mint transaction" + )) + })?; + let input_contract = contract_input_from_proto(input_contract_proto)?; + let output_contract_proto = + proto_mint.output_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing output_contract on mint transaction" + )) + })?; + let output_contract = contract_output_from_proto(output_contract_proto)?; + let mint_asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_mint.mint_asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + let mint_tx = FuelTransaction::mint( + tx_pointer, + input_contract, + output_contract, + proto_mint.mint_amount, + mint_asset_id, + proto_mint.gas_price, + ); + + Ok(FuelTransaction::Mint(mint_tx)) + } + ProtoTransactionVariant::Upgrade(proto_upgrade) => { + let purpose_proto = proto_upgrade.purpose.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing purpose on upgrade transaction")) + })?; + let upgrade_purpose = upgrade_purpose_from_proto(purpose_proto)?; + let 
policies = proto_upgrade + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_upgrade + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upgrade + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upgrade + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + + let upgrade_tx = FuelTransaction::upgrade( + upgrade_purpose, + policies, + inputs, + outputs, + witnesses, ); - *script_tx.receipts_root_mut() = Bytes32::try_from(receipts_root.as_ref()) + + Ok(FuelTransaction::Upgrade(upgrade_tx)) + } + ProtoTransactionVariant::Upload(proto_upload) => { + let policies = proto_upload + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_upload + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upload + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upload + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let root = Bytes32::try_from(proto_upload.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert upload root to Bytes32: {}", + e + )) + })?; + let witness_index = + u16::try_from(proto_upload.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let subsection_index = + u16::try_from(proto_upload.subsection_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsection_index to u16: {}", + e + )) + })?; + let subsections_number = u16::try_from(proto_upload.subsections_number) .map_err(|e| { Error::Serialization(anyhow!( - "Could not convert receipts_root to Bytes32: {}", + "Could not convert subsections_number to u16: {}", e )) })?; + let proof_set = proto_upload + .proof_set + .iter() + .map(|entry| { + 
Bytes32::try_from(entry.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert proof_set entry to Bytes32: {}", + e + )) + }) + }) + .collect::>>()?; - Ok(FuelTransaction::Script(script_tx)) + let body = UploadBody { + root, + witness_index, + subsection_index, + subsections_number, + proof_set, + }; + + let upload_tx = + FuelTransaction::upload(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Upload(upload_tx)) } - _ => { - Err(anyhow!("Unsupported transaction variant")).map_err(Error::Serialization) + ProtoTransactionVariant::Blob(proto_blob) => { + let policies = proto_blob + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_blob + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_blob + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_blob + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let blob_id = fuel_core_types::fuel_types::BlobId::try_from( + proto_blob.blob_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = u16::try_from(proto_blob.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert blob witness_index to u16: {}", + e + )) + })?; + let body = BlobBody { + id: blob_id, + witness_index, + }; + + let blob_tx = + FuelTransaction::blob(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Blob(blob_tx)) } } } fn input_from_proto_input(proto_input: &ProtoInput) -> Result { - match &proto_input.variant { - // pub enum Variant { - // CoinSigned(super::CoinSignedInput), - // CoinPredicate(super::CoinPredicateInput), - // Contract(super::ContractInput), - // MessageCoinSigned(super::MessageCoinSignedInput), - // MessageCoinPredicate(super::MessageCoinPredicateInput), - // MessageDataSigned(super::MessageDataSignedInput), - // 
MessageDataPredicate(super::MessageDataPredicateInput), - // } - Some(crate::protobuf_types::input::Variant::CoinSigned(proto_coin_signed)) => { - let proto_utxo_id = proto_coin_signed + let variant = proto_input + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing input variant")))?; + + match variant { + ProtoInputVariant::CoinSigned(proto_coin_signed) => { + let utxo_proto = proto_coin_signed .utxo_id .as_ref() - .ok_or(Error::Serialization(anyhow!("Missing utxo_id")))?; - let utxo_id = UtxoId::new( - Bytes32::try_from(proto_utxo_id.tx_id.as_slice()).map_err(|e| { + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = + Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { Error::Serialization(anyhow!( - "Could not convert tx_id to Bytes32: {}", + "Could not convert owner to Address: {}", e )) - })?, - proto_utxo_id.output_index.try_into().map_err(|e| { + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_signed.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_signed + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let witness_index = + u16::try_from(proto_coin_signed.witness_index).map_err(|e| { Error::Serialization(anyhow!( - "Could not convert output_index to u8: {}", + "Could not convert witness_index to u16: {}", e )) - })?, - ); - let owner = - Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { + })?; + + Ok(Input::coin_signed( + utxo_id, + owner, + proto_coin_signed.amount, + asset_id, + tx_pointer, + witness_index, + )) + } + ProtoInputVariant::CoinPredicate(proto_coin_predicate) => { + let utxo_proto = proto_coin_predicate + .utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let 
utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = Address::try_from(proto_coin_predicate.owner.as_slice()) + .map_err(|e| { Error::Serialization(anyhow!( "Could not convert owner to Address: {}", e )) })?; - let amount = proto_coin_signed.amount; let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_coin_signed.asset_id.as_slice(), + proto_coin_predicate.asset_id.as_slice(), ) .map_err(|e| Error::Serialization(anyhow!(e)))?; - let tx_index: u16 = u16::try_from( - proto_coin_signed - .tx_pointer - .ok_or(Error::Serialization(anyhow!("Missing tx_pointer")))? - .tx_index, + let tx_pointer_proto = proto_coin_predicate + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + + Ok(Input::coin_predicate( + utxo_id, + owner, + proto_coin_predicate.amount, + asset_id, + tx_pointer, + proto_coin_predicate.predicate_gas_used, + proto_coin_predicate.predicate.clone(), + proto_coin_predicate.predicate_data.clone(), + )) + } + ProtoInputVariant::Contract(proto_contract) => { + let contract = contract_input_from_proto(proto_contract)?; + Ok(Input::Contract(contract)) + } + ProtoInputVariant::MessageCoinSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), ) .map_err(|e| Error::Serialization(anyhow!(e)))?; - let tx_pointer = fuel_core_types::fuel_tx::TxPointer::new( - proto_coin_signed - .tx_pointer - .as_ref() - .map(|tp| tp.block_height.into()) - .unwrap_or_default(), - tx_index, - ); let witness_index = - 
proto_coin_signed.witness_index.try_into().map_err(|e| { + u16::try_from(proto_message.witness_index).map_err(|e| { Error::Serialization(anyhow!( - "Could not convert witness_index to Specification::Witness: {}", + "Could not convert witness_index to u16: {}", e )) })?; - let input = Input::coin_signed( - utxo_id, - owner, - amount, - asset_id, - tx_pointer, + Ok(Input::message_coin_signed( + sender, + recipient, + proto_message.amount, + nonce, witness_index, - ); - Ok(input) + )) } - Some(crate::protobuf_types::input::Variant::CoinPredicate( - _proto_coin_predicate, - )) => Err(anyhow!( - "CoinPredicate input deserialization not implemented" - )) - .map_err(Error::Serialization), + ProtoInputVariant::MessageCoinPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_coin_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + ProtoInputVariant::MessageDataSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + 
.map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; - // Some(crate::protobuf_types::input::Variant::Message(proto_message)) => { - // Err(anyhow!("Message input deserialization not implemented")) - // .map_err(Error::Serialization) - // } - _ => Err(anyhow!("Unsupported input variant")).map_err(Error::Serialization), + Ok(Input::message_data_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + proto_message.data.clone(), + )) + } + ProtoInputVariant::MessageDataPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_data_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.data.clone(), + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } } } @@ -527,6 +1464,11 @@ fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { policies.set(PolicyType::Expiration, Some(*expiration)); } } + if bits.contains(PoliciesBits::Owner) { + if let Some(owner) = values.get(5) { + policies.set(PolicyType::Owner, Some(*owner)); + } + } policies } @@ -606,22 +1548,12 @@ pub fn proto_header_to_empty_consensus_header( #[cfg(test)] mod tests { use super::*; - use fuel_core_types::{ - fuel_tx::{ - Blob, - Create, - Mint, - Script, - Upgrade, - Upload, - }, - 
test_helpers::arb_block, - }; + use fuel_core_types::test_helpers::arb_block; use proptest::prelude::*; proptest! { #![proptest_config(ProptestConfig { - cases: 1, .. ProptestConfig::default() + cases: 100, .. ProptestConfig::default() })] #[test] fn serialize_block__roundtrip((block, msg_ids, event_inbox_root) in arb_block()) { @@ -635,7 +1567,7 @@ mod tests { let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); assert_eq!(block, deserialized_block); - } + } } #[test] diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index d3768973269..47996069ad4 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -9,6 +9,8 @@ use crate::{ }, fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ + BlobBody, + BlobIdExt, Bytes32, ContractId, Create, @@ -16,23 +18,29 @@ use crate::{ Input, MessageId, Output, - Script, + StorageSlot, Transaction, TransactionBuilder, TxPointer, + UpgradePurpose, + UploadBody, UtxoId, - field::{ - Inputs, - Policies as _, - ReceiptsRoot, - Script as _, - ScriptData as _, - ScriptGasLimit, + Witness, + field::ReceiptsRoot, + input::{ + coin::CoinSigned, + contract::Contract as InputContract, }, - input::coin::CoinSigned, + output::contract::Contract as OutputContract, policies::Policies, }, - fuel_types::BlockHeight, + fuel_types::{ + Address, + AssetId, + BlobId, + BlockHeight, + Nonce, + }, fuel_vm::{ Contract, Salt, @@ -66,76 +74,155 @@ pub fn create_contract( (tx, contract_id) } -// pub enum Transaction { -// Script(Script), -// Create(Create), -// Mint(Mint), -// Upgrade(Upgrade), -// Upload(Upload), -// Blob(Blob), -// } #[allow(unused)] fn arb_txs() -> impl Strategy> { - let tx_strategy = prop_oneof![ - 1 => arb_script_tx(), - ]; + prop::collection::vec(arb_transaction(), 0..10) +} + +fn arb_script_transaction() -> impl Strategy { + ( + 1..10000u64, + any::<[u8; 32]>(), + prop::collection::vec(any::(), 0..100), + 
prop::collection::vec(any::(), 0..100), + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 0..4), + ) + .prop_map( + |( + script_gas_limit, + receipts_root, + script_bytes, + script_data, + policies, + inputs, + outputs, + witnesses, + )| { + let mut script = crate::fuel_tx::Transaction::script( + script_gas_limit, + script_bytes, + script_data, + policies, + inputs, + outputs, + witnesses, + ); + *script.receipts_root_mut() = receipts_root.into(); + Transaction::Script(script) + }, + ) +} + +prop_compose! { + fn arb_storage_slot()( + key in any::<[u8; 32]>(), + value in any::<[u8; 32]>(), + ) -> StorageSlot { + StorageSlot::new(key.into(), value.into()) + } +} - prop::collection::vec(tx_strategy, 1..2) +prop_compose! { + fn arb_coin_output()( + to in arb_address(), + amount in any::(), + asset in arb_asset_id(), + ) -> Output { + Output::coin(to, amount, asset) + } } -// pub(crate) body: Body, -// pub(crate) policies: Policies, -// pub(crate) inputs: Vec, -// pub(crate) outputs: Vec, -// pub(crate) witnesses: Vec, -// pub(crate) metadata: Option>, -// body -// pub(crate) script_gas_limit: Word, -// pub(crate) receipts_root: Bytes32, -// pub(crate) script: ScriptCode, -// pub(crate) script_data: Bytes, prop_compose! 
{ - fn arb_script_tx()( - script_gas_limit in 1..10000u64, - receipts_root in any::<[u8; 32]>(), - script_bytes in prop::collection::vec(any::(), 0..100), - script_data in prop::collection::vec(any::(), 0..100), - policies in arb_policies(), - inputs in arb_inputs(), - // outputs in arb_outputs(), - // witnesses in arb_witnesses(), - ) -> Transaction { - let mut script = Script::default(); - *script.script_gas_limit_mut() = script_gas_limit; - *script.receipts_root_mut() = receipts_root.into(); - *script.script_mut() = script_bytes; - *script.script_data_mut() = script_data; - *script.policies_mut() = policies; - *script.inputs_mut() = inputs; - // *script.outputs_mut() = outputs; - // *script.witnesses_mut() = witnesses; - - Transaction::Script(script) + fn arb_contract_output()( + input_index in any::(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + ) -> Output { + Output::Contract(OutputContract { + input_index, + balance_root: balance_root.into(), + state_root: state_root.into(), + }) } } -// prop_compose! { -// fn arb_create_tx()( -// contract_code in prop::collection::vec(any::(), 0..100), -// ) -> Create { -// let mut create = Create::default(); -// *create.contract_code_mut() = contract_code.into(); -// create -// } -// } +prop_compose! { + fn arb_change_output()( + to in arb_address(), + amount in any::(), + asset in arb_asset_id(), + ) -> Output { + Output::change(to, amount, asset) + } +} + +prop_compose! { + fn arb_variable_output()( + to in arb_address(), + amount in any::(), + asset in arb_asset_id(), + ) -> Output { + Output::variable(to, amount, asset) + } +} + +prop_compose! 
{ + fn arb_contract_created_output()( + contract_id in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + ) -> Output { + Output::contract_created(ContractId::new(contract_id), state_root.into()) + } +} + +fn arb_output_any() -> impl Strategy { + prop_oneof![ + arb_coin_output(), + arb_contract_output(), + arb_change_output(), + arb_variable_output(), + arb_contract_created_output(), + ] +} + +fn arb_outputs() -> impl Strategy> { + prop::collection::vec(arb_output_any(), 0..10) +} + +fn arb_witness() -> impl Strategy { + prop::collection::vec(any::(), 0..128).prop_map(Witness::from) +} prop_compose! { fn arb_policies()( + tip in prop::option::of(any::()), + witness_limit in prop::option::of(any::()), maturity in prop::option::of(0..100u32), + max_fee in prop::option::of(any::()), + expiration in prop::option::of(0..100u32), + owner in prop::option::of(any::()), ) -> Policies { let mut policies = Policies::new(); - if let Some(inner) = maturity { - policies = policies.with_maturity(BlockHeight::new(inner)); + if let Some(tip) = tip { + policies = policies.with_tip(tip); + } + if let Some(witness_limit) = witness_limit { + policies = policies.with_witness_limit(witness_limit); + } + if let Some(value) = maturity { + policies = policies.with_maturity(BlockHeight::new(value)); + } + if let Some(max_fee) = max_fee { + policies = policies.with_max_fee(max_fee); + } + if let Some(value) = expiration { + policies = policies.with_expiration(BlockHeight::new(value)); + } + if let Some(owner) = owner { + policies = policies.with_owner(owner); } policies } @@ -149,17 +236,7 @@ prop_compose! 
{ #[allow(unused)] fn arb_inputs() -> impl Strategy> { - // pub enum Input { - // CoinSigned(CoinSigned), - // CoinPredicate(CoinPredicate), - // Contract(Contract), - // MessageCoinSigned(MessageCoinSigned), - // MessageCoinPredicate(MessageCoinPredicate), - // MessageDataSigned(MessageDataSigned), - // MessageDataPredicate(MessageDataPredicate), - // } - let strategy = prop_oneof![arb_coin_signed(), arb_coin_predicate(),]; - prop::collection::vec(strategy, 0..10) + prop::collection::vec(arb_input_any(), 0..10) } prop_compose! { @@ -225,6 +302,114 @@ prop_compose! { } } +prop_compose! { + fn arb_contract_input_variant()( + utxo_id in arb_utxo_id(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + tx_pointer in arb_tx_pointer(), + contract_id in any::<[u8; 32]>(), + ) -> Input { + let contract = InputContract { + utxo_id, + balance_root: balance_root.into(), + state_root: state_root.into(), + tx_pointer, + contract_id: ContractId::new(contract_id), + }; + Input::Contract(contract) + } +} + +prop_compose! { + fn arb_nonce()(bytes in any::<[u8; 32]>()) -> Nonce { + Nonce::new(bytes) + } +} + +prop_compose! { + fn arb_message_coin_signed_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + witness_index in any::(), + ) -> Input { + Input::message_coin_signed(sender, recipient, amount, nonce, witness_index) + } +} + +prop_compose! { + fn arb_message_coin_predicate_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + predicate_gas_used in any::(), + predicate in prop::collection::vec(any::(), 0..64), + predicate_data in prop::collection::vec(any::(), 0..64), + ) -> Input { + Input::message_coin_predicate( + sender, + recipient, + amount, + nonce, + predicate_gas_used, + predicate, + predicate_data, + ) + } +} + +prop_compose! 
{ + fn arb_message_data_signed_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + witness_index in any::(), + data in prop::collection::vec(any::(), 0..128), + ) -> Input { + Input::message_data_signed(sender, recipient, amount, nonce, witness_index, data) + } +} + +prop_compose! { + fn arb_message_data_predicate_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + predicate_gas_used in any::(), + data in prop::collection::vec(any::(), 0..128), + predicate in prop::collection::vec(any::(), 0..64), + predicate_data in prop::collection::vec(any::(), 0..64), + ) -> Input { + Input::message_data_predicate( + sender, + recipient, + amount, + nonce, + predicate_gas_used, + data, + predicate, + predicate_data, + ) + } +} + +fn arb_input_any() -> impl Strategy { + prop_oneof![ + arb_coin_signed(), + arb_coin_predicate(), + arb_contract_input_variant(), + arb_message_coin_signed_input(), + arb_message_coin_predicate_input(), + arb_message_data_signed_input(), + arb_message_data_predicate_input(), + ] +} + prop_compose! { fn arb_utxo_id()( inner in any::<[u8; 32]>(), @@ -262,13 +447,490 @@ fn arb_msg_ids() -> impl Strategy> { prop::collection::vec(arb_msg_id(), 0..10usize) } +fn arb_transaction() -> impl Strategy { + prop_oneof![ + arb_script_transaction(), + arb_create_transaction(), + arb_mint_transaction(), + arb_upgrade_transaction(), + arb_upload_transaction(), + arb_blob_transaction(), + ] +} + +prop_compose! { + fn arb_input_contract_core()( + utxo_id in arb_utxo_id(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + tx_pointer in arb_tx_pointer(), + contract_id in any::<[u8; 32]>(), + ) -> InputContract { + InputContract { + utxo_id, + balance_root: balance_root.into(), + state_root: state_root.into(), + tx_pointer, + contract_id: ContractId::new(contract_id), + } + } +} + +prop_compose! 
{ + fn arb_output_contract_core()( + input_index in any::(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + ) -> OutputContract { + OutputContract { + input_index, + balance_root: balance_root.into(), + state_root: state_root.into(), + } + } +} + +fn arb_create_transaction() -> impl Strategy { + ( + arb_policies(), + any::<[u8; 32]>(), + prop::collection::vec(arb_storage_slot(), 0..4), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + ) + .prop_map( + |(policies, salt_bytes, storage_slots, inputs, outputs, witnesses)| { + let create = crate::fuel_tx::Transaction::create( + 0, + policies, + Salt::from(salt_bytes), + storage_slots, + inputs, + outputs, + witnesses, + ); + Transaction::Create(create) + }, + ) +} + +fn arb_mint_transaction() -> impl Strategy { + ( + arb_tx_pointer(), + arb_input_contract_core(), + arb_output_contract_core(), + any::(), + arb_asset_id(), + any::(), + ) + .prop_map( + |( + tx_pointer, + input_contract, + output_contract, + mint_amount, + mint_asset_id, + gas_price, + )| { + let mint = crate::fuel_tx::Transaction::mint( + tx_pointer, + input_contract, + output_contract, + mint_amount, + mint_asset_id, + gas_price, + ); + Transaction::Mint(mint) + }, + ) +} + +fn arb_upgrade_transaction() -> impl Strategy { + prop_oneof![ + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + any::<[u8; 32]>(), + ) + .prop_map( + |(policies, inputs, outputs, witnesses, checksum_bytes)| { + let purpose = UpgradePurpose::ConsensusParameters { + witness_index: 0, + checksum: checksum_bytes.into(), + }; + let upgrade = crate::fuel_tx::Transaction::upgrade( + purpose, policies, inputs, outputs, witnesses, + ); + Transaction::Upgrade(upgrade) + } + ), + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 0..4), + any::<[u8; 32]>(), + ) + .prop_map(|(policies, inputs, outputs, witnesses, root_bytes)| { + let purpose = 
UpgradePurpose::StateTransition { + root: root_bytes.into(), + }; + let upgrade = crate::fuel_tx::Transaction::upgrade( + purpose, policies, inputs, outputs, witnesses, + ); + Transaction::Upgrade(upgrade) + }) + ] +} + +fn arb_upload_transaction() -> impl Strategy { + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + any::<[u8; 32]>(), + prop::collection::vec(any::<[u8; 32]>(), 0..4), + 1u16..=4, + any::(), + ) + .prop_map( + |( + policies, + inputs, + outputs, + witnesses, + root_bytes, + proof_entries, + subsections_number, + subsection_index_candidate, + )| { + let proof_set = proof_entries + .into_iter() + .map(Bytes32::from) + .collect::>(); + let subsections_number = subsections_number.max(1); + let subsection_index = subsection_index_candidate % subsections_number; + let body = UploadBody { + root: root_bytes.into(), + witness_index: 0, + subsection_index, + subsections_number, + proof_set, + }; + let upload = crate::fuel_tx::Transaction::upload( + body, policies, inputs, outputs, witnesses, + ); + Transaction::Upload(upload) + }, + ) +} + +fn arb_blob_transaction() -> impl Strategy { + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 0..3), + prop::collection::vec(any::(), 0..256), + ) + .prop_map( + |(policies, inputs, outputs, mut extra_witnesses, payload)| { + let mut witnesses = Vec::with_capacity(extra_witnesses.len() + 1); + witnesses.push(Witness::from(payload.clone())); + witnesses.append(&mut extra_witnesses); + let blob_id = BlobId::compute(&payload); + let body = BlobBody { + id: blob_id, + witness_index: 0, + }; + let blob = crate::fuel_tx::Transaction::blob( + body, policies, inputs, outputs, witnesses, + ); + Transaction::Blob(blob) + }, + ) +} + +/// Deterministic `Input::coin_signed` sample used for round-trip testing. 
+pub fn sample_coin_signed_input() -> Input { + let utxo_id = UtxoId::new(Bytes32::from([1u8; 32]), 0); + let owner = Address::new([2u8; 32]); + let asset_id = AssetId::new([3u8; 32]); + let tx_pointer = TxPointer::new(BlockHeight::new(0), 0); + Input::coin_signed(utxo_id, owner, 42, asset_id, tx_pointer, 0) +} + +/// Deterministic `Input::coin_predicate` sample used for round-trip testing. +pub fn sample_coin_predicate_input() -> Input { + let utxo_id = UtxoId::new(Bytes32::from([4u8; 32]), 1); + let owner = Address::new([5u8; 32]); + let asset_id = AssetId::new([6u8; 32]); + let tx_pointer = TxPointer::new(BlockHeight::new(1), 1); + Input::coin_predicate( + utxo_id, + owner, + 84, + asset_id, + tx_pointer, + 10, + vec![0xaa, 0xbb], + vec![0xcc, 0xdd], + ) +} + +/// Deterministic `Input::Contract` sample used for round-trip testing. +pub fn sample_contract_input() -> Input { + let contract = InputContract { + utxo_id: UtxoId::new(Bytes32::from([7u8; 32]), 2), + balance_root: Bytes32::from([8u8; 32]), + state_root: Bytes32::from([9u8; 32]), + tx_pointer: TxPointer::new(BlockHeight::new(2), 2), + contract_id: ContractId::new([10u8; 32]), + }; + Input::Contract(contract) +} + +/// Deterministic `Input::message_coin_signed` sample used for round-trip testing. +pub fn sample_message_coin_signed_input() -> Input { + let sender = Address::new([11u8; 32]); + let recipient = Address::new([12u8; 32]); + let nonce = Nonce::new([13u8; 32]); + Input::message_coin_signed(sender, recipient, 21, nonce, 0) +} + +/// Deterministic `Input::message_coin_predicate` sample used for round-trip testing. +pub fn sample_message_coin_predicate_input() -> Input { + let sender = Address::new([14u8; 32]); + let recipient = Address::new([15u8; 32]); + let nonce = Nonce::new([16u8; 32]); + Input::message_coin_predicate( + sender, + recipient, + 22, + nonce, + 5, + vec![0x01, 0x02], + vec![0x03, 0x04], + ) +} + +/// Deterministic `Input::message_data_signed` sample used for round-trip testing. 
+pub fn sample_message_data_signed_input() -> Input { + let sender = Address::new([17u8; 32]); + let recipient = Address::new([18u8; 32]); + let nonce = Nonce::new([19u8; 32]); + Input::message_data_signed( + sender, + recipient, + 23, + nonce, + 1, + vec![0xde, 0xad, 0xbe, 0xef], + ) +} + +/// Deterministic `Input::message_data_predicate` sample used for round-trip testing. +pub fn sample_message_data_predicate_input() -> Input { + let sender = Address::new([20u8; 32]); + let recipient = Address::new([21u8; 32]); + let nonce = Nonce::new([22u8; 32]); + Input::message_data_predicate( + sender, + recipient, + 24, + nonce, + 6, + vec![0x99, 0x88], + vec![0x77], + vec![0x66], + ) +} + +/// Collection of sample inputs covering every input variant. +pub fn sample_inputs() -> Vec { + vec![ + sample_coin_signed_input(), + sample_coin_predicate_input(), + sample_contract_input(), + sample_message_coin_signed_input(), + sample_message_coin_predicate_input(), + sample_message_data_signed_input(), + sample_message_data_predicate_input(), + ] +} + +/// Collection of sample outputs covering every output variant. +pub fn sample_outputs() -> Vec { + vec![ + Output::coin(Address::new([23u8; 32]), 50, AssetId::new([24u8; 32])), + Output::Contract(OutputContract { + input_index: 0, + balance_root: Bytes32::from([25u8; 32]), + state_root: Bytes32::from([26u8; 32]), + }), + Output::change(Address::new([27u8; 32]), 60, AssetId::new([28u8; 32])), + Output::variable(Address::new([29u8; 32]), 70, AssetId::new([30u8; 32])), + Output::contract_created(ContractId::new([31u8; 32]), Bytes32::from([32u8; 32])), + ] +} + +/// Sample `Transaction::Script` covering scripts, inputs, outputs, and witnesses. 
+pub fn sample_script_transaction() -> Transaction { + let policies = Policies::new().with_witness_limit(10); + let inputs = vec![ + sample_coin_signed_input(), + sample_message_data_signed_input(), + ]; + let outputs = vec![Output::coin( + Address::new([40u8; 32]), + 11, + AssetId::new([41u8; 32]), + )]; + let witnesses = vec![Witness::from(vec![0x01, 0x02, 0x03])]; + let mut script = crate::fuel_tx::Transaction::script( + 1_000, + vec![0x10, 0x20], + vec![0x30, 0x40], + policies, + inputs, + outputs, + witnesses, + ); + *script.receipts_root_mut() = Bytes32::from([33u8; 32]); + Transaction::Script(script) +} + +/// Sample `Transaction::Create` with deterministic storage slots and witnesses. +pub fn sample_create_transaction() -> Transaction { + let policies = Policies::new(); + let storage_slots = vec![StorageSlot::new( + Bytes32::from([34u8; 32]), + Bytes32::from([35u8; 32]), + )]; + let inputs = vec![sample_coin_signed_input()]; + let outputs = vec![Output::contract_created( + ContractId::new([36u8; 32]), + Bytes32::from([37u8; 32]), + )]; + let witnesses = vec![Witness::from(vec![0xaa, 0xbb])]; + let create = crate::fuel_tx::Transaction::create( + 0, + policies, + crate::fuel_types::Salt::from([38u8; 32]), + storage_slots, + inputs, + outputs, + witnesses, + ); + Transaction::Create(create) +} + +/// Sample `Transaction::Mint` with deterministic contracts and asset data. 
+pub fn sample_mint_transaction() -> Transaction { + let tx_pointer = TxPointer::new(BlockHeight::new(5), 0); + let input_contract = InputContract { + utxo_id: UtxoId::new(Bytes32::from([39u8; 32]), 3), + balance_root: Bytes32::from([40u8; 32]), + state_root: Bytes32::from([41u8; 32]), + tx_pointer, + contract_id: ContractId::new([42u8; 32]), + }; + let output_contract = OutputContract { + input_index: 0, + balance_root: Bytes32::from([43u8; 32]), + state_root: Bytes32::from([44u8; 32]), + }; + let mint_asset_id = AssetId::new([45u8; 32]); + let mint = crate::fuel_tx::Transaction::mint( + tx_pointer, + input_contract, + output_contract, + 99, + mint_asset_id, + 1, + ); + Transaction::Mint(mint) +} + +/// Sample `Transaction::Upgrade` using a state transition purpose. +pub fn sample_upgrade_transaction() -> Transaction { + let policies = Policies::new(); + let inputs = vec![sample_coin_signed_input()]; + let outputs = vec![Output::coin( + Address::new([46u8; 32]), + 5, + AssetId::new([47u8; 32]), + )]; + let witnesses = vec![Witness::from(vec![0x11, 0x22])]; + let purpose = UpgradePurpose::StateTransition { + root: Bytes32::from([48u8; 32]), + }; + let upgrade = crate::fuel_tx::Transaction::upgrade( + purpose, policies, inputs, outputs, witnesses, + ); + Transaction::Upgrade(upgrade) +} + +/// Sample `Transaction::Upload` with deterministic proof set and witness index. 
+pub fn sample_upload_transaction() -> Transaction { + let policies = Policies::new(); + let inputs = vec![sample_coin_signed_input()]; + let outputs = vec![Output::change( + Address::new([49u8; 32]), + 3, + AssetId::new([50u8; 32]), + )]; + let witnesses = vec![Witness::from(vec![0x33, 0x44, 0x55])]; + let body = UploadBody { + root: Bytes32::from([51u8; 32]), + witness_index: 0, + subsection_index: 0, + subsections_number: 1, + proof_set: vec![Bytes32::from([52u8; 32])], + }; + let upload = + crate::fuel_tx::Transaction::upload(body, policies, inputs, outputs, witnesses); + Transaction::Upload(upload) +} + +/// Sample `Transaction::Blob` using a computed blob ID and payload witness. +pub fn sample_blob_transaction() -> Transaction { + let policies = Policies::new(); + let inputs = vec![sample_coin_signed_input()]; + let outputs = vec![Output::coin( + Address::new([53u8; 32]), + 7, + AssetId::new([54u8; 32]), + )]; + let payload = vec![0x99, 0x00, 0x99]; + let witnesses = vec![Witness::from(payload.clone())]; + let blob_id = BlobId::compute(&payload); + let body = BlobBody { + id: blob_id, + witness_index: 0, + }; + let blob = + crate::fuel_tx::Transaction::blob(body, policies, inputs, outputs, witnesses); + Transaction::Blob(blob) +} + +/// Collection of sample transactions covering every transaction variant. +pub fn sample_transactions() -> Vec { + vec![ + sample_script_transaction(), + sample_create_transaction(), + sample_mint_transaction(), + sample_upgrade_transaction(), + sample_upload_transaction(), + sample_blob_transaction(), + ] +} + prop_compose! { - // pub struct ConsensusHeader { - // pub prev_root: Bytes32, - // pub height: BlockHeight, - // pub time: Tai64, - // pub generated: Generated, - // } fn arb_consensus_header()( prev_root in any::<[u8; 32]>(), time in any::(), @@ -282,25 +944,10 @@ prop_compose! 
{ } } -// message V1Header { -// uint64 da_height = 1; -// uint32 consensus_parameters_version = 2; -// uint32 state_transition_bytecode_version = 3; -// uint32 transactions_count = 4; -// uint32 message_receipt_count = 5; -// bytes transactions_root = 6; -// bytes message_outbox_root = 7; -// bytes event_inbox_root = 8; -// bytes prev_root = 9; -// uint32 height = 10; -// uint64 time = 11; -// bytes application_hash = 12; -// optional bytes block_id = 13; -// } prop_compose! { /// Generate an arbitrary block with a variable number of transactions pub fn arb_block()( - txs in arb_txs(), + script_tx in arb_script_transaction(), da_height in any::(), consensus_parameter_version in any::(), state_transition_bytecode_version in any::(), @@ -308,40 +955,20 @@ prop_compose! { event_root in any::<[u8; 32]>(), mut consensus_header in arb_consensus_header(), ) -> (Block, Vec, Bytes32) { - // pub struct BlockV1 { - // header: BlockHeader, - // transactions: Vec, - // } let mut fuel_block = Block::default(); + let mut txs = sample_transactions(); + if !txs.is_empty() { + txs[0] = script_tx; + } + // include txs first to be included in calculations *fuel_block.transactions_mut() = txs; - // Header - // pub struct BlockHeaderV1 { - // pub(crate) application: ApplicationHeader, - // pub(crate) consensus: ConsensusHeader, - // pub(crate) metadata: Option, - // } - - // Application - // pub struct ApplicationHeader { - // pub da_height: DaBlockHeight, - // pub consensus_parameters_version: ConsensusParametersVersion, - // pub state_transition_bytecode_version: StateTransitionBytecodeVersion, - // pub generated: Generated, - // } fuel_block.header_mut().set_da_height(DaBlockHeight(da_height)); fuel_block.header_mut().set_consensus_parameters_version(consensus_parameter_version); fuel_block.header_mut().set_state_transition_bytecode_version(state_transition_bytecode_version); - // pub struct GeneratedApplicationFieldsV1 { - // pub transactions_count: u16, - // pub 
message_receipt_count: u32, - // pub transactions_root: Bytes32, - // pub message_outbox_root: Bytes32, - // pub event_inbox_root: Bytes32, - // } let count = fuel_block.transactions().len().try_into().expect("we shouldn't have more than u16::MAX transactions"); let msg_root = msg_ids .iter() From fa3da69643f97b754d9f29ea5944019617e65baa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:06:28 -0700 Subject: [PATCH 079/146] Appease Clippy-sama --- crates/types/src/test_helpers.rs | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 47996069ad4..8b3476e5040 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -613,11 +613,10 @@ fn arb_upload_transaction() -> impl Strategy { .map(Bytes32::from) .collect::>(); let subsections_number = subsections_number.max(1); - let subsection_index = subsection_index_candidate % subsections_number; let body = UploadBody { root: root_bytes.into(), witness_index: 0, - subsection_index, + subsection_index: subsection_index_candidate, subsections_number, proof_set, }; @@ -637,22 +636,17 @@ fn arb_blob_transaction() -> impl Strategy { prop::collection::vec(arb_witness(), 0..3), prop::collection::vec(any::(), 0..256), ) - .prop_map( - |(policies, inputs, outputs, mut extra_witnesses, payload)| { - let mut witnesses = Vec::with_capacity(extra_witnesses.len() + 1); - witnesses.push(Witness::from(payload.clone())); - witnesses.append(&mut extra_witnesses); - let blob_id = BlobId::compute(&payload); - let body = BlobBody { - id: blob_id, - witness_index: 0, - }; - let blob = crate::fuel_tx::Transaction::blob( - body, policies, inputs, outputs, witnesses, - ); - Transaction::Blob(blob) - }, - ) + .prop_map(|(policies, inputs, outputs, witnesses, payload)| { + let blob_id = BlobId::compute(&payload); + let body = BlobBody { + id: blob_id, + witness_index: 0, + }; + let blob 
= crate::fuel_tx::Transaction::blob( + body, policies, inputs, outputs, witnesses, + ); + Transaction::Blob(blob) + }) } /// Deterministic `Input::coin_signed` sample used for round-trip testing. From 985f9b3c7965c0acde770b22c1b97881a4f0489a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:11:56 -0700 Subject: [PATCH 080/146] Add `aloc` to allowed spelling --- .typos.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/.typos.toml b/.typos.toml index d6069bd48a2..abe48a23756 100644 --- a/.typos.toml +++ b/.typos.toml @@ -4,4 +4,5 @@ extend-ignore-identifiers-re = [ "tro", "Tro", "typ", + "aloc" ] \ No newline at end of file From 60d269417fe1ee98a7506ff0659b6ad32b5e1ad4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:13:23 -0700 Subject: [PATCH 081/146] add other capitalization of spelling --- .typos.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.typos.toml b/.typos.toml index abe48a23756..a6c9bbab2a3 100644 --- a/.typos.toml +++ b/.typos.toml @@ -4,5 +4,6 @@ extend-ignore-identifiers-re = [ "tro", "Tro", "typ", - "aloc" + "aloc", + "ALOC", ] \ No newline at end of file From d4d273ebd71ff625a908e97669e8daf7618c7f91 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:39:14 -0700 Subject: [PATCH 082/146] Cleanup a bunch of unused and confusing things --- .../serializer_adapter.rs | 104 ++---- crates/types/src/test_helpers.rs | 353 +----------------- 2 files changed, 42 insertions(+), 415 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 195ba3547c6..d3f5406be38 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -824,7 +824,6 @@ fn bytes32_to_vec(bytes: 
&fuel_core_types::fuel_types::Bytes32) -> Vec { bytes.as_ref().to_vec() } -#[cfg(test)] pub fn fuel_block_from_protobuf( proto_block: ProtoBlock, msg_ids: &[fuel_core_types::fuel_tx::MessageId], @@ -1379,95 +1378,40 @@ fn input_from_proto_input(proto_input: &ProtoInput) -> Result { } } -// /// Sets the `gas_price` policy. -// pub fn with_tip(mut self, tip: Word) -> Self { -// self.set(PolicyType::Tip, Some(tip)); -// self -// } -// -// /// Sets the `witness_limit` policy. -// pub fn with_witness_limit(mut self, witness_limit: Word) -> Self { -// self.set(PolicyType::WitnessLimit, Some(witness_limit)); -// self -// } -// -// /// Sets the `maturity` policy. -// pub fn with_maturity(mut self, maturity: BlockHeight) -> Self { -// self.set(PolicyType::Maturity, Some(*maturity.deref() as u64)); -// self -// } -// -// /// Sets the `expiration` policy. -// pub fn with_expiration(mut self, expiration: BlockHeight) -> Self { -// self.set(PolicyType::Expiration, Some(*expiration.deref() as u64)); -// self -// } -// -// /// Sets the `max_fee` policy. -// pub fn with_max_fee(mut self, max_fee: Word) -> Self { -// self.set(PolicyType::MaxFee, Some(max_fee)); -// self -// } -// -// /// Sets the `owner` policy. -// pub fn with_owner(mut self, owner: Word) -> Self { -// self.set(PolicyType::Owner, Some(owner)); -// self -// } -// -// bitflags::bitflags! { -// /// See https://github.com/FuelLabs/fuel-specs/blob/master/src/tx-format/policy.md#policy -// #[derive(Clone, Copy, Default, Debug, PartialEq, Eq, Hash)] -// #[derive(serde::Serialize, serde::Deserialize)] -// pub struct PoliciesBits: u32 { -// /// If set, the gas price is present in the policies. -// const Tip = 1 << 0; -// /// If set, the witness limit is present in the policies. -// const WitnessLimit = 1 << 1; -// /// If set, the maturity is present in the policies. -// const Maturity = 1 << 2; -// /// If set, the max fee is present in the policies. 
-// const MaxFee = 1 << 3; -// /// If set, the expiration is present in the policies. -// const Expiration = 1 << 4; -// /// If set, the owner is present in the policies. -// const Owner = 1 << 5; -// } -// } fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { let ProtoPolicies { bits, values } = proto_policies; let mut policies = FuelPolicies::default(); let bits = PoliciesBits::from_bits(bits).expect("Should be able to create from `u32`"); - if bits.contains(PoliciesBits::Tip) { - if let Some(tip) = values.get(0) { - policies.set(PolicyType::Tip, Some(*tip)); - } + if bits.contains(PoliciesBits::Tip) + && let Some(tip) = values.get(0) + { + policies.set(PolicyType::Tip, Some(*tip)); } - if bits.contains(PoliciesBits::WitnessLimit) { - if let Some(witness_limit) = values.get(1) { - policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); - } + if bits.contains(PoliciesBits::WitnessLimit) + && let Some(witness_limit) = values.get(1) + { + policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); } - if bits.contains(PoliciesBits::Maturity) { - if let Some(maturity) = values.get(2) { - policies.set(PolicyType::Maturity, Some(*maturity)); - } + if bits.contains(PoliciesBits::Maturity) + && let Some(maturity) = values.get(2) + { + policies.set(PolicyType::Maturity, Some(*maturity)); } - if bits.contains(PoliciesBits::MaxFee) { - if let Some(max_fee) = values.get(3) { - policies.set(PolicyType::MaxFee, Some(*max_fee)); - } + if bits.contains(PoliciesBits::MaxFee) + && let Some(max_fee) = values.get(3) + { + policies.set(PolicyType::MaxFee, Some(*max_fee)); } - if bits.contains(PoliciesBits::Expiration) { - if let Some(expiration) = values.get(4) { - policies.set(PolicyType::Expiration, Some(*expiration)); - } + if bits.contains(PoliciesBits::Expiration) + && let Some(expiration) = values.get(4) + { + policies.set(PolicyType::Expiration, Some(*expiration)); } - if bits.contains(PoliciesBits::Owner) { - if let Some(owner) = 
values.get(5) { - policies.set(PolicyType::Owner, Some(*owner)); - } + if bits.contains(PoliciesBits::Owner) + && let Some(owner) = values.get(5) + { + policies.set(PolicyType::Owner, Some(*owner)); } policies } diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 8b3476e5040..37e187f6b81 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -35,8 +35,6 @@ use crate::{ policies::Policies, }, fuel_types::{ - Address, - AssetId, BlobId, BlockHeight, Nonce, @@ -240,19 +238,6 @@ fn arb_inputs() -> impl Strategy> { } prop_compose! { - // pub utxo_id: UtxoId, - // pub owner: Address, - // pub amount: Word, - // pub asset_id: AssetId, - // pub tx_pointer: TxPointer, - // pub witness_index: Specification::Witness, - // pub predicate_gas_used: Specification::PredicateGasUsed, - // pub predicate: Specification::Predicate, - // pub predicate_data: Specification::PredicateData, - // type Predicate = Empty; - // type PredicateData = Empty; - // type PredicateGasUsed = Empty; - // type Witness = u16; fn arb_coin_signed()( utxo_id in arb_utxo_id(), owner in arb_address(), @@ -442,7 +427,6 @@ prop_compose! 
{ } } -#[allow(unused)] fn arb_msg_ids() -> impl Strategy> { prop::collection::vec(arb_msg_id(), 0..10usize) } @@ -547,43 +531,23 @@ fn arb_mint_transaction() -> impl Strategy { } fn arb_upgrade_transaction() -> impl Strategy { - prop_oneof![ - ( - arb_policies(), - arb_inputs(), - arb_outputs(), - prop::collection::vec(arb_witness(), 1..4), - any::<[u8; 32]>(), - ) - .prop_map( - |(policies, inputs, outputs, witnesses, checksum_bytes)| { - let purpose = UpgradePurpose::ConsensusParameters { - witness_index: 0, - checksum: checksum_bytes.into(), - }; - let upgrade = crate::fuel_tx::Transaction::upgrade( - purpose, policies, inputs, outputs, witnesses, - ); - Transaction::Upgrade(upgrade) - } - ), - ( - arb_policies(), - arb_inputs(), - arb_outputs(), - prop::collection::vec(arb_witness(), 0..4), - any::<[u8; 32]>(), - ) - .prop_map(|(policies, inputs, outputs, witnesses, root_bytes)| { - let purpose = UpgradePurpose::StateTransition { - root: root_bytes.into(), - }; - let upgrade = crate::fuel_tx::Transaction::upgrade( - purpose, policies, inputs, outputs, witnesses, - ); - Transaction::Upgrade(upgrade) - }) - ] + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + any::<[u8; 32]>(), + ) + .prop_map(|(policies, inputs, outputs, witnesses, checksum_bytes)| { + let purpose = UpgradePurpose::ConsensusParameters { + witness_index: 0, + checksum: checksum_bytes.into(), + }; + let upgrade = crate::fuel_tx::Transaction::upgrade( + purpose, policies, inputs, outputs, witnesses, + ); + Transaction::Upgrade(upgrade) + }) } fn arb_upload_transaction() -> impl Strategy { @@ -649,281 +613,6 @@ fn arb_blob_transaction() -> impl Strategy { }) } -/// Deterministic `Input::coin_signed` sample used for round-trip testing. 
-pub fn sample_coin_signed_input() -> Input { - let utxo_id = UtxoId::new(Bytes32::from([1u8; 32]), 0); - let owner = Address::new([2u8; 32]); - let asset_id = AssetId::new([3u8; 32]); - let tx_pointer = TxPointer::new(BlockHeight::new(0), 0); - Input::coin_signed(utxo_id, owner, 42, asset_id, tx_pointer, 0) -} - -/// Deterministic `Input::coin_predicate` sample used for round-trip testing. -pub fn sample_coin_predicate_input() -> Input { - let utxo_id = UtxoId::new(Bytes32::from([4u8; 32]), 1); - let owner = Address::new([5u8; 32]); - let asset_id = AssetId::new([6u8; 32]); - let tx_pointer = TxPointer::new(BlockHeight::new(1), 1); - Input::coin_predicate( - utxo_id, - owner, - 84, - asset_id, - tx_pointer, - 10, - vec![0xaa, 0xbb], - vec![0xcc, 0xdd], - ) -} - -/// Deterministic `Input::Contract` sample used for round-trip testing. -pub fn sample_contract_input() -> Input { - let contract = InputContract { - utxo_id: UtxoId::new(Bytes32::from([7u8; 32]), 2), - balance_root: Bytes32::from([8u8; 32]), - state_root: Bytes32::from([9u8; 32]), - tx_pointer: TxPointer::new(BlockHeight::new(2), 2), - contract_id: ContractId::new([10u8; 32]), - }; - Input::Contract(contract) -} - -/// Deterministic `Input::message_coin_signed` sample used for round-trip testing. -pub fn sample_message_coin_signed_input() -> Input { - let sender = Address::new([11u8; 32]); - let recipient = Address::new([12u8; 32]); - let nonce = Nonce::new([13u8; 32]); - Input::message_coin_signed(sender, recipient, 21, nonce, 0) -} - -/// Deterministic `Input::message_coin_predicate` sample used for round-trip testing. -pub fn sample_message_coin_predicate_input() -> Input { - let sender = Address::new([14u8; 32]); - let recipient = Address::new([15u8; 32]); - let nonce = Nonce::new([16u8; 32]); - Input::message_coin_predicate( - sender, - recipient, - 22, - nonce, - 5, - vec![0x01, 0x02], - vec![0x03, 0x04], - ) -} - -/// Deterministic `Input::message_data_signed` sample used for round-trip testing. 
-pub fn sample_message_data_signed_input() -> Input { - let sender = Address::new([17u8; 32]); - let recipient = Address::new([18u8; 32]); - let nonce = Nonce::new([19u8; 32]); - Input::message_data_signed( - sender, - recipient, - 23, - nonce, - 1, - vec![0xde, 0xad, 0xbe, 0xef], - ) -} - -/// Deterministic `Input::message_data_predicate` sample used for round-trip testing. -pub fn sample_message_data_predicate_input() -> Input { - let sender = Address::new([20u8; 32]); - let recipient = Address::new([21u8; 32]); - let nonce = Nonce::new([22u8; 32]); - Input::message_data_predicate( - sender, - recipient, - 24, - nonce, - 6, - vec![0x99, 0x88], - vec![0x77], - vec![0x66], - ) -} - -/// Collection of sample inputs covering every input variant. -pub fn sample_inputs() -> Vec { - vec![ - sample_coin_signed_input(), - sample_coin_predicate_input(), - sample_contract_input(), - sample_message_coin_signed_input(), - sample_message_coin_predicate_input(), - sample_message_data_signed_input(), - sample_message_data_predicate_input(), - ] -} - -/// Collection of sample outputs covering every output variant. -pub fn sample_outputs() -> Vec { - vec![ - Output::coin(Address::new([23u8; 32]), 50, AssetId::new([24u8; 32])), - Output::Contract(OutputContract { - input_index: 0, - balance_root: Bytes32::from([25u8; 32]), - state_root: Bytes32::from([26u8; 32]), - }), - Output::change(Address::new([27u8; 32]), 60, AssetId::new([28u8; 32])), - Output::variable(Address::new([29u8; 32]), 70, AssetId::new([30u8; 32])), - Output::contract_created(ContractId::new([31u8; 32]), Bytes32::from([32u8; 32])), - ] -} - -/// Sample `Transaction::Script` covering scripts, inputs, outputs, and witnesses. 
-pub fn sample_script_transaction() -> Transaction { - let policies = Policies::new().with_witness_limit(10); - let inputs = vec![ - sample_coin_signed_input(), - sample_message_data_signed_input(), - ]; - let outputs = vec![Output::coin( - Address::new([40u8; 32]), - 11, - AssetId::new([41u8; 32]), - )]; - let witnesses = vec![Witness::from(vec![0x01, 0x02, 0x03])]; - let mut script = crate::fuel_tx::Transaction::script( - 1_000, - vec![0x10, 0x20], - vec![0x30, 0x40], - policies, - inputs, - outputs, - witnesses, - ); - *script.receipts_root_mut() = Bytes32::from([33u8; 32]); - Transaction::Script(script) -} - -/// Sample `Transaction::Create` with deterministic storage slots and witnesses. -pub fn sample_create_transaction() -> Transaction { - let policies = Policies::new(); - let storage_slots = vec![StorageSlot::new( - Bytes32::from([34u8; 32]), - Bytes32::from([35u8; 32]), - )]; - let inputs = vec![sample_coin_signed_input()]; - let outputs = vec![Output::contract_created( - ContractId::new([36u8; 32]), - Bytes32::from([37u8; 32]), - )]; - let witnesses = vec![Witness::from(vec![0xaa, 0xbb])]; - let create = crate::fuel_tx::Transaction::create( - 0, - policies, - crate::fuel_types::Salt::from([38u8; 32]), - storage_slots, - inputs, - outputs, - witnesses, - ); - Transaction::Create(create) -} - -/// Sample `Transaction::Mint` with deterministic contracts and asset data. 
-pub fn sample_mint_transaction() -> Transaction { - let tx_pointer = TxPointer::new(BlockHeight::new(5), 0); - let input_contract = InputContract { - utxo_id: UtxoId::new(Bytes32::from([39u8; 32]), 3), - balance_root: Bytes32::from([40u8; 32]), - state_root: Bytes32::from([41u8; 32]), - tx_pointer, - contract_id: ContractId::new([42u8; 32]), - }; - let output_contract = OutputContract { - input_index: 0, - balance_root: Bytes32::from([43u8; 32]), - state_root: Bytes32::from([44u8; 32]), - }; - let mint_asset_id = AssetId::new([45u8; 32]); - let mint = crate::fuel_tx::Transaction::mint( - tx_pointer, - input_contract, - output_contract, - 99, - mint_asset_id, - 1, - ); - Transaction::Mint(mint) -} - -/// Sample `Transaction::Upgrade` using a state transition purpose. -pub fn sample_upgrade_transaction() -> Transaction { - let policies = Policies::new(); - let inputs = vec![sample_coin_signed_input()]; - let outputs = vec![Output::coin( - Address::new([46u8; 32]), - 5, - AssetId::new([47u8; 32]), - )]; - let witnesses = vec![Witness::from(vec![0x11, 0x22])]; - let purpose = UpgradePurpose::StateTransition { - root: Bytes32::from([48u8; 32]), - }; - let upgrade = crate::fuel_tx::Transaction::upgrade( - purpose, policies, inputs, outputs, witnesses, - ); - Transaction::Upgrade(upgrade) -} - -/// Sample `Transaction::Upload` with deterministic proof set and witness index. 
-pub fn sample_upload_transaction() -> Transaction { - let policies = Policies::new(); - let inputs = vec![sample_coin_signed_input()]; - let outputs = vec![Output::change( - Address::new([49u8; 32]), - 3, - AssetId::new([50u8; 32]), - )]; - let witnesses = vec![Witness::from(vec![0x33, 0x44, 0x55])]; - let body = UploadBody { - root: Bytes32::from([51u8; 32]), - witness_index: 0, - subsection_index: 0, - subsections_number: 1, - proof_set: vec![Bytes32::from([52u8; 32])], - }; - let upload = - crate::fuel_tx::Transaction::upload(body, policies, inputs, outputs, witnesses); - Transaction::Upload(upload) -} - -/// Sample `Transaction::Blob` using a computed blob ID and payload witness. -pub fn sample_blob_transaction() -> Transaction { - let policies = Policies::new(); - let inputs = vec![sample_coin_signed_input()]; - let outputs = vec![Output::coin( - Address::new([53u8; 32]), - 7, - AssetId::new([54u8; 32]), - )]; - let payload = vec![0x99, 0x00, 0x99]; - let witnesses = vec![Witness::from(payload.clone())]; - let blob_id = BlobId::compute(&payload); - let body = BlobBody { - id: blob_id, - witness_index: 0, - }; - let blob = - crate::fuel_tx::Transaction::blob(body, policies, inputs, outputs, witnesses); - Transaction::Blob(blob) -} - -/// Collection of sample transactions covering every transaction variant. -pub fn sample_transactions() -> Vec { - vec![ - sample_script_transaction(), - sample_create_transaction(), - sample_mint_transaction(), - sample_upgrade_transaction(), - sample_upload_transaction(), - sample_blob_transaction(), - ] -} - prop_compose! { fn arb_consensus_header()( prev_root in any::<[u8; 32]>(), @@ -941,7 +630,7 @@ prop_compose! { prop_compose! { /// Generate an arbitrary block with a variable number of transactions pub fn arb_block()( - script_tx in arb_script_transaction(), + txs in arb_txs(), da_height in any::(), consensus_parameter_version in any::(), state_transition_bytecode_version in any::(), @@ -951,12 +640,6 @@ prop_compose! 
{ ) -> (Block, Vec, Bytes32) { let mut fuel_block = Block::default(); - let mut txs = sample_transactions(); - if !txs.is_empty() { - txs[0] = script_tx; - } - - // include txs first to be included in calculations *fuel_block.transactions_mut() = txs; fuel_block.header_mut().set_da_height(DaBlockHeight(da_height)); From 0547f55967199f3ce7c702a89eac074373564af9 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:51:40 -0700 Subject: [PATCH 083/146] fix import --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index d3f5406be38..e163ab190f0 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -50,7 +50,6 @@ use crate::{ use anyhow::anyhow; #[cfg(feature = "fault-proving")] use fuel_core_types::blockchain::header::BlockHeaderV2; -#[cfg(all(test, feature = "fault-proving"))] use fuel_core_types::fuel_types::ChainId; use fuel_core_types::{ From 69d202b05dc5132bed29ef7a7997f7d6cd25ea89 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 13:58:52 -0700 Subject: [PATCH 084/146] Fix it more --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index e163ab190f0..bb3f10e293e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ 
-49,8 +49,10 @@ use crate::{ }; use anyhow::anyhow; #[cfg(feature = "fault-proving")] -use fuel_core_types::blockchain::header::BlockHeaderV2; -use fuel_core_types::fuel_types::ChainId; +use fuel_core_types::{ + blockchain::header::BlockHeaderV2, + fuel_types::ChainId, +}; use fuel_core_types::{ blockchain::{ From 27a9bc41231a1d8cef17d9fb7fe1537a6ba520d1 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 14:09:22 -0700 Subject: [PATCH 085/146] Appease Clippy-sama --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index bb3f10e293e..0bc6dc661bf 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1385,7 +1385,7 @@ fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { let bits = PoliciesBits::from_bits(bits).expect("Should be able to create from `u32`"); if bits.contains(PoliciesBits::Tip) - && let Some(tip) = values.get(0) + && let Some(tip) = values.first() { policies.set(PolicyType::Tip, Some(*tip)); } From 15151045ed0b5abf961427114abe78a294dee6f4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 15:54:09 -0700 Subject: [PATCH 086/146] Ignore test in fault-proving feature --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 0bc6dc661bf..6bae00872bc 100644 --- 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1496,6 +1496,7 @@ mod tests { use fuel_core_types::test_helpers::arb_block; use proptest::prelude::*; + #[cfg(not(feature = "fault-proving"))] proptest! { #![proptest_config(ProptestConfig { cases: 100, .. ProptestConfig::default() From 74601d8ab813f2667e96a69af8011177b977399a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 3 Nov 2025 15:59:10 -0700 Subject: [PATCH 087/146] Move feature flage to ignore imports as well --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index 6bae00872bc..ac8c817c6ab 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1489,6 +1489,7 @@ pub fn proto_header_to_empty_consensus_header( } } +#[cfg(not(feature = "fault-proving"))] #[allow(non_snake_case)] #[cfg(test)] mod tests { @@ -1496,7 +1497,6 @@ mod tests { use fuel_core_types::test_helpers::arb_block; use proptest::prelude::*; - #[cfg(not(feature = "fault-proving"))] proptest! { #![proptest_config(ProptestConfig { cases: 100, .. 
ProptestConfig::default() From fa63c40174ff6705cc4898a8225d759c808a3caa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 4 Nov 2025 07:41:06 -0700 Subject: [PATCH 088/146] Include issue with TODO --- .../src/blocks/importer_and_db_source/serializer_adapter.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index ac8c817c6ab..fa7e7db2d8f 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1489,6 +1489,8 @@ pub fn proto_header_to_empty_consensus_header( } } +// TODO: Add coverage for V2 Block stuff +// https://github.com/FuelLabs/fuel-core/issues/3139 #[cfg(not(feature = "fault-proving"))] #[allow(non_snake_case)] #[cfg(test)] From d1a17ee8d19ded9899210152e31867431a446b3b Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 5 Nov 2025 15:13:23 -0700 Subject: [PATCH 089/146] fix compilation --- .../src/block_aggregator.rs | 3 +-- .../src/db/remote_cache.rs | 18 ++++++++---------- .../block_aggregator_api/src/db/storage_db.rs | 4 ++-- .../services/block_aggregator_api/src/lib.rs | 2 +- 4 files changed, 12 insertions(+), 15 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index c5793919e7a..4fde80d22b7 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -8,7 +8,7 @@ use crate::{ BlockSource, BlockSourceEvent, }, - db::BlockStorage, + db::BlockAggregatorDB, }; use fuel_core_services::{ TaskNextAction, @@ -20,7 +20,6 @@ impl BlockAggregator, DB: BlockAggregatorDB, - Blocks: BlockSource, ::Block: Clone + std::fmt::Debug, 
BlockRangeResponse: Send, diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 9327d16f02c..61476fd51b9 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -1,35 +1,33 @@ use crate::{ block_range_response::BlockRangeResponse, blocks::Block, - db::BlockStorage, + db::BlockAggregatorDB, }; use fuel_core_types::fuel_types::BlockHeight; pub struct RemoteCache; -impl BlockStorage for RemoteCache { +impl BlockAggregatorDB for RemoteCache { + type Block = Block; type BlockRangeResponse = BlockRangeResponse; - fn store_block( + async fn store_block( &mut self, height: BlockHeight, block: Block, - ) -> impl Future> + Send { + ) -> crate::result::Result<()> { todo!() } - fn get_block_range( + async fn get_block_range( &self, first: BlockHeight, last: BlockHeight, - ) -> impl Future> + Send - { + ) -> crate::result::Result { todo!() } - fn get_current_height( - &self, - ) -> impl Future> + Send { + async fn get_current_height(&self) -> crate::result::Result { todo!() } } diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 4ded2d2dcf0..7aeac0a91d1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,7 +1,7 @@ use crate::{ block_range_response::BlockRangeResponse, db::{ - BlockStorage, + BlockAggregatorDB, storage_db::table::Column, }, protobuf_types::Block as ProtoBlock, @@ -96,7 +96,7 @@ impl StorageDB { } } -impl BlockStorage for StorageDB +impl BlockAggregatorDB for StorageDB where S: Modifiable + std::fmt::Debug, S: KeyValueInspect, diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index a0efb6d688a..e3e9057d7d7 100644 --- 
a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -74,7 +74,7 @@ pub mod integration { >, > where - DB: BlockStorage< + DB: BlockAggregatorDB< BlockRangeResponse = ::BlockRangeResponse, Block = ProtoBlock, >, From 88f9345d3df229e1d199981280ac56cf5a4e0d63 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 5 Nov 2025 17:09:37 -0700 Subject: [PATCH 090/146] Add protobuf serialization --- .../src/db/storage_db/table.rs | 43 ++++++++++++++++++- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index be11785c7af..f161b245240 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -2,11 +2,15 @@ use crate::protobuf_types::Block as ProtoBlock; use fuel_core_storage::{ Mappable, blueprint::plain::Plain, - codec::postcard::Postcard, + codec::{ + Decode, + Encode, + }, kv_store::StorageColumn, structured_storage::TableWithBlueprint, }; use fuel_core_types::fuel_types::BlockHeight; +use std::borrow::Cow; #[repr(u32)] #[derive( @@ -55,10 +59,45 @@ impl Mappable for Blocks { } impl TableWithBlueprint for Blocks { - type Blueprint = Plain; + type Blueprint = Plain, ProtoBufCodec>; type Column = Column; fn column() -> Self::Column { Column::Blocks } } + +use fuel_core_storage::codec::{ + primitive::Primitive, + raw::Raw, +}; +use prost::Message; + +pub struct ProtoBufCodec; + +impl Encode for ProtoBufCodec +where + T: Sized + Message, +{ + type Encoder<'a> + = Cow<'a, [u8]> + where + T: 'a; + + fn encode(value: &T) -> Self::Encoder<'_> { + let mut buffer = Vec::new(); + value.encode(&mut buffer).expect( + "It should be impossible to fail unless serialization is not implemented, which is not true for our types.", + ); + buffer.into() + } +} + +impl Decode for ProtoBufCodec +where + T: 
Message + Default, +{ + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(T::decode(bytes)?) + } +} From 443b0e4db3381e7c9bbe1ca92a065dade8234864 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 6 Nov 2025 12:21:57 -0700 Subject: [PATCH 091/146] Add s3 client tests --- Cargo.lock | 440 +++++++++++++++--- .../services/block_aggregator_api/Cargo.toml | 3 + .../block_aggregator_api/src/blocks.rs | 6 +- .../blocks/importer_and_db_source/tests.rs | 8 +- .../src/db/remote_cache.rs | 124 ++++- .../block_aggregator_api/src/result.rs | 6 + .../block_aggregator_api/src/tests.rs | 44 +- 7 files changed, 538 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f297766eb7a..27605be9985 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -710,13 +710,14 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.5.12" +version = "1.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa006bb32360ed90ac51203feafb9d02e3d21046e1fd3a450a404b90ea73e5d" +checksum = "9f2402da1a5e16868ba98725e5d73f26b8116eaa892e56f2cd0bf5eec7985f70" dependencies = [ "aws-credential-types", "aws-sigv4", "aws-smithy-async", + "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-runtime", "aws-smithy-runtime-api", @@ -754,6 +755,40 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-sdk-s3" +version = "1.111.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55c660aeffc79b575971b67cd479af02d486f2c97e936d7dea2866bee0dac8ff" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex", + "hmac", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "lru 0.12.5", + "percent-encoding", + "regex-lite", + "sha2 0.10.9", + "tracing", + "url", 
+] + [[package]] name = "aws-sdk-sso" version = "1.86.0" @@ -823,24 +858,30 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.3.5" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffc03068fbb9c8dd5ce1c6fb240678a5cffb86fb2b7b1985c999c4b83c8df68" +checksum = "c35452ec3f001e1f2f6db107b6373f1f48f05ec63ba2c5c9fa91f07dad32af11" dependencies = [ "aws-credential-types", + "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", + "crypto-bigint 0.5.5", "form_urlencoded", "hex", "hmac", "http 0.2.12", "http 1.3.1", + "p256 0.11.1", "percent-encoding", + "ring 0.17.14", "sha2 0.10.9", + "subtle", "time", "tracing", + "zeroize", ] [[package]] @@ -854,17 +895,50 @@ dependencies = [ "tokio", ] +[[package]] +name = "aws-smithy-checksums" +version = "0.63.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb9a26b2831e728924ec0089e92697a78a2f9cdcf90d81e8cfcc6a6c85080369" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc-fast", + "hex", + "http 0.2.12", + "http-body 0.4.6", + "md-5", + "pin-project-lite", + "sha1", + "sha2 0.10.9", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e29a304f8319781a39808847efb39561351b1bb76e933da7aa90232673638658" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + [[package]] name = "aws-smithy-http" -version = "0.62.4" +version = "0.62.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3feafd437c763db26aa04e0cc7591185d0961e64c61885bece0fb9d50ceac671" +checksum = "445d5d720c99eed0b4aa674ed00d835d9b1427dd73e04adaf2f94c6b2d6f9fca" dependencies = [ + "aws-smithy-eventstream", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", "bytes-utils", "futures-core", + "futures-util", "http 0.2.12", "http 1.3.1", "http-body 0.4.6", @@ 
-876,28 +950,34 @@ dependencies = [ [[package]] name = "aws-smithy-http-client" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1053b5e587e6fa40ce5a79ea27957b04ba660baa02b28b7436f64850152234f1" +checksum = "623254723e8dfd535f566ee7b2381645f8981da086b5c4aa26c0c41582bb1d2c" dependencies = [ "aws-smithy-async", + "aws-smithy-protocol-test", "aws-smithy-runtime-api", "aws-smithy-types", + "bytes", "h2 0.3.27", "h2 0.4.12", "http 0.2.12", "http 1.3.1", "http-body 0.4.6", + "http-body 1.0.1", "hyper 0.14.32", "hyper 1.7.0", "hyper-rustls 0.24.2", "hyper-rustls 0.27.7", "hyper-util", + "indexmap 2.12.0", "pin-project-lite", "rustls 0.21.12", "rustls 0.23.33", "rustls-native-certs 0.8.2", "rustls-pki-types", + "serde", + "serde_json", "tokio", "tokio-rustls 0.26.4", "tower 0.5.2", @@ -906,11 +986,23 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.61.6" +version = "0.61.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2db31f727935fc63c6eeae8b37b438847639ec330a9161ece694efba257e0c54" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-mocks" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff418fc8ec5cadf8173b10125f05c2e7e1d46771406187b2c878557d4503390" +checksum = "99c0a641ee7f8a95a041659855f473166db87c3135b640029ba42772a4ea0a04" dependencies = [ + "aws-smithy-http-client", + "aws-smithy-runtime-api", "aws-smithy-types", + "http 1.3.1", ] [[package]] @@ -922,6 +1014,25 @@ dependencies = [ "aws-smithy-runtime-api", ] +[[package]] +name = "aws-smithy-protocol-test" +version = "0.63.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa808d23a8edf0da73f6812d06d8c0a48d70f05d2d3696362982aad11ee475b7" +dependencies = [ + "assert-json-diff", + "aws-smithy-runtime-api", + "base64-simd", + "cbor-diag", + "ciborium", + "http 0.2.12", + "pretty_assertions", + 
"regex-lite", + "roxmltree", + "serde_json", + "thiserror 2.0.17", +] + [[package]] name = "aws-smithy-query" version = "0.60.8" @@ -934,9 +1045,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ab99739082da5347660c556689256438defae3bcefd66c52b095905730e404" +checksum = "0bbe9d018d646b96c7be063dd07987849862b0e6d07c778aad7d93d1be6c1ef0" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -954,13 +1065,14 @@ dependencies = [ "pin-utils", "tokio", "tracing", + "tracing-subscriber", ] [[package]] name = "aws-smithy-runtime-api" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3683c5b152d2ad753607179ed71988e8cfd52964443b4f74fd8e552d0bbfeb46" +checksum = "ec7204f9fd94749a7c53b26da1b961b4ac36bf070ef1e0b94bb09f79d4f6c193" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -975,9 +1087,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.3" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f5b3a7486f6690ba25952cabf1e7d75e34d69eaff5081904a47bc79074d6457" +checksum = "25f535879a207fce0db74b679cfc3e91a3159c8144d717d55f5832aea9eef46e" dependencies = [ "base64-simd", "bytes", @@ -1001,18 +1113,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.11" +version = "0.60.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c34127e8c624bc2999f3b657e749c1393bedc9cd97b92a804db8ced4d2e163" +checksum = "eab77cdd036b11056d2a30a7af7b775789fb024bf216acc13884c6c97752ae56" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "1.3.9" +version = "1.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2fd329bf0e901ff3f60425691410c69094dc2a1f34b331f37bfc4e9ac1565a1" +checksum = 
"d79fb68e3d7fe5d4833ea34dc87d2e97d26d3086cb3da660bb6b1f76d98680b6" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -1226,6 +1338,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base16ct" version = "0.2.0" @@ -1578,6 +1696,25 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbor-diag" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc245b6ecd09b23901a4fbad1ad975701fd5061ceaef6afa93a2d70605a64429" +dependencies = [ + "bs58", + "chrono", + "data-encoding", + "half", + "nom", + "num-bigint", + "num-rational", + "num-traits", + "separator", + "url", + "uuid 1.18.1", +] + [[package]] name = "cc" version = "1.2.41" @@ -2002,13 +2139,13 @@ checksum = "1394c263335da09e8ba8c4b2c675d804e3e0deb44cce0866a5f838d3ddd43d02" dependencies = [ "bip32", "cosmos-sdk-proto", - "ecdsa", + "ecdsa 0.16.9", "eyre", "k256", "rand_core 0.6.4", "serde", "serde_json", - "signature", + "signature 2.2.0", "subtle-encoding", "tendermint 0.40.4", "thiserror 1.0.69", @@ -2169,6 +2306,34 @@ version = "0.121.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3d9071bc5ee5573e723d9d84a45b7025a29e8f2c5ad81b3b9d0293129541d9" +[[package]] +name = "crc" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + +[[package]] +name = "crc-fast" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bf62af4cc77d8fe1c22dde4e721d87f2f54056139d8c412e1366b740305f56f" +dependencies = [ + "crc", + "digest 0.10.7", + "libc", + "rand 0.9.2", + "regex", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -2287,6 +2452,18 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -2540,6 +2717,16 @@ dependencies = [ "uuid 1.18.1", ] +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der" version = "0.7.10" @@ -2786,18 +2973,30 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der", + "der 0.7.10", "digest 0.10.7", - "elliptic-curve", - "rfc6979", - "signature", - "spki", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] @@ -2806,9 +3005,9 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8", + "pkcs8 0.10.2", "serde", - "signature", + "signature 2.2.0", ] [[package]] @@ -2857,21 +3056,41 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.5", "digest 0.10.7", - "ff", + "ff 0.13.1", "generic-array", - "group", - "pkcs8", + "group 0.13.0", + "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -3178,7 +3397,7 @@ dependencies = [ "cargo_metadata", "chrono", "const-hex", - "elliptic-curve", + "elliptic-curve 0.13.8", "ethabi", "generic-array", "k256", @@ -3288,7 +3507,7 @@ dependencies = [ "coins-bip32", "coins-bip39", "const-hex", - "elliptic-curve", + "elliptic-curve 0.13.8", "eth-keystore", "ethers-core", "rand 
0.8.5", @@ -3400,6 +3619,16 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.1" @@ -3666,7 +3895,7 @@ dependencies = [ "hex", "itertools 0.12.1", "num_enum", - "p256", + "p256 0.13.2", "postcard", "primitive-types", "quanta", @@ -3731,6 +3960,8 @@ version = "0.47.1" dependencies = [ "anyhow", "async-trait", + "aws-sdk-s3", + "aws-smithy-mocks", "bytes", "enum-iterator", "fuel-core-services", @@ -4269,7 +4500,7 @@ dependencies = [ "reqwest 0.12.24", "rstest", "serde_json", - "spki", + "spki 0.7.3", "tempfile", "test-case", "test-helpers", @@ -4424,11 +4655,11 @@ version = "0.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33548590131674e8f272a3e056be4dbaa1de7cb364eab2b17987cd5c0dc31cb0" dependencies = [ - "ecdsa", + "ecdsa 0.16.9", "ed25519-dalek", "fuel-types 0.56.0", "k256", - "p256", + "p256 0.13.2", "serde", "sha2 0.10.9", "zeroize", @@ -4443,11 +4674,11 @@ dependencies = [ "base64ct", "coins-bip32", "coins-bip39", - "ecdsa", + "ecdsa 0.16.9", "ed25519-dalek", "fuel-types 0.65.0", "k256", - "p256", + "p256 0.13.2", "rand 0.8.5", "secp256k1 0.30.0", "serde", @@ -4945,13 +5176,24 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.1", "rand_core 0.6.4", "subtle", ] @@ -5906,11 +6148,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "once_cell", "sha2 0.10.9", - "signature", + "signature 2.2.0", ] [[package]] @@ -7348,14 +7590,25 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.9", +] + [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.9", ] @@ -7687,14 +7940,24 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", +] + [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.10", + "spki 0.7.3", ] [[package]] @@ -7904,7 +8167,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.8", ] [[package]] @@ -8680,6 +8943,17 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac", + "zeroize", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -8775,6 +9049,15 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "roxmltree" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" +dependencies = [ + "xmlparser", +] + [[package]] name = "rstest" version = "0.15.0" @@ -9130,16 +9413,30 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct", - "der", + "base16ct 0.2.0", + "der 0.7.10", "generic-array", - "pkcs8", + "pkcs8 0.10.2", "subtle", "zeroize", ] @@ -9249,6 +9546,12 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" +[[package]] +name = "separator" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" + [[package]] name = "seq-macro" version = "0.3.6" @@ -9301,6 +9604,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ + "indexmap 2.12.0", "itoa", "memchr", "ryu", @@ -9474,6 +9778,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "signature" version = "2.2.0" @@ -9627,6 +9941,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -9634,7 +9958,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.10", ] [[package]] @@ -9990,7 +10314,7 @@ dependencies = [ "serde_json", "serde_repr", "sha2 0.10.9", - "signature", + "signature 2.2.0", "subtle", "subtle-encoding", "tendermint-proto 0.36.0", @@ -10020,7 +10344,7 @@ dependencies = [ "serde_json", "serde_repr", "sha2 0.10.9", - "signature", + "signature 2.2.0", "subtle", "subtle-encoding", "tendermint-proto 0.40.4", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 03342654df9..d74a655b29f 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -16,6 +16,8 @@ fault-proving = ["fuel-core-types/fault-proving"] 
[dependencies] anyhow = { workspace = true } async-trait = { workspace = true } +aws-sdk-s3 = "1.111.0" +aws-smithy-mocks = "0.2.0" bytes = { workspace = true, features = ["serde"] } enum-iterator = { workspace = true } fuel-core-services = { workspace = true } @@ -46,4 +48,5 @@ fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } proptest = { workspace = true } tokio-stream = { workspace = true } +aws-sdk-s3 = { version = "1.111.0", features = ["test-util"] } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index fb8dc76a9c1..397b83b7755 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -24,11 +24,11 @@ pub enum BlockSourceEvent { } #[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] -pub struct Block { +pub struct BlockBytes { bytes: Bytes, } -impl Block { +impl BlockBytes { pub fn new(bytes: Bytes) -> Self { Self { bytes } } @@ -50,7 +50,7 @@ impl Block { } } -impl From> for Block { +impl From> for BlockBytes { fn from(value: Vec) -> Self { let bytes = Bytes::from(value); Self::new(bytes) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 64d0256dbae..3820fafbf0e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,7 +1,7 @@ #![allow(non_snake_case)] use super::*; -use crate::blocks::Block; +use crate::blocks::BlockBytes; use ::postcard::to_allocvec; use fuel_core_services::stream::{ IntoBoxStream, @@ -35,13 +35,13 @@ use std::sync::Arc; pub struct MockSerializer; impl BlockSerializer for MockSerializer 
{ - type Block = Block; + type Block = BlockBytes; - fn serialize_block(&self, block: &FuelBlock) -> Result { + fn serialize_block(&self, block: &FuelBlock) -> Result { let bytes_vec = to_allocvec(block).map_err(|e| { Error::BlockSource(anyhow!("failed to serialize block: {}", e)) })?; - Ok(Block::from(bytes_vec)) + Ok(BlockBytes::from(bytes_vec)) } } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 61476fd51b9..537ce2484d0 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -1,22 +1,71 @@ use crate::{ block_range_response::BlockRangeResponse, - blocks::Block, + blocks::BlockBytes, db::BlockAggregatorDB, + protobuf_types::Block as ProtoBlock, + result::Error, }; -use fuel_core_types::fuel_types::BlockHeight; +use aws_sdk_s3::{ + self, + Client, + primitives::ByteStream, +}; +use fuel_core_types::{ + blockchain::block::Block as FuelBlock, + fuel_types::BlockHeight, +}; +use prost::Message; -pub struct RemoteCache; +#[allow(unused)] +pub struct RemoteCache { + aws_id: String, + aws_secret: String, + aws_region: String, + aws_bucket: String, + client: Client, + head: Option, +} + +impl RemoteCache { + pub fn new( + aws_id: String, + aws_secret: String, + aws_region: String, + aws_bucket: String, + client: Client, + ) -> RemoteCache { + RemoteCache { + aws_id, + aws_secret, + aws_region, + aws_bucket, + client, + } + } +} impl BlockAggregatorDB for RemoteCache { - type Block = Block; + type Block = ProtoBlock; type BlockRangeResponse = BlockRangeResponse; async fn store_block( &mut self, height: BlockHeight, - block: Block, + block: ProtoBlock, ) -> crate::result::Result<()> { - todo!() + let key = block_height_to_key(&height); + let mut buf = Vec::new(); + block.encode(&mut buf).map_err(Error::db_error)?; + let body = ByteStream::from(buf); + let req = self + .client + .put_object() + 
.bucket(&self.aws_bucket) + .key(&key) + .body(body) + .content_type("application/octet-stream"); + let _ = req.send().await.map_err(Error::db_error)?; + Ok(()) } async fn get_block_range( @@ -31,3 +80,66 @@ impl BlockAggregatorDB for RemoteCache { todo!() } } + +pub fn block_height_to_key(height: &BlockHeight) -> String { + format!("{:08x}", height) +} + +#[allow(non_snake_case)] +#[cfg(test)] +mod tests { + use super::*; + use crate::blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }; + use aws_sdk_s3::{ + operation::{ + get_object::GetObjectOutput, + put_object::PutObjectOutput, + }, + primitives::ByteStream, + }; + use aws_smithy_mocks::{ + RuleMode, + mock, + mock_client, + }; + + fn arb_proto_block() -> ProtoBlock { + let block = FuelBlock::default(); + let mut serializer = SerializerAdapter; + let proto_block = serializer.serialize_block(&block).unwrap(); + proto_block + } + + #[tokio::test] + async fn store_block__happy_path() { + let put_happy_rule = mock!(Client::put_object) + .match_requests(|req| req.bucket() == Some("test-bucket")) + .sequence() + .output(|| PutObjectOutput::builder().build()) + .build(); + // given + let client = mock_client!(aws_sdk_s3, [&put_happy_rule]); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let mut adapter = + RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client); + let block_height = BlockHeight::new(123); + let block = arb_proto_block(); + + // when + let res = adapter.store_block(block_height, block).await; + + // then + assert!(res.is_ok()); + } + + #[tokio::test] + async fn get_block_range__happy_path() { + todo!() + } +} diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index ab91f71ece0..bbe500cab56 100644 --- 
a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -11,4 +11,10 @@ pub enum Error { Serialization(anyhow::Error), } +impl Error { + pub fn db_error>(err: T) -> Self { + Error::DB(err.into()) + } +} + pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index d8b9a8744e5..a763f75cef5 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -4,7 +4,7 @@ use super::*; use crate::{ api::BlockAggregatorQuery, blocks::{ - Block, + BlockBytes, BlockSourceEvent, }, result::{ @@ -34,7 +34,7 @@ use tokio::{ time::error::Elapsed, }; -type BlockRangeResponse = BoxStream; +type BlockRangeResponse = BoxStream; struct FakeApi { receiver: Receiver>, @@ -57,7 +57,7 @@ impl BlockAggregatorApi for FakeApi { } struct FakeDB { - map: Arc>>, + map: Arc>>, } impl FakeDB { @@ -66,20 +66,20 @@ impl FakeDB { Self { map } } - fn add_block(&mut self, height: BlockHeight, block: Block) { + fn add_block(&mut self, height: BlockHeight, block: BlockBytes) { self.map.lock().unwrap().insert(height, block); } - fn clone_inner(&self) -> Arc>> { + fn clone_inner(&self) -> Arc>> { self.map.clone() } } impl BlockAggregatorDB for FakeDB { - type Block = Block; + type Block = BlockBytes; type BlockRangeResponse = BlockRangeResponse; - async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { + async fn store_block(&mut self, id: BlockHeight, block: BlockBytes) -> Result<()> { self.map.lock().unwrap().insert(id, block); Ok(()) } @@ -88,7 +88,7 @@ impl BlockAggregatorDB for FakeDB { &self, first: BlockHeight, last: BlockHeight, - ) -> Result> { + ) -> Result> { let mut blocks = vec![]; let first: u32 = first.into(); let last: u32 = last.into(); @@ -113,11 +113,11 @@ impl BlockAggregatorDB for FakeDB { } struct FakeBlockSource { - blocks: Receiver>, + blocks: 
Receiver>, } impl FakeBlockSource { - fn new() -> (Self, Sender>) { + fn new() -> (Self, Sender>) { let (_sender, receiver) = tokio::sync::mpsc::channel(1); let _self = Self { blocks: receiver }; (_self, _sender) @@ -125,9 +125,9 @@ impl FakeBlockSource { } impl BlockSource for FakeBlockSource { - type Block = Block; + type Block = BlockBytes; - async fn next_block(&mut self) -> Result> { + async fn next_block(&mut self) -> Result> { self.blocks .recv() .await @@ -145,9 +145,9 @@ async fn run__get_block_range__returns_expected_blocks() { // given let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); - db.add_block(1.into(), Block::random(&mut rng)); - db.add_block(2.into(), Block::random(&mut rng)); - db.add_block(3.into(), Block::random(&mut rng)); + db.add_block(1.into(), BlockBytes::random(&mut rng)); + db.add_block(2.into(), BlockBytes::random(&mut rng)); + db.add_block(3.into(), BlockBytes::random(&mut rng)); let (source, _block_sender) = FakeBlockSource::new(); @@ -161,7 +161,7 @@ async fn run__get_block_range__returns_expected_blocks() { // then let stream = response.await.unwrap(); - let blocks = stream.collect::>().await; + let blocks = stream.collect::>().await; // TODO: Check values assert_eq!(blocks.len(), 2); @@ -180,7 +180,7 @@ async fn run__new_block_gets_added_to_db() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - let block = Block::random(&mut rng); + let block = BlockBytes::random(&mut rng); let id = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); @@ -202,9 +202,9 @@ async fn run__get_current_height__returns_expected_height() { let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); let expected_height = BlockHeight::from(3u32); - db.add_block(1.into(), Block::random(&mut rng)); - db.add_block(2.into(), Block::random(&mut rng)); - db.add_block(expected_height, Block::random(&mut rng)); + db.add_block(1.into(), BlockBytes::random(&mut rng)); + 
db.add_block(2.into(), BlockBytes::random(&mut rng)); + db.add_block(expected_height, BlockBytes::random(&mut rng)); let (source, _block_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); @@ -234,7 +234,7 @@ async fn run__new_block_subscription__sends_new_block() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - let expected_block = Block::random(&mut rng); + let expected_block = BlockBytes::random(&mut rng); let expected_height = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); let (query, response) = BlockAggregatorQuery::new_block_subscription(); @@ -263,7 +263,7 @@ async fn run__new_block_subscription__does_not_send_syncing_blocks() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - let block = Block::random(&mut rng); + let block = BlockBytes::random(&mut rng); let height = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); let (query, response) = BlockAggregatorQuery::new_block_subscription(); From ba4d17fe0a4b5e3bceeec94b88860e45832700c8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 7 Nov 2025 08:52:59 -0700 Subject: [PATCH 092/146] Add basic block range test --- .../src/block_range_response.rs | 14 ++-- .../src/blocks/importer_and_db_source.rs | 3 + .../src/db/remote_cache.rs | 64 +++++++++++++++++-- 3 files changed, 72 insertions(+), 9 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 24e78af6ff4..6a80da26ac8 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -8,7 +8,15 @@ pub enum BlockRangeResponse { /// A literal stream of blocks Literal(BoxStream), /// A remote URL where the blocks can be fetched - Remote(String), + Remote(BoxStream), +} + 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RemoteBlockRangeResponse { + pub region: String, + pub bucket: String, + pub key: String, + pub url: String, } #[cfg(test)] @@ -16,9 +24,7 @@ impl std::fmt::Debug for BlockRangeResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { BlockRangeResponse::Literal(_) => f.debug_struct("Literal").finish(), - BlockRangeResponse::Remote(url) => { - f.debug_struct("Remote").field("url", url).finish() - } + BlockRangeResponse::Remote(_url) => f.debug_struct("Remote").finish(), } } } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 892b2b40120..497aab2ec9b 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -40,6 +40,9 @@ pub trait BlockSerializer { fn serialize_block(&self, block: &FuelBlock) -> Result; } +/// A block source that combines an importer and a database sync task. 
+/// Old blocks will be synced from a target database and new blocks will be received from +/// the importer pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 537ce2484d0..6ac176fa23c 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -40,6 +40,7 @@ impl RemoteCache { aws_region, aws_bucket, client, + head: None, } } } @@ -73,7 +74,21 @@ impl BlockAggregatorDB for RemoteCache { first: BlockHeight, last: BlockHeight, ) -> crate::result::Result { - todo!() + // TODO: Check if it exists + let region = self.aws_region.clone(); + let bucket = self.aws_bucket.clone(); + + let stream = futures::stream::iter((*first..=*last).map(move |height| { + let key = block_height_to_key(&BlockHeight::new(height)); + let url = "todo".to_string(); + crate::block_range_response::RemoteBlockRangeResponse { + region: region.clone(), + bucket: bucket.clone(), + key: key.clone(), + url, + } + })); + Ok(BlockRangeResponse::Remote(Box::pin(stream))) } async fn get_current_height(&self) -> crate::result::Result { @@ -89,9 +104,12 @@ pub fn block_height_to_key(height: &BlockHeight) -> String { #[cfg(test)] mod tests { use super::*; - use crate::blocks::importer_and_db_source::{ - BlockSerializer, - serializer_adapter::SerializerAdapter, + use crate::{ + block_range_response::RemoteBlockRangeResponse, + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, }; use aws_sdk_s3::{ operation::{ @@ -105,6 +123,7 @@ mod tests { mock, mock_client, }; + use futures::StreamExt; fn arb_proto_block() -> ProtoBlock { let block = FuelBlock::default(); @@ -140,6 +159,41 @@ mod tests { #[tokio::test] async fn get_block_range__happy_path() { - todo!() + // given + let client = mock_client!(aws_sdk_s3, 
[]); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let mut adapter = RemoteCache::new( + aws_id.clone(), + aws_secret.clone(), + aws_region.clone(), + aws_bucket.clone(), + client, + ); + let start = BlockHeight::new(999); + let end = BlockHeight::new(1003); + let block = arb_proto_block(); + + // when + let addresses = adapter.get_block_range(start, end).await.unwrap(); + + // then + let actual = match addresses { + BlockRangeResponse::Literal(_) => { + panic!("Expected remote response, got literal"); + } + BlockRangeResponse::Remote(stream) => stream.collect::>().await, + }; + let expected = (999..=1003) + .map(|height| RemoteBlockRangeResponse { + region: aws_region.clone(), + bucket: aws_bucket.clone(), + key: block_height_to_key(&BlockHeight::new(height)), + url: "todo".to_string(), + }) + .collect::>(); + assert_eq!(actual, expected); } } From 563683047c796c4e5d744971448da8a5f171e928 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 7 Nov 2025 11:40:26 -0700 Subject: [PATCH 093/146] persist height information --- .../services/block_aggregator_api/src/api.rs | 5 +- .../src/api/protobuf_adapter.rs | 2 +- .../src/api/protobuf_adapter/tests.rs | 2 +- .../src/block_aggregator.rs | 15 +- .../block_aggregator_api/src/blocks.rs | 12 +- .../services/block_aggregator_api/src/db.rs | 12 +- .../src/db/remote_cache.rs | 28 ++- .../block_aggregator_api/src/db/storage_db.rs | 165 +++++++++++++----- .../src/db/storage_db/table.rs | 19 ++ .../src/db/storage_db/tests.rs | 51 +++--- .../block_aggregator_api/src/tests.rs | 9 +- 11 files changed, 223 insertions(+), 97 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 4beb51c47f3..20d6d58003d 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -25,7 
+25,7 @@ pub enum BlockAggregatorQuery { response: tokio::sync::oneshot::Sender, }, GetCurrentHeight { - response: tokio::sync::oneshot::Sender, + response: tokio::sync::oneshot::Sender>, }, // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? NewBlockSubscription { @@ -68,7 +68,8 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn get_current_height() -> (Self, tokio::sync::oneshot::Receiver) { + pub fn get_current_height() + -> (Self, tokio::sync::oneshot::Receiver>) { let (sender, receiver) = tokio::sync::oneshot::channel(); let query = Self::GetCurrentHeight { response: sender }; (query, receiver) diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index c944e199917..74ca9e992b4 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -60,7 +60,7 @@ impl BlockAggregator for Server { let res = receiver.await; match res { Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { - height: *height, + height: *height.unwrap(), })), Err(e) => Err(tonic::Status::internal(format!( "Failed to receive height: {}", diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index 7807ac02180..fb9ef78edf5 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -66,7 +66,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { // then // return response through query's channel if let BlockAggregatorQuery::GetCurrentHeight { response } = query { - response.send(BlockHeight::new(42)).unwrap(); + response.send(Some(BlockHeight::new(42))).unwrap(); } else { panic!("expected GetCurrentHeight query"); } diff --git 
a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index 4fde80d22b7..128b715fb40 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -85,7 +85,7 @@ where async fn handle_get_current_height_query( &mut self, - response: tokio::sync::oneshot::Sender, + response: tokio::sync::oneshot::Sender>, ) -> TaskNextAction { let res = self.database.get_current_height().await; let height = try_or_stop!(res, |e| { @@ -117,14 +117,14 @@ where let event = try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); }); - let (id, block) = match event { - BlockSourceEvent::NewBlock(id, block) => { + match &event { + BlockSourceEvent::NewBlock(height, block) => { self.new_block_subscriptions.retain_mut(|sub| { let send_res = sub.try_send(block.clone()); match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { - tracing::error!("Error sending new block to subscriber due to full channel: {id:?}"); + tracing::error!("Error sending new block to subscriber due to full channel: {height:?}"); true }, Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { @@ -133,11 +133,12 @@ where }, } }); - (id, block) } - BlockSourceEvent::OldBlock(id, block) => (id, block), + BlockSourceEvent::OldBlock(_id, _block) => { + // Do nothing + } }; - let res = self.database.store_block(id, block).await; + let res = self.database.store_block(event).await; try_or_stop!(res, |e| { tracing::error!("Error storing block in database: {e:?}"); }); diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 397b83b7755..55f8ff15216 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -17,12 +17,22 @@ pub trait BlockSource: Send + Sync { fn drain(&mut self) -> 
impl Future> + Send; } -#[derive(Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Debug, Eq, PartialEq, Hash)] pub enum BlockSourceEvent { NewBlock(BlockHeight, B), OldBlock(BlockHeight, B), } +impl BlockSourceEvent { + pub fn into_inner(self) -> (BlockHeight, B) { + match self { + Self::NewBlock(height, block) | Self::OldBlock(height, block) => { + (height, block) + } + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct BlockBytes { bytes: Bytes, diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 06e1eb5630b..2883b3a720a 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -1,4 +1,7 @@ -use crate::result::Result; +use crate::{ + blocks::BlockSourceEvent, + result::Result, +}; use fuel_core_types::fuel_types::BlockHeight; pub mod remote_cache; @@ -13,8 +16,7 @@ pub trait BlockAggregatorDB: Send + Sync { /// Stores a block with the given ID fn store_block( &mut self, - height: BlockHeight, - block: Self::Block, + block: BlockSourceEvent, ) -> impl Future> + Send; /// Retrieves a range of blocks from the database @@ -27,5 +29,7 @@ pub trait BlockAggregatorDB: Send + Sync { /// Retrieves the current height of the aggregated blocks If there is a break in the blocks, /// i.e. 
the blocks are being aggregated out of order, return the height of the last /// contiguous block - fn get_current_height(&self) -> impl Future> + Send; + fn get_current_height( + &self, + ) -> impl Future>> + Send; } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 6ac176fa23c..8811e9d246b 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -1,8 +1,14 @@ use crate::{ block_range_response::BlockRangeResponse, - blocks::BlockBytes, + blocks::{ + BlockBytes, + BlockSourceEvent, + }, db::BlockAggregatorDB, - protobuf_types::Block as ProtoBlock, + protobuf_types::{ + Block as ProtoBlock, + Block, + }, result::Error, }; use aws_sdk_s3::{ @@ -51,9 +57,18 @@ impl BlockAggregatorDB for RemoteCache { async fn store_block( &mut self, - height: BlockHeight, - block: ProtoBlock, + block: BlockSourceEvent, ) -> crate::result::Result<()> { + let (height, block) = match block { + BlockSourceEvent::NewBlock(height, block) => { + // Do nothing extra + (height, block) + } + BlockSourceEvent::OldBlock(height, block) => { + // TODO: record latest block + (height, block) + } + }; let key = block_height_to_key(&height); let mut buf = Vec::new(); block.encode(&mut buf).map_err(Error::db_error)?; @@ -91,7 +106,7 @@ impl BlockAggregatorDB for RemoteCache { Ok(BlockRangeResponse::Remote(Box::pin(stream))) } - async fn get_current_height(&self) -> crate::result::Result { + async fn get_current_height(&self) -> crate::result::Result> { todo!() } } @@ -149,9 +164,10 @@ mod tests { RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client); let block_height = BlockHeight::new(123); let block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(block_height, block); // when - let res = adapter.store_block(block_height, block).await; + let res = adapter.store_block(block).await; // then 
assert!(res.is_ok()); diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 7aeac0a91d1..9871929e67e 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,10 +1,17 @@ use crate::{ block_range_response::BlockRangeResponse, + blocks::BlockSourceEvent, db::{ BlockAggregatorDB, - storage_db::table::Column, + storage_db::table::{ + Column, + LatestBlock, + }, + }, + protobuf_types::{ + Block as ProtoBlock, + Block, }, - protobuf_types::Block as ProtoBlock, result::{ Error, Result, @@ -29,6 +36,7 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ + borrow::Cow, cmp::Ordering, collections::BTreeSet, pin::Pin, @@ -44,55 +52,89 @@ pub mod table; mod tests; pub struct StorageDB { - highest_contiguous_block: BlockHeight, - orphaned_heights: BTreeSet, + highest_new_height: Option, + orphaned_new_height: Option, storage: S, } impl StorageDB { pub fn new(storage: S) -> Self { - let height = BlockHeight::new(0); - Self::new_with_height(storage, height) - } - - pub fn new_with_height(storage: S, highest_contiguous_block: BlockHeight) -> Self { - let orphaned_heights = BTreeSet::new(); Self { - highest_contiguous_block, - orphaned_heights, + highest_new_height: None, + orphaned_new_height: None, storage, } } - fn update_highest_contiguous_block(&mut self, height: BlockHeight) { - let next_height = self.next_height(); - match height.cmp(&next_height) { - Ordering::Equal => { - self.highest_contiguous_block = height; - while let Some(next_height) = self.orphaned_heights.first() { - if next_height == &self.next_height() { - self.highest_contiguous_block = *next_height; - let _ = self.orphaned_heights.pop_first(); - } else { - break; - } - } - } - Ordering::Greater => { - self.orphaned_heights.insert(height); - } - Ordering::Less => { - tracing::warn!( - "Received block at height 
{:?}, but the syncing is already at height {:?}. Ignoring block.", - height, - self.highest_contiguous_block - ); - } - } - } - fn next_height(&self) -> BlockHeight { - let last_height = *self.highest_contiguous_block; - BlockHeight::new(last_height.saturating_add(1)) + // fn update_highest_contiguous_block(&mut self, height: BlockHeight) { + // let next_height = self.next_height(); + // match height.cmp(&next_height) { + // Ordering::Equal => { + // self.highest_contiguous_block = height; + // while let Some(next_height) = self.orphaned_heights.first() { + // if next_height == &self.next_height() { + // self.highest_contiguous_block = *next_height; + // let _ = self.orphaned_heights.pop_first(); + // } else { + // break; + // } + // } + // } + // Ordering::Greater => { + // self.orphaned_heights.insert(height); + // } + // Ordering::Less => { + // tracing::warn!( + // "Received block at height {:?}, but the syncing is already at height {:?}. Ignoring block.", + // height, + // self.highest_contiguous_block + // ); + // } + // } + // } + // fn next_height(&self) -> BlockHeight { + // let last_height = *self.get_current_height(); + // BlockHeight::new(last_height.saturating_add(1)) + // } + + // fn update_latest_block(&mut self, block_event: &BlockSourceEvent) { + // match block_event { + // BlockSourceEvent::NewBlock(height, _) => { + // self.highest_height = Some(*height); + // if height == self.next_height() { + // self.orphaned_height = None; + // // TODO + // } else if self.orphaned_height.is_none() { + // self.orphaned_height = Some(*height); + // } + // } + // BlockSourceEvent::OldBlock(height, _) => {} + // } + // } +} + +impl StorageDB +where + S: Modifiable + std::fmt::Debug, + S: KeyValueInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, +{ + fn next_height(&self) -> Result> { + let storage = self + .storage + .latest_view() + .map_err(|e| 
Error::DB(anyhow!(e))) + .unwrap(); + let binding = storage.read_transaction(); + let latest_height = binding + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + let next_height = latest_height.and_then(|h| h.succ()); + Ok(next_height) } } @@ -100,7 +142,10 @@ impl BlockAggregatorDB for StorageDB where S: Modifiable + std::fmt::Debug, S: KeyValueInspect, + S: StorageInspect, for<'b> StorageTransaction<&'b mut S>: StorageMutate, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, S: AtomicView, T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, StorageTransaction: StorageInspect, @@ -110,14 +155,36 @@ where async fn store_block( &mut self, - height: BlockHeight, - block: ProtoBlock, + block_event: BlockSourceEvent, ) -> Result<()> { - self.update_highest_contiguous_block(height); + let (height, block) = block_event.clone().into_inner(); + let next_height = self.next_height()?; let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() .insert(&height, &block) .map_err(|e| Error::DB(anyhow!(e)))?; + + match block_event { + BlockSourceEvent::NewBlock(new_height, _) => { + tracing::debug!("New block: {:?}", new_height); + self.highest_new_height = Some(new_height); + if self.orphaned_new_height.is_none() { + self.orphaned_new_height = Some(new_height); + } + } + BlockSourceEvent::OldBlock(height, _) => { + tracing::debug!("Old block: {:?}", height); + let latest_height = if height.succ() == self.orphaned_new_height { + self.orphaned_new_height = None; + self.highest_new_height.clone().unwrap_or(height) + } else { + height + }; + tx.storage_as_mut::() + .insert(&(), &latest_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + } + } tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; Ok(()) } @@ -135,8 +202,14 @@ where Ok(BlockRangeResponse::Literal(Box::pin(stream))) } - async fn get_current_height(&self) -> Result { - Ok(self.highest_contiguous_block) + async fn get_current_height(&self) -> Result> { + let height = 
self + .storage + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + + Ok(height.map(|b| b.into_owned())) } } diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index f161b245240..2868812ced2 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -28,6 +28,7 @@ use std::borrow::Cow; pub enum Column { Metadata = 0, Blocks = 1, + LatestBlock = 2, } impl Column { @@ -67,7 +68,25 @@ impl TableWithBlueprint for Blocks { } } +pub struct LatestBlock; + +impl Mappable for LatestBlock { + type Key = Self::OwnedKey; + type OwnedKey = (); + type Value = Self::OwnedValue; + type OwnedValue = BlockHeight; +} + +impl TableWithBlueprint for LatestBlock { + type Blueprint = Plain>; + type Column = Column; + fn column() -> Self::Column { + Column::LatestBlock + } +} + use fuel_core_storage::codec::{ + postcard::Postcard, primitive::Primitive, raw::Raw, }; diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 593839e406a..6a9ca51f262 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -38,9 +38,10 @@ async fn store_block__adds_to_storage() { let mut adapter = StorageDB::new(db); let height = BlockHeight::from(1u32); let expected = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, expected.clone()); // when - adapter.store_block(height, expected.clone()).await.unwrap(); + adapter.store_block(block).await.unwrap(); // then let actual = adapter @@ -96,34 +97,41 @@ async fn get_block__can_get_expected_range() { async fn store_block__updates_the_highest_continuous_block_if_contiguous() { // given let db = database(); - let mut adapter = 
StorageDB::new_with_height(db, BlockHeight::from(0u32)); + let mut adapter = StorageDB::new(db); let height = BlockHeight::from(1u32); let expected = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, expected.clone()); // when - adapter.store_block(height, expected.clone()).await.unwrap(); + adapter.store_block(block).await.unwrap(); // then let expected = height; - let actual = adapter.get_current_height().await.unwrap(); + let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } #[tokio::test] async fn store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { // given - let db = database(); - let starting_height = BlockHeight::from(0u32); - let mut adapter = StorageDB::new_with_height(db, starting_height); - let height = BlockHeight::from(2u32); - let expected = proto_block_with_height(height); + let mut db = database(); + let mut tx = db.write_transaction(); + let starting_height = BlockHeight::from(1u32); + tx.storage_as_mut::() + .insert(&(), &starting_height) + .unwrap(); + tx.commit().unwrap(); + let mut adapter = StorageDB::new(db); + let height = BlockHeight::from(3u32); + let proto = proto_block_with_height(height); + let block = BlockSourceEvent::NewBlock(height, proto.clone()); // when - adapter.store_block(height, expected.clone()).await.unwrap(); + adapter.store_block(block).await.unwrap(); // then let expected = starting_height; - let actual = adapter.get_current_height().await.unwrap(); + let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } @@ -132,29 +140,22 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // given let db = database(); let starting_height = BlockHeight::from(0u32); - let mut adapter = StorageDB::new_with_height(db, starting_height); + let mut adapter = StorageDB::new(db); - let mut orphaned_height = None; for height in 2..=10u32 { let height = 
BlockHeight::from(height); - orphaned_height = Some(height); let block = proto_block_with_height(height); - adapter.store_block(height, block).await.unwrap(); + let block = BlockSourceEvent::NewBlock(height, block.clone()); + adapter.store_block(block).await.unwrap(); } - let expected = starting_height; - let actual = adapter.get_current_height().await.unwrap(); - assert_eq!(expected, actual); - // when let height = BlockHeight::from(1u32); let some_block = proto_block_with_height(height); - adapter - .store_block(height, some_block.clone()) - .await - .unwrap(); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); // then - let expected = orphaned_height.unwrap(); - let actual = adapter.get_current_height().await.unwrap(); + let expected = BlockHeight::from(10u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index a763f75cef5..81feb61d064 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -79,7 +79,8 @@ impl BlockAggregatorDB for FakeDB { type Block = BlockBytes; type BlockRangeResponse = BlockRangeResponse; - async fn store_block(&mut self, id: BlockHeight, block: BlockBytes) -> Result<()> { + async fn store_block(&mut self, block: BlockSourceEvent) -> Result<()> { + let (id, block) = block.into_inner(); self.map.lock().unwrap().insert(id, block); Ok(()) } @@ -105,9 +106,9 @@ impl BlockAggregatorDB for FakeDB { Ok(Box::pin(futures::stream::iter(blocks))) } - async fn get_current_height(&self) -> Result { + async fn get_current_height(&self) -> Result> { let map = self.map.lock().unwrap(); - let max_height = map.keys().max().cloned().unwrap_or(BlockHeight::from(0u32)); + let max_height = map.keys().max().cloned(); Ok(max_height) } } @@ -218,7 +219,7 @@ async fn 
run__get_current_height__returns_expected_height() {

     // then
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-    let height = response.await.unwrap();
+    let height = response.await.unwrap().unwrap();
     assert_eq!(expected_height, height);

     // cleanup

From b6ee334de0da2b4a3f417bf92f7c43bddb1248ce Mon Sep 17 00:00:00 2001
From: Mitch Turner
Date: Fri, 7 Nov 2025 11:52:47 -0700
Subject: [PATCH 094/146] Start adding persistence to remote storage

---
 .../database_description/block_aggregator.rs  |   2 +-
 .../services/block_aggregator_api/src/db.rs   |   1 +
 .../src/db/remote_cache.rs                    | 116 ++-------------
 .../src/db/remote_cache/tests.rs              | 137 ++++++++++++++++++
 .../block_aggregator_api/src/db/storage_db.rs |  51 +------
 .../src/db/storage_db/tests.rs                |   3 +-
 .../src/db/{storage_db => }/table.rs          |   0
 7 files changed, 153 insertions(+), 157 deletions(-)
 create mode 100644 crates/services/block_aggregator_api/src/db/remote_cache/tests.rs
 rename crates/services/block_aggregator_api/src/db/{storage_db => }/table.rs (100%)

diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs
index 2d55678552f..9aff58c3cd4 100644
--- a/crates/fuel-core/src/database/database_description/block_aggregator.rs
+++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs
@@ -1,5 +1,5 @@
 use crate::database::database_description::DatabaseDescription;
-use fuel_core_block_aggregator_api::db::storage_db::table::Column;
+use fuel_core_block_aggregator_api::db::table::Column;
 use fuel_core_types::fuel_types::BlockHeight;
 
 #[derive(Clone, Copy, Debug)]
diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs
index 2883b3a720a..6629726eb7a 100644
--- a/crates/services/block_aggregator_api/src/db.rs
+++ b/crates/services/block_aggregator_api/src/db.rs
@@ -6,6 +6,7 @@ use fuel_core_types::fuel_types::BlockHeight;
 pub mod remote_cache;
pub mod storage_db; +pub mod table; /// The definition of the block aggregator database. pub trait BlockAggregatorDB: Send + Sync { diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 8811e9d246b..c22c68155a1 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -22,36 +22,41 @@ use fuel_core_types::{ }; use prost::Message; +#[allow(non_snake_case)] +#[cfg(test)] +mod tests; + #[allow(unused)] -pub struct RemoteCache { +pub struct RemoteCache { aws_id: String, aws_secret: String, aws_region: String, aws_bucket: String, client: Client, - head: Option, + local_persisted: S, } -impl RemoteCache { +impl RemoteCache { pub fn new( aws_id: String, aws_secret: String, aws_region: String, aws_bucket: String, client: Client, - ) -> RemoteCache { + local_persisted: S, + ) -> RemoteCache { RemoteCache { aws_id, aws_secret, aws_region, aws_bucket, client, - head: None, + local_persisted, } } } -impl BlockAggregatorDB for RemoteCache { +impl BlockAggregatorDB for RemoteCache { type Block = ProtoBlock; type BlockRangeResponse = BlockRangeResponse; @@ -114,102 +119,3 @@ impl BlockAggregatorDB for RemoteCache { pub fn block_height_to_key(height: &BlockHeight) -> String { format!("{:08x}", height) } - -#[allow(non_snake_case)] -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - block_range_response::RemoteBlockRangeResponse, - blocks::importer_and_db_source::{ - BlockSerializer, - serializer_adapter::SerializerAdapter, - }, - }; - use aws_sdk_s3::{ - operation::{ - get_object::GetObjectOutput, - put_object::PutObjectOutput, - }, - primitives::ByteStream, - }; - use aws_smithy_mocks::{ - RuleMode, - mock, - mock_client, - }; - use futures::StreamExt; - - fn arb_proto_block() -> ProtoBlock { - let block = FuelBlock::default(); - let mut serializer = SerializerAdapter; - let proto_block = 
serializer.serialize_block(&block).unwrap(); - proto_block - } - - #[tokio::test] - async fn store_block__happy_path() { - let put_happy_rule = mock!(Client::put_object) - .match_requests(|req| req.bucket() == Some("test-bucket")) - .sequence() - .output(|| PutObjectOutput::builder().build()) - .build(); - // given - let client = mock_client!(aws_sdk_s3, [&put_happy_rule]); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); - let aws_bucket = "test-bucket".to_string(); - let mut adapter = - RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client); - let block_height = BlockHeight::new(123); - let block = arb_proto_block(); - let block = BlockSourceEvent::OldBlock(block_height, block); - - // when - let res = adapter.store_block(block).await; - - // then - assert!(res.is_ok()); - } - - #[tokio::test] - async fn get_block_range__happy_path() { - // given - let client = mock_client!(aws_sdk_s3, []); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); - let aws_bucket = "test-bucket".to_string(); - let mut adapter = RemoteCache::new( - aws_id.clone(), - aws_secret.clone(), - aws_region.clone(), - aws_bucket.clone(), - client, - ); - let start = BlockHeight::new(999); - let end = BlockHeight::new(1003); - let block = arb_proto_block(); - - // when - let addresses = adapter.get_block_range(start, end).await.unwrap(); - - // then - let actual = match addresses { - BlockRangeResponse::Literal(_) => { - panic!("Expected remote response, got literal"); - } - BlockRangeResponse::Remote(stream) => stream.collect::>().await, - }; - let expected = (999..=1003) - .map(|height| RemoteBlockRangeResponse { - region: aws_region.clone(), - bucket: aws_bucket.clone(), - key: block_height_to_key(&BlockHeight::new(height)), - url: "todo".to_string(), - }) - .collect::>(); - assert_eq!(actual, expected); - } -} diff --git 
a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs new file mode 100644 index 00000000000..f1653f3a16f --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -0,0 +1,137 @@ +use super::*; +use crate::{ + block_range_response::RemoteBlockRangeResponse, + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, + db::table::Column, +}; +use aws_sdk_s3::{ + operation::{ + get_object::GetObjectOutput, + put_object::PutObjectOutput, + }, + primitives::ByteStream, +}; +use aws_smithy_mocks::{ + RuleMode, + mock, + mock_client, +}; +use fuel_core_storage::{ + structured_storage::test::InMemoryStorage, + transactional::{ + IntoTransaction, + StorageTransaction, + }, +}; +use futures::StreamExt; + +fn database() -> StorageTransaction> { + InMemoryStorage::default().into_transaction() +} + +fn arb_proto_block() -> ProtoBlock { + let block = FuelBlock::default(); + let mut serializer = SerializerAdapter; + let proto_block = serializer.serialize_block(&block).unwrap(); + proto_block +} + +#[tokio::test] +async fn store_block__happy_path() { + let put_happy_rule = mock!(Client::put_object) + .match_requests(|req| req.bucket() == Some("test-bucket")) + .sequence() + .output(|| PutObjectOutput::builder().build()) + .build(); + // given + let client = mock_client!(aws_sdk_s3, [&put_happy_rule]); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let storage = database(); + let mut adapter = + RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, storage); + let block_height = BlockHeight::new(123); + let block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(block_height, block); + + // when + let res = adapter.store_block(block).await; + + // then + 
assert!(res.is_ok()); +} + +#[tokio::test] +async fn get_block_range__happy_path() { + // given + let client = mock_client!(aws_sdk_s3, []); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let storage = database(); + let mut adapter = RemoteCache::new( + aws_id.clone(), + aws_secret.clone(), + aws_region.clone(), + aws_bucket.clone(), + client, + storage, + ); + let start = BlockHeight::new(999); + let end = BlockHeight::new(1003); + let block = arb_proto_block(); + + // when + let addresses = adapter.get_block_range(start, end).await.unwrap(); + + // then + let actual = match addresses { + BlockRangeResponse::Literal(_) => { + panic!("Expected remote response, got literal"); + } + BlockRangeResponse::Remote(stream) => stream.collect::>().await, + }; + let expected = (999..=1003) + .map(|height| RemoteBlockRangeResponse { + region: aws_region.clone(), + bucket: aws_bucket.clone(), + key: block_height_to_key(&BlockHeight::new(height)), + url: "todo".to_string(), + }) + .collect::>(); + assert_eq!(actual, expected); +} + +#[tokio::test] +async fn get_current_height__returns_highest_continuos_block() { + let put_happy_rule = mock!(Client::put_object) + .match_requests(|req| req.bucket() == Some("test-bucket")) + .sequence() + .output(|| PutObjectOutput::builder().build()) + .build(); + // given + let client = mock_client!(aws_sdk_s3, [&put_happy_rule]); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let storage = database(); + let mut adapter = + RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, storage); + let expected = BlockHeight::new(123); + let block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(expected, block); + adapter.store_block(block).await.unwrap(); + + // when + 
let actual = adapter.get_current_height().await.unwrap().unwrap(); + + // then + assert_eq!(expected, actual); +} diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 9871929e67e..e227eef2099 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -3,7 +3,8 @@ use crate::{ blocks::BlockSourceEvent, db::{ BlockAggregatorDB, - storage_db::table::{ + table::{ + Blocks, Column, LatestBlock, }, @@ -45,9 +46,7 @@ use std::{ Poll, }, }; -use table::Blocks; -pub mod table; #[cfg(test)] mod tests; @@ -65,52 +64,6 @@ impl StorageDB { storage, } } - - // fn update_highest_contiguous_block(&mut self, height: BlockHeight) { - // let next_height = self.next_height(); - // match height.cmp(&next_height) { - // Ordering::Equal => { - // self.highest_contiguous_block = height; - // while let Some(next_height) = self.orphaned_heights.first() { - // if next_height == &self.next_height() { - // self.highest_contiguous_block = *next_height; - // let _ = self.orphaned_heights.pop_first(); - // } else { - // break; - // } - // } - // } - // Ordering::Greater => { - // self.orphaned_heights.insert(height); - // } - // Ordering::Less => { - // tracing::warn!( - // "Received block at height {:?}, but the syncing is already at height {:?}. 
Ignoring block.", - // height, - // self.highest_contiguous_block - // ); - // } - // } - // } - // fn next_height(&self) -> BlockHeight { - // let last_height = *self.get_current_height(); - // BlockHeight::new(last_height.saturating_add(1)) - // } - - // fn update_latest_block(&mut self, block_event: &BlockSourceEvent) { - // match block_event { - // BlockSourceEvent::NewBlock(height, _) => { - // self.highest_height = Some(*height); - // if height == self.next_height() { - // self.orphaned_height = None; - // // TODO - // } else if self.orphaned_height.is_none() { - // self.orphaned_height = Some(*height); - // } - // } - // BlockSourceEvent::OldBlock(height, _) => {} - // } - // } } impl StorageDB diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 6a9ca51f262..04ecf143a24 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -6,7 +6,7 @@ use crate::{ BlockSerializer, serializer_adapter::SerializerAdapter, }, - db::storage_db::table::Column, + db::table::Column, }; use fuel_core_storage::{ StorageAsRef, @@ -139,7 +139,6 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // given let db = database(); - let starting_height = BlockHeight::from(0u32); let mut adapter = StorageDB::new(db); for height in 2..=10u32 { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/table.rs similarity index 100% rename from crates/services/block_aggregator_api/src/db/storage_db/table.rs rename to crates/services/block_aggregator_api/src/db/table.rs From 4b0ab289dedd8fe86a8376b20b43d129afcf5d87 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 7 Nov 2025 15:43:35 -0700 Subject: [PATCH 095/146] Add persisted 
storage for remote consistency --- .../src/db/remote_cache.rs | 86 ++++++++++++++----- .../src/db/remote_cache/tests.rs | 14 +-- .../block_aggregator_api/src/db/storage_db.rs | 34 +------- .../block_aggregator_api/src/db/table.rs | 1 - 4 files changed, 69 insertions(+), 66 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index c22c68155a1..88f94221cf4 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -1,21 +1,31 @@ use crate::{ block_range_response::BlockRangeResponse, - blocks::{ - BlockBytes, - BlockSourceEvent, - }, - db::BlockAggregatorDB, - protobuf_types::{ - Block as ProtoBlock, - Block, + blocks::BlockSourceEvent, + db::{ + BlockAggregatorDB, + table::LatestBlock, }, + protobuf_types::Block as ProtoBlock, result::Error, }; +use anyhow::anyhow; use aws_sdk_s3::{ self, Client, primitives::ByteStream, }; +use fuel_core_storage::{ + Error as StorageError, + StorageAsMut, + StorageAsRef, + StorageInspect, + StorageMutate, + transactional::{ + Modifiable, + StorageTransaction, + WriteTransaction, + }, +}; use fuel_core_types::{ blockchain::block::Block as FuelBlock, fuel_types::BlockHeight, @@ -28,12 +38,17 @@ mod tests; #[allow(unused)] pub struct RemoteCache { + // aws configuration aws_id: String, aws_secret: String, aws_region: String, aws_bucket: String, client: Client, + + // track consistency between runs local_persisted: S, + highest_new_height: Option, + orphaned_new_height: Option, } impl RemoteCache { @@ -52,28 +67,28 @@ impl RemoteCache { aws_bucket, client, local_persisted, + highest_new_height: None, + orphaned_new_height: None, } } } -impl BlockAggregatorDB for RemoteCache { +impl BlockAggregatorDB for RemoteCache +where + S: Send + Sync, + S: Modifiable, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, +{ type Block = ProtoBlock; 
type BlockRangeResponse = BlockRangeResponse; async fn store_block( &mut self, - block: BlockSourceEvent, + block_event: BlockSourceEvent, ) -> crate::result::Result<()> { - let (height, block) = match block { - BlockSourceEvent::NewBlock(height, block) => { - // Do nothing extra - (height, block) - } - BlockSourceEvent::OldBlock(height, block) => { - // TODO: record latest block - (height, block) - } - }; + let (height, block) = block_event.clone().into_inner(); let key = block_height_to_key(&height); let mut buf = Vec::new(); block.encode(&mut buf).map_err(Error::db_error)?; @@ -86,6 +101,29 @@ impl BlockAggregatorDB for RemoteCache { .body(body) .content_type("application/octet-stream"); let _ = req.send().await.map_err(Error::db_error)?; + match block_event { + BlockSourceEvent::NewBlock(new_height, _) => { + tracing::debug!("New block: {:?}", new_height); + self.highest_new_height = Some(new_height); + if self.orphaned_new_height.is_none() { + self.orphaned_new_height = Some(new_height); + } + } + BlockSourceEvent::OldBlock(height, _) => { + tracing::debug!("Old block: {:?}", height); + let mut tx = self.local_persisted.write_transaction(); + let latest_height = if height.succ() == self.orphaned_new_height { + self.orphaned_new_height = None; + self.highest_new_height.clone().unwrap_or(height) + } else { + height + }; + tx.storage_as_mut::() + .insert(&(), &latest_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } + } Ok(()) } @@ -112,7 +150,13 @@ impl BlockAggregatorDB for RemoteCache { } async fn get_current_height(&self) -> crate::result::Result> { - todo!() + let height = self + .local_persisted + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + + Ok(height.map(|b| b.into_owned())) } } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index f1653f3a16f..bf21443c9fe 100644 --- 
a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -7,15 +7,8 @@ use crate::{ }, db::table::Column, }; -use aws_sdk_s3::{ - operation::{ - get_object::GetObjectOutput, - put_object::PutObjectOutput, - }, - primitives::ByteStream, -}; +use aws_sdk_s3::operation::put_object::PutObjectOutput; use aws_smithy_mocks::{ - RuleMode, mock, mock_client, }; @@ -34,7 +27,7 @@ fn database() -> StorageTransaction> { fn arb_proto_block() -> ProtoBlock { let block = FuelBlock::default(); - let mut serializer = SerializerAdapter; + let serializer = SerializerAdapter; let proto_block = serializer.serialize_block(&block).unwrap(); proto_block } @@ -75,7 +68,7 @@ async fn get_block_range__happy_path() { let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); let storage = database(); - let mut adapter = RemoteCache::new( + let adapter = RemoteCache::new( aws_id.clone(), aws_secret.clone(), aws_region.clone(), @@ -85,7 +78,6 @@ async fn get_block_range__happy_path() { ); let start = BlockHeight::new(999); let end = BlockHeight::new(1003); - let block = arb_proto_block(); // when let addresses = adapter.get_block_range(start, end).await.unwrap(); diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index e227eef2099..fdc31ef3faf 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -9,10 +9,7 @@ use crate::{ LatestBlock, }, }, - protobuf_types::{ - Block as ProtoBlock, - Block, - }, + protobuf_types::Block as ProtoBlock, result::{ Error, Result, @@ -37,9 +34,6 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ - borrow::Cow, - cmp::Ordering, - collections::BTreeSet, pin::Pin, task::{ Context, @@ -66,31 +60,6 @@ impl StorageDB { } } -impl StorageDB -where - S: Modifiable 
+ std::fmt::Debug, - S: KeyValueInspect, - for<'b> StorageTransaction<&'b mut S>: - StorageMutate, - S: AtomicView, - T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, -{ - fn next_height(&self) -> Result> { - let storage = self - .storage - .latest_view() - .map_err(|e| Error::DB(anyhow!(e))) - .unwrap(); - let binding = storage.read_transaction(); - let latest_height = binding - .storage_as_ref::() - .get(&()) - .map_err(|e| Error::DB(anyhow!(e)))?; - let next_height = latest_height.and_then(|h| h.succ()); - Ok(next_height) - } -} - impl BlockAggregatorDB for StorageDB where S: Modifiable + std::fmt::Debug, @@ -111,7 +80,6 @@ where block_event: BlockSourceEvent, ) -> Result<()> { let (height, block) = block_event.clone().into_inner(); - let next_height = self.next_height()?; let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() .insert(&height, &block) diff --git a/crates/services/block_aggregator_api/src/db/table.rs b/crates/services/block_aggregator_api/src/db/table.rs index 2868812ced2..215c5cecd1f 100644 --- a/crates/services/block_aggregator_api/src/db/table.rs +++ b/crates/services/block_aggregator_api/src/db/table.rs @@ -88,7 +88,6 @@ impl TableWithBlueprint for LatestBlock { use fuel_core_storage::codec::{ postcard::Postcard, primitive::Primitive, - raw::Raw, }; use prost::Message; From 2eff09b497c21497a31b6a9026eb7b6dcb3f84fa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 7 Nov 2025 16:04:31 -0700 Subject: [PATCH 096/146] Add more coverage --- .../serializer_adapter.rs | 1 - .../src/db/remote_cache.rs | 5 +- .../src/db/remote_cache/tests.rs | 83 ++++++++++++++++--- 3 files changed, 72 insertions(+), 17 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index fa7e7db2d8f..b497145f2da 100644 --- 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -125,7 +125,6 @@ impl BlockSerializer for SerializerAdapter { type Block = ProtoBlock; fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { - // TODO: Should this be owned to begin with? let (header, txs) = block.clone().into_inner(); let proto_header = proto_header_from_header(header); match &block { diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 88f94221cf4..b91792d8a27 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -26,10 +26,7 @@ use fuel_core_storage::{ WriteTransaction, }, }; -use fuel_core_types::{ - blockchain::block::Block as FuelBlock, - fuel_types::BlockHeight, -}; +use fuel_core_types::fuel_types::BlockHeight; use prost::Message; #[allow(non_snake_case)] diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index bf21443c9fe..a825340a605 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -9,6 +9,7 @@ use crate::{ }; use aws_sdk_s3::operation::put_object::PutObjectOutput; use aws_smithy_mocks::{ + Rule, mock, mock_client, }; @@ -20,6 +21,7 @@ use fuel_core_storage::{ }, }; use futures::StreamExt; +use std::iter; fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() @@ -31,16 +33,18 @@ fn arb_proto_block() -> ProtoBlock { let proto_block = serializer.serialize_block(&block).unwrap(); proto_block } - -#[tokio::test] -async fn store_block__happy_path() { - let put_happy_rule = mock!(Client::put_object) +fn put_happy_rule() -> Rule { + 
mock!(Client::put_object) .match_requests(|req| req.bucket() == Some("test-bucket")) .sequence() .output(|| PutObjectOutput::builder().build()) - .build(); + .build() +} + +#[tokio::test] +async fn store_block__happy_path() { // given - let client = mock_client!(aws_sdk_s3, [&put_happy_rule]); + let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); let aws_id = "test-id".to_string(); let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); @@ -102,13 +106,8 @@ async fn get_block_range__happy_path() { #[tokio::test] async fn get_current_height__returns_highest_continuos_block() { - let put_happy_rule = mock!(Client::put_object) - .match_requests(|req| req.bucket() == Some("test-bucket")) - .sequence() - .output(|| PutObjectOutput::builder().build()) - .build(); // given - let client = mock_client!(aws_sdk_s3, [&put_happy_rule]); + let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); let aws_id = "test-id".to_string(); let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); @@ -127,3 +126,63 @@ async fn get_current_height__returns_highest_continuos_block() { // then assert_eq!(expected, actual); } + +#[tokio::test] +async fn store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { + // given + let mut storage = database(); + let mut tx = storage.write_transaction(); + let starting_height = BlockHeight::from(1u32); + tx.storage_as_mut::() + .insert(&(), &starting_height) + .unwrap(); + tx.commit().unwrap(); + let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let mut adapter = + RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, storage); + let expected = BlockHeight::new(3); + let block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(expected, block); + 
adapter.store_block(block).await.unwrap(); + + // when + let expected = starting_height; + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); +} + +#[tokio::test] +async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { + let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); + let client = mock_client!(aws_sdk_s3, rules.iter()); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + + // given + let db = database(); + let mut adapter = + RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, db); + + for height in 2..=10u32 { + let height = BlockHeight::from(height); + let block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(height, block.clone()); + adapter.store_block(block).await.unwrap(); + } + // when + let height = BlockHeight::from(1u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(10u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); +} From 11f075488ac992628ab9e02205d8a9ff94f47197 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 7 Nov 2025 16:14:57 -0700 Subject: [PATCH 097/146] Fix url construction --- .../src/db/remote_cache.rs | 10 ++++- .../src/db/remote_cache/tests.rs | 40 +++++++++++++------ 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index b91792d8a27..72d03cb0fbf 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -40,6 +40,7 @@ pub struct RemoteCache { aws_secret: 
String, aws_region: String, aws_bucket: String, + url_base: String, client: Client, // track consistency between runs @@ -54,6 +55,7 @@ impl RemoteCache { aws_secret: String, aws_region: String, aws_bucket: String, + url_base: String, client: Client, local_persisted: S, ) -> RemoteCache { @@ -62,12 +64,17 @@ impl RemoteCache { aws_secret, aws_region, aws_bucket, + url_base, client, local_persisted, highest_new_height: None, orphaned_new_height: None, } } + + fn url_for_block(base: &str, key: &str) -> String { + format!("{}/blocks/{}", base, key,) + } } impl BlockAggregatorDB for RemoteCache @@ -132,10 +139,11 @@ where // TODO: Check if it exists let region = self.aws_region.clone(); let bucket = self.aws_bucket.clone(); + let base = self.url_base.clone(); let stream = futures::stream::iter((*first..=*last).map(move |height| { let key = block_height_to_key(&BlockHeight::new(height)); - let url = "todo".to_string(); + let url = Self::url_for_block(&base, &key); crate::block_range_response::RemoteBlockRangeResponse { region: region.clone(), bucket: bucket.clone(), diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index a825340a605..855ed9f4ff0 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -20,6 +20,7 @@ use fuel_core_storage::{ StorageTransaction, }, }; +use fuel_core_types::blockchain::block::Block as FuelBlock; use futures::StreamExt; use std::iter; @@ -49,9 +50,11 @@ async fn store_block__happy_path() { let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); + let base = "http://good.com".to_string(); let storage = database(); - let mut adapter = - RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, storage); + let mut adapter = RemoteCache::new( + aws_id, aws_secret, 
aws_region, aws_bucket, base, client, storage, + ); let block_height = BlockHeight::new(123); let block = arb_proto_block(); let block = BlockSourceEvent::OldBlock(block_height, block); @@ -71,12 +74,14 @@ async fn get_block_range__happy_path() { let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); + let base = "http://good.com".to_string(); let storage = database(); let adapter = RemoteCache::new( aws_id.clone(), aws_secret.clone(), aws_region.clone(), aws_bucket.clone(), + base.clone(), client, storage, ); @@ -94,27 +99,33 @@ async fn get_block_range__happy_path() { BlockRangeResponse::Remote(stream) => stream.collect::>().await, }; let expected = (999..=1003) - .map(|height| RemoteBlockRangeResponse { - region: aws_region.clone(), - bucket: aws_bucket.clone(), - key: block_height_to_key(&BlockHeight::new(height)), - url: "todo".to_string(), + .map(|height| { + let key = block_height_to_key(&BlockHeight::new(height)); + let url = RemoteCache::<()>::url_for_block(&base, &key); + RemoteBlockRangeResponse { + region: aws_region.clone(), + bucket: aws_bucket.clone(), + key, + url, + } }) .collect::>(); assert_eq!(actual, expected); } #[tokio::test] -async fn get_current_height__returns_highest_continuos_block() { +async fn get_current_height__returns_highest_continuous_block() { // given let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); let aws_id = "test-id".to_string(); let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); + let base = "http://good.com".to_string(); let storage = database(); - let mut adapter = - RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, storage); + let mut adapter = RemoteCache::new( + aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, + ); let expected = BlockHeight::new(123); let block = arb_proto_block(); let block = 
BlockSourceEvent::OldBlock(expected, block); @@ -142,8 +153,10 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let mut adapter = - RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, storage); + let base = "http://good.com".to_string(); + let mut adapter = RemoteCache::new( + aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, + ); let expected = BlockHeight::new(3); let block = arb_proto_block(); let block = BlockSourceEvent::NewBlock(expected, block); @@ -163,11 +176,12 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let aws_secret = "test-secret".to_string(); let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); + let base = "http://good.com".to_string(); // given let db = database(); let mut adapter = - RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, client, db); + RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, base, client, db); for height in 2..=10u32 { let height = BlockHeight::from(height); From 2c0a2c29ef925f0d3ec16dac0177d5d0994f3bff Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 10 Nov 2025 07:29:32 -0700 Subject: [PATCH 098/146] Update changelog, lint toml --- .changes/added/3106.md | 1 + crates/services/block_aggregator_api/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 .changes/added/3106.md diff --git a/.changes/added/3106.md b/.changes/added/3106.md new file mode 100644 index 00000000000..e9d038e7f85 --- /dev/null +++ b/.changes/added/3106.md @@ -0,0 +1 @@ +Add adapter for storing blocks on AWS S3 bucket \ No newline at end of file diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index d74a655b29f..a9c8269046d 100644 --- 
a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -43,10 +43,10 @@ tracing = { workspace = true } tonic-prost-build = { workspace = true } [dev-dependencies] +aws-sdk-s3 = { version = "1.111.0", features = ["test-util"] } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } proptest = { workspace = true } tokio-stream = { workspace = true } -aws-sdk-s3 = { version = "1.111.0", features = ["test-util"] } tracing-subscriber = { workspace = true } From 75a0eb05ed76449be13e16aa77eb65583a728f01 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 10 Nov 2025 14:35:29 -0700 Subject: [PATCH 099/146] wip integrate into fuel-core --- crates/fuel-core/src/combined_database.rs | 41 +++-- crates/fuel-core/src/database.rs | 13 +- .../database_description/block_aggregator.rs | 30 +++- crates/fuel-core/src/service.rs | 1 + crates/fuel-core/src/service/sub_services.rs | 100 +++++++----- .../services/block_aggregator_api/src/db.rs | 2 + .../src/db/remote_cache/tests.rs | 5 + .../src/db/storage_or_remote_db.rs | 143 ++++++++++++++++++ .../services/block_aggregator_api/src/lib.rs | 1 + tests/tests/rpc.rs | 23 +++ 10 files changed, 308 insertions(+), 51 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index 6d6b128ff74..ac8ce32f48f 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -10,7 +10,10 @@ use crate::{ GenesisDatabase, Result as DatabaseResult, database_description::{ - block_aggregator::BlockAggregatorDatabase, + block_aggregator::{ + BlockAggregatorDatabaseS3, + BlockAggregatorDatabaseStorage, + }, compression::CompressionDatabase, 
gas_price::GasPriceDatabase, off_chain::OffChain, @@ -61,7 +64,8 @@ pub struct CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, - block_aggregation: Database, + block_aggregation_storage: Database, + block_aggregation_s3: Database, } impl CombinedDatabase { @@ -71,7 +75,8 @@ impl CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, - block_aggregation: Database, + block_aggregation_storage: Database, + block_aggregation_s3: Database, ) -> Self { Self { on_chain, @@ -79,7 +84,8 @@ impl CombinedDatabase { relayer, gas_price, compression, - block_aggregation, + block_aggregation_storage, + block_aggregation_s3, } } @@ -244,7 +250,15 @@ impl CombinedDatabase { ..database_config }, )?; - let block_aggregation = Database::open_rocksdb( + let block_aggregation_storage = Database::open_rocksdb( + path, + state_rewind_policy, + DatabaseConfig { + max_fds, + ..database_config + }, + )?; + let block_aggregation_s3 = Database::open_rocksdb( path, state_rewind_policy, DatabaseConfig { @@ -259,7 +273,8 @@ impl CombinedDatabase { relayer, gas_price, compression, - block_aggregation, + block_aggregation_storage, + block_aggregation_s3, }) } @@ -275,7 +290,8 @@ impl CombinedDatabase { relayer: Default::default(), gas_price: Default::default(), compression: Default::default(), - block_aggregation: Default::default(), + block_aggregation_storage: Default::default(), + block_aggregation_s3: Default::default(), }) } @@ -322,6 +338,7 @@ impl CombinedDatabase { Database::in_memory(), Database::in_memory(), Database::in_memory(), + Database::in_memory(), ) } @@ -342,8 +359,12 @@ impl CombinedDatabase { &self.compression } - pub fn block_aggregation(&self) -> &Database { - &self.block_aggregation + pub fn block_aggregation_storage(&self) -> &Database { + &self.block_aggregation_storage + } + + pub fn block_aggregation_s3(&self) -> &Database { + &self.block_aggregation_s3 } #[cfg(any(feature = "test-helpers", test))] @@ -615,6 
+636,8 @@ impl CombinedDatabase { self.relayer.shutdown(); self.gas_price.shutdown(); self.compression.shutdown(); + self.block_aggregation_storage.shutdown(); + self.block_aggregation_s3.shutdown(); } } diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 7e8f6ec7061..96b03caad7b 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -84,7 +84,10 @@ use crate::state::{ }; use crate::{ database::database_description::{ - block_aggregator::BlockAggregatorDatabase, + block_aggregator::{ + BlockAggregatorDatabaseS3, + BlockAggregatorDatabaseStorage, + }, gas_price::GasPriceDatabase, indexation_availability, }, @@ -442,7 +445,13 @@ impl Modifiable for Database { } } -impl Modifiable for Database { +impl Modifiable for Database { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) + } +} + +impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) } diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs index 9aff58c3cd4..45bea30ea27 100644 --- a/crates/fuel-core/src/database/database_description/block_aggregator.rs +++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs @@ -3,9 +3,9 @@ use fuel_core_block_aggregator_api::db::table::Column; use fuel_core_types::fuel_types::BlockHeight; #[derive(Clone, Copy, Debug)] -pub struct BlockAggregatorDatabase; +pub struct BlockAggregatorDatabaseStorage; -impl DatabaseDescription for BlockAggregatorDatabase { +impl DatabaseDescription for BlockAggregatorDatabaseStorage { type Column = Column; type Height = BlockHeight; @@ -14,7 +14,31 @@ impl DatabaseDescription for BlockAggregatorDatabase { } fn name() -> String { - 
"block_aggregator".to_string() + "block_aggregator_storage".to_string() + } + + fn metadata_column() -> Self::Column { + Column::Metadata + } + + fn prefix(_column: &Self::Column) -> Option { + None + } +} + +#[derive(Clone, Copy, Debug)] +pub struct BlockAggregatorDatabaseS3; + +impl DatabaseDescription for BlockAggregatorDatabaseS3 { + type Column = Column; + type Height = BlockHeight; + + fn version() -> u32 { + 0 + } + + fn name() -> String { + "block_aggregator_s3".to_string() } fn metadata_column() -> Self::Column { diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 2075361ebc6..3b344f21559 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -194,6 +194,7 @@ impl FuelService { Default::default(), Default::default(), Default::default(), + Default::default(), ); Self::from_combined_database(combined_database, config).await } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 412ba2b4b56..c0ee1aa6597 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -1,40 +1,5 @@ #![allow(clippy::let_unit_value)] -#[cfg(feature = "relayer")] -use crate::relayer::Config as RelayerConfig; -#[cfg(feature = "p2p")] -use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ - key_generator::Ed25519KeyGenerator, - trigger::TimeBasedTrigger, - tx_receiver::PreconfirmationsReceiver, -}; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::{ - blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, - db::storage_db::StorageDB, -}; -use fuel_core_compression_service::service::new_service as new_compression_service; -use fuel_core_gas_price_service::v1::{ - algorithm::AlgorithmV1, - da_source_service::block_committer_costs::{ - BlockCommitterDaBlockCosts, - BlockCommitterHttpApi, - }, - metadata::V1AlgorithmConfig, - service::SharedData, - 
uninitialized_task::new_gas_price_service_v1, -}; -use fuel_core_poa::Trigger; -use fuel_core_storage::{ - self, - transactional::AtomicView, -}; -#[cfg(feature = "relayer")] -use fuel_core_types::blockchain::primitives::DaBlockHeight; -use fuel_core_types::signer::SignMode; -use std::sync::Arc; -use tokio::sync::Mutex; - use super::{ DbType, adapters::{ @@ -49,6 +14,14 @@ use super::{ config::DaCompressionMode, genesis::create_genesis_block, }; +#[cfg(feature = "relayer")] +use crate::relayer::Config as RelayerConfig; +#[cfg(feature = "p2p")] +use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ + key_generator::Ed25519KeyGenerator, + trigger::TimeBasedTrigger, + tx_receiver::PreconfirmationsReceiver, +}; use crate::{ combined_database::CombinedDatabase, database::Database, @@ -87,6 +60,33 @@ use crate::{ }, }, }; +use fuel_core_block_aggregator_api::db::storage_or_remote_db::get_env_vars; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, + db::storage_or_remote_db::StorageOrRemoteDB, +}; +use fuel_core_compression_service::service::new_service as new_compression_service; +use fuel_core_gas_price_service::v1::{ + algorithm::AlgorithmV1, + da_source_service::block_committer_costs::{ + BlockCommitterDaBlockCosts, + BlockCommitterHttpApi, + }, + metadata::V1AlgorithmConfig, + service::SharedData, + uninitialized_task::new_gas_price_service_v1, +}; +use fuel_core_poa::Trigger; +use fuel_core_storage::{ + self, + transactional::AtomicView, +}; +#[cfg(feature = "relayer")] +use fuel_core_types::blockchain::primitives::DaBlockHeight; +use fuel_core_types::signer::SignMode; +use std::sync::Arc; +use tokio::sync::Mutex; pub type PoAService = fuel_core_poa::Service< BlockProducerAdapter, @@ -461,8 +461,34 @@ pub fn init_sub_services( #[cfg(feature = "rpc")] let block_aggregator_rpc = { let block_aggregator_config = config.rpc_config.clone(); - let db = 
database.block_aggregation().clone(); - let db_adapter = StorageDB::new(db); + let db_adapter = if let Some(( + aws_access_key_id, + aws_secrete_access_key, + aws_region, + aws_bucket, + )) = get_env_vars() + { + let url_base = "good.com"; + StorageOrRemoteDB::new_s3( + database.block_aggregation_s3().clone(), + &aws_access_key_id, + &aws_secrete_access_key, + &aws_region, + &aws_bucket, + url_base, + ) + } else { + tracing::info!( + "Required environment variables for S3 bucket not set. Requires: \n\ + AWS_ACCESS_KEY_ID \n\ + AWS_SECRET_ACCESS_KEY \n\ + AWS_REGION \n\ + AWS_BUCKET \n\ + Using local storage" + ); + let db = database.block_aggregation_storage().clone(); + StorageOrRemoteDB::new_storage(db) + }; let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 6629726eb7a..7e326bdc737 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -6,6 +6,8 @@ use fuel_core_types::fuel_types::BlockHeight; pub mod remote_cache; pub mod storage_db; + +pub mod storage_or_remote_db; pub mod table; /// The definition of the block aggregator database. 
diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index 855ed9f4ff0..f4c3cc53b14 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -200,3 +200,8 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } + +#[test] +fn store_block__real_test_for_science() { + todo!() +} diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs new file mode 100644 index 00000000000..505a1481dea --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -0,0 +1,143 @@ +use crate::{ + block_range_response::BlockRangeResponse, + blocks::BlockSourceEvent, + db::{ + BlockAggregatorDB, + remote_cache::RemoteCache, + storage_db::StorageDB, + table::{ + Blocks, + Column, + LatestBlock, + }, + }, + result::Result, +}; +use fuel_core_storage::{ + Error as StorageError, + StorageInspect, + StorageMutate, + kv_store::KeyValueInspect, + transactional::{ + AtomicView, + Modifiable, + StorageTransaction, + }, +}; +use fuel_core_types::fuel_types::BlockHeight; + +/// A union of a storage and a remote cache for the block aggregator. 
This allows both to be +/// supported in production depending on the configuration +pub enum StorageOrRemoteDB { + Remote(RemoteCache), + Storage(StorageDB), +} + +impl StorageOrRemoteDB { + pub fn new_storage(storage: S) -> Self { + StorageOrRemoteDB::Storage(StorageDB::new(storage)) + } + + pub fn new_s3( + _storage: R, + _aws_id: &str, + _aws_secret: &str, + _aws_region: &str, + _aws_bucket: &str, + _url_base: &str, + ) -> Self { + todo!("create client etc") + // let client = { + // let config = aws_sdk_s3::config::Builder::new() + // .region(aws_region) + // .credentials_provider(aws_sdk_s3::Credentials::new( + // aws_id.clone(), + // aws_secret.clone(), + // None, + // None, + // "block-aggregator", + // )) + // .build(); + // aws_sdk_s3::Client::from_conf(config) + // }; + // let remote_cache = RemoteCache::new( + // aws_id.to_string(), + // aws_secret.to_string(), + // aws_region.to_string(), + // aws_bucket.to_string(), + // url_base.to_string(), + // client, + // storage, + // ); + // StorageOrRemoteDB::Remote(remote_cache) + } +} + +pub fn get_env_vars() -> Option<(String, String, String, String)> { + let aws_id = std::env::var("AWS_ACCESS_KEY_ID").ok()?; + let aws_secret = std::env::var("AWS_SECRET_ACCESS_KEY").ok()?; + let aws_region = std::env::var("AWS_REGION").ok()?; + let aws_bucket = std::env::var("AWS_BUCKET").ok()?; + Some((aws_id, aws_secret, aws_region, aws_bucket)) +} + +impl BlockAggregatorDB for StorageOrRemoteDB +where + // Storage Constraints + S: Modifiable + std::fmt::Debug, + S: KeyValueInspect, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: StorageMutate, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, + StorageTransaction: StorageInspect, + // Remote Constraints + R: Send + Sync, + R: Modifiable, + R: StorageInspect, + for<'b> StorageTransaction<&'b mut R>: + StorageMutate, +{ + type Block = crate::protobuf_types::Block; + 
type BlockRangeResponse = BlockRangeResponse; + + async fn store_block(&mut self, block: BlockSourceEvent) -> Result<()> { + match self { + StorageOrRemoteDB::Remote(remote_db) => remote_db.store_block(block).await?, + StorageOrRemoteDB::Storage(storage_db) => { + storage_db.store_block(block).await? + } + } + Ok(()) + } + + async fn get_block_range( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> Result { + let range_response = match self { + StorageOrRemoteDB::Remote(remote_db) => { + remote_db.get_block_range(first, last).await? + } + StorageOrRemoteDB::Storage(storage_db) => { + storage_db.get_block_range(first, last).await? + } + }; + Ok(range_response) + } + + async fn get_current_height(&self) -> Result> { + let height = match self { + StorageOrRemoteDB::Remote(remote_db) => { + remote_db.get_current_height().await? + } + StorageOrRemoteDB::Storage(storage_db) => { + storage_db.get_current_height().await? + } + }; + Ok(height) + } +} diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index e3e9057d7d7..757c49383a5 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -104,6 +104,7 @@ pub mod integration { ServiceRunner::new(block_aggregator) } } + #[cfg(test)] mod tests; diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index aa6c564834b..a3e499dfec3 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -154,3 +154,26 @@ async fn new_block_subscription__can_get_expect_block() { let expected_height = 1; assert_eq!(expected_height, actual_height); } + +macro_rules! 
require_env_var_or_skip { + ($($var:literal),+) => { + $(if std::env::var($var).is_err() { + eprintln!("Skipping test: missing {}", $var); + return; + })+ + }; +} + +#[tokio::test] +async fn get_block_range__can_get_from_remote_s3_bucket() { + require_env_var_or_skip!( + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_REGION", + "AWS_BUCKET" + ); + let config = Config::local_node(); + let _srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); +} From 920bf7432fb12ada0d075bdc2711a8e05c93bc4d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 10 Nov 2025 16:06:32 -0700 Subject: [PATCH 100/146] Get client working in test --- crates/fuel-core/src/service/sub_services.rs | 2 + .../src/db/storage_or_remote_db.rs | 81 +++++++++++-------- 2 files changed, 51 insertions(+), 32 deletions(-) diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index c0ee1aa6597..b853e0e3b53 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -466,6 +466,7 @@ pub fn init_sub_services( aws_secrete_access_key, aws_region, aws_bucket, + aws_endpoint_url, )) = get_env_vars() { let url_base = "good.com"; @@ -476,6 +477,7 @@ pub fn init_sub_services( &aws_region, &aws_bucket, url_base, + aws_endpoint_url, ) } else { tracing::info!( diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index 505a1481dea..1d35633eab1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -13,6 +13,14 @@ use crate::{ }, result::Result, }; +use aws_sdk_s3::{ + Config, + config::{ + BehaviorVersion, + Credentials, + Region, + }, +}; use fuel_core_storage::{ Error as StorageError, StorageInspect, @@ -25,6 +33,7 @@ use fuel_core_storage::{ }, }; use 
fuel_core_types::fuel_types::BlockHeight; +use std::borrow::Cow; /// A union of a storage and a remote cache for the block aggregator. This allows both to be /// supported in production depending on the configuration @@ -39,46 +48,54 @@ impl StorageOrRemoteDB { } pub fn new_s3( - _storage: R, - _aws_id: &str, - _aws_secret: &str, - _aws_region: &str, - _aws_bucket: &str, - _url_base: &str, + storage: R, + aws_id: &str, + aws_secret: &str, + aws_region: &str, + aws_bucket: &str, + url_base: &str, + aws_endpoint_url: Option, ) -> Self { - todo!("create client etc") - // let client = { - // let config = aws_sdk_s3::config::Builder::new() - // .region(aws_region) - // .credentials_provider(aws_sdk_s3::Credentials::new( - // aws_id.clone(), - // aws_secret.clone(), - // None, - // None, - // "block-aggregator", - // )) - // .build(); - // aws_sdk_s3::Client::from_conf(config) - // }; - // let remote_cache = RemoteCache::new( - // aws_id.to_string(), - // aws_secret.to_string(), - // aws_region.to_string(), - // aws_bucket.to_string(), - // url_base.to_string(), - // client, - // storage, - // ); - // StorageOrRemoteDB::Remote(remote_cache) + let region_str = aws_region.to_string(); + let client = { + let mut builder = aws_sdk_s3::config::Builder::new(); + if let Some(aws_endpoint_url) = aws_endpoint_url { + builder.set_endpoint_url(Some(aws_endpoint_url.clone())); + } + + let config = builder + .region(Region::new(Cow::Owned(region_str.clone()))) + .credentials_provider(Credentials::new( + aws_id, + aws_secret, + None, + None, + "block-aggregator", + )) + .behavior_version_latest() + .build(); + aws_sdk_s3::Client::from_conf(config) + }; + let remote_cache = RemoteCache::new( + aws_id.to_string(), + aws_secret.to_string(), + aws_region.to_string(), + aws_bucket.to_string(), + url_base.to_string(), + client, + storage, + ); + StorageOrRemoteDB::Remote(remote_cache) } } -pub fn get_env_vars() -> Option<(String, String, String, String)> { +pub fn get_env_vars() -> 
Option<(String, String, String, String, Option)> { let aws_id = std::env::var("AWS_ACCESS_KEY_ID").ok()?; let aws_secret = std::env::var("AWS_SECRET_ACCESS_KEY").ok()?; let aws_region = std::env::var("AWS_REGION").ok()?; let aws_bucket = std::env::var("AWS_BUCKET").ok()?; - Some((aws_id, aws_secret, aws_region, aws_bucket)) + let aws_endpoint_url = std::env::var("AWS_ENDPOINT_URL").ok(); + Some((aws_id, aws_secret, aws_region, aws_bucket, aws_endpoint_url)) } impl BlockAggregatorDB for StorageOrRemoteDB From 29eea3614cf72fe7594b96d5fce931bdbfae3c8f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 10 Nov 2025 17:12:01 -0700 Subject: [PATCH 101/146] wip get test working with localstack --- Cargo.lock | 1 + crates/fuel-core/src/service/sub_services.rs | 2 +- .../src/db/remote_cache.rs | 1 + .../src/db/storage_or_remote_db.rs | 3 +- tests/Cargo.toml | 1 + tests/tests/rpc.rs | 44 ++++++++++++++++++- 6 files changed, 48 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27605be9985..c1220444faa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4465,6 +4465,7 @@ dependencies = [ "async-trait", "aws-config", "aws-sdk-kms", + "aws-sdk-s3", "clap", "cynic", "ethers", diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index b853e0e3b53..18f52bf8817 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -60,11 +60,11 @@ use crate::{ }, }, }; -use fuel_core_block_aggregator_api::db::storage_or_remote_db::get_env_vars; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_or_remote_db::StorageOrRemoteDB, + db::storage_or_remote_db::get_env_vars, }; use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs 
b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 72d03cb0fbf..60d00bfcee2 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -97,6 +97,7 @@ where let mut buf = Vec::new(); block.encode(&mut buf).map_err(Error::db_error)?; let body = ByteStream::from(buf); + tracing::info!("Storing block in bucket: {:?}", &self.aws_bucket); let req = self .client .put_object() diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index 1d35633eab1..7e22eb63d8c 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -56,7 +56,6 @@ impl StorageOrRemoteDB { url_base: &str, aws_endpoint_url: Option, ) -> Self { - let region_str = aws_region.to_string(); let client = { let mut builder = aws_sdk_s3::config::Builder::new(); if let Some(aws_endpoint_url) = aws_endpoint_url { @@ -64,7 +63,7 @@ impl StorageOrRemoteDB { } let config = builder - .region(Region::new(Cow::Owned(region_str.clone()))) + .region(Region::new(Cow::Owned(aws_region.to_string()))) .credentials_provider(Credentials::new( aws_id, aws_secret, diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 377d92b1740..957d7fcbaf3 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -94,6 +94,7 @@ tokio = { workspace = true, features = [ ] } tracing-subscriber = { workspace = true } url = { workspace = true } +aws-sdk-s3 = "1.111.0" [dev-dependencies] fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index a3e499dfec3..8ede75fe4ca 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,4 +1,5 @@ #![allow(non_snake_case)] + use fuel_core::{ database::Database, service::{ @@ -19,6 +20,7 @@ use fuel_core_client::client::FuelClient; use 
fuel_core_types::fuel_tx::*; use futures::StreamExt; use test_helpers::client_ext::ClientExt; +use tokio::time::sleep; #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc() { @@ -164,8 +166,39 @@ macro_rules! require_env_var_or_skip { }; } +// fn aws_client() -> Client { +// let ( +// aws_access_key_id, +// aws_secret_access_key, +// aws_region, +// _, +// aws_endpoint_url, +// ) = get_env_vars().unwrap(); +// +// let mut builder = aws_sdk_s3::config::Builder::new(); +// if let Some(aws_endpoint_url) = aws_endpoint_url { +// builder.set_endpoint_url(Some(aws_endpoint_url.clone())); +// } +// +// let config = builder +// .region(Region::new(Cow::Owned(aws_region.clone()))) +// .credentials_provider(Credentials::new( +// aws_access_key_id, +// aws_secret_access_key, +// None, +// None, +// "block-aggregator", +// )) +// .behavior_version_latest() +// .build(); +// aws_sdk_s3::Client::from_conf(config) +// } + #[tokio::test] async fn get_block_range__can_get_from_remote_s3_bucket() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .try_init(); require_env_var_or_skip!( "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", @@ -173,7 +206,16 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { "AWS_BUCKET" ); let config = Config::local_node(); - let _srv = FuelService::from_database(Database::default(), config.clone()) + let srv = FuelService::from_database(Database::default(), config.clone()) .await .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + sleep(std::time::Duration::from_secs(1)).await; + + drop(srv) } From e23cf75a5c21095e55a08e2deaa6ebc0c4fadc2a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 11 Nov 2025 08:25:34 -0700 Subject: [PATCH 102/146] Add checks and cleanup to test --- Cargo.lock | 1 + .../src/db/storage_or_remote_db.rs | 10 
+- tests/Cargo.toml | 1 + tests/tests/rpc.rs | 133 ++++++++++++------ 4 files changed, 96 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1220444faa..9a59453087e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4496,6 +4496,7 @@ dependencies = [ "pretty_assertions", "primitive-types", "proptest", + "prost 0.14.1", "rand 0.8.5", "regex", "reqwest 0.12.24", diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index 7e22eb63d8c..e9af2dc936b 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -13,13 +13,9 @@ use crate::{ }, result::Result, }; -use aws_sdk_s3::{ - Config, - config::{ - BehaviorVersion, - Credentials, - Region, - }, +use aws_sdk_s3::config::{ + Credentials, + Region, }; use fuel_core_storage::{ Error as StorageError, diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 957d7fcbaf3..c6af4372bb4 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -95,6 +95,7 @@ tokio = { workspace = true, features = [ tracing-subscriber = { workspace = true } url = { workspace = true } aws-sdk-s3 = "1.111.0" +prost = { workspace = true } [dev-dependencies] fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 8ede75fe4ca..8aaa57fe67d 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,5 +1,12 @@ #![allow(non_snake_case)] +use aws_sdk_s3::{ + Client, + config::{ + Credentials, + Region, + }, +}; use fuel_core::{ database::Database, service::{ @@ -7,18 +14,31 @@ use fuel_core::{ FuelService, }, }; -use fuel_core_block_aggregator_api::protobuf_types::{ - BlockHeightRequest as ProtoBlockHeightRequest, - BlockRangeRequest as ProtoBlockRangeRequest, - NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - block::VersionedBlock as 
ProtoVersionedBlock, - block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, - block_response::Payload as ProtoPayload, - header::VersionedHeader as ProtoVersionedHeader, +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::fuel_block_from_protobuf, + db::{ + remote_cache::block_height_to_key, + storage_or_remote_db::get_env_vars, + }, + protobuf_types::{ + Block as ProtoBlock, + BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block::VersionedBlock as ProtoVersionedBlock, + block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, + header::VersionedHeader as ProtoVersionedHeader, + }, }; use fuel_core_client::client::FuelClient; -use fuel_core_types::fuel_tx::*; +use fuel_core_types::{ + fuel_tx::*, + fuel_types::BlockHeight, +}; use futures::StreamExt; +use prost::bytes::Bytes; +use std::borrow::Cow; use test_helpers::client_ext::ClientExt; use tokio::time::sleep; @@ -166,56 +186,85 @@ macro_rules! 
require_env_var_or_skip { }; } -// fn aws_client() -> Client { -// let ( -// aws_access_key_id, -// aws_secret_access_key, -// aws_region, -// _, -// aws_endpoint_url, -// ) = get_env_vars().unwrap(); -// -// let mut builder = aws_sdk_s3::config::Builder::new(); -// if let Some(aws_endpoint_url) = aws_endpoint_url { -// builder.set_endpoint_url(Some(aws_endpoint_url.clone())); -// } -// -// let config = builder -// .region(Region::new(Cow::Owned(aws_region.clone()))) -// .credentials_provider(Credentials::new( -// aws_access_key_id, -// aws_secret_access_key, -// None, -// None, -// "block-aggregator", -// )) -// .behavior_version_latest() -// .build(); -// aws_sdk_s3::Client::from_conf(config) -// } +fn aws_client() -> Client { + let (aws_access_key_id, aws_secret_access_key, aws_region, _, aws_endpoint_url) = + get_env_vars().unwrap(); + + let mut builder = aws_sdk_s3::config::Builder::new(); + if let Some(aws_endpoint_url) = aws_endpoint_url { + builder.set_endpoint_url(Some(aws_endpoint_url.clone())); + } + + let config = builder + .region(Region::new(Cow::Owned(aws_region.clone()))) + .credentials_provider(Credentials::new( + aws_access_key_id, + aws_secret_access_key, + None, + None, + "block-aggregator", + )) + .behavior_version_latest() + .build(); + aws_sdk_s3::Client::from_conf(config) +} + +async fn get_block_height_from_remote_s3_bucket() -> Bytes { + let client = aws_client(); + let bucket = std::env::var("AWS_BUCKET").unwrap(); + let key = block_height_to_key(&BlockHeight::new(1)); + let req = client.get_object().bucket(&bucket).key(&key); + let obj = req.send().await.unwrap(); + obj.body.collect().await.unwrap().into_bytes() +} + +async fn clean_s3_bucket() { + let client = aws_client(); + let bucket = std::env::var("AWS_BUCKET").unwrap(); + let req = client.list_objects().bucket(&bucket); + let objs = req.send().await.unwrap(); + for obj in objs.contents.unwrap_or_default() { + let req = client + .delete_object() + .bucket(&bucket) + 
.key(&obj.key.unwrap()); + let _ = req.send().await.unwrap(); + } +} #[tokio::test] async fn get_block_range__can_get_from_remote_s3_bucket() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .try_init(); require_env_var_or_skip!( "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_BUCKET" ); + // let _ = tracing_subscriber::fmt() + // .with_max_level(tracing::Level::INFO) + // .try_init(); + clean_s3_bucket().await; + + // given let config = Config::local_node(); let srv = FuelService::from_database(Database::default(), config.clone()) .await .unwrap(); - let graphql_client = FuelClient::from(srv.bound_address); - let tx = Transaction::default_test_tx(); + + // when let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); sleep(std::time::Duration::from_secs(1)).await; - drop(srv) + // then + let data = get_block_height_from_remote_s3_bucket().await; + // can deserialize + let actual_proto: ProtoBlock = prost::Message::decode(data.as_ref()).unwrap(); + let _ = fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); + + // cleanup + clean_s3_bucket().await; + drop(srv); } From 44eb878f584de1fdf863d0bdececac6d6ffb48e2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 11 Nov 2025 08:55:36 -0700 Subject: [PATCH 103/146] Lint toml --- tests/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Cargo.toml b/tests/Cargo.toml index c6af4372bb4..00662cfcbfb 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -36,6 +36,7 @@ aws-config = { version = "1.1.7", features = [ "behavior-version-latest", ], optional = true } aws-sdk-kms = { version = "1.37.0", optional = true } +aws-sdk-s3 = "1.111.0" clap = { workspace = true } cynic = { workspace = true } ethers = "2" @@ -79,6 +80,7 @@ itertools = { workspace = true } k256 = { version = "0.13.3", features = ["ecdsa-core"] } postcard = { workspace = true } primitive-types = { workspace = true, default-features = false 
} +prost = { workspace = true } rand = { workspace = true } reqwest = { workspace = true } rstest = "0.15" @@ -94,8 +96,6 @@ tokio = { workspace = true, features = [ ] } tracing-subscriber = { workspace = true } url = { workspace = true } -aws-sdk-s3 = "1.111.0" -prost = { workspace = true } [dev-dependencies] fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } From 54fc102e1b7d6f107598f2385cfe178795b74ddd Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 11 Nov 2025 08:59:03 -0700 Subject: [PATCH 104/146] remove dummy test, add env-vars to trigger failing integ test --- .github/workflows/ci.yml | 4 ++++ .../block_aggregator_api/src/db/remote_cache/tests.rs | 7 +------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 703996769c9..41093b34148 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,6 +108,10 @@ jobs: runs-on: buildjet-4vcpu-ubuntu-2204 env: RUSTFLAGS: -D warnings + AWS_ACCESS_KEY_ID: test + AWS_SECRET_ACCESS_KEY: test + AWS_REGION: us-east-1 + AWS_ENDPOINT: http://127.0.0.1:4566 strategy: matrix: include: diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index f4c3cc53b14..904a34ae62b 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -199,9 +199,4 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let expected = BlockHeight::from(10u32); let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); -} - -#[test] -fn store_block__real_test_for_science() { - todo!() -} +} \ No newline at end of file From 70ef644b55194d1dbf1f74c1377a39a596b366e4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 11 Nov 2025 09:01:04 -0700 Subject: [PATCH 105/146] fm --- 
.../services/block_aggregator_api/src/db/remote_cache/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index 904a34ae62b..855ed9f4ff0 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -199,4 +199,4 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let expected = BlockHeight::from(10u32); let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); -} \ No newline at end of file +} From 0d4dfe092918b294c2e723315067eda1087d6913 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 11 Nov 2025 09:13:43 -0700 Subject: [PATCH 106/146] Appease Clippy-sama --- benches/benches/block_target_gas.rs | 1 + tests/tests/rpc.rs | 5 +---- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index b363f7d3233..792783333b4 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -402,6 +402,7 @@ fn service_with_many_contracts( Default::default(), Default::default(), Default::default(), + Default::default(), ), config.clone(), ) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 8aaa57fe67d..d8ed4e534ee 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -224,10 +224,7 @@ async fn clean_s3_bucket() { let req = client.list_objects().bucket(&bucket); let objs = req.send().await.unwrap(); for obj in objs.contents.unwrap_or_default() { - let req = client - .delete_object() - .bucket(&bucket) - .key(&obj.key.unwrap()); + let req = client.delete_object().bucket(&bucket).key(obj.key.unwrap()); let _ = req.send().await.unwrap(); } } From d25c15ff3289e3ba0ffe71197e73e93bc0dd252f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: 
Tue, 11 Nov 2025 09:28:09 -0700 Subject: [PATCH 107/146] Appease Clippy-sama, ensure bucket exists --- .../services/block_aggregator_api/src/db/remote_cache.rs | 2 +- .../block_aggregator_api/src/db/remote_cache/tests.rs | 3 +-- crates/services/block_aggregator_api/src/db/storage_db.rs | 2 +- tests/tests/rpc.rs | 8 ++++++++ 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 60d00bfcee2..d19e2ca26d9 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -119,7 +119,7 @@ where let mut tx = self.local_persisted.write_transaction(); let latest_height = if height.succ() == self.orphaned_new_height { self.orphaned_new_height = None; - self.highest_new_height.clone().unwrap_or(height) + self.highest_new_height.unwrap_or(height) } else { height }; diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index 855ed9f4ff0..a06981ff853 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -31,8 +31,7 @@ fn database() -> StorageTransaction> { fn arb_proto_block() -> ProtoBlock { let block = FuelBlock::default(); let serializer = SerializerAdapter; - let proto_block = serializer.serialize_block(&block).unwrap(); - proto_block + serializer.serialize_block(&block).unwrap() } fn put_happy_rule() -> Rule { mock!(Client::put_object) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index fdc31ef3faf..c010bb9bb46 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -97,7 +97,7 @@ where tracing::debug!("Old 
block: {:?}", height); let latest_height = if height.succ() == self.orphaned_new_height { self.orphaned_new_height = None; - self.highest_new_height.clone().unwrap_or(height) + self.highest_new_height.unwrap_or(height) } else { height }; diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index d8ed4e534ee..fc7b5d2f5d4 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -218,6 +218,13 @@ async fn get_block_height_from_remote_s3_bucket() -> Bytes { obj.body.collect().await.unwrap().into_bytes() } +async fn ensure_bucket_exists() { + let client = aws_client(); + let bucket = std::env::var("AWS_BUCKET").unwrap(); + let req = client.create_bucket().bucket(&bucket); + let _ = req.send().await.unwrap(); +} + async fn clean_s3_bucket() { let client = aws_client(); let bucket = std::env::var("AWS_BUCKET").unwrap(); @@ -240,6 +247,7 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { // let _ = tracing_subscriber::fmt() // .with_max_level(tracing::Level::INFO) // .try_init(); + ensure_bucket_exists().await; clean_s3_bucket().await; // given From 1c6eb7a7ce688072abcd2d9a8cb153c3ad24ab92 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 07:59:05 -0700 Subject: [PATCH 108/146] WIP get all integ tests passing --- .../block_aggregator_api/proto/api.proto | 67 ++------------ .../src/api/protobuf_adapter.rs | 32 ++++++- .../src/api/protobuf_adapter/tests.rs | 91 ++++++++++++++++++- .../src/db/remote_cache.rs | 1 + .../src/db/storage_or_remote_db.rs | 13 ++- 5 files changed, 137 insertions(+), 67 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index b478c8b69b2..89ac3776b72 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -5,7 +5,7 @@ package blockaggregator; message BlockHeightRequest {} message BlockHeightResponse { - uint32 height = 1; + optional uint32 height = 1; } message 
BlockRangeRequest { @@ -13,6 +13,13 @@ message BlockRangeRequest { uint32 end = 2; } +message RemoteBlockRangeResponse { + string region = 1; + string bucket = 2; + string key = 3; + string url = 4; +} + message Block { oneof versioned_block { V1Block v1 = 1; @@ -31,62 +38,6 @@ message Header { } } -// pub struct BlockHeaderV1 { -// /// The application header. -// pub(crate) application: ApplicationHeader, -// /// The consensus header. -// pub(crate) consensus: ConsensusHeader, -// /// The header metadata calculated during creation. -// /// The field is pub(crate) to enforce the use of the [`PartialBlockHeader::generate`] method. -// #[cfg_attr(feature = "serde", serde(skip))] -// #[educe(PartialEq(ignore))] -// pub(crate) metadata: Option, -//} -// pub struct ApplicationHeader { -// /// The layer 1 height of messages and events to include since the last layer 1 block number. -// /// This is not meant to represent the layer 1 block this was committed to. Validators will need -// /// to have some rules in place to ensure the block number was chosen in a reasonable way. For -// /// example, they should verify that the block number satisfies the finality requirements of the -// /// layer 1 chain. They should also verify that the block number isn't too stale and is increasing. -// /// Some similar concerns are noted in this issue: -// pub da_height: DaBlockHeight, -// /// The version of the consensus parameters used to execute this block. -// pub consensus_parameters_version: ConsensusParametersVersion, -// /// The version of the state transition bytecode used to execute this block. -// pub state_transition_bytecode_version: StateTransitionBytecodeVersion, -// /// Generated application fields. -// pub generated: Generated, -//} -// pub struct GeneratedApplicationFieldsV1 { -// /// Number of transactions in this block. -// pub transactions_count: u16, -// /// Number of message receipts in this block. 
-// pub message_receipt_count: u32, -// /// Merkle root of transactions. -// pub transactions_root: Bytes32, -// /// Merkle root of message receipts in this block. -// pub message_outbox_root: Bytes32, -// /// Root hash of all imported events from L1 -// pub event_inbox_root: Bytes32, -//} -// pub struct ConsensusHeader { -// /// Merkle root of all previous block header hashes. -// pub prev_root: Bytes32, -// /// Fuel block height. -// pub height: BlockHeight, -// /// The block producer time. -// pub time: Tai64, -// /// generated consensus fields. -// pub generated: Generated, -//} -// pub struct GeneratedConsensusFields { -// /// Hash of the application header. -// pub application_hash: Bytes32, -//} -// pub struct BlockHeaderMetadata { -// /// Hash of the header. -// id: BlockId, -//} message V1Header { uint64 da_height = 1; uint32 consensus_parameters_version = 2; @@ -664,7 +615,7 @@ message InnerPredicateOffset { message BlockResponse { oneof payload { Block literal = 1; - string remote = 2; + RemoteBlockRangeResponse remote = 2; } } diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 74ca9e992b4..181c0963a53 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -11,6 +11,7 @@ use crate::{ BlockRangeRequest as ProtoBlockRangeRequest, BlockResponse as ProtoBlockResponse, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + RemoteBlockRangeResponse as ProtoRemoteBlockRangeResponse, block_aggregator_server::{ BlockAggregator, BlockAggregatorServer as ProtoBlockAggregatorServer, @@ -60,7 +61,7 @@ impl BlockAggregator for Server { let res = receiver.await; match res { Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { - height: *height.unwrap(), + height: height.map(|inner| *inner), })), Err(e) => Err(tonic::Status::internal(format!( "Failed to 
receive height: {}", @@ -108,9 +109,32 @@ impl BlockAggregator for Server { Ok(tonic::Response::new(ReceiverStream::new(rx))) } - BlockRangeResponse::Remote(_) => { - tracing::error!("Remote block range not implemented"); - todo!() + BlockRangeResponse::Remote(inner) => { + let (tx, rx) = tokio::sync::mpsc::channel::< + Result, + >(ARB_LITERAL_BLOCK_BUFFER_SIZE); + + tokio::spawn(async move { + let mut s = inner; + while let Some(pb) = s.next().await { + let proto_response = ProtoRemoteBlockRangeResponse { + region: pb.region.clone(), + bucket: pb.bucket.clone(), + key: pb.key.clone(), + url: pb.url.clone(), + }; + let response = ProtoBlockResponse { + payload: Some(proto_block_response::Payload::Remote( + proto_response, + )), + }; + if tx.send(Ok(response)).await.is_err() { + break; + } + } + }); + + Ok(tonic::Response::new(ReceiverStream::new(rx))) } }, Err(e) => Err(tonic::Status::internal(format!( diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index fb9ef78edf5..e39f0f84d46 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -6,7 +6,10 @@ use crate::{ BlockAggregatorQuery, protobuf_adapter::ProtobufAPI, }, - block_range_response::BlockRangeResponse, + block_range_response::{ + BlockRangeResponse, + RemoteBlockRangeResponse, + }, blocks::importer_and_db_source::{ BlockSerializer, serializer_adapter::SerializerAdapter, @@ -73,11 +76,11 @@ async fn await_query__get_current_height__client_receives_expected_value() { let res = handle.await.unwrap(); // assert client received expected value - assert_eq!(res.into_inner().height, 42); + assert_eq!(res.into_inner().height, Some(42)); } #[tokio::test] -async fn await_query__get_block_range__client_receives_expected_value() { +async fn 
await_query__get_block_range__client_receives_expected_value__literal() { // given let path = free_local_addr(); let mut api = ProtobufAPI::new(path.to_string()); @@ -150,6 +153,88 @@ async fn await_query__get_block_range__client_receives_expected_value() { assert_eq!(expected, actual); } +#[tokio::test] +async fn await_query__get_block_range__client_receives_expected_value__remote() { + // given + let path = free_local_addr(); + let mut api = ProtobufAPI::new(path.to_string()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // call get current height endpoint with client + let url = format!("http://{}", path); + let mut client = ProtoBlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let request = BlockRangeRequest { start: 0, end: 1 }; + let handle = tokio::spawn(async move { + tracing::info!("querying with client"); + client + .get_block_range(request) + .await + .expect("could not get height") + }); + + // when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + let list: Vec<_> = vec!["1", "2"] + .iter() + .map(|height| { + let region = "test-region".to_string(); + let bucket = "test-bucket".to_string(); + let key = height.to_string(); + let url = "good.url".to_string(); + RemoteBlockRangeResponse { + region, + bucket, + key, + url, + } + }) + .collect(); + // return response through query's channel + if let BlockAggregatorQuery::GetBlockRange { + first, + last, + response, + } = query + { + assert_eq!(first, BlockHeight::new(0)); + assert_eq!(last, BlockHeight::new(1)); + tracing::info!("correct query received, sending response"); + let stream = tokio_stream::iter(list.clone()).boxed(); + let range = BlockRangeResponse::Remote(stream); + response.send(range).unwrap(); + } else { + panic!("expected GetBlockRange query"); + } + tracing::info!("awaiting query"); + let response = handle.await.unwrap(); + let expected = list; + let actual: Vec = 
response + .into_inner() + .try_collect::>() + .await + .unwrap() + .into_iter() + .map(|b| { + if let Some(Payload::Remote(inner)) = b.payload { + RemoteBlockRangeResponse { + region: inner.region, + bucket: inner.bucket, + key: inner.key, + url: inner.url, + } + } else { + panic!("unexpected response type") + } + }) + .collect(); + + assert_eq!(expected, actual); +} #[tokio::test] async fn await_query__new_block_stream__client_receives_expected_value() { diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index d19e2ca26d9..3d5b00f3e87 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -156,6 +156,7 @@ where } async fn get_current_height(&self) -> crate::result::Result> { + tracing::debug!("Getting current height from local cache"); let height = self .local_persisted .storage_as_ref::() diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index e9af2dc936b..4f1b2174fb9 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -84,13 +84,22 @@ impl StorageOrRemoteDB { } } -pub fn get_env_vars() -> Option<(String, String, String, String, Option)> { +pub fn get_env_vars() -> Option<(String, String, String, String, String, Option)> +{ let aws_id = std::env::var("AWS_ACCESS_KEY_ID").ok()?; let aws_secret = std::env::var("AWS_SECRET_ACCESS_KEY").ok()?; let aws_region = std::env::var("AWS_REGION").ok()?; let aws_bucket = std::env::var("AWS_BUCKET").ok()?; + let bucket_url_base = std::env::var("BUCKET_URL_BASE").ok()?; let aws_endpoint_url = std::env::var("AWS_ENDPOINT_URL").ok(); - Some((aws_id, aws_secret, aws_region, aws_bucket, aws_endpoint_url)) + Some(( + aws_id, + aws_secret, + aws_region, + 
aws_bucket, + bucket_url_base, + aws_endpoint_url, + )) } impl BlockAggregatorDB for StorageOrRemoteDB From 1b97ed5f67b2274cefcfc2d62e52726a8a83a958 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 08:06:56 -0700 Subject: [PATCH 109/146] Finsh merging --- Cargo.lock | 325 +------------------ crates/fuel-core/src/service/sub_services.rs | 4 +- tests/tests/rpc.rs | 93 +++++- 3 files changed, 95 insertions(+), 327 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 469800c2398..2c8fae248ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -451,7 +451,7 @@ dependencies = [ "async-trait", "auto_impl", "either", - "elliptic-curve", + "elliptic-curve 0.13.8", "k256", "thiserror 2.0.17", ] @@ -2330,7 +2330,7 @@ dependencies = [ "num-traits", "separator", "url", - "uuid 1.18.1", + "uuid", ] [[package]] @@ -3824,325 +3824,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "eth-keystore" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" -dependencies = [ - "aes", - "ctr", - "digest 0.10.7", - "hex", - "hmac", - "pbkdf2 0.11.0", - "rand 0.8.5", - "scrypt", - "serde", - "serde_json", - "sha2 0.10.9", - "sha3", - "thiserror 1.0.69", - "uuid 0.8.2", -] - -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror 1.0.69", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "tiny-keccak", -] - -[[package]] -name = 
"ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", - "scale-info", - "uint", -] - -[[package]] -name = "ethers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0" -dependencies = [ - "ethers-addressbook", - "ethers-contract", - "ethers-core", - "ethers-etherscan", - "ethers-middleware", - "ethers-providers", - "ethers-signers", - "ethers-solc", -] - -[[package]] -name = "ethers-addressbook" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759" -dependencies = [ - "ethers-core", - "once_cell", - "serde", - "serde_json", -] - -[[package]] -name = "ethers-contract" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa" -dependencies = [ - "const-hex", - "ethers-contract-abigen", - "ethers-contract-derive", - "ethers-core", - "ethers-providers", - "futures-util", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror 1.0.69", -] - -[[package]] -name = "ethers-contract-abigen" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b" -dependencies = [ - "Inflector", - "const-hex", - "dunce", - "ethers-core", - "ethers-etherscan", - "eyre", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "reqwest 0.11.27", - "serde", - "serde_json", - "syn 2.0.107", - "toml 0.8.23", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "2.0.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f" -dependencies = [ - "Inflector", - "const-hex", - "ethers-contract-abigen", - "ethers-core", - "proc-macro2", - "quote", - "serde_json", - "syn 2.0.107", -] - -[[package]] -name = "ethers-core" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" -dependencies = [ - "arrayvec", - "bytes", - "cargo_metadata", - "chrono", - "const-hex", - "elliptic-curve 0.13.8", - "ethabi", - "generic-array", - "k256", - "num_enum", - "once_cell", - "open-fastrlp", - "rand 0.8.5", - "rlp", - "serde", - "serde_json", - "strum 0.26.3", - "syn 2.0.107", - "tempfile", - "thiserror 1.0.69", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-etherscan" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649" -dependencies = [ - "chrono", - "ethers-core", - "reqwest 0.11.27", - "semver", - "serde", - "serde_json", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "ethers-middleware" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de" -dependencies = [ - "async-trait", - "auto_impl", - "ethers-contract", - "ethers-core", - "ethers-etherscan", - "ethers-providers", - "ethers-signers", - "futures-channel", - "futures-locks", - "futures-util", - "instant", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-futures", - "url", -] - -[[package]] -name = "ethers-providers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2" -dependencies = [ - 
"async-trait", - "auto_impl", - "base64 0.21.7", - "bytes", - "const-hex", - "enr", - "ethers-core", - "futures-channel", - "futures-core", - "futures-timer", - "futures-util", - "hashers", - "http 0.2.12", - "instant", - "jsonwebtoken", - "once_cell", - "pin-project", - "reqwest 0.11.27", - "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "tokio-tungstenite", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "ws_stream_wasm", -] - -[[package]] -name = "ethers-signers" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2" -dependencies = [ - "async-trait", - "coins-bip32", - "coins-bip39", - "const-hex", - "elliptic-curve 0.13.8", - "eth-keystore", - "ethers-core", - "rand 0.8.5", - "sha2 0.10.9", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "ethers-solc" -version = "2.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd" -dependencies = [ - "cfg-if", - "const-hex", - "dirs 5.0.1", - "dunce", - "ethers-core", - "glob", - "home", - "md-5", - "num_cpus", - "once_cell", - "path-slash", - "rayon", - "regex", - "semver", - "serde", - "serde_json", - "solang-parser", - "svm-rs", - "thiserror 1.0.69", - "tiny-keccak", - "tokio", - "tracing", - "walkdir", - "yansi 0.5.1", -] - [[package]] name = "ethnum" version = "1.5.2" @@ -10100,7 +9781,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" dependencies = [ - "base16ct", + "base16ct 0.2.0", "serde", ] diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 18f52bf8817..aee6880e0e8 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ 
b/crates/fuel-core/src/service/sub_services.rs @@ -466,17 +466,17 @@ pub fn init_sub_services( aws_secrete_access_key, aws_region, aws_bucket, + url_base, aws_endpoint_url, )) = get_env_vars() { - let url_base = "good.com"; StorageOrRemoteDB::new_s3( database.block_aggregation_s3().clone(), &aws_access_key_id, &aws_secrete_access_key, &aws_region, &aws_bucket, - url_base, + &url_base, aws_endpoint_url, ) } else { diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index fc7b5d2f5d4..0b408b9aa6c 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -25,6 +25,7 @@ use fuel_core_block_aggregator_api::{ BlockHeightRequest as ProtoBlockHeightRequest, BlockRangeRequest as ProtoBlockRangeRequest, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + RemoteBlockRangeResponse as ProtoRemoteBlockRangeResponse, block::VersionedBlock as ProtoVersionedBlock, block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, block_response::Payload as ProtoPayload, @@ -43,7 +44,11 @@ use test_helpers::client_ext::ClientExt; use tokio::time::sleep; #[tokio::test(flavor = "multi_thread")] -async fn get_block_range__can_get_serialized_block_from_rpc() { +async fn get_block_range__can_get_serialized_block_from_rpc__literal() { + if env_vars_are_set() { + tracing::info!("Skipping test: AWS credentials are set"); + return; + } let config = Config::local_node(); let rpc_url = config.rpc_config.addr; @@ -91,12 +96,81 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { ProtoVersionedHeader::V1(v1_header) => v1_header.height, ProtoVersionedHeader::V2(v2_header) => v2_header.height, }; + // then assert_eq!(expected_header.height.0, actual_height); } +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_serialized_block_from_rpc__remote() { + let Some((_, _, aws_region, aws_bucket, url_base, _)) = get_env_vars() else { + tracing::info!("Skipping test: AWS credentials are not set"); + return; + }; + let config = 
Config::local_node(); + let rpc_url = config.rpc_config.addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let expected_header = expected_block.header; + let expected_height = BlockHeight::new(expected_header.height.0); + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let remote_info = if let Some(ProtoPayload::Remote(remote_info)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + remote_info + } else { + panic!("expected literal block payload"); + }; + + // then + let key = block_height_to_key(&expected_height); + let expected = ProtoRemoteBlockRangeResponse { + region: aws_region.clone(), + bucket: aws_bucket.clone(), + key: key.clone(), + url: format!("{}/blocks/{}", url_base, key), + }; + assert_eq!(expected, remote_info); + clean_s3_bucket().await; +} + #[tokio::test(flavor = "multi_thread")] async fn get_block_height__can_get_value_from_rpc() { + if get_env_vars().is_some() { + ensure_bucket_exists().await; + clean_s3_bucket().await; + } + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .try_init(); let config = Config::local_node(); let rpc_url = config.rpc_config.addr; @@ -116,8 +190,9 @@ async fn get_block_height__can_get_value_from_rpc() { .expect("could not connect to server"); // when + sleep(std::time::Duration::from_secs(1)).await; let request = ProtoBlockHeightRequest {}; - let expected_height = 1; + let 
expected_height = Some(1); let actual_height = rpc_client .get_block_height(request) .await @@ -125,6 +200,11 @@ async fn get_block_height__can_get_value_from_rpc() { .into_inner() .height; + // cleanup + if get_env_vars().is_some() { + clean_s3_bucket().await; + } + // then assert_eq!(expected_height, actual_height); } @@ -186,8 +266,15 @@ macro_rules! require_env_var_or_skip { }; } +fn env_vars_are_set() -> bool { + std::env::var("AWS_ACCESS_KEY_ID").is_ok() + && std::env::var("AWS_SECRET_ACCESS_KEY").is_ok() + && std::env::var("AWS_REGION").is_ok() + && std::env::var("AWS_BUCKET").is_ok() +} + fn aws_client() -> Client { - let (aws_access_key_id, aws_secret_access_key, aws_region, _, aws_endpoint_url) = + let (aws_access_key_id, aws_secret_access_key, aws_region, _, _, aws_endpoint_url) = get_env_vars().unwrap(); let mut builder = aws_sdk_s3::config::Builder::new(); From ea5cbf60065ddc362a351ebd59182c456cec76dc Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 10:43:45 -0700 Subject: [PATCH 110/146] Get integ test passing --- bin/fuel-core/src/cli/run/rpc.rs | 2 + crates/fuel-core/src/service/config.rs | 2 + crates/fuel-core/src/service/sub_services.rs | 2 + .../src/api/protobuf_adapter.rs | 2 + .../src/db/remote_cache.rs | 62 ++++++++++++- .../src/db/remote_cache/tests.rs | 92 +++++++++++++++++-- .../block_aggregator_api/src/db/storage_db.rs | 46 +++++++++- .../src/db/storage_db/tests.rs | 52 +++++++++++ .../src/db/storage_or_remote_db.rs | 2 + .../services/block_aggregator_api/src/lib.rs | 10 +- tests/tests/rpc.rs | 5 +- 11 files changed, 257 insertions(+), 20 deletions(-) diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index 324cc8daee5..355353b45a2 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -1,4 +1,5 @@ use clap::Args; +use fuel_core_types::fuel_types::BlockHeight; use std::net; #[derive(Debug, Clone, Args)] @@ -16,6 +17,7 @@ impl RpcArgs { pub fn into_config(self) 
-> fuel_core_block_aggregator_api::integration::Config { fuel_core_block_aggregator_api::integration::Config { addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), + sync_from: Some(BlockHeight::from(0)), } } } diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index e2d0299bd58..24a0384d292 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -48,6 +48,7 @@ use crate::{ use fuel_core_types::fuel_types::{ AssetId, + BlockHeight, ChainId, }; #[cfg(feature = "parallel-executor")] @@ -172,6 +173,7 @@ impl Config { #[cfg(feature = "rpc")] let rpc_config = fuel_core_block_aggregator_api::integration::Config { addr: free_local_addr(), + sync_from: Some(BlockHeight::from(0)), }; Self { diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index aee6880e0e8..a242a2b95a5 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -461,6 +461,7 @@ pub fn init_sub_services( #[cfg(feature = "rpc")] let block_aggregator_rpc = { let block_aggregator_config = config.rpc_config.clone(); + let sync_from = block_aggregator_config.sync_from.unwrap_or_default(); let db_adapter = if let Some(( aws_access_key_id, aws_secrete_access_key, @@ -478,6 +479,7 @@ pub fn init_sub_services( &aws_bucket, &url_base, aws_endpoint_url, + sync_from, ) } else { tracing::info!( diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 181c0963a53..3e43e89a662 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -53,12 +53,14 @@ impl BlockAggregator for Server { request: tonic::Request, ) -> Result, tonic::Status> { tracing::debug!("get_block_height: {:?}", request); + tracing::info!("get_block_height: {:?}", request); let 
(response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetCurrentHeight { response }; self.query_sender.send(query).await.map_err(|e| { tonic::Status::internal(format!("Failed to send query: {}", e)) })?; let res = receiver.await; + tracing::info!("query result: {:?}", &res); match res { Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { height: height.map(|inner| *inner), diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 3d5b00f3e87..02faf6974cc 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -45,8 +45,10 @@ pub struct RemoteCache { // track consistency between runs local_persisted: S, + sync_from: BlockHeight, highest_new_height: Option, orphaned_new_height: Option, + synced: bool, } impl RemoteCache { @@ -58,6 +60,7 @@ impl RemoteCache { url_base: String, client: Client, local_persisted: S, + sync_from: BlockHeight, ) -> RemoteCache { RemoteCache { aws_id, @@ -67,8 +70,10 @@ impl RemoteCache { url_base, client, local_persisted, + sync_from, highest_new_height: None, orphaned_new_height: None, + synced: false, } } @@ -97,7 +102,6 @@ where let mut buf = Vec::new(); block.encode(&mut buf).map_err(Error::db_error)?; let body = ByteStream::from(buf); - tracing::info!("Storing block in bucket: {:?}", &self.aws_bucket); let req = self .client .put_object() @@ -109,18 +113,51 @@ where match block_event { BlockSourceEvent::NewBlock(new_height, _) => { tracing::debug!("New block: {:?}", new_height); + tracing::info!("New block: {:?}", new_height); self.highest_new_height = Some(new_height); - if self.orphaned_new_height.is_none() { + if self.synced { + tracing::info!("Updating latest block to {:?}", new_height); + let mut tx = self.local_persisted.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &new_height) + .map_err(|e| 
Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if new_height == self.sync_from { + tracing::info!("Updating latest block to {:?}", new_height); + self.synced = true; + self.highest_new_height = Some(new_height); + self.orphaned_new_height = None; + let mut tx = self.local_persisted.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &new_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if self.height_is_next_height(new_height)? { + tracing::info!("Updating latest block to {:?}", new_height); + self.synced = true; + self.highest_new_height = Some(new_height); + self.orphaned_new_height = None; + let mut tx = self.local_persisted.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &new_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if self.orphaned_new_height.is_none() { + tracing::info!("Marking block as orphaned: {:?}", new_height); self.orphaned_new_height = Some(new_height); } } BlockSourceEvent::OldBlock(height, _) => { tracing::debug!("Old block: {:?}", height); + tracing::info!("Old block: {:?}", height); let mut tx = self.local_persisted.write_transaction(); let latest_height = if height.succ() == self.orphaned_new_height { + tracing::info!("Marking block as synced: {:?}", height); self.orphaned_new_height = None; + self.synced = true; self.highest_new_height.unwrap_or(height) } else { + tracing::info!("Updating latest block to {:?}", height); height }; tx.storage_as_mut::() @@ -167,6 +204,27 @@ where } } +impl RemoteCache +where + S: Send + Sync, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, +{ + fn height_is_next_height(&self, height: BlockHeight) -> crate::result::Result { + let maybe_latest_height = self + .local_persisted + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + if let Some(latest_height) = maybe_latest_height { + 
Ok(latest_height.succ() == Some(height)) + } else { + Ok(false) + } + } +} + pub fn block_height_to_key(height: &BlockHeight) -> String { format!("{:08x}", height) } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index a06981ff853..16b52776de7 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -51,8 +51,9 @@ async fn store_block__happy_path() { let aws_bucket = "test-bucket".to_string(); let base = "http://good.com".to_string(); let storage = database(); + let sync_from = BlockHeight::new(0); let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, + aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, sync_from, ); let block_height = BlockHeight::new(123); let block = arb_proto_block(); @@ -75,14 +76,16 @@ async fn get_block_range__happy_path() { let aws_bucket = "test-bucket".to_string(); let base = "http://good.com".to_string(); let storage = database(); + let sync_from = BlockHeight::new(0); let adapter = RemoteCache::new( - aws_id.clone(), - aws_secret.clone(), + aws_id, + aws_secret, aws_region.clone(), aws_bucket.clone(), base.clone(), client, storage, + sync_from, ); let start = BlockHeight::new(999); let end = BlockHeight::new(1003); @@ -122,9 +125,11 @@ async fn get_current_height__returns_highest_continuous_block() { let aws_bucket = "test-bucket".to_string(); let base = "http://good.com".to_string(); let storage = database(); + let sync_from = BlockHeight::new(0); let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, + aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, sync_from, ); + let expected = BlockHeight::new(123); let block = arb_proto_block(); let block = BlockSourceEvent::OldBlock(expected, block); @@ -153,9 +158,11 
@@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); let base = "http://good.com".to_string(); + let sync_from = BlockHeight::new(0); let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, + aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, sync_from, ); + let expected = BlockHeight::new(3); let block = arb_proto_block(); let block = BlockSourceEvent::NewBlock(expected, block); @@ -179,8 +186,10 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // given let db = database(); - let mut adapter = - RemoteCache::new(aws_id, aws_secret, aws_region, aws_bucket, base, client, db); + let sync_from = BlockHeight::new(0); + let mut adapter = RemoteCache::new( + aws_id, aws_secret, aws_region, aws_bucket, base, client, db, sync_from, + ); for height in 2..=10u32 { let height = BlockHeight::from(height); @@ -198,4 +207,73 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let expected = BlockHeight::from(10u32); let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); + + assert!(adapter.synced) +} + +#[tokio::test] +async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() { + let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); + let client = mock_client!(aws_sdk_s3, rules.iter()); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let base = "http://good.com".to_string(); + + // given + let db = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = RemoteCache::new( + aws_id, aws_secret, aws_region, aws_bucket, base, client, db, sync_from, + ); + + let height = BlockHeight::from(0u32); + let some_block = 
arb_proto_block(); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // when + let height = BlockHeight::from(1u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(1u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced) +} + +#[tokio::test] +async fn store_block__new_block_comes_first() { + let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); + let client = mock_client!(aws_sdk_s3, rules.iter()); + let aws_id = "test-id".to_string(); + let aws_secret = "test-secret".to_string(); + let aws_region = "test-region".to_string(); + let aws_bucket = "test-bucket".to_string(); + let base = "http://good.com".to_string(); + + // given + let db = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = RemoteCache::new( + aws_id, aws_secret, aws_region, aws_bucket, base, client, db, sync_from, + ); + + // when + let height = BlockHeight::from(0u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(0u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced); } diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index c010bb9bb46..00810dcbf49 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -47,6 +47,7 @@ mod tests; pub struct StorageDB { highest_new_height: Option, orphaned_new_height: Option, + synced: bool, storage: S, } @@ -55,6 +56,7 @@ impl StorageDB { 
Self { highest_new_height: None, orphaned_new_height: None, + synced: false, storage, } } @@ -84,12 +86,27 @@ where tx.storage_as_mut::() .insert(&height, &block) .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; match block_event { BlockSourceEvent::NewBlock(new_height, _) => { tracing::debug!("New block: {:?}", new_height); self.highest_new_height = Some(new_height); - if self.orphaned_new_height.is_none() { + if self.synced { + let mut tx = self.storage.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &new_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if self.height_is_next_height(new_height)? { + let mut tx = self.storage.write_transaction(); + self.synced = true; + self.highest_new_height = Some(new_height); + tx.storage_as_mut::() + .insert(&(), &new_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if self.orphaned_new_height.is_none() { self.orphaned_new_height = Some(new_height); } } @@ -97,16 +114,18 @@ where tracing::debug!("Old block: {:?}", height); let latest_height = if height.succ() == self.orphaned_new_height { self.orphaned_new_height = None; + self.synced = true; self.highest_new_height.unwrap_or(height) } else { height }; + let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() .insert(&(), &latest_height) .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } } - tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; Ok(()) } @@ -134,6 +153,29 @@ where } } +impl StorageDB +where + S: Modifiable + std::fmt::Debug, + S: KeyValueInspect, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, +{ + fn height_is_next_height(&self, height: BlockHeight) -> Result { + let maybe_latest_height = self + .storage + .storage_as_ref::() + 
.get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + if let Some(latest_height) = maybe_latest_height { + Ok(latest_height.succ() == Some(height)) + } else { + Ok(false) + } + } +} pub struct StorageStream { inner: S, next: Option, diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 04ecf143a24..b841c27d7b3 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -158,3 +158,55 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } +#[tokio::test] +async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() { + // given + let db = database(); + let mut adapter = StorageDB::new(db); + + let height = BlockHeight::from(0u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // when + let height = BlockHeight::from(1u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(1u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced) +} + +#[tokio::test] +async fn store_block__new_block_comes_first() { + // given + let db = database(); + let mut adapter = StorageDB::new(db); + + // when + let height = BlockHeight::from(1u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + assert!(!adapter.synced); + // (with old block after) + 
tokio::time::sleep(std::time::Duration::from_millis(100)).await; + let height = BlockHeight::from(0u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + + let expected = BlockHeight::from(1u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced); +} diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index 4f1b2174fb9..9a4eed49d03 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -51,6 +51,7 @@ impl StorageOrRemoteDB { aws_bucket: &str, url_base: &str, aws_endpoint_url: Option, + sync_from: BlockHeight, ) -> Self { let client = { let mut builder = aws_sdk_s3::config::Builder::new(); @@ -79,6 +80,7 @@ impl StorageOrRemoteDB { url_base.to_string(), client, storage, + sync_from, ); StorageOrRemoteDB::Remote(remote_cache) } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 757c49383a5..12d02c66c19 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -20,8 +20,12 @@ pub mod result; pub mod block_range_response; +pub mod block_aggregator; pub mod protobuf_types; +#[cfg(test)] +mod tests; + pub mod integration { use crate::{ BlockAggregator, @@ -56,6 +60,7 @@ pub mod integration { #[derive(Clone, Debug)] pub struct Config { pub addr: SocketAddr, + pub sync_from: Option, } #[allow(clippy::type_complexity)] @@ -105,11 +110,6 @@ pub mod integration { } } -#[cfg(test)] -mod tests; - -pub mod block_aggregator; - // TODO: this doesn't need to limited to the blocks, // but we can change the name later /// The Block Aggregator service, 
which aggregates blocks from a source and stores them in a database diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 0b408b9aa6c..9641af2f77e 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -323,7 +323,7 @@ async fn clean_s3_bucket() { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_from_remote_s3_bucket() { require_env_var_or_skip!( "AWS_ACCESS_KEY_ID", @@ -331,9 +331,6 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { "AWS_REGION", "AWS_BUCKET" ); - // let _ = tracing_subscriber::fmt() - // .with_max_level(tracing::Level::INFO) - // .try_init(); ensure_bucket_exists().await; clean_s3_bucket().await; From eff8f0bdd07a7ceb31ad7478d1d44a4cb061b4e8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 11:02:32 -0700 Subject: [PATCH 111/146] fix sync-from height stuff --- crates/fuel-core/src/service/sub_services.rs | 28 +++++++++++++++++-- .../block_aggregator_api/src/db/storage_db.rs | 12 +++++++- .../src/db/storage_db/tests.rs | 26 ++++++----------- .../src/db/storage_or_remote_db.rs | 5 ++-- .../services/block_aggregator_api/src/lib.rs | 4 +-- 5 files changed, 51 insertions(+), 24 deletions(-) diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index a242a2b95a5..2eb47cf0063 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -60,12 +60,17 @@ use crate::{ }, }, }; +use anyhow::anyhow; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_or_remote_db::StorageOrRemoteDB, db::storage_or_remote_db::get_env_vars, }; +use fuel_core_block_aggregator_api::{ + db::table::LatestBlock, + result::Error, +}; use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, @@ -80,6 +85,7 @@ use 
fuel_core_gas_price_service::v1::{ use fuel_core_poa::Trigger; use fuel_core_storage::{ self, + StorageAsRef, transactional::AtomicView, }; #[cfg(feature = "relayer")] @@ -462,6 +468,7 @@ pub fn init_sub_services( let block_aggregator_rpc = { let block_aggregator_config = config.rpc_config.clone(); let sync_from = block_aggregator_config.sync_from.unwrap_or_default(); + let sync_from_height; let db_adapter = if let Some(( aws_access_key_id, aws_secrete_access_key, @@ -471,8 +478,17 @@ pub fn init_sub_services( aws_endpoint_url, )) = get_env_vars() { + let db = database.block_aggregation_s3().clone(); + let maybe_sync_from_height = db + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))? + .map(|c| *c) + .and_then(|h| h.succ()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from.clone()); + StorageOrRemoteDB::new_s3( - database.block_aggregation_s3().clone(), + db, &aws_access_key_id, &aws_secrete_access_key, &aws_region, @@ -491,7 +507,14 @@ pub fn init_sub_services( Using local storage" ); let db = database.block_aggregation_storage().clone(); - StorageOrRemoteDB::new_storage(db) + let maybe_sync_from_height = db + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))? 
+ .map(|c| *c) + .and_then(|h| h.succ()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from.clone()); + StorageOrRemoteDB::new_storage(db, sync_from) }; let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); @@ -502,6 +525,7 @@ pub fn init_sub_services( serializer, onchain_db, importer, + sync_from_height, ) }; diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 00810dcbf49..4672e420625 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -48,15 +48,17 @@ pub struct StorageDB { highest_new_height: Option, orphaned_new_height: Option, synced: bool, + sync_from: BlockHeight, storage: S, } impl StorageDB { - pub fn new(storage: S) -> Self { + pub fn new(storage: S, sync_from: BlockHeight) -> Self { Self { highest_new_height: None, orphaned_new_height: None, synced: false, + sync_from, storage, } } @@ -98,6 +100,14 @@ where .insert(&(), &new_height) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if new_height == self.sync_from { + let mut tx = self.storage.write_transaction(); + self.synced = true; + self.highest_new_height = Some(new_height); + tx.storage_as_mut::() + .insert(&(), &new_height) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } else if self.height_is_next_height(new_height)? 
{ let mut tx = self.storage.write_transaction(); self.synced = true; diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index b841c27d7b3..fe030080da0 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -35,7 +35,7 @@ fn proto_block_with_height(height: BlockHeight) -> ProtoBlock { async fn store_block__adds_to_storage() { // given let db = database(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); let expected = proto_block_with_height(height); let block = BlockSourceEvent::OldBlock(height, expected.clone()); @@ -79,7 +79,7 @@ async fn get_block__can_get_expected_range() { tx.commit().unwrap(); let db = db.commit().unwrap(); let tx = db.into_transaction(); - let adapter = StorageDB::new(tx); + let adapter = StorageDB::new(tx, BlockHeight::from(0u32)); // when let BlockRangeResponse::Literal(stream) = @@ -97,7 +97,7 @@ async fn get_block__can_get_expected_range() { async fn store_block__updates_the_highest_continuous_block_if_contiguous() { // given let db = database(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); let expected = proto_block_with_height(height); let block = BlockSourceEvent::OldBlock(height, expected.clone()); @@ -121,7 +121,7 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig .insert(&(), &starting_height) .unwrap(); tx.commit().unwrap(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); let height = BlockHeight::from(3u32); let proto = proto_block_with_height(height); let block = BlockSourceEvent::NewBlock(height, proto.clone()); @@ -139,7 +139,7 @@ async fn 
store_block__does_not_update_the_highest_continuous_block_if_not_contig async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // given let db = database(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); for height in 2..=10u32 { let height = BlockHeight::from(height); @@ -162,7 +162,7 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() { // given let db = database(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); let height = BlockHeight::from(0u32); let some_block = proto_block_with_height(height); @@ -187,24 +187,16 @@ async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() async fn store_block__new_block_comes_first() { // given let db = database(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); // when - let height = BlockHeight::from(1u32); - let some_block = proto_block_with_height(height); - let block = BlockSourceEvent::NewBlock(height, some_block.clone()); - adapter.store_block(block).await.unwrap(); - assert!(!adapter.synced); - // (with old block after) - tokio::time::sleep(std::time::Duration::from_millis(100)).await; let height = BlockHeight::from(0u32); let some_block = proto_block_with_height(height); - let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); adapter.store_block(block).await.unwrap(); // then - - let expected = BlockHeight::from(1u32); + let expected = BlockHeight::from(0u32); let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs 
index 9a4eed49d03..98f3530cc5e 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -39,10 +39,11 @@ pub enum StorageOrRemoteDB { } impl StorageOrRemoteDB { - pub fn new_storage(storage: S) -> Self { - StorageOrRemoteDB::Storage(StorageDB::new(storage)) + pub fn new_storage(storage: S, sync_from: BlockHeight) -> Self { + StorageOrRemoteDB::Storage(StorageDB::new(storage, sync_from)) } + #[allow(clippy::too_many_arguments)] pub fn new_s3( storage: R, aws_id: &str, diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 12d02c66c19..8df0f0010ed 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -70,6 +70,7 @@ pub mod integration { serializer: S, onchain_db: OnchainDB, importer: BoxStream, + sync_from_height: BlockHeight, ) -> ServiceRunner< BlockAggregator< ProtobufAPI, @@ -91,13 +92,12 @@ pub mod integration { { let addr = config.addr.to_string(); let api = ProtobufAPI::new(addr); - let db_starting_height = BlockHeight::from(0); let db_ending_height = None; let block_source = ImporterAndDbSource::new( importer, serializer, onchain_db, - db_starting_height, + sync_from_height, db_ending_height, ); let block_aggregator = BlockAggregator { From 397bc33d208a34594c466b2cffc5fd71d8107615 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 11:14:40 -0700 Subject: [PATCH 112/146] Lint and appease Clippy-sama --- crates/fuel-core/src/service/config.rs | 4 +++- crates/fuel-core/src/service/sub_services.rs | 11 ++++++----- .../src/api/protobuf_adapter/tests.rs | 2 +- .../block_aggregator_api/src/db/remote_cache.rs | 1 + 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 24a0384d292..2bc98848e1c 100644 --- 
a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -46,11 +46,13 @@ use crate::{ graphql_api::ServiceConfig as GraphQLConfig, }; +#[cfg(feature = "rpc")] +use fuel_core_types::fuel_types::BlockHeight; use fuel_core_types::fuel_types::{ AssetId, - BlockHeight, ChainId, }; + #[cfg(feature = "parallel-executor")] use std::num::NonZeroUsize; diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 2eb47cf0063..c36f7a07403 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -60,14 +60,13 @@ use crate::{ }, }, }; +#[cfg(feature = "rpc")] use anyhow::anyhow; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_or_remote_db::StorageOrRemoteDB, db::storage_or_remote_db::get_env_vars, -}; -use fuel_core_block_aggregator_api::{ db::table::LatestBlock, result::Error, }; @@ -83,11 +82,13 @@ use fuel_core_gas_price_service::v1::{ uninitialized_task::new_gas_price_service_v1, }; use fuel_core_poa::Trigger; +#[cfg(feature = "rpc")] +use fuel_core_storage::StorageAsRef; use fuel_core_storage::{ self, - StorageAsRef, transactional::AtomicView, }; + #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; use fuel_core_types::signer::SignMode; @@ -485,7 +486,7 @@ pub fn init_sub_services( .map_err(|e| Error::DB(anyhow!(e)))? .map(|c| *c) .and_then(|h| h.succ()); - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from.clone()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); StorageOrRemoteDB::new_s3( db, @@ -513,7 +514,7 @@ pub fn init_sub_services( .map_err(|e| Error::DB(anyhow!(e)))? 
.map(|c| *c) .and_then(|h| h.succ()); - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from.clone()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); StorageOrRemoteDB::new_storage(db, sync_from) }; let serializer = SerializerAdapter; diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index e39f0f84d46..380b74ef318 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -179,7 +179,7 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() let query = api.await_query().await.unwrap(); // then - let list: Vec<_> = vec!["1", "2"] + let list: Vec<_> = ["1", "2"] .iter() .map(|height| { let region = "test-region".to_string(); diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 02faf6974cc..411bdd085b0 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -52,6 +52,7 @@ pub struct RemoteCache { } impl RemoteCache { + #[allow(clippy::too_many_arguments)] pub fn new( aws_id: String, aws_secret: String, From c684dbcc70668814c100a4dfe74b7deed4d93e8a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 12:32:19 -0700 Subject: [PATCH 113/146] Add more env var checks to make sure tests fail in CI --- .github/workflows/ci.yml | 1 + tests/tests/rpc.rs | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 41093b34148..cb5f17f0249 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,6 +111,7 @@ jobs: AWS_ACCESS_KEY_ID: test AWS_SECRET_ACCESS_KEY: test AWS_REGION: us-east-1 + AWS_BUCKET: test-bucket AWS_ENDPOINT: 
http://127.0.0.1:4566 strategy: matrix: diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 9641af2f77e..33eafc705b6 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -271,6 +271,8 @@ fn env_vars_are_set() -> bool { && std::env::var("AWS_SECRET_ACCESS_KEY").is_ok() && std::env::var("AWS_REGION").is_ok() && std::env::var("AWS_BUCKET").is_ok() + && std::env::var("AWS_ENDPOINT_URL").is_ok() + && std::env::var("BUCKET_URL_BASE").is_ok() } fn aws_client() -> Client { @@ -329,7 +331,9 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", - "AWS_BUCKET" + "AWS_BUCKET", + "AWS_ENDPOINT_URL", + "BUCKET_URL_BASE" ); ensure_bucket_exists().await; clean_s3_bucket().await; From 361e7138b497ce234e7d1fa185831d2889fdd2f6 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 12:39:20 -0700 Subject: [PATCH 114/146] Add missing env var --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cb5f17f0249..5f5c8f9065f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -113,6 +113,7 @@ jobs: AWS_REGION: us-east-1 AWS_BUCKET: test-bucket AWS_ENDPOINT: http://127.0.0.1:4566 + BUCKET_URL_BASE: test-url.com strategy: matrix: include: From e8ff2a69aba9df2fb39a7691e3baaa4fe11f80c8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 12:52:52 -0700 Subject: [PATCH 115/146] Add new integ test --- .github/workflows/ci.yml | 54 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5f5c8f9065f..306d0660c1b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -206,6 +206,59 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} + rpc-s3-integration-tests: + name: RPC S3 Integration Tests (w/LocalStack) + needs: + - 
lint-toml-files + - prevent-openssl + - rustfmt + - check-changelog + runs-on: buildjet-4vcpu-ubuntu-2204 + timeout-minutes: 45 + services: + localstack: + image: localstack/localstack:3.6 + ports: + - 4566:4566 + env: + SERVICES: s3 + DEBUG: 1 + options: >- + --health-cmd="curl -f http://localhost:4566/health || exit 1" + --health-interval=5s + --health-timeout=5s + --health-retries=20 + + env: + AWS_ACCESS_KEY_ID: test + AWS_SECRET_ACCESS_KEY: test + AWS_REGION: us-east-1 + AWS_BUCKET: test-bucket + AWS_ENDPOINT: http://localhost:4566 + BUCKET_URL_BASE: test-url.com + RUSTFLAGS: -D warnings + + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.RUST_VERSION }} + - name: Wait for LocalStack + run: | + for i in {1..20}; do + if curl -s http://localhost:4566/health | grep "\"s3\": \"running\"" > /dev/null; then + echo "LocalStack ready!" + break + fi + echo "Waiting for LocalStack..." + sleep 3 + done + - name: Create S3 bucket + run: aws --endpoint-url=http://localhost:4566 s3 mb s3://test-bucket + - name: Run RPC Integration Tests + run: cargo test --package fuel-core-tests --test integration_tests rpc --features rpc -- --test-threads=1 + publish-crates-check: runs-on: buildjet-4vcpu-ubuntu-2204 steps: @@ -264,6 +317,7 @@ jobs: - cargo-verifications - publish-crates-check - cargo-test-kms + - rpc-integration-tests runs-on: ubuntu-latest steps: - run: echo "pass" From 5fd155525c38fc068670f5ae878978cb73a1d1ea Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 13:02:05 -0700 Subject: [PATCH 116/146] bump ci From 30705a6c3836b7d7b695c04eb8dce268a9a9cb49 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 13:06:53 -0700 Subject: [PATCH 117/146] Fix name --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 306d0660c1b..ff9423cbee8 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -317,7 +317,7 @@ jobs: - cargo-verifications - publish-crates-check - cargo-test-kms - - rpc-integration-tests + - rpc-s3-integration-tests runs-on: ubuntu-latest steps: - run: echo "pass" From 29fb92539a9197398ad7349fbd71da037a4faf7a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 13:12:46 -0700 Subject: [PATCH 118/146] Remove health check --- .github/workflows/ci.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ff9423cbee8..5451737231a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -244,16 +244,6 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} - - name: Wait for LocalStack - run: | - for i in {1..20}; do - if curl -s http://localhost:4566/health | grep "\"s3\": \"running\"" > /dev/null; then - echo "LocalStack ready!" - break - fi - echo "Waiting for LocalStack..." - sleep 3 - done - name: Create S3 bucket run: aws --endpoint-url=http://localhost:4566 s3 mb s3://test-bucket - name: Run RPC Integration Tests From 765f808cb5b6ea989c1b1b23c447bf4b6bd3d96d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 13:33:52 -0700 Subject: [PATCH 119/146] Remove other health check --- .github/workflows/ci.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5451737231a..bb0940998a0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -217,17 +217,12 @@ jobs: timeout-minutes: 45 services: localstack: - image: localstack/localstack:3.6 + image: localstack/localstack:latest ports: - 4566:4566 env: SERVICES: s3 DEBUG: 1 - options: >- - --health-cmd="curl -f http://localhost:4566/health || exit 1" - --health-interval=5s - --health-timeout=5s - --health-retries=20 env: AWS_ACCESS_KEY_ID: test From a3341707b808ed7dd1529cfc636bdeb62f52072e Mon Sep 17 
00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 13:44:02 -0700 Subject: [PATCH 120/146] Edit CI down --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bb0940998a0..608a4801d18 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -239,8 +239,8 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} - - name: Create S3 bucket - run: aws --endpoint-url=http://localhost:4566 s3 mb s3://test-bucket + - name: Install Protoc + uses: arduino/setup-protoc@v3 - name: Run RPC Integration Tests run: cargo test --package fuel-core-tests --test integration_tests rpc --features rpc -- --test-threads=1 From 1709f03fdc775ccff3f792ae62cfa587869204ac Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 13:59:09 -0700 Subject: [PATCH 121/146] Fix env vars --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 608a4801d18..23c621783d4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,12 +108,12 @@ jobs: runs-on: buildjet-4vcpu-ubuntu-2204 env: RUSTFLAGS: -D warnings - AWS_ACCESS_KEY_ID: test - AWS_SECRET_ACCESS_KEY: test - AWS_REGION: us-east-1 - AWS_BUCKET: test-bucket - AWS_ENDPOINT: http://127.0.0.1:4566 - BUCKET_URL_BASE: test-url.com + # AWS_ACCESS_KEY_ID: test + # AWS_SECRET_ACCESS_KEY: test + # AWS_REGION: us-east-1 + # AWS_BUCKET: test-bucket + # AWS_ENDPOINT: http://127.0.0.1:4566 + # BUCKET_URL_BASE: test-url.com strategy: matrix: include: @@ -229,7 +229,7 @@ jobs: AWS_SECRET_ACCESS_KEY: test AWS_REGION: us-east-1 AWS_BUCKET: test-bucket - AWS_ENDPOINT: http://localhost:4566 + AWS_ENDPOINT_URL: http://localhost:4566 BUCKET_URL_BASE: test-url.com RUSTFLAGS: -D warnings From 232989c740d99b39f0d5290ad4aad4e49b8b1bc4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: 
Wed, 12 Nov 2025 14:29:23 -0700 Subject: [PATCH 122/146] Add better error messages --- tests/tests/rpc.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 33eafc705b6..c1bd0e53ea8 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -107,6 +107,8 @@ async fn get_block_range__can_get_serialized_block_from_rpc__remote() { tracing::info!("Skipping test: AWS credentials are not set"); return; }; + ensure_bucket_exists().await; + clean_s3_bucket().await; let config = Config::local_node(); let rpc_url = config.rpc_config.addr; @@ -211,6 +213,10 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { + if get_env_vars().is_some() { + ensure_bucket_exists().await; + clean_s3_bucket().await; + } let config = Config::local_node(); let rpc_url = config.rpc_config.addr; @@ -255,6 +261,9 @@ async fn new_block_subscription__can_get_expect_block() { // then let expected_height = 1; assert_eq!(expected_height, actual_height); + if get_env_vars().is_some() { + clean_s3_bucket().await; + } } macro_rules! 
require_env_var_or_skip { @@ -298,20 +307,25 @@ fn aws_client() -> Client { aws_sdk_s3::Client::from_conf(config) } -async fn get_block_height_from_remote_s3_bucket() -> Bytes { +async fn get_block_from_s3_bucket() -> Bytes { let client = aws_client(); let bucket = std::env::var("AWS_BUCKET").unwrap(); let key = block_height_to_key(&BlockHeight::new(1)); let req = client.get_object().bucket(&bucket).key(&key); let obj = req.send().await.unwrap(); - obj.body.collect().await.unwrap().into_bytes() + let message = format!( + "should be able to get block from bucket: {} with key {}", + bucket, key + ); + obj.body.collect().await.expect(&message).into_bytes() } async fn ensure_bucket_exists() { let client = aws_client(); let bucket = std::env::var("AWS_BUCKET").unwrap(); let req = client.create_bucket().bucket(&bucket); - let _ = req.send().await.unwrap(); + let expect_message = format!("should be able to create bucket: {}", bucket); + let _ = req.send().await.expect(&expect_message); } async fn clean_s3_bucket() { @@ -352,7 +366,7 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { sleep(std::time::Duration::from_secs(1)).await; // then - let data = get_block_height_from_remote_s3_bucket().await; + let data = get_block_from_s3_bucket().await; // can deserialize let actual_proto: ProtoBlock = prost::Message::decode(data.as_ref()).unwrap(); let _ = fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); From d3c01020f916f6e2d6fbeb46c6f51b67141ed5d8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 14:46:55 -0700 Subject: [PATCH 123/146] Modify client to use path style --- .../services/block_aggregator_api/src/db/storage_or_remote_db.rs | 1 + tests/tests/rpc.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index 98f3530cc5e..fe14512cfcf 100644 --- 
a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -61,6 +61,7 @@ impl StorageOrRemoteDB { } let config = builder + .force_path_style(true) .region(Region::new(Cow::Owned(aws_region.to_string()))) .credentials_provider(Credentials::new( aws_id, diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index c1bd0e53ea8..21fa3231fc0 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -294,6 +294,7 @@ fn aws_client() -> Client { } let config = builder + .force_path_style(true) .region(Region::new(Cow::Owned(aws_region.clone()))) .credentials_provider(Credentials::new( aws_access_key_id, From bbd7c9a130b68e338d6ef8673f14053fb47a5356 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 14:58:34 -0700 Subject: [PATCH 124/146] Add some traces to help understand what is being run --- tests/tests/rpc.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 21fa3231fc0..6a1adff9bc9 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -170,9 +170,6 @@ async fn get_block_height__can_get_value_from_rpc() { ensure_bucket_exists().await; clean_s3_bucket().await; } - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .try_init(); let config = Config::local_node(); let rpc_url = config.rpc_config.addr; @@ -342,6 +339,9 @@ async fn clean_s3_bucket() { #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_from_remote_s3_bucket() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .try_init(); require_env_var_or_skip!( "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", @@ -375,4 +375,7 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { // cleanup clean_s3_bucket().await; drop(srv); + tracing::info!( + "Successfully ran test: get_block_range__can_get_from_remote_s3_bucket" + ); } From 
dcc507803e8b34f52a3ba813fd1662c274be26bc Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 15:17:50 -0700 Subject: [PATCH 125/146] Cleanup --- .github/workflows/ci.yml | 6 - crates/fuel-core/src/service/sub_services.rs | 2 + .../block_aggregator_api/proto/api.proto | 216 ------------------ .../src/block_aggregator.rs | 1 + 4 files changed, 3 insertions(+), 222 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 23c621783d4..e2479d37a0a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -108,12 +108,6 @@ jobs: runs-on: buildjet-4vcpu-ubuntu-2204 env: RUSTFLAGS: -D warnings - # AWS_ACCESS_KEY_ID: test - # AWS_SECRET_ACCESS_KEY: test - # AWS_REGION: us-east-1 - # AWS_BUCKET: test-bucket - # AWS_ENDPOINT: http://127.0.0.1:4566 - # BUCKET_URL_BASE: test-url.com strategy: matrix: include: diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index c36f7a07403..4cb189f0bdb 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -505,6 +505,8 @@ pub fn init_sub_services( AWS_SECRET_ACCESS_KEY \n\ AWS_REGION \n\ AWS_BUCKET \n\ + AWS_ENDPOINT_URL \n\ + AWS_S3_URL_BASE (Optional)\n\ Using local storage" ); let db = database.block_aggregation_storage().clone(); diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 89ac3776b72..d1ef647f3ed 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -54,20 +54,6 @@ message V1Header { optional bytes block_id = 13; } -// pub struct GeneratedApplicationFieldsV2 { -// /// Number of transactions in this block. -// pub transactions_count: u16, -// /// Number of message receipts in this block. -// pub message_receipt_count: u32, -// /// Merkle root of transactions. 
-// pub transactions_root: Bytes32, -// /// Merkle root of message receipts in this block. -// pub message_outbox_root: Bytes32, -// /// Root hash of all imported events from L1 -// pub event_inbox_root: Bytes32, -// /// TxID commitment -// pub tx_id_commitment: Bytes32, -//} message V2Header { uint64 da_height = 1; uint32 consensus_parameters_version = 2; @@ -96,34 +82,6 @@ message Transaction { } } -// pub struct ChargeableTransaction -//where -// Body: BodyConstraints, -//{ -// pub(crate) body: Body, -// pub(crate) policies: Policies, -// pub(crate) inputs: Vec, -// pub(crate) outputs: Vec, -// pub(crate) witnesses: Vec, -// #[serde(skip)] -// #[cfg_attr(feature = "da-compression", compress(skip))] -// #[educe(PartialEq(ignore))] -// #[educe(Hash(ignore))] -// #[canonical(skip)] -// pub(crate) metadata: Option>, -//} -// pub struct ScriptBody { -// pub(crate) script_gas_limit: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub(crate) receipts_root: Bytes32, -// pub(crate) script: ScriptCode, -// #[educe(Debug(method(fmt_truncated_hex::<16>)))] -// pub(crate) script_data: Vec, -//} -// #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] -//pub struct ScriptMetadata { -// pub script_data_offset: usize, -//} message ScriptTransaction { uint64 script_gas_limit = 1; bytes receipts_root = 2; @@ -189,26 +147,11 @@ message BlobTransaction { BlobMetadata metadata = 7; } -// pub struct Policies { -// /// A bitmask that indicates what policies are set. -// bits: PoliciesBits, -// /// The array of policy values. 
-// values: [Word; POLICIES_NUMBER], -//} message Policies { uint32 bits = 1; repeated uint64 values = 2; } -// pub enum Input { -// CoinSigned(CoinSigned), -// CoinPredicate(CoinPredicate), -// Contract(Contract), -// MessageCoinSigned(MessageCoinSigned), -// MessageCoinPredicate(MessageCoinPredicate), -// MessageDataSigned(MessageDataSigned), -// MessageDataPredicate(MessageDataPredicate), -//} message Input { oneof variant { CoinSignedInput coin_signed = 1; @@ -221,37 +164,6 @@ message Input { } } -// pub struct Coin -//where -// Specification: CoinSpecification, -//{ -// pub utxo_id: UtxoId, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub owner: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub amount: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub asset_id: AssetId, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub tx_pointer: TxPointer, -// #[educe(Debug(method(fmt_as_field)))] -// pub witness_index: Specification::Witness, -// /// Exact amount of gas used by the predicate. -// /// If the predicate consumes different amount of gas, -// /// it's considered to be false. 
-// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_gas_used: Specification::PredicateGasUsed, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate: Specification::Predicate, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_data: Specification::PredicateData, -//} -// impl CoinSpecification for Signed { -// type Predicate = Empty; -// type PredicateData = Empty>; -// type PredicateGasUsed = Empty; -// type Witness = u16; -//} message CoinSignedInput { UtxoId utxo_id = 1; bytes owner = 2; @@ -264,12 +176,6 @@ message CoinSignedInput { bytes predicate_data = 9; } -//impl CoinSpecification for Predicate { -// type Predicate = PredicateCode; -// type PredicateData = Vec; -// type PredicateGasUsed = Word; -// type Witness = Empty; -//} message CoinPredicateInput { UtxoId utxo_id = 1; bytes owner = 2; @@ -282,18 +188,6 @@ message CoinPredicateInput { bytes predicate_data = 9; } -// pub struct Contract { -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub utxo_id: UtxoId, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub balance_root: Bytes32, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub state_root: Bytes32, -// /// Pointer to transaction that last modified the contract state. -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub tx_pointer: TxPointer, -// pub contract_id: ContractId, -//} message ContractInput { UtxoId utxo_id = 1; bytes balance_root = 2; @@ -302,44 +196,6 @@ message ContractInput { bytes contract_id = 5; } -// pub struct Message -//where -// Specification: MessageSpecification, -//{ -// /// The sender from the L1 chain. -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub sender: Address, -// /// The receiver on the `Fuel` chain. 
-// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub recipient: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub amount: Word, -// // Unique identifier of the message -// pub nonce: Nonce, -// #[educe(Debug(method(fmt_as_field)))] -// pub witness_index: Specification::Witness, -// /// Exact amount of gas used by the predicate. -// /// If the predicate consumes different amount of gas, -// /// it's considered to be false. -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_gas_used: Specification::PredicateGasUsed, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// #[educe(Debug(method(fmt_as_field)))] -// pub data: Specification::Data, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate: Specification::Predicate, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_data: Specification::PredicateData, -//} -// pub struct MessageCoin(core::marker::PhantomData); -// -// impl MessageSpecification for MessageCoin { -// type Data = Empty>; -// type Predicate = Empty; -// type PredicateData = Empty>; -// type PredicateGasUsed = Empty; -// type Witness = u16; -// } message MessageCoinSignedInput { bytes sender = 1; bytes recipient = 2; @@ -352,13 +208,6 @@ message MessageCoinSignedInput { bytes predicate_data = 9; } -// impl MessageSpecification for MessageCoin { -// type Data = Empty>; -// type Predicate = PredicateCode; -// type PredicateData = Vec; -// type PredicateGasUsed = Word; -// type Witness = Empty; -// } message MessageCoinPredicateInput { bytes sender = 1; bytes recipient = 2; @@ -371,7 +220,6 @@ message MessageCoinPredicateInput { bytes predicate_data = 9; } -// pub type MessageDataSigned = Message>; message MessageDataSignedInput { bytes sender = 1; bytes recipient = 2; @@ -384,8 +232,6 @@ message MessageDataSignedInput { bytes predicate_data = 9; } -// pub type MessageDataPredicate = -// Message>; message MessageDataPredicateInput { bytes sender = 1; bytes recipient = 2; @@ 
-398,36 +244,6 @@ message MessageDataPredicateInput { bytes predicate_data = 9; } -// pub enum Output { -// Coin { -// to: Address, -// amount: Word, -// asset_id: AssetId, -// }, -// -// Contract(Contract), -// -// Change { -// to: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// amount: Word, -// asset_id: AssetId, -// }, -// -// Variable { -// #[cfg_attr(feature = "da-compression", compress(skip))] -// to: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// amount: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// asset_id: AssetId, -// }, -// -// ContractCreated { -// contract_id: ContractId, -// state_root: Bytes32, -// }, -//} message Output { oneof variant { CoinOutput coin = 1; @@ -462,12 +278,6 @@ message ContractCreatedOutput { bytes state_root = 2; } -// pub struct UtxoId { -// /// transaction id -// tx_id: TxId, -// /// output index -// output_index: u16, -//} message UtxoId { bytes tx_id = 1; uint32 output_index = 2; @@ -483,32 +293,6 @@ message StorageSlot { bytes value = 2; } - -// #[derive(Debug, Clone, PartialEq, Eq, Hash)] -//pub struct ChargeableMetadata { -// pub common: CommonMetadata, -// pub body: Body, -//} -// pub struct ScriptBody { -// pub(crate) script_gas_limit: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub(crate) receipts_root: Bytes32, -// pub(crate) script: ScriptCode, -// #[educe(Debug(method(fmt_truncated_hex::<16>)))] -// pub(crate) script_data: Vec, -//} -// #[derive(Debug, Clone, PartialEq, Eq, Hash)] -//pub struct CommonMetadata { -// pub id: Bytes32, -// pub inputs_offset: usize, -// pub inputs_offset_at: Vec, -// pub inputs_predicate_offset_at: Vec>, -// pub outputs_offset: usize, -// pub outputs_offset_at: Vec, -// pub witnesses_offset: usize, -// pub witnesses_offset_at: Vec, -//} - message ScriptMetadata { bytes id = 1; uint32 inputs_offset = 2; diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs 
b/crates/services/block_aggregator_api/src/block_aggregator.rs index 128b715fb40..42ff7ecd16c 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -136,6 +136,7 @@ where } BlockSourceEvent::OldBlock(_id, _block) => { // Do nothing + // Only stream new blocks } }; let res = self.database.store_block(event).await; From 2e210663489d50840172c0ffd67a1598c73a4227 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 15:19:37 -0700 Subject: [PATCH 126/146] remove /blocks from url path, we do not know if that will be the case --- crates/services/block_aggregator_api/src/db/remote_cache.rs | 2 +- tests/tests/rpc.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 411bdd085b0..884deff70d3 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -79,7 +79,7 @@ impl RemoteCache { } fn url_for_block(base: &str, key: &str) -> String { - format!("{}/blocks/{}", base, key,) + format!("{}/{}", base, key,) } } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 6a1adff9bc9..df4263bf919 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -158,7 +158,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc__remote() { region: aws_region.clone(), bucket: aws_bucket.clone(), key: key.clone(), - url: format!("{}/blocks/{}", url_base, key), + url: format!("{}/{}", url_base, key), }; assert_eq!(expected, remote_info); clean_s3_bucket().await; From f56d01a736744cd449c93b11a92bafcc5c37ccfa Mon Sep 17 00:00:00 2001 From: Mitchell Turner Date: Tue, 25 Nov 2025 08:15:40 -0700 Subject: [PATCH 127/146] Block RPC PR Followup (#3145) ## Linked Issues/PRs Complete feedback from: https://github.com/FuelLabs/fuel-core/pull/3100 
https://github.com/FuelLabs/fuel-core/pull/3101 https://github.com/FuelLabs/fuel-core/pull/3112 https://github.com/FuelLabs/fuel-core/pull/3116 As they have all been merged into https://github.com/FuelLabs/fuel-core/pull/3100 ### TODOs #### Completed - [x] Move proto types to new repo (or at least remove dep on protoc) https://github.com/FuelLabs/fuel-core-protobuf/pull/1 - [x] Use ipv4 instead of ipv4 `let listener = TcpListener::bind("[::1]:0").unwrap();` - [x] make sure rpc is optional (may already be good with feature) - [x] "If we add a new database, we should include it in all places where we already interact with databases, like check_version, rollback_to, and so on(check where do we use databa liek relayer)" - [x] "The CombinedDatabase::check_version() method is missing a version check for the new block_aggregation database. This omission is inconsistent with other database checks and could lead to version compatibility issues." - [x] "I would like to mention the reason why this database doesn't force a monotonic increase in height." - [x] "Also, becuase we don't force monotonic height increase, we can't actually re-use rollback feature from historicat database. But we still need the ability to reset the state of the block aggregator to height X. So you can add another function, which we can call in rollback_to." - [x] "I know that ServiceRunner here is an overkill, but we have GraphQL that looks the same, and it was fiiiiine=D Maybe for consistency plus some logging we could reuse it here as well. It's just an internal service for your main service" - [x] `// TODO: Should this be owned to begin with?` Yeah, we should work with reference in proto_header_from_header and in proto_tx_from_tx - [x] maybe "If you used BoxStream, then you could avoid usage of the tokio::spawn( below. 
You can just do inner.map({...}).into_boxed()" in protobuf_adapater impl of protobuf trait: `type GetBlockRangeStream = BoxStream>;` - [x] `#[allow(unused)] fn arb_inputs()` - [x] in `serializer_adapter.rs`: "We should work with reference to everywhere in this file. And we should also avoid usage of unswap_or_default. All fields are always set, so it is strange why it can be None` - [x] `if let Some(value) = policies.get(PolicyType::Owner) { values[5] = value; }` - [x] in `serializer_adapter.rs`: "Why values for policies in an empty array? If the policy is not set, we shouldn't include them. If no policy is set, then it will be empty vector." - [x] in `crates/types/src/blockchain/header.rs`: "We don't need to expose the application header. You can find an example on how to create a header here: https://github.com/FuelLabs/fuel-rust-indexer/blob/main/crates/receipts_manager/adapters/graphql_event_adapter.rs#L250", "And the same for the block, you don't need manually calculate it, you can do that from the Block::new." - [x] in `types/src/test_helpers.rs`: "Block::new should be enough for you to create a header and a block. You don't need to implement all the logic by yourself.You can clean up getters and setters which you've added, after you udpated code here=) " - [x] Maybe remove `self.go_to_sleep_before_continuing().await;` #### Need Feedback - [ ] in `serializer_adapter.rs`: "If we use Rust definition to define variants inside of enums, then we can remove useless fields like data for MessageCoinPredicate and witness_index for MessageCoinPredicate" (@xgreenx I don't understand this. What "Rust definition"?) 
#### Noop - [ ] ~in `serializer_adapter.rs`: "I think instead of creating many procedure fucntions, we could implement TryFrom and From for types, plus, we could split them into its own folders by transactions and some common folder."~ **(Since this is in a separate repo now, the TryFrom/From impls wouldn't be valid due to the orphan rule)** - [ ] ~in `api.proto`: "I think all txs have these, maybe we could move them to the top-level Transaction?" DRY up chargeable txs?~**( I don't really care for this abstraction, but I could be convinced to DRY this up. Just doesn't seem like much gain)** - [ ] ~I think we can move fetching of the full blocks to be a part of default functionality provided by FuelClient along with old functionality (in `test-helpers/src/client_ext.rs`)~ **(I don't want to add new functionality in this work, we could do a followup)** - [ ] ~"[nit] https://github.com/FuelLabs/fuel-rust-indexer/blob/main/crates/receipts_manager/service.rs#L584 We can replace the whole service with stream of joined events(old + new)"~ **(WE could do this as a followup)** ## Description ## Checklist - [ ] Breaking changes are clearly marked as such in the PR description and changelog - [ ] New behavior is reflected in tests - [ ] [The specification](https://github.com/FuelLabs/fuel-specs/) matches the implemented behavior (link update PR if changes are needed) ### Before requesting review - [ ] I have reviewed the code myself - [ ] I have created follow-up issues caused by this PR and linked them here ### After merging, notify other teams [Add or remove entries as needed] - [ ] [Rust SDK](https://github.com/FuelLabs/fuels-rs/) - [ ] [Sway compiler](https://github.com/FuelLabs/sway/) - [ ] [Platform documentation](https://github.com/FuelLabs/devrel-requests/issues/new?assignees=&labels=new+request&projects=&template=NEW-REQUEST.yml&title=%5BRequest%5D%3A+) (for out-of-organization contributors, the person merging the PR will do this) - [ ] Someone else? 
--- .../actions/slack-notify-template/action.yml | 25 + .github/workflows/ci.yml | 15 +- .github/workflows/docker-images.yml | 4 +- Cargo.lock | 148 +- Cargo.toml | 7 +- benches/Cargo.toml | 1 + benches/benches/block_target_gas.rs | 2 + bin/fuel-core/src/cli/rollback.rs | 2 +- bin/fuel-core/src/cli/run.rs | 4 + bin/fuel-core/src/cli/run/rpc.rs | 41 +- bin/fuel-core/src/lib.rs | 1 + crates/fuel-core/Cargo.toml | 3 +- crates/fuel-core/src/combined_database.rs | 130 +- crates/fuel-core/src/database.rs | 46 +- .../src/database/database_description.rs | 1 + crates/fuel-core/src/lib.rs | 2 + crates/fuel-core/src/query/message.rs | 2 +- crates/fuel-core/src/schema/tx/assemble_tx.rs | 16 +- crates/fuel-core/src/service.rs | 4 +- crates/fuel-core/src/service/adapters.rs | 3 + crates/fuel-core/src/service/adapters/rpc.rs | 53 + .../src/service/adapters/rpc/tests.rs | 46 + crates/fuel-core/src/service/config.rs | 3 + crates/fuel-core/src/service/sub_services.rs | 98 +- .../services/block_aggregator_api/Cargo.toml | 8 +- crates/services/block_aggregator_api/build.rs | 7 - .../block_aggregator_api/proto/api.proto | 412 ----- .../services/block_aggregator_api/src/api.rs | 5 +- .../src/api/protobuf_adapter.rs | 175 +- .../src/api/protobuf_adapter/tests.rs | 86 +- .../src/block_aggregator.rs | 4 +- .../src/block_range_response.rs | 13 +- .../src/blocks/importer_and_db_source.rs | 63 +- .../importer_service.rs | 32 +- .../serializer_adapter.rs | 1489 +---------------- .../fuel_to_proto_conversions.rs | 855 ++++++++++ .../proto_to_fuel_conversions.rs | 1169 +++++++++++++ .../importer_and_db_source/sync_service.rs | 129 +- .../blocks/importer_and_db_source/tests.rs | 188 +-- .../src/db/remote_cache.rs | 94 +- .../src/db/remote_cache/tests.rs | 82 +- .../block_aggregator_api/src/db/storage_db.rs | 4 +- .../src/db/storage_db/tests.rs | 6 +- .../src/db/storage_or_remote_db.rs | 57 +- .../services/block_aggregator_api/src/lib.rs | 45 +- .../src/protobuf_types.rs | 2 +- 
.../block_aggregator_api/src/result.rs | 10 + .../block_aggregator_api/src/tests.rs | 2 +- .../consensus_module/poa/src/service.rs | 4 +- crates/services/executor/src/executor.rs | 26 +- crates/services/p2p/src/peer_manager.rs | 2 +- .../services/producer/src/block_producer.rs | 4 +- .../services/shared-sequencer/src/service.rs | 2 +- .../src/selection_algorithms/ratio_tip_gas.rs | 4 +- crates/services/txpool_v2/src/service.rs | 2 +- .../services/txpool_v2/src/storage/graph.rs | 14 +- crates/types/src/blockchain/header.rs | 11 - crates/types/src/test_helpers.rs | 256 ++- tests/Cargo.toml | 2 + tests/tests/rpc.rs | 270 +-- tests/tests/trigger_integration/interval.rs | 2 +- 61 files changed, 3410 insertions(+), 2783 deletions(-) create mode 100644 .github/actions/slack-notify-template/action.yml create mode 100644 crates/fuel-core/src/service/adapters/rpc.rs create mode 100644 crates/fuel-core/src/service/adapters/rpc/tests.rs delete mode 100644 crates/services/block_aggregator_api/build.rs delete mode 100644 crates/services/block_aggregator_api/proto/api.proto create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs diff --git a/.github/actions/slack-notify-template/action.yml b/.github/actions/slack-notify-template/action.yml new file mode 100644 index 00000000000..4988191d3f6 --- /dev/null +++ b/.github/actions/slack-notify-template/action.yml @@ -0,0 +1,25 @@ +name: Notify Slack on Failure +description: Sends notification to Slack if job fails + +inputs: + github_token: + description: Github Token Secret + required: true + slack_webhook: + description: Slack webhook URL + required: true + +runs: + using: composite + steps: + - name: Notify if Job Fails + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + token: ${{ 
inputs.github_token }} + notification_title: '{workflow} has {status_message}' + message_format: '{emoji} *{workflow}* {status_message} in <{repo_url}|{repo}> : <{run_url}|View Run Results>' + footer: '' + notify_when: failure + env: + SLACK_WEBHOOK_URL: ${{ inputs.slack_webhook }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e2479d37a0a..9a2f4d399a3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -80,7 +80,7 @@ jobs: run: cargo install cargo-sort - name: Run Cargo.toml sort check run: cargo sort -w --check - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && github.ref == 'refs/heads/master' with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -93,7 +93,7 @@ jobs: # ensure openssl hasn't crept into the dep tree - name: Check if openssl is included run: ./.github/workflows/scripts/verify_openssl.sh - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && github.ref == 'refs/heads/master' with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -174,8 +174,6 @@ jobs: uses: davidB/rust-cargo-make@v1 with: version: "0.36.4" - - name: Install Protoc - uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: @@ -194,7 +192,7 @@ jobs: continue-on-error: true - name: ${{ matrix.command }} ${{ matrix.args }} run: ${{ matrix.env }} cargo ${{ matrix.command }} ${{ matrix.args }} - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && github.ref == 'refs/heads/master' with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -222,9 +220,6 @@ jobs: AWS_ACCESS_KEY_ID: test AWS_SECRET_ACCESS_KEY: test AWS_REGION: us-east-1 - AWS_BUCKET: test-bucket - AWS_ENDPOINT_URL: http://localhost:4566 - BUCKET_URL_BASE: test-url.com RUSTFLAGS: -D warnings steps: @@ -341,7 +336,7 
@@ jobs: publish-delay: 60000 registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -487,7 +482,7 @@ jobs: asset_name: ${{ env.ZIP_FILE_NAME }} asset_content_type: application/gzip - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') && matrix.job.os != 'macos-latest' with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docker-images.yml b/.github/workflows/docker-images.yml index 78f58d37ffc..eb2e6322614 100644 --- a/.github/workflows/docker-images.yml +++ b/.github/workflows/docker-images.yml @@ -290,7 +290,7 @@ jobs: cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest,mode=max,image-manifest=true,oci-mediatypes=true - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -390,7 +390,7 @@ jobs: cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest,mode=max,image-manifest=true,oci-mediatypes=true - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 46fd64ad09a..d63ea62f43d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1290,9 +1290,9 @@ checksum = 
"c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-config" -version = "1.8.8" +version = "1.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37cf2b6af2a95a20e266782b4f76f1a5e12bf412a9db2de9c1e9123b9d8c0ad8" +checksum = "1856b1b48b65f71a4dd940b1c0931f9a7b646d4a924b9828ffefc1454714668a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1436,9 +1436,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.86.0" +version = "1.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a0abbfab841446cce6e87af853a3ba2cc1bc9afcd3f3550dd556c43d434c86d" +checksum = "d05b276777560aa9a196dbba2e3aada4d8006d3d7eeb3ba7fe0c317227d933c4" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1458,9 +1458,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.88.0" +version = "1.90.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a68d675582afea0e94d38b6ca9c5aaae4ca14f1d36faa6edb19b42e687e70d7" +checksum = "f9be14d6d9cd761fac3fd234a0f47f7ed6c0df62d83c0eeb7012750e4732879b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1480,9 +1480,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.88.0" +version = "1.90.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d30990923f4f675523c51eb1c0dec9b752fb267b36a61e83cbc219c9d86da715" +checksum = "98a862d704c817d865c8740b62d8bbeb5adcb30965e93b471df8a5bcefa20a80" dependencies = [ "aws-credential-types", "aws-runtime", @@ -3983,6 +3983,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + 
"crc32fast", + "miniz_oxide", +] + [[package]] name = "flex-error" version = "0.4.4" @@ -4251,10 +4261,13 @@ version = "0.47.1" dependencies = [ "anyhow", "async-trait", + "aws-config", "aws-sdk-s3", "aws-smithy-mocks", "bytes", "enum-iterator", + "flate2", + "fuel-core-protobuf", "fuel-core-services", "fuel-core-storage", "fuel-core-types 0.47.1", @@ -4272,8 +4285,6 @@ dependencies = [ "tokio", "tokio-stream", "tonic 0.14.2", - "tonic-prost", - "tonic-prost-build", "tracing", "tracing-subscriber", ] @@ -4622,6 +4633,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "fuel-core-protobuf" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a616726038fbe445bd3294d2700afa8487e38fbc6abc86a8af12be4b596db598" +dependencies = [ + "prost 0.14.1", + "serde", + "tonic 0.14.2", + "tonic-prost", +] + [[package]] name = "fuel-core-provider" version = "0.47.1" @@ -4780,6 +4803,7 @@ dependencies = [ "aws-sdk-s3", "clap", "cynic", + "flate2", "fuel-core", "fuel-core-benches", "fuel-core-bin", @@ -5072,7 +5096,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbdd607c9c70921cc016becde659e5062ae460b7bb3f525a1dd65f8209c0083" dependencies = [ "prost 0.12.6", - "prost-types 0.12.6", + "prost-types", "regex", "tonic 0.11.0", ] @@ -7245,6 +7269,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] @@ -7379,12 +7404,6 @@ dependencies = [ "unsigned-varint 0.8.0", ] -[[package]] -name = "multimap" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" - [[package]] name = "multistream-select" version = "0.13.0" @@ -8406,28 +8425,6 @@ dependencies = [ "prost-derive 0.14.1", ] -[[package]] -name = "prost-build" -version = "0.14.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" -dependencies = [ - "heck 0.5.0", - "itertools 0.14.0", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost 0.14.1", - "prost-types 0.14.1", - "pulldown-cmark", - "pulldown-cmark-to-cmark", - "regex", - "syn 2.0.107", - "tempfile", -] - [[package]] name = "prost-derive" version = "0.11.9" @@ -8489,15 +8486,6 @@ dependencies = [ "prost 0.12.6", ] -[[package]] -name = "prost-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" -dependencies = [ - "prost 0.14.1", -] - [[package]] name = "psl-types" version = "2.0.11" @@ -8514,26 +8502,6 @@ dependencies = [ "psl-types", ] -[[package]] -name = "pulldown-cmark" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" -dependencies = [ - "bitflags 2.9.4", - "memchr", - "unicase", -] - -[[package]] -name = "pulldown-cmark-to-cmark" -version = "21.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" -dependencies = [ - "pulldown-cmark", -] - [[package]] name = "pulley-interpreter" version = "38.0.4" @@ -9888,6 +9856,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "similar" version = "2.7.0" @@ -10372,7 +10346,7 @@ dependencies = [ "num-traits", "once_cell", "prost 0.12.6", - "prost-types 0.12.6", + "prost-types", "serde", "serde_bytes", "serde_json", @@ -10439,7 +10413,7 @@ dependencies = [ "bytes", "flex-error", "prost 0.12.6", - 
"prost-types 0.12.6", + "prost-types", "serde", "serde_bytes", "subtle-encoding", @@ -11030,18 +11004,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tonic-build" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" -dependencies = [ - "prettyplease", - "proc-macro2", - "quote", - "syn 2.0.107", -] - [[package]] name = "tonic-prost" version = "0.14.2" @@ -11053,22 +11015,6 @@ dependencies = [ "tonic 0.14.2", ] -[[package]] -name = "tonic-prost-build" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "prost-types 0.14.1", - "quote", - "syn 2.0.107", - "tempfile", - "tonic-build", -] - [[package]] name = "tower" version = "0.4.13" @@ -11322,12 +11268,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicase" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" - [[package]] name = "unicode-ident" version = "1.0.19" diff --git a/Cargo.toml b/Cargo.toml index 96bd7d0ef0c..09ff8ca2d26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,6 +75,9 @@ async-graphql = { version = "=7.0.15", features = [ ], default-features = false } async-graphql-value = { version = "=7.0.15" } async-trait = "0.1" + +# Fuel dependencies +aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } aws-sdk-kms = "1.37" axum = "0.5" bytes = "1.5.0" @@ -91,6 +94,7 @@ educe = { version = "0.6", default-features = false, features = [ ] } enum-iterator = "1.2" enum_dispatch = "0.3.13" +flate2 = "1.1.5" fuel-core = { version = "0.47.1", path = 
"./crates/fuel-core", default-features = false } fuel-core-bin = { version = "0.47.1", path = "./bin/fuel-core" } # Workspace members @@ -110,6 +114,7 @@ fuel-core-p2p = { version = "0.47.1", path = "./crates/services/p2p" } fuel-core-parallel-executor = { version = "0.47.1", path = "./crates/services/parallel-executor" } fuel-core-poa = { version = "0.47.1", path = "./crates/services/consensus_module/poa" } fuel-core-producer = { version = "0.47.1", path = "./crates/services/producer" } +fuel-core-protobuf = { version = "0.4.0" } fuel-core-provider = { version = "0.47.1", path = "./crates/provider" } fuel-core-relayer = { version = "0.47.1", path = "./crates/services/relayer" } fuel-core-services = { version = "0.47.1", path = "./crates/services" } @@ -124,8 +129,6 @@ fuel-core-types = { version = "0.47.1", path = "./crates/types", default-feature fuel-core-upgradable-executor = { version = "0.47.1", path = "./crates/services/upgradable-executor" } fuel-core-wasm-executor = { version = "0.47.1", path = "./crates/services/upgradable-executor/wasm-executor", default-features = false } fuel-gas-price-algorithm = { version = "0.47.1", path = "crates/fuel-gas-price-algorithm" } - -# Fuel dependencies fuel-vm-private = { version = "0.65.0", package = "fuel-vm", default-features = false } futures = "0.3" hex = { version = "0.4", features = ["serde"] } diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 4baceaa0968..3d635e3fd6c 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -16,6 +16,7 @@ fault-proving = [ "fuel-core-database/fault-proving", "fuel-core-sync/fault-proving", ] +rpc = ["fuel-core/rpc"] [dependencies] anyhow = { workspace = true } diff --git a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index 792783333b4..80ad7697a83 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -401,7 +401,9 @@ fn service_with_many_contracts( Default::default(), Default::default(), 
Default::default(), + #[cfg(feature = "rpc")] Default::default(), + #[cfg(feature = "rpc")] Default::default(), ), config.clone(), diff --git a/bin/fuel-core/src/cli/rollback.rs b/bin/fuel-core/src/cli/rollback.rs index cdc99f092fd..afeaf945718 100644 --- a/bin/fuel-core/src/cli/rollback.rs +++ b/bin/fuel-core/src/cli/rollback.rs @@ -59,7 +59,7 @@ pub async fn exec(command: Command) -> anyhow::Result<()> { use crate::cli::ShutdownListener; let path = command.database_path.as_path(); - let db = CombinedDatabase::open( + let mut db = CombinedDatabase::open( path, StateRewindPolicy::RewindFullRange, DatabaseConfig { diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index dbd8eb5d9cf..c229ed0cb2f 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -107,6 +107,7 @@ use std::num::NonZeroUsize; #[cfg(feature = "p2p")] mod p2p; +#[cfg(feature = "rpc")] mod rpc; #[cfg(feature = "shared-sequencer")] @@ -293,6 +294,7 @@ pub struct Command { pub p2p_args: p2p::P2PArgs, #[clap(flatten)] + #[cfg(feature = "rpc")] pub rpc_args: rpc::RpcArgs, #[cfg_attr(feature = "p2p", clap(flatten))] @@ -374,6 +376,7 @@ impl Command { relayer_args, #[cfg(feature = "p2p")] p2p_args, + #[cfg(feature = "rpc")] rpc_args, #[cfg(feature = "p2p")] sync_args, @@ -457,6 +460,7 @@ impl Command { .echo_delegation_interval, }; + #[cfg(feature = "rpc")] let rpc_config = rpc_args.into_config(); let trigger: Trigger = poa_trigger.into(); diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index 355353b45a2..a367443112f 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -1,4 +1,7 @@ -use clap::Args; +use clap::{ + Args, + Subcommand, +}; use fuel_core_types::fuel_types::BlockHeight; use std::net; @@ -11,6 +14,22 @@ pub struct RpcArgs { /// The port to bind the RPC service to #[clap(long = "rpc_port", default_value = "4001", env)] pub rpc_port: u16, + + #[command(subcommand)] + pub storage_method: 
Option, +} + +#[derive(Debug, Clone, Subcommand)] +pub enum StorageMethod { + Local, + S3 { + #[clap(long = "bucket", env)] + bucket: String, + #[clap(long = "endpoint_url", env)] + endpoint_url: Option, + #[clap(long = "requester_pays", env, default_value = "false")] + requester_pays: bool, + }, } impl RpcArgs { @@ -18,6 +37,26 @@ impl RpcArgs { fuel_core_block_aggregator_api::integration::Config { addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), sync_from: Some(BlockHeight::from(0)), + storage_method: self.storage_method.map(Into::into).unwrap_or_default(), + } + } +} + +impl From for fuel_core_block_aggregator_api::integration::StorageMethod { + fn from(storage_method: StorageMethod) -> Self { + match storage_method { + StorageMethod::Local => { + fuel_core_block_aggregator_api::integration::StorageMethod::Local + } + StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + } => fuel_core_block_aggregator_api::integration::StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + }, } } } diff --git a/bin/fuel-core/src/lib.rs b/bin/fuel-core/src/lib.rs index 704f0f3fa5b..67b5193783f 100644 --- a/bin/fuel-core/src/lib.rs +++ b/bin/fuel-core/src/lib.rs @@ -6,4 +6,5 @@ pub mod cli; pub use fuel_core::service::FuelService; +use fuel_core_block_aggregator_api as _; use tikv_jemallocator as _; // Used only by the binary diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 808d8b5920c..6a00676b342 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -42,6 +42,7 @@ fault-proving = [ "fuel-core-executor/fault-proving", "fuel-core-storage/fault-proving", "fuel-core-chain-config/fault-proving", + "fuel-core-block-aggregator-api/fault-proving", "fuel-core-database/fault-proving", "fuel-core-sync?/fault-proving", "fuel-core-importer/fault-proving", @@ -126,7 +127,7 @@ fuel-core-executor = { workspace = true, features = [ fuel-core-services = { path = "./../services", features = ["test-helpers"] } 
fuel-core-storage = { path = "./../storage", features = ["test-helpers"] } fuel-core-trace = { path = "./../trace" } -fuel-core-types = { path = "./../types", features = ["test-helpers"] } +fuel-core-types = { path = "./../types", features = ["test-helpers", "random"] } fuel-core-upgradable-executor = { workspace = true, features = [ "test-helpers", ] } diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index ac8ce32f48f..2af72f8050a 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -3,6 +3,8 @@ use crate::state::{ historical_rocksdb::StateRewindPolicy, rocks_db::DatabaseConfig, }; +#[cfg(feature = "rpc")] +use anyhow::anyhow; use crate::{ database::{ @@ -10,10 +12,6 @@ use crate::{ GenesisDatabase, Result as DatabaseResult, database_description::{ - block_aggregator::{ - BlockAggregatorDatabaseS3, - BlockAggregatorDatabaseStorage, - }, compression::CompressionDatabase, gas_price::GasPriceDatabase, off_chain::OffChain, @@ -23,6 +21,14 @@ use crate::{ }, service::DbType, }; + +#[cfg(feature = "rpc")] +use crate::database::database_description::block_aggregator::{ + BlockAggregatorDatabaseS3, + BlockAggregatorDatabaseStorage, +}; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::db::table::LatestBlock; #[cfg(feature = "test-helpers")] use fuel_core_chain_config::{ StateConfig, @@ -40,6 +46,11 @@ use fuel_core_storage::tables::{ ContractsState, Messages, }; +#[cfg(feature = "rpc")] +use fuel_core_storage::{ + Error as StorageError, + StorageAsRef, +}; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, fuel_types::BlockHeight, @@ -64,7 +75,9 @@ pub struct CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, + #[cfg(feature = "rpc")] block_aggregation_storage: Database, + #[cfg(feature = "rpc")] block_aggregation_s3: Database, } @@ -75,8 +88,10 @@ impl CombinedDatabase { relayer: Database, gas_price: Database, 
compression: Database, - block_aggregation_storage: Database, - block_aggregation_s3: Database, + #[cfg(feature = "rpc")] block_aggregation_storage: Database< + BlockAggregatorDatabaseStorage, + >, + #[cfg(feature = "rpc")] block_aggregation_s3: Database, ) -> Self { Self { on_chain, @@ -84,7 +99,9 @@ impl CombinedDatabase { relayer, gas_price, compression, + #[cfg(feature = "rpc")] block_aggregation_storage, + #[cfg(feature = "rpc")] block_aggregation_s3, } } @@ -96,6 +113,10 @@ impl CombinedDatabase { crate::state::rocks_db::RocksDb::::prune(path)?; crate::state::rocks_db::RocksDb::::prune(path)?; crate::state::rocks_db::RocksDb::::prune(path)?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::prune(path)?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::prune(path)?; Ok(()) } @@ -139,6 +160,18 @@ impl CombinedDatabase { crate::state::rocks_db::RocksDb::::backup(db_dir, temp_dir) .trace_err("Failed to backup compression database")?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::backup( + db_dir, temp_dir, + ) + .trace_err("Failed to backup block aggregation storage database")?; + + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::backup( + db_dir, temp_dir, + ) + .trace_err("Failed to backup block aggregation s3 database")?; + Ok(()) } @@ -193,6 +226,20 @@ impl CombinedDatabase { ) .trace_err("Failed to restore compression database")?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::restore( + temp_restore_dir, + backup_dir, + ) + .trace_err("Failed to restore block aggregation storage database")?; + + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::restore( + temp_restore_dir, + backup_dir, + ) + .trace_err("Failed to restore block aggregation s3 database")?; + Ok(()) } @@ -250,6 +297,7 @@ impl CombinedDatabase { ..database_config }, )?; + #[cfg(feature = "rpc")] let block_aggregation_storage = Database::open_rocksdb( path, state_rewind_policy, @@ -258,6 +306,7 @@ impl 
CombinedDatabase { ..database_config }, )?; + #[cfg(feature = "rpc")] let block_aggregation_s3 = Database::open_rocksdb( path, state_rewind_policy, @@ -273,7 +322,9 @@ impl CombinedDatabase { relayer, gas_price, compression, + #[cfg(feature = "rpc")] block_aggregation_storage, + #[cfg(feature = "rpc")] block_aggregation_s3, }) } @@ -290,7 +341,9 @@ impl CombinedDatabase { relayer: Default::default(), gas_price: Default::default(), compression: Default::default(), + #[cfg(feature = "rpc")] block_aggregation_storage: Default::default(), + #[cfg(feature = "rpc")] block_aggregation_s3: Default::default(), }) } @@ -337,7 +390,9 @@ impl CombinedDatabase { Database::in_memory(), Database::in_memory(), Database::in_memory(), + #[cfg(feature = "rpc")] Database::in_memory(), + #[cfg(feature = "rpc")] Database::in_memory(), ) } @@ -348,6 +403,10 @@ impl CombinedDatabase { self.relayer.check_version()?; self.gas_price.check_version()?; self.compression.check_version()?; + #[cfg(feature = "rpc")] + self.block_aggregation_storage.check_version()?; + #[cfg(feature = "rpc")] + self.block_aggregation_s3.check_version()?; Ok(()) } @@ -359,13 +418,28 @@ impl CombinedDatabase { &self.compression } + #[cfg(feature = "rpc")] pub fn block_aggregation_storage(&self) -> &Database { &self.block_aggregation_storage } + #[cfg(feature = "rpc")] + pub fn block_aggregation_storage_mut( + &mut self, + ) -> &mut Database { + &mut self.block_aggregation_storage + } + + #[cfg(feature = "rpc")] pub fn block_aggregation_s3(&self) -> &Database { &self.block_aggregation_s3 } + #[cfg(feature = "rpc")] + pub fn block_aggregation_s3_mut( + &mut self, + ) -> &mut Database { + &mut self.block_aggregation_s3 + } #[cfg(any(feature = "test-helpers", test))] pub fn on_chain_mut(&mut self) -> &mut Database { @@ -445,7 +519,7 @@ impl CombinedDatabase { /// Rollbacks the state of the blockchain to a specific block height. 
pub fn rollback_to( - &self, + &mut self, target_block_height: BlockHeight, shutdown_listener: &mut S, ) -> anyhow::Result<()> @@ -473,6 +547,41 @@ impl CombinedDatabase { let compression_db_rolled_back = is_equal_or_none(compression_db_height, target_block_height); + #[cfg(feature = "rpc")] + { + let block_aggregation_storage_height = self + .block_aggregation_storage() + .storage_as_ref::() + .get(&()) + .map_err(|e: StorageError| anyhow!(e))? + .map(|b| b.into_owned()); + let block_aggregation_storage_rolled_back = is_equal_or_less_than_or_none( + block_aggregation_storage_height, + target_block_height, + ); + + let block_aggregation_s3_height = self + .block_aggregation_s3() + .storage_as_ref::() + .get(&()) + .map_err(|e: StorageError| anyhow!(e))? + .map(|b| b.into_owned()); + let block_aggregation_s3_rolled_back = is_equal_or_less_than_or_none( + block_aggregation_s3_height, + target_block_height, + ); + + if !block_aggregation_storage_rolled_back { + self.block_aggregation_storage_mut() + .rollback_to(target_block_height)?; + } + + if !block_aggregation_s3_rolled_back { + self.block_aggregation_s3_mut() + .rollback_to(target_block_height)?; + } + } + if on_chain_height == target_block_height && off_chain_height == target_block_height && gas_price_rolled_back @@ -636,7 +745,9 @@ impl CombinedDatabase { self.relayer.shutdown(); self.gas_price.shutdown(); self.compression.shutdown(); + #[cfg(feature = "rpc")] self.block_aggregation_storage.shutdown(); + #[cfg(feature = "rpc")] self.block_aggregation_s3.shutdown(); } } @@ -669,6 +780,11 @@ fn is_equal_or_none(maybe_left: Option, right: T) -> bool { maybe_left.map(|left| left == right).unwrap_or(true) } +#[cfg(feature = "rpc")] +fn is_equal_or_less_than_or_none(maybe_left: Option, right: T) -> bool { + maybe_left.map(|left| left <= right).unwrap_or(true) +} + #[allow(non_snake_case)] #[cfg(feature = "backup")] #[cfg(test)] diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 
96b03caad7b..1f427ab3669 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -69,6 +69,11 @@ use std::{ pub type Result = core::result::Result; // TODO: Extract `Database` and all belongs into `fuel-core-database`. +#[cfg(feature = "rpc")] +use crate::database::database_description::block_aggregator::{ + BlockAggregatorDatabaseS3, + BlockAggregatorDatabaseStorage, +}; #[cfg(feature = "rocksdb")] use crate::state::{ historical_rocksdb::{ @@ -84,15 +89,18 @@ use crate::state::{ }; use crate::{ database::database_description::{ - block_aggregator::{ - BlockAggregatorDatabaseS3, - BlockAggregatorDatabaseStorage, - }, gas_price::GasPriceDatabase, indexation_availability, }, state::HeightType, }; + +#[cfg(feature = "rpc")] +use anyhow::anyhow; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::db::table::LatestBlock; +#[cfg(feature = "rpc")] +use fuel_core_storage::transactional::WriteTransaction; #[cfg(feature = "rocksdb")] use std::path::Path; @@ -445,18 +453,48 @@ impl Modifiable for Database { } } +#[cfg(feature = "rpc")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + // Does not need to be monotonically increasing because + // storage values are modified in parallel from different heights commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) } } +#[cfg(feature = "rpc")] +impl Database { + pub fn rollback_to(&mut self, block_height: BlockHeight) -> StorageResult<()> { + let mut tx = self.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &block_height) + .map_err(|e: StorageError| anyhow!(e))?; + tx.commit().map_err(|e: StorageError| anyhow!(e))?; + Ok(()) + } +} + +#[cfg(feature = "rpc")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + // Does not need to be monotonically increasing because + // storage values are modified in parallel from different heights 
commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) } } +#[cfg(feature = "rpc")] +impl Database { + pub fn rollback_to(&mut self, block_height: BlockHeight) -> StorageResult<()> { + let mut tx = self.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &block_height) + .map_err(|e: StorageError| anyhow!(e))?; + tx.commit().map_err(|e: StorageError| anyhow!(e))?; + Ok(()) + } +} + #[cfg(feature = "relayer")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { diff --git a/crates/fuel-core/src/database/database_description.rs b/crates/fuel-core/src/database/database_description.rs index e991c2bc7f1..9a300158fd4 100644 --- a/crates/fuel-core/src/database/database_description.rs +++ b/crates/fuel-core/src/database/database_description.rs @@ -13,6 +13,7 @@ pub mod off_chain; pub mod on_chain; pub mod relayer; +#[cfg(feature = "rpc")] pub mod block_aggregator; pub trait DatabaseHeight: PartialEq + Default + Debug + Copy + Send + Sync { diff --git a/crates/fuel-core/src/lib.rs b/crates/fuel-core/src/lib.rs index d464a46d073..ea490b8c1b2 100644 --- a/crates/fuel-core/src/lib.rs +++ b/crates/fuel-core/src/lib.rs @@ -55,6 +55,8 @@ pub mod state; // In the future this module will be a separate crate for `fuel-core-graphql-api`. mod graphql_api; +use fuel_core_block_aggregator_api as _; + pub mod fuel_core_graphql_api { pub use crate::graphql_api::*; } diff --git a/crates/fuel-core/src/query/message.rs b/crates/fuel-core/src/query/message.rs index ce8da628f6f..106bedcdb0c 100644 --- a/crates/fuel-core/src/query/message.rs +++ b/crates/fuel-core/src/query/message.rs @@ -313,7 +313,7 @@ fn message_receipts_proof( return Err(anyhow::anyhow!( "Unable to generate the Merkle proof for the message from its receipts" ) - .into()); + .into()) }; // Return the proof. 
diff --git a/crates/fuel-core/src/schema/tx/assemble_tx.rs b/crates/fuel-core/src/schema/tx/assemble_tx.rs index c73f67cd6c6..84033001ee5 100644 --- a/crates/fuel-core/src/schema/tx/assemble_tx.rs +++ b/crates/fuel-core/src/schema/tx/assemble_tx.rs @@ -637,10 +637,10 @@ where if *amount == 0 { self.tx.outputs_mut().pop(); } else { - break; + break } } else { - break; + break } } } @@ -852,7 +852,7 @@ where } if contracts_not_in_inputs.is_empty() { - break; + break } for contract_id in contracts_not_in_inputs { @@ -913,13 +913,13 @@ where for input in self.tx.inputs() { if input_is_spendable_as_fee(input) { let Some(amount) = input.amount() else { - continue; + continue }; let Some(asset_id) = input.asset_id(&base_asset_id) else { - continue; + continue }; let Some(owner) = input.input_owner() else { - continue; + continue }; if asset_id == &base_asset_id && &fee_payer_account.owner() == owner { @@ -949,7 +949,7 @@ where let need_to_cover = final_fee.saturating_add(self.base_asset_reserved); if need_to_cover <= total_base_asset { - break; + break } let remaining_input_slots = self.remaining_input_slots()?; @@ -1021,7 +1021,7 @@ where for item in items { let key = extractor(item); if !duplicates.insert(key) { - return true + return true; } } diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 3b344f21559..11bea62351a 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -193,7 +193,9 @@ impl FuelService { Default::default(), Default::default(), Default::default(), + #[cfg(feature = "rpc")] Default::default(), + #[cfg(feature = "rpc")] Default::default(), ); Self::from_combined_database(combined_database, config).await @@ -551,7 +553,7 @@ mod tests { service.start_and_await().await.unwrap(); sleep(Duration::from_secs(1)); for service in service.sub_services() { - assert_eq!(service.state(), State::Started); + assert_eq!(service.state(), State::Started,); } if i < service.sub_services().len() { diff --git 
a/crates/fuel-core/src/service/adapters.rs b/crates/fuel-core/src/service/adapters.rs index d697a8d344b..1abb981ffa7 100644 --- a/crates/fuel-core/src/service/adapters.rs +++ b/crates/fuel-core/src/service/adapters.rs @@ -80,12 +80,15 @@ pub mod fuel_gas_price_provider; pub mod gas_price_adapters; pub mod graphql_api; pub mod import_result_provider; + #[cfg(feature = "p2p")] pub mod p2p; pub mod producer; pub mod ready_signal; #[cfg(feature = "relayer")] pub mod relayer; +#[cfg(feature = "rpc")] +pub mod rpc; #[cfg(feature = "shared-sequencer")] pub mod shared_sequencer; #[cfg(feature = "p2p")] diff --git a/crates/fuel-core/src/service/adapters/rpc.rs b/crates/fuel-core/src/service/adapters/rpc.rs new file mode 100644 index 00000000000..13395b56c46 --- /dev/null +++ b/crates/fuel-core/src/service/adapters/rpc.rs @@ -0,0 +1,53 @@ +use crate::{ + database::{ + Database, + database_description::off_chain::OffChain, + }, + fuel_core_graphql_api::storage::transactions::TransactionStatuses, +}; +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::sync_service::TxReceipts, + result::{ + Error as RPCError, + Result as RPCResult, + }, +}; +use fuel_core_storage::StorageInspect; +use fuel_core_types::{ + fuel_tx::{ + Receipt, + TxId, + }, + services::transaction_status::TransactionExecutionStatus, +}; + +pub struct ReceiptSource { + off_chain: Database, +} + +impl ReceiptSource { + pub fn new(off_chain: Database) -> Self { + Self { off_chain } + } +} + +impl TxReceipts for ReceiptSource { + async fn get_receipts(&self, tx_id: &TxId) -> RPCResult> { + let tx_status = + StorageInspect::::get(&self.off_chain, tx_id) + .map_err(RPCError::receipt_error)?; + if let Some(status) = tx_status { + match status.into_owned() { + TransactionExecutionStatus::Success { receipts, .. 
} => { + Ok(receipts.to_vec()) + } + _ => Ok(Vec::new()), + } + } else { + Ok(Vec::new()) + } + } +} + +#[cfg(test)] +mod tests; diff --git a/crates/fuel-core/src/service/adapters/rpc/tests.rs b/crates/fuel-core/src/service/adapters/rpc/tests.rs new file mode 100644 index 00000000000..d3065e89eab --- /dev/null +++ b/crates/fuel-core/src/service/adapters/rpc/tests.rs @@ -0,0 +1,46 @@ +#![allow(non_snake_case)] + +use super::*; +use fuel_core_storage::{ + StorageMutate, + transactional::WriteTransaction, +}; +use rand::{ + Rng, + SeedableRng, + prelude::StdRng, +}; +use std::sync::Arc; + +#[tokio::test] +async fn get_receipt__gets_the_receipt_for_expected_tx() { + let mut rng = StdRng::seed_from_u64(9999); + + // given + let mut db = Database::in_memory(); + let tx_id = rng.r#gen(); + let expected = vec![Receipt::Return { + id: rng.r#gen(), + val: 987, + pc: 123, + is: 456, + }]; + let status = TransactionExecutionStatus::Success { + block_height: Default::default(), + time: fuel_core_types::tai64::Tai64(123u64), + result: None, + receipts: Arc::new(expected.clone()), + total_gas: 0, + total_fee: 0, + }; + let mut tx = db.write_transaction(); + StorageMutate::::insert(&mut tx, &tx_id, &status).unwrap(); + tx.commit().unwrap(); + let receipt_source = ReceiptSource::new(db); + + // when + let actual = receipt_source.get_receipts(&tx_id).await.unwrap(); + + // then + assert_eq!(actual, expected); +} diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 2bc98848e1c..2200818018d 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -53,6 +53,8 @@ use fuel_core_types::fuel_types::{ ChainId, }; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::integration::StorageMethod; #[cfg(feature = "parallel-executor")] use std::num::NonZeroUsize; @@ -176,6 +178,7 @@ impl Config { let rpc_config = fuel_core_block_aggregator_api::integration::Config { addr: free_local_addr(), 
sync_from: Some(BlockHeight::from(0)), + storage_method: StorageMethod::Local, }; Self { diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 4cb189f0bdb..d7aa20ef95a 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -22,6 +22,8 @@ use crate::service::adapters::consensus_module::poa::pre_confirmation_signature: trigger::TimeBasedTrigger, tx_receiver::PreconfirmationsReceiver, }; +#[cfg(feature = "rpc")] +use crate::service::adapters::rpc::ReceiptSource; use crate::{ combined_database::CombinedDatabase, database::Database, @@ -66,8 +68,8 @@ use anyhow::anyhow; use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_or_remote_db::StorageOrRemoteDB, - db::storage_or_remote_db::get_env_vars, db::table::LatestBlock, + integration::StorageMethod, result::Error, }; use fuel_core_compression_service::service::new_service as new_compression_service; @@ -82,13 +84,15 @@ use fuel_core_gas_price_service::v1::{ uninitialized_task::new_gas_price_service_v1, }; use fuel_core_poa::Trigger; -#[cfg(feature = "rpc")] -use fuel_core_storage::StorageAsRef; use fuel_core_storage::{ self, transactional::AtomicView, }; - +#[cfg(feature = "rpc")] +use fuel_core_storage::{ + Error as StorageError, + StorageAsRef, +}; #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; use fuel_core_types::signer::SignMode; @@ -470,54 +474,41 @@ pub fn init_sub_services( let block_aggregator_config = config.rpc_config.clone(); let sync_from = block_aggregator_config.sync_from.unwrap_or_default(); let sync_from_height; - let db_adapter = if let Some(( - aws_access_key_id, - aws_secrete_access_key, - aws_region, - aws_bucket, - url_base, - aws_endpoint_url, - )) = get_env_vars() - { - let db = database.block_aggregation_s3().clone(); - let maybe_sync_from_height = db - .storage_as_ref::() - 
.get(&()) - .map_err(|e| Error::DB(anyhow!(e)))? - .map(|c| *c) - .and_then(|h| h.succ()); - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - - StorageOrRemoteDB::new_s3( - db, - &aws_access_key_id, - &aws_secrete_access_key, - &aws_region, - &aws_bucket, - &url_base, - aws_endpoint_url, - sync_from, - ) - } else { - tracing::info!( - "Required environment variables for S3 bucket not set. Requires: \n\ - AWS_ACCESS_KEY_ID \n\ - AWS_SECRET_ACCESS_KEY \n\ - AWS_REGION \n\ - AWS_BUCKET \n\ - AWS_ENDPOINT_URL \n\ - AWS_S3_URL_BASE (Optional)\n\ - Using local storage" - ); - let db = database.block_aggregation_storage().clone(); - let maybe_sync_from_height = db - .storage_as_ref::() - .get(&()) - .map_err(|e| Error::DB(anyhow!(e)))? - .map(|c| *c) - .and_then(|h| h.succ()); - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - StorageOrRemoteDB::new_storage(db, sync_from) + let receipts = ReceiptSource::new(database.off_chain().clone()); + let db_adapter = match &block_aggregator_config.storage_method { + StorageMethod::Local => { + let db = database.block_aggregation_storage().clone(); + let maybe_sync_from_height = db + .storage_as_ref::() + .get(&()) + .map_err(|e: StorageError| Error::DB(anyhow!(e)))? + .map(|c| *c) + .and_then(|h| h.succ()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + StorageOrRemoteDB::new_storage(db, sync_from) + } + StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + } => { + let db = database.block_aggregation_s3().clone(); + let maybe_sync_from_height = db + .storage_as_ref::() + .get(&()) + .map_err(|e: StorageError| Error::DB(anyhow!(e)))? 
+ .map(|c| *c) + .and_then(|h| h.succ()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + + StorageOrRemoteDB::new_s3( + db, + bucket, + *requester_pays, + endpoint_url.clone(), + sync_from, + ) + } }; let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); @@ -527,9 +518,10 @@ pub fn init_sub_services( db_adapter, serializer, onchain_db, + receipts, importer, sync_from_height, - ) + )? }; let graph_ql = fuel_core_graphql_api::api_service::new_service( diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index a9c8269046d..85112fdfc4d 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -8,7 +8,6 @@ homepage = { workspace = true } license = { workspace = true } repository = { workspace = true } rust-version = { workspace = true } -build = "build.rs" [features] fault-proving = ["fuel-core-types/fault-proving"] @@ -16,10 +15,13 @@ fault-proving = ["fuel-core-types/fault-proving"] [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } +aws-config = { workspace = true } aws-sdk-s3 = "1.111.0" aws-smithy-mocks = "0.2.0" bytes = { workspace = true, features = ["serde"] } enum-iterator = { workspace = true } +flate2 = { workspace = true } +fuel-core-protobuf = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } @@ -36,12 +38,8 @@ thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } -tonic-prost = { workspace = true } tracing = { workspace = true } -[build-dependencies] -tonic-prost-build = { workspace = true } - [dev-dependencies] aws-sdk-s3 = { version = "1.111.0", features = ["test-util"] } fuel-core-services = { workspace = true, features = ["test-helpers"] } diff --git 
a/crates/services/block_aggregator_api/build.rs b/crates/services/block_aggregator_api/build.rs deleted file mode 100644 index 190a1538000..00000000000 --- a/crates/services/block_aggregator_api/build.rs +++ /dev/null @@ -1,7 +0,0 @@ -fn main() -> Result<(), Box> { - tonic_prost_build::configure() - .type_attribute(".", "#[derive(serde::Serialize,serde::Deserialize)]") - .type_attribute(".", "#[allow(clippy::large_enum_variant)]") - .compile_protos(&["proto/api.proto"], &["proto/"])?; - Ok(()) -} diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto deleted file mode 100644 index d1ef647f3ed..00000000000 --- a/crates/services/block_aggregator_api/proto/api.proto +++ /dev/null @@ -1,412 +0,0 @@ -syntax = "proto3"; - -package blockaggregator; - -message BlockHeightRequest {} - -message BlockHeightResponse { - optional uint32 height = 1; -} - -message BlockRangeRequest { - uint32 start = 1; - uint32 end = 2; -} - -message RemoteBlockRangeResponse { - string region = 1; - string bucket = 2; - string key = 3; - string url = 4; -} - -message Block { - oneof versioned_block { - V1Block v1 = 1; - } -} - -message V1Block { - Header header = 1; - repeated Transaction transactions = 2; -} - -message Header { - oneof versioned_header { - V1Header v1 = 1; - V2Header v2 = 2; - } -} - -message V1Header { - uint64 da_height = 1; - uint32 consensus_parameters_version = 2; - uint32 state_transition_bytecode_version = 3; - uint32 transactions_count = 4; - uint32 message_receipt_count = 5; - bytes transactions_root = 6; - bytes message_outbox_root = 7; - bytes event_inbox_root = 8; - bytes prev_root = 9; - uint32 height = 10; - uint64 time = 11; - bytes application_hash = 12; - optional bytes block_id = 13; -} - -message V2Header { - uint64 da_height = 1; - uint32 consensus_parameters_version = 2; - uint32 state_transition_bytecode_version = 3; - uint32 transactions_count = 4; - uint32 message_receipt_count = 5; - 
bytes transactions_root = 6; - bytes message_outbox_root = 7; - bytes event_inbox_root = 8; - bytes tx_id_commitment = 9; - bytes prev_root = 10; - uint32 height = 11; - uint64 time = 12; - bytes application_hash = 13; - optional bytes block_id = 14; -} - -message Transaction { - oneof variant { - ScriptTransaction script = 1; - CreateTransaction create = 2; - MintTransaction mint = 3; - UpgradeTransaction upgrade = 4; - UploadTransaction upload = 5; - BlobTransaction blob = 6; - } -} - -message ScriptTransaction { - uint64 script_gas_limit = 1; - bytes receipts_root = 2; - bytes script = 3; - bytes script_data = 4; - Policies policies = 5; - repeated Input inputs = 6; - repeated Output outputs = 7; - repeated bytes witnesses = 8; - ScriptMetadata metadata = 9; -} - -message CreateTransaction { - uint32 bytecode_witness_index = 1; - bytes salt = 2; - repeated StorageSlot storage_slots = 3; - Policies policies = 4; - repeated Input inputs = 5; - repeated Output outputs = 6; - repeated bytes witnesses = 7; - CreateMetadata metadata = 8; -} - -message MintTransaction { - TxPointer tx_pointer = 1; - ContractInput input_contract = 2; - ContractOutput output_contract = 3; - uint64 mint_amount = 4; - bytes mint_asset_id = 5; - uint64 gas_price = 6; - MintMetadata metadata = 7; -} - -message UpgradeTransaction { - UpgradePurpose purpose = 1; - Policies policies = 2; - repeated Input inputs = 3; - repeated Output outputs = 4; - repeated bytes witnesses = 5; - UpgradeMetadata metadata = 6; -} - -message UploadTransaction { - bytes root = 1; - uint32 witness_index = 2; - uint32 subsection_index = 3; - uint32 subsections_number = 4; - repeated bytes proof_set = 5; - Policies policies = 6; - repeated Input inputs = 7; - repeated Output outputs = 8; - repeated bytes witnesses = 9; - UploadMetadata metadata = 10; -} - -message BlobTransaction { - bytes blob_id = 1; - uint32 witness_index = 2; - Policies policies = 3; - repeated Input inputs = 4; - repeated Output outputs = 5; - 
repeated bytes witnesses = 6; - BlobMetadata metadata = 7; -} - -message Policies { - uint32 bits = 1; - repeated uint64 values = 2; -} - -message Input { - oneof variant { - CoinSignedInput coin_signed = 1; - CoinPredicateInput coin_predicate = 2; - ContractInput contract = 3; - MessageCoinSignedInput message_coin_signed = 4; - MessageCoinPredicateInput message_coin_predicate = 5; - MessageDataSignedInput message_data_signed = 6; - MessageDataPredicateInput message_data_predicate = 7; - } -} - -message CoinSignedInput { - UtxoId utxo_id = 1; - bytes owner = 2; - uint64 amount = 3; - bytes asset_id = 4; - TxPointer tx_pointer = 5; - uint32 witness_index = 6; - uint64 predicate_gas_used = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -message CoinPredicateInput { - UtxoId utxo_id = 1; - bytes owner = 2; - uint64 amount = 3; - bytes asset_id = 4; - TxPointer tx_pointer = 5; - uint32 witness_index = 6; - uint64 predicate_gas_used = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -message ContractInput { - UtxoId utxo_id = 1; - bytes balance_root = 2; - bytes state_root = 3; - TxPointer tx_pointer = 4; - bytes contract_id = 5; -} - -message MessageCoinSignedInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -message MessageCoinPredicateInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -message MessageDataSignedInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -message MessageDataPredicateInput { - bytes sender = 1; - bytes 
recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -message Output { - oneof variant { - CoinOutput coin = 1; - ContractOutput contract = 2; - ChangeOutput change = 3; - VariableOutput variable = 4; - ContractCreatedOutput contract_created = 5; - } -} -message CoinOutput { - bytes to = 1; - uint64 amount = 2; - bytes asset_id = 3; -} -message ContractOutput { - uint32 input_index = 1; - bytes balance_root = 2; - bytes state_root = 3; -} -message ChangeOutput { - bytes to = 1; - uint64 amount = 2; - bytes asset_id = 3; -} -message VariableOutput { - bytes to = 1; - uint64 amount = 2; - bytes asset_id = 3; -} -message ContractCreatedOutput { - bytes contract_id = 1; - bytes state_root = 2; -} - -message UtxoId { - bytes tx_id = 1; - uint32 output_index = 2; -} - -message TxPointer { - uint32 block_height = 1; - uint32 tx_index = 2; -} - -message StorageSlot { - bytes key = 1; - bytes value = 2; -} - -message ScriptMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; - uint64 script_gas_limit = 9; - bytes receipts_root = 10; - bytes script = 11; - bytes script_data = 12; -} - -message CreateMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; - bytes contract_id = 9; - bytes contract_root = 10; - bytes state_root = 11; -} - -message MintMetadata { - bytes id = 1; -} - -message UpgradePurpose { - oneof variant { - UpgradeConsensusParameters 
consensus_parameters = 1; - UpgradeStateTransition state_transition = 2; - } -} - -message UpgradeConsensusParameters { - uint32 witness_index = 1; - bytes checksum = 2; -} - -message UpgradeStateTransition { - bytes root = 1; -} - -message UpgradeMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; - oneof variant { - UpgradeConsensusParametersMetadata consensus_parameters = 9; - UpgradeStateTransitionMetadata state_transition = 10; - } -} - -message UpgradeConsensusParametersMetadata { - bytes consensus_parameters = 1; - bytes calculated_checksum = 2; -} - -message UpgradeStateTransitionMetadata {} - -message UploadMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; -} - -message BlobMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; -} - -message PredicateOffset { - optional InnerPredicateOffset offset = 1; -} - -message InnerPredicateOffset { - uint32 offset = 1; - uint32 length = 2; -} - - -message BlockResponse { - oneof payload { - Block literal = 1; - RemoteBlockRangeResponse remote = 2; - } -} - -message NewBlockSubscriptionRequest {} - -service BlockAggregator { - rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); - rpc GetBlockRange (BlockRangeRequest) returns (stream BlockResponse); - rpc NewBlockSubscription 
(NewBlockSubscriptionRequest) returns (stream BlockResponse); -} diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 20d6d58003d..0e07b31b0bb 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -29,7 +29,7 @@ pub enum BlockAggregatorQuery { }, // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? NewBlockSubscription { - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender<(BlockHeight, Block)>, }, } @@ -75,7 +75,8 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { + pub fn new_block_subscription() + -> (Self, tokio::sync::mpsc::Receiver<(BlockHeight, B)>) { const ARBITRARY_CHANNEL_SIZE: usize = 10; let (sender, receiver) = tokio::sync::mpsc::channel(ARBITRARY_CHANNEL_SIZE); let query = Self::NewBlockSubscription { response: sender }; diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 3e43e89a662..0c0df12ffbe 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -3,7 +3,10 @@ use crate::{ BlockAggregatorApi, BlockAggregatorQuery, }, - block_range_response::BlockRangeResponse, + block_range_response::{ + BlockRangeResponse, + BoxStream, + }, protobuf_types::{ Block as ProtoBlock, BlockHeightRequest as ProtoBlockHeightRequest, @@ -11,19 +14,30 @@ use crate::{ BlockRangeRequest as ProtoBlockRangeRequest, BlockResponse as ProtoBlockResponse, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - RemoteBlockRangeResponse as ProtoRemoteBlockRangeResponse, + RemoteBlockResponse as ProtoRemoteBlockResponse, + RemoteS3Bucket as ProtoRemoteS3Bucket, block_aggregator_server::{ BlockAggregator, 
BlockAggregatorServer as ProtoBlockAggregatorServer, }, block_response as proto_block_response, + remote_block_response::Location as ProtoRemoteLocation, }, result::{ Error, Result, }, }; +use anyhow::anyhow; use async_trait::async_trait; +use fuel_core_services::{ + RunnableService, + RunnableTask, + Service, + ServiceRunner, + StateWatcher, + TaskNextAction, +}; use futures::StreamExt; use tokio_stream::wrappers::ReceiverStream; use tonic::Status; @@ -48,7 +62,7 @@ impl Server { #[async_trait] impl BlockAggregator for Server { - async fn get_block_height( + async fn get_synced_block_height( &self, request: tonic::Request, ) -> Result, tonic::Status> { @@ -71,13 +85,13 @@ impl BlockAggregator for Server { ))), } } - type GetBlockRangeStream = ReceiverStream>; + // type GetBlockRangeStream = ReceiverStream>; + type GetBlockRangeStream = BoxStream>; async fn get_block_range( &self, request: tonic::Request, ) -> Result, tonic::Status> { - const ARB_LITERAL_BLOCK_BUFFER_SIZE: usize = 100; let req = request.into_inner(); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetBlockRange { @@ -93,50 +107,42 @@ impl BlockAggregator for Server { match res { Ok(block_range_response) => match block_range_response { BlockRangeResponse::Literal(inner) => { - let (tx, rx) = tokio::sync::mpsc::channel::< - Result, - >(ARB_LITERAL_BLOCK_BUFFER_SIZE); - - tokio::spawn(async move { - let mut s = inner; - while let Some(pb) = s.next().await { + let stream = inner + .map(|(height, res)| { let response = ProtoBlockResponse { - payload: Some(proto_block_response::Payload::Literal(pb)), + height: *height, + payload: Some(proto_block_response::Payload::Literal( + res, + )), }; - if tx.send(Ok(response)).await.is_err() { - break; - } - } - }); - - Ok(tonic::Response::new(ReceiverStream::new(rx))) + Ok(response) + }) + .boxed(); + Ok(tonic::Response::new(stream)) } - BlockRangeResponse::Remote(inner) => { - let (tx, rx) = 
tokio::sync::mpsc::channel::< - Result, - >(ARB_LITERAL_BLOCK_BUFFER_SIZE); - - tokio::spawn(async move { - let mut s = inner; - while let Some(pb) = s.next().await { - let proto_response = ProtoRemoteBlockRangeResponse { - region: pb.region.clone(), - bucket: pb.bucket.clone(), - key: pb.key.clone(), - url: pb.url.clone(), + BlockRangeResponse::S3(inner) => { + let stream = inner + .map(|(height, res)| { + let s3 = ProtoRemoteS3Bucket { + bucket: res.bucket, + key: res.key, + requester_pays: res.requester_pays, + endpoint: res.aws_endpoint, + }; + let location = ProtoRemoteLocation::S3(s3); + let proto_response = ProtoRemoteBlockResponse { + location: Some(location), }; let response = ProtoBlockResponse { + height: *height, payload: Some(proto_block_response::Payload::Remote( proto_response, )), }; - if tx.send(Ok(response)).await.is_err() { - break; - } - } - }); - - Ok(tonic::Response::new(ReceiverStream::new(rx))) + Ok(response) + }) + .boxed(); + Ok(tonic::Response::new(stream)) } }, Err(e) => Err(tonic::Status::internal(format!( @@ -163,8 +169,9 @@ impl BlockAggregator for Server { let (task_sender, task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); tokio::spawn(async move { - while let Some(nb) = receiver.recv().await { + while let Some((height, nb)) = receiver.recv().await { let response = ProtoBlockResponse { + height: *height, payload: Some(proto_block_response::Payload::Literal(nb)), }; if task_sender.send(Ok(response)).await.is_err() { @@ -178,42 +185,74 @@ impl BlockAggregator for Server { } pub struct ProtobufAPI { - _server_task_handle: tokio::task::JoinHandle<()>, - shutdown_sender: Option>, + _server_service: ServiceRunner, query_receiver: tokio::sync::mpsc::Receiver>, } -impl ProtobufAPI { - pub fn new(url: String) -> Self { - let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< - BlockAggregatorQuery, - >(100); - let server = Server::new(query_sender); - let addr = url.parse().unwrap(); - let (shutdown_sender, 
shutdown_receiver) = tokio::sync::oneshot::channel::<()>(); - let _server_task_handle = tokio::spawn(async move { - let service = tonic::transport::Server::builder() - .add_service(ProtoBlockAggregatorServer::new(server)); - tokio::select! { - res = service.serve(addr) => { +pub struct ServerTask { + addr: std::net::SocketAddr, + query_sender: + tokio::sync::mpsc::Sender>, +} +#[async_trait::async_trait] +impl RunnableService for ServerTask { + const NAME: &'static str = "ProtobufServerTask"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData {} + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} + +impl RunnableTask for ServerTask { + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + let server = Server::new(self.query_sender.clone()); + let router = tonic::transport::Server::builder() + .add_service(ProtoBlockAggregatorServer::new(server)); + tokio::select! 
{ + res = router.serve(self.addr) => { if let Err(e) = res { tracing::error!("BlockAggregator tonic server error: {}", e); + TaskNextAction::ErrorContinue(anyhow!(e)) } else { tracing::info!("BlockAggregator tonic server stopped"); + TaskNextAction::Stop } }, - _ = shutdown_receiver => { - tracing::info!("Shutting down BlockAggregator tonic server"); - }, + _ = watcher.while_started() => { + TaskNextAction::Stop } - }); - Self { - _server_task_handle, - shutdown_sender: Some(shutdown_sender), - query_receiver, } } + + async fn shutdown(self) -> anyhow::Result<()> { + Ok(()) + } +} + +impl ProtobufAPI { + pub fn new(url: String) -> Result { + let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< + BlockAggregatorQuery, + >(100); + let addr = url.parse().unwrap(); + let _server_service = ServiceRunner::new(ServerTask { addr, query_sender }); + _server_service.start().map_err(Error::Api)?; + let api = Self { + _server_service, + query_receiver, + }; + Ok(api) + } } impl BlockAggregatorApi for ProtobufAPI { @@ -231,11 +270,3 @@ impl BlockAggregatorApi for ProtobufAPI { Ok(query) } } - -impl Drop for ProtobufAPI { - fn drop(&mut self) { - if let Some(shutdown_sender) = self.shutdown_sender.take() { - let _ = shutdown_sender.send(()); - } - } -} diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index 380b74ef318..111cf1d303f 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -8,7 +8,7 @@ use crate::{ }, block_range_response::{ BlockRangeResponse, - RemoteBlockRangeResponse, + RemoteS3Response, }, blocks::importer_and_db_source::{ BlockSerializer, @@ -26,6 +26,7 @@ use crate::{ block_response::Payload, }, }; +use fuel_core_protobuf::remote_block_response::Location; use fuel_core_types::{ blockchain::block::Block as FuelBlock, 
fuel_types::BlockHeight, @@ -34,19 +35,18 @@ use futures::{ StreamExt, TryStreamExt, }; -use std::net::TcpListener; fn free_local_addr() -> String { - let listener = TcpListener::bind("[::1]:0").unwrap(); - let addr = listener.local_addr().unwrap(); // OS picks a free port - format!("[::1]:{}", addr.port()) + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + format!("127.0.0.1:{}", addr.port()) } #[tokio::test] async fn await_query__get_current_height__client_receives_expected_value() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = ProtobufAPI::new(path.to_string()).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -57,7 +57,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { let handle = tokio::spawn(async move { tracing::info!("querying with client"); client - .get_block_height(BlockHeightRequest {}) + .get_synced_block_height(BlockHeightRequest {}) .await .expect("could not get height") }); @@ -83,7 +83,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { async fn await_query__get_block_range__client_receives_expected_value__literal() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = ProtobufAPI::new(path.to_string()).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -108,15 +108,16 @@ async fn await_query__get_block_range__client_receives_expected_value__literal() let serializer_adapter = SerializerAdapter; let fuel_block_1 = FuelBlock::default(); let mut fuel_block_2 = FuelBlock::default(); - let block_height_2 = fuel_block_1.header().height().succ().unwrap(); + let block_height_1 = fuel_block_1.header().height(); + let block_height_2 = block_height_1.succ().unwrap(); 
fuel_block_2.header_mut().set_block_height(block_height_2); let block1 = serializer_adapter - .serialize_block(&fuel_block_1) + .serialize_block(&fuel_block_1, &[]) .expect("could not serialize block"); let block2 = serializer_adapter - .serialize_block(&fuel_block_2) + .serialize_block(&fuel_block_2, &[]) .expect("could not serialize block"); - let list = vec![block1, block2]; + let list = vec![(*block_height_1, block1), (block_height_2, block2)]; // return response through query's channel if let BlockAggregatorQuery::GetBlockRange { first, @@ -136,7 +137,7 @@ async fn await_query__get_block_range__client_receives_expected_value__literal() tracing::info!("awaiting query"); let response = handle.await.unwrap(); let expected = list; - let actual: Vec = response + let actual: Vec<(BlockHeight, ProtoBlock)> = response .into_inner() .try_collect::>() .await @@ -144,7 +145,7 @@ async fn await_query__get_block_range__client_receives_expected_value__literal() .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner + (BlockHeight::new(b.height), inner) } else { panic!("unexpected response type") } @@ -157,7 +158,7 @@ async fn await_query__get_block_range__client_receives_expected_value__literal() async fn await_query__get_block_range__client_receives_expected_value__remote() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = ProtobufAPI::new(path.to_string()).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -179,19 +180,18 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() let query = api.await_query().await.unwrap(); // then - let list: Vec<_> = ["1", "2"] + let list: Vec<_> = [(BlockHeight::new(1), "1"), (BlockHeight::new(2), "2")] .iter() - .map(|height| { - let region = "test-region".to_string(); + .map(|(height, key)| { let bucket = "test-bucket".to_string(); - let key = 
height.to_string(); - let url = "good.url".to_string(); - RemoteBlockRangeResponse { - region, + let key = key.to_string(); + let res = RemoteS3Response { bucket, key, - url, - } + requester_pays: false, + aws_endpoint: None, + }; + (*height, res) }) .collect(); // return response through query's channel @@ -205,7 +205,7 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() assert_eq!(last, BlockHeight::new(1)); tracing::info!("correct query received, sending response"); let stream = tokio_stream::iter(list.clone()).boxed(); - let range = BlockRangeResponse::Remote(stream); + let range = BlockRangeResponse::S3(stream); response.send(range).unwrap(); } else { panic!("expected GetBlockRange query"); @@ -213,7 +213,7 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() tracing::info!("awaiting query"); let response = handle.await.unwrap(); let expected = list; - let actual: Vec = response + let actual: Vec<(BlockHeight, RemoteS3Response)> = response .into_inner() .try_collect::>() .await @@ -221,12 +221,18 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() .into_iter() .map(|b| { if let Some(Payload::Remote(inner)) = b.payload { - RemoteBlockRangeResponse { - region: inner.region, - bucket: inner.bucket, - key: inner.key, - url: inner.url, - } + let height = BlockHeight::new(b.height); + let location = inner.location.unwrap(); + let Location::S3(s3) = location else { + panic!("unexpected location type") + }; + let res = RemoteS3Response { + bucket: s3.bucket, + key: s3.key, + requester_pays: false, + aws_endpoint: None, + }; + (height, res) } else { panic!("unexpected response type") } @@ -240,7 +246,7 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() async fn await_query__new_block_stream__client_receives_expected_value() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = 
ProtobufAPI::new(path.to_string()).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -270,16 +276,16 @@ async fn await_query__new_block_stream__client_receives_expected_value() { let mut fuel_block_2 = FuelBlock::default(); fuel_block_2.header_mut().set_block_height(height2); let block1 = serializer_adapter - .serialize_block(&fuel_block_1) + .serialize_block(&fuel_block_1, &[]) .expect("could not serialize block"); let block2 = serializer_adapter - .serialize_block(&fuel_block_2) + .serialize_block(&fuel_block_2, &[]) .expect("could not serialize block"); - let list = vec![block1, block2]; + let list = vec![(height1, block1), (height2, block2)]; if let BlockAggregatorQuery::NewBlockSubscription { response } = query { tracing::info!("correct query received, sending response"); - for block in list.clone() { - response.send(block).await.unwrap(); + for (height, block) in list.clone() { + response.send((height, block)).await.unwrap(); } } else { panic!("expected GetBlockRange query"); @@ -287,7 +293,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { tracing::info!("awaiting query"); let response = handle.await.unwrap(); let expected = list; - let actual: Vec = response + let actual: Vec<(BlockHeight, ProtoBlock)> = response .into_inner() .try_collect::>() .await @@ -295,7 +301,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner + (BlockHeight::new(b.height), inner) } else { panic!("unexpected response type") } diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index 42ff7ecd16c..48009d6cfa0 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -100,7 +100,7 @@ where async fn 
handle_new_block_subscription( &mut self, - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender<(BlockHeight, Blocks::Block)>, ) -> TaskNextAction { self.new_block_subscriptions.push(response); TaskNextAction::Continue @@ -120,7 +120,7 @@ where match &event { BlockSourceEvent::NewBlock(height, block) => { self.new_block_subscriptions.retain_mut(|sub| { - let send_res = sub.try_send(block.clone()); + let send_res = sub.try_send((*height, block.clone())); match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 6a80da26ac8..76d05465906 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,22 +1,23 @@ use crate::protobuf_types::Block as ProtoBlock; use fuel_core_services::stream::Stream; +use fuel_core_types::fuel_types::BlockHeight; pub type BoxStream = core::pin::Pin + Send + 'static>>; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { /// A literal stream of blocks - Literal(BoxStream), + Literal(BoxStream<(BlockHeight, ProtoBlock)>), /// A remote URL where the blocks can be fetched - Remote(BoxStream), + S3(BoxStream<(BlockHeight, RemoteS3Response)>), } #[derive(Debug, Clone, PartialEq, Eq)] -pub struct RemoteBlockRangeResponse { - pub region: String, +pub struct RemoteS3Response { pub bucket: String, pub key: String, - pub url: String, + pub requester_pays: bool, + pub aws_endpoint: Option, } #[cfg(test)] @@ -24,7 +25,7 @@ impl std::fmt::Debug for BlockRangeResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { BlockRangeResponse::Literal(_) => f.debug_struct("Literal").finish(), - BlockRangeResponse::Remote(_url) => 
f.debug_struct("Remote").finish(), + BlockRangeResponse::S3(_url) => f.debug_struct("Remote").finish(), } } } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 497aab2ec9b..872bbb74a43 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -16,6 +16,7 @@ use fuel_core_services::{ stream::BoxStream, }; use fuel_core_storage::{ + Error as StorageError, StorageInspect, tables::FuelBlocks, }; @@ -25,8 +26,12 @@ use fuel_core_types::{ services::block_importer::SharedImportResult, }; -use crate::blocks::importer_and_db_source::sync_service::SyncTask; +use crate::blocks::importer_and_db_source::sync_service::{ + SyncTask, + TxReceipts, +}; use fuel_core_storage::tables::Transactions; +use fuel_core_types::fuel_tx::Receipt as FuelReceipt; pub mod importer_service; pub mod sync_service; @@ -37,62 +42,60 @@ pub mod serializer_adapter; pub trait BlockSerializer { type Block; - fn serialize_block(&self, block: &FuelBlock) -> Result; + fn serialize_block( + &self, + block: &FuelBlock, + receipts: &[FuelReceipt], + ) -> Result; } /// A block source that combines an importer and a database sync task. 
/// Old blocks will be synced from a target database and new blocks will be received from /// the importer -pub struct ImporterAndDbSource +pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect, - DB: StorageInspect, - E: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { importer_task: ServiceRunner>, - sync_task: ServiceRunner>, + sync_task: ServiceRunner>, /// Receive blocks from the importer and sync tasks receiver: tokio::sync::mpsc::Receiver>, - - _error_marker: std::marker::PhantomData, } -impl ImporterAndDbSource +impl ImporterAndDbSource where Serializer: BlockSerializer + Clone + Send + Sync + 'static, ::Block: Send + Sync + 'static, - DB: StorageInspect + Send + Sync, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: StorageInspect + Send + Sync, + DB: StorageInspect + Send + 'static, + Receipts: TxReceipts, { pub fn new( importer: BoxStream, serializer: Serializer, - database: DB, + db: DB, + receipts: Receipts, db_starting_height: BlockHeight, - db_ending_height: Option, + db_ending_height: BlockHeight, ) -> Self { const ARB_CHANNEL_SIZE: usize = 100; let (block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let (new_end_sender, new_end_receiver) = tokio::sync::oneshot::channel(); - let importer_task = ImporterTask::new( - importer, - serializer.clone(), - block_return.clone(), - Some(new_end_sender), - ); + let importer_task = + ImporterTask::new(importer, serializer.clone(), block_return.clone()); let importer_runner = ServiceRunner::new(importer_task); importer_runner.start().unwrap(); let sync_task = SyncTask::new( serializer, block_return, - database, + db, + receipts, db_starting_height, db_ending_height, - new_end_receiver, ); let sync_runner = ServiceRunner::new(sync_task); sync_runner.start().unwrap(); @@ -100,19 +103,19 @@ where 
importer_task: importer_runner, sync_task: sync_runner, receiver, - _error_marker: std::marker::PhantomData, } } } -impl BlockSource for ImporterAndDbSource +impl BlockSource + for ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, ::Block: Send + Sync + 'static, - DB: Send + Sync, - DB: StorageInspect, - DB: StorageInspect, - E: std::fmt::Debug + Send + Sync, + DB: Send + Sync + 'static, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { type Block = Serializer::Block; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 74151e2a0c7..99721b06ad2 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -11,10 +11,7 @@ use fuel_core_services::{ try_or_continue, try_or_stop, }; -use fuel_core_types::{ - fuel_types::BlockHeight, - services::block_importer::SharedImportResult, -}; +use fuel_core_types::services::block_importer::SharedImportResult; use futures::StreamExt; use tokio::sync::mpsc::Sender; @@ -22,7 +19,6 @@ pub struct ImporterTask { importer: BoxStream, serializer: Serializer, block_return_sender: Sender>, - new_end_sender: Option>, } impl ImporterTask @@ -34,13 +30,11 @@ where importer: BoxStream, serializer: Serializer, block_return: Sender>, - new_end_sender: Option>, ) -> Self { Self { importer, serializer, block_return_sender: block_return, - new_end_sender, } } } @@ -75,25 +69,15 @@ where match maybe_import_result { Some(import_result) => { let height = import_result.sealed_block.entity.header().height(); - if let Some(sender) = self.new_end_sender.take() { - match sender.send(*height) { - Ok(_) => { - tracing::debug!( - "sent new end height to sync task: {:?}", - height - ); - } - Err(e) => { - tracing::error!( - 
"failed to send new end height to sync task: {:?}", - e - ); - } - } - } + let receipts = import_result + .tx_status + .iter() + .flat_map(|status| status.result.receipts()) + .map(Clone::clone) + .collect::>(); let res = self .serializer - .serialize_block(&import_result.sealed_block.entity); + .serialize_block(&import_result.sealed_block.entity, &receipts); let block = try_or_continue!(res); let event = BlockSourceEvent::NewBlock(*height, block); let res = self.block_return_sender.send(event).await; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index b497145f2da..e24932de4b3 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,122 +1,21 @@ -#[cfg(feature = "fault-proving")] -use crate::protobuf_types::V2Header as ProtoV2Header; use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ - BlobTransaction as ProtoBlobTx, Block as ProtoBlock, - ChangeOutput as ProtoChangeOutput, - CoinOutput as ProtoCoinOutput, - CoinPredicateInput as ProtoCoinPredicateInput, - CoinSignedInput as ProtoCoinSignedInput, - ContractCreatedOutput as ProtoContractCreatedOutput, - ContractInput as ProtoContractInput, - ContractOutput as ProtoContractOutput, - CreateTransaction as ProtoCreateTx, - Header as ProtoHeader, - Input as ProtoInput, - MessageCoinPredicateInput as ProtoMessageCoinPredicateInput, - MessageCoinSignedInput as ProtoMessageCoinSignedInput, - MessageDataPredicateInput as ProtoMessageDataPredicateInput, - MessageDataSignedInput as ProtoMessageDataSignedInput, - MintTransaction as ProtoMintTx, - Output as ProtoOutput, - Policies as ProtoPolicies, - ScriptTransaction as ProtoScriptTx, - StorageSlot as ProtoStorageSlot, - Transaction as 
ProtoTransaction, - TxPointer as ProtoTxPointer, - UpgradeConsensusParameters as ProtoUpgradeConsensusParameters, - UpgradePurpose as ProtoUpgradePurpose, - UpgradeStateTransition as ProtoUpgradeStateTransition, - UpgradeTransaction as ProtoUpgradeTx, - UploadTransaction as ProtoUploadTx, - UtxoId as ProtoUtxoId, V1Block as ProtoV1Block, - V1Header as ProtoV1Header, - VariableOutput as ProtoVariableOutput, block::VersionedBlock as ProtoVersionedBlock, - header::VersionedHeader as ProtoVersionedHeader, - input::Variant as ProtoInputVariant, - output::Variant as ProtoOutputVariant, - transaction::Variant as ProtoTransactionVariant, - upgrade_purpose::Variant as ProtoUpgradePurposeVariant, - }, - result::{ - Error, - Result, }, }; -use anyhow::anyhow; #[cfg(feature = "fault-proving")] -use fuel_core_types::{ - blockchain::header::BlockHeaderV2, - fuel_types::ChainId, -}; +use fuel_core_types::fuel_types::ChainId; use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, - header::{ - ApplicationHeader, - BlockHeader, - BlockHeaderV1, - ConsensusHeader, - GeneratedConsensusFields, - PartialBlockHeader, - }, - primitives::{ - BlockId, - DaBlockHeight, - Empty, - }, - }, - fuel_tx::{ - Address, - BlobBody, - Bytes32, - Input, - Output, - StorageSlot, - Transaction as FuelTransaction, - TxPointer, - UpgradePurpose, - UploadBody, - UtxoId, - Witness, - field::{ - BlobId as _, - BytecodeRoot as _, - BytecodeWitnessIndex as _, - InputContract as _, - Inputs, - MintAmount as _, - MintAssetId as _, - MintGasPrice as _, - OutputContract as _, - Outputs, - Policies as _, - ProofSet as _, - ReceiptsRoot as _, - Salt as _, - Script as _, - ScriptData as _, - ScriptGasLimit as _, - StorageSlots as _, - SubsectionIndex as _, - SubsectionsNumber as _, - TxPointer as TxPointerField, - UpgradePurpose as UpgradePurposeField, - Witnesses as _, - }, - policies::{ - Policies as FuelPolicies, - PoliciesBits, - PolicyType, - }, }, - tai64, }; +use fuel_core_types::fuel_tx::Receipt as 
FuelReceipt; +use crate::blocks::importer_and_db_source::serializer_adapter::fuel_to_proto_conversions::{proto_header_from_header, proto_receipt_from_receipt, proto_tx_from_tx}; #[derive(Clone)] pub struct SerializerAdapter; @@ -124,14 +23,22 @@ pub struct SerializerAdapter; impl BlockSerializer for SerializerAdapter { type Block = ProtoBlock; - fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { - let (header, txs) = block.clone().into_inner(); - let proto_header = proto_header_from_header(header); + fn serialize_block( + &self, + block: &FuelBlock, + receipts: &[FuelReceipt], + ) -> crate::result::Result { + let proto_header = proto_header_from_header(block.header()); match &block { FuelBlock::V1(_) => { let proto_v1_block = ProtoV1Block { header: Some(proto_header), - transactions: txs.into_iter().map(proto_tx_from_tx).collect(), + transactions: block + .transactions() + .iter() + .map(proto_tx_from_tx) + .collect(), + receipts: receipts.iter().map(proto_receipt_from_receipt).collect(), }; Ok(ProtoBlock { versioned_block: Some(ProtoVersionedBlock::V1(proto_v1_block)), @@ -141,1352 +48,8 @@ impl BlockSerializer for SerializerAdapter { } } -fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { - let block_id = header.id(); - let consensus = *header.consensus(); - let versioned_header = match header { - BlockHeader::V1(header) => { - let proto_v1_header = - proto_v1_header_from_v1_header(consensus, block_id, header); - ProtoVersionedHeader::V1(proto_v1_header) - } - #[cfg(feature = "fault-proving")] - BlockHeader::V2(header) => { - let proto_v2_header = - proto_v2_header_from_v2_header(consensus, block_id, header); - ProtoVersionedHeader::V2(proto_v2_header) - } - }; - - ProtoHeader { - versioned_header: Some(versioned_header), - } -} - -fn proto_v1_header_from_v1_header( - consensus: ConsensusHeader, - block_id: BlockId, - header: BlockHeaderV1, -) -> ProtoV1Header { - let application = header.application(); - let generated = 
application.generated; - - ProtoV1Header { - da_height: application.da_height.0, - consensus_parameters_version: application.consensus_parameters_version, - state_transition_bytecode_version: application.state_transition_bytecode_version, - transactions_count: u32::from(generated.transactions_count), - message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), - message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - prev_root: bytes32_to_vec(&consensus.prev_root), - height: u32::from(consensus.height), - time: consensus.time.0, - application_hash: bytes32_to_vec(&consensus.generated.application_hash), - block_id: Some(block_id.as_slice().to_vec()), - } -} - -#[cfg(feature = "fault-proving")] -fn proto_v2_header_from_v2_header( - consensus: ConsensusHeader, - block_id: BlockId, - header: BlockHeaderV2, -) -> ProtoV2Header { - let application = *header.application(); - let generated = application.generated; - - ProtoV2Header { - da_height: application.da_height.0, - consensus_parameters_version: application.consensus_parameters_version, - state_transition_bytecode_version: application.state_transition_bytecode_version, - transactions_count: u32::from(generated.transactions_count), - message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), - message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), - prev_root: bytes32_to_vec(&consensus.prev_root), - height: u32::from(consensus.height), - time: consensus.time.0, - application_hash: bytes32_to_vec(&consensus.generated.application_hash), - block_id: Some(block_id.as_slice().to_vec()), - } -} - -fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { - match tx { - 
FuelTransaction::Script(script) => { - let proto_script = ProtoScriptTx { - script_gas_limit: *script.script_gas_limit(), - receipts_root: bytes32_to_vec(script.receipts_root()), - script: script.script().clone(), - script_data: script.script_data().clone(), - policies: Some(proto_policies_from_policies(script.policies())), - inputs: script - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: script - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: script - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Script(proto_script)), - } - } - FuelTransaction::Create(create) => { - let proto_create = ProtoCreateTx { - bytecode_witness_index: u32::from(*create.bytecode_witness_index()), - salt: create.salt().as_ref().to_vec(), - storage_slots: create - .storage_slots() - .iter() - .map(proto_storage_slot_from_storage_slot) - .collect(), - policies: Some(proto_policies_from_policies(create.policies())), - inputs: create - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: create - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: create - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Create(proto_create)), - } - } - FuelTransaction::Mint(mint) => { - let proto_mint = ProtoMintTx { - tx_pointer: Some(proto_tx_pointer(mint.tx_pointer())), - input_contract: Some(proto_contract_input_from_contract( - mint.input_contract(), - )), - output_contract: Some(proto_contract_output_from_contract( - mint.output_contract(), - )), - mint_amount: *mint.mint_amount(), - mint_asset_id: mint.mint_asset_id().as_ref().to_vec(), - gas_price: *mint.gas_price(), - metadata: None, - }; - - 
ProtoTransaction { - variant: Some(ProtoTransactionVariant::Mint(proto_mint)), - } - } - FuelTransaction::Upgrade(upgrade) => { - let proto_upgrade = ProtoUpgradeTx { - purpose: Some(proto_upgrade_purpose(upgrade.upgrade_purpose())), - policies: Some(proto_policies_from_policies(upgrade.policies())), - inputs: upgrade - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: upgrade - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: upgrade - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Upgrade(proto_upgrade)), - } - } - FuelTransaction::Upload(upload) => { - let proto_upload = ProtoUploadTx { - root: bytes32_to_vec(upload.bytecode_root()), - witness_index: u32::from(*upload.bytecode_witness_index()), - subsection_index: u32::from(*upload.subsection_index()), - subsections_number: u32::from(*upload.subsections_number()), - proof_set: upload.proof_set().iter().map(bytes32_to_vec).collect(), - policies: Some(proto_policies_from_policies(upload.policies())), - inputs: upload - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: upload - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: upload - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Upload(proto_upload)), - } - } - FuelTransaction::Blob(blob) => { - let proto_blob = ProtoBlobTx { - blob_id: blob.blob_id().as_ref().to_vec(), - witness_index: u32::from(*blob.bytecode_witness_index()), - policies: Some(proto_policies_from_policies(blob.policies())), - inputs: blob - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: blob - .outputs() - .iter() - .cloned() - 
.map(proto_output_from_output) - .collect(), - witnesses: blob - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Blob(proto_blob)), - } - } - } -} - -fn proto_input_from_input(input: Input) -> ProtoInput { - match input { - Input::CoinSigned(coin_signed) => ProtoInput { - variant: Some(ProtoInputVariant::CoinSigned(ProtoCoinSignedInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_signed.utxo_id)), - owner: coin_signed.owner.as_ref().to_vec(), - amount: coin_signed.amount, - asset_id: coin_signed.asset_id.as_ref().to_vec(), - tx_pointer: Some(proto_tx_pointer(&coin_signed.tx_pointer)), - witness_index: coin_signed.witness_index.into(), - predicate_gas_used: 0, - predicate: vec![], - predicate_data: vec![], - })), - }, - Input::CoinPredicate(coin_predicate) => ProtoInput { - variant: Some(ProtoInputVariant::CoinPredicate(ProtoCoinPredicateInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_predicate.utxo_id)), - owner: coin_predicate.owner.as_ref().to_vec(), - amount: coin_predicate.amount, - asset_id: coin_predicate.asset_id.as_ref().to_vec(), - tx_pointer: Some(proto_tx_pointer(&coin_predicate.tx_pointer)), - witness_index: 0, - predicate_gas_used: coin_predicate.predicate_gas_used, - predicate: coin_predicate.predicate.as_ref().to_vec(), - predicate_data: coin_predicate.predicate_data.as_ref().to_vec(), - })), - }, - Input::Contract(contract) => ProtoInput { - variant: Some(ProtoInputVariant::Contract(ProtoContractInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), - balance_root: bytes32_to_vec(&contract.balance_root), - state_root: bytes32_to_vec(&contract.state_root), - tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), - contract_id: contract.contract_id.as_ref().to_vec(), - })), - }, - Input::MessageCoinSigned(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageCoinSigned( - 
ProtoMessageCoinSignedInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: message.witness_index.into(), - predicate_gas_used: 0, - data: Vec::new(), - predicate: Vec::new(), - predicate_data: Vec::new(), - }, - )), - }, - Input::MessageCoinPredicate(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageCoinPredicate( - ProtoMessageCoinPredicateInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: 0, - predicate_gas_used: message.predicate_gas_used, - data: Vec::new(), - predicate: message.predicate.as_ref().to_vec(), - predicate_data: message.predicate_data.as_ref().to_vec(), - }, - )), - }, - Input::MessageDataSigned(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageDataSigned( - ProtoMessageDataSignedInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: message.witness_index.into(), - predicate_gas_used: 0, - data: message.data.as_ref().to_vec(), - predicate: Vec::new(), - predicate_data: Vec::new(), - }, - )), - }, - Input::MessageDataPredicate(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageDataPredicate( - ProtoMessageDataPredicateInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: 0, - predicate_gas_used: message.predicate_gas_used, - data: message.data.as_ref().to_vec(), - predicate: message.predicate.as_ref().to_vec(), - predicate_data: message.predicate_data.as_ref().to_vec(), - }, - )), - }, - } -} - -fn proto_utxo_id_from_utxo_id(utxo_id: &UtxoId) -> ProtoUtxoId { - ProtoUtxoId { 
- tx_id: utxo_id.tx_id().as_ref().to_vec(), - output_index: utxo_id.output_index().into(), - } -} - -fn proto_tx_pointer(tx_pointer: &TxPointer) -> ProtoTxPointer { - ProtoTxPointer { - block_height: tx_pointer.block_height().into(), - tx_index: tx_pointer.tx_index().into(), - } -} - -fn proto_storage_slot_from_storage_slot(slot: &StorageSlot) -> ProtoStorageSlot { - ProtoStorageSlot { - key: slot.key().as_ref().to_vec(), - value: slot.value().as_ref().to_vec(), - } -} - -fn proto_contract_input_from_contract( - contract: &fuel_core_types::fuel_tx::input::contract::Contract, -) -> ProtoContractInput { - ProtoContractInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), - balance_root: bytes32_to_vec(&contract.balance_root), - state_root: bytes32_to_vec(&contract.state_root), - tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), - contract_id: contract.contract_id.as_ref().to_vec(), - } -} - -fn proto_contract_output_from_contract( - contract: &fuel_core_types::fuel_tx::output::contract::Contract, -) -> ProtoContractOutput { - ProtoContractOutput { - input_index: u32::from(contract.input_index), - balance_root: bytes32_to_vec(&contract.balance_root), - state_root: bytes32_to_vec(&contract.state_root), - } -} - -fn proto_output_from_output(output: Output) -> ProtoOutput { - let variant = match output { - Output::Coin { - to, - amount, - asset_id, - } => ProtoOutputVariant::Coin(ProtoCoinOutput { - to: to.as_ref().to_vec(), - amount, - asset_id: asset_id.as_ref().to_vec(), - }), - Output::Contract(contract) => { - ProtoOutputVariant::Contract(proto_contract_output_from_contract(&contract)) - } - Output::Change { - to, - amount, - asset_id, - } => ProtoOutputVariant::Change(ProtoChangeOutput { - to: to.as_ref().to_vec(), - amount, - asset_id: asset_id.as_ref().to_vec(), - }), - Output::Variable { - to, - amount, - asset_id, - } => ProtoOutputVariant::Variable(ProtoVariableOutput { - to: to.as_ref().to_vec(), - amount, - asset_id: 
asset_id.as_ref().to_vec(), - }), - Output::ContractCreated { - contract_id, - state_root, - } => ProtoOutputVariant::ContractCreated(ProtoContractCreatedOutput { - contract_id: contract_id.as_ref().to_vec(), - state_root: bytes32_to_vec(&state_root), - }), - }; - - ProtoOutput { - variant: Some(variant), - } -} - -fn proto_upgrade_purpose(purpose: &UpgradePurpose) -> ProtoUpgradePurpose { - let variant = match purpose { - UpgradePurpose::ConsensusParameters { - witness_index, - checksum, - } => ProtoUpgradePurposeVariant::ConsensusParameters( - ProtoUpgradeConsensusParameters { - witness_index: u32::from(*witness_index), - checksum: checksum.as_ref().to_vec(), - }, - ), - UpgradePurpose::StateTransition { root } => { - ProtoUpgradePurposeVariant::StateTransition(ProtoUpgradeStateTransition { - root: root.as_ref().to_vec(), - }) - } - }; - - ProtoUpgradePurpose { - variant: Some(variant), - } -} - -fn proto_policies_from_policies( - policies: &fuel_core_types::fuel_tx::policies::Policies, -) -> ProtoPolicies { - let mut values = [0u64; 6]; - if policies.is_set(PolicyType::Tip) { - values[0] = policies.get(PolicyType::Tip).unwrap_or_default(); - } - if policies.is_set(PolicyType::WitnessLimit) { - let value = policies.get(PolicyType::WitnessLimit).unwrap_or_default(); - values[1] = value; - } - if policies.is_set(PolicyType::Maturity) { - let value = policies.get(PolicyType::Maturity).unwrap_or_default(); - values[2] = value; - } - if policies.is_set(PolicyType::MaxFee) { - values[3] = policies.get(PolicyType::MaxFee).unwrap_or_default(); - } - if policies.is_set(PolicyType::Expiration) { - values[4] = policies.get(PolicyType::Expiration).unwrap_or_default(); - } - if policies.is_set(PolicyType::Owner) { - values[5] = policies.get(PolicyType::Owner).unwrap_or_default(); - } - let bits = policies.bits(); - ProtoPolicies { - bits, - values: values.to_vec(), - } -} - -fn tx_pointer_from_proto(proto: &ProtoTxPointer) -> Result { - let block_height = 
proto.block_height.into(); - #[allow(clippy::useless_conversion)] - let tx_index = proto.tx_index.try_into().map_err(|e| { - Error::Serialization(anyhow!("Could not convert tx_index to target type: {}", e)) - })?; - Ok(TxPointer::new(block_height, tx_index)) -} - -fn storage_slot_from_proto(proto: &ProtoStorageSlot) -> Result { - let key = Bytes32::try_from(proto.key.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert storage slot key to Bytes32: {}", - e - )) - })?; - let value = Bytes32::try_from(proto.value.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert storage slot value to Bytes32: {}", - e - )) - })?; - Ok(StorageSlot::new(key, value)) -} - -fn contract_input_from_proto( - proto: &ProtoContractInput, -) -> Result { - let utxo_proto = proto.utxo_id.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing utxo_id on contract input")) - })?; - let utxo_id = utxo_id_from_proto(utxo_proto)?; - let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!("Could not convert balance_root to Bytes32: {}", e)) - })?; - let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!("Could not convert state_root to Bytes32: {}", e)) - })?; - let tx_pointer_proto = proto.tx_pointer.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing tx_pointer on contract input")) - })?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - let contract_id = - fuel_core_types::fuel_types::ContractId::try_from(proto.contract_id.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - Ok(fuel_core_types::fuel_tx::input::contract::Contract { - utxo_id, - balance_root, - state_root, - tx_pointer, - contract_id, - }) -} - -fn contract_output_from_proto( - proto: &ProtoContractOutput, -) -> Result { - let input_index = u16::try_from(proto.input_index).map_err(|e| { - Error::Serialization(anyhow!( - 
"Could not convert contract output input_index to u16: {}", - e - )) - })?; - let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert contract output balance_root to Bytes32: {}", - e - )) - })?; - let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert contract output state_root to Bytes32: {}", - e - )) - })?; - - Ok(fuel_core_types::fuel_tx::output::contract::Contract { - input_index, - balance_root, - state_root, - }) -} - -fn output_from_proto_output(proto_output: &ProtoOutput) -> Result { - match proto_output - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing output variant")))? - { - ProtoOutputVariant::Coin(coin) => { - let to = Address::try_from(coin.to.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let asset_id = - fuel_core_types::fuel_types::AssetId::try_from(coin.asset_id.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - Ok(Output::coin(to, coin.amount, asset_id)) - } - ProtoOutputVariant::Contract(contract) => { - let contract = contract_output_from_proto(contract)?; - Ok(Output::Contract(contract)) - } - ProtoOutputVariant::Change(change) => { - let to = Address::try_from(change.to.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - change.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - Ok(Output::change(to, change.amount, asset_id)) - } - ProtoOutputVariant::Variable(variable) => { - let to = Address::try_from(variable.to.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - variable.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - Ok(Output::variable(to, variable.amount, asset_id)) - } - 
ProtoOutputVariant::ContractCreated(contract_created) => { - let contract_id = fuel_core_types::fuel_types::ContractId::try_from( - contract_created.contract_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let state_root = Bytes32::try_from(contract_created.state_root.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert state_root to Bytes32: {}", - e - )) - })?; - Ok(Output::contract_created(contract_id, state_root)) - } - } -} - -fn upgrade_purpose_from_proto(proto: &ProtoUpgradePurpose) -> Result { - match proto - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing upgrade purpose variant")))? - { - ProtoUpgradePurposeVariant::ConsensusParameters(consensus) => { - let witness_index = u16::try_from(consensus.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - let checksum = - Bytes32::try_from(consensus.checksum.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert checksum to Bytes32: {}", - e - )) - })?; - Ok(UpgradePurpose::ConsensusParameters { - witness_index, - checksum, - }) - } - ProtoUpgradePurposeVariant::StateTransition(state) => { - let root = Bytes32::try_from(state.root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert state transition root to Bytes32: {}", - e - )) - })?; - Ok(UpgradePurpose::StateTransition { root }) - } - } -} - -fn utxo_id_from_proto(proto_utxo: &ProtoUtxoId) -> Result { - let tx_id = Bytes32::try_from(proto_utxo.tx_id.as_slice()).map_err(|e| { - Error::Serialization(anyhow!("Could not convert tx_id to Bytes32: {}", e)) - })?; - let output_index = u16::try_from(proto_utxo.output_index).map_err(|e| { - Error::Serialization(anyhow!("Could not convert output_index to u16: {}", e)) - })?; - Ok(UtxoId::new(tx_id, output_index)) -} - -fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { - bytes.as_ref().to_vec() 
-} - -pub fn fuel_block_from_protobuf( - proto_block: ProtoBlock, - msg_ids: &[fuel_core_types::fuel_tx::MessageId], - event_inbox_root: Bytes32, -) -> Result { - let versioned_block = proto_block - .versioned_block - .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) - .map_err(Error::Serialization)?; - let partial_header = match &versioned_block { - ProtoVersionedBlock::V1(v1_block) => { - let proto_header = v1_block - .header - .clone() - .ok_or_else(|| anyhow::anyhow!("Missing protobuf header")) - .map_err(Error::Serialization)?; - partial_header_from_proto_header(proto_header)? - } - }; - let txs = match versioned_block { - ProtoVersionedBlock::V1(v1_inner) => v1_inner - .transactions - .iter() - .map(tx_from_proto_tx) - .collect::>()?, - }; - FuelBlock::new( - partial_header, - txs, - msg_ids, - event_inbox_root, - #[cfg(feature = "fault-proving")] - &ChainId::default(), - ) - .map_err(|e| anyhow!(e)) - .map_err(Error::Serialization) -} - -pub fn partial_header_from_proto_header( - proto_header: ProtoHeader, -) -> Result { - let partial_header = PartialBlockHeader { - consensus: proto_header_to_empty_consensus_header(&proto_header)?, - application: proto_header_to_empty_application_header(&proto_header)?, - }; - Ok(partial_header) -} - -pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { - let variant = proto_tx - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing transaction variant")))?; - - match variant { - ProtoTransactionVariant::Script(proto_script) => { - let policies = proto_script - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_script - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_script - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_script - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let mut script_tx = 
FuelTransaction::script( - proto_script.script_gas_limit, - proto_script.script.clone(), - proto_script.script_data.clone(), - policies, - inputs, - outputs, - witnesses, - ); - *script_tx.receipts_root_mut() = Bytes32::try_from( - proto_script.receipts_root.as_slice(), - ) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert receipts_root to Bytes32: {}", - e - )) - })?; - - Ok(FuelTransaction::Script(script_tx)) - } - ProtoTransactionVariant::Create(proto_create) => { - let policies = proto_create - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_create - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_create - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_create - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let storage_slots = proto_create - .storage_slots - .iter() - .map(storage_slot_from_proto) - .collect::>>()?; - let salt = - fuel_core_types::fuel_types::Salt::try_from(proto_create.salt.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let bytecode_witness_index = - u16::try_from(proto_create.bytecode_witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert bytecode_witness_index to u16: {}", - e - )) - })?; - - let create_tx = FuelTransaction::create( - bytecode_witness_index, - policies, - salt, - storage_slots, - inputs, - outputs, - witnesses, - ); - - Ok(FuelTransaction::Create(create_tx)) - } - ProtoTransactionVariant::Mint(proto_mint) => { - let tx_pointer_proto = proto_mint.tx_pointer.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing tx_pointer on mint transaction")) - })?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - let input_contract_proto = - proto_mint.input_contract.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!( - "Missing input_contract on mint transaction" - )) 
- })?; - let input_contract = contract_input_from_proto(input_contract_proto)?; - let output_contract_proto = - proto_mint.output_contract.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!( - "Missing output_contract on mint transaction" - )) - })?; - let output_contract = contract_output_from_proto(output_contract_proto)?; - let mint_asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_mint.mint_asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - let mint_tx = FuelTransaction::mint( - tx_pointer, - input_contract, - output_contract, - proto_mint.mint_amount, - mint_asset_id, - proto_mint.gas_price, - ); - - Ok(FuelTransaction::Mint(mint_tx)) - } - ProtoTransactionVariant::Upgrade(proto_upgrade) => { - let purpose_proto = proto_upgrade.purpose.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing purpose on upgrade transaction")) - })?; - let upgrade_purpose = upgrade_purpose_from_proto(purpose_proto)?; - let policies = proto_upgrade - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_upgrade - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_upgrade - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_upgrade - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - - let upgrade_tx = FuelTransaction::upgrade( - upgrade_purpose, - policies, - inputs, - outputs, - witnesses, - ); - - Ok(FuelTransaction::Upgrade(upgrade_tx)) - } - ProtoTransactionVariant::Upload(proto_upload) => { - let policies = proto_upload - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_upload - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_upload - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_upload - .witnesses - .iter() - 
.map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let root = Bytes32::try_from(proto_upload.root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert upload root to Bytes32: {}", - e - )) - })?; - let witness_index = - u16::try_from(proto_upload.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - let subsection_index = - u16::try_from(proto_upload.subsection_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert subsection_index to u16: {}", - e - )) - })?; - let subsections_number = u16::try_from(proto_upload.subsections_number) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert subsections_number to u16: {}", - e - )) - })?; - let proof_set = proto_upload - .proof_set - .iter() - .map(|entry| { - Bytes32::try_from(entry.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert proof_set entry to Bytes32: {}", - e - )) - }) - }) - .collect::>>()?; - - let body = UploadBody { - root, - witness_index, - subsection_index, - subsections_number, - proof_set, - }; - - let upload_tx = - FuelTransaction::upload(body, policies, inputs, outputs, witnesses); - - Ok(FuelTransaction::Upload(upload_tx)) - } - ProtoTransactionVariant::Blob(proto_blob) => { - let policies = proto_blob - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_blob - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_blob - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_blob - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let blob_id = fuel_core_types::fuel_types::BlobId::try_from( - proto_blob.blob_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let witness_index = u16::try_from(proto_blob.witness_index).map_err(|e| { - 
Error::Serialization(anyhow!( - "Could not convert blob witness_index to u16: {}", - e - )) - })?; - let body = BlobBody { - id: blob_id, - witness_index, - }; - - let blob_tx = - FuelTransaction::blob(body, policies, inputs, outputs, witnesses); - - Ok(FuelTransaction::Blob(blob_tx)) - } - } -} - -fn input_from_proto_input(proto_input: &ProtoInput) -> Result { - let variant = proto_input - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing input variant")))?; - - match variant { - ProtoInputVariant::CoinSigned(proto_coin_signed) => { - let utxo_proto = proto_coin_signed - .utxo_id - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; - let utxo_id = utxo_id_from_proto(utxo_proto)?; - let owner = - Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert owner to Address: {}", - e - )) - })?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_coin_signed.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let tx_pointer_proto = proto_coin_signed - .tx_pointer - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - let witness_index = - u16::try_from(proto_coin_signed.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - - Ok(Input::coin_signed( - utxo_id, - owner, - proto_coin_signed.amount, - asset_id, - tx_pointer, - witness_index, - )) - } - ProtoInputVariant::CoinPredicate(proto_coin_predicate) => { - let utxo_proto = proto_coin_predicate - .utxo_id - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; - let utxo_id = utxo_id_from_proto(utxo_proto)?; - let owner = Address::try_from(proto_coin_predicate.owner.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert owner to Address: {}", - 
e - )) - })?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_coin_predicate.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let tx_pointer_proto = proto_coin_predicate - .tx_pointer - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - - Ok(Input::coin_predicate( - utxo_id, - owner, - proto_coin_predicate.amount, - asset_id, - tx_pointer, - proto_coin_predicate.predicate_gas_used, - proto_coin_predicate.predicate.clone(), - proto_coin_predicate.predicate_data.clone(), - )) - } - ProtoInputVariant::Contract(proto_contract) => { - let contract = contract_input_from_proto(proto_contract)?; - Ok(Input::Contract(contract)) - } - ProtoInputVariant::MessageCoinSigned(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let witness_index = - u16::try_from(proto_message.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - - Ok(Input::message_coin_signed( - sender, - recipient, - proto_message.amount, - nonce, - witness_index, - )) - } - ProtoInputVariant::MessageCoinPredicate(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( 
- "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - Ok(Input::message_coin_predicate( - sender, - recipient, - proto_message.amount, - nonce, - proto_message.predicate_gas_used, - proto_message.predicate.clone(), - proto_message.predicate_data.clone(), - )) - } - ProtoInputVariant::MessageDataSigned(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let witness_index = - u16::try_from(proto_message.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - - Ok(Input::message_data_signed( - sender, - recipient, - proto_message.amount, - nonce, - witness_index, - proto_message.data.clone(), - )) - } - ProtoInputVariant::MessageDataPredicate(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - Ok(Input::message_data_predicate( - sender, - recipient, - proto_message.amount, - nonce, - 
proto_message.predicate_gas_used, - proto_message.data.clone(), - proto_message.predicate.clone(), - proto_message.predicate_data.clone(), - )) - } - } -} - -fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { - let ProtoPolicies { bits, values } = proto_policies; - let mut policies = FuelPolicies::default(); - let bits = - PoliciesBits::from_bits(bits).expect("Should be able to create from `u32`"); - if bits.contains(PoliciesBits::Tip) - && let Some(tip) = values.first() - { - policies.set(PolicyType::Tip, Some(*tip)); - } - if bits.contains(PoliciesBits::WitnessLimit) - && let Some(witness_limit) = values.get(1) - { - policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); - } - if bits.contains(PoliciesBits::Maturity) - && let Some(maturity) = values.get(2) - { - policies.set(PolicyType::Maturity, Some(*maturity)); - } - if bits.contains(PoliciesBits::MaxFee) - && let Some(max_fee) = values.get(3) - { - policies.set(PolicyType::MaxFee, Some(*max_fee)); - } - if bits.contains(PoliciesBits::Expiration) - && let Some(expiration) = values.get(4) - { - policies.set(PolicyType::Expiration, Some(*expiration)); - } - if bits.contains(PoliciesBits::Owner) - && let Some(owner) = values.get(5) - { - policies.set(PolicyType::Owner, Some(*owner)); - } - policies -} - -pub fn proto_header_to_empty_application_header( - proto_header: &ProtoHeader, -) -> Result> { - match proto_header.versioned_header.clone() { - Some(ProtoVersionedHeader::V1(header)) => { - let app_header = ApplicationHeader { - da_height: DaBlockHeight::from(header.da_height), - consensus_parameters_version: header.consensus_parameters_version, - state_transition_bytecode_version: header - .state_transition_bytecode_version, - generated: Empty {}, - }; - Ok(app_header) - } - Some(ProtoVersionedHeader::V2(header)) => { - if cfg!(feature = "fault-proving") { - let app_header = ApplicationHeader { - da_height: DaBlockHeight::from(header.da_height), - 
consensus_parameters_version: header.consensus_parameters_version, - state_transition_bytecode_version: header - .state_transition_bytecode_version, - generated: Empty {}, - }; - Ok(app_header) - } else { - Err(anyhow!("V2 headers require the 'fault-proving' feature")) - .map_err(Error::Serialization) - } - } - None => Err(anyhow!("Missing protobuf versioned_header")) - .map_err(Error::Serialization), - } -} - -/// Alias the consensus header into an empty one. -pub fn proto_header_to_empty_consensus_header( - proto_header: &ProtoHeader, -) -> Result> { - match proto_header.versioned_header.clone() { - Some(ProtoVersionedHeader::V1(header)) => { - let consensus_header = ConsensusHeader { - prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root).ok_or( - Error::Serialization(anyhow!("Could create `Bytes32` from bytes")), - )?, - height: header.height.into(), - time: tai64::Tai64(header.time), - generated: Empty {}, - }; - Ok(consensus_header) - } - Some(ProtoVersionedHeader::V2(header)) => { - if cfg!(feature = "fault-proving") { - let consensus_header = ConsensusHeader { - prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) - .ok_or(Error::Serialization(anyhow!( - "Could create `Bytes32` from bytes" - )))?, - height: header.height.into(), - time: tai64::Tai64(header.time), - generated: Empty {}, - }; - Ok(consensus_header) - } else { - Err(anyhow!("V2 headers require the 'fault-proving' feature")) - .map_err(Error::Serialization) - } - } - None => Err(anyhow!("Missing protobuf versioned_header")) - .map_err(Error::Serialization), - } -} +pub mod fuel_to_proto_conversions; +pub mod proto_to_fuel_conversions; // TODO: Add coverage for V2 Block stuff // https://github.com/FuelLabs/fuel-core/issues/3139 @@ -1495,26 +58,30 @@ pub fn proto_header_to_empty_consensus_header( #[cfg(test)] mod tests { use super::*; - use fuel_core_types::test_helpers::arb_block; + use fuel_core_types::test_helpers::{arb_block, arb_receipts}; use proptest::prelude::*; + use 
crate::blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf; proptest! { #![proptest_config(ProptestConfig { - cases: 100, .. ProptestConfig::default() + cases: 1, .. ProptestConfig::default() })] #[test] - fn serialize_block__roundtrip((block, msg_ids, event_inbox_root) in arb_block()) { + fn serialize_block__roundtrip( + (block, msg_ids, event_inbox_root) in arb_block(), + receipts in arb_receipts()) + { // given let serializer = SerializerAdapter; // when - let proto_block = serializer.serialize_block(&block).unwrap(); + let proto_block = serializer.serialize_block(&block, &receipts).unwrap(); // then - let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); + let (deserialized_block, deserialized_receipts) = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); assert_eq!(block, deserialized_block); - - } + assert_eq!(receipts, deserialized_receipts); + } } #[test] diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs new file mode 100644 index 00000000000..66c3a84ce29 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs @@ -0,0 +1,855 @@ +#[cfg(feature = "fault-proving")] +use crate::protobuf_types::V2Header as ProtoV2Header; +use crate::{ + blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::bytes32_to_vec, + protobuf_types::{ + BlobTransaction as ProtoBlobTx, + ChangeOutput as ProtoChangeOutput, + CoinOutput as ProtoCoinOutput, + CoinPredicateInput as ProtoCoinPredicateInput, + CoinSignedInput as ProtoCoinSignedInput, + ContractCreatedOutput as ProtoContractCreatedOutput, + ContractInput as ProtoContractInput, + ContractOutput as 
ProtoContractOutput, + CreateTransaction as ProtoCreateTx, + Header as ProtoHeader, + Input as ProtoInput, + MessageCoinPredicateInput as ProtoMessageCoinPredicateInput, + MessageCoinSignedInput as ProtoMessageCoinSignedInput, + MessageDataPredicateInput as ProtoMessageDataPredicateInput, + MessageDataSignedInput as ProtoMessageDataSignedInput, + MintTransaction as ProtoMintTx, + Output as ProtoOutput, + Policies as ProtoPolicies, + Receipt as ProtoReceipt, + ScriptTransaction as ProtoScriptTx, + StorageSlot as ProtoStorageSlot, + Transaction as ProtoTransaction, + TxPointer as ProtoTxPointer, + UpgradeConsensusParameters as ProtoUpgradeConsensusParameters, + UpgradePurpose as ProtoUpgradePurpose, + UpgradeStateTransition as ProtoUpgradeStateTransition, + UpgradeTransaction as ProtoUpgradeTx, + UploadTransaction as ProtoUploadTx, + UtxoId as ProtoUtxoId, + V1Header as ProtoV1Header, + VariableOutput as ProtoVariableOutput, + header::VersionedHeader as ProtoVersionedHeader, + input::Variant as ProtoInputVariant, + output::Variant as ProtoOutputVariant, + receipt::Variant as ProtoReceiptVariant, + script_execution_result::Variant as ProtoScriptExecutionResultVariant, + transaction::Variant as ProtoTransactionVariant, + upgrade_purpose::Variant as ProtoUpgradePurposeVariant, + }, +}; + +#[cfg(feature = "fault-proving")] +use fuel_core_types::blockchain::header::BlockHeaderV2; +use fuel_core_types::{ + blockchain::{ + header::{ + BlockHeader, + BlockHeaderV1, + ConsensusHeader, + GeneratedConsensusFields, + }, + primitives::BlockId, + }, + fuel_asm::PanicInstruction, + fuel_tx::{ + Input, + Output, + Receipt as FuelReceipt, + ScriptExecutionResult, + StorageSlot, + Transaction as FuelTransaction, + TxPointer, + UpgradePurpose, + UtxoId, + field::{ + BlobId as _, + BytecodeRoot as _, + BytecodeWitnessIndex as _, + InputContract as _, + Inputs, + MintAmount as _, + MintAssetId as _, + MintGasPrice as _, + OutputContract as _, + Outputs, + Policies as _, + ProofSet as _, 
+ ReceiptsRoot as _, + Salt as _, + Script as _, + ScriptData as _, + ScriptGasLimit as _, + StorageSlots as _, + SubsectionIndex as _, + SubsectionsNumber as _, + TxPointer as TxPointerField, + UpgradePurpose as UpgradePurposeField, + Witnesses as _, + }, + policies::PolicyType, + }, +}; + +pub fn proto_header_from_header(header: &BlockHeader) -> ProtoHeader { + let block_id = header.id(); + let consensus = header.consensus(); + let versioned_header = match header { + BlockHeader::V1(header) => { + let proto_v1_header = + proto_v1_header_from_v1_header(consensus, &block_id, header); + ProtoVersionedHeader::V1(proto_v1_header) + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + let proto_v2_header = + proto_v2_header_from_v2_header(consensus, &block_id, header); + ProtoVersionedHeader::V2(proto_v2_header) + } + }; + + ProtoHeader { + versioned_header: Some(versioned_header), + } +} + +fn proto_v1_header_from_v1_header( + consensus: &ConsensusHeader, + block_id: &BlockId, + header: &BlockHeaderV1, +) -> ProtoV1Header { + let application = header.application(); + let generated = application.generated; + + ProtoV1Header { + da_height: application.da_height.0, + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0, + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + +#[cfg(feature = "fault-proving")] +fn proto_v2_header_from_v2_header( + 
consensus: &ConsensusHeader, + block_id: &BlockId, + header: &BlockHeaderV2, +) -> ProtoV2Header { + let application = *header.application(); + let generated = application.generated; + + ProtoV2Header { + da_height: application.da_height.0, + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0, + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + +pub fn proto_tx_from_tx(tx: &FuelTransaction) -> ProtoTransaction { + match tx { + FuelTransaction::Script(script) => { + let proto_script = ProtoScriptTx { + script_gas_limit: *script.script_gas_limit(), + receipts_root: bytes32_to_vec(script.receipts_root()), + script: script.script().clone(), + script_data: script.script_data().clone(), + policies: Some(proto_policies_from_policies(script.policies())), + inputs: script.inputs().iter().map(proto_input_from_input).collect(), + outputs: script + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: script + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Script(proto_script)), + } + } + FuelTransaction::Create(create) => { + let proto_create = ProtoCreateTx { + bytecode_witness_index: u32::from(*create.bytecode_witness_index()), + salt: 
create.salt().as_ref().to_vec(), + storage_slots: create + .storage_slots() + .iter() + .map(proto_storage_slot_from_storage_slot) + .collect(), + policies: Some(proto_policies_from_policies(create.policies())), + inputs: create.inputs().iter().map(proto_input_from_input).collect(), + outputs: create + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: create + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Create(proto_create)), + } + } + FuelTransaction::Mint(mint) => { + let proto_mint = ProtoMintTx { + tx_pointer: Some(proto_tx_pointer(mint.tx_pointer())), + input_contract: Some(proto_contract_input_from_contract( + mint.input_contract(), + )), + output_contract: Some(proto_contract_output_from_contract( + mint.output_contract(), + )), + mint_amount: *mint.mint_amount(), + mint_asset_id: mint.mint_asset_id().as_ref().to_vec(), + gas_price: *mint.gas_price(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Mint(proto_mint)), + } + } + FuelTransaction::Upgrade(upgrade) => { + let proto_upgrade = ProtoUpgradeTx { + purpose: Some(proto_upgrade_purpose(upgrade.upgrade_purpose())), + policies: Some(proto_policies_from_policies(upgrade.policies())), + inputs: upgrade + .inputs() + .iter() + .map(proto_input_from_input) + .collect(), + outputs: upgrade + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: upgrade + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upgrade(proto_upgrade)), + } + } + FuelTransaction::Upload(upload) => { + let proto_upload = ProtoUploadTx { + root: bytes32_to_vec(upload.bytecode_root()), + witness_index: u32::from(*upload.bytecode_witness_index()), + subsection_index: u32::from(*upload.subsection_index()), + 
subsections_number: u32::from(*upload.subsections_number()), + proof_set: upload.proof_set().iter().map(bytes32_to_vec).collect(), + policies: Some(proto_policies_from_policies(upload.policies())), + inputs: upload.inputs().iter().map(proto_input_from_input).collect(), + outputs: upload + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: upload + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upload(proto_upload)), + } + } + FuelTransaction::Blob(blob) => { + let proto_blob = ProtoBlobTx { + blob_id: blob.blob_id().as_ref().to_vec(), + witness_index: u32::from(*blob.bytecode_witness_index()), + policies: Some(proto_policies_from_policies(blob.policies())), + inputs: blob.inputs().iter().map(proto_input_from_input).collect(), + outputs: blob + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: blob + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Blob(proto_blob)), + } + } + } +} + +fn proto_input_from_input(input: &Input) -> ProtoInput { + match input { + Input::CoinSigned(coin_signed) => ProtoInput { + variant: Some(ProtoInputVariant::CoinSigned(ProtoCoinSignedInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_signed.utxo_id)), + owner: coin_signed.owner.as_ref().to_vec(), + amount: coin_signed.amount, + asset_id: coin_signed.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_signed.tx_pointer)), + witness_index: coin_signed.witness_index.into(), + predicate_gas_used: 0, + predicate: vec![], + predicate_data: vec![], + })), + }, + Input::CoinPredicate(coin_predicate) => ProtoInput { + variant: Some(ProtoInputVariant::CoinPredicate(ProtoCoinPredicateInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_predicate.utxo_id)), + owner: 
coin_predicate.owner.as_ref().to_vec(), + amount: coin_predicate.amount, + asset_id: coin_predicate.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_predicate.tx_pointer)), + witness_index: 0, + predicate_gas_used: coin_predicate.predicate_gas_used, + predicate: coin_predicate.predicate.as_ref().to_vec(), + predicate_data: coin_predicate.predicate_data.as_ref().to_vec(), + })), + }, + Input::Contract(contract) => ProtoInput { + variant: Some(ProtoInputVariant::Contract(ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + })), + }, + Input::MessageCoinSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinSigned( + ProtoMessageCoinSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), + predicate_gas_used: 0, + data: Vec::new(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageCoinPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinPredicate( + ProtoMessageCoinPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: Vec::new(), + predicate: message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + Input::MessageDataSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataSigned( + ProtoMessageDataSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: 
message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), + predicate_gas_used: 0, + data: message.data.as_ref().to_vec(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageDataPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataPredicate( + ProtoMessageDataPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: message.data.as_ref().to_vec(), + predicate: message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + } +} + +fn proto_utxo_id_from_utxo_id(utxo_id: &UtxoId) -> ProtoUtxoId { + ProtoUtxoId { + tx_id: utxo_id.tx_id().as_ref().to_vec(), + output_index: utxo_id.output_index().into(), + } +} + +fn proto_tx_pointer(tx_pointer: &TxPointer) -> ProtoTxPointer { + ProtoTxPointer { + block_height: tx_pointer.block_height().into(), + tx_index: tx_pointer.tx_index().into(), + } +} + +fn proto_storage_slot_from_storage_slot(slot: &StorageSlot) -> ProtoStorageSlot { + ProtoStorageSlot { + key: slot.key().as_ref().to_vec(), + value: slot.value().as_ref().to_vec(), + } +} + +fn proto_contract_input_from_contract( + contract: &fuel_core_types::fuel_tx::input::contract::Contract, +) -> ProtoContractInput { + ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + } +} + +fn proto_contract_output_from_contract( + contract: &fuel_core_types::fuel_tx::output::contract::Contract, +) -> ProtoContractOutput { + 
ProtoContractOutput { + input_index: u32::from(contract.input_index), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + } +} + +fn proto_output_from_output(output: &Output) -> ProtoOutput { + let variant = match output { + Output::Coin { + to, + amount, + asset_id, + } => ProtoOutputVariant::Coin(ProtoCoinOutput { + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Contract(contract) => { + ProtoOutputVariant::Contract(proto_contract_output_from_contract(contract)) + } + Output::Change { + to, + amount, + asset_id, + } => ProtoOutputVariant::Change(ProtoChangeOutput { + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Variable { + to, + amount, + asset_id, + } => ProtoOutputVariant::Variable(ProtoVariableOutput { + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::ContractCreated { + contract_id, + state_root, + } => ProtoOutputVariant::ContractCreated(ProtoContractCreatedOutput { + contract_id: contract_id.as_ref().to_vec(), + state_root: bytes32_to_vec(state_root), + }), + }; + + ProtoOutput { + variant: Some(variant), + } +} + +fn proto_upgrade_purpose(purpose: &UpgradePurpose) -> ProtoUpgradePurpose { + let variant = match purpose { + UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + } => ProtoUpgradePurposeVariant::ConsensusParameters( + ProtoUpgradeConsensusParameters { + witness_index: u32::from(*witness_index), + checksum: checksum.as_ref().to_vec(), + }, + ), + UpgradePurpose::StateTransition { root } => { + ProtoUpgradePurposeVariant::StateTransition(ProtoUpgradeStateTransition { + root: root.as_ref().to_vec(), + }) + } + }; + + ProtoUpgradePurpose { + variant: Some(variant), + } +} + +fn proto_policies_from_policies( + policies: &fuel_core_types::fuel_tx::policies::Policies, +) -> ProtoPolicies { + let mut values = [0u64; 6]; 
+ let mut truncated_len = 0; + if let Some(value) = policies.get(PolicyType::Tip) { + values[0] = value; + truncated_len = 1; + } + if let Some(value) = policies.get(PolicyType::WitnessLimit) { + values[1] = value; + truncated_len = 2; + } + if let Some(value) = policies.get(PolicyType::Maturity) { + values[2] = value; + truncated_len = 3; + } + if let Some(value) = policies.get(PolicyType::MaxFee) { + values[3] = value; + truncated_len = 4; + } + if let Some(value) = policies.get(PolicyType::Expiration) { + values[4] = value; + truncated_len = 5; + } + if let Some(value) = policies.get(PolicyType::Owner) { + values[5] = value; + truncated_len = 6; + } + let bits = policies.bits(); + values[..truncated_len].to_vec(); + ProtoPolicies { + bits, + values: values.to_vec(), + } +} + +fn proto_script_execution_result( + result: &ScriptExecutionResult, +) -> crate::protobuf_types::ScriptExecutionResult { + use crate::protobuf_types::{ + ScriptExecutionResult as ProtoScriptExecutionResult, + ScriptExecutionResultGenericFailure as ProtoScriptExecutionResultGenericFailure, + ScriptExecutionResultPanic as ProtoScriptExecutionResultPanic, + ScriptExecutionResultRevert as ProtoScriptExecutionResultRevert, + ScriptExecutionResultSuccess as ProtoScriptExecutionResultSuccess, + }; + + let variant = match result { + ScriptExecutionResult::Success => ProtoScriptExecutionResultVariant::Success( + ProtoScriptExecutionResultSuccess {}, + ), + ScriptExecutionResult::Revert => { + ProtoScriptExecutionResultVariant::Revert(ProtoScriptExecutionResultRevert {}) + } + ScriptExecutionResult::Panic => { + ProtoScriptExecutionResultVariant::Panic(ProtoScriptExecutionResultPanic {}) + } + ScriptExecutionResult::GenericFailure(code) => { + ProtoScriptExecutionResultVariant::GenericFailure( + ProtoScriptExecutionResultGenericFailure { code: *code }, + ) + } + }; + + ProtoScriptExecutionResult { + variant: Some(variant), + } +} + +fn proto_panic_instruction( + panic_instruction: &PanicInstruction, 
+) -> crate::protobuf_types::PanicInstruction { + use crate::protobuf_types::PanicReason as ProtoPanicReason; + + let reason_value = *panic_instruction.reason() as u8; + let reason = ProtoPanicReason::try_from(i32::from(reason_value)) + .unwrap_or(ProtoPanicReason::Unknown); + + crate::protobuf_types::PanicInstruction { + reason: reason as i32, + instruction: *panic_instruction.instruction(), + } +} + +pub fn proto_receipt_from_receipt(receipt: &FuelReceipt) -> ProtoReceipt { + match receipt { + FuelReceipt::Call { + id, + to, + amount, + asset_id, + gas, + param1, + param2, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Call( + crate::protobuf_types::CallReceipt { + id: id.as_ref().to_vec(), + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + gas: *gas, + param1: *param1, + param2: *param2, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::Return { id, val, pc, is } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::ReturnReceipt( + crate::protobuf_types::ReturnReceipt { + id: id.as_ref().to_vec(), + val: *val, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::ReturnData { + id, + ptr, + len, + digest, + pc, + is, + data, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::ReturnData( + crate::protobuf_types::ReturnDataReceipt { + id: id.as_ref().to_vec(), + ptr: *ptr, + len: *len, + digest: digest.as_ref().to_vec(), + pc: *pc, + is: *is, + data: data.as_ref().map(|b| b.to_vec()), + }, + )), + }, + FuelReceipt::Panic { + id, + reason, + pc, + is, + contract_id, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Panic( + crate::protobuf_types::PanicReceipt { + id: id.as_ref().to_vec(), + reason: Some(proto_panic_instruction(reason)), + pc: *pc, + is: *is, + contract_id: contract_id.as_ref().map(|cid| cid.as_ref().to_vec()), + }, + )), + }, + FuelReceipt::Revert { id, ra, pc, is } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Revert( + crate::protobuf_types::RevertReceipt 
{ + id: id.as_ref().to_vec(), + ra: *ra, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::Log { + id, + ra, + rb, + rc, + rd, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Log( + crate::protobuf_types::LogReceipt { + id: id.as_ref().to_vec(), + ra: *ra, + rb: *rb, + rc: *rc, + rd: *rd, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::LogData { + id, + ra, + rb, + ptr, + len, + digest, + pc, + is, + data, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::LogData( + crate::protobuf_types::LogDataReceipt { + id: id.as_ref().to_vec(), + ra: *ra, + rb: *rb, + ptr: *ptr, + len: *len, + digest: digest.as_ref().to_vec(), + pc: *pc, + is: *is, + data: data.as_ref().map(|b| b.to_vec()), + }, + )), + }, + FuelReceipt::Transfer { + id, + to, + amount, + asset_id, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Transfer( + crate::protobuf_types::TransferReceipt { + id: id.as_ref().to_vec(), + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::TransferOut { + id, + to, + amount, + asset_id, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::TransferOut( + crate::protobuf_types::TransferOutReceipt { + id: id.as_ref().to_vec(), + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::ScriptResult { result, gas_used } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::ScriptResult( + crate::protobuf_types::ScriptResultReceipt { + result: Some(proto_script_execution_result(result)), + gas_used: *gas_used, + }, + )), + }, + FuelReceipt::MessageOut { + sender, + recipient, + amount, + nonce, + len, + digest, + data, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::MessageOut( + crate::protobuf_types::MessageOutReceipt { + sender: sender.as_ref().to_vec(), + recipient: recipient.as_ref().to_vec(), + amount: *amount, + 
nonce: nonce.as_ref().to_vec(), + len: *len, + digest: digest.as_ref().to_vec(), + data: data.as_ref().map(|b| b.to_vec()), + }, + )), + }, + FuelReceipt::Mint { + sub_id, + contract_id, + val, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Mint( + crate::protobuf_types::MintReceipt { + sub_id: sub_id.as_ref().to_vec(), + contract_id: contract_id.as_ref().to_vec(), + val: *val, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::Burn { + sub_id, + contract_id, + val, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Burn( + crate::protobuf_types::BurnReceipt { + sub_id: sub_id.as_ref().to_vec(), + contract_id: contract_id.as_ref().to_vec(), + val: *val, + pc: *pc, + is: *is, + }, + )), + }, + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs new file mode 100644 index 00000000000..bf715b0e30e --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs @@ -0,0 +1,1169 @@ +#[cfg(feature = "fault-proving")] +use crate::blocks::importer_and_db_source::serializer_adapter::ChainId; +use crate::{ + protobuf_types::{ + Block as ProtoBlock, + ContractInput as ProtoContractInput, + ContractOutput as ProtoContractOutput, + Header as ProtoHeader, + Input as ProtoInput, + Output as ProtoOutput, + PanicInstruction as ProtoPanicInstruction, + Policies as ProtoPolicies, + StorageSlot as ProtoStorageSlot, + Transaction as ProtoTransaction, + TxPointer as ProtoTxPointer, + UpgradePurpose as ProtoUpgradePurpose, + UtxoId as ProtoUtxoId, + block::VersionedBlock as ProtoVersionedBlock, + header::VersionedHeader as ProtoVersionedHeader, + input::Variant as ProtoInputVariant, + output::Variant as ProtoOutputVariant, + receipt::Variant as ProtoReceiptVariant, + 
script_execution_result::Variant as ProtoScriptExecutionResultVariant, + transaction::Variant as ProtoTransactionVariant, + upgrade_purpose::Variant as ProtoUpgradePurposeVariant, + }, + result::Error, +}; +use anyhow::anyhow; +use fuel_core_types::{ + blockchain::{ + block::Block as FuelBlock, + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::{ + DaBlockHeight, + Empty, + }, + }, + fuel_asm::{ + PanicInstruction, + PanicReason, + }, + fuel_tx::{ + Address, + BlobBody, + Bytes32, + Input, + Output, + Receipt as FuelReceipt, + ScriptExecutionResult, + StorageSlot, + Transaction as FuelTransaction, + TxPointer, + UpgradePurpose, + UploadBody, + UtxoId, + Witness, + field::ReceiptsRoot as _, + policies::{ + Policies as FuelPolicies, + PoliciesBits, + PolicyType, + }, + }, + fuel_types::{ + AssetId, + ContractId, + Nonce, + SubAssetId, + }, + tai64, +}; + +fn tx_pointer_from_proto(proto: &ProtoTxPointer) -> crate::result::Result { + let block_height = proto.block_height.into(); + #[allow(clippy::useless_conversion)] + let tx_index = proto.tx_index.try_into().map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_index to target type: {}", e)) + })?; + Ok(TxPointer::new(block_height, tx_index)) +} + +fn storage_slot_from_proto( + proto: &ProtoStorageSlot, +) -> crate::result::Result { + let key = Bytes32::try_from(proto.key.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot key to Bytes32: {}", + e + )) + })?; + let value = Bytes32::try_from(proto.value.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot value to Bytes32: {}", + e + )) + })?; + Ok(StorageSlot::new(key, value)) +} + +fn contract_input_from_proto( + proto: &ProtoContractInput, +) -> crate::result::Result { + let utxo_proto = proto.utxo_id.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing utxo_id on contract input")) + })?; + let utxo_id = 
utxo_id_from_proto(utxo_proto)?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert balance_root to Bytes32: {}", e)) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert state_root to Bytes32: {}", e)) + })?; + let tx_pointer_proto = proto.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on contract input")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let contract_id = + fuel_core_types::fuel_types::ContractId::try_from(proto.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(fuel_core_types::fuel_tx::input::contract::Contract { + utxo_id, + balance_root, + state_root, + tx_pointer, + contract_id, + }) +} + +fn contract_output_from_proto( + proto: &ProtoContractOutput, +) -> crate::result::Result { + let input_index = u16::try_from(proto.input_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output input_index to u16: {}", + e + )) + })?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output balance_root to Bytes32: {}", + e + )) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output state_root to Bytes32: {}", + e + )) + })?; + + Ok(fuel_core_types::fuel_tx::output::contract::Contract { + input_index, + balance_root, + state_root, + }) +} + +fn output_from_proto_output(proto_output: &ProtoOutput) -> crate::result::Result { + match proto_output + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing output variant")))? 
+ { + ProtoOutputVariant::Coin(coin) => { + let to = Address::try_from(coin.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = + fuel_core_types::fuel_types::AssetId::try_from(coin.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::coin(to, coin.amount, asset_id)) + } + ProtoOutputVariant::Contract(contract) => { + let contract = contract_output_from_proto(contract)?; + Ok(Output::Contract(contract)) + } + ProtoOutputVariant::Change(change) => { + let to = Address::try_from(change.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + change.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::change(to, change.amount, asset_id)) + } + ProtoOutputVariant::Variable(variable) => { + let to = Address::try_from(variable.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + variable.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::variable(to, variable.amount, asset_id)) + } + ProtoOutputVariant::ContractCreated(contract_created) => { + let contract_id = fuel_core_types::fuel_types::ContractId::try_from( + contract_created.contract_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let state_root = Bytes32::try_from(contract_created.state_root.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state_root to Bytes32: {}", + e + )) + })?; + Ok(Output::contract_created(contract_id, state_root)) + } + } +} + +fn upgrade_purpose_from_proto( + proto: &ProtoUpgradePurpose, +) -> crate::result::Result { + match proto + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing upgrade purpose variant")))? 
+ { + ProtoUpgradePurposeVariant::ConsensusParameters(consensus) => { + let witness_index = u16::try_from(consensus.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let checksum = + Bytes32::try_from(consensus.checksum.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert checksum to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + }) + } + ProtoUpgradePurposeVariant::StateTransition(state) => { + let root = Bytes32::try_from(state.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state transition root to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::StateTransition { root }) + } + } +} + +fn utxo_id_from_proto(proto_utxo: &ProtoUtxoId) -> crate::result::Result { + let tx_id = Bytes32::try_from(proto_utxo.tx_id.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_id to Bytes32: {}", e)) + })?; + let output_index = u16::try_from(proto_utxo.output_index).map_err(|e| { + Error::Serialization(anyhow!("Could not convert output_index to u16: {}", e)) + })?; + Ok(UtxoId::new(tx_id, output_index)) +} + +pub fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { + bytes.as_ref().to_vec() +} + +fn script_execution_result_from_proto( + proto: &crate::protobuf_types::ScriptExecutionResult, +) -> crate::result::Result { + let variant = proto.variant.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing script execution result variant")) + })?; + + let result = match variant { + ProtoScriptExecutionResultVariant::Success(_) => ScriptExecutionResult::Success, + ProtoScriptExecutionResultVariant::Revert(_) => ScriptExecutionResult::Revert, + ProtoScriptExecutionResultVariant::Panic(_) => ScriptExecutionResult::Panic, + ProtoScriptExecutionResultVariant::GenericFailure(failure) => { + 
ScriptExecutionResult::GenericFailure(failure.code) + } + }; + + Ok(result) +} + +fn panic_instruction_from_proto(proto: &ProtoPanicInstruction) -> PanicInstruction { + use crate::protobuf_types::PanicReason as ProtoPanicReason; + + let reason_proto = + ProtoPanicReason::try_from(proto.reason).unwrap_or(ProtoPanicReason::Unknown); + let reason = PanicReason::from(reason_proto as u8); + PanicInstruction::error(reason, proto.instruction) +} + +fn receipt_from_proto( + proto_receipt: &crate::protobuf_types::Receipt, +) -> crate::result::Result { + let variant = proto_receipt + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing receipt variant")))?; + + let receipt = match variant { + ProtoReceiptVariant::Call(call) => { + let id = ContractId::try_from(call.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let to = ContractId::try_from(call.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = AssetId::try_from(call.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::call( + id, + to, + call.amount, + asset_id, + call.gas, + call.param1, + call.param2, + call.pc, + call.is, + )) + } + ProtoReceiptVariant::ReturnReceipt(ret) => { + let id = ContractId::try_from(ret.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::ret(id, ret.val, ret.pc, ret.is)) + } + ProtoReceiptVariant::ReturnData(rd) => { + let id = ContractId::try_from(rd.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let digest = Bytes32::try_from(rd.digest.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert return data digest to Bytes32: {}", + e + )) + })?; + Ok(FuelReceipt::return_data_with_len( + id, + rd.ptr, + rd.len, + digest, + rd.pc, + rd.is, + rd.data.clone(), + )) + } + ProtoReceiptVariant::Panic(panic_receipt) => { + let id = ContractId::try_from(panic_receipt.id.as_slice()) + .map_err(|e| 
Error::Serialization(anyhow!(e)))?; + let reason_proto = panic_receipt + .reason + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing panic reason")))?; + let reason = panic_instruction_from_proto(reason_proto); + let contract_id = panic_receipt + .contract_id + .as_ref() + .map(|cid| { + ContractId::try_from(cid.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e))) + }) + .transpose()?; + Ok( + FuelReceipt::panic(id, reason, panic_receipt.pc, panic_receipt.is) + .with_panic_contract_id(contract_id), + ) + } + ProtoReceiptVariant::Revert(revert) => { + let id = ContractId::try_from(revert.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::revert(id, revert.ra, revert.pc, revert.is)) + } + ProtoReceiptVariant::Log(log) => { + let id = ContractId::try_from(log.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::log( + id, log.ra, log.rb, log.rc, log.rd, log.pc, log.is, + )) + } + ProtoReceiptVariant::LogData(log) => { + let id = ContractId::try_from(log.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let digest = Bytes32::try_from(log.digest.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert log data digest to Bytes32: {}", + e + )) + })?; + Ok(FuelReceipt::log_data_with_len( + id, + log.ra, + log.rb, + log.ptr, + log.len, + digest, + log.pc, + log.is, + log.data.clone(), + )) + } + ProtoReceiptVariant::Transfer(transfer) => { + let id = ContractId::try_from(transfer.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let to = ContractId::try_from(transfer.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = AssetId::try_from(transfer.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::transfer( + id, + to, + transfer.amount, + asset_id, + transfer.pc, + transfer.is, + )) + } + ProtoReceiptVariant::TransferOut(transfer) => { + let id = 
ContractId::try_from(transfer.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let to = Address::try_from(transfer.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = AssetId::try_from(transfer.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::transfer_out( + id, + to, + transfer.amount, + asset_id, + transfer.pc, + transfer.is, + )) + } + ProtoReceiptVariant::ScriptResult(result) => { + let script_result = result.result.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing script result payload")) + })?; + let execution_result = script_execution_result_from_proto(script_result)?; + Ok(FuelReceipt::script_result( + execution_result, + result.gas_used, + )) + } + ProtoReceiptVariant::MessageOut(msg) => { + let sender = Address::try_from(msg.sender.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let recipient = Address::try_from(msg.recipient.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let nonce = Nonce::try_from(msg.nonce.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let digest = Bytes32::try_from(msg.digest.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert message digest to Bytes32: {}", + e + )) + })?; + Ok(FuelReceipt::message_out_with_len( + sender, + recipient, + msg.amount, + nonce, + msg.len, + digest, + msg.data.clone(), + )) + } + ProtoReceiptVariant::Mint(mint) => { + let sub_id = SubAssetId::try_from(mint.sub_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let contract_id = ContractId::try_from(mint.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::mint( + sub_id, + contract_id, + mint.val, + mint.pc, + mint.is, + )) + } + ProtoReceiptVariant::Burn(burn) => { + let sub_id = SubAssetId::try_from(burn.sub_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let contract_id = 
ContractId::try_from(burn.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::burn( + sub_id, + contract_id, + burn.val, + burn.pc, + burn.is, + )) + } + }?; + + Ok(receipt) +} + +pub fn fuel_block_from_protobuf( + proto_block: ProtoBlock, + msg_ids: &[fuel_core_types::fuel_tx::MessageId], + event_inbox_root: Bytes32, +) -> crate::result::Result<(FuelBlock, Vec)> { + let versioned_block = proto_block + .versioned_block + .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) + .map_err(Error::Serialization)?; + let (partial_header, txs, receipts) = match versioned_block { + ProtoVersionedBlock::V1(v1_inner) => { + let proto_header = v1_inner + .header + .clone() + .ok_or_else(|| anyhow::anyhow!("Missing protobuf header")) + .map_err(Error::Serialization)?; + let partial_header = partial_header_from_proto_header(&proto_header)?; + let txs = v1_inner + .transactions + .iter() + .map(tx_from_proto_tx) + .collect::>()?; + let receipts = v1_inner + .receipts + .iter() + .map(receipt_from_proto) + .collect::>()?; + (partial_header, txs, receipts) + } + }; + let block = FuelBlock::new( + partial_header, + txs, + msg_ids, + event_inbox_root, + #[cfg(feature = "fault-proving")] + &ChainId::default(), + ) + .map_err(|e| anyhow!(e)) + .map_err(Error::Serialization)?; + Ok((block, receipts)) +} + +pub fn partial_header_from_proto_header( + proto_header: &ProtoHeader, +) -> crate::result::Result { + let partial_header = PartialBlockHeader { + consensus: proto_header_to_empty_consensus_header(proto_header)?, + application: proto_header_to_empty_application_header(proto_header)?, + }; + Ok(partial_header) +} + +pub fn tx_from_proto_tx( + proto_tx: &ProtoTransaction, +) -> crate::result::Result { + let variant = proto_tx + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing transaction variant")))?; + + match variant { + ProtoTransactionVariant::Script(proto_script) => { + let policies = proto_script + 
.policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_script + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_script + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_script + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let mut script_tx = FuelTransaction::script( + proto_script.script_gas_limit, + proto_script.script.clone(), + proto_script.script_data.clone(), + policies, + inputs, + outputs, + witnesses, + ); + *script_tx.receipts_root_mut() = Bytes32::try_from( + proto_script.receipts_root.as_slice(), + ) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert receipts_root to Bytes32: {}", + e + )) + })?; + + Ok(FuelTransaction::Script(script_tx)) + } + ProtoTransactionVariant::Create(proto_create) => { + let policies = proto_create + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_create + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_create + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_create + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let storage_slots = proto_create + .storage_slots + .iter() + .map(storage_slot_from_proto) + .collect::>>()?; + let salt = + fuel_core_types::fuel_types::Salt::try_from(proto_create.salt.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let bytecode_witness_index = + u16::try_from(proto_create.bytecode_witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert bytecode_witness_index to u16: {}", + e + )) + })?; + + let create_tx = FuelTransaction::create( + bytecode_witness_index, + policies, + salt, + storage_slots, + inputs, + outputs, + witnesses, + ); + + Ok(FuelTransaction::Create(create_tx)) 
+ } + ProtoTransactionVariant::Mint(proto_mint) => { + let tx_pointer_proto = proto_mint.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on mint transaction")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let input_contract_proto = + proto_mint.input_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing input_contract on mint transaction" + )) + })?; + let input_contract = contract_input_from_proto(input_contract_proto)?; + let output_contract_proto = + proto_mint.output_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing output_contract on mint transaction" + )) + })?; + let output_contract = contract_output_from_proto(output_contract_proto)?; + let mint_asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_mint.mint_asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + let mint_tx = FuelTransaction::mint( + tx_pointer, + input_contract, + output_contract, + proto_mint.mint_amount, + mint_asset_id, + proto_mint.gas_price, + ); + + Ok(FuelTransaction::Mint(mint_tx)) + } + ProtoTransactionVariant::Upgrade(proto_upgrade) => { + let purpose_proto = proto_upgrade.purpose.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing purpose on upgrade transaction")) + })?; + let upgrade_purpose = upgrade_purpose_from_proto(purpose_proto)?; + let policies = proto_upgrade + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_upgrade + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upgrade + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upgrade + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + + let upgrade_tx = FuelTransaction::upgrade( + upgrade_purpose, + policies, + inputs, + outputs, + witnesses, + ); + + 
Ok(FuelTransaction::Upgrade(upgrade_tx)) + } + ProtoTransactionVariant::Upload(proto_upload) => { + let policies = proto_upload + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_upload + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upload + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upload + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let root = Bytes32::try_from(proto_upload.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert upload root to Bytes32: {}", + e + )) + })?; + let witness_index = + u16::try_from(proto_upload.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let subsection_index = + u16::try_from(proto_upload.subsection_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsection_index to u16: {}", + e + )) + })?; + let subsections_number = u16::try_from(proto_upload.subsections_number) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsections_number to u16: {}", + e + )) + })?; + let proof_set = proto_upload + .proof_set + .iter() + .map(|entry| { + Bytes32::try_from(entry.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert proof_set entry to Bytes32: {}", + e + )) + }) + }) + .collect::>>()?; + + let body = UploadBody { + root, + witness_index, + subsection_index, + subsections_number, + proof_set, + }; + + let upload_tx = + FuelTransaction::upload(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Upload(upload_tx)) + } + ProtoTransactionVariant::Blob(proto_blob) => { + let policies = proto_blob + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_blob + .inputs + .iter() + 
.map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_blob + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_blob + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let blob_id = fuel_core_types::fuel_types::BlobId::try_from( + proto_blob.blob_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = u16::try_from(proto_blob.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert blob witness_index to u16: {}", + e + )) + })?; + let body = BlobBody { + id: blob_id, + witness_index, + }; + + let blob_tx = + FuelTransaction::blob(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Blob(blob_tx)) + } + } +} + +fn input_from_proto_input(proto_input: &ProtoInput) -> crate::result::Result { + let variant = proto_input + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing input variant")))?; + + match variant { + ProtoInputVariant::CoinSigned(proto_coin_signed) => { + let utxo_proto = proto_coin_signed + .utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = + Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_signed.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_signed + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let witness_index = + u16::try_from(proto_coin_signed.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::coin_signed( + 
utxo_id, + owner, + proto_coin_signed.amount, + asset_id, + tx_pointer, + witness_index, + )) + } + ProtoInputVariant::CoinPredicate(proto_coin_predicate) => { + let utxo_proto = proto_coin_predicate + .utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = Address::try_from(proto_coin_predicate.owner.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_predicate.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_predicate + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + + Ok(Input::coin_predicate( + utxo_id, + owner, + proto_coin_predicate.amount, + asset_id, + tx_pointer, + proto_coin_predicate.predicate_gas_used, + proto_coin_predicate.predicate.clone(), + proto_coin_predicate.predicate_data.clone(), + )) + } + ProtoInputVariant::Contract(proto_contract) => { + let contract = contract_input_from_proto(proto_contract)?; + Ok(Input::Contract(contract)) + } + ProtoInputVariant::MessageCoinSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert 
witness_index to u16: {}", + e + )) + })?; + + Ok(Input::message_coin_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + )) + } + ProtoInputVariant::MessageCoinPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_coin_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + ProtoInputVariant::MessageDataSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::message_data_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + proto_message.data.clone(), + )) + } + ProtoInputVariant::MessageDataPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + 
Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_data_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.data.clone(), + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + } +} + +fn policies_from_proto_policies(proto_policies: &ProtoPolicies) -> FuelPolicies { + let ProtoPolicies { bits, values } = proto_policies; + let mut policies = FuelPolicies::default(); + let bits = + PoliciesBits::from_bits(*bits).expect("Should be able to create from `u32`"); + if bits.contains(PoliciesBits::Tip) + && let Some(tip) = values.first() + { + policies.set(PolicyType::Tip, Some(*tip)); + } + if bits.contains(PoliciesBits::WitnessLimit) + && let Some(witness_limit) = values.get(1) + { + policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); + } + if bits.contains(PoliciesBits::Maturity) + && let Some(maturity) = values.get(2) + { + policies.set(PolicyType::Maturity, Some(*maturity)); + } + if bits.contains(PoliciesBits::MaxFee) + && let Some(max_fee) = values.get(3) + { + policies.set(PolicyType::MaxFee, Some(*max_fee)); + } + if bits.contains(PoliciesBits::Expiration) + && let Some(expiration) = values.get(4) + { + policies.set(PolicyType::Expiration, Some(*expiration)); + } + if bits.contains(PoliciesBits::Owner) + && let Some(owner) = values.get(5) + { + policies.set(PolicyType::Owner, Some(*owner)); + } + policies +} + +pub fn proto_header_to_empty_application_header( + proto_header: &ProtoHeader, +) -> crate::result::Result> { + match proto_header.versioned_header.clone() { + 
Some(ProtoVersionedHeader::V1(header)) => { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + if cfg!(feature = "fault-proving") { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) + } else { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) + } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } +} + +/// Alias the consensus header into an empty one. +pub fn proto_header_to_empty_consensus_header( + proto_header: &ProtoHeader, +) -> crate::result::Result> { + match proto_header.versioned_header.clone() { + Some(ProtoVersionedHeader::V1(header)) => { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root).ok_or( + Error::Serialization(anyhow!("Could create `Bytes32` from bytes")), + )?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + if cfg!(feature = "fault-proving") { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) + .ok_or(Error::Serialization(anyhow!( + "Could create `Bytes32` from bytes" + )))?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } else { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) 
+ } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index be8b6b19e94..15287180a6a 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -1,6 +1,12 @@ -use crate::blocks::{ - BlockSourceEvent, - importer_and_db_source::BlockSerializer, +use crate::{ + blocks::{ + BlockSourceEvent, + importer_and_db_source::BlockSerializer, + }, + result::{ + Error, + Result, + }, }; use fuel_core_services::{ RunnableService, @@ -12,6 +18,7 @@ use fuel_core_services::{ }; use fuel_core_storage::{ self, + Error as StorageError, StorageInspect, tables::{ FuelBlocks, @@ -19,75 +26,88 @@ use fuel_core_storage::{ }, }; use fuel_core_types::{ + blockchain::block::Block as FuelBlock, fuel_tx::{ + Receipt, Transaction, TxId, }, fuel_types::BlockHeight, }; -use std::time::Duration; +use futures::{ + StreamExt, + TryStreamExt, + stream::FuturesOrdered, +}; use tokio::sync::mpsc::Sender; -pub struct SyncTask { +pub struct SyncTask { serializer: Serializer, block_return_sender: Sender>, db: DB, + receipts: Receipts, next_height: BlockHeight, - maybe_stop_height: Option, - new_ending_height: tokio::sync::oneshot::Receiver, + // exclusive, does not ask for this block + stop_height: BlockHeight, +} + +pub trait TxReceipts: 'static + Send + Sync { + fn get_receipts( + &self, + tx_id: &TxId, + ) -> impl Future>> + Send; } -impl SyncTask +impl SyncTask where Serializer: BlockSerializer + Send, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: Send + Sync + 'static, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { pub fn new( serializer: 
Serializer, block_return: Sender>, db: DB, + receipts: Receipts, db_starting_height: BlockHeight, - db_ending_height: Option, - new_ending_height: tokio::sync::oneshot::Receiver, + // does not ask for this block (exclusive) + db_ending_height: BlockHeight, ) -> Self { Self { serializer, block_return_sender: block_return, db, + receipts, next_height: db_starting_height, - maybe_stop_height: db_ending_height, - new_ending_height, - } - } - - async fn maybe_update_stop_height(&mut self) { - if let Ok(last_height) = self.new_ending_height.try_recv() { - tracing::info!("updating last height to {}", last_height); - self.maybe_stop_height = Some(last_height); + stop_height: db_ending_height, } } - fn get_block( + async fn get_block_and_receipts( &self, height: &BlockHeight, - ) -> Result, E> { - let maybe_block = StorageInspect::::get(&self.db, height)?; + ) -> Result)>> { + let maybe_block = StorageInspect::::get(&self.db, height) + .map_err(Error::block_source_error)?; if let Some(block) = maybe_block { let tx_ids = block.transactions(); let txs = self.get_txs(tx_ids)?; + let receipts = self.get_receipts(tx_ids).await?; let block = block.into_owned().uncompress(txs); - Ok(Some(block)) + Ok(Some((block, receipts))) } else { Ok(None) } } - fn get_txs(&self, tx_ids: &[TxId]) -> Result, E> { + fn get_txs(&self, tx_ids: &[TxId]) -> Result> { let mut txs = Vec::new(); for tx_id in tx_ids { - match StorageInspect::::get(&self.db, tx_id)? { + match StorageInspect::::get(&self.db, tx_id) + .map_err(Error::block_source_error)? + { Some(tx) => { tracing::debug!("found tx id: {:?}", tx_id); txs.push(tx.into_owned()); @@ -100,51 +120,55 @@ where Ok(txs) } - // For now just have arbitrary 10 ms sleep to avoid busy looping. - // This could be more complicated with increasing backoff times, etc. 
- async fn go_to_sleep_before_continuing(&self) { - tokio::time::sleep(Duration::from_millis(10)).await; + async fn get_receipts(&self, tx_ids: &[TxId]) -> Result> { + let receipt_futs = tx_ids.iter().map(|tx_id| self.receipts.get_receipts(tx_id)); + FuturesOrdered::from_iter(receipt_futs) + .then(|res| async move { res.map_err(Error::block_source_error) }) + .try_concat() + .await } } -impl RunnableTask for SyncTask +impl RunnableTask + for SyncTask where Serializer: BlockSerializer + Send + Sync, Serializer::Block: Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { - self.maybe_update_stop_height().await; - if let Some(last_height) = self.maybe_stop_height - && self.next_height >= last_height - { + if self.next_height >= self.stop_height { tracing::info!( - "reached end height {}, putting task into hibernation", - last_height + "reached stop height {}, putting task into hibernation", + self.stop_height ); futures::future::pending().await } let next_height = self.next_height; - let res = self.get_block(&next_height); - let maybe_block = try_or_stop!(res, |e| { + let res = self.get_block_and_receipts(&next_height).await; + let maybe_block_and_receipts = try_or_stop!(res, |e| { tracing::error!("error fetching block at height {}: {:?}", next_height, e); }); - if let Some(block) = maybe_block { - let res = self.serializer.serialize_block(&block); + if let Some((block, receipts)) = maybe_block_and_receipts { + tracing::debug!( + "found block at height {:?}, sending to return channel", + next_height + ); + let res = self.serializer.serialize_block(&block, &receipts); let block = try_or_continue!(res); let event = BlockSourceEvent::OldBlock(BlockHeight::from(*next_height), block); let res = 
self.block_return_sender.send(event).await; try_or_continue!(res); self.next_height = BlockHeight::from((*next_height).saturating_add(1)); + TaskNextAction::Continue } else { tracing::warn!("no block found at height {:?}, retrying", next_height); - self.go_to_sleep_before_continuing().await; + TaskNextAction::Stop } - TaskNextAction::Continue } async fn shutdown(self) -> anyhow::Result<()> { @@ -153,14 +177,15 @@ where } #[async_trait::async_trait] -impl RunnableService for SyncTask +impl RunnableService + for SyncTask where Serializer: BlockSerializer + Send + Sync + 'static, ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { const NAME: &'static str = "BlockSourceSyncTask"; type SharedData = (); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 3820fafbf0e..9f2570d546e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,8 +1,6 @@ #![allow(non_snake_case)] use super::*; -use crate::blocks::BlockBytes; -use ::postcard::to_allocvec; use fuel_core_services::stream::{ IntoBoxStream, pending, @@ -18,12 +16,17 @@ use fuel_core_storage::{ }, }; use futures::StreamExt; -use std::collections::HashSet; +use std::collections::HashMap; +use crate::blocks::importer_and_db_source::{ + serializer_adapter::SerializerAdapter, + sync_service::TxReceipts, +}; use fuel_core_types::{ blockchain::SealedBlock, fuel_tx::{ Transaction, + TxId, UniqueIdentifier, }, fuel_types::ChainId, @@ -31,26 +34,28 @@ use fuel_core_types::{ }; use std::sync::Arc; -#[derive(Clone)] -pub struct MockSerializer; - -impl BlockSerializer for MockSerializer { - type 
Block = BlockBytes; +fn onchain_db() -> StorageTransaction> { + InMemoryStorage::default().into_transaction() +} - fn serialize_block(&self, block: &FuelBlock) -> Result { - let bytes_vec = to_allocvec(block).map_err(|e| { - Error::BlockSource(anyhow!("failed to serialize block: {}", e)) - })?; - Ok(BlockBytes::from(bytes_vec)) - } +struct MockTxReceiptsSource { + receipts_map: HashMap>, } -fn database() -> StorageTransaction> { - InMemoryStorage::default().into_transaction() +impl MockTxReceiptsSource { + fn new(receipts: &[(TxId, Vec)]) -> Self { + let receipts_map = receipts.iter().cloned().collect(); + Self { receipts_map } + } } -fn stream_with_pending(items: Vec) -> BoxStream { - tokio_stream::iter(items).chain(pending()).into_boxed() +impl TxReceipts for MockTxReceiptsSource { + async fn get_receipts(&self, tx_id: &TxId) -> Result> { + let receipts = self.receipts_map.get(tx_id).cloned().ok_or_else(|| { + Error::BlockSource(anyhow!("no receipts found for a tx with id {}", tx_id)) + })?; + Ok(receipts) + } } #[tokio::test] @@ -69,22 +74,26 @@ async fn next_block__gets_new_block_from_importer() { ); let blocks: Vec = vec![import_result]; let block_stream = tokio_stream::iter(blocks).chain(pending()).into_boxed(); - let serializer = MockSerializer; - let db = database(); + let serializer = SerializerAdapter; + let db = onchain_db(); + let receipt_source = MockTxReceiptsSource::new(&[]); let db_starting_height = BlockHeight::from(0u32); + // we don't need to sync anything, so we can use the same height for both + let db_ending_height = db_starting_height; let mut adapter = ImporterAndDbSource::new( block_stream, serializer.clone(), db, + receipt_source, db_starting_height, - None, + db_ending_height, ); // when let actual = adapter.next_block().await.unwrap(); // then - let serialized = serializer.serialize_block(&block.entity).unwrap(); + let serialized = serializer.serialize_block(&block.entity, &[]).unwrap(); let expected = BlockSourceEvent::NewBlock(*height, 
serialized); assert_eq!(expected, actual); } @@ -97,6 +106,25 @@ fn arbitrary_block_with_txs(height: BlockHeight) -> FuelBlock { block } +fn arbitrary_receipts() -> Vec { + let one = FuelReceipt::Mint { + sub_id: Default::default(), + contract_id: Default::default(), + val: 100, + pc: 0, + is: 0, + }; + let two = FuelReceipt::Transfer { + id: Default::default(), + to: Default::default(), + amount: 50, + asset_id: Default::default(), + pc: 0, + is: 0, + }; + vec![one, two] +} + #[tokio::test] async fn next_block__can_get_block_from_db() { // given @@ -104,14 +132,16 @@ async fn next_block__can_get_block_from_db() { let height1 = BlockHeight::from(0u32); let height2 = BlockHeight::from(1u32); let block = arbitrary_block_with_txs(height1); + let receipts = arbitrary_receipts(); let height = block.header().height(); - let serializer = MockSerializer; - let mut db = database(); - let mut tx = db.write_transaction(); + let serializer = SerializerAdapter; + let mut onchain_db = onchain_db(); + let mut tx = onchain_db.write_transaction(); let compressed_block = block.compress(&chain_id); tx.storage_as_mut::() .insert(height, &compressed_block) .unwrap(); + let tx_id = block.transactions()[0].id(&chain_id); tx.storage_as_mut::() .insert( &block.transactions()[0].id(&chain_id), @@ -119,13 +149,15 @@ async fn next_block__can_get_block_from_db() { ) .unwrap(); tx.commit().unwrap(); + let receipt_source = MockTxReceiptsSource::new(&[(tx_id, receipts.clone())]); let block_stream = tokio_stream::pending().into_boxed(); let db_starting_height = *height; - let db_ending_height = Some(height2); + let db_ending_height = height2; let mut adapter = ImporterAndDbSource::new( block_stream, serializer.clone(), - db, + onchain_db, + receipt_source, db_starting_height, db_ending_height, ); @@ -134,107 +166,7 @@ async fn next_block__can_get_block_from_db() { let actual = adapter.next_block().await.unwrap(); // then - let serialized = serializer.serialize_block(&block).unwrap(); + let 
serialized = serializer.serialize_block(&block, &receipts).unwrap(); let expected = BlockSourceEvent::OldBlock(*height, serialized); assert_eq!(expected, actual); } - -#[tokio::test] -async fn next_block__will_sync_blocks_from_db_after_receiving_height_from_new_end() { - // given - let chain_id = ChainId::default(); - let height1 = BlockHeight::from(0u32); - let height2 = BlockHeight::from(1u32); - let height3 = BlockHeight::from(2u32); - let block1 = arbitrary_block_with_txs(height1); - let block2 = arbitrary_block_with_txs(height2); - let serializer = MockSerializer; - let mut db = database(); - let mut tx = db.write_transaction(); - let compressed_block = block1.compress(&chain_id); - tx.storage_as_mut::() - .insert(&height1, &compressed_block) - .unwrap(); - tx.storage_as_mut::() - .insert( - &block1.transactions()[0].id(&chain_id), - &block1.transactions()[0], - ) - .unwrap(); - tx.commit().unwrap(); - let mut tx = db.write_transaction(); - let compressed_block = block2.compress(&chain_id); - tx.storage_as_mut::() - .insert(&height2, &compressed_block) - .unwrap(); - tx.storage_as_mut::() - .insert( - &block2.transactions()[0].id(&chain_id), - &block2.transactions()[0], - ) - .unwrap(); - tx.commit().unwrap(); - - // Add the imported block to db as well as streaming - let block3 = arbitrary_block_with_txs(height3); - let mut tx = db.write_transaction(); - let compressed_block = block3.compress(&chain_id); - tx.storage_as_mut::() - .insert(&height3, &compressed_block) - .unwrap(); - tx.storage_as_mut::() - .insert( - &block3.transactions()[0].id(&chain_id), - &block3.transactions()[0], - ) - .unwrap(); - tx.commit().unwrap(); - - let sealed_block = SealedBlock { - entity: block3.clone(), - consensus: Default::default(), - }; - let import_result = Arc::new( - ImportResult { - sealed_block, - tx_status: vec![], - events: vec![], - source: Default::default(), - } - .wrap(), - ); - let blocks: Vec = vec![import_result]; - let block_stream = 
stream_with_pending(blocks); - let db_starting_height = height1; - let mut adapter = ImporterAndDbSource::new( - block_stream, - serializer.clone(), - db, - db_starting_height, - None, - ); - - // when - let actual1 = adapter.next_block().await.unwrap(); - let actual2 = adapter.next_block().await.unwrap(); - let actual3 = adapter.next_block().await.unwrap(); - - // then - let actual = vec![actual1, actual2, actual3] - .into_iter() - .collect::>(); - // should receive the - let expected = vec![ - BlockSourceEvent::OldBlock(height1, serializer.serialize_block(&block1).unwrap()), - BlockSourceEvent::OldBlock(height2, serializer.serialize_block(&block2).unwrap()), - BlockSourceEvent::NewBlock(height3, serializer.serialize_block(&block3).unwrap()), - ]; - let expected: HashSet<_> = expected.into_iter().collect(); - let length = actual.len(); - let expected_length = expected.len(); - for event in &actual { - tracing::debug!("actual event: {:?}", event); - } - assert_eq!(length, expected_length); - assert_eq!(expected, actual); -} diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 884deff70d3..db3a385a008 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -9,11 +9,19 @@ use crate::{ result::Error, }; use anyhow::anyhow; +use aws_config::{ + BehaviorVersion, + default_provider::credentials::DefaultCredentialsChain, +}; use aws_sdk_s3::{ self, Client, primitives::ByteStream, }; +use flate2::{ + Compression, + write::GzEncoder, +}; use fuel_core_storage::{ Error as StorageError, StorageAsMut, @@ -28,6 +36,7 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use prost::Message; +use std::io::Write; #[allow(non_snake_case)] #[cfg(test)] @@ -36,12 +45,10 @@ mod tests; #[allow(unused)] pub struct RemoteCache { // aws configuration - aws_id: String, - aws_secret: String, - 
aws_region: String, aws_bucket: String, - url_base: String, - client: Client, + requester_pays: bool, + aws_endpoint: Option, + client: Option, // track consistency between runs local_persisted: S, @@ -54,21 +61,17 @@ pub struct RemoteCache { impl RemoteCache { #[allow(clippy::too_many_arguments)] pub fn new( - aws_id: String, - aws_secret: String, - aws_region: String, aws_bucket: String, - url_base: String, - client: Client, + requester_pays: bool, + aws_endpoint: Option, + client: Option, local_persisted: S, sync_from: BlockHeight, ) -> RemoteCache { RemoteCache { - aws_id, - aws_secret, - aws_region, aws_bucket, - url_base, + requester_pays, + aws_endpoint, client, local_persisted, sync_from, @@ -78,8 +81,29 @@ impl RemoteCache { } } - fn url_for_block(base: &str, key: &str) -> String { - format!("{}/{}", base, key,) + async fn client(&mut self) -> crate::result::Result<&Client> { + self.init_client().await; + self.client + .as_ref() + .ok_or(Error::db_error(anyhow!("AWS S3 client is uninitialized"))) + } + + // only runs the first time + async fn init_client(&mut self) { + if self.client.is_none() { + let credentials = DefaultCredentialsChain::builder().build().await; + let sdk_config = aws_config::defaults(BehaviorVersion::latest()) + .credentials_provider(credentials) + .load() + .await; + let mut config_builder = aws_sdk_s3::config::Builder::from(&sdk_config); + if let Some(endpoint) = &self.aws_endpoint { + config_builder.set_endpoint_url(Some(endpoint.to_string())); + } + let config = config_builder.force_path_style(true).build(); + let client = aws_sdk_s3::Client::from_conf(config); + self.client = Some(client); + } } } @@ -102,13 +126,16 @@ where let key = block_height_to_key(&height); let mut buf = Vec::new(); block.encode(&mut buf).map_err(Error::db_error)?; - let body = ByteStream::from(buf); + let zipped = gzip_bytes(&buf)?; + let body = ByteStream::from(zipped); let req = self - .client + .client() + .await? 
.put_object() .bucket(&self.aws_bucket) .key(&key) .body(body) + .content_encoding("gzip") .content_type("application/octet-stream"); let _ = req.send().await.map_err(Error::db_error)?; match block_event { @@ -176,21 +203,22 @@ where last: BlockHeight, ) -> crate::result::Result { // TODO: Check if it exists - let region = self.aws_region.clone(); let bucket = self.aws_bucket.clone(); - let base = self.url_base.clone(); + let requester_pays = self.requester_pays; + let aws_endpoint = self.aws_endpoint.clone(); let stream = futures::stream::iter((*first..=*last).map(move |height| { - let key = block_height_to_key(&BlockHeight::new(height)); - let url = Self::url_for_block(&base, &key); - crate::block_range_response::RemoteBlockRangeResponse { - region: region.clone(), + let block_height = BlockHeight::new(height); + let key = block_height_to_key(&block_height); + let res = crate::block_range_response::RemoteS3Response { bucket: bucket.clone(), key: key.clone(), - url, - } + requester_pays, + aws_endpoint: aws_endpoint.clone(), + }; + (block_height, res) })); - Ok(BlockRangeResponse::Remote(Box::pin(stream))) + Ok(BlockRangeResponse::S3(Box::pin(stream))) } async fn get_current_height(&self) -> crate::result::Result> { @@ -227,5 +255,15 @@ where } pub fn block_height_to_key(height: &BlockHeight) -> String { - format!("{:08x}", height) + let raw: [u8; 4] = height.to_bytes(); + format!( + "{:02}/{:02}/{:02}/{:02}", + &raw[0], &raw[1], &raw[2], &raw[3] + ) +} + +pub fn gzip_bytes(data: &[u8]) -> crate::result::Result> { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(data).map_err(Error::db_error)?; + encoder.finish().map_err(Error::db_error) } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index 16b52776de7..1e261281953 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ 
b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -1,6 +1,6 @@ use super::*; use crate::{ - block_range_response::RemoteBlockRangeResponse, + block_range_response::RemoteS3Response, blocks::importer_and_db_source::{ BlockSerializer, serializer_adapter::SerializerAdapter, @@ -31,7 +31,7 @@ fn database() -> StorageTransaction> { fn arb_proto_block() -> ProtoBlock { let block = FuelBlock::default(); let serializer = SerializerAdapter; - serializer.serialize_block(&block).unwrap() + serializer.serialize_block(&block, &[]).unwrap() } fn put_happy_rule() -> Rule { mock!(Client::put_object) @@ -45,16 +45,11 @@ fn put_happy_rule() -> Rule { async fn store_block__happy_path() { // given let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); let storage = database(); let sync_from = BlockHeight::new(0); - let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, sync_from, - ); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, Some(client), storage, sync_from); let block_height = BlockHeight::new(123); let block = arb_proto_block(); let block = BlockSourceEvent::OldBlock(block_height, block); @@ -70,20 +65,14 @@ async fn store_block__happy_path() { async fn get_block_range__happy_path() { // given let client = mock_client!(aws_sdk_s3, []); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); let storage = database(); let sync_from = BlockHeight::new(0); let adapter = RemoteCache::new( - aws_id, - aws_secret, - aws_region.clone(), aws_bucket.clone(), - base.clone(), - client, + false, + None, + Some(client), 
storage, sync_from, ); @@ -98,18 +87,18 @@ async fn get_block_range__happy_path() { BlockRangeResponse::Literal(_) => { panic!("Expected remote response, got literal"); } - BlockRangeResponse::Remote(stream) => stream.collect::>().await, + BlockRangeResponse::S3(stream) => stream.collect::>().await, }; let expected = (999..=1003) .map(|height| { let key = block_height_to_key(&BlockHeight::new(height)); - let url = RemoteCache::<()>::url_for_block(&base, &key); - RemoteBlockRangeResponse { - region: aws_region.clone(), + let res = RemoteS3Response { bucket: aws_bucket.clone(), key, - url, - } + requester_pays: false, + aws_endpoint: None, + }; + (BlockHeight::new(height), res) }) .collect::>(); assert_eq!(actual, expected); @@ -119,16 +108,11 @@ async fn get_block_range__happy_path() { async fn get_current_height__returns_highest_continuous_block() { // given let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); let storage = database(); let sync_from = BlockHeight::new(0); - let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, sync_from, - ); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, Some(client), storage, sync_from); let expected = BlockHeight::new(123); let block = arb_proto_block(); @@ -153,15 +137,10 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig .unwrap(); tx.commit().unwrap(); let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); let sync_from = BlockHeight::new(0); - let mut adapter = RemoteCache::new( - 
aws_id, aws_secret, aws_region, aws_bucket, base, client, storage, sync_from, - ); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, Some(client), storage, sync_from); let expected = BlockHeight::new(3); let block = arb_proto_block(); @@ -178,18 +157,13 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); let client = mock_client!(aws_sdk_s3, rules.iter()); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); // given let db = database(); let sync_from = BlockHeight::new(0); - let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, db, sync_from, - ); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, Some(client), db, sync_from); for height in 2..=10u32 { let height = BlockHeight::from(height); @@ -215,18 +189,13 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() { let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); let client = mock_client!(aws_sdk_s3, rules.iter()); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); // given let db = database(); let sync_from = BlockHeight::new(0); - let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, db, sync_from, - ); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, Some(client), db, sync_from); let height = BlockHeight::from(0u32); let some_block = 
arb_proto_block(); @@ -251,18 +220,13 @@ async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() async fn store_block__new_block_comes_first() { let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); let client = mock_client!(aws_sdk_s3, rules.iter()); - let aws_id = "test-id".to_string(); - let aws_secret = "test-secret".to_string(); - let aws_region = "test-region".to_string(); let aws_bucket = "test-bucket".to_string(); - let base = "http://good.com".to_string(); // given let db = database(); let sync_from = BlockHeight::new(0); - let mut adapter = RemoteCache::new( - aws_id, aws_secret, aws_region, aws_bucket, base, client, db, sync_from, - ); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, Some(client), db, sync_from); // when let height = BlockHeight::from(0u32); diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 4672e420625..d3f701748c2 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -207,7 +207,7 @@ where S: Unpin + ReadTransaction + std::fmt::Debug, for<'a> StorageTransaction<&'a S>: StorageInspect, { - type Item = ProtoBlock; + type Item = (BlockHeight, ProtoBlock); fn poll_next( self: Pin<&mut Self>, @@ -233,7 +233,7 @@ where None }; this.next = next; - Poll::Ready(Some(block.into_owned())) + Poll::Ready(Some((height, block.into_owned()))) } Ok(None) => { tracing::debug!("No block at height: {:?}", height); diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index fe030080da0..6ed9f2c851c 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -28,7 +28,9 @@ fn proto_block_with_height(height: BlockHeight) -> ProtoBlock { let 
serializer_adapter = SerializerAdapter; let mut default_block = FuelBlock::::default(); default_block.header_mut().set_block_height(height); - serializer_adapter.serialize_block(&default_block).unwrap() + serializer_adapter + .serialize_block(&default_block, &[]) + .unwrap() } #[tokio::test] @@ -90,7 +92,7 @@ async fn get_block__can_get_expected_range() { let actual = stream.collect::>().await; // then - assert_eq!(actual, vec![expected_2, expected_3]); + assert_eq!(actual, vec![(height_2, expected_2), (height_3, expected_3)]); } #[tokio::test] diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index fe14512cfcf..1236ccda961 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -13,10 +13,7 @@ use crate::{ }, result::Result, }; -use aws_sdk_s3::config::{ - Credentials, - Region, -}; + use fuel_core_storage::{ Error as StorageError, StorageInspect, @@ -29,7 +26,6 @@ use fuel_core_storage::{ }, }; use fuel_core_types::fuel_types::BlockHeight; -use std::borrow::Cow; /// A union of a storage and a remote cache for the block aggregator. 
This allows both to be /// supported in production depending on the configuration @@ -46,41 +42,16 @@ impl StorageOrRemoteDB { #[allow(clippy::too_many_arguments)] pub fn new_s3( storage: R, - aws_id: &str, - aws_secret: &str, - aws_region: &str, aws_bucket: &str, - url_base: &str, + requester_pays: bool, aws_endpoint_url: Option, sync_from: BlockHeight, ) -> Self { - let client = { - let mut builder = aws_sdk_s3::config::Builder::new(); - if let Some(aws_endpoint_url) = aws_endpoint_url { - builder.set_endpoint_url(Some(aws_endpoint_url.clone())); - } - - let config = builder - .force_path_style(true) - .region(Region::new(Cow::Owned(aws_region.to_string()))) - .credentials_provider(Credentials::new( - aws_id, - aws_secret, - None, - None, - "block-aggregator", - )) - .behavior_version_latest() - .build(); - aws_sdk_s3::Client::from_conf(config) - }; let remote_cache = RemoteCache::new( - aws_id.to_string(), - aws_secret.to_string(), - aws_region.to_string(), aws_bucket.to_string(), - url_base.to_string(), - client, + requester_pays, + aws_endpoint_url, + None, storage, sync_from, ); @@ -88,24 +59,6 @@ impl StorageOrRemoteDB { } } -pub fn get_env_vars() -> Option<(String, String, String, String, String, Option)> -{ - let aws_id = std::env::var("AWS_ACCESS_KEY_ID").ok()?; - let aws_secret = std::env::var("AWS_SECRET_ACCESS_KEY").ok()?; - let aws_region = std::env::var("AWS_REGION").ok()?; - let aws_bucket = std::env::var("AWS_BUCKET").ok()?; - let bucket_url_base = std::env::var("BUCKET_URL_BASE").ok()?; - let aws_endpoint_url = std::env::var("AWS_ENDPOINT_URL").ok(); - Some(( - aws_id, - aws_secret, - aws_region, - aws_bucket, - bucket_url_base, - aws_endpoint_url, - )) -} - impl BlockAggregatorDB for StorageOrRemoteDB where // Storage Constraints diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 8df0f0010ed..ffdeb220800 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ 
b/crates/services/block_aggregator_api/src/lib.rs @@ -36,6 +36,7 @@ pub mod integration { blocks::importer_and_db_source::{ BlockSerializer, ImporterAndDbSource, + sync_service::TxReceipts, }, db::BlockAggregatorDB, protobuf_types::Block as ProtoBlock, @@ -45,11 +46,13 @@ pub mod integration { stream::BoxStream, }; use fuel_core_storage::{ + Error as StorageError, StorageInspect, tables::{ FuelBlocks, Transactions, }, + transactional::HistoricalView, }; use fuel_core_types::{ fuel_types::BlockHeight, @@ -61,24 +64,37 @@ pub mod integration { pub struct Config { pub addr: SocketAddr, pub sync_from: Option, + pub storage_method: StorageMethod, + } + + #[derive(Clone, Debug, Default)] + pub enum StorageMethod { + #[default] + Local, + S3 { + bucket: String, + endpoint_url: Option, + requester_pays: bool, + }, } #[allow(clippy::type_complexity)] - pub fn new_service( + pub fn new_service( config: &Config, db: DB, serializer: S, onchain_db: OnchainDB, + receipts: Receipts, importer: BoxStream, sync_from_height: BlockHeight, - ) -> ServiceRunner< + ) -> anyhow::Result, + ImporterAndDbSource, ProtoBlock, >, - > + >> where DB: BlockAggregatorDB< BlockRangeResponse = ::BlockRangeResponse, @@ -86,17 +102,23 @@ pub mod integration { >, S: BlockSerializer + Clone + Send + Sync + 'static, OnchainDB: Send + Sync, - OnchainDB: StorageInspect, - OnchainDB: StorageInspect, - E: std::fmt::Debug + Send + Sync, + OnchainDB: StorageInspect, + OnchainDB: StorageInspect, + OnchainDB: HistoricalView, + Receipts: TxReceipts, { let addr = config.addr.to_string(); - let api = ProtobufAPI::new(addr); - let db_ending_height = None; + let api = ProtobufAPI::new(addr) + .map_err(|e| anyhow::anyhow!("Error creating API: {e}"))?; + let db_ending_height = onchain_db + .latest_height() + .and_then(BlockHeight::succ) + .unwrap_or(BlockHeight::from(0)); let block_source = ImporterAndDbSource::new( importer, serializer, onchain_db, + receipts, sync_from_height, db_ending_height, ); @@ -106,7 +128,8 @@ 
pub mod integration { block_source, new_block_subscriptions: Vec::new(), }; - ServiceRunner::new(block_aggregator) + let runner = ServiceRunner::new(block_aggregator); + Ok(runner) } } @@ -118,7 +141,7 @@ pub struct BlockAggregator { query: Api, database: DB, block_source: Blocks, - new_block_subscriptions: Vec>, + new_block_subscriptions: Vec>, } pub struct NewBlock { diff --git a/crates/services/block_aggregator_api/src/protobuf_types.rs b/crates/services/block_aggregator_api/src/protobuf_types.rs index 648ac0e278d..256ed21a634 100644 --- a/crates/services/block_aggregator_api/src/protobuf_types.rs +++ b/crates/services/block_aggregator_api/src/protobuf_types.rs @@ -1 +1 @@ -tonic::include_proto!("blockaggregator"); +pub use fuel_core_protobuf::*; diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index bbe500cab56..3a49b0b58ff 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -9,12 +9,22 @@ pub enum Error { DB(anyhow::Error), #[error("Serialization error: {0}")] Serialization(anyhow::Error), + #[error("Receipt error: {0}")] + Receipt(anyhow::Error), } impl Error { pub fn db_error>(err: T) -> Self { Error::DB(err.into()) } + + pub fn block_source_error>(err: T) -> Self { + Error::BlockSource(err.into()) + } + + pub fn receipt_error>(err: T) -> Self { + Error::Receipt(err.into()) + } } pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 81feb61d064..dc00e5a0efe 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -249,7 +249,7 @@ async fn run__new_block_subscription__sends_new_block() { // then let actual_block = await_response_with_timeout(response).await.unwrap(); - assert_eq!(expected_block, actual_block); + assert_eq!((expected_height, 
expected_block), actual_block); // cleanup drop(source_sender); diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 6545f32f653..2867d64688d 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -523,7 +523,7 @@ where return match res { Ok(()) => Some(TaskNextAction::Continue), Err(err) => Some(TaskNextAction::ErrorContinue(err)), - } + }; } None } @@ -599,7 +599,7 @@ where return Ok(Self { last_block_created: Instant::now(), ..self - }) + }); } } diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 9a0591f5709..8906f742c0f 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -1670,12 +1670,12 @@ where let Input::Contract(input) = core::mem::take(input) else { return Err(ExecutorError::Other( "Input of the `Mint` transaction is not a contract".to_string(), - )) + )); }; let Output::Contract(output) = outputs[0] else { return Err(ExecutorError::Other( "The output of the `Mint` transaction is not a contract".to_string(), - )) + )); }; Ok((input, output)) } @@ -1793,7 +1793,7 @@ where ); return Err(ExecutorError::InvalidTransactionOutcome { transaction_id: tx_id, - }) + }); } } } @@ -2026,14 +2026,14 @@ where return Err(TransactionValidityError::CoinMismatch( *utxo_id, ) - .into()) + .into()); } } _ => { return Err(TransactionValidityError::CoinDoesNotExist( *utxo_id, ) - .into()) + .into()); } } } @@ -2045,7 +2045,7 @@ where return Err(TransactionValidityError::ContractDoesNotExist( contract.contract_id, ) - .into()) + .into()); } } Input::MessageCoinSigned(MessageCoinSigned { nonce, .. 
}) @@ -2060,21 +2060,21 @@ where *nonce, ) .into(), - ) + ); } if !message.matches_input(input).unwrap_or_default() { return Err(TransactionValidityError::MessageMismatch( *nonce, ) - .into()) + .into()); } } _ => { return Err(TransactionValidityError::MessageDoesNotExist( *nonce, ) - .into()) + .into()); } } } @@ -2133,7 +2133,7 @@ where if reverted => { // Don't spend the retryable messages if transaction is reverted - continue + continue; } Input::MessageCoinSigned(MessageCoinSigned { nonce, .. }) | Input::MessageCoinPredicate(MessageCoinPredicate { nonce, .. }) @@ -2170,7 +2170,7 @@ where for r in receipts.iter().rev() { if let Receipt::ScriptResult { gas_used, .. } = r { used_gas = *gas_used; - break + break; } } @@ -2278,7 +2278,7 @@ where } else { return Err(ExecutorError::InvalidTransactionOutcome { transaction_id: tx_id, - }) + }); }; let empty = ContractAccessesWithValues::default(); @@ -2369,7 +2369,7 @@ where } else { return Err(ExecutorError::TransactionValidity( TransactionValidityError::InvalidContractInputIndex(utxo_id), - )) + )); } } Output::Change { diff --git a/crates/services/p2p/src/peer_manager.rs b/crates/services/p2p/src/peer_manager.rs index 92a0c3ab9b8..fcdb1ab69d3 100644 --- a/crates/services/p2p/src/peer_manager.rs +++ b/crates/services/p2p/src/peer_manager.rs @@ -252,7 +252,7 @@ impl PeerManager { // check if all the slots are already taken if non_reserved_peers_connected >= self.max_non_reserved_peers { // Too many peers already connected, disconnect the Peer - return true + return true; } if non_reserved_peers_connected.saturating_add(1) diff --git a/crates/services/producer/src/block_producer.rs b/crates/services/producer/src/block_producer.rs index a7d09072839..2505fc84d02 100644 --- a/crates/services/producer/src/block_producer.rs +++ b/crates/services/producer/src/block_producer.rs @@ -145,7 +145,7 @@ where height, previous_block: latest_height, } - .into()) + .into()); } let maybe_mint_tx = transactions_source.pop(); @@ -229,7 
+229,7 @@ where height, previous_block: latest_height, } - .into()) + .into()); } let component = Components { diff --git a/crates/services/shared-sequencer/src/service.rs b/crates/services/shared-sequencer/src/service.rs index c81bf1b63be..b807b0a9229 100644 --- a/crates/services/shared-sequencer/src/service.rs +++ b/crates/services/shared-sequencer/src/service.rs @@ -178,7 +178,7 @@ where async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { if !self.config.enabled { let _ = watcher.while_started().await; - return TaskNextAction::Stop; + return TaskNextAction::Stop } if let Err(err) = self.ensure_account_metadata().await { diff --git a/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs b/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs index e944959f3f3..05465a0f751 100644 --- a/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs +++ b/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs @@ -206,7 +206,7 @@ where < constraints.minimal_gas_price; if less_price { - continue; + continue } let not_enough_gas = stored_transaction.transaction.max_gas() > gas_left; @@ -214,7 +214,7 @@ where stored_transaction.transaction.metered_bytes_size() > space_left; if not_enough_gas || too_big_tx { - continue; + continue } gas_left = diff --git a/crates/services/txpool_v2/src/service.rs b/crates/services/txpool_v2/src/service.rs index 8f6af4fb3e4..9db5ac74b1c 100644 --- a/crates/services/txpool_v2/src/service.rs +++ b/crates/services/txpool_v2/src/service.rs @@ -581,7 +581,7 @@ where // We already synced with this peer in the past. 
if !tx_sync_history.insert(peer_id.clone()) { - return + return; } } diff --git a/crates/services/txpool_v2/src/storage/graph.rs b/crates/services/txpool_v2/src/storage/graph.rs index d042f445d0d..1a0a741ef28 100644 --- a/crates/services/txpool_v2/src/storage/graph.rs +++ b/crates/services/txpool_v2/src/storage/graph.rs @@ -221,17 +221,17 @@ impl GraphStorage { if to != i_owner { return Err(Error::InputValidation( InputValidationError::NotInsertedIoWrongOwner, - )); + )) } if amount != i_amount { return Err(Error::InputValidation( InputValidationError::NotInsertedIoWrongAmount, - )); + )) } if asset_id != i_asset_id { return Err(Error::InputValidation( InputValidationError::NotInsertedIoWrongAssetId, - )); + )) } } Output::Contract(_) => { @@ -687,10 +687,10 @@ impl Storage for GraphStorage { if extracted_outputs .coin_exists(utxo_id, owner, amount, asset_id) { - continue; + continue } missing_inputs.push(MissingInput::Utxo(*utxo_id)); - continue; + continue } Err(e) => { return Err(InputValidationErrorType::Inconsistency( @@ -746,10 +746,10 @@ impl Storage for GraphStorage { Ok(true) => {} Ok(false) => { if extracted_outputs.contract_exists(contract_id) { - continue; + continue } missing_inputs.push(MissingInput::Contract(*contract_id)); - continue; + continue } Err(e) => { return Err(InputValidationErrorType::Inconsistency( diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 896cab86c3a..6620ee27cdf 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -79,17 +79,6 @@ impl BlockHeader { } } - /// Get the application portion of the header. - pub fn application_v1( - &self, - ) -> Option<&ApplicationHeader> { - match self { - BlockHeader::V1(header) => Some(header.application()), - #[cfg(feature = "fault-proving")] - BlockHeader::V2(_header) => None, - } - } - /// Get the consensus portion of the header. 
pub fn consensus(&self) -> &ConsensusHeader { match self { diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 37e187f6b81..014d8d4b563 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -1,12 +1,18 @@ +#[cfg(feature = "fault-proving")] +use crate::fuel_types::ChainId; use crate::{ blockchain::{ block::Block, header::{ - GeneratedConsensusFields, + ApplicationHeader, + BlockHeader, + BlockHeaderV1, + PartialBlockHeader, generate_txns_root, + v1::GeneratedApplicationFieldsV1, }, - primitives::DaBlockHeight, }, + fuel_asm::PanicInstruction, fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ BlobBody, @@ -18,6 +24,8 @@ use crate::{ Input, MessageId, Output, + Receipt, + ScriptExecutionResult, StorageSlot, Transaction, TransactionBuilder, @@ -38,6 +46,7 @@ use crate::{ BlobId, BlockHeight, Nonce, + SubAssetId, }, fuel_vm::{ Contract, @@ -46,7 +55,6 @@ use crate::{ }; use proptest::prelude::*; use rand::Rng; -use tai64::Tai64; /// Helper function to create a contract creation transaction /// from a given contract bytecode. @@ -72,7 +80,6 @@ pub fn create_contract( (tx, contract_id) } -#[allow(unused)] fn arb_txs() -> impl Strategy> { prop::collection::vec(arb_transaction(), 0..10) } @@ -427,6 +434,27 @@ prop_compose! 
{ } } +fn arb_contract_id() -> impl Strategy { + any::<[u8; 32]>().prop_map(ContractId::new) +} + +fn arb_sub_asset_id() -> impl Strategy { + any::<[u8; 32]>().prop_map(SubAssetId::new) +} + +fn arb_panic_instruction() -> impl Strategy { + any::().prop_map(PanicInstruction::from) +} + +fn arb_script_execution_result() -> impl Strategy { + prop_oneof![ + Just(ScriptExecutionResult::Success), + Just(ScriptExecutionResult::Revert), + Just(ScriptExecutionResult::Panic), + any::().prop_map(ScriptExecutionResult::GenericFailure), + ] +} + fn arb_msg_ids() -> impl Strategy> { prop::collection::vec(arb_msg_id(), 0..10usize) } @@ -485,7 +513,7 @@ fn arb_create_transaction() -> impl Strategy { ) .prop_map( |(policies, salt_bytes, storage_slots, inputs, outputs, witnesses)| { - let create = crate::fuel_tx::Transaction::create( + let create = Transaction::create( 0, policies, Salt::from(salt_bytes), @@ -517,7 +545,7 @@ fn arb_mint_transaction() -> impl Strategy { mint_asset_id, gas_price, )| { - let mint = crate::fuel_tx::Transaction::mint( + let mint = Transaction::mint( tx_pointer, input_contract, output_contract, @@ -613,41 +641,21 @@ fn arb_blob_transaction() -> impl Strategy { }) } -prop_compose! { - fn arb_consensus_header()( - prev_root in any::<[u8; 32]>(), - time in any::(), - ) -> crate::blockchain::header::ConsensusHeader { - crate::blockchain::header::ConsensusHeader { - prev_root: prev_root.into(), - height: BlockHeight::new(0), - time: Tai64(time), - generated: GeneratedConsensusFields::default(), - } - } -} - prop_compose! 
{ /// Generate an arbitrary block with a variable number of transactions pub fn arb_block()( txs in arb_txs(), da_height in any::(), - consensus_parameter_version in any::(), + consensus_parameters_version in any::(), state_transition_bytecode_version in any::(), msg_ids in arb_msg_ids(), event_root in any::<[u8; 32]>(), - mut consensus_header in arb_consensus_header(), + chain_id in any::(), ) -> (Block, Vec, Bytes32) { - let mut fuel_block = Block::default(); - - *fuel_block.transactions_mut() = txs; - - fuel_block.header_mut().set_da_height(DaBlockHeight(da_height)); - fuel_block.header_mut().set_consensus_parameters_version(consensus_parameter_version); - fuel_block.header_mut().set_state_transition_bytecode_version(state_transition_bytecode_version); - - let count = fuel_block.transactions().len().try_into().expect("we shouldn't have more than u16::MAX transactions"); - let msg_root = msg_ids + let transactions_count = txs.len().try_into().expect("we shouldn't have more than u16::MAX transactions"); + let message_receipt_count = msg_ids.len().try_into().expect("we shouldn't have more than u32::MAX messages"); + let transactions_root = generate_txns_root(&txs); + let message_outbox_root = msg_ids .iter() .fold(MerkleRootCalculator::new(), |mut tree, id| { tree.push(id.as_ref()); @@ -655,19 +663,177 @@ prop_compose! 
{ }) .root() .into(); - let tx_root = generate_txns_root(fuel_block.transactions()); - let event_root = event_root.into(); - fuel_block.header_mut().set_transactions_count(count); - fuel_block.header_mut().set_message_receipt_count(msg_ids.len().try_into().expect("we shouldn't have more than u32::MAX messages")); - fuel_block.header_mut().set_transaction_root(tx_root); - fuel_block.header_mut().set_message_outbox_root(msg_root); - fuel_block.header_mut().set_event_inbox_root(event_root); - - // Consensus - // TODO: Include V2 Application with V2 Header - let application_hash = fuel_block.header().application_v1().unwrap().hash(); - consensus_header.generated.application_hash = application_hash; - fuel_block.header_mut().set_consensus_header(consensus_header); + let event_root: Bytes32 = event_root.into(); + let header = { + let mut default = BlockHeaderV1::default(); + default.set_application_header(ApplicationHeader { + da_height: da_height.into(), + consensus_parameters_version, + state_transition_bytecode_version, + generated: GeneratedApplicationFieldsV1 { + transactions_count, + message_receipt_count, + transactions_root, + message_outbox_root, + event_inbox_root: event_root, + }, + }); + + BlockHeader::V1(default) + }; + let partial_block_header = PartialBlockHeader::from(&header); + #[cfg(feature = "fault-proving")] + let fuel_block = { + let chain_id = ChainId::new(chain_id); + Block::new(partial_block_header, txs, &msg_ids, event_root, &chain_id).unwrap() + }; + #[cfg(not(feature = "fault-proving"))] + let fuel_block = { + let _ = chain_id; + Block::new(partial_block_header, txs, &msg_ids, event_root).unwrap() + }; (fuel_block, msg_ids, event_root) } } + +fn arb_receipt() -> impl Strategy { + prop_oneof![ + ( + arb_contract_id(), + arb_contract_id(), + any::(), + arb_asset_id(), + any::(), + any::(), + any::(), + any::(), + any::(), + ) + .prop_map( + |(id, to, amount, asset_id, gas, param1, param2, pc, is)| { + Receipt::call(id, to, amount, asset_id, gas, 
param1, param2, pc, is) + }, + ), + (arb_contract_id(), any::(), any::(), any::(),) + .prop_map(|(id, val, pc, is)| Receipt::ret(id, val, pc, is)), + ( + arb_contract_id(), + any::(), + any::(), + any::(), + prop::collection::vec(any::(), 0..64), + ) + .prop_map(|(id, ptr, pc, is, data)| Receipt::return_data( + id, ptr, pc, is, data, + )), + ( + arb_contract_id(), + arb_panic_instruction(), + any::(), + any::(), + prop::option::of(arb_contract_id()), + ) + .prop_map(|(id, reason, pc, is, panic_contract)| { + Receipt::panic(id, reason, pc, is).with_panic_contract_id(panic_contract) + }), + (arb_contract_id(), any::(), any::(), any::(),) + .prop_map(|(id, ra, pc, is)| Receipt::revert(id, ra, pc, is)), + ( + arb_contract_id(), + any::(), + any::(), + any::(), + any::(), + any::(), + any::(), + ) + .prop_map(|(id, ra, rb, rc, rd, pc, is)| { + Receipt::log(id, ra, rb, rc, rd, pc, is) + }), + ( + arb_contract_id(), + any::(), + any::(), + any::(), + any::(), + any::(), + prop::collection::vec(any::(), 0..64), + ) + .prop_map(|(id, ra, rb, ptr, pc, is, data)| { + Receipt::log_data(id, ra, rb, ptr, pc, is, data) + }), + ( + arb_contract_id(), + arb_contract_id(), + any::(), + arb_asset_id(), + any::(), + any::(), + ) + .prop_map(|(id, to, amount, asset_id, pc, is)| { + Receipt::transfer(id, to, amount, asset_id, pc, is) + }), + ( + arb_contract_id(), + arb_address(), + any::(), + arb_asset_id(), + any::(), + any::(), + ) + .prop_map(|(id, to, amount, asset_id, pc, is)| { + Receipt::transfer_out(id, to, amount, asset_id, pc, is) + }), + (arb_script_execution_result(), any::()) + .prop_map(|(result, gas_used)| Receipt::script_result(result, gas_used),), + ( + arb_address(), + arb_address(), + any::(), + arb_nonce(), + prop::collection::vec(any::(), 0..64), + ) + .prop_map(|(sender, recipient, amount, nonce, data)| { + let len = data.len() as u64; + let digest = Output::message_digest(&data); + Receipt::message_out_with_len( + sender, + recipient, + amount, + nonce, + len, + 
digest, + Some(data), + ) + }), + ( + arb_sub_asset_id(), + arb_contract_id(), + any::(), + any::(), + any::(), + ) + .prop_map(|(sub_id, contract_id, val, pc, is)| { + Receipt::mint(sub_id, contract_id, val, pc, is) + }), + ( + arb_sub_asset_id(), + arb_contract_id(), + any::(), + any::(), + any::(), + ) + .prop_map(|(sub_id, contract_id, val, pc, is)| { + Receipt::burn(sub_id, contract_id, val, pc, is) + }), + ] +} + +prop_compose! { + /// generates a list of random receipts + pub fn arb_receipts()( + receipts in prop::collection::vec(arb_receipt(), 0..10), + ) -> Vec { + receipts + } +} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 27b6472c395..6e5f896ccba 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -101,6 +101,8 @@ url = { workspace = true } alloy-primitives = { workspace = true } alloy-provider = { workspace = true, default-features = false, features = ["reqwest-rustls-tls"] } alloy-rpc-types-eth = { workspace = true } +aws-config = "1.8.10" +flate2 = { workspace = true } fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } pretty_assertions = "1.4" proptest = { workspace = true } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index df4263bf919..18efca1d964 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,12 +1,11 @@ #![allow(non_snake_case)] -use aws_sdk_s3::{ - Client, - config::{ - Credentials, - Region, - }, +use aws_config::{ + BehaviorVersion, + default_provider::credentials::DefaultCredentialsChain, }; +use aws_sdk_s3::Client; +use flate2::read::GzDecoder; use fuel_core::{ database::Database, service::{ @@ -15,21 +14,19 @@ use fuel_core::{ }, }; use fuel_core_block_aggregator_api::{ - blocks::importer_and_db_source::serializer_adapter::fuel_block_from_protobuf, - db::{ - remote_cache::block_height_to_key, - storage_or_remote_db::get_env_vars, - }, + blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf, + 
db::remote_cache::block_height_to_key, + integration::StorageMethod, protobuf_types::{ Block as ProtoBlock, BlockHeightRequest as ProtoBlockHeightRequest, BlockRangeRequest as ProtoBlockRangeRequest, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - RemoteBlockRangeResponse as ProtoRemoteBlockRangeResponse, - block::VersionedBlock as ProtoVersionedBlock, + RemoteBlockResponse as ProtoRemoteBlockResponse, + RemoteS3Bucket, block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, block_response::Payload as ProtoPayload, - header::VersionedHeader as ProtoVersionedHeader, + remote_block_response::Location, }, }; use fuel_core_client::client::FuelClient; @@ -39,10 +36,26 @@ use fuel_core_types::{ }; use futures::StreamExt; use prost::bytes::Bytes; -use std::borrow::Cow; +use std::io::Read; use test_helpers::client_ext::ClientExt; use tokio::time::sleep; +macro_rules! require_env_var_or_skip { + ($($var:literal),+) => { + $(if std::env::var($var).is_err() { + eprintln!("Skipping test: missing {}", $var); + return; + })+ + }; +} + +pub fn get_env_vars() -> Option<(String, String, String)> { + let aws_id = std::env::var("AWS_ACCESS_KEY_ID").ok()?; + let aws_secret = std::env::var("AWS_SECRET_ACCESS_KEY").ok()?; + let aws_region = std::env::var("AWS_REGION").ok()?; + Some((aws_id, aws_secret, aws_region)) +} + #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc__literal() { if env_vars_are_set() { @@ -66,16 +79,9 @@ async fn get_block_range__can_get_serialized_block_from_rpc__literal() { .await .expect("could not connect to server"); - let expected_block = graphql_client - .full_block_by_height(1) - .await - .unwrap() - .unwrap(); - let expected_header = expected_block.header; - // when let request = ProtoBlockRangeRequest { start: 1, end: 1 }; - let actual_block = if let Some(ProtoPayload::Literal(block)) = rpc_client + let proto_block = if let Some(ProtoPayload::Literal(block)) = 
rpc_client .get_block_range(request) .await .unwrap() @@ -90,26 +96,46 @@ async fn get_block_range__can_get_serialized_block_from_rpc__literal() { } else { panic!("expected literal block payload"); }; - let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { - ProtoVersionedHeader::V1(v1_header) => v1_header.height, - ProtoVersionedHeader::V2(v2_header) => v2_header.height, - }; + let (actual_block, receipts) = + fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); + let actual_height = actual_block.header().height(); // then - assert_eq!(expected_header.height.0, actual_height); + let expected_height = BlockHeight::new(1); + assert_eq!(&expected_height, actual_height); + + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. 
}), + "should have a return receipt, received: {:?}", + receipts + ); } #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc__remote() { - let Some((_, _, aws_region, aws_bucket, url_base, _)) = get_env_vars() else { - tracing::info!("Skipping test: AWS credentials are not set"); - return; - }; + require_env_var_or_skip!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"); + ensure_bucket_exists().await; clean_s3_bucket().await; - let config = Config::local_node(); + let mut config = Config::local_node(); + let endpoint_url = "http://127.0.0.1:4566".to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; let rpc_url = config.rpc_config.addr; let srv = FuelService::from_database(Database::default(), config.clone()) @@ -154,11 +180,13 @@ async fn get_block_range__can_get_serialized_block_from_rpc__remote() { // then let key = block_height_to_key(&expected_height); - let expected = ProtoRemoteBlockRangeResponse { - region: aws_region.clone(), - bucket: aws_bucket.clone(), - key: key.clone(), - url: format!("{}/{}", url_base, key), + let expected = ProtoRemoteBlockResponse { + location: Some(Location::S3(RemoteS3Bucket { + bucket: "test-bucket".to_string(), + key, + requester_pays: false, + endpoint: Some("http://127.0.0.1:4566".to_string()), + })), }; assert_eq!(expected, remote_info); clean_s3_bucket().await; @@ -166,11 +194,17 @@ async fn get_block_range__can_get_serialized_block_from_rpc__remote() { #[tokio::test(flavor = "multi_thread")] async fn get_block_height__can_get_value_from_rpc() { + let mut config = Config::local_node(); if get_env_vars().is_some() { ensure_bucket_exists().await; clean_s3_bucket().await; + let endpoint_url = "http://127.0.0.1:4566".to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + 
requester_pays: false, + }; } - let config = Config::local_node(); let rpc_url = config.rpc_config.addr; // given @@ -193,7 +227,7 @@ async fn get_block_height__can_get_value_from_rpc() { let request = ProtoBlockHeightRequest {}; let expected_height = Some(1); let actual_height = rpc_client - .get_block_height(request) + .get_synced_block_height(request) .await .unwrap() .into_inner() @@ -210,11 +244,18 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { + let mut config = Config::local_node(); if get_env_vars().is_some() { ensure_bucket_exists().await; clean_s3_bucket().await; + let endpoint_url = "http://127.0.0.1:4566".to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; } - let config = Config::local_node(); + let rpc_url = config.rpc_config.addr; let srv = FuelService::from_database(Database::default(), config.clone()) @@ -242,73 +283,67 @@ async fn new_block_subscription__can_get_expect_block() { let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) .await .unwrap(); - let actual_block = + let proto_block = if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { block } else { panic!("expected literal block payload"); }; - let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { - ProtoVersionedHeader::V1(v1_header) => v1_header.height, - ProtoVersionedHeader::V2(v2_header) => v2_header.height, - }; + let (actual_block, receipts) = + fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); + let actual_height = actual_block.header().height(); // then - let expected_height = 1; - assert_eq!(expected_height, actual_height); + let expected_height = BlockHeight::new(1); + 
assert_eq!(&expected_height, actual_height); + + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. }), + "should have a return receipt, received: {:?}", + receipts + ); + if get_env_vars().is_some() { clean_s3_bucket().await; } } -macro_rules! require_env_var_or_skip { - ($($var:literal),+) => { - $(if std::env::var($var).is_err() { - eprintln!("Skipping test: missing {}", $var); - return; - })+ - }; -} - fn env_vars_are_set() -> bool { std::env::var("AWS_ACCESS_KEY_ID").is_ok() && std::env::var("AWS_SECRET_ACCESS_KEY").is_ok() - && std::env::var("AWS_REGION").is_ok() - && std::env::var("AWS_BUCKET").is_ok() - && std::env::var("AWS_ENDPOINT_URL").is_ok() - && std::env::var("BUCKET_URL_BASE").is_ok() } -fn aws_client() -> Client { - let (aws_access_key_id, aws_secret_access_key, aws_region, _, _, aws_endpoint_url) = - get_env_vars().unwrap(); - - let mut builder = aws_sdk_s3::config::Builder::new(); - if let Some(aws_endpoint_url) = aws_endpoint_url { - builder.set_endpoint_url(Some(aws_endpoint_url.clone())); - } - - let config = builder - .force_path_style(true) - .region(Region::new(Cow::Owned(aws_region.clone()))) - .credentials_provider(Credentials::new( - aws_access_key_id, - aws_secret_access_key, - None, - None, - "block-aggregator", - )) - .behavior_version_latest() - .build(); - aws_sdk_s3::Client::from_conf(config) +async fn aws_client() -> Client { + let credentials = DefaultCredentialsChain::builder().build().await; + let _aws_region = + std::env::var("AWS_REGION").expect("AWS_REGION env var must be set"); + let sdk_config = aws_config::defaults(BehaviorVersion::latest()) + .credentials_provider(credentials) + .endpoint_url("http://127.0.0.1:4566") + .load() + .await; + let builder = aws_sdk_s3::config::Builder::from(&sdk_config); + let config = 
builder.force_path_style(true).build(); + Client::from_conf(config) } async fn get_block_from_s3_bucket() -> Bytes { - let client = aws_client(); - let bucket = std::env::var("AWS_BUCKET").unwrap(); + let client = aws_client().await; + let bucket = "test-bucket".to_string(); let key = block_height_to_key(&BlockHeight::new(1)); + tracing::info!("getting block from bucket: {} with key {}", bucket, key); let req = client.get_object().bucket(&bucket).key(&key); let obj = req.send().await.unwrap(); let message = format!( @@ -319,20 +354,20 @@ async fn get_block_from_s3_bucket() -> Bytes { } async fn ensure_bucket_exists() { - let client = aws_client(); - let bucket = std::env::var("AWS_BUCKET").unwrap(); - let req = client.create_bucket().bucket(&bucket); + let client = aws_client().await; + let bucket = "test-bucket"; + let req = client.create_bucket().bucket(bucket); let expect_message = format!("should be able to create bucket: {}", bucket); let _ = req.send().await.expect(&expect_message); } async fn clean_s3_bucket() { - let client = aws_client(); - let bucket = std::env::var("AWS_BUCKET").unwrap(); - let req = client.list_objects().bucket(&bucket); + let client = aws_client().await; + let bucket = "test-bucket"; + let req = client.list_objects().bucket(bucket); let objs = req.send().await.unwrap(); for obj in objs.contents.unwrap_or_default() { - let req = client.delete_object().bucket(&bucket).key(obj.key.unwrap()); + let req = client.delete_object().bucket(bucket).key(obj.key.unwrap()); let _ = req.send().await.unwrap(); } } @@ -342,19 +377,19 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { let _ = tracing_subscriber::fmt() .with_max_level(tracing::Level::INFO) .try_init(); - require_env_var_or_skip!( - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "AWS_REGION", - "AWS_BUCKET", - "AWS_ENDPOINT_URL", - "BUCKET_URL_BASE" - ); + + require_env_var_or_skip!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); ensure_bucket_exists().await; 
clean_s3_bucket().await; // given - let config = Config::local_node(); + let mut config = Config::local_node(); + let endpoint_url = "http://127.0.0.1:4566".to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; let srv = FuelService::from_database(Database::default(), config.clone()) .await .unwrap(); @@ -367,10 +402,28 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { sleep(std::time::Duration::from_secs(1)).await; // then - let data = get_block_from_s3_bucket().await; + let zipped_data = get_block_from_s3_bucket().await; + let data = unzip_bytes(&zipped_data); // can deserialize let actual_proto: ProtoBlock = prost::Message::decode(data.as_ref()).unwrap(); - let _ = fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); + let (_, receipts) = + fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. 
}), + "should have a return receipt, received: {:?}", + receipts + ); // cleanup clean_s3_bucket().await; @@ -379,3 +432,10 @@ async fn get_block_range__can_get_from_remote_s3_bucket() { "Successfully ran test: get_block_range__can_get_from_remote_s3_bucket" ); } + +fn unzip_bytes(bytes: &[u8]) -> Vec { + let mut decoder = GzDecoder::new(bytes); + let mut output = Vec::new(); + decoder.read_to_end(&mut output).unwrap(); + output +} diff --git a/tests/tests/trigger_integration/interval.rs b/tests/tests/trigger_integration/interval.rs index ad238fa8403..73a2fa94d78 100644 --- a/tests/tests/trigger_integration/interval.rs +++ b/tests/tests/trigger_integration/interval.rs @@ -160,7 +160,7 @@ async fn poa_interval_produces_nonempty_blocks_at_correct_rate() { let count_now = resp.results.len(); if count_now > count_start + rounds { - break + break; } } From fea570efcef02d7a2b0d11d5088d800740e09900 Mon Sep 17 00:00:00 2001 From: green Date: Tue, 25 Nov 2025 16:02:51 +0000 Subject: [PATCH 128/146] Remoed semicolons Cleaned up some deps --- Cargo.lock | 40 +++++------ Cargo.toml | 6 +- bin/fuel-core/Cargo.toml | 4 +- crates/fuel-core/Cargo.toml | 8 +-- crates/fuel-core/src/lib.rs | 2 - crates/fuel-core/src/service/config.rs | 71 ++++++++++--------- .../services/block_aggregator_api/Cargo.toml | 8 +-- .../block_aggregator_api/src/blocks.rs | 10 +-- .../consensus_module/poa/src/service.rs | 4 +- crates/services/executor/src/executor.rs | 26 +++---- crates/services/p2p/src/peer_manager.rs | 2 +- .../services/producer/src/block_producer.rs | 4 +- crates/services/txpool_v2/src/service.rs | 2 +- crates/types/Cargo.toml | 4 +- tests/Cargo.toml | 12 ++-- tests/tests/trigger_integration/interval.rs | 2 +- 16 files changed, 96 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d63ea62f43d..f5f167dfded 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1290,9 +1290,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = 
"aws-config" -version = "1.8.10" +version = "1.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1856b1b48b65f71a4dd940b1c0931f9a7b646d4a924b9828ffefc1454714668a" +checksum = "a0149602eeaf915158e14029ba0c78dedb8c08d554b024d54c8f239aab46511d" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1320,9 +1320,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.8" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf26925f4a5b59eb76722b63c2892b1d70d06fa053c72e4a100ec308c1d47bc" +checksum = "b01c9521fa01558f750d183c8c68c81b0155b9d193a4ba7f84c36bd1b6d04a06" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -1355,9 +1355,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.5.13" +version = "1.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2402da1a5e16868ba98725e5d73f26b8116eaa892e56f2cd0bf5eec7985f70" +checksum = "7ce527fb7e53ba9626fc47824f25e256250556c40d8f81d27dd92aa38239d632" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -1380,9 +1380,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.90.0" +version = "1.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08a2b564e660ad69be524f569fea985380b15eea28694b8fd9f6206a437702b" +checksum = "995d40070271994fb774137aa603c10e7d29c4567a9605c6b801dff199c3d221" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1436,9 +1436,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.88.0" +version = "1.90.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05b276777560aa9a196dbba2e3aada4d8006d3d7eeb3ba7fe0c317227d933c4" +checksum = "4f18e53542c522459e757f81e274783a78f8c81acdfc8d1522ee8a18b5fb1c66" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1458,9 +1458,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = 
"1.90.0" +version = "1.92.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9be14d6d9cd761fac3fd234a0f47f7ed6c0df62d83c0eeb7012750e4732879b" +checksum = "532f4d866012ffa724a4385c82e8dd0e59f0ca0e600f3f22d4c03b6824b34e4a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1480,9 +1480,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.90.0" +version = "1.94.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a862d704c817d865c8740b62d8bbeb5adcb30965e93b471df8a5bcefa20a80" +checksum = "1be6fbbfa1a57724788853a623378223fe828fc4c09b146c992f0c95b6256174" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1640,9 +1640,9 @@ dependencies = [ [[package]] name = "aws-smithy-mocks" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99c0a641ee7f8a95a041659855f473166db87c3135b640029ba42772a4ea0a04" +checksum = "ea6c97048c104438d40390bd5211267ea783fa0880a35017ff42730b6dfc9136" dependencies = [ "aws-smithy-http-client", "aws-smithy-runtime-api", @@ -4264,7 +4264,6 @@ dependencies = [ "aws-config", "aws-sdk-s3", "aws-smithy-mocks", - "bytes", "enum-iterator", "flate2", "fuel-core-protobuf", @@ -4272,9 +4271,7 @@ dependencies = [ "fuel-core-storage", "fuel-core-types 0.47.1", "futures", - "log", "num_enum", - "postcard", "proptest", "prost 0.14.1", "rand 0.8.5", @@ -4286,7 +4283,6 @@ dependencies = [ "tokio-stream", "tonic 0.14.2", "tracing", - "tracing-subscriber", ] [[package]] @@ -4797,12 +4793,10 @@ dependencies = [ "alloy-provider", "alloy-rpc-types-eth", "anyhow", - "async-trait", "aws-config", "aws-sdk-kms", "aws-sdk-s3", "clap", - "cynic", "flate2", "fuel-core", "fuel-core-benches", @@ -4823,7 +4817,6 @@ dependencies = [ "fuel-core-types 0.47.1", "fuel-core-upgradable-executor", "futures", - "hex", "hyper 0.14.32", "insta", "itertools 0.12.1", @@ -4838,7 +4831,6 @@ dependencies = [ "reqwest 0.12.24", "rstest", 
"serde_json", - "spki 0.7.3", "tempfile", "test-case", "test-helpers", diff --git a/Cargo.toml b/Cargo.toml index 09ff8ca2d26..4ddeed4aedb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,8 +77,10 @@ async-graphql-value = { version = "=7.0.15" } async-trait = "0.1" # Fuel dependencies -aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-kms = "1.37" +aws-config = { version = "1.8.11", features = ["behavior-version-latest"] } +aws-sdk-kms = "1.96" +aws-sdk-s3 = "1" +aws-smithy-mocks = "0.2.0" axum = "0.5" bytes = "1.5.0" clap = "4.4" diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index d94b550976c..8aaea4c79d2 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -48,10 +48,10 @@ fault-proving = [ [dependencies] anyhow = { workspace = true } -aws-config = { version = "1.1.7", features = [ +aws-config = { workspace = true, features = [ "behavior-version-latest", ], optional = true } -aws-sdk-kms = { version = "1.37.0", optional = true } +aws-sdk-kms = { workspace = true, optional = true } clap = { workspace = true, features = ["derive", "env", "string"] } const_format = { version = "0.2", optional = true } dirs = "4.0" diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 6a00676b342..222cd43d17f 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -16,7 +16,7 @@ default = ["rocksdb", "serde"] serde = ["dep:serde_with"] p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"] relayer = ["dep:fuel-core-relayer"] -rpc = ["fuel-core/rpc"] +rpc = ["fuel-core/rpc", "dep:fuel-core-block-aggregator-api"] shared-sequencer = ["dep:fuel-core-shared-sequencer", "dep:cosmrs"] rocksdb = ["dep:rocksdb", "dep:tempfile", "dep:num_cpus"] backup = ["rocksdb", "fuel-core-database/backup"] @@ -42,7 +42,7 @@ fault-proving = [ "fuel-core-executor/fault-proving", "fuel-core-storage/fault-proving", "fuel-core-chain-config/fault-proving", - 
"fuel-core-block-aggregator-api/fault-proving", + "fuel-core-block-aggregator-api?/fault-proving", "fuel-core-database/fault-proving", "fuel-core-sync?/fault-proving", "fuel-core-importer/fault-proving", @@ -59,9 +59,9 @@ async-trait = { workspace = true } axum = { workspace = true } clap = { workspace = true, features = ["derive"] } cosmrs = { version = "0.21", optional = true } -derive_more = { version = "0.99" } +derive_more = { workspace = true } enum-iterator = { workspace = true } -fuel-core-block-aggregator-api = { workspace = true } +fuel-core-block-aggregator-api = { workspace = true, optional = true } fuel-core-chain-config = { workspace = true, features = ["std"] } fuel-core-compression-service = { workspace = true } fuel-core-consensus-module = { workspace = true } diff --git a/crates/fuel-core/src/lib.rs b/crates/fuel-core/src/lib.rs index ea490b8c1b2..d464a46d073 100644 --- a/crates/fuel-core/src/lib.rs +++ b/crates/fuel-core/src/lib.rs @@ -55,8 +55,6 @@ pub mod state; // In the future this module will be a separate crate for `fuel-core-graphql-api`. 
mod graphql_api; -use fuel_core_block_aggregator_api as _; - pub mod fuel_core_graphql_api { pub use crate::graphql_api::*; } diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 2200818018d..dc8a81f65d1 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -1,8 +1,21 @@ +use crate::{ + combined_database::CombinedDatabaseConfig, + graphql_api::ServiceConfig as GraphQLConfig, +}; use clap::ValueEnum; -#[cfg(feature = "test-helpers")] -use std::net::{ - SocketAddr, - TcpListener, +use fuel_core_chain_config::SnapshotReader; +pub use fuel_core_consensus_module::RelayerConsensusConfig; +pub use fuel_core_importer; +pub use fuel_core_poa::Trigger; +use fuel_core_tx_status_manager::config::Config as TxStatusManagerConfig; +use fuel_core_txpool::config::Config as TxPoolConfig; +use fuel_core_types::{ + blockchain::header::StateTransitionBytecodeVersion, + fuel_types::{ + AssetId, + ChainId, + }, + signer::SignMode, }; use std::{ num::{ @@ -18,46 +31,34 @@ use strum_macros::{ EnumVariantNames, }; -use fuel_core_chain_config::SnapshotReader; -#[cfg(feature = "test-helpers")] -use fuel_core_chain_config::{ - ChainConfig, - StateConfig, -}; -pub use fuel_core_consensus_module::RelayerConsensusConfig; -pub use fuel_core_importer; +#[cfg(feature = "parallel-executor")] +use std::num::NonZeroUsize; + +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::integration::StorageMethod; +#[cfg(feature = "rpc")] +use fuel_core_types::fuel_types::BlockHeight; + +#[cfg(feature = "relayer")] +use fuel_core_relayer::Config as RelayerConfig; + #[cfg(feature = "p2p")] use fuel_core_p2p::config::{ Config as P2PConfig, NotInitialized, }; -pub use fuel_core_poa::Trigger; -#[cfg(feature = "relayer")] -use fuel_core_relayer::Config as RelayerConfig; -use fuel_core_tx_status_manager::config::Config as TxStatusManagerConfig; -use fuel_core_txpool::config::Config as TxPoolConfig; -use 
fuel_core_types::{ - blockchain::header::StateTransitionBytecodeVersion, - signer::SignMode, -}; -use crate::{ - combined_database::CombinedDatabaseConfig, - graphql_api::ServiceConfig as GraphQLConfig, +#[cfg(feature = "test-helpers")] +use fuel_core_chain_config::{ + ChainConfig, + StateConfig, }; - -#[cfg(feature = "rpc")] -use fuel_core_types::fuel_types::BlockHeight; -use fuel_core_types::fuel_types::{ - AssetId, - ChainId, +#[cfg(feature = "test-helpers")] +use std::net::{ + SocketAddr, + TcpListener, }; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::integration::StorageMethod; -#[cfg(feature = "parallel-executor")] -use std::num::NonZeroUsize; - #[derive(Clone, Debug)] pub struct Config { pub graphql_config: GraphQLConfig, diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 85112fdfc4d..4792b7a73ee 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -16,9 +16,7 @@ fault-proving = ["fuel-core-types/fault-proving"] anyhow = { workspace = true } async-trait = { workspace = true } aws-config = { workspace = true } -aws-sdk-s3 = "1.111.0" -aws-smithy-mocks = "0.2.0" -bytes = { workspace = true, features = ["serde"] } +aws-sdk-s3 = { workspace = true } enum-iterator = { workspace = true } flate2 = { workspace = true } fuel-core-protobuf = { workspace = true } @@ -26,9 +24,7 @@ fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } -log = "0.4.27" num_enum = { workspace = true } -postcard = { workspace = true } prost = { workspace = true, features = ["derive"] } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } @@ -42,9 +38,9 @@ tracing = { workspace = true } [dev-dependencies] aws-sdk-s3 = { version = "1.111.0", features = ["test-util"] } +aws-smithy-mocks 
= { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } proptest = { workspace = true } tokio-stream = { workspace = true } -tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 55f8ff15216..cc846995477 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,6 +1,8 @@ use crate::result::Result; -use bytes::Bytes; -use fuel_core_types::fuel_types::BlockHeight; +use fuel_core_types::fuel_types::{ + BlockHeight, + bytes::Bytes, +}; use std::fmt::Debug; pub mod importer_and_db_source; @@ -45,8 +47,8 @@ impl BlockBytes { #[cfg(test)] pub fn arb_size(rng: &mut Rng, size: usize) -> Self { - let bytes: Bytes = (0..size).map(|_| rng.r#gen()).collect(); - Self::new(bytes) + let bytes: Vec = (0..size).map(|_| rng.r#gen::()).collect(); + Self::new(bytes.into()) } #[cfg(test)] diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 2867d64688d..6545f32f653 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -523,7 +523,7 @@ where return match res { Ok(()) => Some(TaskNextAction::Continue), Err(err) => Some(TaskNextAction::ErrorContinue(err)), - }; + } } None } @@ -599,7 +599,7 @@ where return Ok(Self { last_block_created: Instant::now(), ..self - }); + }) } } diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index 8906f742c0f..9a0591f5709 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -1670,12 +1670,12 @@ where let Input::Contract(input) = core::mem::take(input) else { return 
Err(ExecutorError::Other( "Input of the `Mint` transaction is not a contract".to_string(), - )); + )) }; let Output::Contract(output) = outputs[0] else { return Err(ExecutorError::Other( "The output of the `Mint` transaction is not a contract".to_string(), - )); + )) }; Ok((input, output)) } @@ -1793,7 +1793,7 @@ where ); return Err(ExecutorError::InvalidTransactionOutcome { transaction_id: tx_id, - }); + }) } } } @@ -2026,14 +2026,14 @@ where return Err(TransactionValidityError::CoinMismatch( *utxo_id, ) - .into()); + .into()) } } _ => { return Err(TransactionValidityError::CoinDoesNotExist( *utxo_id, ) - .into()); + .into()) } } } @@ -2045,7 +2045,7 @@ where return Err(TransactionValidityError::ContractDoesNotExist( contract.contract_id, ) - .into()); + .into()) } } Input::MessageCoinSigned(MessageCoinSigned { nonce, .. }) @@ -2060,21 +2060,21 @@ where *nonce, ) .into(), - ); + ) } if !message.matches_input(input).unwrap_or_default() { return Err(TransactionValidityError::MessageMismatch( *nonce, ) - .into()); + .into()) } } _ => { return Err(TransactionValidityError::MessageDoesNotExist( *nonce, ) - .into()); + .into()) } } } @@ -2133,7 +2133,7 @@ where if reverted => { // Don't spend the retryable messages if transaction is reverted - continue; + continue } Input::MessageCoinSigned(MessageCoinSigned { nonce, .. }) | Input::MessageCoinPredicate(MessageCoinPredicate { nonce, .. }) @@ -2170,7 +2170,7 @@ where for r in receipts.iter().rev() { if let Receipt::ScriptResult { gas_used, .. 
} = r { used_gas = *gas_used; - break; + break } } @@ -2278,7 +2278,7 @@ where } else { return Err(ExecutorError::InvalidTransactionOutcome { transaction_id: tx_id, - }); + }) }; let empty = ContractAccessesWithValues::default(); @@ -2369,7 +2369,7 @@ where } else { return Err(ExecutorError::TransactionValidity( TransactionValidityError::InvalidContractInputIndex(utxo_id), - )); + )) } } Output::Change { diff --git a/crates/services/p2p/src/peer_manager.rs b/crates/services/p2p/src/peer_manager.rs index fcdb1ab69d3..92a0c3ab9b8 100644 --- a/crates/services/p2p/src/peer_manager.rs +++ b/crates/services/p2p/src/peer_manager.rs @@ -252,7 +252,7 @@ impl PeerManager { // check if all the slots are already taken if non_reserved_peers_connected >= self.max_non_reserved_peers { // Too many peers already connected, disconnect the Peer - return true; + return true } if non_reserved_peers_connected.saturating_add(1) diff --git a/crates/services/producer/src/block_producer.rs b/crates/services/producer/src/block_producer.rs index 2505fc84d02..a7d09072839 100644 --- a/crates/services/producer/src/block_producer.rs +++ b/crates/services/producer/src/block_producer.rs @@ -145,7 +145,7 @@ where height, previous_block: latest_height, } - .into()); + .into()) } let maybe_mint_tx = transactions_source.pop(); @@ -229,7 +229,7 @@ where height, previous_block: latest_height, } - .into()); + .into()) } let component = Components { diff --git a/crates/services/txpool_v2/src/service.rs b/crates/services/txpool_v2/src/service.rs index 9db5ac74b1c..8f6af4fb3e4 100644 --- a/crates/services/txpool_v2/src/service.rs +++ b/crates/services/txpool_v2/src/service.rs @@ -581,7 +581,7 @@ where // We already synced with this peer in the past. 
if !tx_sync_history.insert(peer_id.clone()) { - return; + return } } diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index f23b0e2126c..5792af72134 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -44,7 +44,7 @@ anyhow = { workspace = true } aws-sdk-kms = { workspace = true, optional = true } bs58 = { version = "0.5", optional = true } -derive_more = { version = "0.99" } +derive_more = { workspace = true } ed25519 = { workspace = true, default-features = false } ed25519-dalek = { workspace = true, default-features = false } educe = { workspace = true, optional = true } @@ -62,7 +62,7 @@ tai64 = { version = "=4.0.0", features = ["serde"] } zeroize = "1.5" [dev-dependencies] -aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } +aws-config = { workspace = true, features = ["behavior-version-latest"] } fuel-core-types = { path = ".", features = ["test-helpers", "serde"] } postcard = { workspace = true } tokio = { workspace = true, features = ["macros"] } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 6e5f896ccba..c09b3892d52 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -31,14 +31,12 @@ rpc = ["fuel-core/rpc", "fuel-core-bin/rpc"] [dependencies] anyhow = { workspace = true } -async-trait = { workspace = true } -aws-config = { version = "1.1.7", features = [ +aws-config = { workspace = true, features = [ "behavior-version-latest", ], optional = true } -aws-sdk-kms = { version = "1.37.0", optional = true } -aws-sdk-s3 = "1.111.0" +aws-sdk-kms = { workspace = true, optional = true } +aws-sdk-s3 = { workspace = true } clap = { workspace = true } -cynic = { workspace = true } fuel-core = { path = "../crates/fuel-core", default-features = false, features = [ "p2p", "relayer", @@ -73,7 +71,6 @@ fuel-core-txpool = { path = "../crates/services/txpool_v2", features = [ fuel-core-types = { path = "../crates/types", features = ["test-helpers"] } fuel-core-upgradable-executor = { path = 
"../crates/services/upgradable-executor" } futures = { workspace = true } -hex = { workspace = true } hyper = { workspace = true, features = ["server"] } insta = { workspace = true } itertools = { workspace = true } @@ -85,7 +82,6 @@ rand = { workspace = true } reqwest = { workspace = true } rstest = "0.15" serde_json = { workspace = true } -spki = "0.7.3" tempfile = { workspace = true } test-case = { workspace = true } test-helpers = { path = "./test-helpers" } @@ -101,7 +97,7 @@ url = { workspace = true } alloy-primitives = { workspace = true } alloy-provider = { workspace = true, default-features = false, features = ["reqwest-rustls-tls"] } alloy-rpc-types-eth = { workspace = true } -aws-config = "1.8.10" +aws-config = { workspace = true } flate2 = { workspace = true } fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } pretty_assertions = "1.4" diff --git a/tests/tests/trigger_integration/interval.rs b/tests/tests/trigger_integration/interval.rs index 73a2fa94d78..ad238fa8403 100644 --- a/tests/tests/trigger_integration/interval.rs +++ b/tests/tests/trigger_integration/interval.rs @@ -160,7 +160,7 @@ async fn poa_interval_produces_nonempty_blocks_at_correct_rate() { let count_now = resp.results.len(); if count_now > count_start + rounds { - break; + break } } From cb0a65a7d94c2d9a05e56868f40d3ea23b7fef9a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 25 Nov 2025 10:49:03 -0700 Subject: [PATCH 129/146] Unify db definitions, extract rpc init logic --- benches/benches/block_target_gas.rs | 2 - crates/fuel-core/src/combined_database.rs | 85 +------- crates/fuel-core/src/database.rs | 30 +-- .../database_description/block_aggregator.rs | 28 +-- crates/fuel-core/src/service.rs | 2 - crates/fuel-core/src/service/sub_services.rs | 186 ++++++++++++------ .../src/db/storage_or_remote_db.rs | 18 +- .../block_aggregator_api/src/db/table.rs | 25 +++ 8 files changed, 176 insertions(+), 200 deletions(-) diff --git 
a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index 80ad7697a83..b809e208af0 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -403,8 +403,6 @@ fn service_with_many_contracts( Default::default(), #[cfg(feature = "rpc")] Default::default(), - #[cfg(feature = "rpc")] - Default::default(), ), config.clone(), ) diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index 2af72f8050a..573ee9a3a65 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -23,10 +23,7 @@ use crate::{ }; #[cfg(feature = "rpc")] -use crate::database::database_description::block_aggregator::{ - BlockAggregatorDatabaseS3, - BlockAggregatorDatabaseStorage, -}; +use crate::database::database_description::block_aggregator::BlockAggregatorDatabase; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::db::table::LatestBlock; #[cfg(feature = "test-helpers")] @@ -76,9 +73,7 @@ pub struct CombinedDatabase { gas_price: Database, compression: Database, #[cfg(feature = "rpc")] - block_aggregation_storage: Database, - #[cfg(feature = "rpc")] - block_aggregation_s3: Database, + block_aggregation_storage: Database, } impl CombinedDatabase { @@ -89,9 +84,8 @@ impl CombinedDatabase { gas_price: Database, compression: Database, #[cfg(feature = "rpc")] block_aggregation_storage: Database< - BlockAggregatorDatabaseStorage, + BlockAggregatorDatabase, >, - #[cfg(feature = "rpc")] block_aggregation_s3: Database, ) -> Self { Self { on_chain, @@ -101,8 +95,6 @@ impl CombinedDatabase { compression, #[cfg(feature = "rpc")] block_aggregation_storage, - #[cfg(feature = "rpc")] - block_aggregation_s3, } } @@ -114,9 +106,7 @@ impl CombinedDatabase { crate::state::rocks_db::RocksDb::::prune(path)?; crate::state::rocks_db::RocksDb::::prune(path)?; #[cfg(feature = "rpc")] - crate::state::rocks_db::RocksDb::::prune(path)?; - #[cfg(feature = "rpc")] - 
crate::state::rocks_db::RocksDb::::prune(path)?; + crate::state::rocks_db::RocksDb::::prune(path)?; Ok(()) } @@ -161,17 +151,11 @@ impl CombinedDatabase { .trace_err("Failed to backup compression database")?; #[cfg(feature = "rpc")] - crate::state::rocks_db::RocksDb::::backup( + crate::state::rocks_db::RocksDb::::backup( db_dir, temp_dir, ) .trace_err("Failed to backup block aggregation storage database")?; - #[cfg(feature = "rpc")] - crate::state::rocks_db::RocksDb::::backup( - db_dir, temp_dir, - ) - .trace_err("Failed to backup block aggregation s3 database")?; - Ok(()) } @@ -227,19 +211,12 @@ impl CombinedDatabase { .trace_err("Failed to restore compression database")?; #[cfg(feature = "rpc")] - crate::state::rocks_db::RocksDb::::restore( + crate::state::rocks_db::RocksDb::::restore( temp_restore_dir, backup_dir, ) .trace_err("Failed to restore block aggregation storage database")?; - #[cfg(feature = "rpc")] - crate::state::rocks_db::RocksDb::::restore( - temp_restore_dir, - backup_dir, - ) - .trace_err("Failed to restore block aggregation s3 database")?; - Ok(()) } @@ -306,15 +283,6 @@ impl CombinedDatabase { ..database_config }, )?; - #[cfg(feature = "rpc")] - let block_aggregation_s3 = Database::open_rocksdb( - path, - state_rewind_policy, - DatabaseConfig { - max_fds, - ..database_config - }, - )?; Ok(Self { on_chain, @@ -324,8 +292,6 @@ impl CombinedDatabase { compression, #[cfg(feature = "rpc")] block_aggregation_storage, - #[cfg(feature = "rpc")] - block_aggregation_s3, }) } @@ -343,8 +309,6 @@ impl CombinedDatabase { compression: Default::default(), #[cfg(feature = "rpc")] block_aggregation_storage: Default::default(), - #[cfg(feature = "rpc")] - block_aggregation_s3: Default::default(), }) } @@ -392,8 +356,6 @@ impl CombinedDatabase { Database::in_memory(), #[cfg(feature = "rpc")] Database::in_memory(), - #[cfg(feature = "rpc")] - Database::in_memory(), ) } @@ -405,8 +367,6 @@ impl CombinedDatabase { self.compression.check_version()?; #[cfg(feature = 
"rpc")] self.block_aggregation_storage.check_version()?; - #[cfg(feature = "rpc")] - self.block_aggregation_s3.check_version()?; Ok(()) } @@ -419,28 +379,17 @@ impl CombinedDatabase { } #[cfg(feature = "rpc")] - pub fn block_aggregation_storage(&self) -> &Database { + pub fn block_aggregation_storage(&self) -> &Database { &self.block_aggregation_storage } #[cfg(feature = "rpc")] pub fn block_aggregation_storage_mut( &mut self, - ) -> &mut Database { + ) -> &mut Database { &mut self.block_aggregation_storage } - #[cfg(feature = "rpc")] - pub fn block_aggregation_s3(&self) -> &Database { - &self.block_aggregation_s3 - } - #[cfg(feature = "rpc")] - pub fn block_aggregation_s3_mut( - &mut self, - ) -> &mut Database { - &mut self.block_aggregation_s3 - } - #[cfg(any(feature = "test-helpers", test))] pub fn on_chain_mut(&mut self) -> &mut Database { &mut self.on_chain @@ -560,26 +509,10 @@ impl CombinedDatabase { target_block_height, ); - let block_aggregation_s3_height = self - .block_aggregation_s3() - .storage_as_ref::() - .get(&()) - .map_err(|e: StorageError| anyhow!(e))? 
- .map(|b| b.into_owned()); - let block_aggregation_s3_rolled_back = is_equal_or_less_than_or_none( - block_aggregation_s3_height, - target_block_height, - ); - if !block_aggregation_storage_rolled_back { self.block_aggregation_storage_mut() .rollback_to(target_block_height)?; } - - if !block_aggregation_s3_rolled_back { - self.block_aggregation_s3_mut() - .rollback_to(target_block_height)?; - } } if on_chain_height == target_block_height @@ -747,8 +680,6 @@ impl CombinedDatabase { self.compression.shutdown(); #[cfg(feature = "rpc")] self.block_aggregation_storage.shutdown(); - #[cfg(feature = "rpc")] - self.block_aggregation_s3.shutdown(); } } diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 1f427ab3669..eededc0282e 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -70,10 +70,7 @@ pub type Result = core::result::Result; // TODO: Extract `Database` and all belongs into `fuel-core-database`. #[cfg(feature = "rpc")] -use crate::database::database_description::block_aggregator::{ - BlockAggregatorDatabaseS3, - BlockAggregatorDatabaseStorage, -}; +use crate::database::database_description::block_aggregator::BlockAggregatorDatabase; #[cfg(feature = "rocksdb")] use crate::state::{ historical_rocksdb::{ @@ -454,28 +451,7 @@ impl Modifiable for Database { } #[cfg(feature = "rpc")] -impl Modifiable for Database { - fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { - // Does not need to be monotonically increasing because - // storage values are modified in parallel from different heights - commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) - } -} - -#[cfg(feature = "rpc")] -impl Database { - pub fn rollback_to(&mut self, block_height: BlockHeight) -> StorageResult<()> { - let mut tx = self.write_transaction(); - tx.storage_as_mut::() - .insert(&(), &block_height) - .map_err(|e: StorageError| anyhow!(e))?; - tx.commit().map_err(|e: StorageError| anyhow!(e))?; 
- Ok(()) - } -} - -#[cfg(feature = "rpc")] -impl Modifiable for Database { +impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { // Does not need to be monotonically increasing because // storage values are modified in parallel from different heights @@ -484,7 +460,7 @@ impl Modifiable for Database { } #[cfg(feature = "rpc")] -impl Database { +impl Database { pub fn rollback_to(&mut self, block_height: BlockHeight) -> StorageResult<()> { let mut tx = self.write_transaction(); tx.storage_as_mut::() diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs index 45bea30ea27..2357e3d5cf9 100644 --- a/crates/fuel-core/src/database/database_description/block_aggregator.rs +++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs @@ -3,9 +3,9 @@ use fuel_core_block_aggregator_api::db::table::Column; use fuel_core_types::fuel_types::BlockHeight; #[derive(Clone, Copy, Debug)] -pub struct BlockAggregatorDatabaseStorage; +pub struct BlockAggregatorDatabase; -impl DatabaseDescription for BlockAggregatorDatabaseStorage { +impl DatabaseDescription for BlockAggregatorDatabase { type Column = Column; type Height = BlockHeight; @@ -25,27 +25,3 @@ impl DatabaseDescription for BlockAggregatorDatabaseStorage { None } } - -#[derive(Clone, Copy, Debug)] -pub struct BlockAggregatorDatabaseS3; - -impl DatabaseDescription for BlockAggregatorDatabaseS3 { - type Column = Column; - type Height = BlockHeight; - - fn version() -> u32 { - 0 - } - - fn name() -> String { - "block_aggregator_s3".to_string() - } - - fn metadata_column() -> Self::Column { - Column::Metadata - } - - fn prefix(_column: &Self::Column) -> Option { - None - } -} diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 11bea62351a..9a13e00d93c 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ 
-195,8 +195,6 @@ impl FuelService { Default::default(), #[cfg(feature = "rpc")] Default::default(), - #[cfg(feature = "rpc")] - Default::default(), ); Self::from_combined_database(combined_database, config).await } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index d7aa20ef95a..a63ed23fa8a 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -26,7 +26,13 @@ use crate::service::adapters::consensus_module::poa::pre_confirmation_signature: use crate::service::adapters::rpc::ReceiptSource; use crate::{ combined_database::CombinedDatabase, - database::Database, + database::{ + Database, + database_description::{ + block_aggregator::BlockAggregatorDatabase, + on_chain::OnChain, + }, + }, fuel_core_graphql_api::{ self, Config as GraphQLConfig, @@ -63,13 +69,23 @@ use crate::{ }, }; #[cfg(feature = "rpc")] -use anyhow::anyhow; +use anyhow::{ + anyhow, + bail, +}; +use fuel_core_block_aggregator_api::{ + BlockAggregator, + api::protobuf_adapter::ProtobufAPI, + blocks::importer_and_db_source::ImporterAndDbSource, +}; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, db::storage_or_remote_db::StorageOrRemoteDB, db::table::LatestBlock, + db::table::Mode, integration::StorageMethod, + protobuf_types::Block as ProtoBlock, result::Error, }; use fuel_core_compression_service::service::new_service as new_compression_service; @@ -84,19 +100,27 @@ use fuel_core_gas_price_service::v1::{ uninitialized_task::new_gas_price_service_v1, }; use fuel_core_poa::Trigger; +use fuel_core_services::ServiceRunner; use fuel_core_storage::{ self, - transactional::AtomicView, }; #[cfg(feature = "rpc")] use fuel_core_storage::{ Error as StorageError, + StorageAsMut, StorageAsRef, + transactional::{ + AtomicView, + WriteTransaction, + }, }; #[cfg(feature = "relayer")] use 
fuel_core_types::blockchain::primitives::DaBlockHeight; use fuel_core_types::signer::SignMode; -use std::sync::Arc; +use std::{ + borrow::Cow, + sync::Arc, +}; use tokio::sync::Mutex; pub type PoAService = fuel_core_poa::Service< @@ -470,59 +494,7 @@ pub fn init_sub_services( }; #[cfg(feature = "rpc")] - let block_aggregator_rpc = { - let block_aggregator_config = config.rpc_config.clone(); - let sync_from = block_aggregator_config.sync_from.unwrap_or_default(); - let sync_from_height; - let receipts = ReceiptSource::new(database.off_chain().clone()); - let db_adapter = match &block_aggregator_config.storage_method { - StorageMethod::Local => { - let db = database.block_aggregation_storage().clone(); - let maybe_sync_from_height = db - .storage_as_ref::() - .get(&()) - .map_err(|e: StorageError| Error::DB(anyhow!(e)))? - .map(|c| *c) - .and_then(|h| h.succ()); - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - StorageOrRemoteDB::new_storage(db, sync_from) - } - StorageMethod::S3 { - bucket, - endpoint_url, - requester_pays, - } => { - let db = database.block_aggregation_s3().clone(); - let maybe_sync_from_height = db - .storage_as_ref::() - .get(&()) - .map_err(|e: StorageError| Error::DB(anyhow!(e)))? - .map(|c| *c) - .and_then(|h| h.succ()); - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - - StorageOrRemoteDB::new_s3( - db, - bucket, - *requester_pays, - endpoint_url.clone(), - sync_from, - ) - } - }; - let serializer = SerializerAdapter; - let onchain_db = database.on_chain().clone(); - let importer = importer_adapter.events_shared_result(); - fuel_core_block_aggregator_api::integration::new_service( - &block_aggregator_config, - db_adapter, - serializer, - onchain_db, - receipts, - importer, - sync_from_height, - )? 
- }; + let block_aggregator_rpc = init_rpc_server(config, &database, &importer_adapter)?; let graph_ql = fuel_core_graphql_api::api_service::new_service( *genesis_block.header().height(), @@ -602,3 +574,103 @@ pub fn init_sub_services( Ok((services, shared)) } + +#[allow(clippy::type_complexity)] +#[cfg(feature = "rpc")] +fn init_rpc_server( + config: &Config, + database: &CombinedDatabase, + importer_adapter: &BlockImporterAdapter, +) -> anyhow::Result< + ServiceRunner< + BlockAggregator< + ProtobufAPI, + StorageOrRemoteDB>, + ImporterAndDbSource, ReceiptSource>, + ProtoBlock, + >, + >, +> { + let block_aggregator_config = config.rpc_config.clone(); + let sync_from = block_aggregator_config.sync_from.unwrap_or_default(); + let sync_from_height; + let receipts = ReceiptSource::new(database.off_chain().clone()); + let db_adapter = match &block_aggregator_config.storage_method { + StorageMethod::Local => { + let mut db = database.block_aggregation_storage().clone(); + let mode = db.storage_as_ref::().get(&())?; + match mode.clone().map(Cow::into_owned) { + Some(Mode::S3) => { + bail!( + "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + ); + } + Some(Mode::Local) => { + // good, it's in the correct mode + } + None => { + let mut tx = db.write_transaction(); + tx.storage_as_mut::().insert(&(), &Mode::Local)?; + tx.commit()?; + } + } + let maybe_sync_from_height = db + .storage_as_ref::() + .get(&()) + .map_err(|e: StorageError| Error::DB(anyhow!(e)))? 
+ .map(|c| *c) + .and_then(|h| h.succ()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + StorageOrRemoteDB::new_storage(db, sync_from) + } + StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + } => { + let mut db = database.block_aggregation_storage().clone(); + let mode = db.storage_as_ref::().get(&())?; + match mode.clone().map(Cow::into_owned) { + Some(Mode::S3) => { + // good, it's in the correct mode + } + Some(Mode::Local) => { + bail!( + "Database is configured in Local mode, but S3 storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + ); + } + None => { + let mut tx = db.write_transaction(); + tx.storage_as_mut::().insert(&(), &Mode::S3)?; + tx.commit()?; + } + } + let maybe_sync_from_height = db + .storage_as_ref::() + .get(&())? + .map(|c| *c) + .and_then(|h| h.succ()); + sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + + StorageOrRemoteDB::new_s3( + db, + bucket, + *requester_pays, + endpoint_url.clone(), + sync_from, + ) + } + }; + let serializer = SerializerAdapter; + let onchain_db = database.on_chain().clone(); + let importer = importer_adapter.events_shared_result(); + fuel_core_block_aggregator_api::integration::new_service( + &block_aggregator_config, + db_adapter, + serializer, + onchain_db, + receipts, + importer, + sync_from_height, + ) +} diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index 1236ccda961..f2ac805657a 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -29,19 +29,19 @@ use fuel_core_types::fuel_types::BlockHeight; /// A union of a storage and a remote cache for the block aggregator. 
This allows both to be /// supported in production depending on the configuration -pub enum StorageOrRemoteDB { - Remote(RemoteCache), +pub enum StorageOrRemoteDB { + Remote(RemoteCache), Storage(StorageDB), } -impl StorageOrRemoteDB { +impl StorageOrRemoteDB { pub fn new_storage(storage: S, sync_from: BlockHeight) -> Self { StorageOrRemoteDB::Storage(StorageDB::new(storage, sync_from)) } #[allow(clippy::too_many_arguments)] pub fn new_s3( - storage: R, + storage: S, aws_bucket: &str, requester_pays: bool, aws_endpoint_url: Option, @@ -59,7 +59,7 @@ impl StorageOrRemoteDB { } } -impl BlockAggregatorDB for StorageOrRemoteDB +impl BlockAggregatorDB for StorageOrRemoteDB where // Storage Constraints S: Modifiable + std::fmt::Debug, @@ -72,10 +72,10 @@ where T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, StorageTransaction: StorageInspect, // Remote Constraints - R: Send + Sync, - R: Modifiable, - R: StorageInspect, - for<'b> StorageTransaction<&'b mut R>: + S: Send + Sync, + S: Modifiable, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: StorageMutate, { type Block = crate::protobuf_types::Block; diff --git a/crates/services/block_aggregator_api/src/db/table.rs b/crates/services/block_aggregator_api/src/db/table.rs index 215c5cecd1f..7cd897b1187 100644 --- a/crates/services/block_aggregator_api/src/db/table.rs +++ b/crates/services/block_aggregator_api/src/db/table.rs @@ -29,6 +29,7 @@ pub enum Column { Metadata = 0, Blocks = 1, LatestBlock = 2, + Mode = 3, } impl Column { @@ -84,12 +85,36 @@ impl TableWithBlueprint for LatestBlock { Column::LatestBlock } } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum Mode { + Local, + S3, +} + +impl Mappable for Mode { + type Key = Self::OwnedKey; + type OwnedKey = (); + type Value = Self::OwnedValue; + type OwnedValue = Mode; +} + +impl TableWithBlueprint for Mode { + type Blueprint = Plain; + type Column = Column; + fn column() -> Self::Column { + Column::Mode + } +} use 
fuel_core_storage::codec::{ postcard::Postcard, primitive::Primitive, }; use prost::Message; +use serde::{ + Deserialize, + Serialize, +}; pub struct ProtoBufCodec; From 9c6eb73bc46c046d974b1c997bd8d4a300e62a47 Mon Sep 17 00:00:00 2001 From: green Date: Tue, 25 Nov 2025 19:25:48 +0000 Subject: [PATCH 130/146] Missed deps --- crates/services/block_aggregator_api/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 4792b7a73ee..294c41e218f 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -37,7 +37,7 @@ tonic = { workspace = true } tracing = { workspace = true } [dev-dependencies] -aws-sdk-s3 = { version = "1.111.0", features = ["test-util"] } +aws-sdk-s3 = { workspace = true, features = ["test-util"] } aws-smithy-mocks = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } From c2b392961104b35bedea1728ec3fbe6ff5c82872 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 25 Nov 2025 14:42:46 -0700 Subject: [PATCH 131/146] remove protoc --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a2f4d399a3..9265adefe1a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -228,8 +228,6 @@ jobs: uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} - - name: Install Protoc - uses: arduino/setup-protoc@v3 - name: Run RPC Integration Tests run: cargo test --package fuel-core-tests --test integration_tests rpc --features rpc -- --test-threads=1 @@ -272,7 +270,6 @@ jobs: - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} - - uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: From 
d8fcda7a4e35395f97ac37190915bfdf7ebd1e66 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 25 Nov 2025 15:16:38 -0700 Subject: [PATCH 132/146] wip PR changes --- crates/fuel-core/src/service.rs | 2 +- crates/fuel-core/src/service/sub_services.rs | 11 ++++-- .../src/api/protobuf_adapter.rs | 39 ++++++++++++++++--- .../src/blocks/importer_and_db_source.rs | 9 +++++ .../importer_and_db_source/sync_service.rs | 5 ++- .../src/db/remote_cache.rs | 26 ++++--------- 6 files changed, 62 insertions(+), 30 deletions(-) diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 9a13e00d93c..34326e4abda 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -551,7 +551,7 @@ mod tests { service.start_and_await().await.unwrap(); sleep(Duration::from_secs(1)); for service in service.sub_services() { - assert_eq!(service.state(), State::Started,); + assert_eq!(service.state(), State::Started); } if i < service.sub_services().len() { diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index a63ed23fa8a..881e4faea18 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -116,7 +116,10 @@ use fuel_core_storage::{ }; #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; -use fuel_core_types::signer::SignMode; +use fuel_core_types::{ + fuel_types::BlockHeight, + signer::SignMode, +}; use std::{ borrow::Cow, sync::Arc, @@ -494,7 +497,8 @@ pub fn init_sub_services( }; #[cfg(feature = "rpc")] - let block_aggregator_rpc = init_rpc_server(config, &database, &importer_adapter)?; + let block_aggregator_rpc = + init_rpc_server(config, &database, &importer_adapter, genesis_block_height)?; let graph_ql = fuel_core_graphql_api::api_service::new_service( *genesis_block.header().height(), @@ -581,6 +585,7 @@ fn init_rpc_server( config: &Config, database: &CombinedDatabase, importer_adapter: 
&BlockImporterAdapter, + genesis_height: BlockHeight, ) -> anyhow::Result< ServiceRunner< BlockAggregator< @@ -592,7 +597,7 @@ fn init_rpc_server( >, > { let block_aggregator_config = config.rpc_config.clone(); - let sync_from = block_aggregator_config.sync_from.unwrap_or_default(); + let sync_from = block_aggregator_config.sync_from.unwrap_or(genesis_height); let sync_from_height; let receipts = ReceiptSource::new(database.off_chain().clone()); let db_adapter = match &block_aggregator_config.storage_method { diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 0c0df12ffbe..37aaf6f2697 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -37,10 +37,14 @@ use fuel_core_services::{ ServiceRunner, StateWatcher, TaskNextAction, + try_or_stop, }; use futures::StreamExt; use tokio_stream::wrappers::ReceiverStream; -use tonic::Status; +use tonic::{ + Status, + transport::server::Router, +}; #[cfg(test)] mod tests; @@ -85,7 +89,6 @@ impl BlockAggregator for Server { ))), } } - // type GetBlockRangeStream = ReceiverStream>; type GetBlockRangeStream = BoxStream>; async fn get_block_range( @@ -194,6 +197,7 @@ pub struct ServerTask { addr: std::net::SocketAddr, query_sender: tokio::sync::mpsc::Sender>, + router: Option, } #[async_trait::async_trait] impl RunnableService for ServerTask { @@ -205,19 +209,38 @@ impl RunnableService for ServerTask { fn shared_data(&self) -> Self::SharedData {} async fn into_task( - self, + mut self, _state_watcher: &StateWatcher, _params: Self::TaskParams, ) -> anyhow::Result { + self.start_router()?; Ok(self) } } -impl RunnableTask for ServerTask { - async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { +impl ServerTask { + fn start_router(&mut self) -> anyhow::Result<()> { let server = Server::new(self.query_sender.clone()); let 
router = tonic::transport::Server::builder() .add_service(ProtoBlockAggregatorServer::new(server)); + self.router = Some(router); + Ok(()) + } + + fn get_router(&mut self) -> anyhow::Result { + self.router + .take() + .ok_or_else(|| anyhow!("Router has not been initialized yet")) + } +} + +impl RunnableTask for ServerTask { + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + let router_res = self.get_router(); + let router = try_or_stop!(router_res, |e| tracing::error!( + "Failed to get router, has not been started: {:?}", + e + )); tokio::select! { res = router.serve(self.addr) => { if let Err(e) = res { @@ -245,7 +268,11 @@ impl ProtobufAPI { BlockAggregatorQuery, >(100); let addr = url.parse().unwrap(); - let _server_service = ServiceRunner::new(ServerTask { addr, query_sender }); + let _server_service = ServiceRunner::new(ServerTask { + addr, + query_sender, + router: None, + }); _server_service.start().map_err(Error::Api)?; let api = Self { _server_service, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 872bbb74a43..d2dfce06c50 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -125,6 +125,12 @@ where block_res = self.receiver.recv() => { block_res.ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) } + _ = self.importer_task.await_stop() => { + Err(Error::BlockSource(anyhow!("Importer task stopped unexpectedly"))) + } + _ = self.sync_task.await_stop() => { + Err(Error::BlockSource(anyhow!("Sync task stopped unexpectedly"))) + } importer_error = self.importer_task.await_stop() => { Err(Error::BlockSource(anyhow!("Importer task stopped unexpectedly: {:?}", importer_error))) } @@ -135,6 +141,9 @@ where } async fn drain(&mut self) -> Result<()> { + self.importer_task.stop(); + 
self.sync_task.stop(); + self.receiver.close(); Ok(()) } } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 15287180a6a..ef275bbc85d 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -145,7 +145,8 @@ where "reached stop height {}, putting task into hibernation", self.stop_height ); - futures::future::pending().await + let _ = _watcher.while_started().await; + return TaskNextAction::Stop } let next_height = self.next_height; let res = self.get_block_and_receipts(&next_height).await; @@ -166,7 +167,7 @@ where self.next_height = BlockHeight::from((*next_height).saturating_add(1)); TaskNextAction::Continue } else { - tracing::warn!("no block found at height {:?}, retrying", next_height); + tracing::error!("no block found at height {:?}, retrying", next_height); TaskNextAction::Stop } } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index db3a385a008..e9f84722a82 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -136,32 +136,23 @@ where .key(&key) .body(body) .content_encoding("gzip") - .content_type("application/octet-stream"); + .content_type("application/grpc-web"); let _ = req.send().await.map_err(Error::db_error)?; match block_event { BlockSourceEvent::NewBlock(new_height, _) => { tracing::debug!("New block: {:?}", new_height); - tracing::info!("New block: {:?}", new_height); self.highest_new_height = Some(new_height); if self.synced { - tracing::info!("Updating latest block to {:?}", new_height); + tracing::debug!("Updating latest block to {:?}", new_height); let mut tx = 
self.local_persisted.write_transaction(); tx.storage_as_mut::() .insert(&(), &new_height) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; - } else if new_height == self.sync_from { - tracing::info!("Updating latest block to {:?}", new_height); - self.synced = true; - self.highest_new_height = Some(new_height); - self.orphaned_new_height = None; - let mut tx = self.local_persisted.write_transaction(); - tx.storage_as_mut::() - .insert(&(), &new_height) - .map_err(|e| Error::DB(anyhow!(e)))?; - tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; - } else if self.height_is_next_height(new_height)? { - tracing::info!("Updating latest block to {:?}", new_height); + } else if new_height == self.sync_from + || self.height_is_next_height(new_height)? + { + tracing::debug!("Updating latest block to {:?}", new_height); self.synced = true; self.highest_new_height = Some(new_height); self.orphaned_new_height = None; @@ -177,15 +168,14 @@ where } BlockSourceEvent::OldBlock(height, _) => { tracing::debug!("Old block: {:?}", height); - tracing::info!("Old block: {:?}", height); let mut tx = self.local_persisted.write_transaction(); let latest_height = if height.succ() == self.orphaned_new_height { - tracing::info!("Marking block as synced: {:?}", height); + tracing::debug!("Marking block as synced: {:?}", height); self.orphaned_new_height = None; self.synced = true; self.highest_new_height.unwrap_or(height) } else { - tracing::info!("Updating latest block to {:?}", height); + tracing::debug!("Updating latest block to {:?}", height); height }; tx.storage_as_mut::() From 1bdcb053a81d8b8fcae9257be04c728cf8617f21 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 25 Nov 2025 16:14:57 -0700 Subject: [PATCH 133/146] More PR changes --- crates/fuel-core/src/combined_database.rs | 2 +- crates/fuel-core/src/database.rs | 19 ++++-- crates/fuel-core/src/service/sub_services.rs | 64 +++++-------------- .../src/db/remote_cache.rs | 16 +++-- 
.../src/db/remote_cache/tests.rs | 7 +- .../block_aggregator_api/src/db/storage_db.rs | 24 +++---- .../src/db/storage_db/tests.rs | 7 +- .../block_aggregator_api/src/db/table.rs | 44 ++++++------- 8 files changed, 82 insertions(+), 101 deletions(-) diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index 573ee9a3a65..633572f181c 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -503,7 +503,7 @@ impl CombinedDatabase { .storage_as_ref::() .get(&()) .map_err(|e: StorageError| anyhow!(e))? - .map(|b| b.into_owned()); + .map(|b| b.height()); let block_aggregation_storage_rolled_back = is_equal_or_less_than_or_none( block_aggregation_storage_height, target_block_height, diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index eededc0282e..a376f914310 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -35,6 +35,7 @@ use fuel_core_storage::{ Mappable, Result as StorageResult, StorageAsMut, + StorageAsRef, StorageInspect, StorageMutate, iter::{ @@ -96,6 +97,7 @@ use crate::{ use anyhow::anyhow; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::db::table::LatestBlock; +use fuel_core_block_aggregator_api::db::table::Mode; #[cfg(feature = "rpc")] use fuel_core_storage::transactional::WriteTransaction; #[cfg(feature = "rocksdb")] @@ -463,10 +465,19 @@ impl Modifiable for Database { impl Database { pub fn rollback_to(&mut self, block_height: BlockHeight) -> StorageResult<()> { let mut tx = self.write_transaction(); - tx.storage_as_mut::() - .insert(&(), &block_height) - .map_err(|e: StorageError| anyhow!(e))?; - tx.commit().map_err(|e: StorageError| anyhow!(e))?; + let mode = tx + .storage_as_ref::() + .get(&())? 
+ .map(|m| m.into_owned()); + let new = match mode { + None => None, + Some(Mode::Local(_)) => Some(Mode::new_local(block_height)), + Some(Mode::S3(_)) => Some(Mode::new_s3(block_height)), + }; + if let Some(new) = new { + tx.storage_as_mut::().insert(&(), &new)?; + tx.commit().map_err(|e: StorageError| anyhow!(e))?; + } Ok(()) } } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 881e4faea18..07c2f72c051 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -69,10 +69,7 @@ use crate::{ }, }; #[cfg(feature = "rpc")] -use anyhow::{ - anyhow, - bail, -}; +use anyhow::bail; use fuel_core_block_aggregator_api::{ BlockAggregator, api::protobuf_adapter::ProtobufAPI, @@ -86,7 +83,6 @@ use fuel_core_block_aggregator_api::{ db::table::Mode, integration::StorageMethod, protobuf_types::Block as ProtoBlock, - result::Error, }; use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ @@ -106,13 +102,8 @@ use fuel_core_storage::{ }; #[cfg(feature = "rpc")] use fuel_core_storage::{ - Error as StorageError, - StorageAsMut, StorageAsRef, - transactional::{ - AtomicView, - WriteTransaction, - }, + transactional::AtomicView, }; #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; @@ -602,29 +593,16 @@ fn init_rpc_server( let receipts = ReceiptSource::new(database.off_chain().clone()); let db_adapter = match &block_aggregator_config.storage_method { StorageMethod::Local => { - let mut db = database.block_aggregation_storage().clone(); - let mode = db.storage_as_ref::().get(&())?; - match mode.clone().map(Cow::into_owned) { - Some(Mode::S3) => { + let db = database.block_aggregation_storage().clone(); + let mode = db.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode.clone().map(Cow::into_owned) { + Some(Mode::S3(_)) => { bail!( "Database is 
 configured in S3 mode, but Local storage method was requested. If you would like to run in Local mode, then please use a clean DB"
 );
 }
- Some(Mode::Local) => {
- // good, it's in the correct mode
- }
- None => {
- let mut tx = db.write_transaction();
- tx.storage_as_mut::().insert(&(), &Mode::Local)?;
- tx.commit()?;
- }
- }
- let maybe_sync_from_height = db
- .storage_as_ref::()
- .get(&())
- .map_err(|e: StorageError| Error::DB(anyhow!(e)))?
- .map(|c| *c)
- .and_then(|h| h.succ());
+ _ => mode.map(|m| m.height()),
+ };
 sync_from_height = maybe_sync_from_height.unwrap_or(sync_from);
 StorageOrRemoteDB::new_storage(db, sync_from)
 }
@@ -633,28 +611,16 @@ fn init_rpc_server(
 endpoint_url,
 requester_pays,
 } => {
- let mut db = database.block_aggregation_storage().clone();
- let mode = db.storage_as_ref::().get(&())?;
- match mode.clone().map(Cow::into_owned) {
- Some(Mode::S3) => {
- // good, it's in the correct mode
- }
- Some(Mode::Local) => {
+ let db = database.block_aggregation_storage().clone();
+ let mode = db.storage_as_ref::().get(&())?;
+ let maybe_sync_from_height = match mode.clone().map(Cow::into_owned) {
+ Some(Mode::Local(_)) => {
 bail!(
- "Database is configured in Local mode, but S3 storage method was requested. If you would like to run in S3 mode, then please use a clean DB"
+ "Database is configured in Local mode, but S3 storage method was requested. If you would like to run in S3 mode, then please use a clean DB"
 );
 }
- None => {
- let mut tx = db.write_transaction();
- tx.storage_as_mut::().insert(&(), &Mode::S3)?;
- tx.commit()?;
- }
- }
- let maybe_sync_from_height = db
- .storage_as_ref::()
- .get(&())? 
- .map(|c| *c) - .and_then(|h| h.succ()); + _ => mode.map(|m| m.height()), + }; sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); StorageOrRemoteDB::new_s3( diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index e9f84722a82..ad1bbcf141f 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -3,7 +3,10 @@ use crate::{ blocks::BlockSourceEvent, db::{ BlockAggregatorDB, - table::LatestBlock, + table::{ + LatestBlock, + Mode, + }, }, protobuf_types::Block as ProtoBlock, result::Error, @@ -146,7 +149,7 @@ where tracing::debug!("Updating latest block to {:?}", new_height); let mut tx = self.local_persisted.write_transaction(); tx.storage_as_mut::() - .insert(&(), &new_height) + .insert(&(), &Mode::new_s3(new_height)) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } else if new_height == self.sync_from @@ -158,7 +161,7 @@ where self.orphaned_new_height = None; let mut tx = self.local_persisted.write_transaction(); tx.storage_as_mut::() - .insert(&(), &new_height) + .insert(&(), &Mode::new_s3(new_height)) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } else if self.orphaned_new_height.is_none() { @@ -179,7 +182,7 @@ where height }; tx.storage_as_mut::() - .insert(&(), &latest_height) + .insert(&(), &Mode::new_s3(latest_height)) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } @@ -219,7 +222,7 @@ where .get(&()) .map_err(|e| Error::DB(anyhow!(e)))?; - Ok(height.map(|b| b.into_owned())) + Ok(height.map(|b| b.height())) } } @@ -235,7 +238,8 @@ where .local_persisted .storage_as_ref::() .get(&()) - .map_err(|e| Error::DB(anyhow!(e)))?; + .map_err(|e| Error::DB(anyhow!(e)))? 
+ .map(|m| m.height()); if let Some(latest_height) = maybe_latest_height { Ok(latest_height.succ() == Some(height)) } else { diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index 1e261281953..d2436e139f3 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -5,7 +5,10 @@ use crate::{ BlockSerializer, serializer_adapter::SerializerAdapter, }, - db::table::Column, + db::table::{ + Column, + Mode, + }, }; use aws_sdk_s3::operation::put_object::PutObjectOutput; use aws_smithy_mocks::{ @@ -133,7 +136,7 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let mut tx = storage.write_transaction(); let starting_height = BlockHeight::from(1u32); tx.storage_as_mut::() - .insert(&(), &starting_height) + .insert(&(), &Mode::new_s3(starting_height)) .unwrap(); tx.commit().unwrap(); let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index d3f701748c2..8f57c925eb2 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -7,6 +7,7 @@ use crate::{ Blocks, Column, LatestBlock, + Mode, }, }, protobuf_types::Block as ProtoBlock, @@ -97,23 +98,17 @@ where if self.synced { let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() - .insert(&(), &new_height) + .insert(&(), &Mode::Local(new_height)) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; - } else if new_height == self.sync_from { + } else if new_height == self.sync_from + || self.height_is_next_height(new_height)? 
+ { let mut tx = self.storage.write_transaction(); self.synced = true; self.highest_new_height = Some(new_height); tx.storage_as_mut::() - .insert(&(), &new_height) - .map_err(|e| Error::DB(anyhow!(e)))?; - tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; - } else if self.height_is_next_height(new_height)? { - let mut tx = self.storage.write_transaction(); - self.synced = true; - self.highest_new_height = Some(new_height); - tx.storage_as_mut::() - .insert(&(), &new_height) + .insert(&(), &Mode::Local(new_height)) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } else if self.orphaned_new_height.is_none() { @@ -131,7 +126,7 @@ where }; let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() - .insert(&(), &latest_height) + .insert(&(), &Mode::Local(latest_height)) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; } @@ -159,7 +154,7 @@ where .get(&()) .map_err(|e| Error::DB(anyhow!(e)))?; - Ok(height.map(|b| b.into_owned())) + Ok(height.map(|b| b.height())) } } @@ -178,7 +173,8 @@ where .storage .storage_as_ref::() .get(&()) - .map_err(|e| Error::DB(anyhow!(e)))?; + .map_err(|e| Error::DB(anyhow!(e)))? 
+ .map(|m| m.height()); if let Some(latest_height) = maybe_latest_height { Ok(latest_height.succ() == Some(height)) } else { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 6ed9f2c851c..0b116b4a246 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -6,7 +6,10 @@ use crate::{ BlockSerializer, serializer_adapter::SerializerAdapter, }, - db::table::Column, + db::table::{ + Column, + Mode, + }, }; use fuel_core_storage::{ StorageAsRef, @@ -120,7 +123,7 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let mut tx = db.write_transaction(); let starting_height = BlockHeight::from(1u32); tx.storage_as_mut::() - .insert(&(), &starting_height) + .insert(&(), &Mode::Local(starting_height)) .unwrap(); tx.commit().unwrap(); let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); diff --git a/crates/services/block_aggregator_api/src/db/table.rs b/crates/services/block_aggregator_api/src/db/table.rs index 7cd897b1187..1221c59c2c1 100644 --- a/crates/services/block_aggregator_api/src/db/table.rs +++ b/crates/services/block_aggregator_api/src/db/table.rs @@ -29,7 +29,6 @@ pub enum Column { Metadata = 0, Blocks = 1, LatestBlock = 2, - Mode = 3, } impl Column { @@ -71,38 +70,41 @@ impl TableWithBlueprint for Blocks { pub struct LatestBlock; -impl Mappable for LatestBlock { - type Key = Self::OwnedKey; - type OwnedKey = (); - type Value = Self::OwnedValue; - type OwnedValue = BlockHeight; +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +pub enum Mode { + Local(BlockHeight), + S3(BlockHeight), } -impl TableWithBlueprint for LatestBlock { - type Blueprint = Plain>; - type Column = Column; - fn column() -> Self::Column { - Column::LatestBlock +impl Mode { + pub fn new_s3(height: BlockHeight) -> Self { + Self::S3(height) + } + 
+ pub fn new_local(height: BlockHeight) -> Self { + Self::Local(height) + } + + pub fn height(&self) -> BlockHeight { + match self { + Self::Local(height) => *height, + Self::S3(height) => *height, + } } -} -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum Mode { - Local, - S3, } -impl Mappable for Mode { +impl Mappable for LatestBlock { type Key = Self::OwnedKey; type OwnedKey = (); type Value = Self::OwnedValue; type OwnedValue = Mode; } -impl TableWithBlueprint for Mode { +impl TableWithBlueprint for LatestBlock { type Blueprint = Plain; type Column = Column; fn column() -> Self::Column { - Column::Mode + Column::LatestBlock } } @@ -111,10 +113,6 @@ use fuel_core_storage::codec::{ primitive::Primitive, }; use prost::Message; -use serde::{ - Deserialize, - Serialize, -}; pub struct ProtoBufCodec; From eb7f04fa8df045b0e8e771458792d0073b3004cd Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 08:01:35 -0700 Subject: [PATCH 134/146] Fix compilation for different features --- crates/fuel-core/src/database.rs | 12 ++++-- crates/fuel-core/src/service/sub_services.rs | 41 +++++++++----------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index a376f914310..8c490591e19 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -35,7 +35,6 @@ use fuel_core_storage::{ Mappable, Result as StorageResult, StorageAsMut, - StorageAsRef, StorageInspect, StorageMutate, iter::{ @@ -96,10 +95,15 @@ use crate::{ #[cfg(feature = "rpc")] use anyhow::anyhow; #[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::db::table::LatestBlock; -use fuel_core_block_aggregator_api::db::table::Mode; +use fuel_core_block_aggregator_api::db::table::{ + LatestBlock, + Mode, +}; #[cfg(feature = "rpc")] -use fuel_core_storage::transactional::WriteTransaction; +use fuel_core_storage::{ + StorageAsRef, + transactional::WriteTransaction, +}; #[cfg(feature = 
"rocksdb")] use std::path::Path; diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 07c2f72c051..1dfff39cca5 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -14,6 +14,8 @@ use super::{ config::DaCompressionMode, genesis::create_genesis_block, }; +#[cfg(feature = "rpc")] +use crate::database::database_description::on_chain::OnChain; #[cfg(feature = "relayer")] use crate::relayer::Config as RelayerConfig; #[cfg(feature = "p2p")] @@ -22,17 +24,9 @@ use crate::service::adapters::consensus_module::poa::pre_confirmation_signature: trigger::TimeBasedTrigger, tx_receiver::PreconfirmationsReceiver, }; -#[cfg(feature = "rpc")] -use crate::service::adapters::rpc::ReceiptSource; use crate::{ combined_database::CombinedDatabase, - database::{ - Database, - database_description::{ - block_aggregator::BlockAggregatorDatabase, - on_chain::OnChain, - }, - }, + database::Database, fuel_core_graphql_api::{ self, Config as GraphQLConfig, @@ -69,7 +63,13 @@ use crate::{ }, }; #[cfg(feature = "rpc")] +use crate::{ + database::database_description::block_aggregator::BlockAggregatorDatabase, + service::adapters::rpc::ReceiptSource, +}; +#[cfg(feature = "rpc")] use anyhow::bail; +#[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ BlockAggregator, api::protobuf_adapter::ProtobufAPI, @@ -96,25 +96,20 @@ use fuel_core_gas_price_service::v1::{ uninitialized_task::new_gas_price_service_v1, }; use fuel_core_poa::Trigger; +#[cfg(feature = "rpc")] use fuel_core_services::ServiceRunner; -use fuel_core_storage::{ - self, -}; #[cfg(feature = "rpc")] +use fuel_core_storage::StorageAsRef; use fuel_core_storage::{ - StorageAsRef, + self, transactional::AtomicView, }; #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; -use fuel_core_types::{ - fuel_types::BlockHeight, - signer::SignMode, -}; -use std::{ - borrow::Cow, - sync::Arc, -}; 
+#[cfg(feature = "rpc")] +use fuel_core_types::fuel_types::BlockHeight; +use fuel_core_types::signer::SignMode; +use std::sync::Arc; use tokio::sync::Mutex; pub type PoAService = fuel_core_poa::Service< @@ -595,7 +590,7 @@ fn init_rpc_server( StorageMethod::Local => { let db = database.block_aggregation_storage().clone(); let mode = db.storage_as_ref::().get(&())?; - let maybe_sync_from_height = match mode.clone().map(Cow::into_owned) { + let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { Some(Mode::S3(_)) => { bail!( "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" @@ -613,7 +608,7 @@ fn init_rpc_server( } => { let db = database.block_aggregation_storage().clone(); let mode = db.storage_as_ref::().get(&())?; - let maybe_sync_from_height = match mode.clone().map(Cow::into_owned) { + let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { Some(Mode::Local(_)) => { bail!( "Database is configured in S3 mode, but Local storage method was requested. 
If you would like to run in S3 mode, then please use a clean DB" From 760bc0e2fc3d9160bd041dc4af5ccb6a3b969d41 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 09:22:49 -0700 Subject: [PATCH 135/146] Add uninitialized task for rpc --- crates/fuel-core/src/service/sub_services.rs | 114 ++++---- .../src/db/remote_cache.rs | 38 +-- .../src/db/remote_cache/tests.rs | 23 +- .../src/db/storage_or_remote_db.rs | 22 +- .../services/block_aggregator_api/src/lib.rs | 260 ++++++++++++++---- 5 files changed, 288 insertions(+), 169 deletions(-) diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 1dfff39cca5..7c7a3cf5d85 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -68,22 +68,13 @@ use crate::{ service::adapters::rpc::ReceiptSource, }; #[cfg(feature = "rpc")] -use anyhow::bail; +use fuel_core_block_aggregator_api::blocks::importer_and_db_source::serializer_adapter::SerializerAdapter; +use fuel_core_block_aggregator_api::integration::UninitializedTask; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ - BlockAggregator, api::protobuf_adapter::ProtobufAPI, blocks::importer_and_db_source::ImporterAndDbSource, }; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::{ - blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, - db::storage_or_remote_db::StorageOrRemoteDB, - db::table::LatestBlock, - db::table::Mode, - integration::StorageMethod, - protobuf_types::Block as ProtoBlock, -}; use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, @@ -98,8 +89,6 @@ use fuel_core_gas_price_service::v1::{ use fuel_core_poa::Trigger; #[cfg(feature = "rpc")] use fuel_core_services::ServiceRunner; -#[cfg(feature = "rpc")] -use fuel_core_storage::StorageAsRef; use fuel_core_storage::{ self, transactional::AtomicView, @@ -574,69 
+563,68 @@ fn init_rpc_server( genesis_height: BlockHeight, ) -> anyhow::Result< ServiceRunner< - BlockAggregator< + UninitializedTask< ProtobufAPI, - StorageOrRemoteDB>, ImporterAndDbSource, ReceiptSource>, - ProtoBlock, + Database, >, >, > { - let block_aggregator_config = config.rpc_config.clone(); - let sync_from = block_aggregator_config.sync_from.unwrap_or(genesis_height); - let sync_from_height; + // let block_aggregator_config = config.rpc_config.clone(); + // // let sync_from = block_aggregator_config.sync_from.unwrap_or(genesis_height); + // let sync_from_height; let receipts = ReceiptSource::new(database.off_chain().clone()); - let db_adapter = match &block_aggregator_config.storage_method { - StorageMethod::Local => { - let db = database.block_aggregation_storage().clone(); - let mode = db.storage_as_ref::().get(&())?; - let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { - Some(Mode::S3(_)) => { - bail!( - "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" - ); - } - _ => mode.map(|m| m.height()), - }; - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - StorageOrRemoteDB::new_storage(db, sync_from) - } - StorageMethod::S3 { - bucket, - endpoint_url, - requester_pays, - } => { - let db = database.block_aggregation_storage().clone(); - let mode = db.storage_as_ref::().get(&())?; - let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { - Some(Mode::Local(_)) => { - bail!( - "Database is configured in S3 mode, but Local storage method was requested. 
If you would like to run in S3 mode, then please use a clean DB" - ); - } - _ => mode.map(|m| m.height()), - }; - sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - - StorageOrRemoteDB::new_s3( - db, - bucket, - *requester_pays, - endpoint_url.clone(), - sync_from, - ) - } - }; + // let db_adapter = match &block_aggregator_config.storage_method { + // StorageMethod::Local => { + // let db = database.block_aggregation_storage().clone(); + // let mode = db.storage_as_ref::().get(&())?; + // let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { + // Some(Mode::S3(_)) => { + // bail!( + // "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + // ); + // } + // _ => mode.map(|m| m.height()), + // }; + // sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + // StorageOrRemoteDB::new_storage(db, sync_from) + // } + // StorageMethod::S3 { + // bucket, + // endpoint_url, + // requester_pays, + // } => { + // let db = database.block_aggregation_storage().clone(); + // let mode = db.storage_as_ref::().get(&())?; + // let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { + // Some(Mode::Local(_)) => { + // bail!( + // "Database is configured in S3 mode, but Local storage method was requested. 
If you would like to run in S3 mode, then please use a clean DB" + // ); + // } + // _ => mode.map(|m| m.height()), + // }; + // sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + // + // StorageOrRemoteDB::new_s3( + // db, + // bucket, + // *requester_pays, + // endpoint_url.clone(), + // sync_from, + // ) + // } + // }; let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); fuel_core_block_aggregator_api::integration::new_service( - &block_aggregator_config, - db_adapter, + database.block_aggregation_storage().clone(), serializer, onchain_db, receipts, importer, - sync_from_height, + config.rpc_config.clone(), + genesis_height, ) } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index ad1bbcf141f..c9044b7df48 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -12,10 +12,6 @@ use crate::{ result::Error, }; use anyhow::anyhow; -use aws_config::{ - BehaviorVersion, - default_provider::credentials::DefaultCredentialsChain, -}; use aws_sdk_s3::{ self, Client, @@ -51,7 +47,7 @@ pub struct RemoteCache { aws_bucket: String, requester_pays: bool, aws_endpoint: Option, - client: Option, + client: Client, // track consistency between runs local_persisted: S, @@ -63,11 +59,11 @@ pub struct RemoteCache { impl RemoteCache { #[allow(clippy::too_many_arguments)] - pub fn new( + pub async fn new( aws_bucket: String, requester_pays: bool, aws_endpoint: Option, - client: Option, + client: Client, local_persisted: S, sync_from: BlockHeight, ) -> RemoteCache { @@ -83,31 +79,6 @@ impl RemoteCache { synced: false, } } - - async fn client(&mut self) -> crate::result::Result<&Client> { - self.init_client().await; - self.client - .as_ref() - .ok_or(Error::db_error(anyhow!("AWS S3 client is uninitialized"))) - } - - // 
only runs the first time - async fn init_client(&mut self) { - if self.client.is_none() { - let credentials = DefaultCredentialsChain::builder().build().await; - let sdk_config = aws_config::defaults(BehaviorVersion::latest()) - .credentials_provider(credentials) - .load() - .await; - let mut config_builder = aws_sdk_s3::config::Builder::from(&sdk_config); - if let Some(endpoint) = &self.aws_endpoint { - config_builder.set_endpoint_url(Some(endpoint.to_string())); - } - let config = config_builder.force_path_style(true).build(); - let client = aws_sdk_s3::Client::from_conf(config); - self.client = Some(client); - } - } } impl BlockAggregatorDB for RemoteCache @@ -132,8 +103,7 @@ where let zipped = gzip_bytes(&buf)?; let body = ByteStream::from(zipped); let req = self - .client() - .await? + .client .put_object() .bucket(&self.aws_bucket) .key(&key) diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index d2436e139f3..e708827277b 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -52,7 +52,7 @@ async fn store_block__happy_path() { let storage = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, Some(client), storage, sync_from); + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from).await; let block_height = BlockHeight::new(123); let block = arb_proto_block(); let block = BlockSourceEvent::OldBlock(block_height, block); @@ -71,14 +71,9 @@ async fn get_block_range__happy_path() { let aws_bucket = "test-bucket".to_string(); let storage = database(); let sync_from = BlockHeight::new(0); - let adapter = RemoteCache::new( - aws_bucket.clone(), - false, - None, - Some(client), - storage, - sync_from, - ); + let adapter = + RemoteCache::new(aws_bucket.clone(), false, None, client, storage, sync_from) 
+ .await; let start = BlockHeight::new(999); let end = BlockHeight::new(1003); @@ -115,7 +110,7 @@ async fn get_current_height__returns_highest_continuous_block() { let storage = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, Some(client), storage, sync_from); + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from).await; let expected = BlockHeight::new(123); let block = arb_proto_block(); @@ -143,7 +138,7 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let aws_bucket = "test-bucket".to_string(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, Some(client), storage, sync_from); + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from).await; let expected = BlockHeight::new(3); let block = arb_proto_block(); @@ -166,7 +161,7 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let db = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, Some(client), db, sync_from); + RemoteCache::new(aws_bucket, false, None, client, db, sync_from).await; for height in 2..=10u32 { let height = BlockHeight::from(height); @@ -198,7 +193,7 @@ async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() let db = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, Some(client), db, sync_from); + RemoteCache::new(aws_bucket, false, None, client, db, sync_from).await; let height = BlockHeight::from(0u32); let some_block = arb_proto_block(); @@ -229,7 +224,7 @@ async fn store_block__new_block_comes_first() { let db = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, Some(client), db, sync_from); + RemoteCache::new(aws_bucket, false, None, client, db, sync_from).await; // when let height = 
BlockHeight::from(0u32); diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index f2ac805657a..ce0d46d3d8f 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -13,6 +13,10 @@ use crate::{ }, result::Result, }; +use aws_config::{ + BehaviorVersion, + default_provider::credentials::DefaultCredentialsChain, +}; use fuel_core_storage::{ Error as StorageError, @@ -40,21 +44,33 @@ impl StorageOrRemoteDB { } #[allow(clippy::too_many_arguments)] - pub fn new_s3( + pub async fn new_s3( storage: S, aws_bucket: &str, requester_pays: bool, aws_endpoint_url: Option, sync_from: BlockHeight, ) -> Self { + let credentials = DefaultCredentialsChain::builder().build().await; + let sdk_config = aws_config::defaults(BehaviorVersion::latest()) + .credentials_provider(credentials) + .load() + .await; + let mut config_builder = aws_sdk_s3::config::Builder::from(&sdk_config); + if let Some(endpoint) = &aws_endpoint_url { + config_builder.set_endpoint_url(Some(endpoint.to_string())); + } + let config = config_builder.force_path_style(true).build(); + let client = aws_sdk_s3::Client::from_conf(config); let remote_cache = RemoteCache::new( aws_bucket.to_string(), requester_pays, aws_endpoint_url, - None, + client, storage, sync_from, - ); + ) + .await; StorageOrRemoteDB::Remote(remote_cache) } } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index ffdeb220800..4343d76e083 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -4,14 +4,12 @@ use crate::{ db::BlockAggregatorDB, }; use fuel_core_services::{ - RunnableService, RunnableTask, StateWatcher, TaskNextAction, }; use fuel_core_types::fuel_types::BlockHeight; use protobuf_types::Block as ProtoBlock; -use 
std::fmt::Debug; pub mod api; pub mod blocks; @@ -33,32 +31,57 @@ pub mod integration { BlockAggregatorApi, protobuf_adapter::ProtobufAPI, }, - blocks::importer_and_db_source::{ - BlockSerializer, - ImporterAndDbSource, - sync_service::TxReceipts, + block_range_response::BlockRangeResponse, + blocks::{ + BlockSource, + importer_and_db_source::{ + BlockSerializer, + ImporterAndDbSource, + sync_service::TxReceipts, + }, + }, + db::{ + storage_or_remote_db::StorageOrRemoteDB, + table::{ + Column, + LatestBlock, + Mode, + }, }, - db::BlockAggregatorDB, protobuf_types::Block as ProtoBlock, }; + use anyhow::bail; use fuel_core_services::{ + RunnableService, ServiceRunner, + StateWatcher, stream::BoxStream, }; use fuel_core_storage::{ Error as StorageError, + StorageAsRef, StorageInspect, + StorageMutate, + kv_store::KeyValueInspect, tables::{ FuelBlocks, Transactions, }, - transactional::HistoricalView, + transactional::{ + AtomicView, + HistoricalView, + Modifiable, + StorageTransaction, + }, }; use fuel_core_types::{ fuel_types::BlockHeight, services::block_importer::SharedImportResult, }; - use std::net::SocketAddr; + use std::{ + fmt::Debug, + net::SocketAddr, + }; #[derive(Clone, Debug)] pub struct Config { @@ -78,34 +101,159 @@ pub mod integration { }, } + pub struct UninitializedTask { + api: API, + block_source: Blocks, + storage: S, + config: Config, + genesis_block_height: BlockHeight, + } + + #[async_trait::async_trait] + impl RunnableService for UninitializedTask + where + Api: BlockAggregatorApi< + Block = ProtoBlock, + BlockRangeResponse = BlockRangeResponse, + >, + Blocks: BlockSource, + // Storage Constraints + S: Modifiable + Debug, + S: KeyValueInspect, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + Debug, + StorageTransaction: + StorageInspect, + // Remote Constraints + S: Send + Sync, + S: 
Modifiable, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + { + const NAME: &'static str = "BlockAggregatorService"; + type SharedData = (); + type Task = BlockAggregator, Blocks, Blocks::Block>; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData {} + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + let UninitializedTask { + api, + block_source, + storage, + config, + genesis_block_height, + } = self; + let sync_from = config.sync_from.unwrap_or(genesis_block_height); + let db_adapter = match config.storage_method { + StorageMethod::Local => { + let mode = storage.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode + .clone() + .map(|c| c.into_owned()) + { + Some(Mode::S3(_)) => { + bail!( + "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + ); + } + _ => mode.map(|m| m.height()), + }; + let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + StorageOrRemoteDB::new_storage(storage, sync_from_height) + } + StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + } => { + let mode = storage.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode + .clone() + .map(|c| c.into_owned()) + { + Some(Mode::Local(_)) => { + bail!( + "Database is configured in S3 mode, but Local storage method was requested. 
If you would like to run in S3 mode, then please use a clean DB" + ); + } + _ => mode.map(|m| m.height()), + }; + let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + + StorageOrRemoteDB::new_s3( + storage, + &bucket, + requester_pays, + endpoint_url.clone(), + sync_from_height, + ) + .await + } + }; + Ok(BlockAggregator { + query: api, + database: db_adapter, + block_source, + new_block_subscriptions: vec![], + }) + } + } + #[allow(clippy::type_complexity)] - pub fn new_service( - config: &Config, + pub fn new_service( db: DB, serializer: S, onchain_db: OnchainDB, receipts: Receipts, importer: BoxStream, - sync_from_height: BlockHeight, - ) -> anyhow::Result, - ProtoBlock, + config: Config, + genesis_block_height: BlockHeight, + ) -> anyhow::Result< + ServiceRunner< + UninitializedTask< + ProtobufAPI, + ImporterAndDbSource, + DB, + >, >, - >> + > where - DB: BlockAggregatorDB< - BlockRangeResponse = ::BlockRangeResponse, - Block = ProtoBlock, - >, - S: BlockSerializer + Clone + Send + Sync + 'static, + S: BlockSerializer + Clone + Send + Sync + 'static, OnchainDB: Send + Sync, OnchainDB: StorageInspect, OnchainDB: StorageInspect, OnchainDB: HistoricalView, Receipts: TxReceipts, + // Storage Constraints + DB: Modifiable + Debug, + DB: KeyValueInspect, + DB: StorageInspect, + for<'b> StorageTransaction<&'b mut DB>: + StorageMutate, + for<'b> StorageTransaction<&'b mut DB>: + StorageMutate, + DB: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + Debug, + StorageTransaction: + StorageInspect, + // Remote Constraints + DB: Send + Sync, + DB: Modifiable, + DB: StorageInspect, + for<'b> StorageTransaction<&'b mut DB>: + StorageMutate, { let addr = config.addr.to_string(); let api = ProtobufAPI::new(addr) @@ -114,6 +262,7 @@ pub mod integration { .latest_height() .and_then(BlockHeight::succ) .unwrap_or(BlockHeight::from(0)); + let sync_from_height = config.sync_from.unwrap_or(genesis_block_height); let block_source = 
ImporterAndDbSource::new( importer, serializer, @@ -122,13 +271,14 @@ pub mod integration { sync_from_height, db_ending_height, ); - let block_aggregator = BlockAggregator { - query: api, - database: db, + let uninitialized_task = UninitializedTask { + api, block_source, - new_block_subscriptions: Vec::new(), + storage: db, + config, + genesis_block_height, }; - let runner = ServiceRunner::new(block_aggregator); + let runner = ServiceRunner::new(uninitialized_task); Ok(runner) } } @@ -185,29 +335,29 @@ where } } -#[async_trait::async_trait] -impl RunnableService - for BlockAggregator -where - Api: - BlockAggregatorApi + Send, - DB: BlockAggregatorDB + Send, - Blocks: BlockSource, - BlockRange: Send, - ::Block: Clone + Debug + Send, -{ - const NAME: &'static str = "BlockAggregatorService"; - type SharedData = (); - type Task = Self; - type TaskParams = (); - - fn shared_data(&self) -> Self::SharedData {} - - async fn into_task( - self, - _state_watcher: &StateWatcher, - _params: Self::TaskParams, - ) -> anyhow::Result { - Ok(self) - } -} +// #[async_trait::async_trait] +// impl RunnableService +// for BlockAggregator +// where +// Api: +// BlockAggregatorApi + Send, +// DB: BlockAggregatorDB + Send, +// Blocks: BlockSource, +// BlockRange: Send, +// ::Block: Clone + Debug + Send, +// { +// const NAME: &'static str = "BlockAggregatorService"; +// type SharedData = (); +// type Task = Self; +// type TaskParams = (); +// +// fn shared_data(&self) -> Self::SharedData {} +// +// async fn into_task( +// self, +// _state_watcher: &StateWatcher, +// _params: Self::TaskParams, +// ) -> anyhow::Result { +// Ok(self) +// } +// } From 735ee5729bc494b09e07e721132bc650c2461286 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 09:23:38 -0700 Subject: [PATCH 136/146] fix featureless compilation --- crates/fuel-core/src/service/sub_services.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/fuel-core/src/service/sub_services.rs 
b/crates/fuel-core/src/service/sub_services.rs index 7c7a3cf5d85..885e895f235 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -68,13 +68,15 @@ use crate::{ service::adapters::rpc::ReceiptSource, }; #[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::blocks::importer_and_db_source::serializer_adapter::SerializerAdapter; -use fuel_core_block_aggregator_api::integration::UninitializedTask; -#[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::{ api::protobuf_adapter::ProtobufAPI, blocks::importer_and_db_source::ImporterAndDbSource, }; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, + integration::UninitializedTask, +}; use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, From 538e319fd60eac67aadea9a608e963263db2a96d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 09:42:43 -0700 Subject: [PATCH 137/146] separate out tests for local and s3 --- .github/workflows/ci.yml | 2 +- tests/Cargo.toml | 1 + tests/tests/lib.rs | 4 + tests/tests/rpc.rs | 238 ---------------------- tests/tests/rpc_s3.rs | 427 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 433 insertions(+), 239 deletions(-) create mode 100644 tests/tests/rpc_s3.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9265adefe1a..e9e675e835f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -229,7 +229,7 @@ jobs: with: toolchain: ${{ env.RUST_VERSION }} - name: Run RPC Integration Tests - run: cargo test --package fuel-core-tests --test integration_tests rpc --features rpc -- --test-threads=1 + run: cargo test --package fuel-core-tests --test integration_tests rpc_s3 --features rpc -- --test-threads=1 publish-crates-check: runs-on: buildjet-4vcpu-ubuntu-2204 diff --git a/tests/Cargo.toml 
b/tests/Cargo.toml index c09b3892d52..2cf55a94f58 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -28,6 +28,7 @@ fault-proving = [ "fuel-core-benches/fault-proving", ] rpc = ["fuel-core/rpc", "fuel-core-bin/rpc"] +local_rpc = [] [dependencies] anyhow = { workspace = true } diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index 5e6b7458d9f..05aacb115bd 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -59,7 +59,11 @@ mod relayer; #[cfg(not(feature = "only-p2p"))] mod required_fuel_block_height_extension; #[cfg(feature = "rpc")] +#[cfg(feature = "local_rpc")] mod rpc; +#[cfg(feature = "rpc")] +#[cfg(not(feature = "local_rpc"))] +mod rpc_s3; #[cfg(not(feature = "only-p2p"))] mod snapshot; diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 18efca1d964..033aaf40d0f 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -40,28 +40,8 @@ use std::io::Read; use test_helpers::client_ext::ClientExt; use tokio::time::sleep; -macro_rules! require_env_var_or_skip { - ($($var:literal),+) => { - $(if std::env::var($var).is_err() { - eprintln!("Skipping test: missing {}", $var); - return; - })+ - }; -} - -pub fn get_env_vars() -> Option<(String, String, String)> { - let aws_id = std::env::var("AWS_ACCESS_KEY_ID").ok()?; - let aws_secret = std::env::var("AWS_SECRET_ACCESS_KEY").ok()?; - let aws_region = std::env::var("AWS_REGION").ok()?; - Some((aws_id, aws_secret, aws_region)) -} - #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc__literal() { - if env_vars_are_set() { - tracing::info!("Skipping test: AWS credentials are set"); - return; - } let config = Config::local_node(); let rpc_url = config.rpc_config.addr; @@ -123,88 +103,9 @@ async fn get_block_range__can_get_serialized_block_from_rpc__literal() { ); } -#[tokio::test(flavor = "multi_thread")] -async fn get_block_range__can_get_serialized_block_from_rpc__remote() { - require_env_var_or_skip!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"); - - 
ensure_bucket_exists().await; - clean_s3_bucket().await; - let mut config = Config::local_node(); - let endpoint_url = "http://127.0.0.1:4566".to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { - bucket: "test-bucket".to_string(), - endpoint_url: Some(endpoint_url), - requester_pays: false, - }; - let rpc_url = config.rpc_config.addr; - - let srv = FuelService::from_database(Database::default(), config.clone()) - .await - .unwrap(); - - let graphql_client = FuelClient::from(srv.bound_address); - - let tx = Transaction::default_test_tx(); - let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); - - let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) - .await - .expect("could not connect to server"); - - let expected_block = graphql_client - .full_block_by_height(1) - .await - .unwrap() - .unwrap(); - let expected_header = expected_block.header; - let expected_height = BlockHeight::new(expected_header.height.0); - - // when - let request = ProtoBlockRangeRequest { start: 1, end: 1 }; - let remote_info = if let Some(ProtoPayload::Remote(remote_info)) = rpc_client - .get_block_range(request) - .await - .unwrap() - .into_inner() - .next() - .await - .unwrap() - .unwrap() - .payload - { - remote_info - } else { - panic!("expected literal block payload"); - }; - - // then - let key = block_height_to_key(&expected_height); - let expected = ProtoRemoteBlockResponse { - location: Some(Location::S3(RemoteS3Bucket { - bucket: "test-bucket".to_string(), - key, - requester_pays: false, - endpoint: Some("http://127.0.0.1:4566".to_string()), - })), - }; - assert_eq!(expected, remote_info); - clean_s3_bucket().await; -} - #[tokio::test(flavor = "multi_thread")] async fn get_block_height__can_get_value_from_rpc() { let mut config = Config::local_node(); - if get_env_vars().is_some() { - ensure_bucket_exists().await; - clean_s3_bucket().await; - let endpoint_url = 
"http://127.0.0.1:4566".to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { - bucket: "test-bucket".to_string(), - endpoint_url: Some(endpoint_url), - requester_pays: false, - }; - } let rpc_url = config.rpc_config.addr; // given @@ -233,11 +134,6 @@ async fn get_block_height__can_get_value_from_rpc() { .into_inner() .height; - // cleanup - if get_env_vars().is_some() { - clean_s3_bucket().await; - } - // then assert_eq!(expected_height, actual_height); } @@ -245,16 +141,6 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { let mut config = Config::local_node(); - if get_env_vars().is_some() { - ensure_bucket_exists().await; - clean_s3_bucket().await; - let endpoint_url = "http://127.0.0.1:4566".to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { - bucket: "test-bucket".to_string(), - endpoint_url: Some(endpoint_url), - requester_pays: false, - }; - } let rpc_url = config.rpc_config.addr; @@ -314,128 +200,4 @@ async fn new_block_subscription__can_get_expect_block() { "should have a return receipt, received: {:?}", receipts ); - - if get_env_vars().is_some() { - clean_s3_bucket().await; - } -} - -fn env_vars_are_set() -> bool { - std::env::var("AWS_ACCESS_KEY_ID").is_ok() - && std::env::var("AWS_SECRET_ACCESS_KEY").is_ok() -} - -async fn aws_client() -> Client { - let credentials = DefaultCredentialsChain::builder().build().await; - let _aws_region = - std::env::var("AWS_REGION").expect("AWS_REGION env var must be set"); - let sdk_config = aws_config::defaults(BehaviorVersion::latest()) - .credentials_provider(credentials) - .endpoint_url("http://127.0.0.1:4566") - .load() - .await; - let builder = aws_sdk_s3::config::Builder::from(&sdk_config); - let config = builder.force_path_style(true).build(); - Client::from_conf(config) -} - -async fn get_block_from_s3_bucket() -> Bytes { - let client = aws_client().await; - let bucket = 
"test-bucket".to_string(); - let key = block_height_to_key(&BlockHeight::new(1)); - tracing::info!("getting block from bucket: {} with key {}", bucket, key); - let req = client.get_object().bucket(&bucket).key(&key); - let obj = req.send().await.unwrap(); - let message = format!( - "should be able to get block from bucket: {} with key {}", - bucket, key - ); - obj.body.collect().await.expect(&message).into_bytes() -} - -async fn ensure_bucket_exists() { - let client = aws_client().await; - let bucket = "test-bucket"; - let req = client.create_bucket().bucket(bucket); - let expect_message = format!("should be able to create bucket: {}", bucket); - let _ = req.send().await.expect(&expect_message); -} - -async fn clean_s3_bucket() { - let client = aws_client().await; - let bucket = "test-bucket"; - let req = client.list_objects().bucket(bucket); - let objs = req.send().await.unwrap(); - for obj in objs.contents.unwrap_or_default() { - let req = client.delete_object().bucket(bucket).key(obj.key.unwrap()); - let _ = req.send().await.unwrap(); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn get_block_range__can_get_from_remote_s3_bucket() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .try_init(); - - require_env_var_or_skip!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); - ensure_bucket_exists().await; - clean_s3_bucket().await; - - // given - let mut config = Config::local_node(); - let endpoint_url = "http://127.0.0.1:4566".to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { - bucket: "test-bucket".to_string(), - endpoint_url: Some(endpoint_url), - requester_pays: false, - }; - let srv = FuelService::from_database(Database::default(), config.clone()) - .await - .unwrap(); - let graphql_client = FuelClient::from(srv.bound_address); - let tx = Transaction::default_test_tx(); - - // when - let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); - - 
sleep(std::time::Duration::from_secs(1)).await; - - // then - let zipped_data = get_block_from_s3_bucket().await; - let data = unzip_bytes(&zipped_data); - // can deserialize - let actual_proto: ProtoBlock = prost::Message::decode(data.as_ref()).unwrap(); - let (_, receipts) = - fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); - assert!( - matches!( - receipts[1], - Receipt::ScriptResult { - result: ScriptExecutionResult::Success, - .. - } - ), - "should have a script result receipt, received: {:?}", - receipts - ); - assert!( - matches!(receipts[0], Receipt::Return { .. }), - "should have a return receipt, received: {:?}", - receipts - ); - - // cleanup - clean_s3_bucket().await; - drop(srv); - tracing::info!( - "Successfully ran test: get_block_range__can_get_from_remote_s3_bucket" - ); -} - -fn unzip_bytes(bytes: &[u8]) -> Vec { - let mut decoder = GzDecoder::new(bytes); - let mut output = Vec::new(); - decoder.read_to_end(&mut output).unwrap(); - output } diff --git a/tests/tests/rpc_s3.rs b/tests/tests/rpc_s3.rs new file mode 100644 index 00000000000..7b00911d48e --- /dev/null +++ b/tests/tests/rpc_s3.rs @@ -0,0 +1,427 @@ +#![allow(non_snake_case)] + +use aws_config::{ + BehaviorVersion, + default_provider::credentials::DefaultCredentialsChain, +}; +use aws_sdk_s3::Client; +use flate2::read::GzDecoder; +use fuel_core::{ + database::Database, + service::{ + Config, + FuelService, + }, +}; +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf, + db::remote_cache::block_height_to_key, + integration::StorageMethod, + protobuf_types::{ + Block as ProtoBlock, + BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + RemoteBlockResponse as ProtoRemoteBlockResponse, + RemoteS3Bucket, + block_aggregator_client::BlockAggregatorClient as 
ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, + remote_block_response::Location, + }, +}; +use fuel_core_client::client::FuelClient; +use fuel_core_types::{ + fuel_tx::*, + fuel_types::BlockHeight, +}; +use futures::StreamExt; +use prost::bytes::Bytes; +use std::io::Read; +use test_helpers::client_ext::ClientExt; +use tokio::time::sleep; + +const AWS_ENDPOINT_URL: &str = "http://127.0.0.1:4566"; + +macro_rules! require_env_var_or_panic { + ($($var:literal),+) => { + $(if std::env::var($var).is_err() { + panic!("missing env var: {}", $var); + })+ + }; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_serialized_block_from_rpc__literal() { + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + + let config = Config::local_node(); + let rpc_url = config.rpc_config.addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let proto_block = if let Some(ProtoPayload::Literal(block)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + block + } else { + panic!("expected literal block payload"); + }; + + let (actual_block, receipts) = + fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); + let actual_height = actual_block.header().height(); + + // then + let expected_height = BlockHeight::new(1); + assert_eq!(&expected_height, actual_height); + + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: 
ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. }), + "should have a return receipt, received: {:?}", + receipts + ); +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_serialized_block_from_rpc__remote() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let mut config = Config::local_node(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(AWS_ENDPOINT_URL.to_string()), + requester_pays: false, + }; + let rpc_url = config.rpc_config.addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let expected_header = expected_block.header; + let expected_height = BlockHeight::new(expected_header.height.0); + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let remote_info = if let Some(ProtoPayload::Remote(remote_info)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + remote_info + } else { + panic!("expected literal block payload"); + }; + + // then + let key = block_height_to_key(&expected_height); + let expected = ProtoRemoteBlockResponse { + location: Some(Location::S3(RemoteS3Bucket { + bucket: "test-bucket".to_string(), + 
key, + requester_pays: false, + endpoint: Some(AWS_ENDPOINT_URL.to_string()), + })), + }; + assert_eq!(expected, remote_info); + + // cleanup + clean_s3_bucket().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_height__can_get_value_from_rpc() { + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + + // setup + let mut config = Config::local_node(); + ensure_bucket_exists().await; + clean_s3_bucket().await; + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let rpc_url = config.rpc_config.addr; + + // given + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + sleep(std::time::Duration::from_secs(1)).await; + let request = ProtoBlockHeightRequest {}; + let expected_height = Some(1); + let actual_height = rpc_client + .get_synced_block_height(request) + .await + .unwrap() + .into_inner() + .height; + + // then + assert_eq!(expected_height, actual_height); + + // cleanup + clean_s3_bucket().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn new_block_subscription__can_get_expect_block() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + let mut config = Config::local_node(); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + 
endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + + let rpc_url = config.rpc_config.addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let request = ProtoNewBlockSubscriptionRequest {}; + let mut stream = rpc_client + .new_block_subscription(request) + .await + .unwrap() + .into_inner(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + // when + let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) + .await + .unwrap(); + let proto_block = + if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { + block + } else { + panic!("expected literal block payload"); + }; + + let (actual_block, receipts) = + fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); + let actual_height = actual_block.header().height(); + + // then + let expected_height = BlockHeight::new(1); + assert_eq!(&expected_height, actual_height); + + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. 
}), + "should have a return receipt, received: {:?}", + receipts + ); + + clean_s3_bucket().await; +} + +async fn aws_client() -> Client { + let credentials = DefaultCredentialsChain::builder().build().await; + let _aws_region = + std::env::var("AWS_REGION").expect("AWS_REGION env var must be set"); + let sdk_config = aws_config::defaults(BehaviorVersion::latest()) + .credentials_provider(credentials) + .endpoint_url(AWS_ENDPOINT_URL) + .load() + .await; + let builder = aws_sdk_s3::config::Builder::from(&sdk_config); + let config = builder.force_path_style(true).build(); + Client::from_conf(config) +} + +async fn get_block_from_s3_bucket() -> Bytes { + let client = aws_client().await; + let bucket = "test-bucket".to_string(); + let key = block_height_to_key(&BlockHeight::new(1)); + tracing::info!("getting block from bucket: {} with key {}", bucket, key); + let req = client.get_object().bucket(&bucket).key(&key); + let obj = req.send().await.unwrap(); + let message = format!( + "should be able to get block from bucket: {} with key {}", + bucket, key + ); + obj.body.collect().await.expect(&message).into_bytes() +} + +async fn ensure_bucket_exists() { + let client = aws_client().await; + let bucket = "test-bucket"; + let req = client.create_bucket().bucket(bucket); + let expect_message = format!("should be able to create bucket: {}", bucket); + let _ = req.send().await.expect(&expect_message); +} + +async fn clean_s3_bucket() { + let client = aws_client().await; + let bucket = "test-bucket"; + let req = client.list_objects().bucket(bucket); + let objs = req.send().await.unwrap(); + for obj in objs.contents.unwrap_or_default() { + let req = client.delete_object().bucket(bucket).key(obj.key.unwrap()); + let _ = req.send().await.unwrap(); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_from_remote_s3_bucket() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + + 
ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let mut config = Config::local_node(); + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + config.rpc_config.storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + let graphql_client = FuelClient::from(srv.bound_address); + let tx = Transaction::default_test_tx(); + + // when + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + sleep(std::time::Duration::from_secs(1)).await; + + // then + let zipped_data = get_block_from_s3_bucket().await; + let data = unzip_bytes(&zipped_data); + let actual_proto: ProtoBlock = prost::Message::decode(data.as_ref()).unwrap(); + let (_, receipts) = + fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. 
}), + "should have a return receipt, received: {:?}", + receipts + ); + + // cleanup + clean_s3_bucket().await; + drop(srv); + tracing::info!( + "Successfully ran test: get_block_range__can_get_from_remote_s3_bucket" + ); +} + +fn unzip_bytes(bytes: &[u8]) -> Vec { + let mut decoder = GzDecoder::new(bytes); + let mut output = Vec::new(); + decoder.read_to_end(&mut output).unwrap(); + output +} From 12926ae401599fabdb572a885c9a270ac47db89f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 09:51:02 -0700 Subject: [PATCH 138/146] cleanup imports --- tests/tests/rpc.rs | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 033aaf40d0f..1ab02f018bf 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,11 +1,5 @@ #![allow(non_snake_case)] -use aws_config::{ - BehaviorVersion, - default_provider::credentials::DefaultCredentialsChain, -}; -use aws_sdk_s3::Client; -use flate2::read::GzDecoder; use fuel_core::{ database::Database, service::{ @@ -15,18 +9,12 @@ use fuel_core::{ }; use fuel_core_block_aggregator_api::{ blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf, - db::remote_cache::block_height_to_key, - integration::StorageMethod, protobuf_types::{ - Block as ProtoBlock, BlockHeightRequest as ProtoBlockHeightRequest, BlockRangeRequest as ProtoBlockRangeRequest, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - RemoteBlockResponse as ProtoRemoteBlockResponse, - RemoteS3Bucket, block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, block_response::Payload as ProtoPayload, - remote_block_response::Location, }, }; use fuel_core_client::client::FuelClient; @@ -35,9 +23,6 @@ use fuel_core_types::{ fuel_types::BlockHeight, }; use futures::StreamExt; -use prost::bytes::Bytes; -use std::io::Read; -use test_helpers::client_ext::ClientExt; use tokio::time::sleep; #[tokio::test(flavor = 
"multi_thread")] @@ -105,7 +90,7 @@ async fn get_block_range__can_get_serialized_block_from_rpc__literal() { #[tokio::test(flavor = "multi_thread")] async fn get_block_height__can_get_value_from_rpc() { - let mut config = Config::local_node(); + let config = Config::local_node(); let rpc_url = config.rpc_config.addr; // given @@ -140,7 +125,7 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { - let mut config = Config::local_node(); + let config = Config::local_node(); let rpc_url = config.rpc_config.addr; From a525e53c2abf4cce0bdffc66bcbbfec69c3cb90c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 11:16:32 -0700 Subject: [PATCH 139/146] Make RPC optional --- bin/fuel-core/src/cli/run.rs | 4 +- crates/fuel-core/src/p2p_test_helpers.rs | 8 +- crates/fuel-core/src/service/config.rs | 41 ++++++--- crates/fuel-core/src/service/sub_services.rs | 64 ++++---------- tests/tests/rpc.rs | 13 ++- tests/tests/rpc_s3.rs | 91 +++----------------- 6 files changed, 71 insertions(+), 150 deletions(-) diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index c229ed0cb2f..78f473786f1 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -295,7 +295,7 @@ pub struct Command { #[clap(flatten)] #[cfg(feature = "rpc")] - pub rpc_args: rpc::RpcArgs, + pub rpc_args: Option, #[cfg_attr(feature = "p2p", clap(flatten))] #[cfg(feature = "p2p")] @@ -461,7 +461,7 @@ impl Command { }; #[cfg(feature = "rpc")] - let rpc_config = rpc_args.into_config(); + let rpc_config = rpc_args.map(|args| args.into_config()); let trigger: Trigger = poa_trigger.into(); diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index 508764d3c76..71daef2fa21 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -1,7 +1,5 @@ //! 
# Helpers for creating networks of nodes -#[cfg(feature = "rpc")] -use crate::service::config::free_local_addr; use crate::{ chain_config::{ CoinConfig, @@ -94,6 +92,8 @@ pub struct CustomizeConfig { max_functional_peers_connected: Option, max_discovery_peers_connected: Option, subscribe_to_transactions: Option, + #[cfg(feature = "rpc")] + rpc_config: Option, } impl CustomizeConfig { @@ -103,6 +103,8 @@ impl CustomizeConfig { max_functional_peers_connected: None, max_discovery_peers_connected: None, subscribe_to_transactions: None, + #[cfg(feature = "rpc")] + rpc_config: None, } } @@ -502,7 +504,7 @@ pub fn make_config( node_config.name = name.clone(); #[cfg(feature = "rpc")] { - node_config.rpc_config.addr = free_local_addr(); + node_config.rpc_config = config_overrides.rpc_config; } if let Some(min_gas_price) = config_overrides.min_exec_gas_price { diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index dc8a81f65d1..dbe53d5a8ae 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -34,11 +34,6 @@ use strum_macros::{ #[cfg(feature = "parallel-executor")] use std::num::NonZeroUsize; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::integration::StorageMethod; -#[cfg(feature = "rpc")] -use fuel_core_types::fuel_types::BlockHeight; - #[cfg(feature = "relayer")] use fuel_core_relayer::Config as RelayerConfig; @@ -48,11 +43,13 @@ use fuel_core_p2p::config::{ NotInitialized, }; +use fuel_core_block_aggregator_api::integration::StorageMethod; #[cfg(feature = "test-helpers")] use fuel_core_chain_config::{ ChainConfig, StateConfig, }; +use fuel_core_types::fuel_types::BlockHeight; #[cfg(feature = "test-helpers")] use std::net::{ SocketAddr, @@ -88,7 +85,7 @@ pub struct Config { pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, #[cfg(feature = "rpc")] - pub rpc_config: fuel_core_block_aggregator_api::integration::Config, + pub rpc_config: 
Option, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] @@ -127,6 +124,32 @@ impl Config { Self::local_node_with_state_config(StateConfig::local_testnet()) } + #[cfg(feature = "test-helpers")] + #[cfg(feature = "rpc")] + pub fn local_node_with_rpc() -> Self { + let mut config = Self::local_node_with_state_config(StateConfig::local_testnet()); + let rpc_config = fuel_core_block_aggregator_api::integration::Config { + addr: free_local_addr(), + sync_from: Some(BlockHeight::new(0)), + storage_method: StorageMethod::Local, + }; + config.rpc_config = Some(rpc_config); + config + } + + #[cfg(feature = "test-helpers")] + #[cfg(feature = "rpc")] + pub fn local_node_with_rpc_and_storage_method(storage_method: StorageMethod) -> Self { + let mut config = Self::local_node_with_state_config(StateConfig::local_testnet()); + let rpc_config = fuel_core_block_aggregator_api::integration::Config { + addr: free_local_addr(), + sync_from: Some(BlockHeight::new(0)), + storage_method, + }; + config.rpc_config = Some(rpc_config); + config + } + #[cfg(feature = "test-helpers")] pub fn local_node_with_state_config(state_config: StateConfig) -> Self { Self::local_node_with_configs(ChainConfig::local_testnet(), state_config) @@ -176,11 +199,7 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); #[cfg(feature = "rpc")] - let rpc_config = fuel_core_block_aggregator_api::integration::Config { - addr: free_local_addr(), - sync_from: Some(BlockHeight::from(0)), - storage_method: StorageMethod::Local, - }; + let rpc_config = None; Self { graphql_config: GraphQLConfig { diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 885e895f235..2fbb153bfbf 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -474,8 +474,16 @@ pub fn init_sub_services( }; #[cfg(feature = "rpc")] - let 
block_aggregator_rpc = - init_rpc_server(config, &database, &importer_adapter, genesis_block_height)?; + let block_aggregator_rpc = if let Some(config) = config.rpc_config.as_ref() { + Some(init_rpc_server( + config, + &database, + &importer_adapter, + genesis_block_height, + )?) + } else { + None + }; let graph_ql = fuel_core_graphql_api::api_service::new_service( *genesis_block.header().height(), @@ -542,7 +550,9 @@ pub fn init_sub_services( services.push(Box::new(graphql_worker)); services.push(Box::new(tx_status_manager)); #[cfg(feature = "rpc")] - services.push(Box::new(block_aggregator_rpc)); + if let Some(block_aggregator_rpc) = block_aggregator_rpc { + services.push(Box::new(block_aggregator_rpc)); + } if let Some(compression_service) = compression_service { services.push(Box::new(compression_service)); @@ -559,7 +569,7 @@ pub fn init_sub_services( #[allow(clippy::type_complexity)] #[cfg(feature = "rpc")] fn init_rpc_server( - config: &Config, + config: &fuel_core_block_aggregator_api::integration::Config, database: &CombinedDatabase, importer_adapter: &BlockImporterAdapter, genesis_height: BlockHeight, @@ -572,51 +582,7 @@ fn init_rpc_server( >, >, > { - // let block_aggregator_config = config.rpc_config.clone(); - // // let sync_from = block_aggregator_config.sync_from.unwrap_or(genesis_height); - // let sync_from_height; let receipts = ReceiptSource::new(database.off_chain().clone()); - // let db_adapter = match &block_aggregator_config.storage_method { - // StorageMethod::Local => { - // let db = database.block_aggregation_storage().clone(); - // let mode = db.storage_as_ref::().get(&())?; - // let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { - // Some(Mode::S3(_)) => { - // bail!( - // "Database is configured in S3 mode, but Local storage method was requested. 
If you would like to run in S3 mode, then please use a clean DB" - // ); - // } - // _ => mode.map(|m| m.height()), - // }; - // sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - // StorageOrRemoteDB::new_storage(db, sync_from) - // } - // StorageMethod::S3 { - // bucket, - // endpoint_url, - // requester_pays, - // } => { - // let db = database.block_aggregation_storage().clone(); - // let mode = db.storage_as_ref::().get(&())?; - // let maybe_sync_from_height = match mode.clone().map(|c| c.into_owned()) { - // Some(Mode::Local(_)) => { - // bail!( - // "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" - // ); - // } - // _ => mode.map(|m| m.height()), - // }; - // sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); - // - // StorageOrRemoteDB::new_s3( - // db, - // bucket, - // *requester_pays, - // endpoint_url.clone(), - // sync_from, - // ) - // } - // }; let serializer = SerializerAdapter; let onchain_db = database.on_chain().clone(); let importer = importer_adapter.events_shared_result(); @@ -626,7 +592,7 @@ fn init_rpc_server( onchain_db, receipts, importer, - config.rpc_config.clone(), + config.clone(), genesis_height, ) } diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index 1ab02f018bf..9aea72823e6 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -27,8 +27,8 @@ use tokio::time::sleep; #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc__literal() { - let config = Config::local_node(); - let rpc_url = config.rpc_config.addr; + let config = Config::local_node_with_rpc(); + let rpc_url = config.rpc_config.clone().unwrap().addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -90,8 +90,8 @@ async fn get_block_range__can_get_serialized_block_from_rpc__literal() { #[tokio::test(flavor = "multi_thread")] async fn 
get_block_height__can_get_value_from_rpc() { - let config = Config::local_node(); - let rpc_url = config.rpc_config.addr; + let config = Config::local_node_with_rpc(); + let rpc_url = config.rpc_config.clone().unwrap().addr; // given let srv = FuelService::from_database(Database::default(), config.clone()) @@ -125,9 +125,8 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { - let config = Config::local_node(); - - let rpc_url = config.rpc_config.addr; + let config = Config::local_node_with_rpc(); + let rpc_url = config.rpc_config.clone().unwrap().addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await diff --git a/tests/tests/rpc_s3.rs b/tests/tests/rpc_s3.rs index 7b00911d48e..adeeb5676df 100644 --- a/tests/tests/rpc_s3.rs +++ b/tests/tests/rpc_s3.rs @@ -50,71 +50,6 @@ macro_rules! require_env_var_or_panic { }; } -#[tokio::test(flavor = "multi_thread")] -async fn get_block_range__can_get_serialized_block_from_rpc__literal() { - require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); - - let config = Config::local_node(); - let rpc_url = config.rpc_config.addr; - - let srv = FuelService::from_database(Database::default(), config.clone()) - .await - .unwrap(); - - let graphql_client = FuelClient::from(srv.bound_address); - - let tx = Transaction::default_test_tx(); - let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); - - let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) - .await - .expect("could not connect to server"); - - // when - let request = ProtoBlockRangeRequest { start: 1, end: 1 }; - let proto_block = if let Some(ProtoPayload::Literal(block)) = rpc_client - .get_block_range(request) - .await - .unwrap() - .into_inner() - .next() - .await - .unwrap() - .unwrap() - .payload - { - block - } else { - panic!("expected 
literal block payload"); - }; - - let (actual_block, receipts) = - fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); - let actual_height = actual_block.header().height(); - - // then - let expected_height = BlockHeight::new(1); - assert_eq!(&expected_height, actual_height); - - assert!( - matches!( - receipts[1], - Receipt::ScriptResult { - result: ScriptExecutionResult::Success, - .. - } - ), - "should have a script result receipt, received: {:?}", - receipts - ); - assert!( - matches!(receipts[0], Receipt::Return { .. }), - "should have a return receipt, received: {:?}", - receipts - ); -} - #[tokio::test(flavor = "multi_thread")] async fn get_block_range__can_get_serialized_block_from_rpc__remote() { // setup @@ -123,13 +58,14 @@ async fn get_block_range__can_get_serialized_block_from_rpc__remote() { clean_s3_bucket().await; // given - let mut config = Config::local_node(); - config.rpc_config.storage_method = StorageMethod::S3 { + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3 { bucket: "test-bucket".to_string(), - endpoint_url: Some(AWS_ENDPOINT_URL.to_string()), + endpoint_url: Some(endpoint_url), requester_pays: false, }; - let rpc_url = config.rpc_config.addr; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -192,16 +128,16 @@ async fn get_block_height__can_get_value_from_rpc() { require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); // setup - let mut config = Config::local_node(); ensure_bucket_exists().await; clean_s3_bucket().await; let endpoint_url = AWS_ENDPOINT_URL.to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { + let storage_method = StorageMethod::S3 { bucket: "test-bucket".to_string(), endpoint_url: Some(endpoint_url), requester_pays: false, }; - let rpc_url = 
config.rpc_config.addr; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; // given let srv = FuelService::from_database(Database::default(), config.clone()) @@ -240,19 +176,19 @@ async fn get_block_height__can_get_value_from_rpc() { async fn new_block_subscription__can_get_expect_block() { // setup require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); - let mut config = Config::local_node(); ensure_bucket_exists().await; clean_s3_bucket().await; // given let endpoint_url = AWS_ENDPOINT_URL.to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { + let storage_method = StorageMethod::S3 { bucket: "test-bucket".to_string(), endpoint_url: Some(endpoint_url), requester_pays: false, }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); - let rpc_url = config.rpc_config.addr; + let rpc_url = config.rpc_config.clone().unwrap().addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -365,18 +301,17 @@ async fn clean_s3_bucket() { async fn get_block_range__can_get_from_remote_s3_bucket() { // setup require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); - ensure_bucket_exists().await; clean_s3_bucket().await; // given - let mut config = Config::local_node(); let endpoint_url = AWS_ENDPOINT_URL.to_string(); - config.rpc_config.storage_method = StorageMethod::S3 { + let storage_method = StorageMethod::S3 { bucket: "test-bucket".to_string(), endpoint_url: Some(endpoint_url), requester_pays: false, }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); let srv = FuelService::from_database(Database::default(), config.clone()) .await .unwrap(); From 7fe073fb002c9f19b6a19f232c8032d20ecffa62 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 11:49:07 -0700 Subject: [PATCH 140/146] Add no-publish option for s3 --- 
bin/fuel-core/src/cli/run/rpc.rs | 19 ++ .../src/db/remote_cache.rs | 23 +- .../src/db/remote_cache/tests.rs | 25 ++- .../src/db/storage_or_remote_db.rs | 2 + .../services/block_aggregator_api/src/lib.rs | 43 ++++ tests/tests/rpc_s3.rs | 196 +++++++++++------- 6 files changed, 211 insertions(+), 97 deletions(-) diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index a367443112f..723e6b3f1ef 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -30,6 +30,14 @@ pub enum StorageMethod { #[clap(long = "requester_pays", env, default_value = "false")] requester_pays: bool, }, + S3NoPublish { + #[clap(long = "bucket", env)] + bucket: String, + #[clap(long = "endpoint_url", env)] + endpoint_url: Option, + #[clap(long = "requester_pays", env, default_value = "false")] + requester_pays: bool, + }, } impl RpcArgs { @@ -57,6 +65,17 @@ impl From for fuel_core_block_aggregator_api::integration::Storag endpoint_url, requester_pays, }, + StorageMethod::S3NoPublish { + bucket, + endpoint_url, + requester_pays, + } => { + fuel_core_block_aggregator_api::integration::StorageMethod::S3NoPublish { + bucket, + endpoint_url, + requester_pays, + } + } } } } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index c9044b7df48..26a4c64449c 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -48,6 +48,7 @@ pub struct RemoteCache { requester_pays: bool, aws_endpoint: Option, client: Client, + publishes_blocks: bool, // track consistency between runs local_persisted: S, @@ -66,12 +67,14 @@ impl RemoteCache { client: Client, local_persisted: S, sync_from: BlockHeight, + publish: bool, ) -> RemoteCache { RemoteCache { aws_bucket, requester_pays, aws_endpoint, client, + publishes_blocks: publish, local_persisted, sync_from, highest_new_height: None, @@ -102,15 
+105,17 @@ where block.encode(&mut buf).map_err(Error::db_error)?; let zipped = gzip_bytes(&buf)?; let body = ByteStream::from(zipped); - let req = self - .client - .put_object() - .bucket(&self.aws_bucket) - .key(&key) - .body(body) - .content_encoding("gzip") - .content_type("application/grpc-web"); - let _ = req.send().await.map_err(Error::db_error)?; + if self.publishes_blocks { + let req = self + .client + .put_object() + .bucket(&self.aws_bucket) + .key(&key) + .body(body) + .content_encoding("gzip") + .content_type("application/grpc-web"); + let _ = req.send().await.map_err(Error::db_error)?; + } match block_event { BlockSourceEvent::NewBlock(new_height, _) => { tracing::debug!("New block: {:?}", new_height); diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs index e708827277b..ec444a2bb70 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -52,7 +52,7 @@ async fn store_block__happy_path() { let storage = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, client, storage, sync_from).await; + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from, true).await; let block_height = BlockHeight::new(123); let block = arb_proto_block(); let block = BlockSourceEvent::OldBlock(block_height, block); @@ -71,9 +71,16 @@ async fn get_block_range__happy_path() { let aws_bucket = "test-bucket".to_string(); let storage = database(); let sync_from = BlockHeight::new(0); - let adapter = - RemoteCache::new(aws_bucket.clone(), false, None, client, storage, sync_from) - .await; + let adapter = RemoteCache::new( + aws_bucket.clone(), + false, + None, + client, + storage, + sync_from, + true, + ) + .await; let start = BlockHeight::new(999); let end = BlockHeight::new(1003); @@ -110,7 +117,7 @@ async fn 
get_current_height__returns_highest_continuous_block() { let storage = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, client, storage, sync_from).await; + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from, true).await; let expected = BlockHeight::new(123); let block = arb_proto_block(); @@ -138,7 +145,7 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig let aws_bucket = "test-bucket".to_string(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, client, storage, sync_from).await; + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from, true).await; let expected = BlockHeight::new(3); let block = arb_proto_block(); @@ -161,7 +168,7 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { let db = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, client, db, sync_from).await; + RemoteCache::new(aws_bucket, false, None, client, db, sync_from, true).await; for height in 2..=10u32 { let height = BlockHeight::from(height); @@ -193,7 +200,7 @@ async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() let db = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, client, db, sync_from).await; + RemoteCache::new(aws_bucket, false, None, client, db, sync_from, true).await; let height = BlockHeight::from(0u32); let some_block = arb_proto_block(); @@ -224,7 +231,7 @@ async fn store_block__new_block_comes_first() { let db = database(); let sync_from = BlockHeight::new(0); let mut adapter = - RemoteCache::new(aws_bucket, false, None, client, db, sync_from).await; + RemoteCache::new(aws_bucket, false, None, client, db, sync_from, true).await; // when let height = BlockHeight::from(0u32); diff --git 
a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs index ce0d46d3d8f..b501e801678 100644 --- a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -50,6 +50,7 @@ impl StorageOrRemoteDB { requester_pays: bool, aws_endpoint_url: Option, sync_from: BlockHeight, + publish: bool, ) -> Self { let credentials = DefaultCredentialsChain::builder().build().await; let sdk_config = aws_config::defaults(BehaviorVersion::latest()) @@ -69,6 +70,7 @@ impl StorageOrRemoteDB { client, storage, sync_from, + publish, ) .await; StorageOrRemoteDB::Remote(remote_cache) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 4343d76e083..91d6a4be211 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -92,13 +92,21 @@ pub mod integration { #[derive(Clone, Debug, Default)] pub enum StorageMethod { + // Stores blocks in local DB #[default] Local, + // Publishes blocks to S3 bucket S3 { bucket: String, endpoint_url: Option, requester_pays: bool, }, + // Assumes another node is publishing blocks to S3 bucket, but relaying details + S3NoPublish { + bucket: String, + endpoint_url: Option, + requester_pays: bool, + }, } pub struct UninitializedTask { @@ -192,12 +200,47 @@ pub mod integration { }; let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + let publish = true; + + StorageOrRemoteDB::new_s3( + storage, + &bucket, + requester_pays, + endpoint_url.clone(), + sync_from_height, + publish, + ) + .await + } + + StorageMethod::S3NoPublish { + bucket, + endpoint_url, + requester_pays, + } => { + let mode = storage.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode + .clone() + .map(|c| c.into_owned()) + { + Some(Mode::Local(_)) => { + bail!( + "Database is 
configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + ); + } + _ => mode.map(|m| m.height()), + }; + let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + + let publish = false; + StorageOrRemoteDB::new_s3( storage, &bucket, requester_pays, endpoint_url.clone(), sync_from_height, + publish, ) .await } diff --git a/tests/tests/rpc_s3.rs b/tests/tests/rpc_s3.rs index adeeb5676df..5ffcb6e6e42 100644 --- a/tests/tests/rpc_s3.rs +++ b/tests/tests/rpc_s3.rs @@ -21,7 +21,6 @@ use fuel_core_block_aggregator_api::{ Block as ProtoBlock, BlockHeightRequest as ProtoBlockHeightRequest, BlockRangeRequest as ProtoBlockRangeRequest, - NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, RemoteBlockResponse as ProtoRemoteBlockResponse, RemoteS3Bucket, block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, @@ -172,84 +171,6 @@ async fn get_block_height__can_get_value_from_rpc() { clean_s3_bucket().await; } -#[tokio::test(flavor = "multi_thread")] -async fn new_block_subscription__can_get_expect_block() { - // setup - require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); - ensure_bucket_exists().await; - clean_s3_bucket().await; - - // given - let endpoint_url = AWS_ENDPOINT_URL.to_string(); - let storage_method = StorageMethod::S3 { - bucket: "test-bucket".to_string(), - endpoint_url: Some(endpoint_url), - requester_pays: false, - }; - let config = Config::local_node_with_rpc_and_storage_method(storage_method); - - let rpc_url = config.rpc_config.clone().unwrap().addr; - - let srv = FuelService::from_database(Database::default(), config.clone()) - .await - .unwrap(); - - let graphql_client = FuelClient::from(srv.bound_address); - - let tx = Transaction::default_test_tx(); - - let rpc_url = format!("http://{}", rpc_url); - let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) - .await - .expect("could not connect 
to server"); - - let request = ProtoNewBlockSubscriptionRequest {}; - let mut stream = rpc_client - .new_block_subscription(request) - .await - .unwrap() - .into_inner(); - let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); - - // when - let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) - .await - .unwrap(); - let proto_block = - if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { - block - } else { - panic!("expected literal block payload"); - }; - - let (actual_block, receipts) = - fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); - let actual_height = actual_block.header().height(); - - // then - let expected_height = BlockHeight::new(1); - assert_eq!(&expected_height, actual_height); - - assert!( - matches!( - receipts[1], - Receipt::ScriptResult { - result: ScriptExecutionResult::Success, - .. - } - ), - "should have a script result receipt, received: {:?}", - receipts - ); - assert!( - matches!(receipts[0], Receipt::Return { .. 
}), - "should have a return receipt, received: {:?}", - receipts - ); - - clean_s3_bucket().await; -} - async fn aws_client() -> Client { let credentials = DefaultCredentialsChain::builder().build().await; let _aws_region = @@ -278,6 +199,19 @@ async fn get_block_from_s3_bucket() -> Bytes { obj.body.collect().await.expect(&message).into_bytes() } +async fn block_found_in_s3_bucket() -> bool { + let client = aws_client().await; + let bucket = "test-bucket".to_string(); + let key = block_height_to_key(&BlockHeight::new(1)); + tracing::info!( + "checking if block is in bucket: {} with key {}", + bucket, + key + ); + let req = client.get_object().bucket(&bucket).key(&key); + req.send().await.is_ok() +} + async fn ensure_bucket_exists() { let client = aws_client().await; let bucket = "test-bucket"; @@ -360,3 +294,107 @@ fn unzip_bytes(bytes: &[u8]) -> Vec { decoder.read_to_end(&mut output).unwrap(); output } + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__no_publish__can_get_serialized_block_from_rpc__remote() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3NoPublish { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to 
server"); + + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let expected_header = expected_block.header; + let expected_height = BlockHeight::new(expected_header.height.0); + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let remote_info = if let Some(ProtoPayload::Remote(remote_info)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + remote_info + } else { + panic!("expected literal block payload"); + }; + + // then + let key = block_height_to_key(&expected_height); + let expected = ProtoRemoteBlockResponse { + location: Some(Location::S3(RemoteS3Bucket { + bucket: "test-bucket".to_string(), + key, + requester_pays: false, + endpoint: Some(AWS_ENDPOINT_URL.to_string()), + })), + }; + assert_eq!(expected, remote_info); + + // cleanup + clean_s3_bucket().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__no_publish__does_not_publish_to_s3_bucket() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3NoPublish { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + let graphql_client = FuelClient::from(srv.bound_address); + let tx = Transaction::default_test_tx(); + + // when + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + sleep(std::time::Duration::from_secs(1)).await; + + // then + let found_block = block_found_in_s3_bucket().await; + assert!(!found_block); +} From 3b3fa5b7df18e460def863ecb67487779580a4cb Mon Sep 17 
00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 11:50:50 -0700 Subject: [PATCH 141/146] Add one more s3 test --- tests/tests/rpc_s3.rs | 51 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/tests/tests/rpc_s3.rs b/tests/tests/rpc_s3.rs index 5ffcb6e6e42..727fcf34c20 100644 --- a/tests/tests/rpc_s3.rs +++ b/tests/tests/rpc_s3.rs @@ -296,7 +296,7 @@ fn unzip_bytes(bytes: &[u8]) -> Vec { } #[tokio::test(flavor = "multi_thread")] -async fn get_block_range__no_publish__can_get_serialized_block_from_rpc__remote() { +async fn get_block_range__no_publish__can_get_block_info_from_rpc__remote() { // setup require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); ensure_bucket_exists().await; @@ -368,6 +368,55 @@ async fn get_block_range__no_publish__can_get_serialized_block_from_rpc__remote( clean_s3_bucket().await; } +#[tokio::test(flavor = "multi_thread")] +async fn get_block_height__no_publish__can_get_value_from_rpc() { + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + + // setup + ensure_bucket_exists().await; + clean_s3_bucket().await; + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3NoPublish { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; + + // given + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + 
sleep(std::time::Duration::from_secs(1)).await; + let request = ProtoBlockHeightRequest {}; + let expected_height = Some(1); + let actual_height = rpc_client + .get_synced_block_height(request) + .await + .unwrap() + .into_inner() + .height; + + // then + assert_eq!(expected_height, actual_height); + + // cleanup + clean_s3_bucket().await; +} + #[tokio::test(flavor = "multi_thread")] async fn get_block_range__no_publish__does_not_publish_to_s3_bucket() { // setup From 8177c07d5e515b44a43f600c913f97f4b4695462 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 11:52:09 -0700 Subject: [PATCH 142/146] Appease Clippy-sama --- crates/fuel-core/src/service/config.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index dbe53d5a8ae..3d880ede125 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -43,12 +43,14 @@ use fuel_core_p2p::config::{ NotInitialized, }; +#[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::integration::StorageMethod; #[cfg(feature = "test-helpers")] use fuel_core_chain_config::{ ChainConfig, StateConfig, }; +#[cfg(feature = "rpc")] use fuel_core_types::fuel_types::BlockHeight; #[cfg(feature = "test-helpers")] use std::net::{ From d05ef2977965b932e3a71bfeec5456ad4b552447 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 12:05:44 -0700 Subject: [PATCH 143/146] Add rollback for blocks data as well --- crates/fuel-core/src/database.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 8c490591e19..8576ce1641f 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -96,6 +96,7 @@ use crate::{ use anyhow::anyhow; #[cfg(feature = "rpc")] use fuel_core_block_aggregator_api::db::table::{ + Blocks, LatestBlock, Mode, }; @@ -478,6 +479,16 @@ impl Database { 
Some(Mode::Local(_)) => Some(Mode::new_local(block_height)), Some(Mode::S3(_)) => Some(Mode::new_s3(block_height)), }; + if let Some(Mode::Local(_)) = mode { + let remove_heights = tx + .iter_all_keys::(Some(IterDirection::Reverse)) + .flatten() + .take_while(|height| height <= &block_height) + .collect::>(); + for height in remove_heights { + tx.storage_as_mut::().remove(&height)?; + } + } if let Some(new) = new { tx.storage_as_mut::().insert(&(), &new)?; tx.commit().map_err(|e: StorageError| anyhow!(e))?; From 549d5cc7a082772e46265dafc19efa9332e3dd68 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 12:35:31 -0700 Subject: [PATCH 144/146] Fix service shutdown testg --- crates/fuel-core/src/service.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 34326e4abda..367972feae3 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -545,9 +545,12 @@ mod tests { let mut i = 0; loop { let mut shutdown = ShutdownListener::spawn(); + #[cfg(not(feature = "rpc"))] + let config = Config::local_node(); + #[cfg(feature = "rpc")] + let config = Config::local_node_with_rpc(); let service = - FuelService::new(Default::default(), Config::local_node(), &mut shutdown) - .unwrap(); + FuelService::new(Default::default(), config, &mut shutdown).unwrap(); service.start_and_await().await.unwrap(); sleep(Duration::from_secs(1)); for service in service.sub_services() { From ccfe14424b92bbfcf09e677a7c9f1b4372414968 Mon Sep 17 00:00:00 2001 From: green Date: Wed, 26 Nov 2025 20:48:38 +0000 Subject: [PATCH 145/146] Small clean ups --- crates/fuel-core/src/service/sub_services.rs | 39 ++++++++++--------- .../src/api/protobuf_adapter.rs | 4 +- .../src/blocks/importer_and_db_source.rs | 6 --- .../services/block_aggregator_api/src/lib.rs | 34 +--------------- 4 files changed, 22 insertions(+), 61 deletions(-) diff --git 
a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 2fbb153bfbf..e3a2df0aff4 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -62,21 +62,6 @@ use crate::{ }, }, }; -#[cfg(feature = "rpc")] -use crate::{ - database::database_description::block_aggregator::BlockAggregatorDatabase, - service::adapters::rpc::ReceiptSource, -}; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::{ - api::protobuf_adapter::ProtobufAPI, - blocks::importer_and_db_source::ImporterAndDbSource, -}; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::{ - blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, - integration::UninitializedTask, -}; use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, @@ -89,20 +74,36 @@ use fuel_core_gas_price_service::v1::{ uninitialized_task::new_gas_price_service_v1, }; use fuel_core_poa::Trigger; -#[cfg(feature = "rpc")] -use fuel_core_services::ServiceRunner; use fuel_core_storage::{ self, transactional::AtomicView, }; #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; -#[cfg(feature = "rpc")] -use fuel_core_types::fuel_types::BlockHeight; use fuel_core_types::signer::SignMode; +#[cfg(feature = "rpc")] +use rpc::*; use std::sync::Arc; use tokio::sync::Mutex; +#[cfg(feature = "rpc")] +mod rpc { + pub use crate::{ + database::database_description::block_aggregator::BlockAggregatorDatabase, + service::adapters::rpc::ReceiptSource, + }; + pub use fuel_core_block_aggregator_api::{ + api::protobuf_adapter::ProtobufAPI, + blocks::importer_and_db_source::{ + ImporterAndDbSource, + serializer_adapter::SerializerAdapter, + }, + integration::UninitializedTask, + }; + pub use fuel_core_services::ServiceRunner; + pub use fuel_core_types::fuel_types::BlockHeight; +} + pub type PoAService = 
fuel_core_poa::Service< BlockProducerAdapter, BlockImporterAdapter, diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 37aaf6f2697..2f373c2c4b9 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -71,14 +71,12 @@ impl BlockAggregator for Server { request: tonic::Request, ) -> Result, tonic::Status> { tracing::debug!("get_block_height: {:?}", request); - tracing::info!("get_block_height: {:?}", request); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetCurrentHeight { response }; self.query_sender.send(query).await.map_err(|e| { tonic::Status::internal(format!("Failed to send query: {}", e)) })?; let res = receiver.await; - tracing::info!("query result: {:?}", &res); match res { Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { height: height.map(|inner| *inner), @@ -162,7 +160,7 @@ impl BlockAggregator for Server { request: tonic::Request, ) -> Result, tonic::Status> { const ARB_CHANNEL_SIZE: usize = 100; - tracing::warn!("get_block_range: {:?}", request); + tracing::debug!("get_block_range: {:?}", request); let (response, mut receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); let query = BlockAggregatorQuery::NewBlockSubscription { response }; self.query_sender diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index d2dfce06c50..d6dfbc78fa7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -125,12 +125,6 @@ where block_res = self.receiver.recv() => { block_res.ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) } - _ = self.importer_task.await_stop() => { - 
Err(Error::BlockSource(anyhow!("Importer task stopped unexpectedly"))) - } - _ = self.sync_task.await_stop() => { - Err(Error::BlockSource(anyhow!("Sync task stopped unexpectedly"))) - } importer_error = self.importer_task.await_stop() => { Err(Error::BlockSource(anyhow!("Importer task stopped unexpectedly: {:?}", importer_error))) } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 91d6a4be211..e2b251d4726 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -245,12 +245,7 @@ pub mod integration { .await } }; - Ok(BlockAggregator { - query: api, - database: db_adapter, - block_source, - new_block_subscriptions: vec![], - }) + Ok(BlockAggregator::new(api, db_adapter, block_source)) } } @@ -377,30 +372,3 @@ where Ok(()) } } - -// #[async_trait::async_trait] -// impl RunnableService -// for BlockAggregator -// where -// Api: -// BlockAggregatorApi + Send, -// DB: BlockAggregatorDB + Send, -// Blocks: BlockSource, -// BlockRange: Send, -// ::Block: Clone + Debug + Send, -// { -// const NAME: &'static str = "BlockAggregatorService"; -// type SharedData = (); -// type Task = Self; -// type TaskParams = (); -// -// fn shared_data(&self) -> Self::SharedData {} -// -// async fn into_task( -// self, -// _state_watcher: &StateWatcher, -// _params: Self::TaskParams, -// ) -> anyhow::Result { -// Ok(self) -// } -// } From 5d16a334f6060ef3faabe5358a0a4d804ff4693a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 26 Nov 2025 15:55:32 -0700 Subject: [PATCH 146/146] Address feedback --- bin/fuel-core/src/cli/run/rpc.rs | 4 ++ crates/fuel-core/src/combined_database.rs | 21 ++++++-- crates/fuel-core/src/database.rs | 2 +- crates/fuel-core/src/service/config.rs | 2 + .../src/api/protobuf_adapter.rs | 6 +-- .../src/api/protobuf_adapter/tests.rs | 8 +-- .../src/db/remote_cache.rs | 52 +++++++++++++------ .../services/block_aggregator_api/src/lib.rs | 
4 +- 8 files changed, 68 insertions(+), 31 deletions(-) diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index 723e6b3f1ef..88f1fc64849 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -17,6 +17,9 @@ pub struct RpcArgs { #[command(subcommand)] pub storage_method: Option, + + #[clap(long = "api_buffer_size", default_value = "1000", env)] + pub api_buffer_size: usize, } #[derive(Debug, Clone, Subcommand)] @@ -46,6 +49,7 @@ impl RpcArgs { addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), sync_from: Some(BlockHeight::from(0)), storage_method: self.storage_method.map(Into::into).unwrap_or_default(), + api_buffer_size: self.api_buffer_size, } } } diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index 633572f181c..464e4d62707 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -513,14 +513,25 @@ impl CombinedDatabase { self.block_aggregation_storage_mut() .rollback_to(target_block_height)?; } + if on_chain_height == target_block_height + && off_chain_height == target_block_height + && gas_price_rolled_back + && compression_db_rolled_back + && block_aggregation_storage_rolled_back + { + break; + } } - if on_chain_height == target_block_height - && off_chain_height == target_block_height - && gas_price_rolled_back - && compression_db_rolled_back + #[cfg(not(feature = "rpc"))] { - break; + if on_chain_height == target_block_height + && off_chain_height == target_block_height + && gas_price_rolled_back + && compression_db_rolled_back + { + break; + } } if on_chain_height < target_block_height { diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 8576ce1641f..6149787a893 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -483,7 +483,7 @@ impl Database { let remove_heights = tx .iter_all_keys::(Some(IterDirection::Reverse)) 
.flatten() - .take_while(|height| height <= &block_height) + .take_while(|height| height > &block_height) .collect::>(); for height in remove_heights { tx.storage_as_mut::().remove(&height)?; diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 3d880ede125..9464d83ce0b 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -134,6 +134,7 @@ impl Config { addr: free_local_addr(), sync_from: Some(BlockHeight::new(0)), storage_method: StorageMethod::Local, + api_buffer_size: 100, }; config.rpc_config = Some(rpc_config); config @@ -147,6 +148,7 @@ impl Config { addr: free_local_addr(), sync_from: Some(BlockHeight::new(0)), storage_method, + api_buffer_size: 100, }; config.rpc_config = Some(rpc_config); config diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 2f373c2c4b9..e771b662fd4 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -243,7 +243,7 @@ impl RunnableTask for ServerTask { res = router.serve(self.addr) => { if let Err(e) = res { tracing::error!("BlockAggregator tonic server error: {}", e); - TaskNextAction::ErrorContinue(anyhow!(e)) + TaskNextAction::Stop } else { tracing::info!("BlockAggregator tonic server stopped"); TaskNextAction::Stop @@ -261,10 +261,10 @@ impl RunnableTask for ServerTask { } impl ProtobufAPI { - pub fn new(url: String) -> Result { + pub fn new(url: String, buffer_size: usize) -> Result { let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< BlockAggregatorQuery, - >(100); + >(buffer_size); let addr = url.parse().unwrap(); let _server_service = ServiceRunner::new(ServerTask { addr, diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs 
index 111cf1d303f..86c52b650c8 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -46,7 +46,7 @@ fn free_local_addr() -> String { async fn await_query__get_current_height__client_receives_expected_value() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()).unwrap(); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -83,7 +83,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { async fn await_query__get_block_range__client_receives_expected_value__literal() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()).unwrap(); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -158,7 +158,7 @@ async fn await_query__get_block_range__client_receives_expected_value__literal() async fn await_query__get_block_range__client_receives_expected_value__remote() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()).unwrap(); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -246,7 +246,7 @@ async fn await_query__get_block_range__client_receives_expected_value__remote() async fn await_query__new_block_stream__client_receives_expected_value() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()).unwrap(); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client diff --git 
a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs index 26a4c64449c..ae0e4b03173 100644 --- a/crates/services/block_aggregator_api/src/db/remote_cache.rs +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -82,6 +82,28 @@ impl RemoteCache { synced: false, } } + + fn stream_blocks( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> crate::result::Result { + let bucket = self.aws_bucket.clone(); + let requester_pays = self.requester_pays; + let aws_endpoint = self.aws_endpoint.clone(); + let stream = futures::stream::iter((*first..=*last).map(move |height| { + let block_height = BlockHeight::new(height); + let key = block_height_to_key(&block_height); + let res = crate::block_range_response::RemoteS3Response { + bucket: bucket.clone(), + key: key.clone(), + requester_pays, + aws_endpoint: aws_endpoint.clone(), + }; + (block_height, res) + })); + Ok(BlockRangeResponse::S3(Box::pin(stream))) + } } impl BlockAggregatorDB for RemoteCache @@ -170,23 +192,19 @@ where first: BlockHeight, last: BlockHeight, ) -> crate::result::Result { - // TODO: Check if it exists - let bucket = self.aws_bucket.clone(); - let requester_pays = self.requester_pays; - let aws_endpoint = self.aws_endpoint.clone(); - - let stream = futures::stream::iter((*first..=*last).map(move |height| { - let block_height = BlockHeight::new(height); - let key = block_height_to_key(&block_height); - let res = crate::block_range_response::RemoteS3Response { - bucket: bucket.clone(), - key: key.clone(), - requester_pays, - aws_endpoint: aws_endpoint.clone(), - }; - (block_height, res) - })); - Ok(BlockRangeResponse::S3(Box::pin(stream))) + let current_height = self + .get_current_height() + .await? 
+ .unwrap_or(BlockHeight::new(0)); + if last > current_height { + Err(Error::db_error(anyhow!( + "Requested block height {} is greater than current synced height {}", + last, + current_height + ))) + } else { + self.stream_blocks(first, last) + } } async fn get_current_height(&self) -> crate::result::Result> { diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index e2b251d4726..0a231687117 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -86,6 +86,7 @@ pub mod integration { #[derive(Clone, Debug)] pub struct Config { pub addr: SocketAddr, + pub api_buffer_size: usize, pub sync_from: Option, pub storage_method: StorageMethod, } @@ -294,7 +295,8 @@ pub mod integration { StorageMutate, { let addr = config.addr.to_string(); - let api = ProtobufAPI::new(addr) + let api_buffer_size = config.api_buffer_size; + let api = ProtobufAPI::new(addr, api_buffer_size) .map_err(|e| anyhow::anyhow!("Error creating API: {e}"))?; let db_ending_height = onchain_db .latest_height()