From 9fb03680b3e6033adbf455ccf0a19aec4bc396d3 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 1 Sep 2025 11:11:53 -0600 Subject: [PATCH 001/100] Add naive service with test --- Cargo.lock | 9 ++ Cargo.toml | 1 + .../services/block_aggregator_api/Cargo.toml | 12 +++ .../services/block_aggregator_api/src/api.rs | 38 +++++++ .../block_aggregator_api/src/blocks.rs | 11 +++ .../services/block_aggregator_api/src/db.rs | 12 +++ .../services/block_aggregator_api/src/lib.rs | 86 ++++++++++++++++ .../block_aggregator_api/src/result.rs | 4 + .../block_aggregator_api/src/tests.rs | 98 +++++++++++++++++++ 9 files changed, 271 insertions(+) create mode 100644 crates/services/block_aggregator_api/Cargo.toml create mode 100644 crates/services/block_aggregator_api/src/api.rs create mode 100644 crates/services/block_aggregator_api/src/blocks.rs create mode 100644 crates/services/block_aggregator_api/src/db.rs create mode 100644 crates/services/block_aggregator_api/src/lib.rs create mode 100644 crates/services/block_aggregator_api/src/result.rs create mode 100644 crates/services/block_aggregator_api/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 69f3bc40332..3bc4e37a1bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1393,6 +1393,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block_aggregator_api" +version = "0.1.0" +dependencies = [ + "anyhow", + "fuel-core-services", + "tokio", +] + [[package]] name = "blocking" version = "1.6.1" diff --git a/Cargo.toml b/Cargo.toml index e58307a166f..883dde7023f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,7 @@ members = [ "crates/keygen", "crates/metrics", "crates/services", + "crates/services/block_aggregator_api", "crates/services/compression", "crates/services/consensus_module", "crates/services/consensus_module/bft", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml new file mode 100644 index 00000000000..ae6c11fdb17 --- /dev/null +++ 
b/crates/services/block_aggregator_api/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "block_aggregator_api" +version = "0.1.0" +edition = "2024" + +[dependencies] +fuel-core-services = { workspace = true} +tokio = "1.45.1" +anyhow = "1.0.98" + +[dev-dependencies] +fuel-core-services = { workspace = true, features = ["test-helpers"] } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs new file mode 100644 index 00000000000..3ba82e76eb9 --- /dev/null +++ b/crates/services/block_aggregator_api/src/api.rs @@ -0,0 +1,38 @@ +use crate::{ + blocks::Block, + result::{ + Error, + Result, + }, +}; +use tokio::sync::mpsc::{ + Receiver, + Sender, + channel, +}; + +pub trait BlockAggregatorApi: Send + Sync { + fn await_query( + &mut self, + ) -> impl Future> + Send; +} + +pub enum BlockAggregatorQuery { + GetBlockRange { + first: u64, + last: u64, + response: Sender>, + }, +} + +impl BlockAggregatorQuery { + pub fn get_block_range(first: u64, last: u64) -> (Self, Receiver>) { + let (sender, receiver) = channel(100); + let query = Self::GetBlockRange { + first, + last, + response: sender, + }; + (query, receiver) + } +} diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs new file mode 100644 index 00000000000..8e5d61c5345 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -0,0 +1,11 @@ +use crate::result::{ + Error, + Result, +}; + +pub trait BlockSource: Send + Sync { + fn next_block(&mut self) -> impl Future>; +} + +#[derive(Clone, Copy)] +pub struct Block; diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs new file mode 100644 index 00000000000..8ac4726b5dc --- /dev/null +++ b/crates/services/block_aggregator_api/src/db.rs @@ -0,0 +1,12 @@ +use crate::{ + blocks::Block, + result::{ + Error, + Result, + }, +}; + +pub trait BlockAggregatorDB: Send + Sync { + fn 
store_block(&mut self, block: Block) -> Result<()>; + fn get_block_range(&self, first: u64, last: u64) -> Result>; +} diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs new file mode 100644 index 00000000000..0f8220ab625 --- /dev/null +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -0,0 +1,86 @@ +use fuel_core_services::{ + RunnableTask, + StateWatcher, + TaskNextAction, + try_or_stop, +}; + +use crate::{ + api::{ + BlockAggregatorApi, + BlockAggregatorQuery, + }, + blocks::BlockSource, + db::BlockAggregatorDB, +}; +use result::Result; + +pub mod api; +pub mod blocks; +pub mod db; +pub mod result; + +#[cfg(test)] +mod tests; + +// TODO: this doesn't need to limited to the blocks, +// but we can change the name later +pub struct BlockAggregator { + query: Api, + database: DB, + block_source: Blocks, +} + +impl RunnableTask for BlockAggregator +where + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, + Blocks: BlockSource, +{ + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + tokio::select! 
{ + res = self.query.await_query() => self.handle_query(res), + _ = watcher.while_started() => self.stop(), + } + } + + async fn shutdown(self) -> anyhow::Result<()> { + Ok(()) + } +} + +impl BlockAggregator +where + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, + Blocks: BlockSource, +{ + pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { + Self { + query, + database, + block_source, + } + } + + pub fn stop(&self) -> TaskNextAction { + TaskNextAction::Stop + } + + pub fn handle_query(&mut self, res: Result) -> TaskNextAction { + let query = try_or_stop!(res); + match query { + BlockAggregatorQuery::GetBlockRange { + first, + last, + response, + } => { + let blocks = try_or_stop!(self.database.get_block_range(first, last)); + for block in blocks { + let _ = try_or_stop!(response.try_send(Ok(block))); + } + TaskNextAction::Continue + } + } + } +} diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs new file mode 100644 index 00000000000..b6557c4dd84 --- /dev/null +++ b/crates/services/block_aggregator_api/src/result.rs @@ -0,0 +1,4 @@ +pub enum Error { + ApiError, +} +pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs new file mode 100644 index 00000000000..a3780a3c4c7 --- /dev/null +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -0,0 +1,98 @@ +use super::*; +use crate::blocks::Block; +use std::{ + collections::HashMap, + future, +}; +use tokio::sync::mpsc::{ + Receiver, + Sender, +}; + +struct FakeApi { + receiver: Receiver, +} + +impl FakeApi { + fn new() -> (Self, Sender) { + let (sender, receiver) = tokio::sync::mpsc::channel(1); + let api = Self { receiver }; + (api, sender) + } +} + +impl BlockAggregatorApi for FakeApi { + async fn await_query(&mut self) -> Result { + Ok(self.receiver.recv().await.unwrap()) + } +} + +struct FakeDB { + map: HashMap, +} + +impl 
FakeDB { + fn new() -> Self { + Self { + map: HashMap::new(), + } + } + + fn add_block(&mut self, id: u64, block: Block) { + self.map.insert(id, block); + } +} + +impl BlockAggregatorDB for FakeDB { + fn store_block(&mut self, block: Block) -> Result<()> { + todo!() + } + + fn get_block_range(&self, first: u64, last: u64) -> Result> { + let mut blocks = vec![]; + for id in first..=last { + if let Some(block) = self.map.get(&id) { + blocks.push(*block); + } + } + Ok(blocks) + } +} + +struct FakeBlockSource; + +impl BlockSource for FakeBlockSource { + async fn next_block(&mut self) -> Result { + future::pending().await + } +} + +#[tokio::test] +async fn run__get_block_range__returns_expected_blocks() { + // given + let (sender, receiver) = tokio::sync::mpsc::channel(1); + let api = FakeApi { receiver }; + let mut db = FakeDB::new(); + db.add_block(1, Block); + db.add_block(2, Block); + db.add_block(3, Block); + + let source = FakeBlockSource; + + let mut srv = BlockAggregator::new(api, db, source); + + // when + let mut watcher = StateWatcher::started(); + tokio::spawn(async move { + let _ = srv.run(&mut watcher).await; + }); + let (query, mut response) = BlockAggregatorQuery::get_block_range(2, 3); + sender.send(query).await.unwrap(); + + // then + let mut buffer = vec![]; + let response = response.recv_many(&mut buffer, 3).await; + + // TODO: Check values + assert_eq!(buffer.len(), 2); +} From d69546910cd9485facc92047b45649fe139011b1 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 1 Sep 2025 13:59:54 -0600 Subject: [PATCH 002/100] Return a BoxStream instead of just sending over channel --- Cargo.lock | 1 + crates/services/block_aggregator_api/Cargo.toml | 1 + crates/services/block_aggregator_api/src/api.rs | 9 +++++---- crates/services/block_aggregator_api/src/db.rs | 3 ++- crates/services/block_aggregator_api/src/lib.rs | 8 ++++---- crates/services/block_aggregator_api/src/result.rs | 1 + crates/services/block_aggregator_api/src/tests.rs | 12 +++++++----- 7 
files changed, 21 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bc4e37a1bb..7b2edafffb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1399,6 +1399,7 @@ version = "0.1.0" dependencies = [ "anyhow", "fuel-core-services", + "futures-util", "tokio", ] diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index ae6c11fdb17..86a51e6f2aa 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -7,6 +7,7 @@ edition = "2024" fuel-core-services = { workspace = true} tokio = "1.45.1" anyhow = "1.0.98" +futures-util = "0.3.31" [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 3ba82e76eb9..262575b63d6 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -5,7 +5,8 @@ use crate::{ Result, }, }; -use tokio::sync::mpsc::{ +use fuel_core_services::stream::BoxStream; +use tokio::sync::oneshot::{ Receiver, Sender, channel, @@ -21,13 +22,13 @@ pub enum BlockAggregatorQuery { GetBlockRange { first: u64, last: u64, - response: Sender>, + response: Sender>, }, } impl BlockAggregatorQuery { - pub fn get_block_range(first: u64, last: u64) -> (Self, Receiver>) { - let (sender, receiver) = channel(100); + pub fn get_block_range(first: u64, last: u64) -> (Self, Receiver>) { + let (sender, receiver) = channel(); let query = Self::GetBlockRange { first, last, diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 8ac4726b5dc..dd488500a84 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -5,8 +5,9 @@ use crate::{ Result, }, }; +use fuel_core_services::stream::BoxStream; pub trait BlockAggregatorDB: Send + Sync { 
fn store_block(&mut self, block: Block) -> Result<()>; - fn get_block_range(&self, first: u64, last: u64) -> Result>; + fn get_block_range(&self, first: u64, last: u64) -> Result>; } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 0f8220ab625..0e2c359f75d 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -75,10 +75,10 @@ where last, response, } => { - let blocks = try_or_stop!(self.database.get_block_range(first, last)); - for block in blocks { - let _ = try_or_stop!(response.try_send(Ok(block))); - } + let res = self.database.get_block_range(first, last); + let block_stream = try_or_stop!(res); + let res = response.send(block_stream); + try_or_stop!(res); TaskNextAction::Continue } } diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index b6557c4dd84..81ac073181a 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -1,3 +1,4 @@ +#[derive(Debug)] pub enum Error { ApiError, } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index a3780a3c4c7..31403692d06 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -1,5 +1,7 @@ use super::*; use crate::blocks::Block; +use fuel_core_services::stream::BoxStream; +use futures_util::StreamExt; use std::{ collections::HashMap, future, @@ -48,14 +50,14 @@ impl BlockAggregatorDB for FakeDB { todo!() } - fn get_block_range(&self, first: u64, last: u64) -> Result> { + fn get_block_range(&self, first: u64, last: u64) -> Result> { let mut blocks = vec![]; for id in first..=last { if let Some(block) = self.map.get(&id) { blocks.push(*block); } } - Ok(blocks) + Ok(Box::pin(futures_util::stream::iter(blocks))) } } @@ -90,9 +92,9 @@ 
async fn run__get_block_range__returns_expected_blocks() { sender.send(query).await.unwrap(); // then - let mut buffer = vec![]; - let response = response.recv_many(&mut buffer, 3).await; + let stream = response.await.unwrap(); + let blocks = stream.collect::>().await; // TODO: Check values - assert_eq!(buffer.len(), 2); + assert_eq!(blocks.len(), 2); } From 83842386a1ad832fd7d990ab15259fc6cf71cb8d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 1 Sep 2025 14:08:38 -0600 Subject: [PATCH 003/100] Cleanup --- Cargo.lock | 1 + .../services/block_aggregator_api/Cargo.toml | 1 + .../services/block_aggregator_api/src/api.rs | 5 +---- .../block_aggregator_api/src/blocks.rs | 5 +---- crates/services/block_aggregator_api/src/db.rs | 5 +---- .../services/block_aggregator_api/src/lib.rs | 18 ++++++++++++------ .../services/block_aggregator_api/src/tests.rs | 9 +++++---- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b2edafffb2..3c257ad9e71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1401,6 +1401,7 @@ dependencies = [ "fuel-core-services", "futures-util", "tokio", + "tracing", ] [[package]] diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 86a51e6f2aa..a28a886ac39 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -8,6 +8,7 @@ fuel-core-services = { workspace = true} tokio = "1.45.1" anyhow = "1.0.98" futures-util = "0.3.31" +tracing = "0.1.41" [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 262575b63d6..14dce05fe0d 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,9 +1,6 @@ use crate::{ blocks::Block, - result::{ - Error, - Result, - }, + result::Result, }; use 
fuel_core_services::stream::BoxStream; use tokio::sync::oneshot::{ diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 8e5d61c5345..c19f9c16419 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,7 +1,4 @@ -use crate::result::{ - Error, - Result, -}; +use crate::result::Result; pub trait BlockSource: Send + Sync { fn next_block(&mut self) -> impl Future>; diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index dd488500a84..97e6f2d6004 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -1,9 +1,6 @@ use crate::{ blocks::Block, - result::{ - Error, - Result, - }, + result::Result, }; use fuel_core_services::stream::BoxStream; diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 0e2c359f75d..e7a8bf1439e 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -28,7 +28,7 @@ mod tests; pub struct BlockAggregator { query: Api, database: DB, - block_source: Blocks, + _block_source: Blocks, } impl RunnableTask for BlockAggregator @@ -55,11 +55,11 @@ where DB: BlockAggregatorDB, Blocks: BlockSource, { - pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { + pub fn new(query: Api, database: DB, _block_source: Blocks) -> Self { Self { query, database, - block_source, + _block_source, } } @@ -68,7 +68,9 @@ where } pub fn handle_query(&mut self, res: Result) -> TaskNextAction { - let query = try_or_stop!(res); + let query = try_or_stop!(res, |e| { + tracing::error!("Error receiving query: {e:?}"); + }); match query { BlockAggregatorQuery::GetBlockRange { first, @@ -76,9 +78,13 @@ where response, } => { let res = self.database.get_block_range(first, last); - let block_stream 
= try_or_stop!(res); + let block_stream = try_or_stop!(res, |e| { + tracing::error!("Error getting block range from database: {e:?}"); + }); let res = response.send(block_stream); - try_or_stop!(res); + try_or_stop!(res, |_| { + tracing::error!("Error sending block range response"); + }); TaskNextAction::Continue } } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 31403692d06..62ac77fc23c 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -1,3 +1,5 @@ +#![allow(non_snake_case)] + use super::*; use crate::blocks::Block; use fuel_core_services::stream::BoxStream; @@ -46,7 +48,7 @@ impl FakeDB { } impl BlockAggregatorDB for FakeDB { - fn store_block(&mut self, block: Block) -> Result<()> { + fn store_block(&mut self, _block: Block) -> Result<()> { todo!() } @@ -72,8 +74,7 @@ impl BlockSource for FakeBlockSource { #[tokio::test] async fn run__get_block_range__returns_expected_blocks() { // given - let (sender, receiver) = tokio::sync::mpsc::channel(1); - let api = FakeApi { receiver }; + let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); db.add_block(1, Block); db.add_block(2, Block); @@ -88,7 +89,7 @@ async fn run__get_block_range__returns_expected_blocks() { tokio::spawn(async move { let _ = srv.run(&mut watcher).await; }); - let (query, mut response) = BlockAggregatorQuery::get_block_range(2, 3); + let (query, response) = BlockAggregatorQuery::get_block_range(2, 3); sender.send(query).await.unwrap(); // then From db993bfd0b089940d57b53a15659027ce778aa40 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 1 Sep 2025 14:33:55 -0600 Subject: [PATCH 004/100] Add failing test --- Cargo.lock | 11 +++--- .../services/block_aggregator_api/Cargo.toml | 1 + .../block_aggregator_api/src/blocks.rs | 36 +++++++++++++++++-- .../services/block_aggregator_api/src/lib.rs | 12 +++++-- .../block_aggregator_api/src/tests.rs | 18 
+++++++--- 5 files changed, 64 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c257ad9e71..912615a9afa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1400,6 +1400,7 @@ dependencies = [ "anyhow", "fuel-core-services", "futures-util", + "rand 0.9.2", "tokio", "tracing", ] @@ -6640,7 +6641,7 @@ dependencies = [ "hyper 1.6.0", "hyper-util", "log", - "rand 0.9.1", + "rand 0.9.2", "regex", "serde_json", "serde_urlencoded", @@ -8024,7 +8025,7 @@ dependencies = [ "bytes", "getrandom 0.3.3", "lru-slab", - "rand 0.9.1", + "rand 0.9.2", "ring 0.17.14", "rustc-hash 2.1.1", "rustls 0.23.27", @@ -8084,9 +8085,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -11575,7 +11576,7 @@ dependencies = [ "nohash-hasher", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "static_assertions", "web-time", ] diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index a28a886ac39..88c2aad3cf5 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -9,6 +9,7 @@ tokio = "1.45.1" anyhow = "1.0.98" futures-util = "0.3.31" tracing = "0.1.41" +rand = "0.9.2" [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index c19f9c16419..3e3c5e43c31 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,8 +1,38 @@ use crate::result::Result; pub trait BlockSource: Send + Sync { - fn next_block(&mut self) -> impl Future>; 
+ fn next_block(&mut self) -> impl Future> + Send; } -#[derive(Clone, Copy)] -pub struct Block; +#[derive(Clone)] +pub struct Block { + bytes: Vec, +} + +impl Block { + pub fn new(bytes: Vec) -> Self { + Self { bytes } + } + + #[cfg(test)] + pub fn arb_size(rng: &mut Rng, size: usize) -> Self { + let bytes: Vec = (0..size).map(|_| rng.random()).collect(); + Self::new(bytes) + } + + #[cfg(test)] + pub fn arb(rng: &mut Rng) -> Self { + const SIZE: usize = 100; + Self::arb_size(rng, SIZE) + } + + pub fn bytes(&self) -> &[u8] { + &self.bytes + } +} + +impl From> for Block { + fn from(bytes: Vec) -> Self { + Self::new(bytes) + } +} diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index e7a8bf1439e..8e8c95c8215 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -10,7 +10,10 @@ use crate::{ BlockAggregatorApi, BlockAggregatorQuery, }, - blocks::BlockSource, + blocks::{ + Block, + BlockSource, + }, db::BlockAggregatorDB, }; use result::Result; @@ -39,7 +42,8 @@ where { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tokio::select! 
{ - res = self.query.await_query() => self.handle_query(res), + query_res = self.query.await_query() => self.handle_query(query_res), + block_res = self._block_source.next_block() => self.handle_block(block_res), _ = watcher.while_started() => self.stop(), } } @@ -89,4 +93,8 @@ where } } } + + pub fn handle_block(&mut self, _res: Result) -> TaskNextAction { + todo!() + } } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 62ac77fc23c..743c27988e6 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -4,6 +4,10 @@ use super::*; use crate::blocks::Block; use fuel_core_services::stream::BoxStream; use futures_util::StreamExt; +use rand::{ + SeedableRng, + prelude::StdRng, +}; use std::{ collections::HashMap, future, @@ -56,7 +60,7 @@ impl BlockAggregatorDB for FakeDB { let mut blocks = vec![]; for id in first..=last { if let Some(block) = self.map.get(&id) { - blocks.push(*block); + blocks.push(block.to_owned()); } } Ok(Box::pin(futures_util::stream::iter(blocks))) @@ -74,11 +78,12 @@ impl BlockSource for FakeBlockSource { #[tokio::test] async fn run__get_block_range__returns_expected_blocks() { // given + let mut rng = StdRng::seed_from_u64(42); let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); - db.add_block(1, Block); - db.add_block(2, Block); - db.add_block(3, Block); + db.add_block(1, Block::arb(&mut rng)); + db.add_block(2, Block::arb(&mut rng)); + db.add_block(3, Block::arb(&mut rng)); let source = FakeBlockSource; @@ -99,3 +104,8 @@ async fn run__get_block_range__returns_expected_blocks() { // TODO: Check values assert_eq!(blocks.len(), 2); } + +#[tokio::test] +async fn run__new_block_gets_added_to_db() { + todo!() +} From 17ac53cc8b066880b0a2a6a35d44531c9de4dcbd Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 09:50:11 -0600 Subject: [PATCH 005/100] Add test for receiving new block --- 
Cargo.lock | 57 ++++--------- .../services/block_aggregator_api/Cargo.toml | 1 + .../services/block_aggregator_api/src/api.rs | 13 +++ .../block_aggregator_api/src/blocks.rs | 6 +- .../services/block_aggregator_api/src/db.rs | 12 ++- .../services/block_aggregator_api/src/lib.rs | 29 +++++-- .../block_aggregator_api/src/tests.rs | 82 +++++++++++++++---- 7 files changed, 130 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 912615a9afa..795b6739fa6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1403,6 +1403,7 @@ dependencies = [ "rand 0.9.2", "tokio", "tracing", + "tracing-subscriber", ] [[package]] @@ -1435,7 +1436,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", - "regex-automata 0.4.9", + "regex-automata", "serde", ] @@ -5741,7 +5742,7 @@ dependencies = [ "lalrpop-util 0.20.2", "petgraph", "regex", - "regex-syntax 0.8.5", + "regex-syntax", "string_cache", "term", "tiny-keccak", @@ -5755,7 +5756,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" dependencies = [ - "regex-automata 0.4.9", + "regex-automata", ] [[package]] @@ -6434,7 +6435,7 @@ dependencies = [ "lazy_static", "proc-macro2", "quote", - "regex-syntax 0.8.5", + "regex-syntax", "syn 2.0.101", ] @@ -6501,11 +6502,11 @@ dependencies = [ [[package]] name = "matchers" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "regex-automata 0.1.10", + "regex-automata", ] [[package]] @@ -6858,12 +6859,11 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "nu-ansi-term" -version = 
"0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -7108,12 +7108,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p256" version = "0.13.2" @@ -7779,7 +7773,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.8.5", + "regex-syntax", "rusty-fork", "tempfile", "unarray", @@ -8258,17 +8252,8 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] @@ -8279,7 +8264,7 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax", ] [[package]] @@ -8288,12 +8273,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.5" @@ -10459,14 +10438,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "serde", "serde_json", "sharded-slab", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 88c2aad3cf5..c3fc1111b0e 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -13,3 +13,4 @@ rand = "0.9.2" [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } +tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 14dce05fe0d..0c7c96b55e4 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -3,6 +3,7 @@ use crate::{ result::Result, }; use fuel_core_services::stream::BoxStream; +use std::fmt; use tokio::sync::oneshot::{ Receiver, Sender, @@ -23,6 +24,18 @@ pub enum BlockAggregatorQuery { }, } +impl fmt::Debug for BlockAggregatorQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BlockAggregatorQuery::GetBlockRange { first, last, .. 
} => f + .debug_struct("GetBlockRange") + .field("first", first) + .field("last", last) + .finish(), + } + } +} + impl BlockAggregatorQuery { pub fn get_block_range(first: u64, last: u64) -> (Self, Receiver>) { let (sender, receiver) = channel(); diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 3e3c5e43c31..8ec177b0dec 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,10 +1,10 @@ use crate::result::Result; pub trait BlockSource: Send + Sync { - fn next_block(&mut self) -> impl Future> + Send; + fn next_block(&mut self) -> impl Future> + Send; } -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Block { bytes: Vec, } @@ -21,7 +21,7 @@ impl Block { } #[cfg(test)] - pub fn arb(rng: &mut Rng) -> Self { + pub fn random(rng: &mut Rng) -> Self { const SIZE: usize = 100; Self::arb_size(rng, SIZE) } diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 97e6f2d6004..cfb26bb78d1 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -5,6 +5,14 @@ use crate::{ use fuel_core_services::stream::BoxStream; pub trait BlockAggregatorDB: Send + Sync { - fn store_block(&mut self, block: Block) -> Result<()>; - fn get_block_range(&self, first: u64, last: u64) -> Result>; + fn store_block( + &mut self, + id: u64, + block: Block, + ) -> impl Future> + Send; + fn get_block_range( + &self, + first: u64, + last: u64, + ) -> impl Future>> + Send; } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 8e8c95c8215..d0c8e14f7b0 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -31,7 +31,7 @@ mod tests; pub struct BlockAggregator { query: Api, database: DB, - _block_source: 
Blocks, + block_source: Blocks, } impl RunnableTask for BlockAggregator @@ -41,9 +41,10 @@ where Blocks: BlockSource, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + tracing::debug!("BlockAggregator running"); tokio::select! { - query_res = self.query.await_query() => self.handle_query(query_res), - block_res = self._block_source.next_block() => self.handle_block(block_res), + query_res = self.query.await_query() => self.handle_query(query_res).await, + block_res = self.block_source.next_block() => self.handle_block(block_res).await, _ = watcher.while_started() => self.stop(), } } @@ -63,7 +64,7 @@ where Self { query, database, - _block_source, + block_source: _block_source, } } @@ -71,7 +72,11 @@ where TaskNextAction::Stop } - pub fn handle_query(&mut self, res: Result) -> TaskNextAction { + pub async fn handle_query( + &mut self, + res: Result, + ) -> TaskNextAction { + tracing::debug!("Handling query: {res:?}"); let query = try_or_stop!(res, |e| { tracing::error!("Error receiving query: {e:?}"); }); @@ -81,7 +86,7 @@ where last, response, } => { - let res = self.database.get_block_range(first, last); + let res = self.database.get_block_range(first, last).await; let block_stream = try_or_stop!(res, |e| { tracing::error!("Error getting block range from database: {e:?}"); }); @@ -94,7 +99,15 @@ where } } - pub fn handle_block(&mut self, _res: Result) -> TaskNextAction { - todo!() + pub async fn handle_block(&mut self, res: Result<(u64, Block)>) -> TaskNextAction { + tracing::debug!("Handling block: {res:?}"); + let (id, block) = try_or_stop!(res, |e| { + tracing::error!("Error receiving block from source: {e:?}"); + }); + let res = self.database.store_block(id, block).await; + try_or_stop!(res, |e| { + tracing::error!("Error storing block in database: {e:?}"); + }); + TaskNextAction::Continue } } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 
743c27988e6..bebd90d20b9 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -11,6 +11,10 @@ use rand::{ use std::{ collections::HashMap, future, + sync::{ + Arc, + Mutex, + }, }; use tokio::sync::mpsc::{ Receiver, @@ -36,30 +40,39 @@ impl BlockAggregatorApi for FakeApi { } struct FakeDB { - map: HashMap, + map: Arc>>, } impl FakeDB { fn new() -> Self { - Self { - map: HashMap::new(), - } + let map = Arc::new(Mutex::new(HashMap::new())); + Self { map } } fn add_block(&mut self, id: u64, block: Block) { - self.map.insert(id, block); + self.map.lock().unwrap().insert(id, block); + } + + fn clone_inner(&self) -> Arc>> { + self.map.clone() } } impl BlockAggregatorDB for FakeDB { - fn store_block(&mut self, _block: Block) -> Result<()> { - todo!() + async fn store_block(&mut self, id: u64, block: Block) -> Result<()> { + self.map.lock().unwrap().insert(id, block); + Ok(()) } - fn get_block_range(&self, first: u64, last: u64) -> Result> { + async fn get_block_range(&self, first: u64, last: u64) -> Result> { let mut blocks = vec![]; for id in first..=last { - if let Some(block) = self.map.get(&id) { + if let Some(block) = self + .map + .lock() + .expect("lets assume for now the test was written to avoid conflicts") + .get(&id) + { blocks.push(block.to_owned()); } } @@ -67,25 +80,35 @@ impl BlockAggregatorDB for FakeDB { } } -struct FakeBlockSource; +struct FakeBlockSource { + blocks: Receiver<(u64, Block)>, +} + +impl FakeBlockSource { + fn new() -> (Self, Sender<(u64, Block)>) { + let (_sender, receiver) = tokio::sync::mpsc::channel(1); + let _self = Self { blocks: receiver }; + (_self, _sender) + } +} impl BlockSource for FakeBlockSource { - async fn next_block(&mut self) -> Result { - future::pending().await + async fn next_block(&mut self) -> Result<(u64, Block)> { + Ok(self.blocks.recv().await.unwrap()) } } #[tokio::test] async fn run__get_block_range__returns_expected_blocks() { - // given let mut 
rng = StdRng::seed_from_u64(42); + // given let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); - db.add_block(1, Block::arb(&mut rng)); - db.add_block(2, Block::arb(&mut rng)); - db.add_block(3, Block::arb(&mut rng)); + db.add_block(1, Block::random(&mut rng)); + db.add_block(2, Block::random(&mut rng)); + db.add_block(3, Block::random(&mut rng)); - let source = FakeBlockSource; + let (source, _) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); @@ -107,5 +130,28 @@ async fn run__get_block_range__returns_expected_blocks() { #[tokio::test] async fn run__new_block_gets_added_to_db() { - todo!() + // let _ = tracing_subscriber::fmt() + // .with_max_level(tracing::Level::DEBUG) + // .try_init(); + let mut rng = StdRng::seed_from_u64(42); + // given + let (api, _sender) = FakeApi::new(); + let mut db = FakeDB::new(); + let db_map = db.clone_inner(); + let (mut source, source_sender) = FakeBlockSource::new(); + let mut srv = BlockAggregator::new(api, db, source); + + // when + let mut watcher = StateWatcher::started(); + tokio::spawn(async move { + let _ = srv.run(&mut watcher).await; + }); + let block = Block::random(&mut rng); + let id = 123u64; + source_sender.send((id, block.clone())).await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // then + let actual = db_map.lock().unwrap().get(&id).unwrap().clone(); + assert_eq!(block, actual); } From 77d64efbd94ce1e3a5b658c2d0dbf7e742b311d5 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 09:50:52 -0600 Subject: [PATCH 006/100] Cleanup --- crates/services/block_aggregator_api/src/tests.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index bebd90d20b9..3e11d2321fc 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -10,7 +10,6 @@ use 
rand::{ }; use std::{ collections::HashMap, - future, sync::{ Arc, Mutex, @@ -136,9 +135,9 @@ async fn run__new_block_gets_added_to_db() { let mut rng = StdRng::seed_from_u64(42); // given let (api, _sender) = FakeApi::new(); - let mut db = FakeDB::new(); + let db = FakeDB::new(); let db_map = db.clone_inner(); - let (mut source, source_sender) = FakeBlockSource::new(); + let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); // when From ad951b92f8bd3b3f597b3e08d614b5d443926767 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 09:57:48 -0600 Subject: [PATCH 007/100] Lint --- crates/services/block_aggregator_api/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index c3fc1111b0e..5243e30e0ec 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -4,12 +4,12 @@ version = "0.1.0" edition = "2024" [dependencies] -fuel-core-services = { workspace = true} -tokio = "1.45.1" anyhow = "1.0.98" +fuel-core-services = { workspace = true } futures-util = "0.3.31" -tracing = "0.1.41" rand = "0.9.2" +tokio = "1.45.1" +tracing = "0.1.41" [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } From 267ce07dad4825081dbcd01d9bd22db630bd3fd2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 10:18:30 -0600 Subject: [PATCH 008/100] Add query for current height to allow checking the progress --- .../services/block_aggregator_api/src/api.rs | 12 +++++++ .../services/block_aggregator_api/src/db.rs | 2 ++ .../services/block_aggregator_api/src/lib.rs | 11 ++++++ .../block_aggregator_api/src/tests.rs | 34 +++++++++++++++++++ 4 files changed, 59 insertions(+) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 
0c7c96b55e4..e20e4a642b2 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -22,6 +22,9 @@ pub enum BlockAggregatorQuery { last: u64, response: Sender>, }, + GetCurrentHeight { + response: Sender, + }, } impl fmt::Debug for BlockAggregatorQuery { @@ -32,6 +35,9 @@ impl fmt::Debug for BlockAggregatorQuery { .field("first", first) .field("last", last) .finish(), + BlockAggregatorQuery::GetCurrentHeight { .. } => { + f.debug_struct("GetCurrentHeight").finish() + } } } } @@ -46,4 +52,10 @@ impl BlockAggregatorQuery { }; (query, receiver) } + + pub(crate) fn get_current_height() -> (Self, Receiver) { + let (sender, receiver) = channel(); + let query = Self::GetCurrentHeight { response: sender }; + (query, receiver) + } } diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index cfb26bb78d1..0f18eff0df8 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -15,4 +15,6 @@ pub trait BlockAggregatorDB: Send + Sync { first: u64, last: u64, ) -> impl Future>> + Send; + + fn get_current_height(&self) -> impl Future> + Send; } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index d0c8e14f7b0..cc19140da6e 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -96,6 +96,17 @@ where }); TaskNextAction::Continue } + BlockAggregatorQuery::GetCurrentHeight { response } => { + let res = self.database.get_current_height().await; + let height = try_or_stop!(res, |e| { + tracing::error!("Error getting current height from database: {e:?}"); + }); + let res = response.send(height); + try_or_stop!(res, |_| { + tracing::error!("Error sending current height response"); + }); + TaskNextAction::Continue + } } } diff --git a/crates/services/block_aggregator_api/src/tests.rs 
b/crates/services/block_aggregator_api/src/tests.rs index 3e11d2321fc..08e73fecef2 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -77,6 +77,12 @@ impl BlockAggregatorDB for FakeDB { } Ok(Box::pin(futures_util::stream::iter(blocks))) } + + async fn get_current_height(&self) -> Result { + let map = self.map.lock().unwrap(); + let max_height = map.keys().max().cloned().unwrap_or(0); + Ok(max_height) + } } struct FakeBlockSource { @@ -154,3 +160,31 @@ async fn run__new_block_gets_added_to_db() { let actual = db_map.lock().unwrap().get(&id).unwrap().clone(); assert_eq!(block, actual); } + +#[tokio::test] +async fn run__get_current_height__returns_expected_height() { + let mut rng = StdRng::seed_from_u64(42); + // given + let (api, sender) = FakeApi::new(); + let mut db = FakeDB::new(); + let expected_height = 3; + db.add_block(1, Block::random(&mut rng)); + db.add_block(2, Block::random(&mut rng)); + db.add_block(expected_height, Block::random(&mut rng)); + + let (source, _) = FakeBlockSource::new(); + + let mut srv = BlockAggregator::new(api, db, source); + + // when + let mut watcher = StateWatcher::started(); + tokio::spawn(async move { + let _ = srv.run(&mut watcher).await; + }); + let (query, response) = BlockAggregatorQuery::get_current_height(); + sender.send(query).await.unwrap(); + + // then + let height = response.await.unwrap(); + assert_eq!(expected_height, height); +} From 78f1ee049efb59684ab4516d75d8254526954c06 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 10:53:55 -0600 Subject: [PATCH 009/100] Remove commented code --- crates/services/block_aggregator_api/src/tests.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 08e73fecef2..616178543bd 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ 
b/crates/services/block_aggregator_api/src/tests.rs @@ -135,9 +135,6 @@ async fn run__get_block_range__returns_expected_blocks() { #[tokio::test] async fn run__new_block_gets_added_to_db() { - // let _ = tracing_subscriber::fmt() - // .with_max_level(tracing::Level::DEBUG) - // .try_init(); let mut rng = StdRng::seed_from_u64(42); // given let (api, _sender) = FakeApi::new(); From 90867f86aa269c388eec7295d64f3fe7e7bfddb2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 11:29:03 -0600 Subject: [PATCH 010/100] Update CHANGELOG --- .changes/added/3085.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changes/added/3085.md diff --git a/.changes/added/3085.md b/.changes/added/3085.md new file mode 100644 index 00000000000..88a0925a2ea --- /dev/null +++ b/.changes/added/3085.md @@ -0,0 +1 @@ +Add scaffolding for the new block aggregator service \ No newline at end of file From f6d2fdb651c21e84bb447ce2227accc44e0f0622 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 14:06:01 -0600 Subject: [PATCH 011/100] Fix privacy of constructor --- crates/services/block_aggregator_api/src/api.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index e20e4a642b2..021716244ce 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -53,7 +53,7 @@ impl BlockAggregatorQuery { (query, receiver) } - pub(crate) fn get_current_height() -> (Self, Receiver) { + pub fn get_current_height() -> (Self, Receiver) { let (sender, receiver) = channel(); let query = Self::GetCurrentHeight { response: sender }; (query, receiver) From e612ea27d292fb4b471b11bb85e8981f734fb4ee Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 14:30:54 -0600 Subject: [PATCH 012/100] Improve tests, fix flake --- .../services/block_aggregator_api/src/lib.rs | 85 +++++++++++++------ 
.../block_aggregator_api/src/result.rs | 1 + .../block_aggregator_api/src/tests.rs | 37 +++++--- 3 files changed, 85 insertions(+), 38 deletions(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index cc19140da6e..7b670d71848 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -1,10 +1,3 @@ -use fuel_core_services::{ - RunnableTask, - StateWatcher, - TaskNextAction, - try_or_stop, -}; - use crate::{ api::{ BlockAggregatorApi, @@ -16,6 +9,13 @@ use crate::{ }, db::BlockAggregatorDB, }; +use fuel_core_services::{ + RunnableTask, + StateWatcher, + TaskNextAction, + stream::BoxStream, + try_or_stop, +}; use result::Result; pub mod api; @@ -86,30 +86,65 @@ where last, response, } => { - let res = self.database.get_block_range(first, last).await; - let block_stream = try_or_stop!(res, |e| { - tracing::error!("Error getting block range from database: {e:?}"); - }); - let res = response.send(block_stream); - try_or_stop!(res, |_| { - tracing::error!("Error sending block range response"); - }); - TaskNextAction::Continue + self.handle_get_block_range_query(first, last, response) + .await + // let res = self.database.get_block_range(first, last).await; + // let block_stream = try_or_stop!(res, |e| { + // tracing::error!("Error getting block range from database: {e:?}"); + // }); + // let res = response.send(block_stream); + // try_or_stop!(res, |_| { + // tracing::error!("Error sending block range response"); + // }); + // TaskNextAction::Continue } BlockAggregatorQuery::GetCurrentHeight { response } => { - let res = self.database.get_current_height().await; - let height = try_or_stop!(res, |e| { - tracing::error!("Error getting current height from database: {e:?}"); - }); - let res = response.send(height); - try_or_stop!(res, |_| { - tracing::error!("Error sending current height response"); - }); - TaskNextAction::Continue + 
self.handle_get_current_height_query(response).await + // let res = self.database.get_current_height().await; + // let height = try_or_stop!(res, |e| { + // tracing::error!("Error getting current height from database: {e:?}"); + // }); + // let res = response.send(height); + // try_or_stop!(res, |_| { + // tracing::error!("Error sending current height response"); + // }); + // TaskNextAction::Continue } } } + async fn handle_get_block_range_query( + &mut self, + first: u64, + last: u64, + response: tokio::sync::oneshot::Sender>, + ) -> TaskNextAction { + let res = self.database.get_block_range(first, last).await; + let block_stream = try_or_stop!(res, |e| { + tracing::error!("Error getting block range from database: {e:?}"); + }); + let res = response.send(block_stream); + try_or_stop!(res, |_| { + tracing::error!("Error sending block range response"); + }); + TaskNextAction::Continue + } + + async fn handle_get_current_height_query( + &mut self, + response: tokio::sync::oneshot::Sender, + ) -> TaskNextAction { + let res = self.database.get_current_height().await; + let height = try_or_stop!(res, |e| { + tracing::error!("Error getting current height from database: {e:?}"); + }); + let res = response.send(height); + try_or_stop!(res, |_| { + tracing::error!("Error sending current height response"); + }); + TaskNextAction::Continue + } + pub async fn handle_block(&mut self, res: Result<(u64, Block)>) -> TaskNextAction { tracing::debug!("Handling block: {res:?}"); let (id, block) = try_or_stop!(res, |e| { diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index 81ac073181a..86b24e3a39c 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -1,5 +1,6 @@ #[derive(Debug)] pub enum Error { ApiError, + BlockSourceError, } pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs 
b/crates/services/block_aggregator_api/src/tests.rs index 616178543bd..1479223bbe2 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -1,7 +1,10 @@ #![allow(non_snake_case)] use super::*; -use crate::blocks::Block; +use crate::{ + blocks::Block, + result::Error, +}; use fuel_core_services::stream::BoxStream; use futures_util::StreamExt; use rand::{ @@ -99,7 +102,7 @@ impl FakeBlockSource { impl BlockSource for FakeBlockSource { async fn next_block(&mut self) -> Result<(u64, Block)> { - Ok(self.blocks.recv().await.unwrap()) + self.blocks.recv().await.ok_or(Error::BlockSourceError) } } @@ -113,16 +116,16 @@ async fn run__get_block_range__returns_expected_blocks() { db.add_block(2, Block::random(&mut rng)); db.add_block(3, Block::random(&mut rng)); - let (source, _) = FakeBlockSource::new(); + let (source, _block_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); + let mut watcher = StateWatcher::started(); + let (query, response) = BlockAggregatorQuery::get_block_range(2, 3); // when - let mut watcher = StateWatcher::started(); tokio::spawn(async move { let _ = srv.run(&mut watcher).await; }); - let (query, response) = BlockAggregatorQuery::get_block_range(2, 3); sender.send(query).await.unwrap(); // then @@ -131,6 +134,9 @@ async fn run__get_block_range__returns_expected_blocks() { // TODO: Check values assert_eq!(blocks.len(), 2); + + // cleanup + drop(_block_sender); } #[tokio::test] @@ -143,17 +149,18 @@ async fn run__new_block_gets_added_to_db() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - // when + let block = Block::random(&mut rng); + let id = 123u64; let mut watcher = StateWatcher::started(); + + // when tokio::spawn(async move { let _ = srv.run(&mut watcher).await; }); - let block = Block::random(&mut rng); - let id = 123u64; source_sender.send((id, block.clone())).await.unwrap(); - 
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; // then + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; let actual = db_map.lock().unwrap().get(&id).unwrap().clone(); assert_eq!(block, actual); } @@ -169,19 +176,23 @@ async fn run__get_current_height__returns_expected_height() { db.add_block(2, Block::random(&mut rng)); db.add_block(expected_height, Block::random(&mut rng)); - let (source, _) = FakeBlockSource::new(); - + let (source, _block_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - // when let mut watcher = StateWatcher::started(); + let (query, response) = BlockAggregatorQuery::get_current_height(); + + // when tokio::spawn(async move { let _ = srv.run(&mut watcher).await; }); - let (query, response) = BlockAggregatorQuery::get_current_height(); sender.send(query).await.unwrap(); // then + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; let height = response.await.unwrap(); assert_eq!(expected_height, height); + + // cleanup + drop(_block_sender); } From 489e74cb819189e8e7d372acf2e6d9f9e119a762 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 16:42:13 -0600 Subject: [PATCH 013/100] Remove commented code --- .../services/block_aggregator_api/src/lib.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 7b670d71848..f7ca7395569 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -88,27 +88,9 @@ where } => { self.handle_get_block_range_query(first, last, response) .await - // let res = self.database.get_block_range(first, last).await; - // let block_stream = try_or_stop!(res, |e| { - // tracing::error!("Error getting block range from database: {e:?}"); - // }); - // let res = response.send(block_stream); - // try_or_stop!(res, |_| { - // tracing::error!("Error 
sending block range response"); - // }); - // TaskNextAction::Continue } BlockAggregatorQuery::GetCurrentHeight { response } => { self.handle_get_current_height_query(response).await - // let res = self.database.get_current_height().await; - // let height = try_or_stop!(res, |e| { - // tracing::error!("Error getting current height from database: {e:?}"); - // }); - // let res = response.send(height); - // try_or_stop!(res, |_| { - // tracing::error!("Error sending current height response"); - // }); - // TaskNextAction::Continue } } } From 56f4eb08abaad5b327edc028fced31523dbb8b4f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 13:12:36 -0600 Subject: [PATCH 014/100] Remove unnecessary task spawn --- crates/services/block_aggregator_api/src/tests.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 1479223bbe2..3f38d91d6b3 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -123,10 +123,8 @@ async fn run__get_block_range__returns_expected_blocks() { let (query, response) = BlockAggregatorQuery::get_block_range(2, 3); // when - tokio::spawn(async move { - let _ = srv.run(&mut watcher).await; - }); sender.send(query).await.unwrap(); + let _ = srv.run(&mut watcher).await; // then let stream = response.await.unwrap(); @@ -154,10 +152,8 @@ async fn run__new_block_gets_added_to_db() { let mut watcher = StateWatcher::started(); // when - tokio::spawn(async move { - let _ = srv.run(&mut watcher).await; - }); source_sender.send((id, block.clone())).await.unwrap(); + let _ = srv.run(&mut watcher).await; // then tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; @@ -183,10 +179,8 @@ async fn run__get_current_height__returns_expected_height() { let (query, response) = BlockAggregatorQuery::get_current_height(); // when - tokio::spawn(async 
move { - let _ = srv.run(&mut watcher).await; - }); sender.send(query).await.unwrap(); + let _ = srv.run(&mut watcher).await; // then tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; From 7216d384d23795ec4fd723a869dd98e2484af147 Mon Sep 17 00:00:00 2001 From: Mitchell Turner Date: Thu, 4 Sep 2025 13:14:46 -0600 Subject: [PATCH 015/100] Update crates/services/block_aggregator_api/src/lib.rs Co-authored-by: Aaryamann Challani <43716372+rymnc@users.noreply.github.com> --- crates/services/block_aggregator_api/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index f7ca7395569..7c62afe4d75 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -60,11 +60,11 @@ where DB: BlockAggregatorDB, Blocks: BlockSource, { - pub fn new(query: Api, database: DB, _block_source: Blocks) -> Self { + pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { Self { query, database, - block_source: _block_source, + block_source, } } From 5119ab95d03ea85865f812b539a3bf8279e9e5e2 Mon Sep 17 00:00:00 2001 From: Mitchell Turner Date: Thu, 4 Sep 2025 13:14:55 -0600 Subject: [PATCH 016/100] Update crates/services/block_aggregator_api/Cargo.toml Co-authored-by: Aaryamann Challani <43716372+rymnc@users.noreply.github.com> --- crates/services/block_aggregator_api/Cargo.toml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 5243e30e0ec..58870adcd6b 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -4,12 +4,11 @@ version = "0.1.0" edition = "2024" [dependencies] -anyhow = "1.0.98" +anyhow = { workspace = true } fuel-core-services = { workspace = true } -futures-util = "0.3.31" -rand = 
"0.9.2" -tokio = "1.45.1" -tracing = "0.1.41" +rand = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } From dc4f1aa69b7638de7aec2adfde2ba8c34a8ec4f5 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 13:19:02 -0600 Subject: [PATCH 017/100] use futures instead of future-utils --- Cargo.lock | 4 ++-- crates/services/block_aggregator_api/Cargo.toml | 1 + crates/services/block_aggregator_api/src/blocks.rs | 2 +- crates/services/block_aggregator_api/src/tests.rs | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 795b6739fa6..e7b6eefb92e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1399,8 +1399,8 @@ version = "0.1.0" dependencies = [ "anyhow", "fuel-core-services", - "futures-util", - "rand 0.9.2", + "futures", + "rand 0.8.5", "tokio", "tracing", "tracing-subscriber", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 58870adcd6b..81eda811ec4 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -13,3 +13,4 @@ tracing = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } tracing-subscriber = { workspace = true } +futures = { workspace = true } \ No newline at end of file diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 8ec177b0dec..9061112d273 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -16,7 +16,7 @@ impl Block { #[cfg(test)] pub fn arb_size(rng: &mut Rng, size: usize) -> Self { - let bytes: Vec = (0..size).map(|_| rng.random()).collect(); + let bytes: Vec = (0..size).map(|_| rng.r#gen()).collect(); Self::new(bytes) } diff --git 
a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 3f38d91d6b3..5461ea42b63 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -6,7 +6,7 @@ use crate::{ result::Error, }; use fuel_core_services::stream::BoxStream; -use futures_util::StreamExt; +use futures::StreamExt; use rand::{ SeedableRng, prelude::StdRng, @@ -78,7 +78,7 @@ impl BlockAggregatorDB for FakeDB { blocks.push(block.to_owned()); } } - Ok(Box::pin(futures_util::stream::iter(blocks))) + Ok(Box::pin(futures::stream::iter(blocks))) } async fn get_current_height(&self) -> Result { From 720694aa68bb07e8cacd5a01ab32350c5c072d9d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 13:59:17 -0600 Subject: [PATCH 018/100] Fix Debug impl and only use in test --- Cargo.lock | 1 + .../services/block_aggregator_api/Cargo.toml | 1 + .../block_aggregator_api/src/blocks.rs | 47 +++++++++++++++++-- 3 files changed, 44 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7b6eefb92e..20d75ac4c98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1398,6 +1398,7 @@ name = "block_aggregator_api" version = "0.1.0" dependencies = [ "anyhow", + "bytes", "fuel-core-services", "futures", "rand 0.8.5", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 81eda811ec4..db80521217f 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -9,6 +9,7 @@ fuel-core-services = { workspace = true } rand = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +bytes = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 9061112d273..0b45b6d915e 100644 --- 
a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,22 +1,58 @@ use crate::result::Result; +use bytes::Bytes; +use std::fmt::{ + Debug, + Formatter, +}; pub trait BlockSource: Send + Sync { fn next_block(&mut self) -> impl Future> + Send; } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] pub struct Block { - bytes: Vec, + bytes: Bytes, +} + +#[cfg(test)] +impl Debug for Block { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + const BYTES_DISPLAY: usize = 8; + if self.bytes.len() <= BYTES_DISPLAY { + let bytes = &self + .bytes + .iter() + .map(|b| format!("{}", b)) + .collect::>(); + let bytes_string = bytes.join(", "); + write!(f, "Block {{ bytes: [{}] }}", bytes_string)?; + } else { + let bytes_string = &self + .bytes + .iter() + .take(BYTES_DISPLAY) + .map(|b| format!("{}", b)) + .collect::>(); + let bytes_string = bytes_string.join(", "); + let len = self.bytes.len(); + write!( + f, + "Block {{ bytes: [{}, ...] 
(total {} bytes) }}", + bytes_string, len + )?; + } + Ok(()) + } } impl Block { - pub fn new(bytes: Vec) -> Self { + pub fn new(bytes: Bytes) -> Self { Self { bytes } } #[cfg(test)] pub fn arb_size(rng: &mut Rng, size: usize) -> Self { - let bytes: Vec = (0..size).map(|_| rng.r#gen()).collect(); + let bytes: Bytes = (0..size).map(|_| rng.r#gen()).collect(); Self::new(bytes) } @@ -32,7 +68,8 @@ impl Block { } impl From> for Block { - fn from(bytes: Vec) -> Self { + fn from(value: Vec) -> Self { + let bytes = Bytes::from(value); Self::new(bytes) } } From e1a73054a9db08aa7dc03de1afbd5e7e2b140bef Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 14:20:54 -0600 Subject: [PATCH 019/100] Add assoc type for block response to allow for a variety of types --- .../services/block_aggregator_api/src/api.rs | 19 ++++++++----------- .../services/block_aggregator_api/src/db.rs | 5 +++-- .../services/block_aggregator_api/src/lib.rs | 19 ++++++++++--------- .../block_aggregator_api/src/tests.rs | 17 +++++++++++------ 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 021716244ce..4701f6dd73d 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,8 +1,4 @@ -use crate::{ - blocks::Block, - result::Result, -}; -use fuel_core_services::stream::BoxStream; +use crate::result::Result; use std::fmt; use tokio::sync::oneshot::{ Receiver, @@ -11,23 +7,24 @@ use tokio::sync::oneshot::{ }; pub trait BlockAggregatorApi: Send + Sync { + type BlockRangeResponse; fn await_query( &mut self, - ) -> impl Future> + Send; + ) -> impl Future>> + Send; } -pub enum BlockAggregatorQuery { +pub enum BlockAggregatorQuery { GetBlockRange { first: u64, last: u64, - response: Sender>, + response: Sender, }, GetCurrentHeight { response: Sender, }, } -impl fmt::Debug for BlockAggregatorQuery { +impl fmt::Debug for 
BlockAggregatorQuery { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { BlockAggregatorQuery::GetBlockRange { first, last, .. } => f @@ -42,8 +39,8 @@ impl fmt::Debug for BlockAggregatorQuery { } } -impl BlockAggregatorQuery { - pub fn get_block_range(first: u64, last: u64) -> (Self, Receiver>) { +impl BlockAggregatorQuery { + pub fn get_block_range(first: u64, last: u64) -> (Self, Receiver) { let (sender, receiver) = channel(); let query = Self::GetBlockRange { first, diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 0f18eff0df8..2a08bb9881d 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -2,9 +2,10 @@ use crate::{ blocks::Block, result::Result, }; -use fuel_core_services::stream::BoxStream; pub trait BlockAggregatorDB: Send + Sync { + type BlockRange; + fn store_block( &mut self, id: u64, @@ -14,7 +15,7 @@ pub trait BlockAggregatorDB: Send + Sync { &self, first: u64, last: u64, - ) -> impl Future>> + Send; + ) -> impl Future> + Send; fn get_current_height(&self) -> impl Future> + Send; } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 7c62afe4d75..f53edb1a364 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -13,7 +13,6 @@ use fuel_core_services::{ RunnableTask, StateWatcher, TaskNextAction, - stream::BoxStream, try_or_stop, }; use result::Result; @@ -34,11 +33,12 @@ pub struct BlockAggregator { block_source: Blocks, } -impl RunnableTask for BlockAggregator +impl RunnableTask for BlockAggregator where - Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, Blocks: BlockSource, + BlockRange: Send, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tracing::debug!("BlockAggregator running"); @@ -54,11 +54,12 
@@ where } } -impl BlockAggregator +impl BlockAggregator where - Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, Blocks: BlockSource, + BlockRange: Send, { pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { Self { @@ -74,7 +75,7 @@ where pub async fn handle_query( &mut self, - res: Result, + res: Result>, ) -> TaskNextAction { tracing::debug!("Handling query: {res:?}"); let query = try_or_stop!(res, |e| { @@ -99,7 +100,7 @@ where &mut self, first: u64, last: u64, - response: tokio::sync::oneshot::Sender>, + response: tokio::sync::oneshot::Sender, ) -> TaskNextAction { let res = self.database.get_block_range(first, last).await; let block_stream = try_or_stop!(res, |e| { diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 5461ea42b63..c13084a7ea8 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -23,20 +23,23 @@ use tokio::sync::mpsc::{ Sender, }; -struct FakeApi { - receiver: Receiver, +type BlockRangeResponse = BoxStream; + +struct FakeApi { + receiver: Receiver>, } -impl FakeApi { - fn new() -> (Self, Sender) { +impl FakeApi { + fn new() -> (Self, Sender>) { let (sender, receiver) = tokio::sync::mpsc::channel(1); let api = Self { receiver }; (api, sender) } } -impl BlockAggregatorApi for FakeApi { - async fn await_query(&mut self) -> Result { +impl BlockAggregatorApi for FakeApi { + type BlockRangeResponse = T; + async fn await_query(&mut self) -> Result> { Ok(self.receiver.recv().await.unwrap()) } } @@ -61,6 +64,8 @@ impl FakeDB { } impl BlockAggregatorDB for FakeDB { + type BlockRange = BlockRangeResponse; + async fn store_block(&mut self, id: u64, block: Block) -> Result<()> { self.map.lock().unwrap().insert(id, block); Ok(()) From 51b726806bcf43339b8c4f484e16ca347c233c1c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 14:24:14 -0600 
Subject: [PATCH 020/100] Lint toml --- crates/services/block_aggregator_api/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index db80521217f..e370eee3126 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -5,13 +5,13 @@ edition = "2024" [dependencies] anyhow = { workspace = true } +bytes = { workspace = true } fuel-core-services = { workspace = true } rand = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } -bytes = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } +futures = { workspace = true } tracing-subscriber = { workspace = true } -futures = { workspace = true } \ No newline at end of file From 389c46b08611b1e1250d499d209b083132e5798a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 14:27:41 -0600 Subject: [PATCH 021/100] Add example block range response type --- .../block_aggregator_api/src/block_range_response.rs | 10 ++++++++++ crates/services/block_aggregator_api/src/lib.rs | 2 ++ 2 files changed, 12 insertions(+) create mode 100644 crates/services/block_aggregator_api/src/block_range_response.rs diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs new file mode 100644 index 00000000000..c08541fb962 --- /dev/null +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -0,0 +1,10 @@ +use fuel_core_services::stream::BoxStream; +use crate::blocks::Block; + +/// The response to a block range query, either as a literal stream of blocks or as a remote URL +pub enum BlockRangeResponse { +    /// A literal stream of blocks +    Literal(BoxStream), +    /// A remote URL where the blocks can be fetched +    Remote(String), +} \ No newline at end of file diff --git
a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index f53edb1a364..eb956e15a7e 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -22,6 +22,8 @@ pub mod blocks; pub mod db; pub mod result; +pub mod block_range_response; + #[cfg(test)] mod tests; From a12da436591ce78f92df40ed5e6b08f1e63bcc7b Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 14:31:37 -0600 Subject: [PATCH 022/100] Add documentation --- crates/services/block_aggregator_api/src/api.rs | 4 ++++ .../block_aggregator_api/src/block_range_response.rs | 4 ++-- crates/services/block_aggregator_api/src/blocks.rs | 2 ++ crates/services/block_aggregator_api/src/db.rs | 8 ++++++++ crates/services/block_aggregator_api/src/lib.rs | 2 ++ 5 files changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 4701f6dd73d..f1f658964b5 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -6,8 +6,12 @@ use tokio::sync::oneshot::{ channel, }; +/// The API for querying the block aggregator service. pub trait BlockAggregatorApi: Send + Sync { + /// The type of the block range response. type BlockRangeResponse; + + /// Awaits the next query to the block aggregator service. 
fn await_query( &mut self, ) -> impl Future>> + Send; diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index c08541fb962..c8591727289 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,5 +1,5 @@ -use fuel_core_services::stream::BoxStream; use crate::blocks::Block; +use fuel_core_services::stream::BoxStream; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { @@ -7,4 +7,4 @@ pub enum BlockRangeResponse { Literal(BoxStream), /// A remote URL where the blocks can be fetched Remote(String), -} \ No newline at end of file +} diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 0b45b6d915e..d954620578a 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -5,7 +5,9 @@ use std::fmt::{ Formatter, }; +/// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { + /// Asynchronously fetch the next block and its height fn next_block(&mut self) -> impl Future> + Send; } diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 2a08bb9881d..f8318ae456f 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -3,19 +3,27 @@ use crate::{ result::Result, }; +/// The definition of the block aggregator database. 
pub trait BlockAggregatorDB: Send + Sync { + /// The type used to report a range of blocks type BlockRange; + /// Stores a block with the given ID fn store_block( &mut self, id: u64, block: Block, ) -> impl Future> + Send; + + /// Retrieves a range of blocks from the database fn get_block_range( &self, first: u64, last: u64, ) -> impl Future> + Send; + /// Retrieves the current height of the aggregated blocks If there is a break in the blocks, + /// i.e. the blocks are being aggregated out of order, return the height of the last + /// contiguous block fn get_current_height(&self) -> impl Future> + Send; } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index eb956e15a7e..91fdf956742 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -29,6 +29,8 @@ mod tests; // TODO: this doesn't need to limited to the blocks, // but we can change the name later +/// The Block Aggregator service, which aggregates blocks from a source and stores them in a database +/// Queries can be made to the service to retrieve data from the `DB` pub struct BlockAggregator { query: Api, database: DB, From 541b71bcbb47e1f7a5747e4758281efee951d779 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 14:46:32 -0600 Subject: [PATCH 023/100] Re-add debug impl for everyone --- .../block_aggregator_api/src/blocks.rs | 38 +------------------ 1 file changed, 2 insertions(+), 36 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index d954620578a..53e4c7a0863 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,9 +1,6 @@ use crate::result::Result; use bytes::Bytes; -use std::fmt::{ - Debug, - Formatter, -}; +use std::fmt::Debug; /// Source from which blocks can be gathered for aggregation pub trait BlockSource: 
Send + Sync { @@ -11,42 +8,11 @@ pub trait BlockSource: Send + Sync { fn next_block(&mut self) -> impl Future> + Send; } -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct Block { bytes: Bytes, } -#[cfg(test)] -impl Debug for Block { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - const BYTES_DISPLAY: usize = 8; - if self.bytes.len() <= BYTES_DISPLAY { - let bytes = &self - .bytes - .iter() - .map(|b| format!("{}", b)) - .collect::>(); - let bytes_string = bytes.join(", "); - write!(f, "Block {{ bytes: [{}] }}", bytes_string)?; - } else { - let bytes_string = &self - .bytes - .iter() - .take(BYTES_DISPLAY) - .map(|b| format!("{}", b)) - .collect::>(); - let bytes_string = bytes_string.join(", "); - let len = self.bytes.len(); - write!( - f, - "Block {{ bytes: [{}, ...] (total {} bytes) }}", - bytes_string, len - )?; - } - Ok(()) - } -} - impl Block { pub fn new(bytes: Bytes) -> Self { Self { bytes } From 5236bee15e2c9fed75c09a053c6064ed2bd9b050 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 3 Sep 2025 16:53:08 -0600 Subject: [PATCH 024/100] WIP add bare adapter --- .../services/block_aggregator_api/src/db.rs | 2 ++ .../block_aggregator_api/src/db/storage_db.rs | 31 +++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 crates/services/block_aggregator_api/src/db/storage_db.rs diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index f8318ae456f..d79f8950fa0 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -3,6 +3,8 @@ use crate::{ result::Result, }; +pub mod storage_db; + /// The definition of the block aggregator database. 
pub trait BlockAggregatorDB: Send + Sync { /// The type used to report a range of blocks diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs new file mode 100644 index 00000000000..7b39e9afce8 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -0,0 +1,31 @@ +use crate::{ + blocks::Block, + db::BlockAggregatorDB, +}; +use fuel_core_services::stream::BoxStream; + +pub struct StorageDB; + +impl BlockAggregatorDB for StorageDB { + fn store_block( + &mut self, + id: u64, + block: Block, + ) -> impl Future> + Send { + todo!() + } + + fn get_block_range( + &self, + first: u64, + last: u64, + ) -> impl Future>> + Send { + todo!() + } + + fn get_current_height( + &self, + ) -> impl Future> + Send { + todo!() + } +} From 146812e21f4b419664d051172118e7485f5ae1b8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 15:10:20 -0600 Subject: [PATCH 025/100] Fix compilation --- .../block_aggregator_api/src/db/storage_db.rs | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 7b39e9afce8..0f2cb876403 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,31 +1,24 @@ use crate::{ + block_range_response::BlockRangeResponse, blocks::Block, db::BlockAggregatorDB, + result::Result, }; -use fuel_core_services::stream::BoxStream; pub struct StorageDB; impl BlockAggregatorDB for StorageDB { - fn store_block( - &mut self, - id: u64, - block: Block, - ) -> impl Future> + Send { + type BlockRange = BlockRangeResponse; + + async fn store_block(&mut self, _id: u64, _block: Block) -> Result<()> { todo!() } - fn get_block_range( - &self, - first: u64, - last: u64, - ) -> impl Future>> + Send { + async fn get_block_range(&self, _first: 
u64, _last: u64) -> Result { todo!() } - fn get_current_height( - &self, - ) -> impl Future> + Send { + async fn get_current_height(&self) -> Result { todo!() } } From 995c6e10d84575e2c19bc200ec7d0f7fa7b67e50 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 16:01:17 -0600 Subject: [PATCH 026/100] Add failing test --- Cargo.lock | 15 +- .../services/block_aggregator_api/Cargo.toml | 6 + .../block_aggregator_api/src/db/storage_db.rs | 23 ++- .../src/db/storage_db/table.rs | 131 ++++++++++++++++++ .../src/db/storage_db/tests.rs | 13 ++ 5 files changed, 182 insertions(+), 6 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/db/storage_db/table.rs create mode 100644 crates/services/block_aggregator_api/src/db/storage_db/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 20d75ac4c98..442c43f33e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1399,9 +1399,15 @@ version = "0.1.0" dependencies = [ "anyhow", "bytes", + "enum-iterator", "fuel-core-services", + "fuel-core-storage", + "fuel-core-types 0.46.0", "futures", + "num_enum", "rand 0.8.5", + "strum 0.25.0", + "strum_macros 0.25.3", "tokio", "tracing", "tracing-subscriber", @@ -6957,18 +6963,19 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate", "proc-macro2", diff --git a/crates/services/block_aggregator_api/Cargo.toml 
b/crates/services/block_aggregator_api/Cargo.toml index e370eee3126..a8f123b3480 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -6,8 +6,14 @@ edition = "2024" [dependencies] anyhow = { workspace = true } bytes = { workspace = true } +enum-iterator = { workspace = true } fuel-core-services = { workspace = true } +fuel-core-storage = { workspace = true, features = ["std"] } +fuel-core-types = { workspace = true, features = ["std"] } +num_enum = { workspace = true } rand = { workspace = true } +strum = { workspace = true } +strum_macros = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 0f2cb876403..1d7c2c969a1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -4,10 +4,29 @@ use crate::{ db::BlockAggregatorDB, result::Result, }; +use fuel_core_storage::{ + Error as StorageError, + StorageMutate, + transactional::{ + Modifiable, + StorageTransaction, + }, +}; +use table::Blocks; + +pub mod table; +#[cfg(test)] +mod tests; -pub struct StorageDB; +pub struct StorageDB { + _inner: S, +} -impl BlockAggregatorDB for StorageDB { +impl BlockAggregatorDB for StorageDB +where + S: Send + Sync + Modifiable, + for<'a> StorageTransaction<&'a mut S>: StorageMutate, +{ type BlockRange = BlockRangeResponse; async fn store_block(&mut self, _id: u64, _block: Block) -> Result<()> { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs new file mode 100644 index 00000000000..5e841c4263d --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -0,0 +1,131 @@ +use crate::blocks::Block; +use fuel_core_storage::{ + Mappable, + blueprint::plain::Plain, + 
codec::{ + postcard::Postcard, + primitive::Primitive, + }, + kv_store::StorageColumn, + structured_storage::TableWithBlueprint, +}; +use fuel_core_types::fuel_types::BlockHeight; + +#[repr(u32)] +#[derive( + Copy, + Clone, + Debug, + strum_macros::EnumCount, + strum_macros::IntoStaticStr, + PartialEq, + Eq, + enum_iterator::Sequence, + Hash, + num_enum::TryFromPrimitive, +)] +pub enum Column { + Blocks = 0, + // Metadata = 0, + // State = 1, + // UnrecordedBlocks = 2, + // LatestRecordedHeight = 3, +} + +impl Column { + /// The total count of variants in the enum. + pub const COUNT: usize = ::COUNT; + + /// Returns the `usize` representation of the `Column`. + pub fn as_u32(&self) -> u32 { + *self as u32 + } +} + +impl StorageColumn for Column { + fn name(&self) -> String { + let str: &str = self.into(); + str.to_string() + } + + fn id(&self) -> u32 { + self.as_u32() + } +} +// /// The storage table for metadata of the gas price algorithm updater +// pub struct GasPriceMetadata; +// +// impl Mappable for GasPriceMetadata { +// type Key = Self::OwnedKey; +// type OwnedKey = BlockHeight; +// type Value = Self::OwnedValue; +// type OwnedValue = UpdaterMetadata; +// } +// +// impl TableWithBlueprint for GasPriceMetadata { +// type Blueprint = Plain, Postcard>; +// type Column = GasPriceColumn; +// +// fn column() -> Self::Column { +// GasPriceColumn::State +// } +// } +// +// /// The storage for all the unrecorded blocks from gas price algorithm, used for guessing the cost +// /// for future blocks to be recorded on the DA chain +// pub struct UnrecordedBlocksTable; +// +// type BlockSizeInBytes = u64; +// +// impl Mappable for UnrecordedBlocksTable { +// type Key = Self::OwnedKey; +// type OwnedKey = BlockHeight; +// type Value = Self::OwnedValue; +// type OwnedValue = BlockSizeInBytes; +// } +// +// impl TableWithBlueprint for UnrecordedBlocksTable { +// type Blueprint = Plain, Postcard>; +// type Column = GasPriceColumn; +// +// fn column() -> Self::Column { +// 
GasPriceColumn::UnrecordedBlocks +// } +// } +// +// /// Used to store the latest L2 block that has been recorded on the DA chain +// pub struct RecordedHeights; +// +// impl Mappable for RecordedHeights { +// type Key = Self::OwnedKey; +// type OwnedKey = (); +// type Value = Self::OwnedValue; +// type OwnedValue = BlockHeight; +// } +// +// impl TableWithBlueprint for RecordedHeights { +// type Blueprint = Plain>; +// type Column = GasPriceColumn; +// +// fn column() -> Self::Column { +// GasPriceColumn::LatestRecordedHeight +// } +// } + +pub struct Blocks; + +impl Mappable for Blocks { + type Key = Self::OwnedKey; + type OwnedKey = BlockHeight; + type Value = Self::OwnedValue; + type OwnedValue = Block; +} + +impl TableWithBlueprint for Blocks { + type Blueprint = Plain, Postcard>; + type Column = Column; + + fn column() -> Self::Column { + Column::Blocks + } +} diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs new file mode 100644 index 00000000000..943b3c14894 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -0,0 +1,13 @@ +#![allow(non_snake_case)] + +use super::*; + +#[test] +fn store_block__adds_to_storage() { + // given + + // when + + // then + todo!() +} From cedacfce484469d0eabb3a7c640df460c6777761 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 16:03:41 -0600 Subject: [PATCH 027/100] Remove commented code --- .../src/db/storage_db/table.rs | 59 ------------------- 1 file changed, 59 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index 5e841c4263d..02d932fe289 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -52,65 +52,6 @@ impl StorageColumn for Column { self.as_u32() } } -// /// The storage table 
for metadata of the gas price algorithm updater -// pub struct GasPriceMetadata; -// -// impl Mappable for GasPriceMetadata { -// type Key = Self::OwnedKey; -// type OwnedKey = BlockHeight; -// type Value = Self::OwnedValue; -// type OwnedValue = UpdaterMetadata; -// } -// -// impl TableWithBlueprint for GasPriceMetadata { -// type Blueprint = Plain, Postcard>; -// type Column = GasPriceColumn; -// -// fn column() -> Self::Column { -// GasPriceColumn::State -// } -// } -// -// /// The storage for all the unrecorded blocks from gas price algorithm, used for guessing the cost -// /// for future blocks to be recorded on the DA chain -// pub struct UnrecordedBlocksTable; -// -// type BlockSizeInBytes = u64; -// -// impl Mappable for UnrecordedBlocksTable { -// type Key = Self::OwnedKey; -// type OwnedKey = BlockHeight; -// type Value = Self::OwnedValue; -// type OwnedValue = BlockSizeInBytes; -// } -// -// impl TableWithBlueprint for UnrecordedBlocksTable { -// type Blueprint = Plain, Postcard>; -// type Column = GasPriceColumn; -// -// fn column() -> Self::Column { -// GasPriceColumn::UnrecordedBlocks -// } -// } -// -// /// Used to store the latest L2 block that has been recorded on the DA chain -// pub struct RecordedHeights; -// -// impl Mappable for RecordedHeights { -// type Key = Self::OwnedKey; -// type OwnedKey = (); -// type Value = Self::OwnedValue; -// type OwnedValue = BlockHeight; -// } -// -// impl TableWithBlueprint for RecordedHeights { -// type Blueprint = Plain>; -// type Column = GasPriceColumn; -// -// fn column() -> Self::Column { -// GasPriceColumn::LatestRecordedHeight -// } -// } pub struct Blocks; From 930b46589098f5f546bb496e480971454626c55e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 16:19:56 -0600 Subject: [PATCH 028/100] WIP --- Cargo.lock | 1 + .../services/block_aggregator_api/Cargo.toml | 4 ++- .../block_aggregator_api/src/blocks.rs | 2 +- .../block_aggregator_api/src/db/storage_db.rs | 8 ++++- 
.../src/db/storage_db/tests.rs | 30 +++++++++++++++++-- 5 files changed, 39 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 442c43f33e2..cd347b9f9c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1406,6 +1406,7 @@ dependencies = [ "futures", "num_enum", "rand 0.8.5", + "serde", "strum 0.25.0", "strum_macros 0.25.3", "tokio", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index a8f123b3480..adf69dcdef8 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -5,13 +5,14 @@ edition = "2024" [dependencies] anyhow = { workspace = true } -bytes = { workspace = true } +bytes = { workspace = true, features = ["serde"]} enum-iterator = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } num_enum = { workspace = true } rand = { workspace = true } +serde = { workspace = true, features = ["derive"] } strum = { workspace = true } strum_macros = { workspace = true } tokio = { workspace = true } @@ -20,4 +21,5 @@ tracing = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } futures = { workspace = true } +fuel-core-storage = { workspace = true, features = ["test-helpers"] } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 53e4c7a0863..e3a79ebce3d 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -8,7 +8,7 @@ pub trait BlockSource: Send + Sync { fn next_block(&mut self) -> impl Future> + Send; } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct Block { bytes: Bytes, } diff --git 
a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 1d7c2c969a1..c7f892894f4 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -19,7 +19,13 @@ pub mod table; mod tests; pub struct StorageDB { - _inner: S, + inner: S, +} + +impl StorageDB { + pub fn new(storage: S) -> Self { + Self { inner: storage } + } } impl BlockAggregatorDB for StorageDB diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 943b3c14894..04b788db512 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -1,13 +1,37 @@ #![allow(non_snake_case)] use super::*; +use crate::db::storage_db::table::Column; +use fuel_core_storage::{ + StorageAsRef, + StorageMutate, + structured_storage::test::InMemoryStorage, + transactional::IntoTransaction, +}; +use fuel_core_types::{ + ed25519::signature::rand_core::SeedableRng, + fuel_merkle::storage::StorageInspectInfallible, + fuel_types::BlockHeight, +}; +use rand::rngs::StdRng; -#[test] -fn store_block__adds_to_storage() { +fn database() -> StorageTransaction> { + InMemoryStorage::default().into_transaction() +} + +#[tokio::test] +async fn store_block__adds_to_storage() { + let mut rng = StdRng::seed_from_u64(666); // given + let db = database(); + let mut adapter = StorageDB::new(db.clone()); + let height = BlockHeight::from(1u32); + let block = Block::random(&mut rng); // when + adapter.store_block(height, block.clone()).await.unwrap(); // then - todo!() + let block = db.storage::().get(&height).unwrap().to_owned(); + assert_eq!(block, Some(block)); } From 71b5d125c36273cb9358bed20e40a408bf0635f8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 16:29:07 -0600 Subject: [PATCH 029/100] Use 
`BlockHeight` instead of `u64` --- Cargo.lock | 1 + .../services/block_aggregator_api/Cargo.toml | 1 + .../services/block_aggregator_api/src/api.rs | 16 ++++--- .../block_aggregator_api/src/blocks.rs | 4 +- .../services/block_aggregator_api/src/db.rs | 9 ++-- .../services/block_aggregator_api/src/lib.rs | 12 ++++-- .../block_aggregator_api/src/tests.rs | 42 +++++++++++-------- 7 files changed, 53 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20d75ac4c98..61643c04677 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1400,6 +1400,7 @@ dependencies = [ "anyhow", "bytes", "fuel-core-services", + "fuel-core-types 0.46.0", "futures", "rand 0.8.5", "tokio", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index e370eee3126..a5580c29dac 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -7,6 +7,7 @@ edition = "2024" anyhow = { workspace = true } bytes = { workspace = true } fuel-core-services = { workspace = true } +fuel-core-types = { workspace = true, features = ["std"] } rand = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index f1f658964b5..919221fcc80 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,4 +1,5 @@ use crate::result::Result; +use fuel_core_types::fuel_types::BlockHeight; use std::fmt; use tokio::sync::oneshot::{ Receiver, @@ -19,12 +20,12 @@ pub trait BlockAggregatorApi: Send + Sync { pub enum BlockAggregatorQuery { GetBlockRange { - first: u64, - last: u64, + first: BlockHeight, + last: BlockHeight, response: Sender, }, GetCurrentHeight { - response: Sender, + response: Sender, }, } @@ -44,8 +45,13 @@ impl fmt::Debug for BlockAggregatorQuery { } impl BlockAggregatorQuery { - pub fn 
get_block_range(first: u64, last: u64) -> (Self, Receiver) { + pub fn get_block_range>( + first: H, + last: H, + ) -> (Self, Receiver) { let (sender, receiver) = channel(); + let first: BlockHeight = first.into(); + let last: BlockHeight = last.into(); let query = Self::GetBlockRange { first, last, @@ -54,7 +60,7 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn get_current_height() -> (Self, Receiver) { + pub fn get_current_height() -> (Self, Receiver) { let (sender, receiver) = channel(); let query = Self::GetCurrentHeight { response: sender }; (query, receiver) diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 53e4c7a0863..c937f92ff26 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,11 +1,13 @@ use crate::result::Result; use bytes::Bytes; +use fuel_core_types::fuel_types::BlockHeight; use std::fmt::Debug; /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { /// Asynchronously fetch the next block and its height - fn next_block(&mut self) -> impl Future> + Send; + fn next_block(&mut self) + -> impl Future> + Send; } #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index f8318ae456f..c340d1432b2 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -2,6 +2,7 @@ use crate::{ blocks::Block, result::Result, }; +use fuel_core_types::fuel_types::BlockHeight; /// The definition of the block aggregator database. 
pub trait BlockAggregatorDB: Send + Sync { @@ -11,19 +12,19 @@ pub trait BlockAggregatorDB: Send + Sync { /// Stores a block with the given ID fn store_block( &mut self, - id: u64, + height: BlockHeight, block: Block, ) -> impl Future> + Send; /// Retrieves a range of blocks from the database fn get_block_range( &self, - first: u64, - last: u64, + first: BlockHeight, + last: BlockHeight, ) -> impl Future> + Send; /// Retrieves the current height of the aggregated blocks If there is a break in the blocks, /// i.e. the blocks are being aggregated out of order, return the height of the last /// contiguous block - fn get_current_height(&self) -> impl Future> + Send; + fn get_current_height(&self) -> impl Future> + Send; } diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 91fdf956742..f70092ad842 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -15,6 +15,7 @@ use fuel_core_services::{ TaskNextAction, try_or_stop, }; +use fuel_core_types::fuel_types::BlockHeight; use result::Result; pub mod api; @@ -102,8 +103,8 @@ where async fn handle_get_block_range_query( &mut self, - first: u64, - last: u64, + first: BlockHeight, + last: BlockHeight, response: tokio::sync::oneshot::Sender, ) -> TaskNextAction { let res = self.database.get_block_range(first, last).await; @@ -119,7 +120,7 @@ where async fn handle_get_current_height_query( &mut self, - response: tokio::sync::oneshot::Sender, + response: tokio::sync::oneshot::Sender, ) -> TaskNextAction { let res = self.database.get_current_height().await; let height = try_or_stop!(res, |e| { @@ -132,7 +133,10 @@ where TaskNextAction::Continue } - pub async fn handle_block(&mut self, res: Result<(u64, Block)>) -> TaskNextAction { + pub async fn handle_block( + &mut self, + res: Result<(BlockHeight, Block)>, + ) -> TaskNextAction { tracing::debug!("Handling block: {res:?}"); let (id, block) = 
try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index c13084a7ea8..8aa2f122e99 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -45,7 +45,7 @@ impl BlockAggregatorApi for FakeApi { } struct FakeDB { - map: Arc>>, + map: Arc>>, } impl FakeDB { @@ -54,11 +54,11 @@ impl FakeDB { Self { map } } - fn add_block(&mut self, id: u64, block: Block) { - self.map.lock().unwrap().insert(id, block); + fn add_block(&mut self, height: BlockHeight, block: Block) { + self.map.lock().unwrap().insert(height, block); } - fn clone_inner(&self) -> Arc>> { + fn clone_inner(&self) -> Arc>> { self.map.clone() } } @@ -66,13 +66,19 @@ impl FakeDB { impl BlockAggregatorDB for FakeDB { type BlockRange = BlockRangeResponse; - async fn store_block(&mut self, id: u64, block: Block) -> Result<()> { + async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { self.map.lock().unwrap().insert(id, block); Ok(()) } - async fn get_block_range(&self, first: u64, last: u64) -> Result> { + async fn get_block_range( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> Result> { let mut blocks = vec![]; + let first: u32 = first.into(); + let last: u32 = last.into(); for id in first..=last { if let Some(block) = self .map @@ -86,19 +92,19 @@ impl BlockAggregatorDB for FakeDB { Ok(Box::pin(futures::stream::iter(blocks))) } - async fn get_current_height(&self) -> Result { + async fn get_current_height(&self) -> Result { let map = self.map.lock().unwrap(); - let max_height = map.keys().max().cloned().unwrap_or(0); + let max_height = map.keys().max().cloned().unwrap_or(BlockHeight::from(0u32)); Ok(max_height) } } struct FakeBlockSource { - blocks: Receiver<(u64, Block)>, + blocks: Receiver<(BlockHeight, Block)>, } impl FakeBlockSource { - fn new() -> (Self, 
Sender<(u64, Block)>) { + fn new() -> (Self, Sender<(BlockHeight, Block)>) { let (_sender, receiver) = tokio::sync::mpsc::channel(1); let _self = Self { blocks: receiver }; (_self, _sender) @@ -106,7 +112,7 @@ impl FakeBlockSource { } impl BlockSource for FakeBlockSource { - async fn next_block(&mut self) -> Result<(u64, Block)> { + async fn next_block(&mut self) -> Result<(BlockHeight, Block)> { self.blocks.recv().await.ok_or(Error::BlockSourceError) } } @@ -117,9 +123,9 @@ async fn run__get_block_range__returns_expected_blocks() { // given let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); - db.add_block(1, Block::random(&mut rng)); - db.add_block(2, Block::random(&mut rng)); - db.add_block(3, Block::random(&mut rng)); + db.add_block(1.into(), Block::random(&mut rng)); + db.add_block(2.into(), Block::random(&mut rng)); + db.add_block(3.into(), Block::random(&mut rng)); let (source, _block_sender) = FakeBlockSource::new(); @@ -153,7 +159,7 @@ async fn run__new_block_gets_added_to_db() { let mut srv = BlockAggregator::new(api, db, source); let block = Block::random(&mut rng); - let id = 123u64; + let id = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); // when @@ -172,9 +178,9 @@ async fn run__get_current_height__returns_expected_height() { // given let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); - let expected_height = 3; - db.add_block(1, Block::random(&mut rng)); - db.add_block(2, Block::random(&mut rng)); + let expected_height = BlockHeight::from(3u32); + db.add_block(1.into(), Block::random(&mut rng)); + db.add_block(2.into(), Block::random(&mut rng)); db.add_block(expected_height, Block::random(&mut rng)); let (source, _block_sender) = FakeBlockSource::new(); From 325de737df6055fdfadb8283182b1652ef262af1 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 4 Sep 2025 16:39:30 -0600 Subject: [PATCH 030/100] Add failing test --- .../block_aggregator_api/src/db/storage_db.rs | 15 ++++++++++----- 
.../src/db/storage_db/tests.rs | 15 +++++++++------ 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index c7f892894f4..cd3f2823b63 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -12,6 +12,7 @@ use fuel_core_storage::{ StorageTransaction, }, }; +use fuel_core_types::fuel_types::BlockHeight; use table::Blocks; pub mod table; @@ -19,12 +20,12 @@ pub mod table; mod tests; pub struct StorageDB { - inner: S, + _inner: S, } impl StorageDB { pub fn new(storage: S) -> Self { - Self { inner: storage } + Self { _inner: storage } } } @@ -35,15 +36,19 @@ where { type BlockRange = BlockRangeResponse; - async fn store_block(&mut self, _id: u64, _block: Block) -> Result<()> { + async fn store_block(&mut self, _height: BlockHeight, _block: Block) -> Result<()> { todo!() } - async fn get_block_range(&self, _first: u64, _last: u64) -> Result { + async fn get_block_range( + &self, + _first: BlockHeight, + _last: BlockHeight, + ) -> Result { todo!() } - async fn get_current_height(&self) -> Result { + async fn get_current_height(&self) -> Result { todo!() } } diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 04b788db512..3527c47b134 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -4,13 +4,11 @@ use super::*; use crate::db::storage_db::table::Column; use fuel_core_storage::{ StorageAsRef, - StorageMutate, structured_storage::test::InMemoryStorage, transactional::IntoTransaction, }; use fuel_core_types::{ ed25519::signature::rand_core::SeedableRng, - fuel_merkle::storage::StorageInspectInfallible, fuel_types::BlockHeight, }; use rand::rngs::StdRng; @@ -26,12 +24,17 @@ 
async fn store_block__adds_to_storage() { let db = database(); let mut adapter = StorageDB::new(db.clone()); let height = BlockHeight::from(1u32); - let block = Block::random(&mut rng); + let expected = Block::random(&mut rng); // when - adapter.store_block(height, block.clone()).await.unwrap(); + adapter.store_block(height, expected.clone()).await.unwrap(); // then - let block = db.storage::().get(&height).unwrap().to_owned(); - assert_eq!(block, Some(block)); + let actual = db + .storage::() + .get(&height) + .unwrap() + .unwrap() + .into_owned(); + assert_eq!(actual, expected); } From ced1c10ec953057df1ac452181e0d3b92f699cef Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 5 Sep 2025 16:11:51 -0600 Subject: [PATCH 031/100] Wip add get_block_range test --- .../src/block_range_response.rs | 11 +++- .../block_aggregator_api/src/db/storage_db.rs | 57 +++++++++++++++---- .../src/db/storage_db/table.rs | 10 ++-- .../src/db/storage_db/tests.rs | 45 ++++++++++++++- .../block_aggregator_api/src/result.rs | 6 +- .../block_aggregator_api/src/tests.rs | 2 +- .../metadata_tests.rs | 2 +- 7 files changed, 107 insertions(+), 26 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index c8591727289..03cb50c6e34 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,10 +1,15 @@ -use crate::blocks::Block; -use fuel_core_services::stream::BoxStream; +use crate::{ + blocks::Block, + result::Result, +}; +use fuel_core_services::stream::Stream; + +pub type BoxStream = core::pin::Pin + Send>>; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { /// A literal stream of blocks - Literal(BoxStream), + Literal(BoxStream>), /// A remote URL where the blocks can be fetched Remote(String), } diff --git 
a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index cd3f2823b63..bbd8e6d34eb 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,15 +1,36 @@ use crate::{ block_range_response::BlockRangeResponse, blocks::Block, - db::BlockAggregatorDB, - result::Result, + db::{ + BlockAggregatorDB, + storage_db::table::Column, + }, + result::{ + Error, + Result, + }, }; +use anyhow::anyhow; use fuel_core_storage::{ Error as StorageError, + Mappable, + StorageAsMut, + StorageAsRef, + StorageInspect, StorageMutate, + iter::{ + IntoBoxedIter, + IterDirection, + IterableStore, + IterableTable, + IteratorOverTable, + }, + kv_store::KeyValueInspect, transactional::{ Modifiable, + ReadTransaction, StorageTransaction, + WriteTransaction, }, }; use fuel_core_types::fuel_types::BlockHeight; @@ -20,32 +41,48 @@ pub mod table; mod tests; pub struct StorageDB { - _inner: S, + inner: S, } impl StorageDB { pub fn new(storage: S) -> Self { - Self { _inner: storage } + Self { inner: storage } } } impl BlockAggregatorDB for StorageDB where - S: Send + Sync + Modifiable, + S: Send + Sync + Modifiable + Clone + 'static, + S: IterableTable, for<'a> StorageTransaction<&'a mut S>: StorageMutate, { type BlockRange = BlockRangeResponse; - async fn store_block(&mut self, _height: BlockHeight, _block: Block) -> Result<()> { - todo!() + async fn store_block(&mut self, height: BlockHeight, block: Block) -> Result<()> { + let mut tx = self.inner.write_transaction(); + tx.storage_as_mut::() + .insert(&height, &block) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + Ok(()) } async fn get_block_range( &self, - _first: BlockHeight, - _last: BlockHeight, + first: BlockHeight, + last: BlockHeight, ) -> Result { - todo!() + let iter = self + .inner + .iter_all_by_start::(Some(&first), Some(IterDirection::Forward)) 
+ .take_while(move |res| match res { + Ok((height, _)) => *height <= last, + _ => true, + }) + .map(|res| res.map(|(_, block)| block)) + .map(|res| res.map_err(|e| Error::DB(anyhow!(e)))); + let stream = futures::stream::iter(iter); + Ok(BlockRangeResponse::Literal(Box::pin(stream))) } async fn get_current_height(&self) -> Result { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index 02d932fe289..d25685fffd8 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -2,10 +2,7 @@ use crate::blocks::Block; use fuel_core_storage::{ Mappable, blueprint::plain::Plain, - codec::{ - postcard::Postcard, - primitive::Primitive, - }, + codec::postcard::Postcard, kv_store::StorageColumn, structured_storage::TableWithBlueprint, }; @@ -25,7 +22,8 @@ use fuel_core_types::fuel_types::BlockHeight; num_enum::TryFromPrimitive, )] pub enum Column { - Blocks = 0, + Metadata = 0, + Blocks = 1, // Metadata = 0, // State = 1, // UnrecordedBlocks = 2, @@ -63,7 +61,7 @@ impl Mappable for Blocks { } impl TableWithBlueprint for Blocks { - type Blueprint = Plain, Postcard>; + type Blueprint = Plain; type Column = Column; fn column() -> Self::Column { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 3527c47b134..b5428379109 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -4,6 +4,7 @@ use super::*; use crate::db::storage_db::table::Column; use fuel_core_storage::{ StorageAsRef, + iter::IterableTable, structured_storage::test::InMemoryStorage, transactional::IntoTransaction, }; @@ -11,6 +12,7 @@ use fuel_core_types::{ ed25519::signature::rand_core::SeedableRng, fuel_types::BlockHeight, }; +use futures::StreamExt; 
use rand::rngs::StdRng; fn database() -> StorageTransaction> { @@ -22,7 +24,7 @@ async fn store_block__adds_to_storage() { let mut rng = StdRng::seed_from_u64(666); // given let db = database(); - let mut adapter = StorageDB::new(db.clone()); + let mut adapter = StorageDB::new(db); let height = BlockHeight::from(1u32); let expected = Block::random(&mut rng); @@ -30,11 +32,48 @@ async fn store_block__adds_to_storage() { adapter.store_block(height, expected.clone()).await.unwrap(); // then - let actual = db - .storage::() + let actual = adapter + .inner + .storage_as_ref::() .get(&height) .unwrap() .unwrap() .into_owned(); assert_eq!(actual, expected); } + +#[tokio::test] +async fn get_block__can_get_expected_range() { + let mut rng = StdRng::seed_from_u64(666); + // given + let db = database(); + let mut adapter = StorageDB::new(db); + let height_1 = BlockHeight::from(1u32); + let height_2 = BlockHeight::from(2u32); + let height_3 = BlockHeight::from(3u32); + let expected_1 = Block::random(&mut rng); + let expected_2 = Block::random(&mut rng); + let expected_3 = Block::random(&mut rng); + + let mut tx = adapter.inner.write_transaction(); + tx.storage_as_mut::() + .insert(&height_1, &expected_1) + .unwrap(); + tx.storage_as_mut::() + .insert(&height_2, &expected_2) + .unwrap(); + tx.storage_as_mut::() + .insert(&height_3, &expected_3) + .unwrap(); + + // when + let BlockRangeResponse::Literal(stream) = + adapter.get_block_range(height_2, height_3).await.unwrap() + else { + panic!("expected literal response") + }; + let actual = stream.collect::>().await; + + // then + assert_eq!(actual, vec![expected_2, expected_3]); +} diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index 86b24e3a39c..8a71a151876 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -1,6 +1,8 @@ #[derive(Debug)] pub enum Error { - ApiError, - BlockSourceError, + 
Api, + BlockSource, + DB(anyhow::Error), } + pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 8aa2f122e99..1595de0190d 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -113,7 +113,7 @@ impl FakeBlockSource { impl BlockSource for FakeBlockSource { async fn next_block(&mut self) -> Result<(BlockHeight, Block)> { - self.blocks.recv().await.ok_or(Error::BlockSourceError) + self.blocks.recv().await.ok_or(Error::BlockSource) } } diff --git a/crates/services/gas_price_service/src/common/fuel_core_storage_adapter/metadata_tests.rs b/crates/services/gas_price_service/src/common/fuel_core_storage_adapter/metadata_tests.rs index 86a3c458f5e..ecacb909c69 100644 --- a/crates/services/gas_price_service/src/common/fuel_core_storage_adapter/metadata_tests.rs +++ b/crates/services/gas_price_service/src/common/fuel_core_storage_adapter/metadata_tests.rs @@ -74,9 +74,9 @@ async fn set_metadata__can_set_metadata() { let actual = database.get_metadata(&block_height).unwrap(); assert_eq!(None, actual); database.set_metadata(&metadata).unwrap(); - let actual = database.get_metadata(&block_height).unwrap(); // then + let actual = database.get_metadata(&block_height).unwrap(); let expected = Some(metadata); assert_eq!(expected, actual); } From fce4c3c93680a338aae205fab2a172e43b408bf9 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 5 Sep 2025 16:46:20 -0600 Subject: [PATCH 032/100] Add a custom stream that gets the job done. 
Maybe not performant --- .../src/block_range_response.rs | 9 +- .../block_aggregator_api/src/db/storage_db.rs | 91 ++++++++++++++++--- .../src/db/storage_db/tests.rs | 4 + 3 files changed, 87 insertions(+), 17 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 03cb50c6e34..785bfc57ff1 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -2,14 +2,15 @@ use crate::{ blocks::Block, result::Result, }; -use fuel_core_services::stream::Stream; - -pub type BoxStream = core::pin::Pin + Send>>; +use fuel_core_services::stream::{ + BoxStream, + Stream, +}; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { /// A literal stream of blocks - Literal(BoxStream>), + Literal(BoxStream), /// A remote URL where the blocks can be fetched Remote(String), } diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index bbd8e6d34eb..577077292f5 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -11,6 +11,7 @@ use crate::{ }, }; use anyhow::anyhow; +use fuel_core_services::stream::Stream; use fuel_core_storage::{ Error as StorageError, Mappable, @@ -34,6 +35,13 @@ use fuel_core_storage::{ }, }; use fuel_core_types::fuel_types::BlockHeight; +use std::{ + pin::Pin, + task::{ + Context, + Poll, + }, +}; use table::Blocks; pub mod table; @@ -52,9 +60,9 @@ impl StorageDB { impl BlockAggregatorDB for StorageDB where - S: Send + Sync + Modifiable + Clone + 'static, - S: IterableTable, + S: Send + Sync + Modifiable + Clone + Unpin + ReadTransaction + 'static, for<'a> StorageTransaction<&'a mut S>: StorageMutate, + for<'a> StorageTransaction<&'a S>: 
StorageInspect, { type BlockRange = BlockRangeResponse; @@ -71,17 +79,8 @@ where &self, first: BlockHeight, last: BlockHeight, - ) -> Result { - let iter = self - .inner - .iter_all_by_start::(Some(&first), Some(IterDirection::Forward)) - .take_while(move |res| match res { - Ok((height, _)) => *height <= last, - _ => true, - }) - .map(|res| res.map(|(_, block)| block)) - .map(|res| res.map_err(|e| Error::DB(anyhow!(e)))); - let stream = futures::stream::iter(iter); + ) -> Result { + let stream = StorageStream::new(self.inner.clone(), first, last); Ok(BlockRangeResponse::Literal(Box::pin(stream))) } @@ -89,3 +88,69 @@ where todo!() } } + +pub struct StorageStream { + inner: S, + next: Option, + last: BlockHeight, +} + +impl StorageStream { + pub fn new(inner: S, first: BlockHeight, last: BlockHeight) -> Self { + Self { + inner, + next: Some(first), + last, + } + } +} + +impl Stream for StorageStream +where + S: Unpin + ReadTransaction, + for<'a> StorageTransaction<&'a S>: StorageInspect, +{ + type Item = Block; + + fn poll_next( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + tracing::debug!( + "Polling next block from storage stream, next height: {:?}", + self.next + ); + let this = self.get_mut(); + if let Some(height) = this.next { + let mut tx = this.inner.read_transaction(); + let next_block = tx + .storage_as_ref::() + .get(&height) + .map_err(|e| Error::DB(anyhow!(e))); + match next_block { + Ok(Some(block)) => { + tracing::debug!("Found block at height: {:?}", height); + let next = if height < this.last { + Some(BlockHeight::new(*height + 1)) + } else { + None + }; + this.next = next; + Poll::Ready(Some(block.into_owned())) + } + Ok(None) => { + tracing::debug!("No block at height: {:?}", height); + this.next = None; + Poll::Ready(None) + } + Err(e) => { + tracing::debug!("Error while reading next block: {:?}", e); + this.next = None; + Poll::Ready(Some(Err(e).unwrap())) + } + } + } else { + Poll::Ready(None) + } + } +} diff --git 
a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index b5428379109..f05efd829d1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -44,6 +44,9 @@ async fn store_block__adds_to_storage() { #[tokio::test] async fn get_block__can_get_expected_range() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .try_init(); let mut rng = StdRng::seed_from_u64(666); // given let db = database(); @@ -65,6 +68,7 @@ async fn get_block__can_get_expected_range() { tx.storage_as_mut::() .insert(&height_3, &expected_3) .unwrap(); + tx.commit().unwrap(); // when let BlockRangeResponse::Literal(stream) = From eb280c79c02e35c7f1d5ca1dd350aae1c5f1a00d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 16:16:57 -0600 Subject: [PATCH 033/100] Use view for stream of db data --- .../src/block_range_response.rs | 7 ++---- .../block_aggregator_api/src/db/storage_db.rs | 23 ++++++++++++----- .../src/db/storage_db/tests.rs | 16 +++++++++--- crates/storage/src/structured_storage.rs | 25 ++++++++++++++++--- 4 files changed, 53 insertions(+), 18 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 785bfc57ff1..8097b4ee287 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,10 +1,7 @@ -use crate::{ - blocks::Block, - result::Result, -}; +use crate::blocks::Block; use fuel_core_services::stream::{ BoxStream, - Stream, + RefBoxStream, }; /// The response to a block range query, either as a literal stream of blocks or as a remote URL diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 
577077292f5..221e1e5a843 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -28,6 +28,7 @@ use fuel_core_storage::{ }, kv_store::KeyValueInspect, transactional::{ + AtomicView, Modifiable, ReadTransaction, StorageTransaction, @@ -36,6 +37,7 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ + marker::PhantomData, pin::Pin, task::{ Context, @@ -58,11 +60,16 @@ impl StorageDB { } } -impl BlockAggregatorDB for StorageDB +impl BlockAggregatorDB for StorageDB where - S: Send + Sync + Modifiable + Clone + Unpin + ReadTransaction + 'static, - for<'a> StorageTransaction<&'a mut S>: StorageMutate, - for<'a> StorageTransaction<&'a S>: StorageInspect, + // S: Send + Sync + Modifiable + Clone + Unpin + ReadTransaction + 'static, + S: Modifiable + std::fmt::Debug, + S: KeyValueInspect, + for<'b> StorageTransaction<&'b mut S>: StorageMutate, + // for<'b> StorageTransaction<&'b S>: StorageInspect, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, + StorageTransaction: AtomicView + StorageInspect, { type BlockRange = BlockRangeResponse; @@ -80,7 +87,11 @@ where first: BlockHeight, last: BlockHeight, ) -> Result { - let stream = StorageStream::new(self.inner.clone(), first, last); + let latest_veiw = self + .inner + .latest_view() + .map_err(|e| Error::DB(anyhow!(e)))?; + let stream = StorageStream::new(latest_veiw, first, last); Ok(BlockRangeResponse::Literal(Box::pin(stream))) } @@ -107,7 +118,7 @@ impl StorageStream { impl Stream for StorageStream where - S: Unpin + ReadTransaction, + S: Unpin + ReadTransaction + std::fmt::Debug, for<'a> StorageTransaction<&'a S>: StorageInspect, { type Item = Block; diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index f05efd829d1..e8e6edf2e46 100644 --- 
a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -4,7 +4,6 @@ use super::*; use crate::db::storage_db::table::Column; use fuel_core_storage::{ StorageAsRef, - iter::IterableTable, structured_storage::test::InMemoryStorage, transactional::IntoTransaction, }; @@ -49,16 +48,22 @@ async fn get_block__can_get_expected_range() { .try_init(); let mut rng = StdRng::seed_from_u64(666); // given - let db = database(); - let mut adapter = StorageDB::new(db); + let mut db = database(); + let height_0 = BlockHeight::from(0u32); let height_1 = BlockHeight::from(1u32); let height_2 = BlockHeight::from(2u32); let height_3 = BlockHeight::from(3u32); + let expected_0 = Block::random(&mut rng); let expected_1 = Block::random(&mut rng); let expected_2 = Block::random(&mut rng); let expected_3 = Block::random(&mut rng); - let mut tx = adapter.inner.write_transaction(); + let mut tx = db.write_transaction(); + tx.storage_as_mut::() + .insert(&height_0, &expected_0) + .unwrap(); + tx.commit().unwrap(); + let mut tx = db.write_transaction(); tx.storage_as_mut::() .insert(&height_1, &expected_1) .unwrap(); @@ -69,6 +74,9 @@ async fn get_block__can_get_expected_range() { .insert(&height_3, &expected_3) .unwrap(); tx.commit().unwrap(); + let db = db.commit().unwrap(); + let tx = db.into_transaction(); + let mut adapter = StorageDB::new(tx); // when let BlockRangeResponse::Literal(stream) = diff --git a/crates/storage/src/structured_storage.rs b/crates/storage/src/structured_storage.rs index 11c4db2ccbe..3ce5e49ff32 100644 --- a/crates/storage/src/structured_storage.rs +++ b/crates/storage/src/structured_storage.rs @@ -426,9 +426,16 @@ where #[cfg(feature = "test-helpers")] pub mod test { use crate as fuel_core_storage; - use crate::kv_store::{ - KeyValueInspect, - StorageColumn, + use crate::{ + kv_store::{ + KeyValueInspect, + StorageColumn, + }, + structured_storage::StructuredStorage, + 
transactional::{ + AtomicView, + InMemoryTransaction, + }, }; use fuel_core_storage::{ Result as StorageResult, @@ -472,4 +479,16 @@ pub mod test { Ok(value) } } + + impl AtomicView + for StructuredStorage>> + where + Column: Clone + Send + Sync, + { + type LatestView = InMemoryStorage; + + fn latest_view(&self) -> StorageResult { + Ok(self.inner.storage.clone()) + } + } } From 8d178e1b6610f3ba27f49917b4e2ec3f4e1af9b4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 16:22:30 -0600 Subject: [PATCH 034/100] Cleanup --- .../block_aggregator_api/src/db/storage_db/table.rs | 6 ------ .../block_aggregator_api/src/db/storage_db/tests.rs | 7 ------- 2 files changed, 13 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index d25685fffd8..525645100e8 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -24,17 +24,11 @@ use fuel_core_types::fuel_types::BlockHeight; pub enum Column { Metadata = 0, Blocks = 1, - // Metadata = 0, - // State = 1, - // UnrecordedBlocks = 2, - // LatestRecordedHeight = 3, } impl Column { - /// The total count of variants in the enum. pub const COUNT: usize = ::COUNT; - /// Returns the `usize` representation of the `Column`. 
pub fn as_u32(&self) -> u32 { *self as u32 } diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index e8e6edf2e46..0b2b70308dd 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -49,20 +49,13 @@ async fn get_block__can_get_expected_range() { let mut rng = StdRng::seed_from_u64(666); // given let mut db = database(); - let height_0 = BlockHeight::from(0u32); let height_1 = BlockHeight::from(1u32); let height_2 = BlockHeight::from(2u32); let height_3 = BlockHeight::from(3u32); - let expected_0 = Block::random(&mut rng); let expected_1 = Block::random(&mut rng); let expected_2 = Block::random(&mut rng); let expected_3 = Block::random(&mut rng); - let mut tx = db.write_transaction(); - tx.storage_as_mut::() - .insert(&height_0, &expected_0) - .unwrap(); - tx.commit().unwrap(); let mut tx = db.write_transaction(); tx.storage_as_mut::() .insert(&height_1, &expected_1) From 571691f5e1ab9076d0ff9c5e51c84c40ebe6ca26 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 19:42:59 -0600 Subject: [PATCH 035/100] Add changelog, spellcheck, lint --- .changes/added/3092.md | 1 + crates/services/block_aggregator_api/Cargo.toml | 4 ++-- crates/services/block_aggregator_api/src/db/storage_db.rs | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 .changes/added/3092.md diff --git a/.changes/added/3092.md b/.changes/added/3092.md new file mode 100644 index 00000000000..5445b604f07 --- /dev/null +++ b/.changes/added/3092.md @@ -0,0 +1 @@ +Add adapter for the block aggregator DB \ No newline at end of file diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index adf69dcdef8..7754851f4a6 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ 
-5,7 +5,7 @@ edition = "2024" [dependencies] anyhow = { workspace = true } -bytes = { workspace = true, features = ["serde"]} +bytes = { workspace = true, features = ["serde"] } enum-iterator = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } @@ -20,6 +20,6 @@ tracing = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } -futures = { workspace = true } fuel-core-storage = { workspace = true, features = ["test-helpers"] } +futures = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 221e1e5a843..66c38c89417 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -87,11 +87,11 @@ where first: BlockHeight, last: BlockHeight, ) -> Result { - let latest_veiw = self + let latest_view = self .inner .latest_view() .map_err(|e| Error::DB(anyhow!(e)))?; - let stream = StorageStream::new(latest_veiw, first, last); + let stream = StorageStream::new(latest_view, first, last); Ok(BlockRangeResponse::Literal(Box::pin(stream))) } From 57971307908cdfbd0780dc2034dd1baca3213ddb Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 19:57:02 -0600 Subject: [PATCH 036/100] Basic --- crates/services/block_aggregator_api/src/blocks.rs | 4 ++++ .../src/blocks/importer_and_onchain_source.rs | 1 + 2 files changed, 5 insertions(+) create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index d21698e0921..4963e83f29f 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -3,11 +3,15 @@ use 
bytes::Bytes; use fuel_core_types::fuel_types::BlockHeight; use std::fmt::Debug; +pub mod importer_and_onchain_source; + /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { /// Asynchronously fetch the next block and its height fn next_block(&mut self) -> impl Future> + Send; + + fn subscribe_to_new_blocks(&mut self) -> Result<()>; } #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs @@ -0,0 +1 @@ + From 119af9b887263f6eebe605807f5bbba14397e922 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 20:00:20 -0600 Subject: [PATCH 037/100] Appease Clippy-sama --- .../src/block_range_response.rs | 5 +---- .../block_aggregator_api/src/db/storage_db.rs | 15 +++------------ .../src/db/storage_db/tests.rs | 2 +- 3 files changed, 5 insertions(+), 17 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 8097b4ee287..c8591727289 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,8 +1,5 @@ use crate::blocks::Block; -use fuel_core_services::stream::{ - BoxStream, - RefBoxStream, -}; +use fuel_core_services::stream::BoxStream; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 66c38c89417..33257915c01 100644 --- 
a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -14,18 +14,10 @@ use anyhow::anyhow; use fuel_core_services::stream::Stream; use fuel_core_storage::{ Error as StorageError, - Mappable, StorageAsMut, StorageAsRef, StorageInspect, StorageMutate, - iter::{ - IntoBoxedIter, - IterDirection, - IterableStore, - IterableTable, - IteratorOverTable, - }, kv_store::KeyValueInspect, transactional::{ AtomicView, @@ -37,7 +29,6 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ - marker::PhantomData, pin::Pin, task::{ Context, @@ -133,7 +124,7 @@ where ); let this = self.get_mut(); if let Some(height) = this.next { - let mut tx = this.inner.read_transaction(); + let tx = this.inner.read_transaction(); let next_block = tx .storage_as_ref::() .get(&height) @@ -155,9 +146,9 @@ where Poll::Ready(None) } Err(e) => { - tracing::debug!("Error while reading next block: {:?}", e); + tracing::error!("Error while reading next block: {:?}", e); this.next = None; - Poll::Ready(Some(Err(e).unwrap())) + Poll::Ready(None) } } } else { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 0b2b70308dd..59998b07d19 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -69,7 +69,7 @@ async fn get_block__can_get_expected_range() { tx.commit().unwrap(); let db = db.commit().unwrap(); let tx = db.into_transaction(); - let mut adapter = StorageDB::new(tx); + let adapter = StorageDB::new(tx); // when let BlockRangeResponse::Literal(stream) = From b738793144b4a8726c4edc00de2b4eb7bb3397fa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 20:00:35 -0600 Subject: [PATCH 038/100] Remove commented code --- crates/services/block_aggregator_api/src/db/storage_db.rs | 2 -- 1 file changed, 2 
deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 33257915c01..612c4c2bfb5 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -53,11 +53,9 @@ impl StorageDB { impl BlockAggregatorDB for StorageDB where - // S: Send + Sync + Modifiable + Clone + Unpin + ReadTransaction + 'static, S: Modifiable + std::fmt::Debug, S: KeyValueInspect, for<'b> StorageTransaction<&'b mut S>: StorageMutate, - // for<'b> StorageTransaction<&'b S>: StorageInspect, S: AtomicView, T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, StorageTransaction: AtomicView + StorageInspect, From 6649ac6acd9e344a6419ed60cf84c4e04d6dbf34 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 8 Sep 2025 20:34:38 -0600 Subject: [PATCH 039/100] Add tests for get_current_height indirectly --- .../block_aggregator_api/src/db/storage_db.rs | 44 ++++++++++-- .../src/db/storage_db/tests.rs | 69 ++++++++++++++++++- 2 files changed, 107 insertions(+), 6 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 612c4c2bfb5..a65f9ca2eda 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -29,6 +29,7 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ + collections::BTreeSet, pin::Pin, task::{ Context, @@ -42,12 +43,44 @@ pub mod table; mod tests; pub struct StorageDB { - inner: S, + highest_contiguous_block: BlockHeight, + orphaned_heights: BTreeSet, + storage: S, } impl StorageDB { pub fn new(storage: S) -> Self { - Self { inner: storage } + let height = BlockHeight::new(0); + Self::new_with_height(storage, height) + } + + pub fn new_with_height(storage: S, highest_contiguous_block: BlockHeight) -> Self { 
+ let orphaned_heights = BTreeSet::new(); + Self { + highest_contiguous_block, + orphaned_heights, + storage, + } + } + + fn update_highest_contiguous_block(&mut self, height: BlockHeight) { + if height == self.next_height() { + self.highest_contiguous_block = height; + while let Some(next_height) = self.orphaned_heights.iter().next().cloned() { + if next_height == self.next_height() { + self.highest_contiguous_block = next_height; + self.orphaned_heights.remove(&next_height); + } else { + break; + } + } + } else if height > self.next_height() { + self.orphaned_heights.insert(height); + } + } + fn next_height(&self) -> BlockHeight { + let last_height = *self.highest_contiguous_block; + BlockHeight::new(last_height.saturating_add(1)) } } @@ -63,7 +96,8 @@ where type BlockRange = BlockRangeResponse; async fn store_block(&mut self, height: BlockHeight, block: Block) -> Result<()> { - let mut tx = self.inner.write_transaction(); + self.update_highest_contiguous_block(height); + let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() .insert(&height, &block) .map_err(|e| Error::DB(anyhow!(e)))?; @@ -77,7 +111,7 @@ where last: BlockHeight, ) -> Result { let latest_view = self - .inner + .storage .latest_view() .map_err(|e| Error::DB(anyhow!(e)))?; let stream = StorageStream::new(latest_view, first, last); @@ -85,7 +119,7 @@ where } async fn get_current_height(&self) -> Result { - todo!() + Ok(self.highest_contiguous_block) } } diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 59998b07d19..467c7e0b490 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -32,7 +32,7 @@ async fn store_block__adds_to_storage() { // then let actual = adapter - .inner + .storage .storage_as_ref::() .get(&height) .unwrap() @@ -82,3 +82,70 @@ async fn get_block__can_get_expected_range() { 
// then assert_eq!(actual, vec![expected_2, expected_3]); } + +#[tokio::test] +async fn store_block__updates_the_highest_continuous_block_if_contiguous() { + let mut rng = StdRng::seed_from_u64(666); + // given + let db = database(); + let mut adapter = StorageDB::new_with_height(db, BlockHeight::from(0u32)); + let height = BlockHeight::from(1u32); + let expected = Block::random(&mut rng); + + // when + adapter.store_block(height, expected.clone()).await.unwrap(); + + // then + let expected = height; + let actual = adapter.get_current_height().await.unwrap(); + assert_eq!(expected, actual); +} + +#[tokio::test] +async fn store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { + let mut rng = StdRng::seed_from_u64(666); + // given + let db = database(); + let starting_height = BlockHeight::from(0u32); + let mut adapter = StorageDB::new_with_height(db, starting_height); + let height = BlockHeight::from(2u32); + let expected = Block::random(&mut rng); + + // when + adapter.store_block(height, expected.clone()).await.unwrap(); + + // then + let expected = starting_height; + let actual = adapter.get_current_height().await.unwrap(); + assert_eq!(expected, actual); +} + +#[tokio::test] +async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { + let mut rng = StdRng::seed_from_u64(666); + // given + let db = database(); + let starting_height = BlockHeight::from(0u32); + let mut adapter = StorageDB::new_with_height(db, starting_height); + + let mut orphaned_height = None; + for height in 2..=10u32 { + let height = BlockHeight::from(height); + orphaned_height = Some(height); + let block = Block::random(&mut rng); + adapter.store_block(height, block).await.unwrap(); + } + let expected = starting_height; + let actual = adapter.get_current_height().await.unwrap(); + assert_eq!(expected, actual); + + // when + let height = BlockHeight::from(1u32); + let expected = Block::random(&mut rng); + adapter.store_block(height, 
expected.clone()).await.unwrap(); + + // then + let expected = orphaned_height.unwrap(); + let actual = adapter.get_current_height().await.unwrap(); + assert_eq!(expected, actual); +} From 010e1119dc87df59666cff22636496ea6942a5a2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 14:39:36 -0600 Subject: [PATCH 040/100] Remove unnecessary clone --- crates/services/block_aggregator_api/src/db/storage_db.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index a65f9ca2eda..a379bb074a9 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -66,10 +66,10 @@ impl StorageDB { fn update_highest_contiguous_block(&mut self, height: BlockHeight) { if height == self.next_height() { self.highest_contiguous_block = height; - while let Some(next_height) = self.orphaned_heights.iter().next().cloned() { - if next_height == self.next_height() { - self.highest_contiguous_block = next_height; - self.orphaned_heights.remove(&next_height); + while let Some(next_height) = self.orphaned_heights.first() { + if next_height == &self.next_height() { + self.highest_contiguous_block = *next_height; + let _ = self.orphaned_heights.pop_first(); } else { break; } From dab98e23b681d42cd904f1b5b604be7c016fb14c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 15:59:51 -0600 Subject: [PATCH 041/100] Add block subscription code --- .../services/block_aggregator_api/src/api.rs | 24 ++- .../src/block_aggregator.rs | 142 ++++++++++++++++++ .../block_aggregator_api/src/blocks.rs | 9 +- .../services/block_aggregator_api/src/db.rs | 4 +- .../block_aggregator_api/src/db/storage_db.rs | 2 +- .../services/block_aggregator_api/src/lib.rs | 96 +----------- .../block_aggregator_api/src/tests.rs | 51 ++++++- 7 files changed, 221 insertions(+), 107 deletions(-) 
create mode 100644 crates/services/block_aggregator_api/src/block_aggregator.rs diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 919221fcc80..323ce7d557a 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,4 +1,7 @@ -use crate::result::Result; +use crate::{ + blocks::Block, + result::Result, +}; use fuel_core_types::fuel_types::BlockHeight; use std::fmt; use tokio::sync::oneshot::{ @@ -18,15 +21,19 @@ pub trait BlockAggregatorApi: Send + Sync { ) -> impl Future>> + Send; } -pub enum BlockAggregatorQuery { +pub enum BlockAggregatorQuery { GetBlockRange { first: BlockHeight, last: BlockHeight, - response: Sender, + response: Sender, }, GetCurrentHeight { response: Sender, }, + // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? + NewBlockSubscription { + response: tokio::sync::mpsc::Sender<(BlockHeight, Block)>, + }, } impl fmt::Debug for BlockAggregatorQuery { @@ -40,6 +47,9 @@ impl fmt::Debug for BlockAggregatorQuery { BlockAggregatorQuery::GetCurrentHeight { .. } => { f.debug_struct("GetCurrentHeight").finish() } + BlockAggregatorQuery::NewBlockSubscription { .. 
} => { + f.debug_struct("GetNewBlockStream").finish() + } } } } @@ -65,4 +75,12 @@ impl BlockAggregatorQuery { let query = Self::GetCurrentHeight { response: sender }; (query, receiver) } + + pub fn new_block_subscription() + -> (Self, tokio::sync::mpsc::Receiver<(BlockHeight, Block)>) { + const ARBITRARY_CHANNEL_SIZE: usize = 10; + let (sender, receiver) = tokio::sync::mpsc::channel(ARBITRARY_CHANNEL_SIZE); + let query = Self::NewBlockSubscription { response: sender }; + (query, receiver) + } } diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs new file mode 100644 index 00000000000..b0d252fb06f --- /dev/null +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -0,0 +1,142 @@ +use crate::{ + BlockAggregator, + api::{ + BlockAggregatorApi, + BlockAggregatorQuery, + }, + blocks::{ + Block, + BlockSource, + BlockSourceEvent, + }, + db::BlockAggregatorDB, +}; +use fuel_core_services::{ + TaskNextAction, + try_or_stop, +}; +use fuel_core_types::fuel_types::BlockHeight; + +impl BlockAggregator +where + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, + Blocks: BlockSource, + BlockRangeResponse: Send, +{ + pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { + let new_block_subscriptions = Vec::new(); + Self { + query, + database, + block_source, + new_block_subscriptions, + } + } + + pub fn stop(&self) -> TaskNextAction { + TaskNextAction::Stop + } + + pub async fn handle_query( + &mut self, + res: crate::result::Result>, + ) -> TaskNextAction { + tracing::debug!("Handling query: {res:?}"); + let query = try_or_stop!(res, |e| { + tracing::error!("Error receiving query: {e:?}"); + }); + match query { + BlockAggregatorQuery::GetBlockRange { + first, + last, + response, + } => { + self.handle_get_block_range_query(first, last, response) + .await + } + BlockAggregatorQuery::GetCurrentHeight { response } => { + 
self.handle_get_current_height_query(response).await + } + BlockAggregatorQuery::NewBlockSubscription { response } => { + self.handle_new_block_subscription(response).await + } + } + } + + async fn handle_get_block_range_query( + &mut self, + first: BlockHeight, + last: BlockHeight, + response: tokio::sync::oneshot::Sender, + ) -> TaskNextAction { + let res = self.database.get_block_range(first, last).await; + let block_stream = try_or_stop!(res, |e| { + tracing::error!("Error getting block range from database: {e:?}"); + }); + let res = response.send(block_stream); + try_or_stop!(res, |_| { + tracing::error!("Error sending block range response"); + }); + TaskNextAction::Continue + } + + async fn handle_get_current_height_query( + &mut self, + response: tokio::sync::oneshot::Sender, + ) -> TaskNextAction { + let res = self.database.get_current_height().await; + let height = try_or_stop!(res, |e| { + tracing::error!("Error getting current height from database: {e:?}"); + }); + let res = response.send(height); + try_or_stop!(res, |_| { + tracing::error!("Error sending current height response"); + }); + TaskNextAction::Continue + } + + async fn handle_new_block_subscription( + &mut self, + response: tokio::sync::mpsc::Sender<(BlockHeight, Block)>, + ) -> TaskNextAction { + self.new_block_subscriptions.push(response); + TaskNextAction::Continue + } + + pub async fn handle_block( + &mut self, + res: crate::result::Result, + ) -> TaskNextAction { + tracing::debug!("Handling block: {res:?}"); + let event = try_or_stop!(res, |e| { + tracing::error!("Error receiving block from source: {e:?}"); + }); + let (id, block) = match event { + BlockSourceEvent::NewBlock(id, block) => { + self.new_block_subscriptions.retain_mut(|sub| { + let send_res = sub.try_send((id, block.clone())); + match send_res { + Ok(_) => true, + Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { + tracing::error!("Error sending new block to source due to full channel: {id:?}"); + true + }, + 
Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { + tracing::debug!("Dropping block subscription due to closed channel"); + false + }, + } + }); + // do more stuff + (id, block) + } + BlockSourceEvent::OldBlock(id, block) => (id, block), + }; + let res = self.database.store_block(id, block).await; + try_or_stop!(res, |e| { + tracing::error!("Error storing block in database: {e:?}"); + }); + TaskNextAction::Continue + } +} diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 4963e83f29f..2a6a783083f 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -8,10 +8,13 @@ pub mod importer_and_onchain_source; /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { /// Asynchronously fetch the next block and its height - fn next_block(&mut self) - -> impl Future> + Send; + fn next_block(&mut self) -> impl Future> + Send; +} - fn subscribe_to_new_blocks(&mut self) -> Result<()>; +#[derive(Debug)] +pub enum BlockSourceEvent { + NewBlock(BlockHeight, Block), + OldBlock(BlockHeight, Block), } #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index fe5dcd8ac72..13a0bcc8489 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -9,7 +9,7 @@ pub mod storage_db; /// The definition of the block aggregator database. 
pub trait BlockAggregatorDB: Send + Sync { /// The type used to report a range of blocks - type BlockRange; + type BlockRangeResponse; /// Stores a block with the given ID fn store_block( @@ -23,7 +23,7 @@ pub trait BlockAggregatorDB: Send + Sync { &self, first: BlockHeight, last: BlockHeight, - ) -> impl Future> + Send; + ) -> impl Future> + Send; /// Retrieves the current height of the aggregated blocks If there is a break in the blocks, /// i.e. the blocks are being aggregated out of order, return the height of the last diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index a379bb074a9..12c72f5355b 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -93,7 +93,7 @@ where T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, StorageTransaction: AtomicView + StorageInspect, { - type BlockRange = BlockRangeResponse; + type BlockRangeResponse = BlockRangeResponse; async fn store_block(&mut self, height: BlockHeight, block: Block) -> Result<()> { self.update_highest_contiguous_block(height); diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index f70092ad842..9d2fefa477e 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -13,7 +13,6 @@ use fuel_core_services::{ RunnableTask, StateWatcher, TaskNextAction, - try_or_stop, }; use fuel_core_types::fuel_types::BlockHeight; use result::Result; @@ -28,6 +27,8 @@ pub mod block_range_response; #[cfg(test)] mod tests; +pub mod block_aggregator; + // TODO: this doesn't need to limited to the blocks, // but we can change the name later /// The Block Aggregator service, which aggregates blocks from a source and stores them in a database @@ -36,12 +37,13 @@ pub struct BlockAggregator { query: Api, database: DB, block_source: 
Blocks, + new_block_subscriptions: Vec>, } impl RunnableTask for BlockAggregator where Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + DB: BlockAggregatorDB, Blocks: BlockSource, BlockRange: Send, { @@ -58,93 +60,3 @@ where Ok(()) } } - -impl BlockAggregator -where - Api: BlockAggregatorApi, - DB: BlockAggregatorDB, - Blocks: BlockSource, - BlockRange: Send, -{ - pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { - Self { - query, - database, - block_source, - } - } - - pub fn stop(&self) -> TaskNextAction { - TaskNextAction::Stop - } - - pub async fn handle_query( - &mut self, - res: Result>, - ) -> TaskNextAction { - tracing::debug!("Handling query: {res:?}"); - let query = try_or_stop!(res, |e| { - tracing::error!("Error receiving query: {e:?}"); - }); - match query { - BlockAggregatorQuery::GetBlockRange { - first, - last, - response, - } => { - self.handle_get_block_range_query(first, last, response) - .await - } - BlockAggregatorQuery::GetCurrentHeight { response } => { - self.handle_get_current_height_query(response).await - } - } - } - - async fn handle_get_block_range_query( - &mut self, - first: BlockHeight, - last: BlockHeight, - response: tokio::sync::oneshot::Sender, - ) -> TaskNextAction { - let res = self.database.get_block_range(first, last).await; - let block_stream = try_or_stop!(res, |e| { - tracing::error!("Error getting block range from database: {e:?}"); - }); - let res = response.send(block_stream); - try_or_stop!(res, |_| { - tracing::error!("Error sending block range response"); - }); - TaskNextAction::Continue - } - - async fn handle_get_current_height_query( - &mut self, - response: tokio::sync::oneshot::Sender, - ) -> TaskNextAction { - let res = self.database.get_current_height().await; - let height = try_or_stop!(res, |e| { - tracing::error!("Error getting current height from database: {e:?}"); - }); - let res = response.send(height); - try_or_stop!(res, |_| { - tracing::error!("Error sending current height 
response"); - }); - TaskNextAction::Continue - } - - pub async fn handle_block( - &mut self, - res: Result<(BlockHeight, Block)>, - ) -> TaskNextAction { - tracing::debug!("Handling block: {res:?}"); - let (id, block) = try_or_stop!(res, |e| { - tracing::error!("Error receiving block from source: {e:?}"); - }); - let res = self.database.store_block(id, block).await; - try_or_stop!(res, |e| { - tracing::error!("Error storing block in database: {e:?}"); - }); - TaskNextAction::Continue - } -} diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 1595de0190d..500c68d639d 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -2,7 +2,10 @@ use super::*; use crate::{ - blocks::Block, + blocks::{ + Block, + BlockSourceEvent, + }, result::Error, }; use fuel_core_services::stream::BoxStream; @@ -64,7 +67,7 @@ impl FakeDB { } impl BlockAggregatorDB for FakeDB { - type BlockRange = BlockRangeResponse; + type BlockRangeResponse = BlockRangeResponse; async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { self.map.lock().unwrap().insert(id, block); @@ -100,11 +103,11 @@ impl BlockAggregatorDB for FakeDB { } struct FakeBlockSource { - blocks: Receiver<(BlockHeight, Block)>, + blocks: Receiver, } impl FakeBlockSource { - fn new() -> (Self, Sender<(BlockHeight, Block)>) { + fn new() -> (Self, Sender) { let (_sender, receiver) = tokio::sync::mpsc::channel(1); let _self = Self { blocks: receiver }; (_self, _sender) @@ -112,7 +115,7 @@ impl FakeBlockSource { } impl BlockSource for FakeBlockSource { - async fn next_block(&mut self) -> Result<(BlockHeight, Block)> { + async fn next_block(&mut self) -> Result { self.blocks.recv().await.ok_or(Error::BlockSource) } } @@ -163,7 +166,8 @@ async fn run__new_block_gets_added_to_db() { let mut watcher = StateWatcher::started(); // when - source_sender.send((id, 
block.clone())).await.unwrap(); + let event = BlockSourceEvent::NewBlock(id, block.clone()); + source_sender.send(event).await.unwrap(); let _ = srv.run(&mut watcher).await; // then @@ -201,3 +205,38 @@ async fn run__get_current_height__returns_expected_height() { // cleanup drop(_block_sender); } + +#[tokio::test] +async fn run__new_block_subscription__sends_new_block() { + let mut rng = StdRng::seed_from_u64(42); + // given + let (api, sender) = FakeApi::new(); + let db = FakeDB::new(); + let (source, source_sender) = FakeBlockSource::new(); + let mut srv = BlockAggregator::new(api, db, source); + + let expected_block = Block::random(&mut rng); + let expected_height = BlockHeight::from(123u32); + let mut watcher = StateWatcher::started(); + let (query, mut response) = BlockAggregatorQuery::new_block_subscription(); + + // when + sender.send(query).await.unwrap(); + let _ = srv.run(&mut watcher).await; + let event = BlockSourceEvent::NewBlock(expected_height, expected_block.clone()); + source_sender.send(event).await.unwrap(); + let _ = srv.run(&mut watcher).await; + + // then + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let (actual_height, actual_block) = + tokio::time::timeout(tokio::time::Duration::from_secs(1), response.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(expected_block, actual_block); + assert_eq!(expected_height, actual_height); + + // cleanup + drop(source_sender); +} From 358a2425dd4a4cd89625876eba58219e84805e10 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 16:06:22 -0600 Subject: [PATCH 042/100] Appease Clippy-sama --- .../block_aggregator_api/src/db/storage_db.rs | 28 ++++++++++++------- .../src/db/storage_db/tests.rs | 3 -- .../block_aggregator_api/src/tests.rs | 6 +++- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index a379bb074a9..f71e65e989b 100644 --- 
a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -29,6 +29,7 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ + cmp::Ordering, collections::BTreeSet, pin::Pin, task::{ @@ -64,18 +65,25 @@ impl StorageDB { } fn update_highest_contiguous_block(&mut self, height: BlockHeight) { - if height == self.next_height() { - self.highest_contiguous_block = height; - while let Some(next_height) = self.orphaned_heights.first() { - if next_height == &self.next_height() { - self.highest_contiguous_block = *next_height; - let _ = self.orphaned_heights.pop_first(); - } else { - break; + let next_height = self.next_height(); + match height.cmp(&next_height) { + Ordering::Equal => { + self.highest_contiguous_block = height; + while let Some(next_height) = self.orphaned_heights.first() { + if next_height == &self.next_height() { + self.highest_contiguous_block = *next_height; + let _ = self.orphaned_heights.pop_first(); + } else { + break; + } } } - } else if height > self.next_height() { - self.orphaned_heights.insert(height); + Ordering::Greater => { + self.orphaned_heights.insert(height); + } + Ordering::Less => { + // ignore duplicate or old block + } } } fn next_height(&self) -> BlockHeight { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 467c7e0b490..f09cdaafc2b 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -43,9 +43,6 @@ async fn store_block__adds_to_storage() { #[tokio::test] async fn get_block__can_get_expected_range() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::DEBUG) - .try_init(); let mut rng = StdRng::seed_from_u64(666); // given let mut db = database(); diff --git a/crates/services/block_aggregator_api/src/tests.rs 
b/crates/services/block_aggregator_api/src/tests.rs index 1595de0190d..836b92ceadf 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -2,8 +2,12 @@ use super::*; use crate::{ + api::BlockAggregatorQuery, blocks::Block, - result::Error, + result::{ + Error, + Result, + }, }; use fuel_core_services::stream::BoxStream; use futures::StreamExt; From 0b7aaa543513cfafa1ee0924b3b6033731bbb4aa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 16:17:30 -0600 Subject: [PATCH 043/100] Fix imports --- crates/services/block_aggregator_api/src/lib.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 9d2fefa477e..fdde98fb76a 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -1,8 +1,5 @@ use crate::{ - api::{ - BlockAggregatorApi, - BlockAggregatorQuery, - }, + api::BlockAggregatorApi, blocks::{ Block, BlockSource, @@ -15,7 +12,6 @@ use fuel_core_services::{ TaskNextAction, }; use fuel_core_types::fuel_types::BlockHeight; -use result::Result; pub mod api; pub mod blocks; From 0a17d3c52322c34ee0419ddd80c3e06bbbe6e72f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 16:18:32 -0600 Subject: [PATCH 044/100] Update CHANGELOG --- .changes/added/3096.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changes/added/3096.md diff --git a/.changes/added/3096.md b/.changes/added/3096.md new file mode 100644 index 00000000000..41abcb81d1a --- /dev/null +++ b/.changes/added/3096.md @@ -0,0 +1 @@ +Add subscription manager for core service \ No newline at end of file From d0f2c8104125625a564f5e88c46da864b3210cfc Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 16:21:12 -0600 Subject: [PATCH 045/100] fmt --- crates/services/block_aggregator_api/src/tests.rs | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 0c6ffabbd90..e4702feb4f0 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -2,11 +2,11 @@ use super::*; use crate::{ + api::BlockAggregatorQuery, blocks::{ Block, BlockSourceEvent, }, - api::BlockAggregatorQuery, result::{ Error, Result, From 95a38fcb5ddce56039fa2cb1a6153e8827125967 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 9 Sep 2025 16:30:51 -0600 Subject: [PATCH 046/100] Add old block test --- .../block_aggregator_api/src/tests.rs | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index e4702feb4f0..3e63868bbbc 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -244,3 +244,35 @@ async fn run__new_block_subscription__sends_new_block() { // cleanup drop(source_sender); } + +#[tokio::test] +async fn run__new_block_subscription__does_not_send_syncing_blocks() { + let mut rng = StdRng::seed_from_u64(42); + // given + let (api, sender) = FakeApi::new(); + let db = FakeDB::new(); + let (source, source_sender) = FakeBlockSource::new(); + let mut srv = BlockAggregator::new(api, db, source); + + let block = Block::random(&mut rng); + let height = BlockHeight::from(123u32); + let mut watcher = StateWatcher::started(); + let (query, mut response) = BlockAggregatorQuery::new_block_subscription(); + + // when + sender.send(query).await.unwrap(); + let _ = srv.run(&mut watcher).await; + let event = BlockSourceEvent::OldBlock(height, block); + source_sender.send(event).await.unwrap(); + let _ = srv.run(&mut watcher).await; + + // then + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let res = + 
tokio::time::timeout(tokio::time::Duration::from_millis(100), response.recv()) + .await; + assert!(res.is_err(), "should have timed out"); + + // cleanup + drop(source_sender); +} From 81194d4132a65b1dd2dd1349cabe3fc61d84366e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 08:20:48 -0600 Subject: [PATCH 047/100] Add drain method to source --- crates/services/block_aggregator_api/src/blocks.rs | 2 ++ crates/services/block_aggregator_api/src/lib.rs | 5 ++++- crates/services/block_aggregator_api/src/tests.rs | 4 ++++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index c937f92ff26..525b41c1ebe 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -8,6 +8,8 @@ pub trait BlockSource: Send + Sync { /// Asynchronously fetch the next block and its height fn next_block(&mut self) -> impl Future> + Send; + + fn drain(&mut self) -> impl Future> + Send; } #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index f70092ad842..a58c4e5d23d 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -54,7 +54,10 @@ where } } - async fn shutdown(self) -> anyhow::Result<()> { + async fn shutdown(mut self) -> anyhow::Result<()> { + self.block_source.drain().await.map_err(|e| { + anyhow::anyhow!("Error draining block source during shutdown: {e:?}") + })?; Ok(()) } } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 8aa2f122e99..414d774d8b5 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -115,6 +115,10 @@ impl BlockSource for FakeBlockSource { async fn next_block(&mut self) -> 
Result<(BlockHeight, Block)> { self.blocks.recv().await.ok_or(Error::BlockSourceError) } + + async fn drain(&mut self) -> Result<()> { + todo!() + } } #[tokio::test] From 15b37338e0a449527963ece14e6562424f34632e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 08:26:48 -0600 Subject: [PATCH 048/100] Add doc --- crates/services/block_aggregator_api/src/blocks.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 525b41c1ebe..cc04bf406f2 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -9,6 +9,7 @@ pub trait BlockSource: Send + Sync { fn next_block(&mut self) -> impl Future> + Send; + /// Drain any remaining blocks from the source fn drain(&mut self) -> impl Future> + Send; } From 41343c2a92b93b79ce618732dacd1edf6313b47d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 08:28:19 -0600 Subject: [PATCH 049/100] Remove empty mod --- crates/services/block_aggregator_api/src/blocks.rs | 2 -- .../src/blocks/importer_and_onchain_source.rs | 1 - 2 files changed, 3 deletions(-) delete mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index b12ee54fea2..105d078d124 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -3,8 +3,6 @@ use bytes::Bytes; use fuel_core_types::fuel_types::BlockHeight; use std::fmt::Debug; -pub mod importer_and_onchain_source; - /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { /// Asynchronously fetch the next block and its height diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs 
b/crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_onchain_source.rs +++ /dev/null @@ -1 +0,0 @@ - From 2fa9b2cdefc6b6e04312d23a6219530ac07998af Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 12:25:20 -0600 Subject: [PATCH 050/100] WIP add new adapter --- Cargo.lock | 3 + .../services/block_aggregator_api/Cargo.toml | 5 +- .../block_aggregator_api/src/blocks.rs | 4 +- .../src/blocks/importer_and_db_source.rs | 151 ++++++++++++++++++ .../blocks/importer_and_db_source/tests.rs | 48 ++++++ .../block_aggregator_api/src/result.rs | 8 +- .../block_aggregator_api/src/tests.rs | 6 +- 7 files changed, 220 insertions(+), 5 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs diff --git a/Cargo.lock b/Cargo.lock index cd347b9f9c6..55234e2085c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1398,6 +1398,7 @@ name = "block_aggregator_api" version = "0.1.0" dependencies = [ "anyhow", + "async-trait", "bytes", "enum-iterator", "fuel-core-services", @@ -1409,7 +1410,9 @@ dependencies = [ "serde", "strum 0.25.0", "strum_macros 0.25.3", + "thiserror 2.0.12", "tokio", + "tokio-stream", "tracing", "tracing-subscriber", ] diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 7754851f4a6..1e1ae09e3bb 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -9,7 +9,7 @@ bytes = { workspace = true, features = ["serde"] } enum-iterator = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } -fuel-core-types = { workspace = true, features = ["std"] } +fuel-core-types = { workspace = 
true, features = ["std", "test-helpers"] } num_enum = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } @@ -17,9 +17,12 @@ strum = { workspace = true } strum_macros = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +async-trait = { workspace = true } +thiserror = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } futures = { workspace = true } tracing-subscriber = { workspace = true } +tokio-stream = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index 105d078d124..c9ca572ad56 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -3,6 +3,8 @@ use bytes::Bytes; use fuel_core_types::fuel_types::BlockHeight; use std::fmt::Debug; +pub mod importer_and_db_source; + /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { /// Asynchronously fetch the next block and its height @@ -12,7 +14,7 @@ pub trait BlockSource: Send + Sync { fn drain(&mut self) -> impl Future> + Send; } -#[derive(Debug)] +#[derive(Debug, Eq, PartialEq)] pub enum BlockSourceEvent { NewBlock(BlockHeight, Block), OldBlock(BlockHeight, Block), diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs new file mode 100644 index 00000000000..c156a42ebf4 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -0,0 +1,151 @@ +use crate::{ + blocks::{ + Block, + BlockSource, + BlockSourceEvent, + }, + result::{ + Error, + Result, + }, +}; +use anyhow::anyhow; +use fuel_core_services::{ + RunnableService, + RunnableTask, + Service, + ServiceRunner, + 
StateWatcher, + TaskNextAction, + stream::BoxStream, + try_or_continue, + try_or_stop, +}; +use fuel_core_types::{ + blockchain::SealedBlock as FuelBlock, + services::block_importer::SharedImportResult, +}; +use futures::StreamExt; +use tokio::sync::mpsc::Sender; + +#[cfg(test)] +mod tests; + +pub trait BlockSerializer { + fn serialize_block(&self, block: &FuelBlock) -> Result; +} + +pub struct ImporterAndDbSource +where + Serializer: BlockSerializer + Send + 'static, +{ + inner: ServiceRunner>, + receiver: tokio::sync::mpsc::Receiver, +} + +impl ImporterAndDbSource +where + Serializer: BlockSerializer + Send + 'static, +{ + pub fn new(importer: BoxStream, serializer: Serializer) -> Self { + const ARB_CHANNEL_SIZE: usize = 100; + let (block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); + let inner = InnerTask { + importer, + serializer, + block_return, + }; + let mut runner = ServiceRunner::new(inner); + runner.start().unwrap(); + Self { + inner: runner, + receiver, + } + } +} + +impl BlockSource for ImporterAndDbSource +where + Serializer: BlockSerializer + Send + 'static, +{ + async fn next_block(&mut self) -> Result { + tracing::debug!("awaiting next block"); + self.receiver + .recv() + .await + .ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) + } + + async fn drain(&mut self) -> Result<()> { + Ok(()) + } +} + +pub struct InnerTask { + importer: BoxStream, + // db: DB, + serializer: Serializer, + block_return: Sender, +} + +impl RunnableTask for InnerTask +where + Serializer: BlockSerializer + Send, +{ + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + tokio::select! 
{ + fuel_block = self.importer.next() => { + tracing::debug!("imported block"); + if let Some(inner) = fuel_block { + let height = inner.sealed_block.entity.header().height(); + let res = self.serializer.serialize_block(&inner.sealed_block); + let block = try_or_continue!(res); + let event = BlockSourceEvent::NewBlock(*height, block); + let res = self.block_return.send(event).await; + try_or_stop!(res, |e| "failed to send imported block to receiver: {e:?}"); + TaskNextAction::Continue + } else { + tracing::debug!("importer stream ended"); + TaskNextAction::Stop + } + } + _ = watcher.while_started() => { + TaskNextAction::Stop + }, + // fuel_block = self.db.next_block() => { + // todo!() + // } + // serialized_block = self.serializer.next_serialized_block() => { + // let res = self.block_return.send(serialized_block); + // try_or_stop!(res) + // } + } + } + + async fn shutdown(self) -> anyhow::Result<()> { + todo!() + } +} + +#[async_trait::async_trait] +impl RunnableService for InnerTask +where + Serializer: BlockSerializer + Send + 'static, +{ + const NAME: &'static str = "BlockSourceInnerService"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData { + () + } + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs new file mode 100644 index 00000000000..05096248fc2 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -0,0 +1,48 @@ +use super::*; +use fuel_core_services::stream::IntoBoxStream; +use fuel_core_types::{ + blockchain::SealedBlock, + fuel_types::BlockHeight, + services::block_importer::ImportResult, +}; +use std::sync::Arc; + +#[derive(Clone)] +pub struct MockSerializer; + +impl 
BlockSerializer for MockSerializer { + fn serialize_block(&self, _block: &FuelBlock) -> Result { + todo!() + } +} + +#[tokio::test] +async fn next_block__gets_new_block_from_importer() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .try_init(); + // given + let height = BlockHeight::from(123u32); + let block = SealedBlock::default(); + let import_result = Arc::new( + ImportResult { + sealed_block: block.clone(), + tx_status: vec![], + events: vec![], + source: Default::default(), + } + .wrap(), + ); + let blocks: Vec = vec![import_result]; + let block_stream = tokio_stream::iter(blocks).into_boxed(); + let serializer = MockSerializer; + let mut adapter = ImporterAndDbSource::new(block_stream, serializer.clone()); + + // when + let actual = adapter.next_block().await.unwrap(); + + // then + let serialized = serializer.serialize_block(&block).unwrap(); + let expected = BlockSourceEvent::NewBlock(height, serialized); + assert_eq!(expected, actual); +} diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index 8a71a151876..5d6dedd6cab 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -1,7 +1,11 @@ -#[derive(Debug)] +use thiserror::Error; +#[derive(Debug, Error)] pub enum Error { + #[error("Block Aggregator API error")] Api, - BlockSource, + #[error("Block Source error: {0}")] + BlockSource(anyhow::Error), + #[error("Database error: {0}")] DB(anyhow::Error), } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 674c8b0bc24..3b6c2dd513e 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -12,6 +12,7 @@ use crate::{ Result, }, }; +use anyhow::anyhow; use fuel_core_services::stream::BoxStream; use futures::StreamExt; use rand::{ @@ -120,7 +121,10 @@ impl 
FakeBlockSource { impl BlockSource for FakeBlockSource { async fn next_block(&mut self) -> Result { - self.blocks.recv().await.ok_or(Error::BlockSource) + self.blocks + .recv() + .await + .ok_or(Error::BlockSource(anyhow!("Channel closed"))) } async fn drain(&mut self) -> Result<()> { From 53b2b18e64c7cbcfd30777965e64515aaadd8c0a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 14:57:13 -0600 Subject: [PATCH 051/100] Get test passing --- Cargo.lock | 1 + crates/services/block_aggregator_api/Cargo.toml | 1 + .../src/blocks/importer_and_db_source/tests.rs | 11 +++++++---- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55234e2085c..f489c1d81dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1406,6 +1406,7 @@ dependencies = [ "fuel-core-types 0.46.0", "futures", "num_enum", + "postcard", "rand 0.8.5", "serde", "strum 0.25.0", diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 1e1ae09e3bb..fa67c52a594 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -24,5 +24,6 @@ thiserror = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } futures = { workspace = true } +postcard = { workspace = true } tracing-subscriber = { workspace = true } tokio-stream = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 05096248fc2..0bf1ad03360 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -11,8 +11,11 @@ use std::sync::Arc; pub struct MockSerializer; impl BlockSerializer for MockSerializer { - fn 
serialize_block(&self, _block: &FuelBlock) -> Result { - todo!() + fn serialize_block(&self, block: &FuelBlock) -> Result { + let bytes_vec = postcard::to_allocvec(block).map_err(|e| { + Error::BlockSource(anyhow!("failed to serialize block: {}", e)) + })?; + Ok(Block::from(bytes_vec)) } } @@ -22,8 +25,8 @@ async fn next_block__gets_new_block_from_importer() { .with_max_level(tracing::Level::DEBUG) .try_init(); // given - let height = BlockHeight::from(123u32); let block = SealedBlock::default(); + let height = block.entity.header().height(); let import_result = Arc::new( ImportResult { sealed_block: block.clone(), @@ -43,6 +46,6 @@ async fn next_block__gets_new_block_from_importer() { // then let serialized = serializer.serialize_block(&block).unwrap(); - let expected = BlockSourceEvent::NewBlock(height, serialized); + let expected = BlockSourceEvent::NewBlock(*height, serialized); assert_eq!(expected, actual); } From 2d44731bd127882ee839e05f22e16387e2c6f788 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 15:36:30 -0600 Subject: [PATCH 052/100] Refactor --- .../src/blocks/importer_and_db_source.rs | 85 ++----------------- .../importer_and_db_source/inner_service.rs | 85 +++++++++++++++++++ .../blocks/importer_and_db_source/tests.rs | 6 +- 3 files changed, 92 insertions(+), 84 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index c156a42ebf4..a6da02609ee 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -3,6 +3,7 @@ use crate::{ Block, BlockSource, BlockSourceEvent, + importer_and_db_source::inner_service::InnerTask, }, result::{ Error, @@ -11,23 +12,16 @@ use crate::{ }; use anyhow::anyhow; use 
fuel_core_services::{ - RunnableService, - RunnableTask, Service, ServiceRunner, - StateWatcher, - TaskNextAction, stream::BoxStream, - try_or_continue, - try_or_stop, }; use fuel_core_types::{ blockchain::SealedBlock as FuelBlock, services::block_importer::SharedImportResult, }; -use futures::StreamExt; -use tokio::sync::mpsc::Sender; +pub mod inner_service; #[cfg(test)] mod tests; @@ -39,7 +33,7 @@ pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + 'static, { - inner: ServiceRunner>, + _inner: ServiceRunner>, receiver: tokio::sync::mpsc::Receiver, } @@ -55,10 +49,10 @@ where serializer, block_return, }; - let mut runner = ServiceRunner::new(inner); + let runner = ServiceRunner::new(inner); runner.start().unwrap(); Self { - inner: runner, + _inner: runner, receiver, } } @@ -80,72 +74,3 @@ where Ok(()) } } - -pub struct InnerTask { - importer: BoxStream, - // db: DB, - serializer: Serializer, - block_return: Sender, -} - -impl RunnableTask for InnerTask -where - Serializer: BlockSerializer + Send, -{ - async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { - tokio::select! 
{ - fuel_block = self.importer.next() => { - tracing::debug!("imported block"); - if let Some(inner) = fuel_block { - let height = inner.sealed_block.entity.header().height(); - let res = self.serializer.serialize_block(&inner.sealed_block); - let block = try_or_continue!(res); - let event = BlockSourceEvent::NewBlock(*height, block); - let res = self.block_return.send(event).await; - try_or_stop!(res, |e| "failed to send imported block to receiver: {e:?}"); - TaskNextAction::Continue - } else { - tracing::debug!("importer stream ended"); - TaskNextAction::Stop - } - } - _ = watcher.while_started() => { - TaskNextAction::Stop - }, - // fuel_block = self.db.next_block() => { - // todo!() - // } - // serialized_block = self.serializer.next_serialized_block() => { - // let res = self.block_return.send(serialized_block); - // try_or_stop!(res) - // } - } - } - - async fn shutdown(self) -> anyhow::Result<()> { - todo!() - } -} - -#[async_trait::async_trait] -impl RunnableService for InnerTask -where - Serializer: BlockSerializer + Send + 'static, -{ - const NAME: &'static str = "BlockSourceInnerService"; - type SharedData = (); - type Task = Self; - type TaskParams = (); - - fn shared_data(&self) -> Self::SharedData { - () - } - - async fn into_task( - self, - _state_watcher: &StateWatcher, - _params: Self::TaskParams, - ) -> anyhow::Result { - Ok(self) - } -} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs new file mode 100644 index 00000000000..3c18dbe6411 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs @@ -0,0 +1,85 @@ +use crate::blocks::{ + BlockSourceEvent, + importer_and_db_source::BlockSerializer, +}; +use fuel_core_services::{ + RunnableService, + RunnableTask, + StateWatcher, + TaskNextAction, + stream::BoxStream, + try_or_continue, + try_or_stop, +}; +use 
fuel_core_types::services::block_importer::SharedImportResult; +use futures::StreamExt; +use tokio::sync::mpsc::Sender; + +pub struct InnerTask { + pub(crate) importer: BoxStream, + // db: DB, + pub(crate) serializer: Serializer, + pub(crate) block_return: Sender, +} + +impl RunnableTask for InnerTask +where + Serializer: BlockSerializer + Send, +{ + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + tokio::select! { + fuel_block = self.importer.next() => { + tracing::debug!("imported block"); + if let Some(inner) = fuel_block { + let height = inner.sealed_block.entity.header().height(); + let res = self.serializer.serialize_block(&inner.sealed_block); + let block = try_or_continue!(res); + let event = BlockSourceEvent::NewBlock(*height, block); + let res = self.block_return.send(event).await; + try_or_stop!(res, |_e| "failed to send imported block to receiver: {_e:?}"); + TaskNextAction::Continue + } else { + tracing::debug!("importer stream ended"); + TaskNextAction::Stop + } + } + _ = watcher.while_started() => { + TaskNextAction::Stop + }, + // fuel_block = self.db.next_block() => { + // todo!() + // } + // serialized_block = self.serializer.next_serialized_block() => { + // let res = self.block_return.send(serialized_block); + // try_or_stop!(res) + // } + } + } + + async fn shutdown(self) -> anyhow::Result<()> { + todo!() + } +} + +#[async_trait::async_trait] +impl RunnableService for InnerTask +where + Serializer: BlockSerializer + Send + 'static, +{ + const NAME: &'static str = "BlockSourceInnerService"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData { + () + } + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs 
index 0bf1ad03360..2c6baf0c915 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,8 +1,9 @@ +#![allow(non_snake_case)] + use super::*; use fuel_core_services::stream::IntoBoxStream; use fuel_core_types::{ blockchain::SealedBlock, - fuel_types::BlockHeight, services::block_importer::ImportResult, }; use std::sync::Arc; @@ -21,9 +22,6 @@ impl BlockSerializer for MockSerializer { #[tokio::test] async fn next_block__gets_new_block_from_importer() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::DEBUG) - .try_init(); // given let block = SealedBlock::default(); let height = block.entity.header().height(); From e9a04b5774b6bee66a149f322494f39cd5c44687 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 16:19:57 -0600 Subject: [PATCH 053/100] Add db to task generics --- .../src/blocks/importer_and_db_source.rs | 25 ++++---- .../importer_and_db_source/inner_service.rs | 62 +++++++++++++++---- .../blocks/importer_and_db_source/tests.rs | 5 +- crates/types/src/blockchain.rs | 8 +++ 4 files changed, 76 insertions(+), 24 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index a6da02609ee..b847f3fb3fb 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -17,7 +17,7 @@ use fuel_core_services::{ stream::BoxStream, }; use fuel_core_types::{ - blockchain::SealedBlock as FuelBlock, + blockchain::Block as FuelBlock, services::block_importer::SharedImportResult, }; @@ -29,26 +29,28 @@ pub trait BlockSerializer { fn serialize_block(&self, block: &FuelBlock) -> Result; } -pub struct ImporterAndDbSource +pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + 
'static, + DB: Send + 'static, { - _inner: ServiceRunner>, + _inner: ServiceRunner>, receiver: tokio::sync::mpsc::Receiver, } -impl ImporterAndDbSource +impl ImporterAndDbSource where Serializer: BlockSerializer + Send + 'static, + DB: Send, { - pub fn new(importer: BoxStream, serializer: Serializer) -> Self { + pub fn new( + importer: BoxStream, + serializer: Serializer, + database: DB, + ) -> Self { const ARB_CHANNEL_SIZE: usize = 100; let (block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let inner = InnerTask { - importer, - serializer, - block_return, - }; + let inner = InnerTask::new(importer, serializer, block_return, database); let runner = ServiceRunner::new(inner); runner.start().unwrap(); Self { @@ -58,9 +60,10 @@ where } } -impl BlockSource for ImporterAndDbSource +impl BlockSource for ImporterAndDbSource where Serializer: BlockSerializer + Send + 'static, + DB: Send, { async fn next_block(&mut self) -> Result { tracing::debug!("awaiting next block"); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs index 3c18dbe6411..2899738b76c 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs @@ -11,20 +11,59 @@ use fuel_core_services::{ try_or_continue, try_or_stop, }; -use fuel_core_types::services::block_importer::SharedImportResult; +use fuel_core_types::{ + blockchain::Block as FuelBlock, + services::block_importer::SharedImportResult, +}; use futures::StreamExt; -use tokio::sync::mpsc::Sender; +use std::marker::PhantomData; +use tokio::sync::mpsc::{ + Receiver, + Sender, +}; -pub struct InnerTask { - pub(crate) importer: BoxStream, - // db: DB, - pub(crate) serializer: Serializer, - pub(crate) block_return: Sender, +pub struct InnerTask { + importer: 
BoxStream, + serializer: Serializer, + block_return_sender: Sender, + sync_task_handle: tokio::task::JoinHandle<()>, + sync_task_receiver: Receiver, + _marker: PhantomData, +} + +impl InnerTask +where + Serializer: BlockSerializer + Send, +{ + pub fn new( + importer: BoxStream, + serializer: Serializer, + block_return: Sender, + db: DB, + ) -> Self { + const ARB_CHANNEL_SIZE: usize = 100; + let (sync_task_sender, sync_task_receiver) = + tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); + let sync_task_handle = tokio::spawn(async move { + let _ = sync_task_sender; + let _ = db; + // Placeholder for any synchronous tasks if needed in the future + }); + Self { + importer, + serializer, + block_return_sender: block_return, + sync_task_handle, + sync_task_receiver, + _marker: PhantomData, + } + } } -impl RunnableTask for InnerTask +impl RunnableTask for InnerTask where Serializer: BlockSerializer + Send, + DB: Send + 'static, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tokio::select! 
{ @@ -32,10 +71,10 @@ where tracing::debug!("imported block"); if let Some(inner) = fuel_block { let height = inner.sealed_block.entity.header().height(); - let res = self.serializer.serialize_block(&inner.sealed_block); + let res = self.serializer.serialize_block(&inner.sealed_block.entity); let block = try_or_continue!(res); let event = BlockSourceEvent::NewBlock(*height, block); - let res = self.block_return.send(event).await; + let res = self.block_return_sender.send(event).await; try_or_stop!(res, |_e| "failed to send imported block to receiver: {_e:?}"); TaskNextAction::Continue } else { @@ -62,9 +101,10 @@ where } #[async_trait::async_trait] -impl RunnableService for InnerTask +impl RunnableService for InnerTask where Serializer: BlockSerializer + Send + 'static, + DB: Send + 'static, { const NAME: &'static str = "BlockSourceInnerService"; type SharedData = (); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 2c6baf0c915..0a627276848 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -37,13 +37,14 @@ async fn next_block__gets_new_block_from_importer() { let blocks: Vec = vec![import_result]; let block_stream = tokio_stream::iter(blocks).into_boxed(); let serializer = MockSerializer; - let mut adapter = ImporterAndDbSource::new(block_stream, serializer.clone()); + let db = (); + let mut adapter = ImporterAndDbSource::new(block_stream, serializer.clone(), db); // when let actual = adapter.next_block().await.unwrap(); // then - let serialized = serializer.serialize_block(&block).unwrap(); + let serialized = serializer.serialize_block(&block.entity).unwrap(); let expected = BlockSourceEvent::NewBlock(*height, serialized); assert_eq!(expected, actual); } diff --git a/crates/types/src/blockchain.rs 
b/crates/types/src/blockchain.rs index b4e9b4230f5..1c6b22e61ce 100644 --- a/crates/types/src/blockchain.rs +++ b/crates/types/src/blockchain.rs @@ -1,11 +1,19 @@ //! Blockchain related types +#[cfg(not(feature = "test-helpers"))] use crate::blockchain::{ block::Block, consensus::Sealed, header::BlockHeader, }; +#[cfg(feature = "test-helpers")] +pub use crate::blockchain::{ + block::Block, + consensus::Sealed, + header::BlockHeader, +}; + pub mod block; pub mod consensus; pub mod header; From 67e63afad5cf05918f466a6c746bb9de61105d6d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 10 Sep 2025 17:19:04 -0600 Subject: [PATCH 054/100] Add more for dbs --- .../src/blocks/importer_and_db_source.rs | 21 +++++- .../importer_and_db_source/inner_service.rs | 70 ++++++++++++++++--- .../blocks/importer_and_db_source/tests.rs | 24 ++++++- crates/types/src/blockchain.rs | 8 --- 4 files changed, 100 insertions(+), 23 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index b847f3fb3fb..e02f3a6608a 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -16,8 +16,13 @@ use fuel_core_services::{ ServiceRunner, stream::BoxStream, }; +use fuel_core_storage::{ + StorageInspect, + tables::FuelBlocks, +}; use fuel_core_types::{ - blockchain::Block as FuelBlock, + blockchain::block::Block as FuelBlock, + fuel_types::BlockHeight, services::block_importer::SharedImportResult, }; @@ -41,16 +46,26 @@ where impl ImporterAndDbSource where Serializer: BlockSerializer + Send + 'static, - DB: Send, + DB: StorageInspect + Send, + >::Error: std::fmt::Debug, { pub fn new( importer: BoxStream, serializer: Serializer, database: DB, + db_starting_height: BlockHeight, + db_ending_height: BlockHeight, ) -> Self { const ARB_CHANNEL_SIZE: usize = 100; let 
(block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let inner = InnerTask::new(importer, serializer, block_return, database); + let inner = InnerTask::new( + importer, + serializer, + block_return, + database, + db_starting_height, + db_ending_height, + ); let runner = ServiceRunner::new(inner); runner.start().unwrap(); Self { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs index 2899738b76c..f1f65833bdc 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs @@ -11,8 +11,16 @@ use fuel_core_services::{ try_or_continue, try_or_stop, }; +use fuel_core_storage::{ + StorageInspect, + column::Column as OnChainColumn, + kv_store::KeyValueInspect, + tables::FuelBlocks, + transactional::AtomicView, +}; use fuel_core_types::{ - blockchain::Block as FuelBlock, + blockchain::block::Block as FuelBlock, + fuel_types::BlockHeight, services::block_importer::SharedImportResult, }; use futures::StreamExt; @@ -26,7 +34,7 @@ pub struct InnerTask { importer: BoxStream, serializer: Serializer, block_return_sender: Sender, - sync_task_handle: tokio::task::JoinHandle<()>, + sync_task_handle: tokio::task::JoinHandle, sync_task_receiver: Receiver, _marker: PhantomData, } @@ -34,21 +42,20 @@ pub struct InnerTask { impl InnerTask where Serializer: BlockSerializer + Send, + DB: StorageInspect + Send + 'static, + >::Error: std::fmt::Debug, { pub fn new( importer: BoxStream, serializer: Serializer, block_return: Sender, db: DB, + db_starting_height: BlockHeight, + db_ending_height: BlockHeight, ) -> Self { - const ARB_CHANNEL_SIZE: usize = 100; - let (sync_task_sender, sync_task_receiver) = - tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let sync_task_handle = tokio::spawn(async move { - let 
_ = sync_task_sender; - let _ = db; - // Placeholder for any synchronous tasks if needed in the future - }); + // TODO: Should this be its own service? + let (sync_task_handle, sync_task_receiver) = + Self::sync_task_handle(db, db_starting_height, db_ending_height); Self { importer, serializer, @@ -58,6 +65,49 @@ where _marker: PhantomData, } } + + fn sync_task_handle( + db: DB, + db_starting_height: BlockHeight, + db_ending_height: BlockHeight, + ) -> (tokio::task::JoinHandle, Receiver) { + const ARB_CHANNEL_SIZE: usize = 100; + let (sync_task_sender, sync_task_receiver) = + tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); + let sync_task_handle = tokio::spawn(async move { + let start = u32::from(db_starting_height); + let end = u32::from(db_ending_height); + for height in start..=end { + let height = BlockHeight::new(height); + let res = StorageInspect::::get(&db, &height); + match res { + Ok(Some(compressed_block)) => { + let block = todo!(); + // let send_res = sync_task_sender.send(block).await; + // if send_res.is_err() { + // tracing::warn!( + // "sync task receiver dropped, stopping sync task" + // ); + // return false + // } + } + Ok(None) => { + tracing::warn!("no block found at height {}, skipping", height); + } + Err(e) => { + tracing::error!( + "error fetching block at height {}: {:?}", + height, + e + ); + return false + } + } + } + true + }); + (sync_task_handle, sync_task_receiver) + } } impl RunnableTask for InnerTask diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 0a627276848..5baf4e59eee 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -2,6 +2,14 @@ use super::*; use fuel_core_services::stream::IntoBoxStream; +use fuel_core_storage::{ + column::Column as OnChainColumn, + 
structured_storage::test::InMemoryStorage, + transactional::{ + IntoTransaction, + StorageTransaction, + }, +}; use fuel_core_types::{ blockchain::SealedBlock, services::block_importer::ImportResult, @@ -20,6 +28,10 @@ impl BlockSerializer for MockSerializer { } } +fn database() -> StorageTransaction> { + InMemoryStorage::default().into_transaction() +} + #[tokio::test] async fn next_block__gets_new_block_from_importer() { // given @@ -37,8 +49,16 @@ async fn next_block__gets_new_block_from_importer() { let blocks: Vec = vec![import_result]; let block_stream = tokio_stream::iter(blocks).into_boxed(); let serializer = MockSerializer; - let db = (); - let mut adapter = ImporterAndDbSource::new(block_stream, serializer.clone(), db); + let db = database(); + let db_starting_height = BlockHeight::from(0u32); + let db_ending_height = BlockHeight::from(1u32); + let mut adapter = ImporterAndDbSource::new( + block_stream, + serializer.clone(), + db, + db_starting_height, + db_ending_height, + ); // when let actual = adapter.next_block().await.unwrap(); diff --git a/crates/types/src/blockchain.rs b/crates/types/src/blockchain.rs index 1c6b22e61ce..b4e9b4230f5 100644 --- a/crates/types/src/blockchain.rs +++ b/crates/types/src/blockchain.rs @@ -1,19 +1,11 @@ //! 
Blockchain related types -#[cfg(not(feature = "test-helpers"))] use crate::blockchain::{ block::Block, consensus::Sealed, header::BlockHeader, }; -#[cfg(feature = "test-helpers")] -pub use crate::blockchain::{ - block::Block, - consensus::Sealed, - header::BlockHeader, -}; - pub mod block; pub mod consensus; pub mod header; From ba51463db8312ffe58702c78689cccc920d2141f Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 13:49:49 -0600 Subject: [PATCH 055/100] Get basic db sync test working --- .../src/blocks/importer_and_db_source.rs | 10 ++- .../importer_and_db_source/inner_service.rs | 69 +++++++++++++++---- .../blocks/importer_and_db_source/tests.rs | 42 +++++++++++ 3 files changed, 108 insertions(+), 13 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index e02f3a6608a..562a60fa937 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -18,6 +18,7 @@ use fuel_core_services::{ }; use fuel_core_storage::{ StorageInspect, + kv_store::KeyValueInspect, tables::FuelBlocks, }; use fuel_core_types::{ @@ -26,6 +27,12 @@ use fuel_core_types::{ services::block_importer::SharedImportResult, }; +use fuel_core_storage::{ + column::Column as OnChainColumn, + tables::Transactions, + transactional::StorageTransaction, +}; + pub mod inner_service; #[cfg(test)] mod tests; @@ -47,7 +54,8 @@ impl ImporterAndDbSource where Serializer: BlockSerializer + Send + 'static, DB: StorageInspect + Send, - >::Error: std::fmt::Debug, + DB: StorageInspect + Send + 'static, + >::Error: std::fmt::Debug + Send, { pub fn new( importer: BoxStream, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs index 
f1f65833bdc..3ee9673bc13 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs @@ -15,11 +15,18 @@ use fuel_core_storage::{ StorageInspect, column::Column as OnChainColumn, kv_store::KeyValueInspect, - tables::FuelBlocks, - transactional::AtomicView, + tables::{ + FuelBlocks, + Transactions, + }, + transactional::{ + AtomicView, + ReadTransaction, + }, }; use fuel_core_types::{ blockchain::block::Block as FuelBlock, + fuel_tx::TxId, fuel_types::BlockHeight, services::block_importer::SharedImportResult, }; @@ -43,7 +50,8 @@ impl InnerTask where Serializer: BlockSerializer + Send, DB: StorageInspect + Send + 'static, - >::Error: std::fmt::Debug, + DB: StorageInspect + Send + 'static, + >::Error: std::fmt::Debug + Send, { pub fn new( importer: BoxStream, @@ -75,6 +83,11 @@ where let (sync_task_sender, sync_task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); let sync_task_handle = tokio::spawn(async move { + tracing::debug!( + "running sync task from height {} to {}", + db_starting_height, + db_ending_height + ); let start = u32::from(db_starting_height); let end = u32::from(db_ending_height); for height in start..=end { @@ -82,14 +95,31 @@ where let res = StorageInspect::::get(&db, &height); match res { Ok(Some(compressed_block)) => { - let block = todo!(); - // let send_res = sync_task_sender.send(block).await; - // if send_res.is_err() { - // tracing::warn!( - // "sync task receiver dropped, stopping sync task" - // ); - // return false - // } + tracing::debug!("found block at height {}, syncing", height); + let tx_ids = compressed_block.transactions(); + let mut txs = Vec::new(); + for tx_id in tx_ids { + let tx_res = StorageInspect::::get(&db, &tx_id); + match tx_res { + Ok(Some(tx)) => { + tracing::debug!("found tx id: {:?}", tx_id); + txs.push(tx.into_owned()); + } + Ok(None) => { + tracing::debug!("tx id not 
found in db: {:?}", tx_id); + todo!() + } + Err(e) => { + tracing::debug!( + "error while finding tx: {:?}", + tx_id + ); + todo!() + } + } + } + let block = as Clone>::clone(&compressed_block).uncompress(txs); + let _res = sync_task_sender.send(block).await.unwrap(); } Ok(None) => { tracing::warn!("no block found at height {}, skipping", height); @@ -132,6 +162,21 @@ where TaskNextAction::Stop } } + fuel_block = self.sync_task_receiver.recv() => { + tracing::debug!("synced block from db"); + if let Some(fuel_block) = fuel_block { + let height = fuel_block.header().height(); + let res = self.serializer.serialize_block(&fuel_block); + let block = try_or_continue!(res); + let event = BlockSourceEvent::NewBlock(*height, block); + let res = self.block_return_sender.send(event).await; + try_or_stop!(res, |_e| "failed to send synced block to receiver: {_e:?}"); + TaskNextAction::Continue + } else { + tracing::debug!("sync task ended"); + TaskNextAction::Stop + } + } _ = watcher.while_started() => { TaskNextAction::Stop }, @@ -146,7 +191,7 @@ where } async fn shutdown(self) -> anyhow::Result<()> { - todo!() + Ok(()) } } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 5baf4e59eee..f23cb803487 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -3,15 +3,19 @@ use super::*; use fuel_core_services::stream::IntoBoxStream; use fuel_core_storage::{ + StorageAsMut, column::Column as OnChainColumn, structured_storage::test::InMemoryStorage, transactional::{ IntoTransaction, StorageTransaction, + WriteTransaction, }, }; + use fuel_core_types::{ blockchain::SealedBlock, + fuel_types::ChainId, services::block_importer::ImportResult, }; use std::sync::Arc; @@ -68,3 +72,41 @@ async fn next_block__gets_new_block_from_importer() 
{ let expected = BlockSourceEvent::NewBlock(*height, serialized); assert_eq!(expected, actual); } + +#[tokio::test] +async fn next_block__can_get_block_from_db() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .try_init(); + // given + let chain_id = ChainId::default(); + let block = SealedBlock::default(); + let height = block.entity.header().height(); + let serializer = MockSerializer; + let mut db = database(); + let mut tx = db.write_transaction(); + let compressed_block = block.entity.compress(&chain_id); + tx.storage_as_mut::() + .insert(&height, &compressed_block) + .unwrap(); + tx.commit().unwrap(); + let blocks: Vec = vec![]; + let block_stream = tokio_stream::iter(blocks).into_boxed(); + let db_starting_height = *height; + let db_ending_height = *height; + let mut adapter = ImporterAndDbSource::new( + block_stream, + serializer.clone(), + db, + db_starting_height, + db_ending_height, + ); + + // when + let actual = adapter.next_block().await.unwrap(); + + // then + let serialized = serializer.serialize_block(&block.entity).unwrap(); + let expected = BlockSourceEvent::NewBlock(*height, serialized); + assert_eq!(expected, actual); +} From 145b94d72d96a82cbd098f9daa9d9f4f56660d9e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 13:58:53 -0600 Subject: [PATCH 056/100] Include tx in db --- .../blocks/importer_and_db_source/tests.rs | 27 ++++++++++++++++--- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index f23cb803487..db1c4df79a7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -15,6 +15,10 @@ use fuel_core_storage::{ use fuel_core_types::{ blockchain::SealedBlock, + fuel_tx::{ + Transaction, + 
UniqueIdentifier, + }, fuel_types::ChainId, services::block_importer::ImportResult, }; @@ -73,6 +77,13 @@ async fn next_block__gets_new_block_from_importer() { assert_eq!(expected, actual); } +fn arbitrary_block_with_txs() -> FuelBlock { + let mut block = FuelBlock::default(); + let txs = block.transactions_mut(); + *txs = vec![Transaction::default_test_tx()]; + block +} + #[tokio::test] async fn next_block__can_get_block_from_db() { let _ = tracing_subscriber::fmt() @@ -80,16 +91,24 @@ async fn next_block__can_get_block_from_db() { .try_init(); // given let chain_id = ChainId::default(); - let block = SealedBlock::default(); - let height = block.entity.header().height(); + let block = arbitrary_block_with_txs(); + let height = block.header().height(); let serializer = MockSerializer; let mut db = database(); let mut tx = db.write_transaction(); - let compressed_block = block.entity.compress(&chain_id); + let compressed_block = block.compress(&chain_id); tx.storage_as_mut::() .insert(&height, &compressed_block) .unwrap(); tx.commit().unwrap(); + let mut tx = db.write_transaction(); + tx.storage_as_mut::() + .insert( + &block.transactions()[0].id(&chain_id), + &block.transactions()[0], + ) + .unwrap(); + tx.commit().unwrap(); let blocks: Vec = vec![]; let block_stream = tokio_stream::iter(blocks).into_boxed(); let db_starting_height = *height; @@ -106,7 +125,7 @@ async fn next_block__can_get_block_from_db() { let actual = adapter.next_block().await.unwrap(); // then - let serialized = serializer.serialize_block(&block.entity).unwrap(); + let serialized = serializer.serialize_block(&block).unwrap(); let expected = BlockSourceEvent::NewBlock(*height, serialized); assert_eq!(expected, actual); } From 68846445d7ab20b9d549a0f786522366a5990257 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 14:00:02 -0600 Subject: [PATCH 057/100] Update CHANGELOG --- .changes/added/3097.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changes/added/3097.md 
diff --git a/.changes/added/3097.md b/.changes/added/3097.md new file mode 100644 index 00000000000..0b822d1f5cc --- /dev/null +++ b/.changes/added/3097.md @@ -0,0 +1 @@ +Add block source adapter using fuel storage and block importer \ No newline at end of file From c5bd39c54ffeadbf5ac626c9515d7b7f4b55b4db Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 14:20:56 -0600 Subject: [PATCH 058/100] Refactor --- .../src/blocks/importer_and_db_source.rs | 19 ++- .../importer_and_db_source/inner_service.rs | 117 ++++++++++-------- .../blocks/importer_and_db_source/tests.rs | 3 +- 3 files changed, 74 insertions(+), 65 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 562a60fa937..90661547613 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -18,7 +18,6 @@ use fuel_core_services::{ }; use fuel_core_storage::{ StorageInspect, - kv_store::KeyValueInspect, tables::FuelBlocks, }; use fuel_core_types::{ @@ -27,11 +26,7 @@ use fuel_core_types::{ services::block_importer::SharedImportResult, }; -use fuel_core_storage::{ - column::Column as OnChainColumn, - tables::Transactions, - transactional::StorageTransaction, -}; +use fuel_core_storage::tables::Transactions; pub mod inner_service; #[cfg(test)] @@ -43,8 +38,8 @@ pub trait BlockSerializer { pub struct ImporterAndDbSource where - Serializer: BlockSerializer + Send + 'static, - DB: Send + 'static, + Serializer: BlockSerializer + Send + Sync + 'static, + DB: Send + Sync + 'static, { _inner: ServiceRunner>, receiver: tokio::sync::mpsc::Receiver, @@ -52,8 +47,8 @@ where impl ImporterAndDbSource where - Serializer: BlockSerializer + Send + 'static, - DB: StorageInspect + Send, + Serializer: BlockSerializer + Send + Sync + 'static, + DB: StorageInspect + Send + Sync, DB: 
StorageInspect + Send + 'static, >::Error: std::fmt::Debug + Send, { @@ -85,8 +80,8 @@ where impl BlockSource for ImporterAndDbSource where - Serializer: BlockSerializer + Send + 'static, - DB: Send, + Serializer: BlockSerializer + Send + Sync + 'static, + DB: Send + Sync, { async fn next_block(&mut self) -> Result { tracing::debug!("awaiting next block"); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs index 3ee9673bc13..619b4ac7b28 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs @@ -12,17 +12,12 @@ use fuel_core_services::{ try_or_stop, }; use fuel_core_storage::{ + self, StorageInspect, - column::Column as OnChainColumn, - kv_store::KeyValueInspect, tables::{ FuelBlocks, Transactions, }, - transactional::{ - AtomicView, - ReadTransaction, - }, }; use fuel_core_types::{ blockchain::block::Block as FuelBlock, @@ -41,7 +36,7 @@ pub struct InnerTask { importer: BoxStream, serializer: Serializer, block_return_sender: Sender, - sync_task_handle: tokio::task::JoinHandle, + _sync_task_handle: tokio::task::JoinHandle, sync_task_receiver: Receiver, _marker: PhantomData, } @@ -62,13 +57,13 @@ where db_ending_height: BlockHeight, ) -> Self { // TODO: Should this be its own service? 
- let (sync_task_handle, sync_task_receiver) = + let (_sync_task_handle, sync_task_receiver) = Self::sync_task_handle(db, db_starting_height, db_ending_height); Self { importer, serializer, block_return_sender: block_return, - sync_task_handle, + _sync_task_handle, sync_task_receiver, _marker: PhantomData, } @@ -109,7 +104,7 @@ where tracing::debug!("tx id not found in db: {:?}", tx_id); todo!() } - Err(e) => { + Err(_) => { tracing::debug!( "error while finding tx: {:?}", tx_id @@ -142,51 +137,16 @@ where impl RunnableTask for InnerTask where - Serializer: BlockSerializer + Send, - DB: Send + 'static, + Serializer: BlockSerializer + Send + Sync, + DB: Send + Sync + 'static, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tokio::select! { - fuel_block = self.importer.next() => { - tracing::debug!("imported block"); - if let Some(inner) = fuel_block { - let height = inner.sealed_block.entity.header().height(); - let res = self.serializer.serialize_block(&inner.sealed_block.entity); - let block = try_or_continue!(res); - let event = BlockSourceEvent::NewBlock(*height, block); - let res = self.block_return_sender.send(event).await; - try_or_stop!(res, |_e| "failed to send imported block to receiver: {_e:?}"); - TaskNextAction::Continue - } else { - tracing::debug!("importer stream ended"); - TaskNextAction::Stop - } - } - fuel_block = self.sync_task_receiver.recv() => { - tracing::debug!("synced block from db"); - if let Some(fuel_block) = fuel_block { - let height = fuel_block.header().height(); - let res = self.serializer.serialize_block(&fuel_block); - let block = try_or_continue!(res); - let event = BlockSourceEvent::NewBlock(*height, block); - let res = self.block_return_sender.send(event).await; - try_or_stop!(res, |_e| "failed to send synced block to receiver: {_e:?}"); - TaskNextAction::Continue - } else { - tracing::debug!("sync task ended"); - TaskNextAction::Stop - } - } + fuel_block = self.importer.next() => 
self.process_shared_import_result(fuel_block).await, + fuel_block = self.sync_task_receiver.recv() => self.process_db_block(fuel_block).await, _ = watcher.while_started() => { TaskNextAction::Stop }, - // fuel_block = self.db.next_block() => { - // todo!() - // } - // serialized_block = self.serializer.next_serialized_block() => { - // let res = self.block_return.send(serialized_block); - // try_or_stop!(res) - // } } } @@ -195,11 +155,66 @@ where } } +impl InnerTask +where + Serializer: BlockSerializer + Send + Sync, + DB: Send + Sync + 'static, +{ + async fn process_shared_import_result( + &self, + maybe_import_result: Option, + ) -> TaskNextAction { + tracing::debug!("imported block"); + match maybe_import_result { + Some(import_result) => { + let height = import_result.sealed_block.entity.header().height(); + let res = self + .serializer + .serialize_block(&import_result.sealed_block.entity); + let block = try_or_continue!(res); + let event = BlockSourceEvent::NewBlock(*height, block); + let res = self.block_return_sender.send(event).await; + try_or_stop!( + res, + |_e| "failed to send imported block to receiver: {_e:?}" + ); + TaskNextAction::Continue + } + None => { + tracing::debug!("importer returned None, stopping"); + TaskNextAction::Stop + } + } + } + + async fn process_db_block( + &self, + maybe_fuel_block: Option, + ) -> TaskNextAction { + tracing::debug!("synced block from db"); + match maybe_fuel_block { + Some(fuel_block) => { + let height = fuel_block.header().height(); + let res = self.serializer.serialize_block(&fuel_block); + let block = try_or_continue!(res); + let event = BlockSourceEvent::NewBlock(*height, block); + let res = self.block_return_sender.send(event).await; + try_or_stop!(res, |_e| "failed to send db block to receiver: {_e:?}"); + TaskNextAction::Continue + } + None => { + tracing::debug!("sync task returned None, stopping"); + TaskNextAction::Stop + } + } + } +} + #[async_trait::async_trait] impl RunnableService for InnerTask 
where - Serializer: BlockSerializer + Send + 'static, - DB: Send + 'static, + Serializer: BlockSerializer + Send + Sync + 'static, + DB: Send + Sync + 'static, { const NAME: &'static str = "BlockSourceInnerService"; type SharedData = (); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index db1c4df79a7..e186639c3b3 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -109,8 +109,7 @@ async fn next_block__can_get_block_from_db() { ) .unwrap(); tx.commit().unwrap(); - let blocks: Vec = vec![]; - let block_stream = tokio_stream::iter(blocks).into_boxed(); + let block_stream = tokio_stream::pending().into_boxed(); let db_starting_height = *height; let db_ending_height = *height; let mut adapter = ImporterAndDbSource::new( From 7995ff4a49fbaab04395098353562e0c5584ab3b Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 14:54:35 -0600 Subject: [PATCH 059/100] Refactor into two services --- .../src/blocks/importer_and_db_source.rs | 31 ++- .../importer_service.rs | 111 +++++++++ .../importer_and_db_source/inner_service.rs | 235 ------------------ .../importer_and_db_source/sync_service.rs | 144 +++++++++++ 4 files changed, 277 insertions(+), 244 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs delete mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 90661547613..99eebfca40d 100644 --- 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -3,7 +3,7 @@ use crate::{ Block, BlockSource, BlockSourceEvent, - importer_and_db_source::inner_service::InnerTask, + importer_and_db_source::importer_service::ImporterTask, }, result::{ Error, @@ -26,9 +26,11 @@ use fuel_core_types::{ services::block_importer::SharedImportResult, }; +use crate::blocks::importer_and_db_source::sync_service::SyncTask; use fuel_core_storage::tables::Transactions; -pub mod inner_service; +pub mod importer_service; +pub mod sync_service; #[cfg(test)] mod tests; @@ -40,14 +42,18 @@ pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, DB: Send + Sync + 'static, + DB: StorageInspect, + DB: StorageInspect, + >::Error: std::fmt::Debug + Send, { - _inner: ServiceRunner>, + _importer_task: ServiceRunner>, + _sync_task: ServiceRunner>, receiver: tokio::sync::mpsc::Receiver, } impl ImporterAndDbSource where - Serializer: BlockSerializer + Send + Sync + 'static, + Serializer: BlockSerializer + Clone + Send + Sync + 'static, DB: StorageInspect + Send + Sync, DB: StorageInspect + Send + 'static, >::Error: std::fmt::Debug + Send, @@ -61,18 +67,22 @@ where ) -> Self { const ARB_CHANNEL_SIZE: usize = 100; let (block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let inner = InnerTask::new( - importer, + let importer_task = + ImporterTask::new(importer, serializer.clone(), block_return.clone()); + let importer_runner = ServiceRunner::new(importer_task); + importer_runner.start().unwrap(); + let sync_task = SyncTask::new( serializer, block_return, database, db_starting_height, db_ending_height, ); - let runner = ServiceRunner::new(inner); - runner.start().unwrap(); + let sync_runner = ServiceRunner::new(sync_task); + sync_runner.start().unwrap(); Self { - _inner: runner, + _importer_task: importer_runner, + _sync_task: sync_runner, 
receiver, } } @@ -82,6 +92,9 @@ impl BlockSource for ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, DB: Send + Sync, + DB: StorageInspect, + DB: StorageInspect, + >::Error: std::fmt::Debug + Send, { async fn next_block(&mut self) -> Result { tracing::debug!("awaiting next block"); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs new file mode 100644 index 00000000000..41fa3ccb255 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -0,0 +1,111 @@ +use crate::blocks::{ + BlockSourceEvent, + importer_and_db_source::BlockSerializer, +}; +use fuel_core_services::{ + RunnableService, + RunnableTask, + StateWatcher, + TaskNextAction, + stream::BoxStream, + try_or_continue, + try_or_stop, +}; +use fuel_core_types::services::block_importer::SharedImportResult; +use futures::StreamExt; +use tokio::sync::mpsc::Sender; + +pub struct ImporterTask { + importer: BoxStream, + serializer: Serializer, + block_return_sender: Sender, +} + +impl ImporterTask +where + Serializer: BlockSerializer + Send, +{ + pub fn new( + importer: BoxStream, + serializer: Serializer, + block_return: Sender, + ) -> Self { + Self { + importer, + serializer, + block_return_sender: block_return, + } + } +} +impl RunnableTask for ImporterTask +where + Serializer: BlockSerializer + Send + Sync, +{ + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + tokio::select! 
{ + fuel_block = self.importer.next() => self.process_shared_import_result(fuel_block).await, + _ = watcher.while_started() => { + TaskNextAction::Stop + }, + } + } + + async fn shutdown(self) -> anyhow::Result<()> { + Ok(()) + } +} + +impl ImporterTask +where + Serializer: BlockSerializer + Send + Sync, +{ + async fn process_shared_import_result( + &self, + maybe_import_result: Option, + ) -> TaskNextAction { + tracing::debug!("imported block"); + match maybe_import_result { + Some(import_result) => { + let height = import_result.sealed_block.entity.header().height(); + let res = self + .serializer + .serialize_block(&import_result.sealed_block.entity); + let block = try_or_continue!(res); + let event = BlockSourceEvent::NewBlock(*height, block); + let res = self.block_return_sender.send(event).await; + try_or_stop!( + res, + |_e| "failed to send imported block to receiver: {_e:?}" + ); + TaskNextAction::Continue + } + None => { + tracing::debug!("importer returned None, stopping"); + TaskNextAction::Stop + } + } + } +} + +#[async_trait::async_trait] +impl RunnableService for ImporterTask +where + Serializer: BlockSerializer + Send + Sync + 'static, +{ + const NAME: &'static str = "BlockSourceInnerService"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData { + () + } + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs deleted file mode 100644 index 619b4ac7b28..00000000000 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/inner_service.rs +++ /dev/null @@ -1,235 +0,0 @@ -use crate::blocks::{ - BlockSourceEvent, - importer_and_db_source::BlockSerializer, -}; -use fuel_core_services::{ - RunnableService, - 
RunnableTask, - StateWatcher, - TaskNextAction, - stream::BoxStream, - try_or_continue, - try_or_stop, -}; -use fuel_core_storage::{ - self, - StorageInspect, - tables::{ - FuelBlocks, - Transactions, - }, -}; -use fuel_core_types::{ - blockchain::block::Block as FuelBlock, - fuel_tx::TxId, - fuel_types::BlockHeight, - services::block_importer::SharedImportResult, -}; -use futures::StreamExt; -use std::marker::PhantomData; -use tokio::sync::mpsc::{ - Receiver, - Sender, -}; - -pub struct InnerTask { - importer: BoxStream, - serializer: Serializer, - block_return_sender: Sender, - _sync_task_handle: tokio::task::JoinHandle, - sync_task_receiver: Receiver, - _marker: PhantomData, -} - -impl InnerTask -where - Serializer: BlockSerializer + Send, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - >::Error: std::fmt::Debug + Send, -{ - pub fn new( - importer: BoxStream, - serializer: Serializer, - block_return: Sender, - db: DB, - db_starting_height: BlockHeight, - db_ending_height: BlockHeight, - ) -> Self { - // TODO: Should this be its own service? 
- let (_sync_task_handle, sync_task_receiver) = - Self::sync_task_handle(db, db_starting_height, db_ending_height); - Self { - importer, - serializer, - block_return_sender: block_return, - _sync_task_handle, - sync_task_receiver, - _marker: PhantomData, - } - } - - fn sync_task_handle( - db: DB, - db_starting_height: BlockHeight, - db_ending_height: BlockHeight, - ) -> (tokio::task::JoinHandle, Receiver) { - const ARB_CHANNEL_SIZE: usize = 100; - let (sync_task_sender, sync_task_receiver) = - tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let sync_task_handle = tokio::spawn(async move { - tracing::debug!( - "running sync task from height {} to {}", - db_starting_height, - db_ending_height - ); - let start = u32::from(db_starting_height); - let end = u32::from(db_ending_height); - for height in start..=end { - let height = BlockHeight::new(height); - let res = StorageInspect::::get(&db, &height); - match res { - Ok(Some(compressed_block)) => { - tracing::debug!("found block at height {}, syncing", height); - let tx_ids = compressed_block.transactions(); - let mut txs = Vec::new(); - for tx_id in tx_ids { - let tx_res = StorageInspect::::get(&db, &tx_id); - match tx_res { - Ok(Some(tx)) => { - tracing::debug!("found tx id: {:?}", tx_id); - txs.push(tx.into_owned()); - } - Ok(None) => { - tracing::debug!("tx id not found in db: {:?}", tx_id); - todo!() - } - Err(_) => { - tracing::debug!( - "error while finding tx: {:?}", - tx_id - ); - todo!() - } - } - } - let block = as Clone>::clone(&compressed_block).uncompress(txs); - let _res = sync_task_sender.send(block).await.unwrap(); - } - Ok(None) => { - tracing::warn!("no block found at height {}, skipping", height); - } - Err(e) => { - tracing::error!( - "error fetching block at height {}: {:?}", - height, - e - ); - return false - } - } - } - true - }); - (sync_task_handle, sync_task_receiver) - } -} - -impl RunnableTask for InnerTask -where - Serializer: BlockSerializer + Send + Sync, - DB: Send + Sync + 'static, -{ 
- async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { - tokio::select! { - fuel_block = self.importer.next() => self.process_shared_import_result(fuel_block).await, - fuel_block = self.sync_task_receiver.recv() => self.process_db_block(fuel_block).await, - _ = watcher.while_started() => { - TaskNextAction::Stop - }, - } - } - - async fn shutdown(self) -> anyhow::Result<()> { - Ok(()) - } -} - -impl InnerTask -where - Serializer: BlockSerializer + Send + Sync, - DB: Send + Sync + 'static, -{ - async fn process_shared_import_result( - &self, - maybe_import_result: Option, - ) -> TaskNextAction { - tracing::debug!("imported block"); - match maybe_import_result { - Some(import_result) => { - let height = import_result.sealed_block.entity.header().height(); - let res = self - .serializer - .serialize_block(&import_result.sealed_block.entity); - let block = try_or_continue!(res); - let event = BlockSourceEvent::NewBlock(*height, block); - let res = self.block_return_sender.send(event).await; - try_or_stop!( - res, - |_e| "failed to send imported block to receiver: {_e:?}" - ); - TaskNextAction::Continue - } - None => { - tracing::debug!("importer returned None, stopping"); - TaskNextAction::Stop - } - } - } - - async fn process_db_block( - &self, - maybe_fuel_block: Option, - ) -> TaskNextAction { - tracing::debug!("synced block from db"); - match maybe_fuel_block { - Some(fuel_block) => { - let height = fuel_block.header().height(); - let res = self.serializer.serialize_block(&fuel_block); - let block = try_or_continue!(res); - let event = BlockSourceEvent::NewBlock(*height, block); - let res = self.block_return_sender.send(event).await; - try_or_stop!(res, |_e| "failed to send db block to receiver: {_e:?}"); - TaskNextAction::Continue - } - None => { - tracing::debug!("sync task returned None, stopping"); - TaskNextAction::Stop - } - } - } -} - -#[async_trait::async_trait] -impl RunnableService for InnerTask -where - Serializer: BlockSerializer + 
Send + Sync + 'static, - DB: Send + Sync + 'static, -{ - const NAME: &'static str = "BlockSourceInnerService"; - type SharedData = (); - type Task = Self; - type TaskParams = (); - - fn shared_data(&self) -> Self::SharedData { - () - } - - async fn into_task( - self, - _state_watcher: &StateWatcher, - _params: Self::TaskParams, - ) -> anyhow::Result { - Ok(self) - } -} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs new file mode 100644 index 00000000000..50695205fc2 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -0,0 +1,144 @@ +use crate::blocks::{ + BlockSourceEvent, + importer_and_db_source::BlockSerializer, +}; +use fuel_core_services::{ + RunnableService, + RunnableTask, + StateWatcher, + TaskNextAction, + try_or_continue, +}; +use fuel_core_storage::{ + self, + StorageInspect, + tables::{ + FuelBlocks, + Transactions, + }, +}; +use fuel_core_types::{ + fuel_tx::TxId, + fuel_types::BlockHeight, +}; +use tokio::sync::mpsc::Sender; + +pub struct SyncTask { + serializer: Serializer, + block_return_sender: Sender, + db: DB, + db_starting_height: BlockHeight, + db_ending_height: BlockHeight, +} + +impl SyncTask +where + Serializer: BlockSerializer + Send, + DB: StorageInspect + Send + 'static, + DB: StorageInspect + Send + 'static, + >::Error: std::fmt::Debug + Send, +{ + pub fn new( + serializer: Serializer, + block_return: Sender, + db: DB, + db_starting_height: BlockHeight, + db_ending_height: BlockHeight, + ) -> Self { + Self { + serializer, + block_return_sender: block_return, + db, + db_starting_height, + db_ending_height, + } + } +} + +impl RunnableTask for SyncTask +where + Serializer: BlockSerializer + Send + Sync, + DB: Send + Sync + 'static, + DB: StorageInspect + Send + 'static, + DB: StorageInspect + Send + 'static, + >::Error: std::fmt::Debug + Send, 
+{ + async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { + let start = u32::from(self.db_starting_height); + let end = u32::from(self.db_ending_height); + for height in start..=end { + let height = BlockHeight::new(height); + let res = StorageInspect::::get(&self.db, &height); + match res { + Ok(Some(compressed_block)) => { + tracing::debug!("found block at height {}, syncing", height); + let tx_ids = compressed_block.transactions(); + let mut txs = Vec::new(); + for tx_id in tx_ids { + let tx_res = + StorageInspect::::get(&self.db, &tx_id); + match tx_res { + Ok(Some(tx)) => { + tracing::debug!("found tx id: {:?}", tx_id); + txs.push(tx.into_owned()); + } + Ok(None) => { + tracing::debug!("tx id not found in db: {:?}", tx_id); + todo!() + } + Err(_) => { + tracing::debug!("error while finding tx: {:?}", tx_id); + todo!() + } + } + } + let block = as Clone>::clone(&compressed_block).uncompress(txs); + let res = self.serializer.serialize_block(&block); + let block = try_or_continue!(res); + let event = + BlockSourceEvent::NewBlock(BlockHeight::from(*height), block); + self.block_return_sender.send(event).await.unwrap(); + } + Ok(None) => { + tracing::warn!("no block found at height {}, skipping", height); + } + Err(e) => { + tracing::error!("error fetching block at height {}: {:?}", height, e); + return TaskNextAction::Stop; + } + } + } + TaskNextAction::Stop + } + + async fn shutdown(self) -> anyhow::Result<()> { + Ok(()) + } +} + +#[async_trait::async_trait] +impl RunnableService for SyncTask +where + Serializer: BlockSerializer + Send + Sync + 'static, + DB: Send + Sync + 'static, + DB: StorageInspect + Send + 'static, + DB: StorageInspect + Send + 'static, + >::Error: std::fmt::Debug + Send, +{ + const NAME: &'static str = "BlockSourceInnerService"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData { + () + } + + async fn into_task( + self, + _state_watcher: &StateWatcher, + 
_params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} From 86db658c8bf850e623dbd3de9a3ad4c33f0d4907 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 14:56:08 -0600 Subject: [PATCH 060/100] lint --- crates/services/block_aggregator_api/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index fa67c52a594..b39c5320705 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -5,6 +5,7 @@ edition = "2024" [dependencies] anyhow = { workspace = true } +async-trait = { workspace = true } bytes = { workspace = true, features = ["serde"] } enum-iterator = { workspace = true } fuel-core-services = { workspace = true } @@ -15,15 +16,14 @@ rand = { workspace = true } serde = { workspace = true, features = ["derive"] } strum = { workspace = true } strum_macros = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } -async-trait = { workspace = true } -thiserror = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } futures = { workspace = true } postcard = { workspace = true } -tracing-subscriber = { workspace = true } tokio-stream = { workspace = true } +tracing-subscriber = { workspace = true } From 71439292af583ab99d35807582c583680face9a7 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 15:02:22 -0600 Subject: [PATCH 061/100] Note some questions about the intended behavior --- .../block_aggregator_api/src/blocks/importer_and_db_source.rs | 1 + .../src/blocks/importer_and_db_source/sync_service.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 99eebfca40d..4dd1919cc8b 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -46,6 +46,7 @@ where DB: StorageInspect, >::Error: std::fmt::Debug + Send, { + // TODO: How to handle errors from these tasks? _importer_task: ServiceRunner>, _sync_task: ServiceRunner>, receiver: tokio::sync::mpsc::Receiver, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 50695205fc2..4dfb48ae056 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -63,8 +63,10 @@ where DB: StorageInspect + Send + 'static, >::Error: std::fmt::Debug + Send, { + // TODO: This is syncronous and then just ends. What do we want to do when this is done? 
async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { let start = u32::from(self.db_starting_height); + // TODO: make this more dynamic so we can make sure we get all blocks up to what the importer receives let end = u32::from(self.db_ending_height); for height in start..=end { let height = BlockHeight::new(height); From a59e27e9b62e7c1f650d2434c1197abdcbf40239 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 12 Sep 2025 15:07:07 -0600 Subject: [PATCH 062/100] Fix test to look for correct variant --- .../src/blocks/importer_and_db_source/sync_service.rs | 2 +- .../src/blocks/importer_and_db_source/tests.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 4dfb48ae056..df16db25015 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -98,7 +98,7 @@ where let res = self.serializer.serialize_block(&block); let block = try_or_continue!(res); let event = - BlockSourceEvent::NewBlock(BlockHeight::from(*height), block); + BlockSourceEvent::OldBlock(BlockHeight::from(*height), block); self.block_return_sender.send(event).await.unwrap(); } Ok(None) => { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index e186639c3b3..9e6f7e3e75b 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -125,6 +125,6 @@ async fn next_block__can_get_block_from_db() { // then let serialized = serializer.serialize_block(&block).unwrap(); - let expected = 
BlockSourceEvent::NewBlock(*height, serialized); + let expected = BlockSourceEvent::OldBlock(*height, serialized); assert_eq!(expected, actual); } From 06d342838845c57c2f8ca85c28b341e7192ae146 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 09:40:45 -0600 Subject: [PATCH 063/100] Add warning trace instead of just comment --- crates/services/block_aggregator_api/src/db/storage_db.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index f71e65e989b..282d23349c2 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -82,7 +82,11 @@ impl StorageDB { self.orphaned_heights.insert(height); } Ordering::Less => { - // ignore duplicate or old block + tracing::warn!( + "Received block at height {:?}, but the syncing is already at height {:?}. Ignoring block.", + height, + self.highest_contiguous_block + ); } } } From 7af852b5a480dcabcf2318c2efd6dac88d75cfdb Mon Sep 17 00:00:00 2001 From: Mitchell Turner Date: Mon, 15 Sep 2025 09:50:22 -0600 Subject: [PATCH 064/100] Update crates/services/block_aggregator_api/src/lib.rs Co-authored-by: Aaryamann Challani <43716372+rymnc@users.noreply.github.com> --- crates/services/block_aggregator_api/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 1d8043b6b89..6ee072726c9 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -33,7 +33,12 @@ pub struct BlockAggregator { query: Api, database: DB, block_source: Blocks, - new_block_subscriptions: Vec>, + new_block_subscriptions: Vec>, +} + +pub struct NewBlock { + height: BlockHeight, + block: Block, } impl RunnableTask for BlockAggregator From 
a813c349659044c03fb5e8046748016a22326cc7 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 09:56:54 -0600 Subject: [PATCH 065/100] Hide test methods, fix trace message --- crates/services/block_aggregator_api/src/api.rs | 1 + crates/services/block_aggregator_api/src/block_aggregator.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 323ce7d557a..f2888db6134 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -54,6 +54,7 @@ impl fmt::Debug for BlockAggregatorQuery { } } +#[cfg(test)] impl BlockAggregatorQuery { pub fn get_block_range>( first: H, diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index b0d252fb06f..04d03894847 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -119,7 +119,7 @@ where match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { - tracing::error!("Error sending new block to source due to full channel: {id:?}"); + tracing::error!("Error sending new block to subscriber due to full channel: {id:?}"); true }, Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { From d0f50c6d5d727bb03c8f8821ea6ec21dc35e8a21 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 10:13:13 -0600 Subject: [PATCH 066/100] Rename tasks, remove feature from deps --- crates/services/block_aggregator_api/Cargo.toml | 2 +- .../src/blocks/importer_and_db_source/importer_service.rs | 2 +- .../src/blocks/importer_and_db_source/sync_service.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index b39c5320705..83abf3c5538 100644 
--- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -10,7 +10,7 @@ bytes = { workspace = true, features = ["serde"] } enum-iterator = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } -fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } +fuel-core-types = { workspace = true, features = ["std"] } num_enum = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 41fa3ccb255..5e6e8dc3011 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -92,7 +92,7 @@ impl RunnableService for ImporterTask where Serializer: BlockSerializer + Send + Sync + 'static, { - const NAME: &'static str = "BlockSourceInnerService"; + const NAME: &'static str = "BlockSourceImporterTask"; type SharedData = (); type Task = Self; type TaskParams = (); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index df16db25015..0b73608c77b 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -127,7 +127,7 @@ where DB: StorageInspect + Send + 'static, >::Error: std::fmt::Debug + Send, { - const NAME: &'static str = "BlockSourceInnerService"; + const NAME: &'static str = "BlockSourceSyncTask"; type SharedData = (); type Task = Self; type TaskParams = (); From 
89ad266505dbd670e5267d9d8fb1fb213e7184b4 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 10:23:56 -0600 Subject: [PATCH 067/100] Account for `NewBlock` type --- .../services/block_aggregator_api/src/api.rs | 24 +++++++------------ .../src/block_aggregator.rs | 6 ++--- .../services/block_aggregator_api/src/lib.rs | 10 ++++++++ .../block_aggregator_api/src/tests.rs | 3 ++- 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index f2888db6134..251f47966ac 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,14 +1,9 @@ use crate::{ - blocks::Block, + NewBlock, result::Result, }; use fuel_core_types::fuel_types::BlockHeight; use std::fmt; -use tokio::sync::oneshot::{ - Receiver, - Sender, - channel, -}; /// The API for querying the block aggregator service. pub trait BlockAggregatorApi: Send + Sync { @@ -25,14 +20,14 @@ pub enum BlockAggregatorQuery { GetBlockRange { first: BlockHeight, last: BlockHeight, - response: Sender, + response: tokio::sync::oneshot::Sender, }, GetCurrentHeight { - response: Sender, + response: tokio::sync::oneshot::Sender, }, // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? 
NewBlockSubscription { - response: tokio::sync::mpsc::Sender<(BlockHeight, Block)>, + response: tokio::sync::mpsc::Sender, }, } @@ -59,8 +54,8 @@ impl BlockAggregatorQuery { pub fn get_block_range>( first: H, last: H, - ) -> (Self, Receiver) { - let (sender, receiver) = channel(); + ) -> (Self, tokio::sync::oneshot::Receiver) { + let (sender, receiver) = tokio::sync::oneshot::channel(); let first: BlockHeight = first.into(); let last: BlockHeight = last.into(); let query = Self::GetBlockRange { @@ -71,14 +66,13 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn get_current_height() -> (Self, Receiver) { - let (sender, receiver) = channel(); + pub fn get_current_height() -> (Self, tokio::sync::oneshot::Receiver) { + let (sender, receiver) = tokio::sync::oneshot::channel(); let query = Self::GetCurrentHeight { response: sender }; (query, receiver) } - pub fn new_block_subscription() - -> (Self, tokio::sync::mpsc::Receiver<(BlockHeight, Block)>) { + pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { const ARBITRARY_CHANNEL_SIZE: usize = 10; let (sender, receiver) = tokio::sync::mpsc::channel(ARBITRARY_CHANNEL_SIZE); let query = Self::NewBlockSubscription { response: sender }; diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index 04d03894847..273c340dde7 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -1,11 +1,11 @@ use crate::{ BlockAggregator, + NewBlock, api::{ BlockAggregatorApi, BlockAggregatorQuery, }, blocks::{ - Block, BlockSource, BlockSourceEvent, }, @@ -98,7 +98,7 @@ where async fn handle_new_block_subscription( &mut self, - response: tokio::sync::mpsc::Sender<(BlockHeight, Block)>, + response: tokio::sync::mpsc::Sender, ) -> TaskNextAction { self.new_block_subscriptions.push(response); TaskNextAction::Continue @@ -115,7 +115,7 @@ where let (id, 
block) = match event { BlockSourceEvent::NewBlock(id, block) => { self.new_block_subscriptions.retain_mut(|sub| { - let send_res = sub.try_send((id, block.clone())); + let send_res = sub.try_send(NewBlock::new(id, block.clone())); match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index 6ee072726c9..abe6914c715 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -41,6 +41,16 @@ pub struct NewBlock { block: Block, } +impl NewBlock { + pub fn new(height: BlockHeight, block: Block) -> Self { + Self { height, block } + } + + pub fn decompose(self) -> (BlockHeight, Block) { + (self.height, self.block) + } +} + impl RunnableTask for BlockAggregator where Api: BlockAggregatorApi, diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 674c8b0bc24..c1110d4af5e 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -241,7 +241,8 @@ async fn run__new_block_subscription__sends_new_block() { tokio::time::timeout(tokio::time::Duration::from_secs(1), response.recv()) .await .unwrap() - .unwrap(); + .unwrap() + .decompose(); assert_eq!(expected_block, actual_block); assert_eq!(expected_height, actual_height); From 231485c579cef6307abf42a15fd9696105848f92 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 10:38:19 -0600 Subject: [PATCH 068/100] Increase timeout on failing test --- tests/tests/gas_price.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests/gas_price.rs b/tests/tests/gas_price.rs index ea9f247a31d..56088f4f568 100644 --- a/tests/tests/gas_price.rs +++ b/tests/tests/gas_price.rs @@ -442,7 +442,7 @@ async fn latest_gas_price__if_node_restarts_gets_latest_value() { for _ in 
0..arb_blocks_to_produce { driver.client.produce_blocks(1, None).await.unwrap(); tokio::time::timeout( - Duration::from_millis(10), + Duration::from_millis(100), driver.node.await_gas_price_synced(), ) .await From 29108815eab69fbdf6a0070309fe1a463d4a6987 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 10:48:41 -0600 Subject: [PATCH 069/100] Fix spelling --- .../src/blocks/importer_and_db_source/sync_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 0b73608c77b..dfad1b7812c 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -63,7 +63,7 @@ where DB: StorageInspect + Send + 'static, >::Error: std::fmt::Debug + Send, { - // TODO: This is syncronous and then just ends. What do we want to do when this is done? + // TODO: This is synchronous and then just ends. What do we want to do when this is done? 
async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { let start = u32::from(self.db_starting_height); // TODO: make this more dynamic so we can make sure we get all blocks up to what the importer receives From fff0c74fc2780b2c7bca53931738105a0c514217 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 13:11:01 -0600 Subject: [PATCH 070/100] Add new test for taking into account importer height --- .../services/block_aggregator_api/Cargo.toml | 1 + .../block_aggregator_api/src/blocks.rs | 4 +- .../src/blocks/importer_and_db_source.rs | 13 +- .../importer_service.rs | 26 +++- .../importer_and_db_source/sync_service.rs | 32 +++-- .../blocks/importer_and_db_source/tests.rs | 118 ++++++++++++++++-- 6 files changed, 170 insertions(+), 24 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 83abf3c5538..373d8234904 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -23,6 +23,7 @@ tracing = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } +fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } futures = { workspace = true } postcard = { workspace = true } tokio-stream = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index c9ca572ad56..de56f280975 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -14,13 +14,13 @@ pub trait BlockSource: Send + Sync { fn drain(&mut self) -> impl Future> + Send; } -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq, Hash)] pub enum BlockSourceEvent { NewBlock(BlockHeight, Block), OldBlock(BlockHeight, Block), } -#[derive(Clone, Debug, PartialEq, 
Eq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct Block { bytes: Bytes, } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 4dd1919cc8b..244e1ed2d8e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -49,6 +49,7 @@ where // TODO: How to handle errors from these tasks? _importer_task: ServiceRunner>, _sync_task: ServiceRunner>, + /// Receive blocks from the importer and sync tasks receiver: tokio::sync::mpsc::Receiver, } @@ -64,12 +65,17 @@ where serializer: Serializer, database: DB, db_starting_height: BlockHeight, - db_ending_height: BlockHeight, + db_ending_height: Option, ) -> Self { const ARB_CHANNEL_SIZE: usize = 100; let (block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let importer_task = - ImporterTask::new(importer, serializer.clone(), block_return.clone()); + let (new_end_sender, new_end_receiver) = tokio::sync::oneshot::channel(); + let importer_task = ImporterTask::new( + importer, + serializer.clone(), + block_return.clone(), + Some(new_end_sender), + ); let importer_runner = ServiceRunner::new(importer_task); importer_runner.start().unwrap(); let sync_task = SyncTask::new( @@ -78,6 +84,7 @@ where database, db_starting_height, db_ending_height, + new_end_receiver, ); let sync_runner = ServiceRunner::new(sync_task); sync_runner.start().unwrap(); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 5e6e8dc3011..8680f46223d 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -11,7 +11,10 @@ use fuel_core_services::{ try_or_continue, try_or_stop, }; -use fuel_core_types::services::block_importer::SharedImportResult; +use fuel_core_types::{ + fuel_types::BlockHeight, + services::block_importer::SharedImportResult, +}; use futures::StreamExt; use tokio::sync::mpsc::Sender; @@ -19,6 +22,7 @@ pub struct ImporterTask { importer: BoxStream, serializer: Serializer, block_return_sender: Sender, + new_end_sender: Option>, } impl ImporterTask @@ -29,11 +33,13 @@ where importer: BoxStream, serializer: Serializer, block_return: Sender, + new_end_sender: Option>, ) -> Self { Self { importer, serializer, block_return_sender: block_return, + new_end_sender, } } } @@ -60,13 +66,29 @@ where Serializer: BlockSerializer + Send + Sync, { async fn process_shared_import_result( - &self, + &mut self, maybe_import_result: Option, ) -> TaskNextAction { tracing::debug!("imported block"); match maybe_import_result { Some(import_result) => { let height = import_result.sealed_block.entity.header().height(); + if let Some(sender) = self.new_end_sender.take() { + match sender.send(*height) { + Ok(_) => { + tracing::debug!( + "sent new end height to sync task: {:?}", + height + ); + } + Err(e) => { + tracing::error!( + "failed to send new end height to sync task: {:?}", + e + ); + } + } + } let res = self .serializer .serialize_block(&import_result.sealed_block.entity); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index dfad1b7812c..84673d3b970 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -28,7 +28,8 @@ pub struct SyncTask { block_return_sender: Sender, db: DB, db_starting_height: 
BlockHeight, - db_ending_height: BlockHeight, + db_ending_height: Option, + new_ending_height: tokio::sync::oneshot::Receiver, } impl SyncTask @@ -43,7 +44,8 @@ where block_return: Sender, db: DB, db_starting_height: BlockHeight, - db_ending_height: BlockHeight, + db_ending_height: Option, + new_ending_height: tokio::sync::oneshot::Receiver, ) -> Self { Self { serializer, @@ -51,8 +53,13 @@ where db, db_starting_height, db_ending_height, + new_ending_height, } } + + async fn check_for_new_end(&mut self) -> Option { + self.new_ending_height.try_recv().ok() + } } impl RunnableTask for SyncTask @@ -65,11 +72,21 @@ where { // TODO: This is synchronous and then just ends. What do we want to do when this is done? async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { - let start = u32::from(self.db_starting_height); - // TODO: make this more dynamic so we can make sure we get all blocks up to what the importer receives - let end = u32::from(self.db_ending_height); - for height in start..=end { - let height = BlockHeight::new(height); + let mut height = self.db_starting_height; + let mut end = self.db_ending_height; + loop { + if let Some(new_end) = self.check_for_new_end().await { + end = Some(new_end); + } + if let Some(current_end) = end { + if height >= current_end { + tracing::info!( + "reached end height {}, stopping sync task", + current_end + ); + break; + } + } let res = StorageInspect::::get(&self.db, &height); match res { Ok(Some(compressed_block)) => { @@ -109,6 +126,7 @@ where return TaskNextAction::Stop; } } + height = BlockHeight::from((*height).saturating_add(1)); } TaskNextAction::Stop } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 9e6f7e3e75b..3286a9da1ee 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -12,6 +12,7 @@ use fuel_core_storage::{ WriteTransaction, }, }; +use std::collections::HashSet; use fuel_core_types::{ blockchain::SealedBlock, @@ -59,13 +60,12 @@ async fn next_block__gets_new_block_from_importer() { let serializer = MockSerializer; let db = database(); let db_starting_height = BlockHeight::from(0u32); - let db_ending_height = BlockHeight::from(1u32); let mut adapter = ImporterAndDbSource::new( block_stream, serializer.clone(), db, db_starting_height, - db_ending_height, + None, ); // when @@ -77,8 +77,9 @@ async fn next_block__gets_new_block_from_importer() { assert_eq!(expected, actual); } -fn arbitrary_block_with_txs() -> FuelBlock { +fn arbitrary_block_with_txs(height: BlockHeight) -> FuelBlock { let mut block = FuelBlock::default(); + block.header_mut().set_block_height(height); let txs = block.transactions_mut(); *txs = vec![Transaction::default_test_tx()]; block @@ -86,12 +87,11 @@ fn arbitrary_block_with_txs() -> FuelBlock { #[tokio::test] async fn next_block__can_get_block_from_db() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::DEBUG) - .try_init(); // given let chain_id = ChainId::default(); - let block = arbitrary_block_with_txs(); + let height1 = BlockHeight::from(0u32); + let height2 = BlockHeight::from(1u32); + let block = arbitrary_block_with_txs(height1); let height = block.header().height(); let serializer = MockSerializer; let mut db = database(); @@ -100,8 +100,6 @@ async fn next_block__can_get_block_from_db() { tx.storage_as_mut::() .insert(&height, &compressed_block) .unwrap(); - tx.commit().unwrap(); - let mut tx = db.write_transaction(); tx.storage_as_mut::() .insert( &block.transactions()[0].id(&chain_id), @@ -111,7 +109,7 @@ async fn next_block__can_get_block_from_db() { tx.commit().unwrap(); let block_stream = tokio_stream::pending().into_boxed(); let db_starting_height = *height; - let db_ending_height = 
*height; + let db_ending_height = Some(height2); let mut adapter = ImporterAndDbSource::new( block_stream, serializer.clone(), @@ -128,3 +126,103 @@ async fn next_block__can_get_block_from_db() { let expected = BlockSourceEvent::OldBlock(*height, serialized); assert_eq!(expected, actual); } + +#[tokio::test] +async fn next_block__will_sync_blocks_from_db_after_receiving_height_from_new_end() { + // given + let chain_id = ChainId::default(); + let height1 = BlockHeight::from(0u32); + let height2 = BlockHeight::from(1u32); + let height3 = BlockHeight::from(2u32); + let block1 = arbitrary_block_with_txs(height1); + let block2 = arbitrary_block_with_txs(height2); + let serializer = MockSerializer; + let mut db = database(); + let mut tx = db.write_transaction(); + let compressed_block = block1.compress(&chain_id); + tx.storage_as_mut::() + .insert(&height1, &compressed_block) + .unwrap(); + tx.storage_as_mut::() + .insert( + &block1.transactions()[0].id(&chain_id), + &block1.transactions()[0], + ) + .unwrap(); + tx.commit().unwrap(); + let mut tx = db.write_transaction(); + let compressed_block = block2.compress(&chain_id); + tx.storage_as_mut::() + .insert(&height2, &compressed_block) + .unwrap(); + tx.storage_as_mut::() + .insert( + &block2.transactions()[0].id(&chain_id), + &block2.transactions()[0], + ) + .unwrap(); + tx.commit().unwrap(); + + // Add the imported block to db as well as streaming + let block3 = arbitrary_block_with_txs(height3); + let mut tx = db.write_transaction(); + let compressed_block = block3.compress(&chain_id); + tx.storage_as_mut::() + .insert(&height3, &compressed_block) + .unwrap(); + tx.storage_as_mut::() + .insert( + &block3.transactions()[0].id(&chain_id), + &block3.transactions()[0], + ) + .unwrap(); + tx.commit().unwrap(); + + let sealed_block = SealedBlock { + entity: block3.clone(), + consensus: Default::default(), + }; + let import_result = Arc::new( + ImportResult { + sealed_block, + tx_status: vec![], + events: vec![], + source: 
Default::default(), + } + .wrap(), + ); + let blocks: Vec = vec![import_result]; + let block_stream = tokio_stream::iter(blocks).into_boxed(); + let db_starting_height = height1; + let mut adapter = ImporterAndDbSource::new( + block_stream, + serializer.clone(), + db, + db_starting_height, + None, + ); + + // when + let actual1 = adapter.next_block().await.unwrap(); + let actual2 = adapter.next_block().await.unwrap(); + let actual3 = adapter.next_block().await.unwrap(); + + // then + let actual = vec![actual1, actual2, actual3] + .into_iter() + .collect::>(); + // should receive the + let expected = vec![ + BlockSourceEvent::OldBlock(height1, serializer.serialize_block(&block1).unwrap()), + BlockSourceEvent::OldBlock(height2, serializer.serialize_block(&block2).unwrap()), + BlockSourceEvent::NewBlock(height3, serializer.serialize_block(&block3).unwrap()), + ]; + let expected: HashSet<_> = expected.into_iter().collect(); + let length = actual.len(); + let expected_length = expected.len(); + for event in &actual { + tracing::debug!("actual event: {:?}", event); + } + assert_eq!(length, expected_length); + assert_eq!(expected, actual); +} From a9b3851ae9785c31199e4b54757c122107657a17 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 15:55:10 -0600 Subject: [PATCH 071/100] Remove todos --- .../src/blocks/importer_and_db_source/sync_service.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 84673d3b970..8dfa05e527f 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -103,11 +103,9 @@ where } Ok(None) => { tracing::debug!("tx id not found in db: {:?}", tx_id); - todo!() } Err(_) => { tracing::debug!("error while finding tx: {:?}", 
tx_id); - todo!() } } } From bdf8f33ac4ada4e2a078ab7a3c1cb50fec87d5f6 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 16:22:34 -0600 Subject: [PATCH 072/100] Refactor and cover cases better --- .../src/blocks/importer_and_db_source.rs | 27 ++-- .../importer_and_db_source/sync_service.rs | 136 ++++++++++-------- 2 files changed, 88 insertions(+), 75 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 244e1ed2d8e..7a7d63dd80e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -38,27 +38,29 @@ pub trait BlockSerializer { fn serialize_block(&self, block: &FuelBlock) -> Result; } -pub struct ImporterAndDbSource +pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect, - DB: StorageInspect, - >::Error: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + E: std::fmt::Debug + Send, { // TODO: How to handle errors from these tasks? 
_importer_task: ServiceRunner>, _sync_task: ServiceRunner>, /// Receive blocks from the importer and sync tasks receiver: tokio::sync::mpsc::Receiver, + + _error_marker: std::marker::PhantomData, } -impl ImporterAndDbSource +impl ImporterAndDbSource where Serializer: BlockSerializer + Clone + Send + Sync + 'static, - DB: StorageInspect + Send + Sync, - DB: StorageInspect + Send + 'static, - >::Error: std::fmt::Debug + Send, + DB: StorageInspect + Send + Sync, + DB: StorageInspect + Send + 'static, + E: std::fmt::Debug + Send, { pub fn new( importer: BoxStream, @@ -92,17 +94,18 @@ where _importer_task: importer_runner, _sync_task: sync_runner, receiver, + _error_marker: std::marker::PhantomData, } } } -impl BlockSource for ImporterAndDbSource +impl BlockSource for ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, DB: Send + Sync, - DB: StorageInspect, - DB: StorageInspect, - >::Error: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + E: std::fmt::Debug + Send + Sync, { async fn next_block(&mut self) -> Result { tracing::debug!("awaiting next block"); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 8dfa05e527f..8b3b84ad417 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -32,12 +32,12 @@ pub struct SyncTask { new_ending_height: tokio::sync::oneshot::Receiver, } -impl SyncTask +impl SyncTask where Serializer: BlockSerializer + Send, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - >::Error: std::fmt::Debug + Send, + DB: StorageInspect + Send + 'static, + DB: StorageInspect + Send + 'static, + E: std::fmt::Debug + Send, { pub fn new( serializer: Serializer, @@ -60,73 +60,83 @@ where async fn 
check_for_new_end(&mut self) -> Option { self.new_ending_height.try_recv().ok() } + + async fn get_block( + &mut self, + height: &BlockHeight, + ) -> Result, E> { + let maybe_block = StorageInspect::::get(&self.db, height)?; + if let Some(block) = maybe_block { + let tx_ids = block.transactions(); + let mut txs = Vec::new(); + for tx_id in tx_ids { + let tx_res = StorageInspect::::get(&self.db, &tx_id); + match tx_res { + Ok(Some(tx)) => { + tracing::debug!("found tx id: {:?}", tx_id); + txs.push(tx.into_owned()); + } + Ok(None) => { + return Ok(None); + } + Err(e) => return Err(e), + } + } + let block = + as Clone>::clone(&block) + .uncompress(txs); + Ok(Some(block)) + } else { + Ok(None) + } + } } -impl RunnableTask for SyncTask +impl RunnableTask for SyncTask where Serializer: BlockSerializer + Send + Sync, DB: Send + Sync + 'static, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - >::Error: std::fmt::Debug + Send, + DB: StorageInspect + Send + 'static, + DB: StorageInspect + Send + 'static, + E: std::fmt::Debug + Send, { // TODO: This is synchronous and then just ends. What do we want to do when this is done? 
async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { - let mut height = self.db_starting_height; - let mut end = self.db_ending_height; - loop { - if let Some(new_end) = self.check_for_new_end().await { - end = Some(new_end); + if let Some(new_end) = self.check_for_new_end().await { + self.db_ending_height = Some(new_end); + } + if let Some(current_end) = self.db_ending_height { + if self.db_starting_height >= current_end { + tracing::info!("reached end height {}, stopping sync task", current_end); + return TaskNextAction::Stop; } - if let Some(current_end) = end { - if height >= current_end { - tracing::info!( - "reached end height {}, stopping sync task", - current_end - ); - break; - } + } + let next_height = self.db_starting_height; + match self.get_block(&next_height).await { + Ok(Some(block)) => { + let res = self.serializer.serialize_block(&block); + let block = try_or_continue!(res); + let event = + BlockSourceEvent::OldBlock(BlockHeight::from(*next_height), block); + let res = self.block_return_sender.send(event).await; + try_or_continue!(res); + self.db_starting_height = + BlockHeight::from((*next_height).saturating_add(1)); + TaskNextAction::Continue } - let res = StorageInspect::::get(&self.db, &height); - match res { - Ok(Some(compressed_block)) => { - tracing::debug!("found block at height {}, syncing", height); - let tx_ids = compressed_block.transactions(); - let mut txs = Vec::new(); - for tx_id in tx_ids { - let tx_res = - StorageInspect::::get(&self.db, &tx_id); - match tx_res { - Ok(Some(tx)) => { - tracing::debug!("found tx id: {:?}", tx_id); - txs.push(tx.into_owned()); - } - Ok(None) => { - tracing::debug!("tx id not found in db: {:?}", tx_id); - } - Err(_) => { - tracing::debug!("error while finding tx: {:?}", tx_id); - } - } - } - let block = as Clone>::clone(&compressed_block).uncompress(txs); - let res = self.serializer.serialize_block(&block); - let block = try_or_continue!(res); - let event = - 
BlockSourceEvent::OldBlock(BlockHeight::from(*height), block); - self.block_return_sender.send(event).await.unwrap(); - } - Ok(None) => { - tracing::warn!("no block found at height {}, skipping", height); - } - Err(e) => { - tracing::error!("error fetching block at height {}: {:?}", height, e); - return TaskNextAction::Stop; - } + Ok(None) => { + tracing::warn!("no block found at height {:?}, retrying", next_height); + TaskNextAction::Continue + } + Err(e) => { + tracing::error!( + "error fetching block at height {}: {:?}", + next_height, + e + ); + TaskNextAction::Stop } - height = BlockHeight::from((*height).saturating_add(1)); } - TaskNextAction::Stop } async fn shutdown(self) -> anyhow::Result<()> { @@ -135,13 +145,13 @@ where } #[async_trait::async_trait] -impl RunnableService for SyncTask +impl RunnableService for SyncTask where Serializer: BlockSerializer + Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - >::Error: std::fmt::Debug + Send, + DB: StorageInspect + Send + 'static, + DB: StorageInspect + Send + 'static, + E: std::fmt::Debug + Send, { const NAME: &'static str = "BlockSourceSyncTask"; type SharedData = (); From b2a67a79b01073c562932578728cb1150d65800e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 16:31:21 -0600 Subject: [PATCH 073/100] Cleanup --- .../importer_and_db_source/sync_service.rs | 107 +++++++++--------- 1 file changed, 53 insertions(+), 54 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 8b3b84ad417..02ed186f7df 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -8,6 +8,7 @@ use fuel_core_services::{ StateWatcher, TaskNextAction, 
try_or_continue, + try_or_stop, }; use fuel_core_storage::{ self, @@ -18,7 +19,10 @@ use fuel_core_storage::{ }, }; use fuel_core_types::{ - fuel_tx::TxId, + fuel_tx::{ + Transaction, + TxId, + }, fuel_types::BlockHeight, }; use tokio::sync::mpsc::Sender; @@ -27,8 +31,8 @@ pub struct SyncTask { serializer: Serializer, block_return_sender: Sender, db: DB, - db_starting_height: BlockHeight, - db_ending_height: Option, + next_height: BlockHeight, + maybe_stop_height: Option, new_ending_height: tokio::sync::oneshot::Receiver, } @@ -51,37 +55,27 @@ where serializer, block_return_sender: block_return, db, - db_starting_height, - db_ending_height, + next_height: db_starting_height, + maybe_stop_height: db_ending_height, new_ending_height, } } - async fn check_for_new_end(&mut self) -> Option { - self.new_ending_height.try_recv().ok() + async fn maybe_update_stop_height(&mut self) { + if let Some(last_height) = self.new_ending_height.try_recv().ok() { + tracing::info!("updating last height to {}", last_height); + self.maybe_stop_height = Some(last_height); + } } - async fn get_block( - &mut self, + fn get_block( + &self, height: &BlockHeight, ) -> Result, E> { let maybe_block = StorageInspect::::get(&self.db, height)?; if let Some(block) = maybe_block { let tx_ids = block.transactions(); - let mut txs = Vec::new(); - for tx_id in tx_ids { - let tx_res = StorageInspect::::get(&self.db, &tx_id); - match tx_res { - Ok(Some(tx)) => { - tracing::debug!("found tx id: {:?}", tx_id); - txs.push(tx.into_owned()); - } - Ok(None) => { - return Ok(None); - } - Err(e) => return Err(e), - } - } + let txs = self.get_txs(tx_ids)?; let block = as Clone>::clone(&block) .uncompress(txs); @@ -90,6 +84,22 @@ where Ok(None) } } + + fn get_txs(&self, tx_ids: &[TxId]) -> Result, E> { + let mut txs = Vec::new(); + for tx_id in tx_ids { + match StorageInspect::::get(&self.db, &tx_id)? 
{ + Some(tx) => { + tracing::debug!("found tx id: {:?}", tx_id); + txs.push(tx.into_owned()); + } + None => { + return Ok(vec![]); + } + } + } + Ok(txs) + } } impl RunnableTask for SyncTask @@ -102,41 +112,30 @@ where { // TODO: This is synchronous and then just ends. What do we want to do when this is done? async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { - if let Some(new_end) = self.check_for_new_end().await { - self.db_ending_height = Some(new_end); - } - if let Some(current_end) = self.db_ending_height { - if self.db_starting_height >= current_end { - tracing::info!("reached end height {}, stopping sync task", current_end); + self.maybe_update_stop_height().await; + if let Some(last_height) = self.maybe_stop_height { + if self.next_height >= last_height { + tracing::info!("reached end height {}, stopping sync task", last_height); return TaskNextAction::Stop; } } - let next_height = self.db_starting_height; - match self.get_block(&next_height).await { - Ok(Some(block)) => { - let res = self.serializer.serialize_block(&block); - let block = try_or_continue!(res); - let event = - BlockSourceEvent::OldBlock(BlockHeight::from(*next_height), block); - let res = self.block_return_sender.send(event).await; - try_or_continue!(res); - self.db_starting_height = - BlockHeight::from((*next_height).saturating_add(1)); - TaskNextAction::Continue - } - Ok(None) => { - tracing::warn!("no block found at height {:?}, retrying", next_height); - TaskNextAction::Continue - } - Err(e) => { - tracing::error!( - "error fetching block at height {}: {:?}", - next_height, - e - ); - TaskNextAction::Stop - } + let next_height = self.next_height; + let res = self.get_block(&next_height); + let maybe_block = try_or_stop!(res, |e| { + tracing::error!("error fetching block at height {}: {:?}", next_height, e); + }); + if let Some(block) = maybe_block { + let res = self.serializer.serialize_block(&block); + let block = try_or_continue!(res); + let event = + 
BlockSourceEvent::OldBlock(BlockHeight::from(*next_height), block); + let res = self.block_return_sender.send(event).await; + try_or_continue!(res); + self.next_height = BlockHeight::from((*next_height).saturating_add(1)); + } else { + tracing::warn!("no block found at height {:?}, retrying", next_height); } + TaskNextAction::Continue } async fn shutdown(self) -> anyhow::Result<()> { From eeeccb79edc60a1cf4297c460eefd2a6b0c3828d Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 15 Sep 2025 16:37:04 -0600 Subject: [PATCH 074/100] Appease Clippy-sama --- crates/services/block_aggregator_api/Cargo.toml | 2 +- .../src/blocks/importer_and_db_source/importer_service.rs | 4 +--- .../src/blocks/importer_and_db_source/sync_service.rs | 8 +++----- .../src/blocks/importer_and_db_source/tests.rs | 5 +++-- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 373d8234904..2f101c53cf8 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -11,6 +11,7 @@ enum-iterator = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } +futures = { workspace = true } num_enum = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } @@ -24,7 +25,6 @@ tracing = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } -futures = { workspace = true } postcard = { workspace = true } tokio-stream = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs 
b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 8680f46223d..500d7d0de08 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -119,9 +119,7 @@ where type Task = Self; type TaskParams = (); - fn shared_data(&self) -> Self::SharedData { - () - } + fn shared_data(&self) -> Self::SharedData {} async fn into_task( self, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 02ed186f7df..4eb1ef20ac4 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -62,7 +62,7 @@ where } async fn maybe_update_stop_height(&mut self) { - if let Some(last_height) = self.new_ending_height.try_recv().ok() { + if let Ok(last_height) = self.new_ending_height.try_recv() { tracing::info!("updating last height to {}", last_height); self.maybe_stop_height = Some(last_height); } @@ -88,7 +88,7 @@ where fn get_txs(&self, tx_ids: &[TxId]) -> Result, E> { let mut txs = Vec::new(); for tx_id in tx_ids { - match StorageInspect::::get(&self.db, &tx_id)? { + match StorageInspect::::get(&self.db, tx_id)? 
{ Some(tx) => { tracing::debug!("found tx id: {:?}", tx_id); txs.push(tx.into_owned()); @@ -157,9 +157,7 @@ where type Task = Self; type TaskParams = (); - fn shared_data(&self) -> Self::SharedData { - () - } + fn shared_data(&self) -> Self::SharedData {} async fn into_task( self, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 3286a9da1ee..c08590581e7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,6 +1,7 @@ #![allow(non_snake_case)] use super::*; +use ::postcard::to_allocvec; use fuel_core_services::stream::IntoBoxStream; use fuel_core_storage::{ StorageAsMut, @@ -30,7 +31,7 @@ pub struct MockSerializer; impl BlockSerializer for MockSerializer { fn serialize_block(&self, block: &FuelBlock) -> Result { - let bytes_vec = postcard::to_allocvec(block).map_err(|e| { + let bytes_vec = to_allocvec(block).map_err(|e| { Error::BlockSource(anyhow!("failed to serialize block: {}", e)) })?; Ok(Block::from(bytes_vec)) @@ -98,7 +99,7 @@ async fn next_block__can_get_block_from_db() { let mut tx = db.write_transaction(); let compressed_block = block.compress(&chain_id); tx.storage_as_mut::() - .insert(&height, &compressed_block) + .insert(height, &compressed_block) .unwrap(); tx.storage_as_mut::() .insert( From 8c30b6bfddd96ea0eae4b32de32bf9b0f5712a24 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 09:22:36 -0600 Subject: [PATCH 075/100] remove comment --- .../src/blocks/importer_and_db_source/sync_service.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 4eb1ef20ac4..ac9584aa4cf 100644 --- 
a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -110,7 +110,6 @@ where DB: StorageInspect + Send + 'static, E: std::fmt::Debug + Send, { - // TODO: This is synchronous and then just ends. What do we want to do when this is done? async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { self.maybe_update_stop_height().await; if let Some(last_height) = self.maybe_stop_height { From 74e92308477710c95fc8df04caa9ca4d5e076bb9 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 09:46:05 -0600 Subject: [PATCH 076/100] Add error handling for broken tasks --- .../src/blocks/importer_and_db_source.rs | 19 +++++++++++++++---- .../importer_and_db_source/sync_service.rs | 7 +++++-- .../blocks/importer_and_db_source/tests.rs | 15 ++++++++++++--- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 7a7d63dd80e..9d37ca9c4c0 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -109,10 +109,21 @@ where { async fn next_block(&mut self) -> Result { tracing::debug!("awaiting next block"); - self.receiver - .recv() - .await - .ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) + // self.receiver + // .recv() + // .await + // .ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) + tokio::select! 
{ + block_res = self.receiver.recv() => { + block_res.ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) + } + importer_error = self._importer_task.await_stop() => { + Err(Error::BlockSource(anyhow!("Importer task stopped unexpectedly: {:?}", importer_error))) + } + sync_error = self._sync_task.await_stop() => { + Err(Error::BlockSource(anyhow!("Sync task stopped unexpectedly: {:?}", sync_error))) + } + } } async fn drain(&mut self) -> Result<()> { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index ac9584aa4cf..683e36b4ba5 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -114,8 +114,11 @@ where self.maybe_update_stop_height().await; if let Some(last_height) = self.maybe_stop_height { if self.next_height >= last_height { - tracing::info!("reached end height {}, stopping sync task", last_height); - return TaskNextAction::Stop; + tracing::info!( + "reached end height {}, putting task into hibernation", + last_height + ); + futures::future::pending().await } } let next_height = self.next_height; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index c08590581e7..92e04d69e5f 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -2,7 +2,10 @@ use super::*; use ::postcard::to_allocvec; -use fuel_core_services::stream::IntoBoxStream; +use fuel_core_services::stream::{ + IntoBoxStream, + pending, +}; use fuel_core_storage::{ StorageAsMut, column::Column as OnChainColumn, @@ -13,6 +16,7 @@ use fuel_core_storage::{ 
WriteTransaction, }, }; +use futures::StreamExt; use std::collections::HashSet; use fuel_core_types::{ @@ -42,6 +46,11 @@ fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() } +// let block_stream = tokio_stream::iter(blocks).chain(pending()).into_boxed(); +fn stream_with_pending(items: Vec) -> BoxStream { + tokio_stream::iter(items).chain(pending()).into_boxed() +} + #[tokio::test] async fn next_block__gets_new_block_from_importer() { // given @@ -57,7 +66,7 @@ async fn next_block__gets_new_block_from_importer() { .wrap(), ); let blocks: Vec = vec![import_result]; - let block_stream = tokio_stream::iter(blocks).into_boxed(); + let block_stream = tokio_stream::iter(blocks).chain(pending()).into_boxed(); let serializer = MockSerializer; let db = database(); let db_starting_height = BlockHeight::from(0u32); @@ -193,7 +202,7 @@ async fn next_block__will_sync_blocks_from_db_after_receiving_height_from_new_en .wrap(), ); let blocks: Vec = vec![import_result]; - let block_stream = tokio_stream::iter(blocks).into_boxed(); + let block_stream = stream_with_pending(blocks); let db_starting_height = height1; let mut adapter = ImporterAndDbSource::new( block_stream, From 13a89011f14208dc191200db42b41da252c5918e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 09:46:28 -0600 Subject: [PATCH 077/100] Cleanup --- .../src/blocks/importer_and_db_source.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 9d37ca9c4c0..7e100575bd6 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -46,9 +46,8 @@ where DB: StorageInspect, E: std::fmt::Debug + Send, { - // TODO: How to handle errors from these tasks? 
- _importer_task: ServiceRunner>, - _sync_task: ServiceRunner>, + importer_task: ServiceRunner>, + sync_task: ServiceRunner>, /// Receive blocks from the importer and sync tasks receiver: tokio::sync::mpsc::Receiver, @@ -91,8 +90,8 @@ where let sync_runner = ServiceRunner::new(sync_task); sync_runner.start().unwrap(); Self { - _importer_task: importer_runner, - _sync_task: sync_runner, + importer_task: importer_runner, + sync_task: sync_runner, receiver, _error_marker: std::marker::PhantomData, } @@ -117,10 +116,10 @@ where block_res = self.receiver.recv() => { block_res.ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) } - importer_error = self._importer_task.await_stop() => { + importer_error = self.importer_task.await_stop() => { Err(Error::BlockSource(anyhow!("Importer task stopped unexpectedly: {:?}", importer_error))) } - sync_error = self._sync_task.await_stop() => { + sync_error = self.sync_task.await_stop() => { Err(Error::BlockSource(anyhow!("Sync task stopped unexpectedly: {:?}", sync_error))) } } From ceede2ad870471e7786a9639f8a175011feff566 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 11:21:38 -0600 Subject: [PATCH 078/100] Create protobuf server and client and implement first endpoint --- Cargo.lock | 251 +++++++++++++++++- Cargo.toml | 4 + .../services/block_aggregator_api/Cargo.toml | 6 + crates/services/block_aggregator_api/build.rs | 4 + .../block_aggregator_api/proto/api.proto | 13 + .../services/block_aggregator_api/src/api.rs | 2 + .../src/api/protobuf_adapter.rs | 140 ++++++++++ .../block_aggregator_api/src/result.rs | 2 +- 8 files changed, 407 insertions(+), 15 deletions(-) create mode 100644 crates/services/block_aggregator_api/build.rs create mode 100644 crates/services/block_aggregator_api/proto/api.proto create mode 100644 crates/services/block_aggregator_api/src/api/protobuf_adapter.rs diff --git a/Cargo.lock b/Cargo.lock index f489c1d81dd..00543c00dea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-1105,6 +1105,32 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" +dependencies = [ + "axum-core 0.5.2", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "itoa", + "matchit 0.8.4", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.2", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + [[package]] name = "axum-core" version = "0.2.9" @@ -1158,6 +1184,25 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-core" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" +dependencies = [ + "bytes", + "futures-core", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.75" @@ -1407,6 +1452,7 @@ dependencies = [ "futures", "num_enum", "postcard", + "prost 0.14.1", "rand 0.8.5", "serde", "strum 0.25.0", @@ -1414,6 +1460,9 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tokio-stream", + "tonic 0.14.2", + "tonic-prost", + "tonic-prost-build", "tracing", "tracing-subscriber", ] @@ -4371,7 +4420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbdd607c9c70921cc016becde659e5062ae460b7bb3f525a1dd65f8209c0083" dependencies = [ "prost 0.12.6", - "prost-types", + "prost-types 0.12.6", "regex", "tonic 0.11.0", ] @@ -4999,7 +5048,7 @@ dependencies = [ "ipnet", "once_cell", "rand 0.8.5", - "socket2", + "socket2 0.5.10", "thiserror 1.0.69", "tinyvec", "tokio", @@ -5162,7 +5211,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio", 
"tower-service", "tracing", @@ -5264,7 +5313,7 @@ dependencies = [ "hyper 1.6.0", "libc", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -5604,7 +5653,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.10", "widestring", "windows-sys 0.48.0", "winreg", @@ -6064,7 +6113,7 @@ dependencies = [ "libp2p-swarm", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "void", @@ -6148,7 +6197,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.14", "rustls 0.23.27", - "socket2", + "socket2 0.5.10", "thiserror 1.0.69", "tokio", "tracing", @@ -6243,7 +6292,7 @@ dependencies = [ "libc", "libp2p-core", "libp2p-identity", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -6533,6 +6582,12 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "md-5" version = "0.10.6" @@ -6719,6 +6774,12 @@ dependencies = [ "unsigned-varint 0.8.0", ] +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + [[package]] name = "multistream-select" version = "0.13.0" @@ -7822,6 +7883,38 @@ dependencies = [ "prost-derive 0.13.5", ] +[[package]] +name = "prost" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +dependencies = [ + "bytes", + "prost-derive 0.14.1", +] + +[[package]] +name = 
"prost-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +dependencies = [ + "heck 0.5.0", + "itertools 0.14.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.14.1", + "prost-types 0.14.1", + "pulldown-cmark", + "pulldown-cmark-to-cmark", + "regex", + "syn 2.0.101", + "tempfile", +] + [[package]] name = "prost-derive" version = "0.11.9" @@ -7861,6 +7954,19 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "prost-derive" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "prost-types" version = "0.12.6" @@ -7870,6 +7976,15 @@ dependencies = [ "prost 0.12.6", ] +[[package]] +name = "prost-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +dependencies = [ + "prost 0.14.1", +] + [[package]] name = "psl-types" version = "2.0.11" @@ -7895,6 +8010,26 @@ dependencies = [ "psl-types", ] +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags 2.9.1", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" +dependencies = [ + "pulldown-cmark", +] + [[package]] name = "pulley-interpreter" version = "34.0.2" @@ -8016,7 +8151,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls 0.23.27", - "socket2", 
+ "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tracing", @@ -8053,7 +8188,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.5.10", "tracing", "windows-sys 0.59.0", ] @@ -9260,6 +9395,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "soketto" version = "0.8.1" @@ -9661,7 +9806,7 @@ dependencies = [ "num-traits", "once_cell", "prost 0.12.6", - "prost-types", + "prost-types 0.12.6", "serde", "serde_bytes", "serde_json", @@ -9728,7 +9873,7 @@ dependencies = [ "bytes", "flex-error", "prost 0.12.6", - "prost-types", + "prost-types 0.12.6", "serde", "serde_bytes", "subtle-encoding", @@ -10069,7 +10214,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.10", "tokio-macros", "windows-sys 0.52.0", ] @@ -10276,7 +10421,7 @@ dependencies = [ "percent-encoding", "pin-project", "prost 0.13.5", - "socket2", + "socket2 0.5.10", "tokio", "tokio-stream", "tower 0.4.13", @@ -10285,6 +10430,74 @@ dependencies = [ "tracing", ] +[[package]] +name = "tonic" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +dependencies = [ + "async-trait", + "axum 0.8.4", + "base64 0.22.1", + "bytes", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-timeout 0.5.2", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.0", + "sync_wrapper 1.0.2", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" +dependencies = [ + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost 0.14.1", + "tonic 0.14.2", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types 0.14.1", + "quote", + "syn 2.0.101", + "tempfile", + "tonic-build", +] + [[package]] name = "tower" version = "0.4.13" @@ -10313,11 +10526,15 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", + "indexmap 2.9.0", "pin-project-lite", + "slab", "sync_wrapper 1.0.2", "tokio", + "tokio-util", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -10536,6 +10753,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-ident" version = "1.0.18" diff --git a/Cargo.toml b/Cargo.toml index 883dde7023f..d7d61cd04da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,6 +133,7 @@ pretty_assertions = "1.4.0" primitive-types = { version = "0.12", default-features = false } prometheus-client = "0.22.0" proptest = "1.1" +prost = "0.14.1" rand = "0.8" rayon = "1.10.0" # enable cookie store to support L7 sticky sessions @@ -154,6 +155,9 @@ tokio = { version = 
"1.27", default-features = false } tokio-rayon = "2.1.0" tokio-stream = "0.1" tokio-util = { version = "0.7", default-features = false } +tonic = "0.14.2" +tonic-prost = "0.14.2" +tonic-prost-build = "0.14.2" tracing = "0.1" tracing-attributes = "0.1" tracing-subscriber = "0.3" diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 2f101c53cf8..426d9c411ec 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -13,6 +13,7 @@ fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } num_enum = { workspace = true } +prost ={ workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } strum = { workspace = true } @@ -20,6 +21,8 @@ strum_macros = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +tonic = { workspace = true } +tonic-prost = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } @@ -28,3 +31,6 @@ fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } postcard = { workspace = true } tokio-stream = { workspace = true } tracing-subscriber = { workspace = true } + +[build-dependencies] +tonic-prost-build = { workspace = true } diff --git a/crates/services/block_aggregator_api/build.rs b/crates/services/block_aggregator_api/build.rs new file mode 100644 index 00000000000..c438a06453f --- /dev/null +++ b/crates/services/block_aggregator_api/build.rs @@ -0,0 +1,4 @@ +fn main() -> Result<(), Box> { + tonic_prost_build::compile_protos("proto/api.proto")?; + Ok(()) +} diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto new file mode 100644 index 00000000000..685d586b9a4 --- /dev/null +++ 
b/crates/services/block_aggregator_api/proto/api.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package blockaggregator; + +message BlockHeightRequest {} + +message BlockHeightResponse { + uint32 height = 1; +} + +service BlockAggregator { + rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); +} \ No newline at end of file diff --git a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 251f47966ac..3cc652bdd09 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -5,6 +5,8 @@ use crate::{ use fuel_core_types::fuel_types::BlockHeight; use std::fmt; +pub mod protobuf_adapter; + /// The API for querying the block aggregator service. pub trait BlockAggregatorApi: Send + Sync { /// The type of the block range response. diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs new file mode 100644 index 00000000000..863d1a4598b --- /dev/null +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -0,0 +1,140 @@ +use crate::{ + api::{ + BlockAggregatorApi, + BlockAggregatorQuery, + }, + block_range_response::BlockRangeResponse, + result::Result, +}; +use async_trait::async_trait; + +tonic::include_proto!("blockaggregator"); + +use crate::result::Error; +use block_aggregator_server::BlockAggregator; + +pub struct Server { + query_sender: tokio::sync::mpsc::Sender>, +} + +impl Server { + pub fn new( + query_sender: tokio::sync::mpsc::Sender>, + ) -> Self { + Self { query_sender } + } +} + +#[async_trait] +impl BlockAggregator for Server { + async fn get_block_height( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + tracing::debug!("get_block_height: {:?}", request); + let (response, receiver) = tokio::sync::oneshot::channel(); + let query = BlockAggregatorQuery::GetCurrentHeight { response }; + 
self.query_sender.send(query).await.map_err(|e| { + tonic::Status::internal(format!("Failed to send query: {}", e)) + })?; + let res = receiver.await; + match res { + Ok(height) => Ok(tonic::Response::new(BlockHeightResponse { + height: *height, + })), + Err(e) => Err(tonic::Status::internal(format!( + "Failed to receive height: {}", + e + ))), + } + } +} + +pub struct ProtobufAPI { + server_task_handle: tokio::task::JoinHandle<()>, + query_receiver: tokio::sync::mpsc::Receiver>, +} + +impl ProtobufAPI { + pub fn new(url: String) -> Self { + let (query_sender, query_receiver) = + tokio::sync::mpsc::channel::>(100); + let server = Server::new(query_sender); + let addr = url.parse().unwrap(); + let server_task_handle = tokio::spawn(async move { + tonic::transport::Server::builder() + .add_service(block_aggregator_server::BlockAggregatorServer::new(server)) + .serve(addr) + .await + .unwrap(); + }); + Self { + server_task_handle, + query_receiver, + } + } +} + +impl BlockAggregatorApi for ProtobufAPI { + type BlockRangeResponse = BlockRangeResponse; + + async fn await_query( + &mut self, + ) -> Result> { + let query = self + .query_receiver + .recv() + .await + .ok_or_else(|| Error::Api(anyhow::anyhow!("Channel closed")))?; + Ok(query) + } +} + +pub struct ProtobufClient; + +#[cfg(test)] +mod tests { + use super::*; + use block_aggregator_client::BlockAggregatorClient; + use fuel_core_types::fuel_types::BlockHeight; + + #[tokio::test] + async fn await_query__client_receives_expected_value() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .init(); + // given + let path = "[::1]:50051"; + let mut api = ProtobufAPI::new(path.to_string()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // call get current height endpoint with client + let url = "http://[::1]:50051"; + let mut client = BlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let handle = tokio::spawn(async 
move { + tracing::info!("querying with client"); + client + .get_block_height(BlockHeightRequest {}) + .await + .expect("could not get height") + }); + + // when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + // return response through query's channel + if let BlockAggregatorQuery::GetCurrentHeight { response } = query { + response.send(BlockHeight::new(42)).unwrap(); + } else { + panic!("expected GetCurrentHeight query"); + } + let res = handle.await.unwrap(); + + // assert client received expected value + assert_eq!(res.into_inner().height, 42); + } +} diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index 5d6dedd6cab..db1ed11e50d 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -2,7 +2,7 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum Error { #[error("Block Aggregator API error")] - Api, + Api(anyhow::Error), #[error("Block Source error: {0}")] BlockSource(anyhow::Error), #[error("Database error: {0}")] From 3e478231dea1f0dc4c7c7355b1bcecc2684751d5 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 11:25:27 -0600 Subject: [PATCH 079/100] Lint, update changelog --- .changes/added/3100.md | 1 + crates/services/block_aggregator_api/Cargo.toml | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 .changes/added/3100.md diff --git a/.changes/added/3100.md b/.changes/added/3100.md new file mode 100644 index 00000000000..5209d9a20a4 --- /dev/null +++ b/.changes/added/3100.md @@ -0,0 +1 @@ +Add protobuf api for querying the block aggregator \ No newline at end of file diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 426d9c411ec..da7010df52d 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ 
-13,16 +13,19 @@ fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } num_enum = { workspace = true } -prost ={ workspace = true } +prost = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } strum = { workspace = true } strum_macros = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } -tracing = { workspace = true } tonic = { workspace = true } tonic-prost = { workspace = true } +tracing = { workspace = true } + +[build-dependencies] +tonic-prost-build = { workspace = true } [dev-dependencies] fuel-core-services = { workspace = true, features = ["test-helpers"] } @@ -31,6 +34,3 @@ fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } postcard = { workspace = true } tokio-stream = { workspace = true } tracing-subscriber = { workspace = true } - -[build-dependencies] -tonic-prost-build = { workspace = true } From d808cf1bb5ed52241c510e2ac00f9c4d5b6905e6 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 11:41:12 -0600 Subject: [PATCH 080/100] include `protoc` in CI image --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7bc2df2138e..ba34a257092 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -174,6 +174,8 @@ jobs: uses: davidB/rust-cargo-make@v1 with: version: "0.36.4" + - name: Install Protoc + uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: From 7f01759691b818d82d6e67f450233735b631f419 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 12:06:33 -0600 Subject: [PATCH 081/100] Start adding range stream --- .../block_aggregator_api/proto/api.proto | 28 +++++++++++++++++++ .../src/api/protobuf_adapter.rs | 22 +++++++++++++-- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git 
a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 685d586b9a4..967ce24600e 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -1,5 +1,22 @@ syntax = "proto3"; +/* +pub enum BlockAggregatorQuery { + GetBlockRange { + first: BlockHeight, + last: BlockHeight, + response: tokio::sync::oneshot::Sender, + }, + GetCurrentHeight { + response: tokio::sync::oneshot::Sender, + }, + // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? + NewBlockSubscription { + response: tokio::sync::mpsc::Sender, + }, +} +*/ + package blockaggregator; message BlockHeightRequest {} @@ -8,6 +25,17 @@ message BlockHeightResponse { uint32 height = 1; } +message BlockRangeRequest { + uint32 start = 1; + uint32 end = 2; +} + +message Block { + uint32 height = 1; + bytes data = 2; +} + service BlockAggregator { rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); + rpc GetBlockRange (BlockRangeRequest) returns (stream Block); } \ No newline at end of file diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 863d1a4598b..8226d8a9094 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -7,10 +7,15 @@ use crate::{ result::Result, }; use async_trait::async_trait; +use tokio_stream::wrappers::ReceiverStream; +use tonic::Status; tonic::include_proto!("blockaggregator"); -use crate::result::Error; +use crate::{ + api::protobuf_adapter, + result::Error, +}; use block_aggregator_server::BlockAggregator; pub struct Server { @@ -27,6 +32,7 @@ impl Server { #[async_trait] impl BlockAggregator for Server { + type GetBlockRangeStream = ReceiverStream>; async fn get_block_height( &self, request: tonic::Request, @@ -48,6 +54,13 
@@ impl BlockAggregator for Server { ))), } } + + async fn get_block_range( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + todo!() + } } pub struct ProtobufAPI { @@ -99,7 +112,7 @@ mod tests { use fuel_core_types::fuel_types::BlockHeight; #[tokio::test] - async fn await_query__client_receives_expected_value() { + async fn await_query__get_current_height__client_receives_expected_value() { let _ = tracing_subscriber::fmt() .with_max_level(tracing::Level::DEBUG) .init(); @@ -137,4 +150,9 @@ mod tests { // assert client received expected value assert_eq!(res.into_inner().height, 42); } + + #[tokio::test] + async fn await_query__get_block_range__client_receives_expected_value() { + todo!() + } } From aad009a593654ac1b81988117060433358b58ef3 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 16 Sep 2025 12:33:10 -0600 Subject: [PATCH 082/100] Add failing test --- .../block_aggregator_api/proto/api.proto | 3 +- .../src/api/protobuf_adapter.rs | 68 +++++++++++++++++-- .../src/block_range_response.rs | 17 ++++- 3 files changed, 81 insertions(+), 7 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 967ce24600e..1530852fb36 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -31,8 +31,7 @@ message BlockRangeRequest { } message Block { - uint32 height = 1; - bytes data = 2; + bytes data = 1; } service BlockAggregator { diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 8226d8a9094..ce2f0218b33 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -64,7 +64,7 @@ impl BlockAggregator for Server { } pub struct ProtobufAPI { - server_task_handle: tokio::task::JoinHandle<()>, + _server_task_handle: 
tokio::task::JoinHandle<()>, query_receiver: tokio::sync::mpsc::Receiver>, } @@ -74,7 +74,7 @@ impl ProtobufAPI { tokio::sync::mpsc::channel::>(100); let server = Server::new(query_sender); let addr = url.parse().unwrap(); - let server_task_handle = tokio::spawn(async move { + let _server_task_handle = tokio::spawn(async move { tonic::transport::Server::builder() .add_service(block_aggregator_server::BlockAggregatorServer::new(server)) .serve(addr) @@ -82,7 +82,7 @@ impl ProtobufAPI { .unwrap(); }); Self { - server_task_handle, + _server_task_handle, query_receiver, } } @@ -108,8 +108,14 @@ pub struct ProtobufClient; #[cfg(test)] mod tests { use super::*; + use crate::blocks::Block; use block_aggregator_client::BlockAggregatorClient; + use bytes::Bytes; use fuel_core_types::fuel_types::BlockHeight; + use futures::{ + StreamExt, + TryStreamExt, + }; #[tokio::test] async fn await_query__get_current_height__client_receives_expected_value() { @@ -153,6 +159,60 @@ mod tests { #[tokio::test] async fn await_query__get_block_range__client_receives_expected_value() { - todo!() + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .init(); + // given + let path = "[::1]:50051"; + let mut api = ProtobufAPI::new(path.to_string()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // call get current height endpoint with client + let url = "http://[::1]:50051"; + let mut client = BlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let request = BlockRangeRequest { start: 0, end: 1 }; + let handle = tokio::spawn(async move { + tracing::info!("querying with client"); + client + .get_block_range(request) + .await + .expect("could not get height") + }); + + // when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + let block1 = Block::new(Bytes::from(vec![0u8; 100])); + let block2 = Block::new(Bytes::from(vec![1u8; 100])); + let list = 
vec![block1, block2]; + // return response through query's channel + if let BlockAggregatorQuery::GetBlockRange { + first, + last, + response, + } = query + { + let stream = tokio_stream::iter(list.clone()).boxed(); + let range = BlockRangeResponse::Literal(stream); + response.send(range).unwrap(); + } else { + panic!("expected GetBlockRange query"); + } + let response = handle.await.unwrap(); + let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); + let actual: Vec> = response + .into_inner() + .try_collect::>() + .await + .unwrap() + .into_iter() + .map(|b| b.data.to_vec()) + .collect(); + + assert_eq!(expected, actual); } } diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index c8591727289..d6353f94237 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,5 +1,8 @@ use crate::blocks::Block; -use fuel_core_services::stream::BoxStream; +use fuel_core_services::stream::Stream; +use std::fmt; + +pub type BoxStream = core::pin::Pin + Send + 'static>>; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { @@ -8,3 +11,15 @@ pub enum BlockRangeResponse { /// A remote URL where the blocks can be fetched Remote(String), } + +#[cfg(test)] +impl std::fmt::Debug for BlockRangeResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BlockRangeResponse::Literal(_) => f.debug_struct("Literal").finish(), + BlockRangeResponse::Remote(url) => { + f.debug_struct("Remote").field("url", url).finish() + } + } + } +} From d463547a11d9648d3a22f764edae4fe3ca8e8062 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 17 Sep 2025 16:54:01 -0600 Subject: [PATCH 083/100] Add stream of blocks --- .../src/api/protobuf_adapter.rs | 168 ++++++------------ 
.../src/api/protobuf_adapter/tests.rs | 122 +++++++++++++ 2 files changed, 176 insertions(+), 114 deletions(-) create mode 100644 crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index ce2f0218b33..10454942e32 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -7,6 +7,9 @@ use crate::{ result::Result, }; use async_trait::async_trait; +use futures::StreamExt; +use std::cell::RefCell; +use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; use tonic::Status; @@ -18,6 +21,9 @@ use crate::{ }; use block_aggregator_server::BlockAggregator; +#[cfg(test)] +mod tests; + pub struct Server { query_sender: tokio::sync::mpsc::Sender>, } @@ -32,7 +38,6 @@ impl Server { #[async_trait] impl BlockAggregator for Server { - type GetBlockRangeStream = ReceiverStream>; async fn get_block_height( &self, request: tonic::Request, @@ -54,12 +59,59 @@ impl BlockAggregator for Server { ))), } } + type GetBlockRangeStream = ReceiverStream>; async fn get_block_range( &self, request: tonic::Request, ) -> Result, tonic::Status> { - todo!() + tracing::warn!("get_block_range: {:?}", request); + let req = request.into_inner(); + let (response, receiver) = tokio::sync::oneshot::channel(); + let query = BlockAggregatorQuery::GetBlockRange { + first: req.start.into(), + last: req.end.into(), + response, + }; + self.query_sender + .send(query) + .await + .map_err(|e| Status::internal(format!("Failed to send query: {}", e)))?; + let res = receiver.await; + match res { + Ok(block_range_response) => match block_range_response { + BlockRangeResponse::Literal(inner) => { + let (tx, rx) = + tokio::sync::mpsc::channel::>(16); + + // TODO: is this safe if we just drop the join handle? 
+ let _ = tokio::spawn(async move { + let mut s = inner; + while let Some(block) = s.next().await { + // Convert your internal `blocks::Block` into the protobuf `Block`. + let pb = Block { + data: block.bytes().to_vec(), + }; + + // If the receiver side was dropped, stop forwarding. + if tx.send(Ok(pb)).await.is_err() { + break; + } + } + }); + + Ok(tonic::Response::new(ReceiverStream::new(rx))) + } + BlockRangeResponse::Remote(_) => { + tracing::error!("Remote block range not implemented"); + todo!() + } + }, + Err(e) => Err(tonic::Status::internal(format!( + "Failed to receive block range: {}", + e + ))), + } } } @@ -104,115 +156,3 @@ impl BlockAggregatorApi for ProtobufAPI { } pub struct ProtobufClient; - -#[cfg(test)] -mod tests { - use super::*; - use crate::blocks::Block; - use block_aggregator_client::BlockAggregatorClient; - use bytes::Bytes; - use fuel_core_types::fuel_types::BlockHeight; - use futures::{ - StreamExt, - TryStreamExt, - }; - - #[tokio::test] - async fn await_query__get_current_height__client_receives_expected_value() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::DEBUG) - .init(); - // given - let path = "[::1]:50051"; - let mut api = ProtobufAPI::new(path.to_string()); - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - // call get current height endpoint with client - let url = "http://[::1]:50051"; - let mut client = BlockAggregatorClient::connect(url.to_string()) - .await - .expect("could not connect to server"); - let handle = tokio::spawn(async move { - tracing::info!("querying with client"); - client - .get_block_height(BlockHeightRequest {}) - .await - .expect("could not get height") - }); - - // when - tracing::info!("awaiting query"); - let query = api.await_query().await.unwrap(); - - // then - // return response through query's channel - if let BlockAggregatorQuery::GetCurrentHeight { response } = query { - response.send(BlockHeight::new(42)).unwrap(); - } else { - panic!("expected 
GetCurrentHeight query"); - } - let res = handle.await.unwrap(); - - // assert client received expected value - assert_eq!(res.into_inner().height, 42); - } - - #[tokio::test] - async fn await_query__get_block_range__client_receives_expected_value() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::DEBUG) - .init(); - // given - let path = "[::1]:50051"; - let mut api = ProtobufAPI::new(path.to_string()); - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - // call get current height endpoint with client - let url = "http://[::1]:50051"; - let mut client = BlockAggregatorClient::connect(url.to_string()) - .await - .expect("could not connect to server"); - let request = BlockRangeRequest { start: 0, end: 1 }; - let handle = tokio::spawn(async move { - tracing::info!("querying with client"); - client - .get_block_range(request) - .await - .expect("could not get height") - }); - - // when - tracing::info!("awaiting query"); - let query = api.await_query().await.unwrap(); - - // then - let block1 = Block::new(Bytes::from(vec![0u8; 100])); - let block2 = Block::new(Bytes::from(vec![1u8; 100])); - let list = vec![block1, block2]; - // return response through query's channel - if let BlockAggregatorQuery::GetBlockRange { - first, - last, - response, - } = query - { - let stream = tokio_stream::iter(list.clone()).boxed(); - let range = BlockRangeResponse::Literal(stream); - response.send(range).unwrap(); - } else { - panic!("expected GetBlockRange query"); - } - let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); - let actual: Vec> = response - .into_inner() - .try_collect::>() - .await - .unwrap() - .into_iter() - .map(|b| b.data.to_vec()) - .collect(); - - assert_eq!(expected, actual); - } -} diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs new file mode 100644 index 
00000000000..cb773ffa3cd --- /dev/null +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -0,0 +1,122 @@ +#![allow(non_snake_case)] + +use crate::{ + api::{ + BlockAggregatorApi, + BlockAggregatorQuery, + protobuf_adapter::{ + BlockHeightRequest, + BlockRangeRequest, + ProtobufAPI, + block_aggregator_client::BlockAggregatorClient, + }, + }, + block_range_response::BlockRangeResponse, + blocks::Block, +}; +use bytes::Bytes; +use fuel_core_types::fuel_types::BlockHeight; +use futures::{ + StreamExt, + TryStreamExt, +}; + +#[tokio::test] +async fn await_query__get_current_height__client_receives_expected_value() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .init(); + // given + let path = "[::1]:50051"; + let mut api = ProtobufAPI::new(path.to_string()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // call get current height endpoint with client + let url = "http://[::1]:50051"; + let mut client = BlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let handle = tokio::spawn(async move { + tracing::info!("querying with client"); + client + .get_block_height(BlockHeightRequest {}) + .await + .expect("could not get height") + }); + + // when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + // return response through query's channel + if let BlockAggregatorQuery::GetCurrentHeight { response } = query { + response.send(BlockHeight::new(42)).unwrap(); + } else { + panic!("expected GetCurrentHeight query"); + } + let res = handle.await.unwrap(); + + // assert client received expected value + assert_eq!(res.into_inner().height, 42); +} + +#[tokio::test] +async fn await_query__get_block_range__client_receives_expected_value() { + // given + let path = "[::1]:50051"; + let mut api = ProtobufAPI::new(path.to_string()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // 
call get current height endpoint with client + let url = "http://[::1]:50051"; + let mut client = BlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let request = BlockRangeRequest { start: 0, end: 1 }; + let handle = tokio::spawn(async move { + tracing::info!("querying with client"); + client + .get_block_range(request) + .await + .expect("could not get height") + }); + + // when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + let block1 = Block::new(Bytes::from(vec![0u8; 100])); + let block2 = Block::new(Bytes::from(vec![1u8; 100])); + let list = vec![block1, block2]; + // return response through query's channel + if let BlockAggregatorQuery::GetBlockRange { + first, + last, + response, + } = query + { + assert_eq!(first, BlockHeight::new(0)); + assert_eq!(last, BlockHeight::new(1)); + tracing::info!("correct query received, sending response"); + let stream = tokio_stream::iter(list.clone()).boxed(); + let range = BlockRangeResponse::Literal(stream); + response.send(range).unwrap(); + } else { + panic!("expected GetBlockRange query"); + } + tracing::info!("awaiting query"); + let response = handle.await.unwrap(); + let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); + let actual: Vec> = response + .into_inner() + .try_collect::>() + .await + .unwrap() + .into_iter() + .map(|b| b.data.to_vec()) + .collect(); + + assert_eq!(expected, actual); +} From 1519a28fa302328c04dee14ea2d5d335d38e587e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 18 Sep 2025 09:49:48 -0600 Subject: [PATCH 084/100] Add test for new block stream --- .../services/block_aggregator_api/Cargo.toml | 1 + .../block_aggregator_api/proto/api.proto | 3 + .../src/api/protobuf_adapter.rs | 39 ++++++++-- .../src/api/protobuf_adapter/tests.rs | 75 +++++++++++++++++-- 4 files changed, 104 insertions(+), 14 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml 
b/crates/services/block_aggregator_api/Cargo.toml index da7010df52d..9af16dd1f06 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -33,4 +33,5 @@ fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } postcard = { workspace = true } tokio-stream = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread"] } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 1530852fb36..f2f79928bd7 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -34,7 +34,10 @@ message Block { bytes data = 1; } +message NewBlockSubscriptionRequest {} + service BlockAggregator { rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); rpc GetBlockRange (BlockRangeRequest) returns (stream Block); + rpc NewBlockSubscription (NewBlockSubscriptionRequest) returns (stream Block); } \ No newline at end of file diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 10454942e32..2c368f619a5 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -8,17 +8,12 @@ use crate::{ }; use async_trait::async_trait; use futures::StreamExt; -use std::cell::RefCell; -use tokio::task::JoinHandle; use tokio_stream::wrappers::ReceiverStream; use tonic::Status; tonic::include_proto!("blockaggregator"); -use crate::{ - api::protobuf_adapter, - result::Error, -}; +use crate::result::Error; use block_aggregator_server::BlockAggregator; #[cfg(test)] @@ -59,7 +54,7 @@ impl BlockAggregator for Server { ))), } } - type GetBlockRangeStream = ReceiverStream>; 
+ type GetBlockRangeStream = ReceiverStream>; async fn get_block_range( &self, @@ -113,6 +108,36 @@ impl BlockAggregator for Server { ))), } } + + type NewBlockSubscriptionStream = ReceiverStream>; + + async fn new_block_subscription( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + const ARB_CHANNEL_SIZE: usize = 100; + tracing::warn!("get_block_range: {:?}", request); + let (response, mut receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); + let query = BlockAggregatorQuery::NewBlockSubscription { response }; + self.query_sender + .send(query) + .await + .map_err(|e| Status::internal(format!("Failed to send query: {}", e)))?; + + let (task_sender, task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); + let _ = tokio::spawn(async move { + while let Some(nb) = receiver.recv().await { + let block = Block { + data: nb.block.bytes().to_vec(), + }; + if task_sender.send(Ok(block)).await.is_err() { + break; + } + } + }); + + Ok(tonic::Response::new(ReceiverStream::new(task_receiver))) + } } pub struct ProtobufAPI { diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index cb773ffa3cd..ac4b000f02e 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -1,12 +1,14 @@ #![allow(non_snake_case)] use crate::{ + NewBlock, api::{ BlockAggregatorApi, BlockAggregatorQuery, protobuf_adapter::{ BlockHeightRequest, BlockRangeRequest, + NewBlockSubscriptionRequest, ProtobufAPI, block_aggregator_client::BlockAggregatorClient, }, @@ -20,19 +22,23 @@ use futures::{ StreamExt, TryStreamExt, }; +use std::net::TcpListener; + +fn free_local_addr() -> String { + let listener = TcpListener::bind("[::1]:0").unwrap(); + let addr = listener.local_addr().unwrap(); // OS picks a free port + format!("[::1]:{}", addr.port()) +} #[tokio::test] async fn 
await_query__get_current_height__client_receives_expected_value() { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::DEBUG) - .init(); // given - let path = "[::1]:50051"; + let path = free_local_addr(); let mut api = ProtobufAPI::new(path.to_string()); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client - let url = "http://[::1]:50051"; + let url = format!("http://{}", path); let mut client = BlockAggregatorClient::connect(url.to_string()) .await .expect("could not connect to server"); @@ -64,12 +70,12 @@ async fn await_query__get_current_height__client_receives_expected_value() { #[tokio::test] async fn await_query__get_block_range__client_receives_expected_value() { // given - let path = "[::1]:50051"; + let path = free_local_addr(); let mut api = ProtobufAPI::new(path.to_string()); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client - let url = "http://[::1]:50051"; + let url = format!("http://{}", path); let mut client = BlockAggregatorClient::connect(url.to_string()) .await .expect("could not connect to server"); @@ -120,3 +126,58 @@ async fn await_query__get_block_range__client_receives_expected_value() { assert_eq!(expected, actual); } + +#[tokio::test] +async fn await_query__new_block_stream__client_receives_expected_value() { + // given + let path = free_local_addr(); + let mut api = ProtobufAPI::new(path.to_string()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // call get current height endpoint with client + let url = format!("http://{}", path); + let mut client = BlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let request = NewBlockSubscriptionRequest {}; + let handle = tokio::spawn(async move { + tracing::info!("querying with client"); + client + .new_block_subscription(request) + .await + .expect("could not get height") + }); + + 
// when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + let height1 = BlockHeight::new(0); + let height2 = BlockHeight::new(1); + let block1 = Block::new(Bytes::from(vec![0u8; 100])); + let block2 = Block::new(Bytes::from(vec![1u8; 100])); + let list = vec![(height1, block1), (height2, block2)]; + if let BlockAggregatorQuery::NewBlockSubscription { response } = query { + tracing::info!("correct query received, sending response"); + for (height, block) in list.clone() { + let new_block = NewBlock::new(height, block); + response.send(new_block).await.unwrap(); + } + } else { + panic!("expected GetBlockRange query"); + } + tracing::info!("awaiting query"); + let response = handle.await.unwrap(); + let expected: Vec> = list.iter().map(|(_, b)| b.bytes().to_vec()).collect(); + let actual: Vec> = response + .into_inner() + .try_collect::>() + .await + .unwrap() + .into_iter() + .map(|b| b.data.to_vec()) + .collect(); + + assert_eq!(expected, actual); +} From 9d68fc3b99b1e5a82cc1066e1b0698548dd9751c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 18 Sep 2025 09:53:38 -0600 Subject: [PATCH 085/100] Lint toml --- crates/services/block_aggregator_api/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 9af16dd1f06..da7010df52d 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -33,5 +33,4 @@ fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } postcard = { workspace = true } tokio-stream = { workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread"] } tracing-subscriber = { workspace = true } From 03152285a54db3d0f560a79b4c4a3dc7817712d8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 18 Sep 2025 09:55:43 -0600 Subject: [PATCH 
086/100] Remove comments --- .../block_aggregator_api/proto/api.proto | 17 ----------------- .../src/api/protobuf_adapter.rs | 4 ---- 2 files changed, 21 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index f2f79928bd7..acad8e1f6fb 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -1,22 +1,5 @@ syntax = "proto3"; -/* -pub enum BlockAggregatorQuery { - GetBlockRange { - first: BlockHeight, - last: BlockHeight, - response: tokio::sync::oneshot::Sender, - }, - GetCurrentHeight { - response: tokio::sync::oneshot::Sender, - }, - // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? - NewBlockSubscription { - response: tokio::sync::mpsc::Sender, - }, -} -*/ - package blockaggregator; message BlockHeightRequest {} diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 2c368f619a5..caad6d68602 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -79,16 +79,12 @@ impl BlockAggregator for Server { let (tx, rx) = tokio::sync::mpsc::channel::>(16); - // TODO: is this safe if we just drop the join handle? let _ = tokio::spawn(async move { let mut s = inner; while let Some(block) = s.next().await { - // Convert your internal `blocks::Block` into the protobuf `Block`. let pb = Block { data: block.bytes().to_vec(), }; - - // If the receiver side was dropped, stop forwarding. 
if tx.send(Ok(pb)).await.is_err() { break; } From 7c63d40abaa58714bd2a06b7a9b0352b04fb10b2 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Fri, 19 Sep 2025 11:35:56 -0600 Subject: [PATCH 087/100] Fix compilation --- crates/services/block_aggregator_api/Cargo.toml | 1 + .../services/block_aggregator_api/src/api/protobuf_adapter.rs | 4 ++-- .../services/block_aggregator_api/src/block_range_response.rs | 3 +-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index da7010df52d..01c8d9be2e2 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -20,6 +20,7 @@ strum = { workspace = true } strum_macros = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } +tokio-stream = { workspace = true } tonic = { workspace = true } tonic-prost = { workspace = true } tracing = { workspace = true } diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index caad6d68602..585d726af3c 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -79,7 +79,7 @@ impl BlockAggregator for Server { let (tx, rx) = tokio::sync::mpsc::channel::>(16); - let _ = tokio::spawn(async move { + tokio::spawn(async move { let mut s = inner; while let Some(block) = s.next().await { let pb = Block { @@ -121,7 +121,7 @@ impl BlockAggregator for Server { .map_err(|e| Status::internal(format!("Failed to send query: {}", e)))?; let (task_sender, task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let _ = tokio::spawn(async move { + tokio::spawn(async move { while let Some(nb) = receiver.recv().await { let block = Block { data: nb.block.bytes().to_vec(), diff --git 
a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index d6353f94237..5e071bc3328 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,6 +1,5 @@ use crate::blocks::Block; use fuel_core_services::stream::Stream; -use std::fmt; pub type BoxStream = core::pin::Pin + Send + 'static>>; @@ -14,7 +13,7 @@ pub enum BlockRangeResponse { #[cfg(test)] impl std::fmt::Debug for BlockRangeResponse { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { BlockRangeResponse::Literal(_) => f.debug_struct("Literal").finish(), BlockRangeResponse::Remote(url) => { From a1af5a7c43b972c4eff40a46b333a6c98413f993 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 23 Sep 2025 14:01:34 -0600 Subject: [PATCH 088/100] Remove comment, rename fn --- crates/services/block_aggregator_api/src/block_aggregator.rs | 1 - crates/services/block_aggregator_api/src/lib.rs | 2 +- crates/services/block_aggregator_api/src/tests.rs | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index 273c340dde7..a271c129b8e 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -128,7 +128,6 @@ where }, } }); - // do more stuff (id, block) } BlockSourceEvent::OldBlock(id, block) => (id, block), diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index abe6914c715..d4f787bc4b0 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -46,7 +46,7 @@ impl NewBlock { Self { height, block } } - pub fn 
decompose(self) -> (BlockHeight, Block) { + pub fn into_inner(self) -> (BlockHeight, Block) { (self.height, self.block) } } diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index c1110d4af5e..6e66ed6d983 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -242,7 +242,7 @@ async fn run__new_block_subscription__sends_new_block() { .await .unwrap() .unwrap() - .decompose(); + .into_inner(); assert_eq!(expected_block, actual_block); assert_eq!(expected_height, actual_height); From 96362047a67d5a81368f306b405a633775cf67e8 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 25 Sep 2025 08:25:18 -0600 Subject: [PATCH 089/100] fmt --- crates/services/block_aggregator_api/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index 68a8d64d08c..c7a2998ebbf 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -299,4 +299,4 @@ async fn await_response_with_timeout(mut response: Receiver) -> Result Date: Thu, 25 Sep 2025 10:13:34 -0600 Subject: [PATCH 090/100] Revert mistake from merge --- .../services/block_aggregator_api/src/tests.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index c7a2998ebbf..c00c8cf2a6a 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -243,13 +243,10 @@ async fn run__new_block_subscription__sends_new_block() { let _ = srv.run(&mut watcher).await; // then - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let (actual_height, actual_block) = - 
tokio::time::timeout(tokio::time::Duration::from_secs(1), response.recv()) - .await - .unwrap() - .unwrap() - .into_inner(); + let (actual_height, actual_block) = await_response_with_timeout(response) + .await + .unwrap() + .into_inner(); assert_eq!(expected_block, actual_block); assert_eq!(expected_height, actual_height); @@ -279,10 +276,7 @@ async fn run__new_block_subscription__does_not_send_syncing_blocks() { let _ = srv.run(&mut watcher).await; // then - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let res = - tokio::time::timeout(tokio::time::Duration::from_millis(100), response.recv()) - .await; + let res = await_response_with_timeout(response).await; assert!(res.is_err(), "should have timed out"); // cleanup From 3b917ba3fd27ba5473f7be8a7606eb315d59e757 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 25 Sep 2025 11:56:15 -0600 Subject: [PATCH 091/100] Remove `mut` --- crates/services/block_aggregator_api/src/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index c00c8cf2a6a..ac069687760 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -233,7 +233,7 @@ async fn run__new_block_subscription__sends_new_block() { let expected_block = Block::random(&mut rng); let expected_height = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); - let (query, mut response) = BlockAggregatorQuery::new_block_subscription(); + let (query, response) = BlockAggregatorQuery::new_block_subscription(); // when sender.send(query).await.unwrap(); @@ -266,7 +266,7 @@ async fn run__new_block_subscription__does_not_send_syncing_blocks() { let block = Block::random(&mut rng); let height = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); - let (query, mut response) = BlockAggregatorQuery::new_block_subscription(); + let 
(query, response) = BlockAggregatorQuery::new_block_subscription(); // when sender.send(query).await.unwrap(); From 47a89dba711ee7ea6fd50af3f2cb89c4798d1c02 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 09:42:59 -0600 Subject: [PATCH 092/100] Add recommended changes from previous PR --- .../block_aggregator_api/src/blocks/importer_and_db_source.rs | 4 ---- .../src/blocks/importer_and_db_source/sync_service.rs | 4 +--- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 7e100575bd6..7bfe3f233a6 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -108,10 +108,6 @@ where { async fn next_block(&mut self) -> Result { tracing::debug!("awaiting next block"); - // self.receiver - // .recv() - // .await - // .ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) tokio::select! 
{ block_res = self.receiver.recv() => { block_res.ok_or(Error::BlockSource(anyhow!("Block source channel closed"))) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index 683e36b4ba5..90be34c0506 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -76,9 +76,7 @@ where if let Some(block) = maybe_block { let tx_ids = block.transactions(); let txs = self.get_txs(tx_ids)?; - let block = - as Clone>::clone(&block) - .uncompress(txs); + let block = block.into_owned().uncompress(txs); Ok(Some(block)) } else { Ok(None) From c61cbae67ce643317a222ad6a6b1b5bb8556ca4c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 09:45:55 -0600 Subject: [PATCH 093/100] Fix spelling --- .../consensus_module/poa/src/service_test/trigger_tests.rs | 2 +- tests/tests/da_compression.rs | 2 +- tests/tests/relayer.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs index e989ea26d9a..2d167fd1741 100644 --- a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs +++ b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs @@ -474,7 +474,7 @@ async fn interval_trigger_produces_blocks_in_the_future_when_time_rewinds() { // The fist block should be produced after the given block time. assert_eq!(first_block_time, start_time + block_time.as_secs()); - // Even though the real time clock rewinded, the second block is produced with a future timestamp + // Even though the real time clock rewound, the second block is produced with a future timestamp // similarly to how it works when time is lagging. 
assert_eq!(second_block_time, start_time + block_time.as_secs() * 2); } diff --git a/tests/tests/da_compression.rs b/tests/tests/da_compression.rs index deba78d3739..87e54b0de1c 100644 --- a/tests/tests/da_compression.rs +++ b/tests/tests/da_compression.rs @@ -264,7 +264,7 @@ async fn da_compression__starts_and_compresses_blocks_correctly_from_empty_datab } #[tokio::test] -async fn da_compression__db_can_be_rewinded() { +async fn da_compression__db_can_be_rewound() { // given let rollback_target_height = 0; let blocks_to_produce = 10; diff --git a/tests/tests/relayer.rs b/tests/tests/relayer.rs index b662ba7af92..6e98f030644 100644 --- a/tests/tests/relayer.rs +++ b/tests/tests/relayer.rs @@ -721,7 +721,7 @@ async fn balances_and_coins_to_spend_never_return_retryable_messages() { } #[tokio::test] -async fn relayer_db_can_be_rewinded() { +async fn relayer_db_can_be_rewound() { // Given let rollback_target_height = 0; let num_da_blocks = 10; From 8bb41cf7933da8c1d6f9d66aba497edc5f3183aa Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 1 Oct 2025 15:16:40 -0600 Subject: [PATCH 094/100] MAke proto block response an enum --- .../block_aggregator_api/proto/api.proto | 11 +++++++++-- .../src/api/protobuf_adapter.rs | 16 +++++++++++----- .../src/api/protobuf_adapter/tests.rs | 17 +++++++++++++++-- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index acad8e1f6fb..1e34a8fa8de 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -17,10 +17,17 @@ message Block { bytes data = 1; } +message BlockResponse { + oneof payload { + Block literal = 1; + string remote = 2; + } +} + message NewBlockSubscriptionRequest {} service BlockAggregator { rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); - rpc GetBlockRange (BlockRangeRequest) returns (stream Block); - rpc 
NewBlockSubscription (NewBlockSubscriptionRequest) returns (stream Block); + rpc GetBlockRange (BlockRangeRequest) returns (stream BlockResponse); + rpc NewBlockSubscription (NewBlockSubscriptionRequest) returns (stream BlockResponse); } \ No newline at end of file diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 585d726af3c..17b04c0c0e3 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -54,7 +54,7 @@ impl BlockAggregator for Server { ))), } } - type GetBlockRangeStream = ReceiverStream>; + type GetBlockRangeStream = ReceiverStream>; async fn get_block_range( &self, @@ -77,7 +77,7 @@ impl BlockAggregator for Server { Ok(block_range_response) => match block_range_response { BlockRangeResponse::Literal(inner) => { let (tx, rx) = - tokio::sync::mpsc::channel::>(16); + tokio::sync::mpsc::channel::>(16); tokio::spawn(async move { let mut s = inner; @@ -85,7 +85,10 @@ impl BlockAggregator for Server { let pb = Block { data: block.bytes().to_vec(), }; - if tx.send(Ok(pb)).await.is_err() { + let response = BlockResponse { + payload: Some(block_response::Payload::Literal(pb)), + }; + if tx.send(Ok(response)).await.is_err() { break; } } @@ -105,7 +108,7 @@ impl BlockAggregator for Server { } } - type NewBlockSubscriptionStream = ReceiverStream>; + type NewBlockSubscriptionStream = ReceiverStream>; async fn new_block_subscription( &self, @@ -126,7 +129,10 @@ impl BlockAggregator for Server { let block = Block { data: nb.block.bytes().to_vec(), }; - if task_sender.send(Ok(block)).await.is_err() { + let response = BlockResponse { + payload: Some(block_response::Payload::Literal(block)), + }; + if task_sender.send(Ok(response)).await.is_err() { break; } } diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs 
b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index ac4b000f02e..1617090a7dd 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -11,6 +11,7 @@ use crate::{ NewBlockSubscriptionRequest, ProtobufAPI, block_aggregator_client::BlockAggregatorClient, + block_response::Payload, }, }, block_range_response::BlockRangeResponse, @@ -121,7 +122,13 @@ async fn await_query__get_block_range__client_receives_expected_value() { .await .unwrap() .into_iter() - .map(|b| b.data.to_vec()) + .map(|b| { + if let Some(Payload::Literal(inner)) = b.payload { + inner.data.to_vec() + } else { + panic!("unexpected response type") + } + }) .collect(); assert_eq!(expected, actual); @@ -176,7 +183,13 @@ async fn await_query__new_block_stream__client_receives_expected_value() { .await .unwrap() .into_iter() - .map(|b| b.data.to_vec()) + .map(|b| { + if let Some(Payload::Literal(inner)) = b.payload { + inner.data.to_vec() + } else { + panic!("unexpected response type") + } + }) .collect(); assert_eq!(expected, actual); From ad652dd4949156804a91bb905b5ac3b884dd01b3 Mon Sep 17 00:00:00 2001 From: Mitchell Turner Date: Thu, 2 Oct 2025 09:33:42 -0600 Subject: [PATCH 095/100] Update crates/services/block_aggregator_api/src/api/protobuf_adapter.rs Co-authored-by: Aaryamann Challani <43716372+rymnc@users.noreply.github.com> --- .../services/block_aggregator_api/src/api/protobuf_adapter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index 17b04c0c0e3..aecc845d4f5 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -60,7 +60,7 @@ impl BlockAggregator for Server { &self, request: tonic::Request, ) -> Result, 
tonic::Status> { - tracing::warn!("get_block_range: {:?}", request); + tracing::debug!("get_block_range: {:?}", request); let req = request.into_inner(); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetBlockRange { From a0d99f28e1eb8fae1d72853c31284f1d1a78d9af Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 2 Oct 2025 09:39:20 -0600 Subject: [PATCH 096/100] include inner error in API error message --- crates/services/block_aggregator_api/src/result.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index db1ed11e50d..b687f1ec6cc 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -1,7 +1,7 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum Error { - #[error("Block Aggregator API error")] + #[error("Block Aggregator API error: {0}")] Api(anyhow::Error), #[error("Block Source error: {0}")] BlockSource(anyhow::Error), From 55b368647660250de97b051d8c5ff27c2d6d8606 Mon Sep 17 00:00:00 2001 From: Mitchell Turner Date: Wed, 12 Nov 2025 07:57:17 -0700 Subject: [PATCH 097/100] Integrate Block Aggregator (#3101) ## Linked Issues/PRs Closes https://github.com/FuelLabs/fuel-core/issues/3090 ## Description ## Checklist - [ ] Breaking changes are clearly marked as such in the PR description and changelog - [ ] New behavior is reflected in tests - [ ] [The specification](https://github.com/FuelLabs/fuel-specs/) matches the implemented behavior (link update PR if changes are needed) ### Before requesting review - [ ] I have reviewed the code myself - [ ] I have created follow-up issues caused by this PR and linked them here ### After merging, notify other teams [Add or remove entries as needed] - [ ] [Rust SDK](https://github.com/FuelLabs/fuels-rs/) - [ ] [Sway compiler](https://github.com/FuelLabs/sway/) - [ ] [Platform 
documentation](https://github.com/FuelLabs/devrel-requests/issues/new?assignees=&labels=new+request&projects=&template=NEW-REQUEST.yml&title=%5BRequest%5D%3A+) (for out-of-organization contributors, the person merging the PR will do this) - [ ] Someone else? --- > [!NOTE] > [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) is generating a summary for commit 4bb648d9e9d459110917c500f54f417776eac160. Configure [here](https://cursor.com/dashboard?tab=bugbot). --- .changes/added/3101.md | 1 + .changes/added/3116.md | 1 + .changes/fixed/3112.md | 1 + .github/workflows/ci.yml | 1 + .gitignore | 1 + .typos.toml | 2 + Cargo.lock | 9 + Cargo.toml | 3 +- benches/benches/block_target_gas.rs | 1 + .../tests/integration_tests.rs | 7 +- bin/fuel-core/Cargo.toml | 2 + bin/fuel-core/src/cli/run.rs | 10 + bin/fuel-core/src/cli/run/rpc.rs | 21 + crates/fuel-core/Cargo.toml | 2 + crates/fuel-core/src/combined_database.rs | 20 + crates/fuel-core/src/database.rs | 7 + .../src/database/database_description.rs | 2 + .../database_description/block_aggregator.rs | 27 + crates/fuel-core/src/p2p_test_helpers.rs | 11 +- crates/fuel-core/src/service.rs | 5 +- crates/fuel-core/src/service/config.rs | 20 + crates/fuel-core/src/service/sub_services.rs | 52 +- .../services/block_aggregator_api/Cargo.toml | 10 +- crates/services/block_aggregator_api/build.rs | 5 +- .../serializer_adapter.txt | 7 + .../block_aggregator_api/proto/api.proto | 648 ++++++- .../services/block_aggregator_api/src/api.rs | 20 +- .../src/api/protobuf_adapter.rs | 108 +- .../src/api/protobuf_adapter/tests.rs | 81 +- .../src/block_aggregator.rs | 21 +- .../src/block_range_response.rs | 4 +- .../block_aggregator_api/src/blocks.rs | 11 +- .../src/blocks/importer_and_db_source.rs | 19 +- .../importer_service.rs | 17 +- .../serializer_adapter.rs | 1524 +++++++++++++++++ .../importer_and_db_source/sync_service.rs | 22 +- .../blocks/importer_and_db_source/tests.rs | 4 +- .../services/block_aggregator_api/src/db.rs | 8 +- 
.../block_aggregator_api/src/db/storage_db.rs | 13 +- .../src/db/storage_db/table.rs | 4 +- .../src/db/storage_db/tests.rs | 46 +- .../services/block_aggregator_api/src/lib.rs | 137 +- .../src/protobuf_types.rs | 1 + .../block_aggregator_api/src/result.rs | 2 + .../block_aggregator_api/src/tests.rs | 28 +- crates/services/importer/src/importer.rs | 1 + crates/types/Cargo.toml | 3 +- crates/types/src/blockchain/header.rs | 102 +- crates/types/src/blockchain/header/v1.rs | 15 + crates/types/src/blockchain/header/v2.rs | 15 + crates/types/src/test_helpers.rs | 635 +++++++ tests/Cargo.toml | 3 + tests/test-helpers/Cargo.toml | 4 + tests/test-helpers/src/client_ext.rs | 64 + tests/test-helpers/src/lib.rs | 2 + tests/tests/blocks.rs | 255 +-- tests/tests/lib.rs | 3 + tests/tests/rpc.rs | 156 ++ 58 files changed, 3847 insertions(+), 357 deletions(-) create mode 100644 .changes/added/3101.md create mode 100644 .changes/added/3116.md create mode 100644 .changes/fixed/3112.md create mode 100644 bin/fuel-core/src/cli/run/rpc.rs create mode 100644 crates/fuel-core/src/database/database_description/block_aggregator.rs create mode 100644 crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt create mode 100644 crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs create mode 100644 crates/services/block_aggregator_api/src/protobuf_types.rs create mode 100644 tests/test-helpers/src/client_ext.rs create mode 100644 tests/tests/rpc.rs diff --git a/.changes/added/3101.md b/.changes/added/3101.md new file mode 100644 index 00000000000..551e11a2cc9 --- /dev/null +++ b/.changes/added/3101.md @@ -0,0 +1 @@ +Integrate new block aggregation RPC into Fuel Core \ No newline at end of file diff --git a/.changes/added/3116.md b/.changes/added/3116.md new file mode 100644 index 00000000000..96d67f653dc --- /dev/null +++ b/.changes/added/3116.md @@ -0,0 +1 @@ +Complete coverage of proto block types to 
cover all cases \ No newline at end of file diff --git a/.changes/fixed/3112.md b/.changes/fixed/3112.md new file mode 100644 index 00000000000..7efa291b31c --- /dev/null +++ b/.changes/fixed/3112.md @@ -0,0 +1 @@ +Use Protobuf types in serialization rather than opaque bytes \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7c1c0a3a6cd..703996769c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -239,6 +239,7 @@ jobs: - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} + - uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: diff --git a/.gitignore b/.gitignore index 42a58004912..5ec17be005d 100644 --- a/.gitignore +++ b/.gitignore @@ -18,5 +18,6 @@ package-lock.json package.json bin/fuel-core/chainspec/local-testnet/state_transition_bytecode.wasm .DS_Store +.fueldb/ local-testnet/ diff --git a/.typos.toml b/.typos.toml index d6069bd48a2..a6c9bbab2a3 100644 --- a/.typos.toml +++ b/.typos.toml @@ -4,4 +4,6 @@ extend-ignore-identifiers-re = [ "tro", "Tro", "typ", + "aloc", + "ALOC", ] \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 7f1354835c8..f297766eb7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3583,6 +3583,7 @@ dependencies = [ "derive_more 0.99.20", "enum-iterator", "fuel-core", + "fuel-core-block-aggregator-api", "fuel-core-chain-config", "fuel-core-compression-service", "fuel-core-consensus-module", @@ -3697,6 +3698,7 @@ dependencies = [ "dirs 4.0.0", "dotenvy", "fuel-core", + "fuel-core-block-aggregator-api", "fuel-core-chain-config", "fuel-core-metrics", "fuel-core-poa", @@ -3735,8 +3737,10 @@ dependencies = [ "fuel-core-storage", "fuel-core-types 0.47.1", "futures", + "log", "num_enum", "postcard", + "proptest", "prost 0.14.1", "rand 0.8.5", "serde", @@ -4236,6 +4240,7 @@ dependencies = [ "fuel-core", "fuel-core-benches", "fuel-core-bin", + "fuel-core-block-aggregator-api", "fuel-core-client", 
"fuel-core-compression", "fuel-core-compression-service", @@ -4371,6 +4376,7 @@ dependencies = [ "k256", "parking_lot", "postcard", + "proptest", "rand 0.8.5", "secrecy", "serde", @@ -10176,10 +10182,13 @@ name = "test-helpers" version = "0.0.0" dependencies = [ "anyhow", + "async-trait", "clap", + "cynic", "fuel-core", "fuel-core-bin", "fuel-core-client", + "fuel-core-executor", "fuel-core-p2p", "fuel-core-poa", "fuel-core-relayer", diff --git a/Cargo.toml b/Cargo.toml index f241f2c553e..ddeec08f331 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,9 +81,10 @@ educe = { version = "0.6", default-features = false, features = [ ] } enum-iterator = "1.2" enum_dispatch = "0.3.13" -# Workspace members fuel-core = { version = "0.47.1", path = "./crates/fuel-core", default-features = false } fuel-core-bin = { version = "0.47.1", path = "./bin/fuel-core" } +# Workspace members +fuel-core-block-aggregator-api = { version = "0.47.1", path = "crates/services/block_aggregator_api" } fuel-core-chain-config = { version = "0.47.1", path = "./crates/chain-config", default-features = false } fuel-core-client = { version = "0.47.1", path = "./crates/client" } fuel-core-compression = { version = "0.47.1", path = "./crates/compression" } diff --git a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index d4dfa3a17c4..b363f7d3233 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -401,6 +401,7 @@ fn service_with_many_contracts( Default::default(), Default::default(), Default::default(), + Default::default(), ), config.clone(), ) diff --git a/bin/e2e-test-client/tests/integration_tests.rs b/bin/e2e-test-client/tests/integration_tests.rs index 972d4c5da65..d150a490b19 100644 --- a/bin/e2e-test-client/tests/integration_tests.rs +++ b/bin/e2e-test-client/tests/integration_tests.rs @@ -78,8 +78,11 @@ async fn works_in_multinode_local_env() { ..Default::default() }; - config.wallet_a.endpoint = 
Some(producer.node.bound_address.to_string()); - config.wallet_b.endpoint = Some(validator.node.bound_address.to_string()); + let producer_bound_addr = producer.node.bound_address.to_string(); + let validator_bound_addr = validator.node.bound_address.to_string(); + + config.wallet_a.endpoint = Some(producer_bound_addr.clone()); + config.wallet_b.endpoint = Some(validator_bound_addr); // save config file let config = save_config_file(config); diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index 3025e322f75..9bd49c25fea 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -26,6 +26,7 @@ relayer = ["fuel-core/relayer", "dep:url"] parquet = ["fuel-core-chain-config/parquet", "fuel-core-types/serde"] rocksdb = ["fuel-core/rocksdb"] rocksdb-production = ["fuel-core/rocksdb-production", "rocksdb"] +rpc = ["fuel-core/rpc"] # features to enable in production, but increase build times production = [ "env", @@ -55,6 +56,7 @@ const_format = { version = "0.2", optional = true } dirs = "4.0" dotenvy = { version = "0.15", optional = true } fuel-core = { workspace = true, features = ["wasm-executor"] } +fuel-core-block-aggregator-api = { workspace = true } fuel-core-chain-config = { workspace = true } fuel-core-metrics = { workspace = true } fuel-core-poa = { workspace = true, optional = true } diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index 81034cd7dc7..dbd8eb5d9cf 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -107,6 +107,8 @@ use std::num::NonZeroUsize; #[cfg(feature = "p2p")] mod p2p; +mod rpc; + #[cfg(feature = "shared-sequencer")] mod shared_sequencer; @@ -290,6 +292,9 @@ pub struct Command { #[cfg(feature = "p2p")] pub p2p_args: p2p::P2PArgs, + #[clap(flatten)] + pub rpc_args: rpc::RpcArgs, + #[cfg_attr(feature = "p2p", clap(flatten))] #[cfg(feature = "p2p")] pub sync_args: p2p::SyncArgs, @@ -369,6 +374,7 @@ impl Command { relayer_args, #[cfg(feature = "p2p")] p2p_args, + 
rpc_args, #[cfg(feature = "p2p")] sync_args, #[cfg(feature = "p2p")] @@ -451,6 +457,8 @@ impl Command { .echo_delegation_interval, }; + let rpc_config = rpc_args.into_config(); + let trigger: Trigger = poa_trigger.into(); if trigger != Trigger::Never { @@ -776,6 +784,8 @@ impl Command { status_cache_ttl: status_cache_ttl.into(), metrics: metrics.is_enabled(Module::TxStatusManager), }, + #[cfg(feature = "rpc")] + rpc_config, }; Ok(config) } diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs new file mode 100644 index 00000000000..324cc8daee5 --- /dev/null +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -0,0 +1,21 @@ +use clap::Args; +use std::net; + +#[derive(Debug, Clone, Args)] +pub struct RpcArgs { + /// The IP address to bind the RPC service to + #[clap(long = "rpc_ip", default_value = "127.0.0.1", value_parser, env)] + pub rpc_ip: net::IpAddr, + + /// The port to bind the RPC service to + #[clap(long = "rpc_port", default_value = "4001", env)] + pub rpc_port: u16, +} + +impl RpcArgs { + pub fn into_config(self) -> fuel_core_block_aggregator_api::integration::Config { + fuel_core_block_aggregator_api::integration::Config { + addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), + } + } +} diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index be87eb7dd37..0a3eaded254 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -16,6 +16,7 @@ default = ["rocksdb", "serde"] serde = ["dep:serde_with"] p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"] relayer = ["dep:fuel-core-relayer"] +rpc = ["fuel-core/rpc"] shared-sequencer = ["dep:fuel-core-shared-sequencer", "dep:cosmrs"] rocksdb = ["dep:rocksdb", "dep:tempfile", "dep:num_cpus"] backup = ["rocksdb", "fuel-core-database/backup"] @@ -59,6 +60,7 @@ clap = { workspace = true, features = ["derive"] } cosmrs = { version = "0.21", optional = true } derive_more = { version = "0.99" } enum-iterator = { workspace = true } +fuel-core-block-aggregator-api = 
{ workspace = true } fuel-core-chain-config = { workspace = true, features = ["std"] } fuel-core-compression-service = { workspace = true } fuel-core-consensus-module = { workspace = true } diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index c18644171d5..6d6b128ff74 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -10,6 +10,7 @@ use crate::{ GenesisDatabase, Result as DatabaseResult, database_description::{ + block_aggregator::BlockAggregatorDatabase, compression::CompressionDatabase, gas_price::GasPriceDatabase, off_chain::OffChain, @@ -60,6 +61,7 @@ pub struct CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, + block_aggregation: Database, } impl CombinedDatabase { @@ -69,6 +71,7 @@ impl CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, + block_aggregation: Database, ) -> Self { Self { on_chain, @@ -76,6 +79,7 @@ impl CombinedDatabase { relayer, gas_price, compression, + block_aggregation, } } @@ -240,12 +244,22 @@ impl CombinedDatabase { ..database_config }, )?; + let block_aggregation = Database::open_rocksdb( + path, + state_rewind_policy, + DatabaseConfig { + max_fds, + ..database_config + }, + )?; + Ok(Self { on_chain, off_chain, relayer, gas_price, compression, + block_aggregation, }) } @@ -261,6 +275,7 @@ impl CombinedDatabase { relayer: Default::default(), gas_price: Default::default(), compression: Default::default(), + block_aggregation: Default::default(), }) } @@ -306,6 +321,7 @@ impl CombinedDatabase { Database::in_memory(), Database::in_memory(), Database::in_memory(), + Database::in_memory(), ) } @@ -326,6 +342,10 @@ impl CombinedDatabase { &self.compression } + pub fn block_aggregation(&self) -> &Database { + &self.block_aggregation + } + #[cfg(any(feature = "test-helpers", test))] pub fn on_chain_mut(&mut self) -> &mut Database { &mut self.on_chain diff --git 
a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 4561ca12ded..7e8f6ec7061 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -84,6 +84,7 @@ use crate::state::{ }; use crate::{ database::database_description::{ + block_aggregator::BlockAggregatorDatabase, gas_price::GasPriceDatabase, indexation_availability, }, @@ -441,6 +442,12 @@ impl Modifiable for Database { } } +impl Modifiable for Database { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) + } +} + #[cfg(feature = "relayer")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { diff --git a/crates/fuel-core/src/database/database_description.rs b/crates/fuel-core/src/database/database_description.rs index f7eebb96762..e991c2bc7f1 100644 --- a/crates/fuel-core/src/database/database_description.rs +++ b/crates/fuel-core/src/database/database_description.rs @@ -13,6 +13,8 @@ pub mod off_chain; pub mod on_chain; pub mod relayer; +pub mod block_aggregator; + pub trait DatabaseHeight: PartialEq + Default + Debug + Copy + Send + Sync { fn as_u64(&self) -> u64; diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs new file mode 100644 index 00000000000..2d55678552f --- /dev/null +++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs @@ -0,0 +1,27 @@ +use crate::database::database_description::DatabaseDescription; +use fuel_core_block_aggregator_api::db::storage_db::table::Column; +use fuel_core_types::fuel_types::BlockHeight; + +#[derive(Clone, Copy, Debug)] +pub struct BlockAggregatorDatabase; + +impl DatabaseDescription for BlockAggregatorDatabase { + type Column = Column; + type Height = BlockHeight; + + fn version() -> u32 { + 0 + } + + fn name() -> String { + 
"block_aggregator".to_string() + } + + fn metadata_column() -> Self::Column { + Column::Metadata + } + + fn prefix(_column: &Self::Column) -> Option { + None + } +} diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index 72ce5b8b9a5..508764d3c76 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -1,5 +1,7 @@ //! # Helpers for creating networks of nodes +#[cfg(feature = "rpc")] +use crate::service::config::free_local_addr; use crate::{ chain_config::{ CoinConfig, @@ -360,7 +362,6 @@ pub async fn make_nodes( let mut producers = Vec::with_capacity(producers_with_txs.len()); for (i, s) in producers_with_txs.into_iter().enumerate() { - let config = config.clone(); let name = s.as_ref().map_or(String::new(), |s| s.0.name.clone()); let overrides = s .clone() @@ -424,7 +425,6 @@ pub async fn make_nodes( let mut validators = vec![]; for (i, s) in validators_setup.into_iter().enumerate() { - let config = config.clone(); let name = s.as_ref().map_or(String::new(), |s| s.name.clone()); let overrides = s .clone() @@ -499,7 +499,12 @@ pub fn make_config( ) -> Config { node_config.p2p = Config::local_node().p2p; node_config.utxo_validation = true; - node_config.name = name; + node_config.name = name.clone(); + #[cfg(feature = "rpc")] + { + node_config.rpc_config.addr = free_local_addr(); + } + if let Some(min_gas_price) = config_overrides.min_exec_gas_price { node_config.gas_price_config.min_exec_gas_price = min_gas_price; } diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index d9e2365ad22..2075361ebc6 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -145,7 +145,6 @@ impl FuelService { )?; // initialize sub services - tracing::info!("Initializing sub services"); database.sync_aux_db_heights(shutdown_listener)?; let block_production_ready_signal = ReadySignal::new(); @@ -194,6 +193,7 @@ impl FuelService { 
Default::default(), Default::default(), Default::default(), + Default::default(), ); Self::from_combined_database(combined_database, config).await } @@ -574,7 +574,10 @@ mod tests { // - gas price service // - chain info provider #[allow(unused_mut)] + #[cfg(not(feature = "rpc"))] let mut expected_services = 7; + #[cfg(feature = "rpc")] + let mut expected_services = 8; // Relayer service is disabled with `Config::local_node`. // #[cfg(feature = "relayer")] diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index 55fc610e40d..e2d0299bd58 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -1,4 +1,9 @@ use clap::ValueEnum; +#[cfg(feature = "test-helpers")] +use std::net::{ + SocketAddr, + TcpListener, +}; use std::{ num::{ NonZeroU32, @@ -76,6 +81,8 @@ pub struct Config { pub tx_status_manager: TxStatusManagerConfig, pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, + #[cfg(feature = "rpc")] + pub rpc_config: fuel_core_block_aggregator_api::integration::Config, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] @@ -102,6 +109,12 @@ pub struct Config { pub memory_pool_size: usize, } +#[cfg(feature = "test-helpers")] +pub fn free_local_addr() -> SocketAddr { + let listener = TcpListener::bind("[::1]:0").unwrap(); + listener.local_addr().unwrap() // OS picks a free port +} + impl Config { #[cfg(feature = "test-helpers")] pub fn local_node() -> Self { @@ -156,6 +169,11 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); + #[cfg(feature = "rpc")] + let rpc_config = fuel_core_block_aggregator_api::integration::Config { + addr: free_local_addr(), + }; + Self { graphql_config: GraphQLConfig { addr: std::net::SocketAddr::new( @@ -229,6 +247,8 @@ impl Config { time_until_synced: Duration::ZERO, production_timeout: Duration::from_secs(20), memory_pool_size: 4, 
+ #[cfg(feature = "rpc")] + rpc_config, } } diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 59cd02a1b81..412ba2b4b56 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -1,9 +1,19 @@ #![allow(clippy::let_unit_value)] -use std::sync::Arc; - -use tokio::sync::Mutex; - +#[cfg(feature = "relayer")] +use crate::relayer::Config as RelayerConfig; +#[cfg(feature = "p2p")] +use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ + key_generator::Ed25519KeyGenerator, + trigger::TimeBasedTrigger, + tx_receiver::PreconfirmationsReceiver, +}; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, + db::storage_db::StorageDB, +}; +use fuel_core_compression_service::service::new_service as new_compression_service; use fuel_core_gas_price_service::v1::{ algorithm::AlgorithmV1, da_source_service::block_committer_costs::{ @@ -14,7 +24,6 @@ use fuel_core_gas_price_service::v1::{ service::SharedData, uninitialized_task::new_gas_price_service_v1, }; - use fuel_core_poa::Trigger; use fuel_core_storage::{ self, @@ -23,18 +32,8 @@ use fuel_core_storage::{ #[cfg(feature = "relayer")] use fuel_core_types::blockchain::primitives::DaBlockHeight; use fuel_core_types::signer::SignMode; - -use fuel_core_compression_service::service::new_service as new_compression_service; - -#[cfg(feature = "relayer")] -use crate::relayer::Config as RelayerConfig; - -#[cfg(feature = "p2p")] -use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ - key_generator::Ed25519KeyGenerator, - trigger::TimeBasedTrigger, - tx_receiver::PreconfirmationsReceiver, -}; +use std::sync::Arc; +use tokio::sync::Mutex; use super::{ DbType, @@ -459,6 +458,23 @@ pub fn init_sub_services( chain_name, }; + #[cfg(feature = "rpc")] + let block_aggregator_rpc = { + let 
block_aggregator_config = config.rpc_config.clone(); + let db = database.block_aggregation().clone(); + let db_adapter = StorageDB::new(db); + let serializer = SerializerAdapter; + let onchain_db = database.on_chain().clone(); + let importer = importer_adapter.events_shared_result(); + fuel_core_block_aggregator_api::integration::new_service( + &block_aggregator_config, + db_adapter, + serializer, + onchain_db, + importer, + ) + }; + let graph_ql = fuel_core_graphql_api::api_service::new_service( *genesis_block.header().height(), graphql_config, @@ -523,6 +539,8 @@ pub fn init_sub_services( services.push(Box::new(graph_ql)); services.push(Box::new(graphql_worker)); services.push(Box::new(tx_status_manager)); + #[cfg(feature = "rpc")] + services.push(Box::new(block_aggregator_rpc)); if let Some(compression_service) = compression_service { services.push(Box::new(compression_service)); diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index e8869307a0b..03342654df9 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -8,6 +8,10 @@ homepage = { workspace = true } license = { workspace = true } repository = { workspace = true } rust-version = { workspace = true } +build = "build.rs" + +[features] +fault-proving = ["fuel-core-types/fault-proving"] [dependencies] anyhow = { workspace = true } @@ -18,8 +22,10 @@ fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } +log = "0.4.27" num_enum = { workspace = true } -prost = { workspace = true } +postcard = { workspace = true } +prost = { workspace = true, features = ["derive"] } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } strum = { workspace = true } @@ -38,6 +44,6 @@ tonic-prost-build = { workspace = true } fuel-core-services = { 
workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } -postcard = { workspace = true } +proptest = { workspace = true } tokio-stream = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/build.rs b/crates/services/block_aggregator_api/build.rs index c438a06453f..190a1538000 100644 --- a/crates/services/block_aggregator_api/build.rs +++ b/crates/services/block_aggregator_api/build.rs @@ -1,4 +1,7 @@ fn main() -> Result<(), Box> { - tonic_prost_build::compile_protos("proto/api.proto")?; + tonic_prost_build::configure() + .type_attribute(".", "#[derive(serde::Serialize,serde::Deserialize)]") + .type_attribute(".", "#[allow(clippy::large_enum_variant)]") + .compile_protos(&["proto/api.proto"], &["proto/"])?; Ok(()) } diff --git a/crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt b/crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt new file mode 100644 index 00000000000..45867ce5dfb --- /dev/null +++ b/crates/services/block_aggregator_api/proptest-regressions/blocks/importer_and_db_source/serializer_adapter.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
+cc 3d8a1dc0826956e2454ff1a3d6b8d75c5b5b0eebe2986c5668745ffb2bb9b0e4 # shrinks to block = V1(BlockV1 { header: V1(BlockHeaderV1 { application: ApplicationHeader { da_height: DaBlockHeight(0), consensus_parameters_version: 0, state_transition_bytecode_version: 31, generated: GeneratedApplicationFieldsV1 { transactions_count: 0, message_receipt_count: 0, transactions_root: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, message_outbox_root: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, event_inbox_root: 0000000000000000000000000000000000000000000000000000000000000000 } }, consensus: ConsensusHeader { prev_root: 0000000000000000000000000000000000000000000000000000000000000000, height: 00000000, time: Tai64(4611686018427387914), generated: GeneratedConsensusFields { application_hash: cda084575be17b88d98713807263d2f5b4ffbe79ba9a4fbf544bf6493a1d641a } }, metadata: Some(BlockHeaderMetadata { id: BlockId(c636fad695fad5e9211cd08b2cb66c024d7b972572cb1005c6ab56aeca4f34b4) }) }), transactions: [Script(ChargeableTransaction { body: ScriptBody { script_gas_limit: 0, receipts_root: 0000000000000000000000000000000000000000000000000000000000000000, script: ScriptCode { bytes: Bytes(24400000) }, script_data: Bytes() }, policies: Policies { bits: PoliciesBits(WitnessLimit | Maturity), values: [0, 10000, 0, 0, 0, 0] }, inputs: [], outputs: [], witnesses: [], metadata: None })] }) diff --git a/crates/services/block_aggregator_api/proto/api.proto b/crates/services/block_aggregator_api/proto/api.proto index 1e34a8fa8de..b478c8b69b2 100644 --- a/crates/services/block_aggregator_api/proto/api.proto +++ b/crates/services/block_aggregator_api/proto/api.proto @@ -14,9 +14,653 @@ message BlockRangeRequest { } message Block { - bytes data = 1; + oneof versioned_block { + V1Block v1 = 1; + } +} + +message V1Block { + Header header = 1; + repeated Transaction transactions = 2; +} + +message Header { + oneof versioned_header { + V1Header v1 = 1; + V2Header v2 
= 2; + } +} + +// pub struct BlockHeaderV1 { +// /// The application header. +// pub(crate) application: ApplicationHeader, +// /// The consensus header. +// pub(crate) consensus: ConsensusHeader, +// /// The header metadata calculated during creation. +// /// The field is pub(crate) to enforce the use of the [`PartialBlockHeader::generate`] method. +// #[cfg_attr(feature = "serde", serde(skip))] +// #[educe(PartialEq(ignore))] +// pub(crate) metadata: Option, +//} +// pub struct ApplicationHeader { +// /// The layer 1 height of messages and events to include since the last layer 1 block number. +// /// This is not meant to represent the layer 1 block this was committed to. Validators will need +// /// to have some rules in place to ensure the block number was chosen in a reasonable way. For +// /// example, they should verify that the block number satisfies the finality requirements of the +// /// layer 1 chain. They should also verify that the block number isn't too stale and is increasing. +// /// Some similar concerns are noted in this issue: +// pub da_height: DaBlockHeight, +// /// The version of the consensus parameters used to execute this block. +// pub consensus_parameters_version: ConsensusParametersVersion, +// /// The version of the state transition bytecode used to execute this block. +// pub state_transition_bytecode_version: StateTransitionBytecodeVersion, +// /// Generated application fields. +// pub generated: Generated, +//} +// pub struct GeneratedApplicationFieldsV1 { +// /// Number of transactions in this block. +// pub transactions_count: u16, +// /// Number of message receipts in this block. +// pub message_receipt_count: u32, +// /// Merkle root of transactions. +// pub transactions_root: Bytes32, +// /// Merkle root of message receipts in this block. 
+// pub message_outbox_root: Bytes32, +// /// Root hash of all imported events from L1 +// pub event_inbox_root: Bytes32, +//} +// pub struct ConsensusHeader { +// /// Merkle root of all previous block header hashes. +// pub prev_root: Bytes32, +// /// Fuel block height. +// pub height: BlockHeight, +// /// The block producer time. +// pub time: Tai64, +// /// generated consensus fields. +// pub generated: Generated, +//} +// pub struct GeneratedConsensusFields { +// /// Hash of the application header. +// pub application_hash: Bytes32, +//} +// pub struct BlockHeaderMetadata { +// /// Hash of the header. +// id: BlockId, +//} +message V1Header { + uint64 da_height = 1; + uint32 consensus_parameters_version = 2; + uint32 state_transition_bytecode_version = 3; + uint32 transactions_count = 4; + uint32 message_receipt_count = 5; + bytes transactions_root = 6; + bytes message_outbox_root = 7; + bytes event_inbox_root = 8; + bytes prev_root = 9; + uint32 height = 10; + uint64 time = 11; + bytes application_hash = 12; + optional bytes block_id = 13; +} + +// pub struct GeneratedApplicationFieldsV2 { +// /// Number of transactions in this block. +// pub transactions_count: u16, +// /// Number of message receipts in this block. +// pub message_receipt_count: u32, +// /// Merkle root of transactions. +// pub transactions_root: Bytes32, +// /// Merkle root of message receipts in this block. 
+// pub message_outbox_root: Bytes32, +// /// Root hash of all imported events from L1 +// pub event_inbox_root: Bytes32, +// /// TxID commitment +// pub tx_id_commitment: Bytes32, +//} +message V2Header { + uint64 da_height = 1; + uint32 consensus_parameters_version = 2; + uint32 state_transition_bytecode_version = 3; + uint32 transactions_count = 4; + uint32 message_receipt_count = 5; + bytes transactions_root = 6; + bytes message_outbox_root = 7; + bytes event_inbox_root = 8; + bytes tx_id_commitment = 9; + bytes prev_root = 10; + uint32 height = 11; + uint64 time = 12; + bytes application_hash = 13; + optional bytes block_id = 14; +} + +message Transaction { + oneof variant { + ScriptTransaction script = 1; + CreateTransaction create = 2; + MintTransaction mint = 3; + UpgradeTransaction upgrade = 4; + UploadTransaction upload = 5; + BlobTransaction blob = 6; + } +} + +// pub struct ChargeableTransaction +//where +// Body: BodyConstraints, +//{ +// pub(crate) body: Body, +// pub(crate) policies: Policies, +// pub(crate) inputs: Vec, +// pub(crate) outputs: Vec, +// pub(crate) witnesses: Vec, +// #[serde(skip)] +// #[cfg_attr(feature = "da-compression", compress(skip))] +// #[educe(PartialEq(ignore))] +// #[educe(Hash(ignore))] +// #[canonical(skip)] +// pub(crate) metadata: Option>, +//} +// pub struct ScriptBody { +// pub(crate) script_gas_limit: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub(crate) receipts_root: Bytes32, +// pub(crate) script: ScriptCode, +// #[educe(Debug(method(fmt_truncated_hex::<16>)))] +// pub(crate) script_data: Vec, +//} +// #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] +//pub struct ScriptMetadata { +// pub script_data_offset: usize, +//} +message ScriptTransaction { + uint64 script_gas_limit = 1; + bytes receipts_root = 2; + bytes script = 3; + bytes script_data = 4; + Policies policies = 5; + repeated Input inputs = 6; + repeated Output outputs = 7; + repeated bytes witnesses = 8; + ScriptMetadata 
metadata = 9; +} + +message CreateTransaction { + uint32 bytecode_witness_index = 1; + bytes salt = 2; + repeated StorageSlot storage_slots = 3; + Policies policies = 4; + repeated Input inputs = 5; + repeated Output outputs = 6; + repeated bytes witnesses = 7; + CreateMetadata metadata = 8; +} + +message MintTransaction { + TxPointer tx_pointer = 1; + ContractInput input_contract = 2; + ContractOutput output_contract = 3; + uint64 mint_amount = 4; + bytes mint_asset_id = 5; + uint64 gas_price = 6; + MintMetadata metadata = 7; +} + +message UpgradeTransaction { + UpgradePurpose purpose = 1; + Policies policies = 2; + repeated Input inputs = 3; + repeated Output outputs = 4; + repeated bytes witnesses = 5; + UpgradeMetadata metadata = 6; +} + +message UploadTransaction { + bytes root = 1; + uint32 witness_index = 2; + uint32 subsection_index = 3; + uint32 subsections_number = 4; + repeated bytes proof_set = 5; + Policies policies = 6; + repeated Input inputs = 7; + repeated Output outputs = 8; + repeated bytes witnesses = 9; + UploadMetadata metadata = 10; } +message BlobTransaction { + bytes blob_id = 1; + uint32 witness_index = 2; + Policies policies = 3; + repeated Input inputs = 4; + repeated Output outputs = 5; + repeated bytes witnesses = 6; + BlobMetadata metadata = 7; +} + +// pub struct Policies { +// /// A bitmask that indicates what policies are set. +// bits: PoliciesBits, +// /// The array of policy values. 
+// values: [Word; POLICIES_NUMBER], +//} +message Policies { + uint32 bits = 1; + repeated uint64 values = 2; +} + +// pub enum Input { +// CoinSigned(CoinSigned), +// CoinPredicate(CoinPredicate), +// Contract(Contract), +// MessageCoinSigned(MessageCoinSigned), +// MessageCoinPredicate(MessageCoinPredicate), +// MessageDataSigned(MessageDataSigned), +// MessageDataPredicate(MessageDataPredicate), +//} +message Input { + oneof variant { + CoinSignedInput coin_signed = 1; + CoinPredicateInput coin_predicate = 2; + ContractInput contract = 3; + MessageCoinSignedInput message_coin_signed = 4; + MessageCoinPredicateInput message_coin_predicate = 5; + MessageDataSignedInput message_data_signed = 6; + MessageDataPredicateInput message_data_predicate = 7; + } +} + +// pub struct Coin +//where +// Specification: CoinSpecification, +//{ +// pub utxo_id: UtxoId, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub owner: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub amount: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub asset_id: AssetId, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub tx_pointer: TxPointer, +// #[educe(Debug(method(fmt_as_field)))] +// pub witness_index: Specification::Witness, +// /// Exact amount of gas used by the predicate. +// /// If the predicate consumes different amount of gas, +// /// it's considered to be false. 
+// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_gas_used: Specification::PredicateGasUsed, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate: Specification::Predicate, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_data: Specification::PredicateData, +//} +// impl CoinSpecification for Signed { +// type Predicate = Empty; +// type PredicateData = Empty>; +// type PredicateGasUsed = Empty; +// type Witness = u16; +//} +message CoinSignedInput { + UtxoId utxo_id = 1; + bytes owner = 2; + uint64 amount = 3; + bytes asset_id = 4; + TxPointer tx_pointer = 5; + uint32 witness_index = 6; + uint64 predicate_gas_used = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +//impl CoinSpecification for Predicate { +// type Predicate = PredicateCode; +// type PredicateData = Vec; +// type PredicateGasUsed = Word; +// type Witness = Empty; +//} +message CoinPredicateInput { + UtxoId utxo_id = 1; + bytes owner = 2; + uint64 amount = 3; + bytes asset_id = 4; + TxPointer tx_pointer = 5; + uint32 witness_index = 6; + uint64 predicate_gas_used = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub struct Contract { +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub utxo_id: UtxoId, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub balance_root: Bytes32, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub state_root: Bytes32, +// /// Pointer to transaction that last modified the contract state. +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub tx_pointer: TxPointer, +// pub contract_id: ContractId, +//} +message ContractInput { + UtxoId utxo_id = 1; + bytes balance_root = 2; + bytes state_root = 3; + TxPointer tx_pointer = 4; + bytes contract_id = 5; +} + +// pub struct Message +//where +// Specification: MessageSpecification, +//{ +// /// The sender from the L1 chain. 
+// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub sender: Address, +// /// The receiver on the `Fuel` chain. +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub recipient: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub amount: Word, +// // Unique identifier of the message +// pub nonce: Nonce, +// #[educe(Debug(method(fmt_as_field)))] +// pub witness_index: Specification::Witness, +// /// Exact amount of gas used by the predicate. +// /// If the predicate consumes different amount of gas, +// /// it's considered to be false. +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_gas_used: Specification::PredicateGasUsed, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// #[educe(Debug(method(fmt_as_field)))] +// pub data: Specification::Data, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate: Specification::Predicate, +// #[educe(Debug(method(fmt_as_field)))] +// pub predicate_data: Specification::PredicateData, +//} +// pub struct MessageCoin(core::marker::PhantomData); +// +// impl MessageSpecification for MessageCoin { +// type Data = Empty>; +// type Predicate = Empty; +// type PredicateData = Empty>; +// type PredicateGasUsed = Empty; +// type Witness = u16; +// } +message MessageCoinSignedInput { + bytes sender = 1; + bytes recipient = 2; + uint64 amount = 3; + bytes nonce = 4; + uint32 witness_index = 5; + uint64 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// impl MessageSpecification for MessageCoin { +// type Data = Empty>; +// type Predicate = PredicateCode; +// type PredicateData = Vec; +// type PredicateGasUsed = Word; +// type Witness = Empty; +// } +message MessageCoinPredicateInput { + bytes sender = 1; + bytes recipient = 2; + uint64 amount = 3; + bytes nonce = 4; + uint32 witness_index = 5; + uint64 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub 
type MessageDataSigned = Message>; +message MessageDataSignedInput { + bytes sender = 1; + bytes recipient = 2; + uint64 amount = 3; + bytes nonce = 4; + uint32 witness_index = 5; + uint64 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub type MessageDataPredicate = +// Message>; +message MessageDataPredicateInput { + bytes sender = 1; + bytes recipient = 2; + uint64 amount = 3; + bytes nonce = 4; + uint32 witness_index = 5; + uint64 predicate_gas_used = 6; + bytes data = 7; + bytes predicate = 8; + bytes predicate_data = 9; +} + +// pub enum Output { +// Coin { +// to: Address, +// amount: Word, +// asset_id: AssetId, +// }, +// +// Contract(Contract), +// +// Change { +// to: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// amount: Word, +// asset_id: AssetId, +// }, +// +// Variable { +// #[cfg_attr(feature = "da-compression", compress(skip))] +// to: Address, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// amount: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// asset_id: AssetId, +// }, +// +// ContractCreated { +// contract_id: ContractId, +// state_root: Bytes32, +// }, +//} +message Output { + oneof variant { + CoinOutput coin = 1; + ContractOutput contract = 2; + ChangeOutput change = 3; + VariableOutput variable = 4; + ContractCreatedOutput contract_created = 5; + } +} +message CoinOutput { + bytes to = 1; + uint64 amount = 2; + bytes asset_id = 3; +} +message ContractOutput { + uint32 input_index = 1; + bytes balance_root = 2; + bytes state_root = 3; +} +message ChangeOutput { + bytes to = 1; + uint64 amount = 2; + bytes asset_id = 3; +} +message VariableOutput { + bytes to = 1; + uint64 amount = 2; + bytes asset_id = 3; +} +message ContractCreatedOutput { + bytes contract_id = 1; + bytes state_root = 2; +} + +// pub struct UtxoId { +// /// transaction id +// tx_id: TxId, +// /// output index +// output_index: u16, +//} +message UtxoId { + 
bytes tx_id = 1; + uint32 output_index = 2; +} + +message TxPointer { + uint32 block_height = 1; + uint32 tx_index = 2; +} + +message StorageSlot { + bytes key = 1; + bytes value = 2; +} + + +// #[derive(Debug, Clone, PartialEq, Eq, Hash)] +//pub struct ChargeableMetadata { +// pub common: CommonMetadata, +// pub body: Body, +//} +// pub struct ScriptBody { +// pub(crate) script_gas_limit: Word, +// #[cfg_attr(feature = "da-compression", compress(skip))] +// pub(crate) receipts_root: Bytes32, +// pub(crate) script: ScriptCode, +// #[educe(Debug(method(fmt_truncated_hex::<16>)))] +// pub(crate) script_data: Vec, +//} +// #[derive(Debug, Clone, PartialEq, Eq, Hash)] +//pub struct CommonMetadata { +// pub id: Bytes32, +// pub inputs_offset: usize, +// pub inputs_offset_at: Vec, +// pub inputs_predicate_offset_at: Vec>, +// pub outputs_offset: usize, +// pub outputs_offset_at: Vec, +// pub witnesses_offset: usize, +// pub witnesses_offset_at: Vec, +//} + +message ScriptMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; + uint64 script_gas_limit = 9; + bytes receipts_root = 10; + bytes script = 11; + bytes script_data = 12; +} + +message CreateMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; + bytes contract_id = 9; + bytes contract_root = 10; + bytes state_root = 11; +} + +message MintMetadata { + bytes id = 1; +} + +message UpgradePurpose { + oneof variant { + UpgradeConsensusParameters consensus_parameters = 1; + UpgradeStateTransition state_transition = 2; + } +} + +message 
UpgradeConsensusParameters { + uint32 witness_index = 1; + bytes checksum = 2; +} + +message UpgradeStateTransition { + bytes root = 1; +} + +message UpgradeMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; + oneof variant { + UpgradeConsensusParametersMetadata consensus_parameters = 9; + UpgradeStateTransitionMetadata state_transition = 10; + } +} + +message UpgradeConsensusParametersMetadata { + bytes consensus_parameters = 1; + bytes calculated_checksum = 2; +} + +message UpgradeStateTransitionMetadata {} + +message UploadMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; +} + +message BlobMetadata { + bytes id = 1; + uint32 inputs_offset = 2; + repeated uint32 inputs_offset_at = 3; + repeated PredicateOffset inputs_predicate_offset_at = 4; + uint32 outputs_offset = 5; + repeated uint32 outputs_offset_at = 6; + uint32 witnesses_offset = 7; + repeated uint32 witnesses_offset_at = 8; +} + +message PredicateOffset { + optional InnerPredicateOffset offset = 1; +} + +message InnerPredicateOffset { + uint32 offset = 1; + uint32 length = 2; +} + + message BlockResponse { oneof payload { Block literal = 1; @@ -30,4 +674,4 @@ service BlockAggregator { rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); rpc GetBlockRange (BlockRangeRequest) returns (stream BlockResponse); rpc NewBlockSubscription (NewBlockSubscriptionRequest) returns (stream BlockResponse); -} \ No newline at end of file +} diff --git a/crates/services/block_aggregator_api/src/api.rs 
b/crates/services/block_aggregator_api/src/api.rs index 3cc652bdd09..4beb51c47f3 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -1,7 +1,4 @@ -use crate::{ - NewBlock, - result::Result, -}; +use crate::result::Result; use fuel_core_types::fuel_types::BlockHeight; use std::fmt; @@ -11,14 +8,17 @@ pub mod protobuf_adapter; pub trait BlockAggregatorApi: Send + Sync { /// The type of the block range response. type BlockRangeResponse; + type Block; /// Awaits the next query to the block aggregator service. fn await_query( &mut self, - ) -> impl Future>> + Send; + ) -> impl Future< + Output = Result>, + > + Send; } -pub enum BlockAggregatorQuery { +pub enum BlockAggregatorQuery { GetBlockRange { first: BlockHeight, last: BlockHeight, @@ -29,11 +29,11 @@ pub enum BlockAggregatorQuery { }, // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? NewBlockSubscription { - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender, }, } -impl fmt::Debug for BlockAggregatorQuery { +impl fmt::Debug for BlockAggregatorQuery { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { BlockAggregatorQuery::GetBlockRange { first, last, .. 
} => f @@ -52,7 +52,7 @@ impl fmt::Debug for BlockAggregatorQuery { } #[cfg(test)] -impl BlockAggregatorQuery { +impl BlockAggregatorQuery { pub fn get_block_range>( first: H, last: H, @@ -74,7 +74,7 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { + pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { const ARBITRARY_CHANNEL_SIZE: usize = 10; let (sender, receiver) = tokio::sync::mpsc::channel(ARBITRARY_CHANNEL_SIZE); let query = Self::NewBlockSubscription { response: sender }; diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index aecc845d4f5..c944e199917 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -4,28 +4,42 @@ use crate::{ BlockAggregatorQuery, }, block_range_response::BlockRangeResponse, - result::Result, + protobuf_types::{ + Block as ProtoBlock, + BlockHeightRequest as ProtoBlockHeightRequest, + BlockHeightResponse as ProtoBlockHeightResponse, + BlockRangeRequest as ProtoBlockRangeRequest, + BlockResponse as ProtoBlockResponse, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block_aggregator_server::{ + BlockAggregator, + BlockAggregatorServer as ProtoBlockAggregatorServer, + }, + block_response as proto_block_response, + }, + result::{ + Error, + Result, + }, }; use async_trait::async_trait; use futures::StreamExt; use tokio_stream::wrappers::ReceiverStream; use tonic::Status; -tonic::include_proto!("blockaggregator"); - -use crate::result::Error; -use block_aggregator_server::BlockAggregator; - #[cfg(test)] mod tests; pub struct Server { - query_sender: tokio::sync::mpsc::Sender>, + query_sender: + tokio::sync::mpsc::Sender>, } impl Server { pub fn new( - query_sender: tokio::sync::mpsc::Sender>, + query_sender: tokio::sync::mpsc::Sender< 
+ BlockAggregatorQuery, + >, ) -> Self { Self { query_sender } } @@ -35,8 +49,8 @@ impl Server { impl BlockAggregator for Server { async fn get_block_height( &self, - request: tonic::Request, - ) -> Result, tonic::Status> { + request: tonic::Request, + ) -> Result, tonic::Status> { tracing::debug!("get_block_height: {:?}", request); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetCurrentHeight { response }; @@ -45,7 +59,7 @@ impl BlockAggregator for Server { })?; let res = receiver.await; match res { - Ok(height) => Ok(tonic::Response::new(BlockHeightResponse { + Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { height: *height, })), Err(e) => Err(tonic::Status::internal(format!( @@ -54,13 +68,13 @@ impl BlockAggregator for Server { ))), } } - type GetBlockRangeStream = ReceiverStream>; + type GetBlockRangeStream = ReceiverStream>; async fn get_block_range( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, tonic::Status> { - tracing::debug!("get_block_range: {:?}", request); + const ARB_LITERAL_BLOCK_BUFFER_SIZE: usize = 100; let req = request.into_inner(); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetBlockRange { @@ -76,17 +90,15 @@ impl BlockAggregator for Server { match res { Ok(block_range_response) => match block_range_response { BlockRangeResponse::Literal(inner) => { - let (tx, rx) = - tokio::sync::mpsc::channel::>(16); + let (tx, rx) = tokio::sync::mpsc::channel::< + Result, + >(ARB_LITERAL_BLOCK_BUFFER_SIZE); tokio::spawn(async move { let mut s = inner; - while let Some(block) = s.next().await { - let pb = Block { - data: block.bytes().to_vec(), - }; - let response = BlockResponse { - payload: Some(block_response::Payload::Literal(pb)), + while let Some(pb) = s.next().await { + let response = ProtoBlockResponse { + payload: Some(proto_block_response::Payload::Literal(pb)), }; if tx.send(Ok(response)).await.is_err() { 
break; @@ -108,11 +120,11 @@ impl BlockAggregator for Server { } } - type NewBlockSubscriptionStream = ReceiverStream>; + type NewBlockSubscriptionStream = ReceiverStream>; async fn new_block_subscription( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, tonic::Status> { const ARB_CHANNEL_SIZE: usize = 100; tracing::warn!("get_block_range: {:?}", request); @@ -126,11 +138,8 @@ impl BlockAggregator for Server { let (task_sender, task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); tokio::spawn(async move { while let Some(nb) = receiver.recv().await { - let block = Block { - data: nb.block.bytes().to_vec(), - }; - let response = BlockResponse { - payload: Some(block_response::Payload::Literal(block)), + let response = ProtoBlockResponse { + payload: Some(proto_block_response::Payload::Literal(nb)), }; if task_sender.send(Ok(response)).await.is_err() { break; @@ -144,24 +153,38 @@ impl BlockAggregator for Server { pub struct ProtobufAPI { _server_task_handle: tokio::task::JoinHandle<()>, - query_receiver: tokio::sync::mpsc::Receiver>, + shutdown_sender: Option>, + query_receiver: + tokio::sync::mpsc::Receiver>, } impl ProtobufAPI { pub fn new(url: String) -> Self { - let (query_sender, query_receiver) = - tokio::sync::mpsc::channel::>(100); + let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< + BlockAggregatorQuery, + >(100); let server = Server::new(query_sender); let addr = url.parse().unwrap(); + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::<()>(); let _server_task_handle = tokio::spawn(async move { - tonic::transport::Server::builder() - .add_service(block_aggregator_server::BlockAggregatorServer::new(server)) - .serve(addr) - .await - .unwrap(); + let service = tonic::transport::Server::builder() + .add_service(ProtoBlockAggregatorServer::new(server)); + tokio::select! 
{ + res = service.serve(addr) => { + if let Err(e) = res { + tracing::error!("BlockAggregator tonic server error: {}", e); + } else { + tracing::info!("BlockAggregator tonic server stopped"); + } + }, + _ = shutdown_receiver => { + tracing::info!("Shutting down BlockAggregator tonic server"); + }, + } }); Self { _server_task_handle, + shutdown_sender: Some(shutdown_sender), query_receiver, } } @@ -169,10 +192,11 @@ impl ProtobufAPI { impl BlockAggregatorApi for ProtobufAPI { type BlockRangeResponse = BlockRangeResponse; + type Block = ProtoBlock; async fn await_query( &mut self, - ) -> Result> { + ) -> Result> { let query = self .query_receiver .recv() @@ -182,4 +206,10 @@ impl BlockAggregatorApi for ProtobufAPI { } } -pub struct ProtobufClient; +impl Drop for ProtobufAPI { + fn drop(&mut self) { + if let Some(shutdown_sender) = self.shutdown_sender.take() { + let _ = shutdown_sender.send(()); + } + } +} diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index 1617090a7dd..7807ac02180 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -1,24 +1,32 @@ #![allow(non_snake_case)] use crate::{ - NewBlock, api::{ BlockAggregatorApi, BlockAggregatorQuery, - protobuf_adapter::{ - BlockHeightRequest, - BlockRangeRequest, - NewBlockSubscriptionRequest, - ProtobufAPI, - block_aggregator_client::BlockAggregatorClient, - block_response::Payload, - }, + protobuf_adapter::ProtobufAPI, }, block_range_response::BlockRangeResponse, - blocks::Block, + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, + protobuf_types::{ + Block as ProtoBlock, + BlockHeightRequest, + BlockRangeRequest, + NewBlockSubscriptionRequest, + block_aggregator_client::{ + BlockAggregatorClient as ProtoBlockAggregatorClient, + BlockAggregatorClient, 
+ }, + block_response::Payload, + }, +}; +use fuel_core_types::{ + blockchain::block::Block as FuelBlock, + fuel_types::BlockHeight, }; -use bytes::Bytes; -use fuel_core_types::fuel_types::BlockHeight; use futures::{ StreamExt, TryStreamExt, @@ -40,7 +48,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { // call get current height endpoint with client let url = format!("http://{}", path); - let mut client = BlockAggregatorClient::connect(url.to_string()) + let mut client = ProtoBlockAggregatorClient::connect(url.to_string()) .await .expect("could not connect to server"); let handle = tokio::spawn(async move { @@ -77,7 +85,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { // call get current height endpoint with client let url = format!("http://{}", path); - let mut client = BlockAggregatorClient::connect(url.to_string()) + let mut client = ProtoBlockAggregatorClient::connect(url.to_string()) .await .expect("could not connect to server"); let request = BlockRangeRequest { start: 0, end: 1 }; @@ -94,8 +102,17 @@ async fn await_query__get_block_range__client_receives_expected_value() { let query = api.await_query().await.unwrap(); // then - let block1 = Block::new(Bytes::from(vec![0u8; 100])); - let block2 = Block::new(Bytes::from(vec![1u8; 100])); + let serializer_adapter = SerializerAdapter; + let fuel_block_1 = FuelBlock::default(); + let mut fuel_block_2 = FuelBlock::default(); + let block_height_2 = fuel_block_1.header().height().succ().unwrap(); + fuel_block_2.header_mut().set_block_height(block_height_2); + let block1 = serializer_adapter + .serialize_block(&fuel_block_1) + .expect("could not serialize block"); + let block2 = serializer_adapter + .serialize_block(&fuel_block_2) + .expect("could not serialize block"); let list = vec![block1, block2]; // return response through query's channel if let BlockAggregatorQuery::GetBlockRange { @@ -115,8 +132,8 @@ async fn 
await_query__get_block_range__client_receives_expected_value() { } tracing::info!("awaiting query"); let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|b| b.bytes().to_vec()).collect(); - let actual: Vec> = response + let expected = list; + let actual: Vec = response .into_inner() .try_collect::>() .await @@ -124,7 +141,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner.data.to_vec() + inner } else { panic!("unexpected response type") } @@ -162,22 +179,30 @@ async fn await_query__new_block_stream__client_receives_expected_value() { // then let height1 = BlockHeight::new(0); let height2 = BlockHeight::new(1); - let block1 = Block::new(Bytes::from(vec![0u8; 100])); - let block2 = Block::new(Bytes::from(vec![1u8; 100])); - let list = vec![(height1, block1), (height2, block2)]; + let serializer_adapter = SerializerAdapter; + let mut fuel_block_1 = FuelBlock::default(); + fuel_block_1.header_mut().set_block_height(height1); + let mut fuel_block_2 = FuelBlock::default(); + fuel_block_2.header_mut().set_block_height(height2); + let block1 = serializer_adapter + .serialize_block(&fuel_block_1) + .expect("could not serialize block"); + let block2 = serializer_adapter + .serialize_block(&fuel_block_2) + .expect("could not serialize block"); + let list = vec![block1, block2]; if let BlockAggregatorQuery::NewBlockSubscription { response } = query { tracing::info!("correct query received, sending response"); - for (height, block) in list.clone() { - let new_block = NewBlock::new(height, block); - response.send(new_block).await.unwrap(); + for block in list.clone() { + response.send(block).await.unwrap(); } } else { panic!("expected GetBlockRange query"); } tracing::info!("awaiting query"); let response = handle.await.unwrap(); - let expected: Vec> = list.iter().map(|(_, b)| b.bytes().to_vec()).collect(); - let actual: Vec> = response + let 
expected = list; + let actual: Vec = response .into_inner() .try_collect::>() .await @@ -185,7 +210,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner.data.to_vec() + inner } else { panic!("unexpected response type") } diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index a271c129b8e..4fde80d22b7 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -1,6 +1,5 @@ use crate::{ BlockAggregator, - NewBlock, api::{ BlockAggregatorApi, BlockAggregatorQuery, @@ -17,11 +16,12 @@ use fuel_core_services::{ }; use fuel_core_types::fuel_types::BlockHeight; -impl BlockAggregator +impl BlockAggregator where Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + DB: BlockAggregatorDB, Blocks: BlockSource, + ::Block: Clone + std::fmt::Debug, BlockRangeResponse: Send, { pub fn new(query: Api, database: DB, block_source: Blocks) -> Self { @@ -40,7 +40,9 @@ where pub async fn handle_query( &mut self, - res: crate::result::Result>, + res: crate::result::Result< + BlockAggregatorQuery, + >, ) -> TaskNextAction { tracing::debug!("Handling query: {res:?}"); let query = try_or_stop!(res, |e| { @@ -98,7 +100,7 @@ where async fn handle_new_block_subscription( &mut self, - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender, ) -> TaskNextAction { self.new_block_subscriptions.push(response); TaskNextAction::Continue @@ -106,8 +108,11 @@ where pub async fn handle_block( &mut self, - res: crate::result::Result, - ) -> TaskNextAction { + res: crate::result::Result::Block>>, + ) -> TaskNextAction + where + ::Block: std::fmt::Debug, + { tracing::debug!("Handling block: {res:?}"); let event = try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); @@ -115,7 +120,7 
@@ where let (id, block) = match event { BlockSourceEvent::NewBlock(id, block) => { self.new_block_subscriptions.retain_mut(|sub| { - let send_res = sub.try_send(NewBlock::new(id, block.clone())); + let send_res = sub.try_send(block.clone()); match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 5e071bc3328..24e78af6ff4 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,4 +1,4 @@ -use crate::blocks::Block; +use crate::protobuf_types::Block as ProtoBlock; use fuel_core_services::stream::Stream; pub type BoxStream = core::pin::Pin + Send + 'static>>; @@ -6,7 +6,7 @@ pub type BoxStream = core::pin::Pin + Send + 'static /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { /// A literal stream of blocks - Literal(BoxStream), + Literal(BoxStream), /// A remote URL where the blocks can be fetched Remote(String), } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index de56f280975..fb8dc76a9c1 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -7,17 +7,20 @@ pub mod importer_and_db_source; /// Source from which blocks can be gathered for aggregation pub trait BlockSource: Send + Sync { + type Block; /// Asynchronously fetch the next block and its height - fn next_block(&mut self) -> impl Future> + Send; + fn next_block( + &mut self, + ) -> impl Future>> + Send; /// Drain any remaining blocks from the source fn drain(&mut self) -> impl Future> + Send; } #[derive(Debug, Eq, PartialEq, Hash)] -pub enum BlockSourceEvent { - NewBlock(BlockHeight, Block), - OldBlock(BlockHeight, 
Block), +pub enum BlockSourceEvent { + NewBlock(BlockHeight, B), + OldBlock(BlockHeight, B), } #[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 7bfe3f233a6..892b2b40120 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -1,6 +1,5 @@ use crate::{ blocks::{ - Block, BlockSource, BlockSourceEvent, importer_and_db_source::importer_service::ImporterTask, @@ -34,22 +33,26 @@ pub mod sync_service; #[cfg(test)] mod tests; +pub mod serializer_adapter; + pub trait BlockSerializer { - fn serialize_block(&self, block: &FuelBlock) -> Result; + type Block; + fn serialize_block(&self, block: &FuelBlock) -> Result; } pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, DB: StorageInspect, DB: StorageInspect, E: std::fmt::Debug + Send, { - importer_task: ServiceRunner>, - sync_task: ServiceRunner>, + importer_task: ServiceRunner>, + sync_task: ServiceRunner>, /// Receive blocks from the importer and sync tasks - receiver: tokio::sync::mpsc::Receiver, + receiver: tokio::sync::mpsc::Receiver>, _error_marker: std::marker::PhantomData, } @@ -57,6 +60,7 @@ where impl ImporterAndDbSource where Serializer: BlockSerializer + Clone + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: StorageInspect + Send + Sync, DB: StorageInspect + Send + 'static, E: std::fmt::Debug + Send, @@ -101,12 +105,15 @@ where impl BlockSource for ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: Send + Sync, DB: StorageInspect, DB: StorageInspect, E: std::fmt::Debug + Send + Sync, { - async fn next_block(&mut self) -> Result 
{ + type Block = Serializer::Block; + + async fn next_block(&mut self) -> Result> { tracing::debug!("awaiting next block"); tokio::select! { block_res = self.receiver.recv() => { diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 500d7d0de08..74151e2a0c7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -18,21 +18,22 @@ use fuel_core_types::{ use futures::StreamExt; use tokio::sync::mpsc::Sender; -pub struct ImporterTask { +pub struct ImporterTask { importer: BoxStream, serializer: Serializer, - block_return_sender: Sender, + block_return_sender: Sender>, new_end_sender: Option>, } -impl ImporterTask +impl ImporterTask where Serializer: BlockSerializer + Send, + ::Block: Send, { pub fn new( importer: BoxStream, serializer: Serializer, - block_return: Sender, + block_return: Sender>, new_end_sender: Option>, ) -> Self { Self { @@ -43,9 +44,10 @@ where } } } -impl RunnableTask for ImporterTask +impl RunnableTask for ImporterTask where Serializer: BlockSerializer + Send + Sync, + ::Block: Send, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { tokio::select! 
{ @@ -61,7 +63,7 @@ where } } -impl ImporterTask +impl ImporterTask where Serializer: BlockSerializer + Send + Sync, { @@ -110,9 +112,10 @@ where } #[async_trait::async_trait] -impl RunnableService for ImporterTask +impl RunnableService for ImporterTask where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + 'static, { const NAME: &'static str = "BlockSourceImporterTask"; type SharedData = (); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs new file mode 100644 index 00000000000..fa7e7db2d8f --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -0,0 +1,1524 @@ +#[cfg(feature = "fault-proving")] +use crate::protobuf_types::V2Header as ProtoV2Header; +use crate::{ + blocks::importer_and_db_source::BlockSerializer, + protobuf_types::{ + BlobTransaction as ProtoBlobTx, + Block as ProtoBlock, + ChangeOutput as ProtoChangeOutput, + CoinOutput as ProtoCoinOutput, + CoinPredicateInput as ProtoCoinPredicateInput, + CoinSignedInput as ProtoCoinSignedInput, + ContractCreatedOutput as ProtoContractCreatedOutput, + ContractInput as ProtoContractInput, + ContractOutput as ProtoContractOutput, + CreateTransaction as ProtoCreateTx, + Header as ProtoHeader, + Input as ProtoInput, + MessageCoinPredicateInput as ProtoMessageCoinPredicateInput, + MessageCoinSignedInput as ProtoMessageCoinSignedInput, + MessageDataPredicateInput as ProtoMessageDataPredicateInput, + MessageDataSignedInput as ProtoMessageDataSignedInput, + MintTransaction as ProtoMintTx, + Output as ProtoOutput, + Policies as ProtoPolicies, + ScriptTransaction as ProtoScriptTx, + StorageSlot as ProtoStorageSlot, + Transaction as ProtoTransaction, + TxPointer as ProtoTxPointer, + UpgradeConsensusParameters as ProtoUpgradeConsensusParameters, + UpgradePurpose as ProtoUpgradePurpose, + 
UpgradeStateTransition as ProtoUpgradeStateTransition, + UpgradeTransaction as ProtoUpgradeTx, + UploadTransaction as ProtoUploadTx, + UtxoId as ProtoUtxoId, + V1Block as ProtoV1Block, + V1Header as ProtoV1Header, + VariableOutput as ProtoVariableOutput, + block::VersionedBlock as ProtoVersionedBlock, + header::VersionedHeader as ProtoVersionedHeader, + input::Variant as ProtoInputVariant, + output::Variant as ProtoOutputVariant, + transaction::Variant as ProtoTransactionVariant, + upgrade_purpose::Variant as ProtoUpgradePurposeVariant, + }, + result::{ + Error, + Result, + }, +}; +use anyhow::anyhow; +#[cfg(feature = "fault-proving")] +use fuel_core_types::{ + blockchain::header::BlockHeaderV2, + fuel_types::ChainId, +}; + +use fuel_core_types::{ + blockchain::{ + block::Block as FuelBlock, + header::{ + ApplicationHeader, + BlockHeader, + BlockHeaderV1, + ConsensusHeader, + GeneratedConsensusFields, + PartialBlockHeader, + }, + primitives::{ + BlockId, + DaBlockHeight, + Empty, + }, + }, + fuel_tx::{ + Address, + BlobBody, + Bytes32, + Input, + Output, + StorageSlot, + Transaction as FuelTransaction, + TxPointer, + UpgradePurpose, + UploadBody, + UtxoId, + Witness, + field::{ + BlobId as _, + BytecodeRoot as _, + BytecodeWitnessIndex as _, + InputContract as _, + Inputs, + MintAmount as _, + MintAssetId as _, + MintGasPrice as _, + OutputContract as _, + Outputs, + Policies as _, + ProofSet as _, + ReceiptsRoot as _, + Salt as _, + Script as _, + ScriptData as _, + ScriptGasLimit as _, + StorageSlots as _, + SubsectionIndex as _, + SubsectionsNumber as _, + TxPointer as TxPointerField, + UpgradePurpose as UpgradePurposeField, + Witnesses as _, + }, + policies::{ + Policies as FuelPolicies, + PoliciesBits, + PolicyType, + }, + }, + tai64, +}; + +#[derive(Clone)] +pub struct SerializerAdapter; + +impl BlockSerializer for SerializerAdapter { + type Block = ProtoBlock; + + fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { + // TODO: Should this 
be owned to begin with? + let (header, txs) = block.clone().into_inner(); + let proto_header = proto_header_from_header(header); + match &block { + FuelBlock::V1(_) => { + let proto_v1_block = ProtoV1Block { + header: Some(proto_header), + transactions: txs.into_iter().map(proto_tx_from_tx).collect(), + }; + Ok(ProtoBlock { + versioned_block: Some(ProtoVersionedBlock::V1(proto_v1_block)), + }) + } + } + } +} + +fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { + let block_id = header.id(); + let consensus = *header.consensus(); + let versioned_header = match header { + BlockHeader::V1(header) => { + let proto_v1_header = + proto_v1_header_from_v1_header(consensus, block_id, header); + ProtoVersionedHeader::V1(proto_v1_header) + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + let proto_v2_header = + proto_v2_header_from_v2_header(consensus, block_id, header); + ProtoVersionedHeader::V2(proto_v2_header) + } + }; + + ProtoHeader { + versioned_header: Some(versioned_header), + } +} + +fn proto_v1_header_from_v1_header( + consensus: ConsensusHeader, + block_id: BlockId, + header: BlockHeaderV1, +) -> ProtoV1Header { + let application = header.application(); + let generated = application.generated; + + ProtoV1Header { + da_height: application.da_height.0, + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0, + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: 
Some(block_id.as_slice().to_vec()), + } +} + +#[cfg(feature = "fault-proving")] +fn proto_v2_header_from_v2_header( + consensus: ConsensusHeader, + block_id: BlockId, + header: BlockHeaderV2, +) -> ProtoV2Header { + let application = *header.application(); + let generated = application.generated; + + ProtoV2Header { + da_height: application.da_height.0, + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0, + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + +fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { + match tx { + FuelTransaction::Script(script) => { + let proto_script = ProtoScriptTx { + script_gas_limit: *script.script_gas_limit(), + receipts_root: bytes32_to_vec(script.receipts_root()), + script: script.script().clone(), + script_data: script.script_data().clone(), + policies: Some(proto_policies_from_policies(script.policies())), + inputs: script + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: script + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: script + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Script(proto_script)), + } + } + 
FuelTransaction::Create(create) => { + let proto_create = ProtoCreateTx { + bytecode_witness_index: u32::from(*create.bytecode_witness_index()), + salt: create.salt().as_ref().to_vec(), + storage_slots: create + .storage_slots() + .iter() + .map(proto_storage_slot_from_storage_slot) + .collect(), + policies: Some(proto_policies_from_policies(create.policies())), + inputs: create + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: create + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: create + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Create(proto_create)), + } + } + FuelTransaction::Mint(mint) => { + let proto_mint = ProtoMintTx { + tx_pointer: Some(proto_tx_pointer(mint.tx_pointer())), + input_contract: Some(proto_contract_input_from_contract( + mint.input_contract(), + )), + output_contract: Some(proto_contract_output_from_contract( + mint.output_contract(), + )), + mint_amount: *mint.mint_amount(), + mint_asset_id: mint.mint_asset_id().as_ref().to_vec(), + gas_price: *mint.gas_price(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Mint(proto_mint)), + } + } + FuelTransaction::Upgrade(upgrade) => { + let proto_upgrade = ProtoUpgradeTx { + purpose: Some(proto_upgrade_purpose(upgrade.upgrade_purpose())), + policies: Some(proto_policies_from_policies(upgrade.policies())), + inputs: upgrade + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: upgrade + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: upgrade + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upgrade(proto_upgrade)), + } + } + FuelTransaction::Upload(upload) 
=> { + let proto_upload = ProtoUploadTx { + root: bytes32_to_vec(upload.bytecode_root()), + witness_index: u32::from(*upload.bytecode_witness_index()), + subsection_index: u32::from(*upload.subsection_index()), + subsections_number: u32::from(*upload.subsections_number()), + proof_set: upload.proof_set().iter().map(bytes32_to_vec).collect(), + policies: Some(proto_policies_from_policies(upload.policies())), + inputs: upload + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: upload + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: upload + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upload(proto_upload)), + } + } + FuelTransaction::Blob(blob) => { + let proto_blob = ProtoBlobTx { + blob_id: blob.blob_id().as_ref().to_vec(), + witness_index: u32::from(*blob.bytecode_witness_index()), + policies: Some(proto_policies_from_policies(blob.policies())), + inputs: blob + .inputs() + .iter() + .cloned() + .map(proto_input_from_input) + .collect(), + outputs: blob + .outputs() + .iter() + .cloned() + .map(proto_output_from_output) + .collect(), + witnesses: blob + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Blob(proto_blob)), + } + } + } +} + +fn proto_input_from_input(input: Input) -> ProtoInput { + match input { + Input::CoinSigned(coin_signed) => ProtoInput { + variant: Some(ProtoInputVariant::CoinSigned(ProtoCoinSignedInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_signed.utxo_id)), + owner: coin_signed.owner.as_ref().to_vec(), + amount: coin_signed.amount, + asset_id: coin_signed.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_signed.tx_pointer)), + witness_index: coin_signed.witness_index.into(), + 
predicate_gas_used: 0, + predicate: vec![], + predicate_data: vec![], + })), + }, + Input::CoinPredicate(coin_predicate) => ProtoInput { + variant: Some(ProtoInputVariant::CoinPredicate(ProtoCoinPredicateInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_predicate.utxo_id)), + owner: coin_predicate.owner.as_ref().to_vec(), + amount: coin_predicate.amount, + asset_id: coin_predicate.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_predicate.tx_pointer)), + witness_index: 0, + predicate_gas_used: coin_predicate.predicate_gas_used, + predicate: coin_predicate.predicate.as_ref().to_vec(), + predicate_data: coin_predicate.predicate_data.as_ref().to_vec(), + })), + }, + Input::Contract(contract) => ProtoInput { + variant: Some(ProtoInputVariant::Contract(ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + })), + }, + Input::MessageCoinSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinSigned( + ProtoMessageCoinSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), + predicate_gas_used: 0, + data: Vec::new(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageCoinPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinPredicate( + ProtoMessageCoinPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: Vec::new(), + predicate: 
message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + Input::MessageDataSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataSigned( + ProtoMessageDataSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), + predicate_gas_used: 0, + data: message.data.as_ref().to_vec(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageDataPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataPredicate( + ProtoMessageDataPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: message.data.as_ref().to_vec(), + predicate: message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + } +} + +fn proto_utxo_id_from_utxo_id(utxo_id: &UtxoId) -> ProtoUtxoId { + ProtoUtxoId { + tx_id: utxo_id.tx_id().as_ref().to_vec(), + output_index: utxo_id.output_index().into(), + } +} + +fn proto_tx_pointer(tx_pointer: &TxPointer) -> ProtoTxPointer { + ProtoTxPointer { + block_height: tx_pointer.block_height().into(), + tx_index: tx_pointer.tx_index().into(), + } +} + +fn proto_storage_slot_from_storage_slot(slot: &StorageSlot) -> ProtoStorageSlot { + ProtoStorageSlot { + key: slot.key().as_ref().to_vec(), + value: slot.value().as_ref().to_vec(), + } +} + +fn proto_contract_input_from_contract( + contract: &fuel_core_types::fuel_tx::input::contract::Contract, +) -> ProtoContractInput { + ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: 
bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + } +} + +fn proto_contract_output_from_contract( + contract: &fuel_core_types::fuel_tx::output::contract::Contract, +) -> ProtoContractOutput { + ProtoContractOutput { + input_index: u32::from(contract.input_index), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + } +} + +fn proto_output_from_output(output: Output) -> ProtoOutput { + let variant = match output { + Output::Coin { + to, + amount, + asset_id, + } => ProtoOutputVariant::Coin(ProtoCoinOutput { + to: to.as_ref().to_vec(), + amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Contract(contract) => { + ProtoOutputVariant::Contract(proto_contract_output_from_contract(&contract)) + } + Output::Change { + to, + amount, + asset_id, + } => ProtoOutputVariant::Change(ProtoChangeOutput { + to: to.as_ref().to_vec(), + amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Variable { + to, + amount, + asset_id, + } => ProtoOutputVariant::Variable(ProtoVariableOutput { + to: to.as_ref().to_vec(), + amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::ContractCreated { + contract_id, + state_root, + } => ProtoOutputVariant::ContractCreated(ProtoContractCreatedOutput { + contract_id: contract_id.as_ref().to_vec(), + state_root: bytes32_to_vec(&state_root), + }), + }; + + ProtoOutput { + variant: Some(variant), + } +} + +fn proto_upgrade_purpose(purpose: &UpgradePurpose) -> ProtoUpgradePurpose { + let variant = match purpose { + UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + } => ProtoUpgradePurposeVariant::ConsensusParameters( + ProtoUpgradeConsensusParameters { + witness_index: u32::from(*witness_index), + checksum: checksum.as_ref().to_vec(), + }, + ), + UpgradePurpose::StateTransition { root } => { + 
ProtoUpgradePurposeVariant::StateTransition(ProtoUpgradeStateTransition { + root: root.as_ref().to_vec(), + }) + } + }; + + ProtoUpgradePurpose { + variant: Some(variant), + } +} + +fn proto_policies_from_policies( + policies: &fuel_core_types::fuel_tx::policies::Policies, +) -> ProtoPolicies { + let mut values = [0u64; 6]; + if policies.is_set(PolicyType::Tip) { + values[0] = policies.get(PolicyType::Tip).unwrap_or_default(); + } + if policies.is_set(PolicyType::WitnessLimit) { + let value = policies.get(PolicyType::WitnessLimit).unwrap_or_default(); + values[1] = value; + } + if policies.is_set(PolicyType::Maturity) { + let value = policies.get(PolicyType::Maturity).unwrap_or_default(); + values[2] = value; + } + if policies.is_set(PolicyType::MaxFee) { + values[3] = policies.get(PolicyType::MaxFee).unwrap_or_default(); + } + if policies.is_set(PolicyType::Expiration) { + values[4] = policies.get(PolicyType::Expiration).unwrap_or_default(); + } + if policies.is_set(PolicyType::Owner) { + values[5] = policies.get(PolicyType::Owner).unwrap_or_default(); + } + let bits = policies.bits(); + ProtoPolicies { + bits, + values: values.to_vec(), + } +} + +fn tx_pointer_from_proto(proto: &ProtoTxPointer) -> Result { + let block_height = proto.block_height.into(); + #[allow(clippy::useless_conversion)] + let tx_index = proto.tx_index.try_into().map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_index to target type: {}", e)) + })?; + Ok(TxPointer::new(block_height, tx_index)) +} + +fn storage_slot_from_proto(proto: &ProtoStorageSlot) -> Result { + let key = Bytes32::try_from(proto.key.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot key to Bytes32: {}", + e + )) + })?; + let value = Bytes32::try_from(proto.value.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot value to Bytes32: {}", + e + )) + })?; + Ok(StorageSlot::new(key, value)) +} + +fn contract_input_from_proto( + 
proto: &ProtoContractInput, +) -> Result { + let utxo_proto = proto.utxo_id.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing utxo_id on contract input")) + })?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert balance_root to Bytes32: {}", e)) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert state_root to Bytes32: {}", e)) + })?; + let tx_pointer_proto = proto.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on contract input")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let contract_id = + fuel_core_types::fuel_types::ContractId::try_from(proto.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(fuel_core_types::fuel_tx::input::contract::Contract { + utxo_id, + balance_root, + state_root, + tx_pointer, + contract_id, + }) +} + +fn contract_output_from_proto( + proto: &ProtoContractOutput, +) -> Result { + let input_index = u16::try_from(proto.input_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output input_index to u16: {}", + e + )) + })?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output balance_root to Bytes32: {}", + e + )) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output state_root to Bytes32: {}", + e + )) + })?; + + Ok(fuel_core_types::fuel_tx::output::contract::Contract { + input_index, + balance_root, + state_root, + }) +} + +fn output_from_proto_output(proto_output: &ProtoOutput) -> Result { + match proto_output + .variant + .as_ref() + .ok_or_else(|| 
Error::Serialization(anyhow!("Missing output variant")))? + { + ProtoOutputVariant::Coin(coin) => { + let to = Address::try_from(coin.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = + fuel_core_types::fuel_types::AssetId::try_from(coin.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::coin(to, coin.amount, asset_id)) + } + ProtoOutputVariant::Contract(contract) => { + let contract = contract_output_from_proto(contract)?; + Ok(Output::Contract(contract)) + } + ProtoOutputVariant::Change(change) => { + let to = Address::try_from(change.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + change.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::change(to, change.amount, asset_id)) + } + ProtoOutputVariant::Variable(variable) => { + let to = Address::try_from(variable.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + variable.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::variable(to, variable.amount, asset_id)) + } + ProtoOutputVariant::ContractCreated(contract_created) => { + let contract_id = fuel_core_types::fuel_types::ContractId::try_from( + contract_created.contract_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let state_root = Bytes32::try_from(contract_created.state_root.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state_root to Bytes32: {}", + e + )) + })?; + Ok(Output::contract_created(contract_id, state_root)) + } + } +} + +fn upgrade_purpose_from_proto(proto: &ProtoUpgradePurpose) -> Result { + match proto + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing upgrade purpose variant")))? 
+ { + ProtoUpgradePurposeVariant::ConsensusParameters(consensus) => { + let witness_index = u16::try_from(consensus.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let checksum = + Bytes32::try_from(consensus.checksum.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert checksum to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + }) + } + ProtoUpgradePurposeVariant::StateTransition(state) => { + let root = Bytes32::try_from(state.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state transition root to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::StateTransition { root }) + } + } +} + +fn utxo_id_from_proto(proto_utxo: &ProtoUtxoId) -> Result { + let tx_id = Bytes32::try_from(proto_utxo.tx_id.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_id to Bytes32: {}", e)) + })?; + let output_index = u16::try_from(proto_utxo.output_index).map_err(|e| { + Error::Serialization(anyhow!("Could not convert output_index to u16: {}", e)) + })?; + Ok(UtxoId::new(tx_id, output_index)) +} + +fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { + bytes.as_ref().to_vec() +} + +pub fn fuel_block_from_protobuf( + proto_block: ProtoBlock, + msg_ids: &[fuel_core_types::fuel_tx::MessageId], + event_inbox_root: Bytes32, +) -> Result { + let versioned_block = proto_block + .versioned_block + .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) + .map_err(Error::Serialization)?; + let partial_header = match &versioned_block { + ProtoVersionedBlock::V1(v1_block) => { + let proto_header = v1_block + .header + .clone() + .ok_or_else(|| anyhow::anyhow!("Missing protobuf header")) + .map_err(Error::Serialization)?; + partial_header_from_proto_header(proto_header)? 
+ } + }; + let txs = match versioned_block { + ProtoVersionedBlock::V1(v1_inner) => v1_inner + .transactions + .iter() + .map(tx_from_proto_tx) + .collect::>()?, + }; + FuelBlock::new( + partial_header, + txs, + msg_ids, + event_inbox_root, + #[cfg(feature = "fault-proving")] + &ChainId::default(), + ) + .map_err(|e| anyhow!(e)) + .map_err(Error::Serialization) +} + +pub fn partial_header_from_proto_header( + proto_header: ProtoHeader, +) -> Result { + let partial_header = PartialBlockHeader { + consensus: proto_header_to_empty_consensus_header(&proto_header)?, + application: proto_header_to_empty_application_header(&proto_header)?, + }; + Ok(partial_header) +} + +pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { + let variant = proto_tx + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing transaction variant")))?; + + match variant { + ProtoTransactionVariant::Script(proto_script) => { + let policies = proto_script + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_script + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_script + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_script + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let mut script_tx = FuelTransaction::script( + proto_script.script_gas_limit, + proto_script.script.clone(), + proto_script.script_data.clone(), + policies, + inputs, + outputs, + witnesses, + ); + *script_tx.receipts_root_mut() = Bytes32::try_from( + proto_script.receipts_root.as_slice(), + ) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert receipts_root to Bytes32: {}", + e + )) + })?; + + Ok(FuelTransaction::Script(script_tx)) + } + ProtoTransactionVariant::Create(proto_create) => { + let policies = proto_create + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + 
let inputs = proto_create + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_create + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_create + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let storage_slots = proto_create + .storage_slots + .iter() + .map(storage_slot_from_proto) + .collect::>>()?; + let salt = + fuel_core_types::fuel_types::Salt::try_from(proto_create.salt.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let bytecode_witness_index = + u16::try_from(proto_create.bytecode_witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert bytecode_witness_index to u16: {}", + e + )) + })?; + + let create_tx = FuelTransaction::create( + bytecode_witness_index, + policies, + salt, + storage_slots, + inputs, + outputs, + witnesses, + ); + + Ok(FuelTransaction::Create(create_tx)) + } + ProtoTransactionVariant::Mint(proto_mint) => { + let tx_pointer_proto = proto_mint.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on mint transaction")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let input_contract_proto = + proto_mint.input_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing input_contract on mint transaction" + )) + })?; + let input_contract = contract_input_from_proto(input_contract_proto)?; + let output_contract_proto = + proto_mint.output_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing output_contract on mint transaction" + )) + })?; + let output_contract = contract_output_from_proto(output_contract_proto)?; + let mint_asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_mint.mint_asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + let mint_tx = FuelTransaction::mint( + tx_pointer, + input_contract, + output_contract, + 
proto_mint.mint_amount, + mint_asset_id, + proto_mint.gas_price, + ); + + Ok(FuelTransaction::Mint(mint_tx)) + } + ProtoTransactionVariant::Upgrade(proto_upgrade) => { + let purpose_proto = proto_upgrade.purpose.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing purpose on upgrade transaction")) + })?; + let upgrade_purpose = upgrade_purpose_from_proto(purpose_proto)?; + let policies = proto_upgrade + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_upgrade + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upgrade + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upgrade + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + + let upgrade_tx = FuelTransaction::upgrade( + upgrade_purpose, + policies, + inputs, + outputs, + witnesses, + ); + + Ok(FuelTransaction::Upgrade(upgrade_tx)) + } + ProtoTransactionVariant::Upload(proto_upload) => { + let policies = proto_upload + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_upload + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upload + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upload + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let root = Bytes32::try_from(proto_upload.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert upload root to Bytes32: {}", + e + )) + })?; + let witness_index = + u16::try_from(proto_upload.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let subsection_index = + u16::try_from(proto_upload.subsection_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsection_index to u16: {}", + e + )) + })?; 
+ let subsections_number = u16::try_from(proto_upload.subsections_number) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsections_number to u16: {}", + e + )) + })?; + let proof_set = proto_upload + .proof_set + .iter() + .map(|entry| { + Bytes32::try_from(entry.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert proof_set entry to Bytes32: {}", + e + )) + }) + }) + .collect::>>()?; + + let body = UploadBody { + root, + witness_index, + subsection_index, + subsections_number, + proof_set, + }; + + let upload_tx = + FuelTransaction::upload(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Upload(upload_tx)) + } + ProtoTransactionVariant::Blob(proto_blob) => { + let policies = proto_blob + .policies + .clone() + .map(policies_from_proto_policies) + .unwrap_or_default(); + let inputs = proto_blob + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_blob + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_blob + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let blob_id = fuel_core_types::fuel_types::BlobId::try_from( + proto_blob.blob_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = u16::try_from(proto_blob.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert blob witness_index to u16: {}", + e + )) + })?; + let body = BlobBody { + id: blob_id, + witness_index, + }; + + let blob_tx = + FuelTransaction::blob(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Blob(blob_tx)) + } + } +} + +fn input_from_proto_input(proto_input: &ProtoInput) -> Result { + let variant = proto_input + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing input variant")))?; + + match variant { + ProtoInputVariant::CoinSigned(proto_coin_signed) => { + let utxo_proto = proto_coin_signed + 
.utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = + Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_signed.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_signed + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let witness_index = + u16::try_from(proto_coin_signed.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::coin_signed( + utxo_id, + owner, + proto_coin_signed.amount, + asset_id, + tx_pointer, + witness_index, + )) + } + ProtoInputVariant::CoinPredicate(proto_coin_predicate) => { + let utxo_proto = proto_coin_predicate + .utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = Address::try_from(proto_coin_predicate.owner.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_predicate.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_predicate + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + + Ok(Input::coin_predicate( + utxo_id, + owner, + proto_coin_predicate.amount, + asset_id, + tx_pointer, + proto_coin_predicate.predicate_gas_used, + proto_coin_predicate.predicate.clone(), + 
proto_coin_predicate.predicate_data.clone(), + )) + } + ProtoInputVariant::Contract(proto_contract) => { + let contract = contract_input_from_proto(proto_contract)?; + Ok(Input::Contract(contract)) + } + ProtoInputVariant::MessageCoinSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::message_coin_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + )) + } + ProtoInputVariant::MessageCoinPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_coin_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + ProtoInputVariant::MessageDataSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + 
Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::message_data_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + proto_message.data.clone(), + )) + } + ProtoInputVariant::MessageDataPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_data_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.data.clone(), + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + } +} + +fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { + let ProtoPolicies { bits, values } = proto_policies; + let mut policies = FuelPolicies::default(); + let bits = + PoliciesBits::from_bits(bits).expect("Should be able to create from `u32`"); + if bits.contains(PoliciesBits::Tip) + && let Some(tip) = values.first() + { + policies.set(PolicyType::Tip, Some(*tip)); + } + if 
bits.contains(PoliciesBits::WitnessLimit) + && let Some(witness_limit) = values.get(1) + { + policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); + } + if bits.contains(PoliciesBits::Maturity) + && let Some(maturity) = values.get(2) + { + policies.set(PolicyType::Maturity, Some(*maturity)); + } + if bits.contains(PoliciesBits::MaxFee) + && let Some(max_fee) = values.get(3) + { + policies.set(PolicyType::MaxFee, Some(*max_fee)); + } + if bits.contains(PoliciesBits::Expiration) + && let Some(expiration) = values.get(4) + { + policies.set(PolicyType::Expiration, Some(*expiration)); + } + if bits.contains(PoliciesBits::Owner) + && let Some(owner) = values.get(5) + { + policies.set(PolicyType::Owner, Some(*owner)); + } + policies +} + +pub fn proto_header_to_empty_application_header( + proto_header: &ProtoHeader, +) -> Result> { + match proto_header.versioned_header.clone() { + Some(ProtoVersionedHeader::V1(header)) => { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + if cfg!(feature = "fault-proving") { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) + } else { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) + } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } +} + +/// Alias the consensus header into an empty one. 
+pub fn proto_header_to_empty_consensus_header( + proto_header: &ProtoHeader, +) -> Result> { + match proto_header.versioned_header.clone() { + Some(ProtoVersionedHeader::V1(header)) => { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root).ok_or( + Error::Serialization(anyhow!("Could create `Bytes32` from bytes")), + )?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + if cfg!(feature = "fault-proving") { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) + .ok_or(Error::Serialization(anyhow!( + "Could create `Bytes32` from bytes" + )))?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } else { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) + } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } +} + +// TODO: Add coverage for V2 Block stuff +// https://github.com/FuelLabs/fuel-core/issues/3139 +#[cfg(not(feature = "fault-proving"))] +#[allow(non_snake_case)] +#[cfg(test)] +mod tests { + use super::*; + use fuel_core_types::test_helpers::arb_block; + use proptest::prelude::*; + + proptest! { + #![proptest_config(ProptestConfig { + cases: 100, .. 
ProptestConfig::default() + })] + #[test] + fn serialize_block__roundtrip((block, msg_ids, event_inbox_root) in arb_block()) { + // given + let serializer = SerializerAdapter; + + // when + let proto_block = serializer.serialize_block(&block).unwrap(); + + // then + let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); + assert_eq!(block, deserialized_block); + + } + } + + #[test] + #[ignore] + fn _dummy() {} +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index c8d327607aa..be8b6b19e94 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -25,18 +25,19 @@ use fuel_core_types::{ }, fuel_types::BlockHeight, }; +use std::time::Duration; use tokio::sync::mpsc::Sender; -pub struct SyncTask { +pub struct SyncTask { serializer: Serializer, - block_return_sender: Sender, + block_return_sender: Sender>, db: DB, next_height: BlockHeight, maybe_stop_height: Option, new_ending_height: tokio::sync::oneshot::Receiver, } -impl SyncTask +impl SyncTask where Serializer: BlockSerializer + Send, DB: StorageInspect + Send + 'static, @@ -45,7 +46,7 @@ where { pub fn new( serializer: Serializer, - block_return: Sender, + block_return: Sender>, db: DB, db_starting_height: BlockHeight, db_ending_height: Option, @@ -98,11 +99,18 @@ where } Ok(txs) } + + // For now just have arbitrary 10 ms sleep to avoid busy looping. + // This could be more complicated with increasing backoff times, etc. 
+ async fn go_to_sleep_before_continuing(&self) { + tokio::time::sleep(Duration::from_millis(10)).await; + } } -impl RunnableTask for SyncTask +impl RunnableTask for SyncTask where Serializer: BlockSerializer + Send + Sync, + Serializer::Block: Send + Sync + 'static, DB: Send + Sync + 'static, DB: StorageInspect + Send + 'static, DB: StorageInspect + Send + 'static, @@ -134,6 +142,7 @@ where self.next_height = BlockHeight::from((*next_height).saturating_add(1)); } else { tracing::warn!("no block found at height {:?}, retrying", next_height); + self.go_to_sleep_before_continuing().await; } TaskNextAction::Continue } @@ -144,9 +153,10 @@ where } #[async_trait::async_trait] -impl RunnableService for SyncTask +impl RunnableService for SyncTask where Serializer: BlockSerializer + Send + Sync + 'static, + ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, DB: StorageInspect + Send + 'static, DB: StorageInspect + Send + 'static, diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 92e04d69e5f..64d0256dbae 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,6 +1,7 @@ #![allow(non_snake_case)] use super::*; +use crate::blocks::Block; use ::postcard::to_allocvec; use fuel_core_services::stream::{ IntoBoxStream, @@ -34,6 +35,8 @@ use std::sync::Arc; pub struct MockSerializer; impl BlockSerializer for MockSerializer { + type Block = Block; + fn serialize_block(&self, block: &FuelBlock) -> Result { let bytes_vec = to_allocvec(block).map_err(|e| { Error::BlockSource(anyhow!("failed to serialize block: {}", e)) @@ -46,7 +49,6 @@ fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() } -// let block_stream = tokio_stream::iter(blocks).chain(pending()).into_boxed(); fn 
stream_with_pending(items: Vec) -> BoxStream { tokio_stream::iter(items).chain(pending()).into_boxed() } diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index 13a0bcc8489..d664bd13932 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -1,13 +1,11 @@ -use crate::{ - blocks::Block, - result::Result, -}; +use crate::result::Result; use fuel_core_types::fuel_types::BlockHeight; pub mod storage_db; /// The definition of the block aggregator database. pub trait BlockAggregatorDB: Send + Sync { + type Block; /// The type used to report a range of blocks type BlockRangeResponse; @@ -15,7 +13,7 @@ pub trait BlockAggregatorDB: Send + Sync { fn store_block( &mut self, height: BlockHeight, - block: Block, + block: Self::Block, ) -> impl Future> + Send; /// Retrieves a range of blocks from the database diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index ce7b731f790..7aeac0a91d1 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,10 +1,10 @@ use crate::{ block_range_response::BlockRangeResponse, - blocks::Block, db::{ BlockAggregatorDB, storage_db::table::Column, }, + protobuf_types::Block as ProtoBlock, result::{ Error, Result, @@ -103,11 +103,16 @@ where for<'b> StorageTransaction<&'b mut S>: StorageMutate, S: AtomicView, T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, - StorageTransaction: AtomicView + StorageInspect, + StorageTransaction: StorageInspect, { + type Block = ProtoBlock; type BlockRangeResponse = BlockRangeResponse; - async fn store_block(&mut self, height: BlockHeight, block: Block) -> Result<()> { + async fn store_block( + &mut self, + height: BlockHeight, + block: ProtoBlock, + ) -> Result<()> { self.update_highest_contiguous_block(height); let 
mut tx = self.storage.write_transaction(); tx.storage_as_mut::() @@ -156,7 +161,7 @@ where S: Unpin + ReadTransaction + std::fmt::Debug, for<'a> StorageTransaction<&'a S>: StorageInspect, { - type Item = Block; + type Item = ProtoBlock; fn poll_next( self: Pin<&mut Self>, diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs index 525645100e8..be11785c7af 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/table.rs @@ -1,4 +1,4 @@ -use crate::blocks::Block; +use crate::protobuf_types::Block as ProtoBlock; use fuel_core_storage::{ Mappable, blueprint::plain::Plain, @@ -51,7 +51,7 @@ impl Mappable for Blocks { type Key = Self::OwnedKey; type OwnedKey = BlockHeight; type Value = Self::OwnedValue; - type OwnedValue = Block; + type OwnedValue = ProtoBlock; } impl TableWithBlueprint for Blocks { diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index f09cdaafc2b..593839e406a 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -1,31 +1,43 @@ #![allow(non_snake_case)] use super::*; -use crate::db::storage_db::table::Column; +use crate::{ + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, + db::storage_db::table::Column, +}; use fuel_core_storage::{ StorageAsRef, structured_storage::test::InMemoryStorage, transactional::IntoTransaction, }; use fuel_core_types::{ - ed25519::signature::rand_core::SeedableRng, + blockchain::block::Block as FuelBlock, + fuel_tx::Transaction, fuel_types::BlockHeight, }; use futures::StreamExt; -use rand::rngs::StdRng; fn database() -> StorageTransaction> { InMemoryStorage::default().into_transaction() } +fn 
proto_block_with_height(height: BlockHeight) -> ProtoBlock { + let serializer_adapter = SerializerAdapter; + let mut default_block = FuelBlock::::default(); + default_block.header_mut().set_block_height(height); + serializer_adapter.serialize_block(&default_block).unwrap() +} + #[tokio::test] async fn store_block__adds_to_storage() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let mut adapter = StorageDB::new(db); let height = BlockHeight::from(1u32); - let expected = Block::random(&mut rng); + let expected = proto_block_with_height(height); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -43,15 +55,15 @@ async fn store_block__adds_to_storage() { #[tokio::test] async fn get_block__can_get_expected_range() { - let mut rng = StdRng::seed_from_u64(666); // given let mut db = database(); let height_1 = BlockHeight::from(1u32); let height_2 = BlockHeight::from(2u32); let height_3 = BlockHeight::from(3u32); - let expected_1 = Block::random(&mut rng); - let expected_2 = Block::random(&mut rng); - let expected_3 = Block::random(&mut rng); + + let expected_1 = proto_block_with_height(height_1); + let expected_2 = proto_block_with_height(height_2); + let expected_3 = proto_block_with_height(height_3); let mut tx = db.write_transaction(); tx.storage_as_mut::() @@ -82,12 +94,11 @@ async fn get_block__can_get_expected_range() { #[tokio::test] async fn store_block__updates_the_highest_continuous_block_if_contiguous() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let mut adapter = StorageDB::new_with_height(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); - let expected = Block::random(&mut rng); + let expected = proto_block_with_height(height); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -100,13 +111,12 @@ async fn store_block__updates_the_highest_continuous_block_if_contiguous() { #[tokio::test] async fn 
store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let starting_height = BlockHeight::from(0u32); let mut adapter = StorageDB::new_with_height(db, starting_height); let height = BlockHeight::from(2u32); - let expected = Block::random(&mut rng); + let expected = proto_block_with_height(height); // when adapter.store_block(height, expected.clone()).await.unwrap(); @@ -119,7 +129,6 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig #[tokio::test] async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { - let mut rng = StdRng::seed_from_u64(666); // given let db = database(); let starting_height = BlockHeight::from(0u32); @@ -129,7 +138,7 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { for height in 2..=10u32 { let height = BlockHeight::from(height); orphaned_height = Some(height); - let block = Block::random(&mut rng); + let block = proto_block_with_height(height); adapter.store_block(height, block).await.unwrap(); } let expected = starting_height; @@ -138,8 +147,11 @@ async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // when let height = BlockHeight::from(1u32); - let expected = Block::random(&mut rng); - adapter.store_block(height, expected.clone()).await.unwrap(); + let some_block = proto_block_with_height(height); + adapter + .store_block(height, some_block.clone()) + .await + .unwrap(); // then let expected = orphaned_height.unwrap(); diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index d4f787bc4b0..e3e9057d7d7 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -1,17 +1,17 @@ use crate::{ api::BlockAggregatorApi, - blocks::{ - Block, - BlockSource, - }, + blocks::BlockSource, db::BlockAggregatorDB, }; use 
fuel_core_services::{ + RunnableService, RunnableTask, StateWatcher, TaskNextAction, }; use fuel_core_types::fuel_types::BlockHeight; +use protobuf_types::Block as ProtoBlock; +use std::fmt::Debug; pub mod api; pub mod blocks; @@ -20,6 +20,90 @@ pub mod result; pub mod block_range_response; +pub mod protobuf_types; + +pub mod integration { + use crate::{ + BlockAggregator, + api::{ + BlockAggregatorApi, + protobuf_adapter::ProtobufAPI, + }, + blocks::importer_and_db_source::{ + BlockSerializer, + ImporterAndDbSource, + }, + db::BlockAggregatorDB, + protobuf_types::Block as ProtoBlock, + }; + use fuel_core_services::{ + ServiceRunner, + stream::BoxStream, + }; + use fuel_core_storage::{ + StorageInspect, + tables::{ + FuelBlocks, + Transactions, + }, + }; + use fuel_core_types::{ + fuel_types::BlockHeight, + services::block_importer::SharedImportResult, + }; + use std::net::SocketAddr; + + #[derive(Clone, Debug)] + pub struct Config { + pub addr: SocketAddr, + } + + #[allow(clippy::type_complexity)] + pub fn new_service( + config: &Config, + db: DB, + serializer: S, + onchain_db: OnchainDB, + importer: BoxStream, + ) -> ServiceRunner< + BlockAggregator< + ProtobufAPI, + DB, + ImporterAndDbSource, + ProtoBlock, + >, + > + where + DB: BlockAggregatorDB< + BlockRangeResponse = ::BlockRangeResponse, + Block = ProtoBlock, + >, + S: BlockSerializer + Clone + Send + Sync + 'static, + OnchainDB: Send + Sync, + OnchainDB: StorageInspect, + OnchainDB: StorageInspect, + E: std::fmt::Debug + Send + Sync, + { + let addr = config.addr.to_string(); + let api = ProtobufAPI::new(addr); + let db_starting_height = BlockHeight::from(0); + let db_ending_height = None; + let block_source = ImporterAndDbSource::new( + importer, + serializer, + onchain_db, + db_starting_height, + db_ending_height, + ); + let block_aggregator = BlockAggregator { + query: api, + database: db, + block_source, + new_block_subscriptions: Vec::new(), + }; + ServiceRunner::new(block_aggregator) + } +} 
#[cfg(test)] mod tests; @@ -29,33 +113,35 @@ pub mod block_aggregator; // but we can change the name later /// The Block Aggregator service, which aggregates blocks from a source and stores them in a database /// Queries can be made to the service to retrieve data from the `DB` -pub struct BlockAggregator { +pub struct BlockAggregator { query: Api, database: DB, block_source: Blocks, - new_block_subscriptions: Vec>, + new_block_subscriptions: Vec>, } pub struct NewBlock { height: BlockHeight, - block: Block, + block: ProtoBlock, } impl NewBlock { - pub fn new(height: BlockHeight, block: Block) -> Self { + pub fn new(height: BlockHeight, block: ProtoBlock) -> Self { Self { height, block } } - pub fn into_inner(self) -> (BlockHeight, Block) { + pub fn into_inner(self) -> (BlockHeight, ProtoBlock) { (self.height, self.block) } } -impl RunnableTask for BlockAggregator +impl RunnableTask + for BlockAggregator where - Api: BlockAggregatorApi, - DB: BlockAggregatorDB, + Api: BlockAggregatorApi, + DB: BlockAggregatorDB, Blocks: BlockSource, + ::Block: Clone + std::fmt::Debug + Send, BlockRange: Send, { async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { @@ -74,3 +160,30 @@ where Ok(()) } } + +#[async_trait::async_trait] +impl RunnableService + for BlockAggregator +where + Api: + BlockAggregatorApi + Send, + DB: BlockAggregatorDB + Send, + Blocks: BlockSource, + BlockRange: Send, + ::Block: Clone + Debug + Send, +{ + const NAME: &'static str = "BlockAggregatorService"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData {} + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + Ok(self) + } +} diff --git a/crates/services/block_aggregator_api/src/protobuf_types.rs b/crates/services/block_aggregator_api/src/protobuf_types.rs new file mode 100644 index 00000000000..648ac0e278d --- /dev/null +++ 
b/crates/services/block_aggregator_api/src/protobuf_types.rs @@ -0,0 +1 @@ +tonic::include_proto!("blockaggregator"); diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index b687f1ec6cc..ab91f71ece0 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -7,6 +7,8 @@ pub enum Error { BlockSource(anyhow::Error), #[error("Database error: {0}")] DB(anyhow::Error), + #[error("Serialization error: {0}")] + Serialization(anyhow::Error), } pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index ac069687760..d8b9a8744e5 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -36,21 +36,22 @@ use tokio::{ type BlockRangeResponse = BoxStream; -struct FakeApi { - receiver: Receiver>, +struct FakeApi { + receiver: Receiver>, } -impl FakeApi { - fn new() -> (Self, Sender>) { +impl FakeApi { + fn new() -> (Self, Sender>) { let (sender, receiver) = tokio::sync::mpsc::channel(1); let api = Self { receiver }; (api, sender) } } -impl BlockAggregatorApi for FakeApi { +impl BlockAggregatorApi for FakeApi { type BlockRangeResponse = T; - async fn await_query(&mut self) -> Result> { + type Block = B; + async fn await_query(&mut self) -> Result> { Ok(self.receiver.recv().await.unwrap()) } } @@ -75,6 +76,7 @@ impl FakeDB { } impl BlockAggregatorDB for FakeDB { + type Block = Block; type BlockRangeResponse = BlockRangeResponse; async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { @@ -111,11 +113,11 @@ impl BlockAggregatorDB for FakeDB { } struct FakeBlockSource { - blocks: Receiver, + blocks: Receiver>, } impl FakeBlockSource { - fn new() -> (Self, Sender) { + fn new() -> (Self, Sender>) { let (_sender, receiver) = tokio::sync::mpsc::channel(1); let _self = Self { 
blocks: receiver }; (_self, _sender) @@ -123,7 +125,9 @@ impl FakeBlockSource { } impl BlockSource for FakeBlockSource { - async fn next_block(&mut self) -> Result { + type Block = Block; + + async fn next_block(&mut self) -> Result> { self.blocks .recv() .await @@ -243,12 +247,8 @@ async fn run__new_block_subscription__sends_new_block() { let _ = srv.run(&mut watcher).await; // then - let (actual_height, actual_block) = await_response_with_timeout(response) - .await - .unwrap() - .into_inner(); + let actual_block = await_response_with_timeout(response).await.unwrap(); assert_eq!(expected_block, actual_block); - assert_eq!(expected_height, actual_height); // cleanup drop(source_sender); diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index faa6503271e..1a0b758d93c 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -260,6 +260,7 @@ impl Importer { result: CommitInput, ) -> Result<(), Error> { let (sender, receiver) = oneshot::channel(); + let command = Commands::CommitResult { result, permit, diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index ae85771e9de..f23b0e2126c 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -35,7 +35,7 @@ std = [ "ed25519-dalek/std", ] random = ["dep:rand", "fuel-vm-private/random"] -test-helpers = ["random", "fuel-vm-private/test-helpers"] +test-helpers = ["random", "fuel-vm-private/test-helpers", "dep:proptest"] aws-kms = ["dep:aws-sdk-kms"] fault-proving = [] @@ -53,6 +53,7 @@ fuel-vm-private = { workspace = true, default-features = false, features = [ ] } k256 = { version = "0.13", default-features = false, features = ["ecdsa"] } parking_lot = { workspace = true } +proptest = { workspace = true, optional = true } rand = { workspace = true, optional = true } secrecy = "0.8" serde = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/types/src/blockchain/header.rs 
b/crates/types/src/blockchain/header.rs index 836c7874b60..896cab86c3a 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -79,6 +79,17 @@ impl BlockHeader { } } + /// Get the application portion of the header. + pub fn application_v1( + &self, + ) -> Option<&ApplicationHeader> { + match self { + BlockHeader::V1(header) => Some(header.application()), + #[cfg(feature = "fault-proving")] + BlockHeader::V2(_header) => None, + } + } + /// Get the consensus portion of the header. pub fn consensus(&self) -> &ConsensusHeader { match self { @@ -133,6 +144,20 @@ impl BlockHeader { } } + /// Setter for the transactions count + #[cfg(feature = "test-helpers")] + pub fn set_transactions_count(&mut self, count: u16) { + match self { + BlockHeader::V1(header) => { + header.application_mut().generated.transactions_count = count + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.application_mut().generated.transactions_count = count + } + } + } + /// Getter for the message receipt count pub fn message_receipt_count(&self) -> u32 { match self { @@ -195,6 +220,25 @@ impl BlockHeader { }, } } + + /// Alias the consensus header into an empty one. 
+ pub fn as_empty_consensus_header(&self) -> ConsensusHeader { + match self { + BlockHeader::V1(header) => ConsensusHeader { + prev_root: header.consensus().prev_root, + height: header.consensus().height, + time: header.consensus().time, + generated: Empty {}, + }, + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => ConsensusHeader { + prev_root: header.consensus().prev_root, + height: header.consensus().height, + time: header.consensus().time, + generated: Empty {}, + }, + } + } } #[cfg(any(test, feature = "test-helpers"))] @@ -269,6 +313,45 @@ impl BlockHeader { } } + /// Set the message outbox root for the header + pub fn set_message_outbox_root(&mut self, root: Bytes32) { + match self { + BlockHeader::V1(header) => { + header.set_message_outbox_root(root); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_message_outbox_root(root); + } + } + } + + /// Set the message receipt count + pub fn set_message_receipt_count(&mut self, count: u32) { + match self { + BlockHeader::V1(header) => { + header.set_message_receipt_count(count); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_message_receipt_count(count); + } + } + } + + /// Set the event inbox root for the header + pub fn set_event_inbox_root(&mut self, root: Bytes32) { + match self { + BlockHeader::V1(header) => { + header.set_event_inbox_root(root); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + header.set_event_inbox_root(root); + } + } + } + /// Set the consensus parameters version pub fn set_consensus_parameters_version( &mut self, @@ -285,6 +368,22 @@ impl BlockHeader { } } + /// Set the state transition bytecode version + pub fn set_state_transition_bytecode_version( + &mut self, + version: StateTransitionBytecodeVersion, + ) { + match self { + BlockHeader::V1(header) => { + header.set_stf_version(version); + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + 
header.set_stf_version(version); + } + } + } + /// Set the stf version pub fn set_stf_version(&mut self, version: StateTransitionBytecodeVersion) { match self { @@ -674,7 +773,8 @@ impl PartialBlockHeader { } } -fn generate_txns_root(transactions: &[Transaction]) -> Bytes32 { +/// Generate the transactions root from the list of transactions. +pub fn generate_txns_root(transactions: &[Transaction]) -> Bytes32 { let transaction_ids = transactions.iter().map(|tx| tx.to_bytes()); // Generate the transaction merkle root. let mut transaction_tree = diff --git a/crates/types/src/blockchain/header/v1.rs b/crates/types/src/blockchain/header/v1.rs index 0d450a664e2..ac1e76cb7fd 100644 --- a/crates/types/src/blockchain/header/v1.rs +++ b/crates/types/src/blockchain/header/v1.rs @@ -132,6 +132,21 @@ impl BlockHeaderV1 { self.recalculate_metadata(); } + pub(crate) fn set_message_outbox_root(&mut self, root: crate::fuel_tx::Bytes32) { + self.application_mut().generated.message_outbox_root = root; + self.recalculate_metadata(); + } + + pub(crate) fn set_message_receipt_count(&mut self, count: u32) { + self.application_mut().generated.message_receipt_count = count; + self.recalculate_metadata(); + } + + pub(crate) fn set_event_inbox_root(&mut self, event_inbox_root: Bytes32) { + self.application_mut().generated.event_inbox_root = event_inbox_root; + self.recalculate_metadata(); + } + pub(crate) fn set_da_height( &mut self, da_height: crate::blockchain::primitives::DaBlockHeight, diff --git a/crates/types/src/blockchain/header/v2.rs b/crates/types/src/blockchain/header/v2.rs index 9aea3a2a614..b464fc7464d 100644 --- a/crates/types/src/blockchain/header/v2.rs +++ b/crates/types/src/blockchain/header/v2.rs @@ -155,6 +155,21 @@ impl BlockHeaderV2 { self.recalculate_metadata(); } + pub(crate) fn set_message_outbox_root(&mut self, root: crate::fuel_tx::Bytes32) { + self.application_mut().generated.message_outbox_root = root; + self.recalculate_metadata(); + } + + pub(crate) fn 
set_message_receipt_count(&mut self, count: u32) { + self.application_mut().generated.message_receipt_count = count; + self.recalculate_metadata(); + } + + pub(crate) fn set_event_inbox_root(&mut self, root: crate::fuel_tx::Bytes32) { + self.application_mut().generated.event_inbox_root = root; + self.recalculate_metadata(); + } + pub(crate) fn set_da_height( &mut self, da_height: crate::blockchain::primitives::DaBlockHeight, diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 3885007eb5d..37e187f6b81 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -1,17 +1,52 @@ use crate::{ + blockchain::{ + block::Block, + header::{ + GeneratedConsensusFields, + generate_txns_root, + }, + primitives::DaBlockHeight, + }, + fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ + BlobBody, + BlobIdExt, + Bytes32, ContractId, Create, Finalizable, + Input, + MessageId, Output, + StorageSlot, + Transaction, TransactionBuilder, + TxPointer, + UpgradePurpose, + UploadBody, + UtxoId, + Witness, + field::ReceiptsRoot, + input::{ + coin::CoinSigned, + contract::Contract as InputContract, + }, + output::contract::Contract as OutputContract, + policies::Policies, + }, + fuel_types::{ + BlobId, + BlockHeight, + Nonce, }, fuel_vm::{ Contract, Salt, }, }; +use proptest::prelude::*; use rand::Rng; +use tai64::Tai64; /// Helper function to create a contract creation transaction /// from a given contract bytecode. 
@@ -36,3 +71,603 @@ pub fn create_contract( .finalize(); (tx, contract_id) } + +#[allow(unused)] +fn arb_txs() -> impl Strategy> { + prop::collection::vec(arb_transaction(), 0..10) +} + +fn arb_script_transaction() -> impl Strategy { + ( + 1..10000u64, + any::<[u8; 32]>(), + prop::collection::vec(any::(), 0..100), + prop::collection::vec(any::(), 0..100), + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 0..4), + ) + .prop_map( + |( + script_gas_limit, + receipts_root, + script_bytes, + script_data, + policies, + inputs, + outputs, + witnesses, + )| { + let mut script = crate::fuel_tx::Transaction::script( + script_gas_limit, + script_bytes, + script_data, + policies, + inputs, + outputs, + witnesses, + ); + *script.receipts_root_mut() = receipts_root.into(); + Transaction::Script(script) + }, + ) +} + +prop_compose! { + fn arb_storage_slot()( + key in any::<[u8; 32]>(), + value in any::<[u8; 32]>(), + ) -> StorageSlot { + StorageSlot::new(key.into(), value.into()) + } +} + +prop_compose! { + fn arb_coin_output()( + to in arb_address(), + amount in any::(), + asset in arb_asset_id(), + ) -> Output { + Output::coin(to, amount, asset) + } +} + +prop_compose! { + fn arb_contract_output()( + input_index in any::(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + ) -> Output { + Output::Contract(OutputContract { + input_index, + balance_root: balance_root.into(), + state_root: state_root.into(), + }) + } +} + +prop_compose! { + fn arb_change_output()( + to in arb_address(), + amount in any::(), + asset in arb_asset_id(), + ) -> Output { + Output::change(to, amount, asset) + } +} + +prop_compose! { + fn arb_variable_output()( + to in arb_address(), + amount in any::(), + asset in arb_asset_id(), + ) -> Output { + Output::variable(to, amount, asset) + } +} + +prop_compose! 
{ + fn arb_contract_created_output()( + contract_id in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + ) -> Output { + Output::contract_created(ContractId::new(contract_id), state_root.into()) + } +} + +fn arb_output_any() -> impl Strategy { + prop_oneof![ + arb_coin_output(), + arb_contract_output(), + arb_change_output(), + arb_variable_output(), + arb_contract_created_output(), + ] +} + +fn arb_outputs() -> impl Strategy> { + prop::collection::vec(arb_output_any(), 0..10) +} + +fn arb_witness() -> impl Strategy { + prop::collection::vec(any::(), 0..128).prop_map(Witness::from) +} + +prop_compose! { + fn arb_policies()( + tip in prop::option::of(any::()), + witness_limit in prop::option::of(any::()), + maturity in prop::option::of(0..100u32), + max_fee in prop::option::of(any::()), + expiration in prop::option::of(0..100u32), + owner in prop::option::of(any::()), + ) -> Policies { + let mut policies = Policies::new(); + if let Some(tip) = tip { + policies = policies.with_tip(tip); + } + if let Some(witness_limit) = witness_limit { + policies = policies.with_witness_limit(witness_limit); + } + if let Some(value) = maturity { + policies = policies.with_maturity(BlockHeight::new(value)); + } + if let Some(max_fee) = max_fee { + policies = policies.with_max_fee(max_fee); + } + if let Some(value) = expiration { + policies = policies.with_expiration(BlockHeight::new(value)); + } + if let Some(owner) = owner { + policies = policies.with_owner(owner); + } + policies + } +} + +prop_compose! { + fn arb_msg_id()(inner in any::<[u8; 32]>()) -> MessageId { + MessageId::new(inner) + } +} + +#[allow(unused)] +fn arb_inputs() -> impl Strategy> { + prop::collection::vec(arb_input_any(), 0..10) +} + +prop_compose! 
{ + fn arb_coin_signed()( + utxo_id in arb_utxo_id(), + owner in arb_address(), + amount in 1..1_000_000u64, + asset_id in arb_asset_id(), + tx_pointer in arb_tx_pointer(), + witness_index in 0..1000u16, + ) -> Input { + let inner = CoinSigned { + utxo_id, + owner, + amount, + asset_id, + tx_pointer, + witness_index, + predicate_gas_used: Default::default(), + predicate: Default::default(), + predicate_data: Default::default(), + }; + Input::CoinSigned(inner) + } +} + +prop_compose! { + fn arb_coin_predicate()( + utxo_id in arb_utxo_id(), + owner in arb_address(), + amount in 1..1_000_000u64, + asset_id in arb_asset_id(), + tx_pointer in arb_tx_pointer(), + predicate_gas_used in any::(), + predicate in prop::collection::vec(any::(), 0..100), + predicate_data in prop::collection::vec(any::(), 0..100), + ) -> Input { + let inner = crate::fuel_tx::input::coin::CoinPredicate { + utxo_id, + owner, + amount, + asset_id, + tx_pointer, + witness_index: Default::default(), + predicate_gas_used, + predicate: predicate.into(), + predicate_data: predicate_data.into(), + }; + Input::CoinPredicate(inner) + } +} + +prop_compose! { + fn arb_contract_input_variant()( + utxo_id in arb_utxo_id(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + tx_pointer in arb_tx_pointer(), + contract_id in any::<[u8; 32]>(), + ) -> Input { + let contract = InputContract { + utxo_id, + balance_root: balance_root.into(), + state_root: state_root.into(), + tx_pointer, + contract_id: ContractId::new(contract_id), + }; + Input::Contract(contract) + } +} + +prop_compose! { + fn arb_nonce()(bytes in any::<[u8; 32]>()) -> Nonce { + Nonce::new(bytes) + } +} + +prop_compose! { + fn arb_message_coin_signed_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + witness_index in any::(), + ) -> Input { + Input::message_coin_signed(sender, recipient, amount, nonce, witness_index) + } +} + +prop_compose! 
{ + fn arb_message_coin_predicate_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + predicate_gas_used in any::(), + predicate in prop::collection::vec(any::(), 0..64), + predicate_data in prop::collection::vec(any::(), 0..64), + ) -> Input { + Input::message_coin_predicate( + sender, + recipient, + amount, + nonce, + predicate_gas_used, + predicate, + predicate_data, + ) + } +} + +prop_compose! { + fn arb_message_data_signed_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + witness_index in any::(), + data in prop::collection::vec(any::(), 0..128), + ) -> Input { + Input::message_data_signed(sender, recipient, amount, nonce, witness_index, data) + } +} + +prop_compose! { + fn arb_message_data_predicate_input()( + sender in arb_address(), + recipient in arb_address(), + amount in any::(), + nonce in arb_nonce(), + predicate_gas_used in any::(), + data in prop::collection::vec(any::(), 0..128), + predicate in prop::collection::vec(any::(), 0..64), + predicate_data in prop::collection::vec(any::(), 0..64), + ) -> Input { + Input::message_data_predicate( + sender, + recipient, + amount, + nonce, + predicate_gas_used, + data, + predicate, + predicate_data, + ) + } +} + +fn arb_input_any() -> impl Strategy { + prop_oneof![ + arb_coin_signed(), + arb_coin_predicate(), + arb_contract_input_variant(), + arb_message_coin_signed_input(), + arb_message_coin_predicate_input(), + arb_message_data_signed_input(), + arb_message_data_predicate_input(), + ] +} + +prop_compose! { + fn arb_utxo_id()( + inner in any::<[u8; 32]>(), + index in any::(), + ) -> UtxoId { + let tx_id = inner.into(); + UtxoId::new(tx_id, index) + } +} + +prop_compose! { + fn arb_address()(inner in any::<[u8; 32]>()) -> crate::fuel_types::Address { + crate::fuel_types::Address::new(inner) + } +} + +prop_compose! 
{ + fn arb_asset_id()(inner in any::<[u8; 32]>()) -> crate::fuel_types::AssetId { + crate::fuel_types::AssetId::new(inner) + } +} + +prop_compose! { + fn arb_tx_pointer()( + block_height in 0..1_000_000u32, + tx_index in 0..1_000u16, + ) -> TxPointer { + let block_height = block_height.into(); + TxPointer::new(block_height, tx_index) + } +} + +fn arb_msg_ids() -> impl Strategy> { + prop::collection::vec(arb_msg_id(), 0..10usize) +} + +fn arb_transaction() -> impl Strategy { + prop_oneof![ + arb_script_transaction(), + arb_create_transaction(), + arb_mint_transaction(), + arb_upgrade_transaction(), + arb_upload_transaction(), + arb_blob_transaction(), + ] +} + +prop_compose! { + fn arb_input_contract_core()( + utxo_id in arb_utxo_id(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + tx_pointer in arb_tx_pointer(), + contract_id in any::<[u8; 32]>(), + ) -> InputContract { + InputContract { + utxo_id, + balance_root: balance_root.into(), + state_root: state_root.into(), + tx_pointer, + contract_id: ContractId::new(contract_id), + } + } +} + +prop_compose! 
{ + fn arb_output_contract_core()( + input_index in any::(), + balance_root in any::<[u8; 32]>(), + state_root in any::<[u8; 32]>(), + ) -> OutputContract { + OutputContract { + input_index, + balance_root: balance_root.into(), + state_root: state_root.into(), + } + } +} + +fn arb_create_transaction() -> impl Strategy { + ( + arb_policies(), + any::<[u8; 32]>(), + prop::collection::vec(arb_storage_slot(), 0..4), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + ) + .prop_map( + |(policies, salt_bytes, storage_slots, inputs, outputs, witnesses)| { + let create = crate::fuel_tx::Transaction::create( + 0, + policies, + Salt::from(salt_bytes), + storage_slots, + inputs, + outputs, + witnesses, + ); + Transaction::Create(create) + }, + ) +} + +fn arb_mint_transaction() -> impl Strategy { + ( + arb_tx_pointer(), + arb_input_contract_core(), + arb_output_contract_core(), + any::(), + arb_asset_id(), + any::(), + ) + .prop_map( + |( + tx_pointer, + input_contract, + output_contract, + mint_amount, + mint_asset_id, + gas_price, + )| { + let mint = crate::fuel_tx::Transaction::mint( + tx_pointer, + input_contract, + output_contract, + mint_amount, + mint_asset_id, + gas_price, + ); + Transaction::Mint(mint) + }, + ) +} + +fn arb_upgrade_transaction() -> impl Strategy { + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + any::<[u8; 32]>(), + ) + .prop_map(|(policies, inputs, outputs, witnesses, checksum_bytes)| { + let purpose = UpgradePurpose::ConsensusParameters { + witness_index: 0, + checksum: checksum_bytes.into(), + }; + let upgrade = crate::fuel_tx::Transaction::upgrade( + purpose, policies, inputs, outputs, witnesses, + ); + Transaction::Upgrade(upgrade) + }) +} + +fn arb_upload_transaction() -> impl Strategy { + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 1..4), + any::<[u8; 32]>(), + prop::collection::vec(any::<[u8; 32]>(), 0..4), + 
1u16..=4, + any::(), + ) + .prop_map( + |( + policies, + inputs, + outputs, + witnesses, + root_bytes, + proof_entries, + subsections_number, + subsection_index_candidate, + )| { + let proof_set = proof_entries + .into_iter() + .map(Bytes32::from) + .collect::>(); + let subsections_number = subsections_number.max(1); + let body = UploadBody { + root: root_bytes.into(), + witness_index: 0, + subsection_index: subsection_index_candidate, + subsections_number, + proof_set, + }; + let upload = crate::fuel_tx::Transaction::upload( + body, policies, inputs, outputs, witnesses, + ); + Transaction::Upload(upload) + }, + ) +} + +fn arb_blob_transaction() -> impl Strategy { + ( + arb_policies(), + arb_inputs(), + arb_outputs(), + prop::collection::vec(arb_witness(), 0..3), + prop::collection::vec(any::(), 0..256), + ) + .prop_map(|(policies, inputs, outputs, witnesses, payload)| { + let blob_id = BlobId::compute(&payload); + let body = BlobBody { + id: blob_id, + witness_index: 0, + }; + let blob = crate::fuel_tx::Transaction::blob( + body, policies, inputs, outputs, witnesses, + ); + Transaction::Blob(blob) + }) +} + +prop_compose! { + fn arb_consensus_header()( + prev_root in any::<[u8; 32]>(), + time in any::(), + ) -> crate::blockchain::header::ConsensusHeader { + crate::blockchain::header::ConsensusHeader { + prev_root: prev_root.into(), + height: BlockHeight::new(0), + time: Tai64(time), + generated: GeneratedConsensusFields::default(), + } + } +} + +prop_compose! 
{ + /// Generate an arbitrary block with a variable number of transactions + pub fn arb_block()( + txs in arb_txs(), + da_height in any::(), + consensus_parameter_version in any::(), + state_transition_bytecode_version in any::(), + msg_ids in arb_msg_ids(), + event_root in any::<[u8; 32]>(), + mut consensus_header in arb_consensus_header(), + ) -> (Block, Vec, Bytes32) { + let mut fuel_block = Block::default(); + + *fuel_block.transactions_mut() = txs; + + fuel_block.header_mut().set_da_height(DaBlockHeight(da_height)); + fuel_block.header_mut().set_consensus_parameters_version(consensus_parameter_version); + fuel_block.header_mut().set_state_transition_bytecode_version(state_transition_bytecode_version); + + let count = fuel_block.transactions().len().try_into().expect("we shouldn't have more than u16::MAX transactions"); + let msg_root = msg_ids + .iter() + .fold(MerkleRootCalculator::new(), |mut tree, id| { + tree.push(id.as_ref()); + tree + }) + .root() + .into(); + let tx_root = generate_txns_root(fuel_block.transactions()); + let event_root = event_root.into(); + fuel_block.header_mut().set_transactions_count(count); + fuel_block.header_mut().set_message_receipt_count(msg_ids.len().try_into().expect("we shouldn't have more than u32::MAX messages")); + fuel_block.header_mut().set_transaction_root(tx_root); + fuel_block.header_mut().set_message_outbox_root(msg_root); + fuel_block.header_mut().set_event_inbox_root(event_root); + + // Consensus + // TODO: Include V2 Application with V2 Header + let application_hash = fuel_block.header().application_v1().unwrap().hash(); + consensus_header.generated.application_hash = application_hash; + fuel_block.header_mut().set_consensus_header(consensus_header); + (fuel_block, msg_ids, event_root) + } +} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f093de3530c..377d92b1740 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -19,6 +19,7 @@ aws-kms = ["dep:aws-config", "dep:aws-sdk-kms", "fuel-core-bin/aws-kms"] 
fault-proving = [ "fuel-core/fault-proving", "fuel-core-types/fault-proving", + "fuel-core-block-aggregator-api/fault-proving", "fuel-core-storage/fault-proving", "fuel-core-upgradable-executor/fault-proving", "fuel-core-poa/fault-proving", @@ -26,6 +27,7 @@ fault-proving = [ "fuel-core-compression-service/fault-proving", "fuel-core-benches/fault-proving", ] +rpc = ["fuel-core/rpc", "fuel-core-bin/rpc"] [dependencies] anyhow = { workspace = true } @@ -46,6 +48,7 @@ fuel-core = { path = "../crates/fuel-core", default-features = false, features = ] } fuel-core-benches = { path = "../benches" } fuel-core-bin = { path = "../bin/fuel-core", features = ["parquet", "p2p"] } +fuel-core-block-aggregator-api = { workspace = true } fuel-core-client = { path = "../crates/client", features = ["test-helpers"] } fuel-core-compression = { path = "../crates/compression" } fuel-core-compression-service = { path = "../crates/services/compression", features = [ diff --git a/tests/test-helpers/Cargo.toml b/tests/test-helpers/Cargo.toml index 41d270c7691..cb0f644d839 100644 --- a/tests/test-helpers/Cargo.toml +++ b/tests/test-helpers/Cargo.toml @@ -11,6 +11,7 @@ publish = false [dependencies] anyhow = { workspace = true } clap = { workspace = true } +cynic = { workspace = true } fuel-core = { path = "../../crates/fuel-core", default-features = false, features = [ "test-helpers", ] } @@ -27,6 +28,8 @@ fuel-core-storage = { path = "../../crates/storage", features = [ "test-helpers", ] } fuel-core-trace = { path = "../../crates/trace" } + +fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } fuel-core-txpool = { path = "../../crates/services/txpool_v2", features = [ "test-helpers", ] } @@ -38,3 +41,4 @@ reqwest = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tempfile = { workspace = true } +async-trait = {workspace = true} diff --git a/tests/test-helpers/src/client_ext.rs b/tests/test-helpers/src/client_ext.rs new file mode 100644 
index 00000000000..9feb4309579 --- /dev/null +++ b/tests/test-helpers/src/client_ext.rs @@ -0,0 +1,64 @@ +use cynic::QueryBuilder; +use fuel_core_client::client::{ + FuelClient, + schema::{ + BlockId, + U32, + block::{ + BlockByHeightArgs, + BlockByHeightArgsFields, + Consensus, + Header, + }, + schema, + tx::OpaqueTransaction, + }, +}; + +#[derive(cynic::QueryFragment, Debug)] +#[cynic( + schema_path = "../../crates/client/assets/schema.sdl", + graphql_type = "Query", + variables = "BlockByHeightArgs" +)] +pub struct FullBlockByHeightQuery { + #[arguments(height: $height)] + pub block: Option, +} + +#[derive(cynic::QueryFragment, Debug)] +#[cynic( + schema_path = "../../crates/client/assets/schema.sdl", + graphql_type = "Block" +)] +#[allow(dead_code)] +pub struct FullBlock { + pub id: BlockId, + pub header: Header, + pub consensus: Consensus, + pub transactions: Vec, +} + +#[async_trait::async_trait] +pub trait ClientExt { + async fn full_block_by_height( + &self, + height: u32, + ) -> std::io::Result>; +} + +#[async_trait::async_trait] +impl ClientExt for FuelClient { + async fn full_block_by_height( + &self, + height: u32, + ) -> std::io::Result> { + let query = FullBlockByHeightQuery::build(BlockByHeightArgs { + height: Some(U32(height)), + }); + + let block = self.query(query).await?.block; + + Ok(block) + } +} diff --git a/tests/test-helpers/src/lib.rs b/tests/test-helpers/src/lib.rs index 025765d15bd..71309f77756 100644 --- a/tests/test-helpers/src/lib.rs +++ b/tests/test-helpers/src/lib.rs @@ -37,6 +37,8 @@ pub mod counter_contract; pub mod fuel_core_driver; pub mod mint_contract; +pub mod client_ext; + pub fn predicate() -> Vec { vec![op::ret(1)].into_iter().collect::>() } diff --git a/tests/tests/blocks.rs b/tests/tests/blocks.rs index d1df25968e3..d18fa735083 100644 --- a/tests/tests/blocks.rs +++ b/tests/tests/blocks.rs @@ -17,6 +17,7 @@ use fuel_core_client::client::{ }, types::TransactionStatus, }; +use fuel_core_executor::executor::max_tx_count; use 
fuel_core_poa::Trigger; use fuel_core_storage::{ StorageAsMut, @@ -27,12 +28,17 @@ use fuel_core_storage::{ transactional::WriteTransaction, vm_storage::VmStorageRequirements, }; +use fuel_core_txpool::config::{ + HeavyWorkConfig, + PoolLimits, +}; use fuel_core_types::{ blockchain::{ block::CompressedBlock, consensus::Consensus, }, fuel_tx::*, + fuel_types::BlockHeight, secrecy::ExposeSecret, signer::SignMode, tai64::Tai64, @@ -42,16 +48,19 @@ use itertools::{ Itertools, rev, }; +use rand::{ + SeedableRng, + prelude::StdRng, +}; use rstest::rstest; use std::{ ops::Deref, time::Duration, }; -use test_helpers::send_graph_ql_query; - -use rand::{ - SeedableRng, - rngs::StdRng, +use test_helpers::{ + client_ext::ClientExt, + make_tx, + send_graph_ql_query, }; #[tokio::test] @@ -370,170 +379,94 @@ async fn missing_first_and_last_parameters_returns_an_error() { let result = send_graph_ql_query(&url, query).await; assert!(result.contains("The queries for the whole range is not supported")); } +#[tokio::test] +async fn get_full_block_with_tx() { + let srv = FuelService::from_database(Database::default(), Config::local_node()) + .await + .unwrap(); -mod full_block { - use super::*; - use cynic::QueryBuilder; - use fuel_core_client::client::{ - FuelClient, - schema::{ - BlockId, - U32, - block::{ - BlockByHeightArgs, - BlockByHeightArgsFields, - Consensus, - Header, - }, - schema, - tx::OpaqueTransaction, - }, - }; - use fuel_core_executor::executor::max_tx_count; - use fuel_core_txpool::config::{ - HeavyWorkConfig, - PoolLimits, - }; - use fuel_core_types::fuel_types::BlockHeight; - - #[derive(cynic::QueryFragment, Debug)] - #[cynic( - schema_path = "../crates/client/assets/schema.sdl", - graphql_type = "Query", - variables = "BlockByHeightArgs" - )] - pub struct FullBlockByHeightQuery { - #[arguments(height: $height)] - pub block: Option, - } - - #[derive(cynic::QueryFragment, Debug)] - #[cynic( - schema_path = "../crates/client/assets/schema.sdl", - graphql_type = 
"Block" - )] - #[allow(dead_code)] - pub struct FullBlock { - pub id: BlockId, - pub header: Header, - pub consensus: Consensus, - pub transactions: Vec, - } - - #[async_trait::async_trait] - pub trait ClientExt { - async fn full_block_by_height( - &self, - height: u32, - ) -> std::io::Result>; - } + let client = FuelClient::from(srv.bound_address); + let tx = Transaction::default_test_tx(); + client.submit_and_await_commit(&tx).await.unwrap(); - #[async_trait::async_trait] - impl ClientExt for FuelClient { - async fn full_block_by_height( - &self, - height: u32, - ) -> std::io::Result> { - let query = FullBlockByHeightQuery::build(BlockByHeightArgs { - height: Some(U32(height)), - }); + let block = client.full_block_by_height(1).await.unwrap().unwrap(); + assert_eq!(block.header.height.0, 1); + assert_eq!(block.transactions.len(), 2 /* mint + our tx */); +} - let block = self.query(query).await?.block; +#[tokio::test] +async fn too_many_transactions_are_split_in_blocks() { + // Given + let max_gas_limit = 50_000_000; + let mut rng = StdRng::seed_from_u64(2322); + + let local_node_config = Config::local_node(); + let txpool = fuel_core_txpool::config::Config { + pool_limits: PoolLimits { + max_txs: 2_000_000, + max_gas: u64::MAX, + max_bytes_size: usize::MAX, + }, + heavy_work: HeavyWorkConfig { + number_threads_to_verify_transactions: 4, + number_threads_p2p_sync: 0, + size_of_verification_queue: u16::MAX as usize, + size_of_p2p_sync_queue: 1, + }, + ..local_node_config.txpool + }; + let chain_config = local_node_config.snapshot_reader.chain_config().clone(); + let mut consensus_parameters = chain_config.consensus_parameters; + consensus_parameters.set_block_gas_limit(u64::MAX); + consensus_parameters + .set_block_transaction_size_limit(u64::MAX) + .expect("should be able to set the limit"); + let snapshot_reader = local_node_config.snapshot_reader.with_chain_config( + fuel_core::chain_config::ChainConfig { + consensus_parameters, + ..chain_config + }, + ); - 
Ok(block) - } - } + let patched_node_config = Config { + block_production: Trigger::Never, + txpool, + snapshot_reader, + ..local_node_config + }; - #[tokio::test] - async fn get_full_block_with_tx() { - let srv = FuelService::from_database(Database::default(), Config::local_node()) - .await - .unwrap(); + let srv = FuelService::new_node(patched_node_config).await.unwrap(); + let client = FuelClient::from(srv.bound_address); - let client = FuelClient::from(srv.bound_address); - let tx = Transaction::default_test_tx(); - client.submit_and_await_commit(&tx).await.unwrap(); + let tx_count: u64 = max_tx_count() as u64 + 100; + let txs = (1..=tx_count) + .map(|i| make_tx(&mut rng, i, max_gas_limit)) + .collect_vec(); - let block = client.full_block_by_height(1).await.unwrap().unwrap(); - assert_eq!(block.header.height.0, 1); - assert_eq!(block.transactions.len(), 2 /* mint + our tx */); + // When + for tx in txs.iter() { + let _tx_id = client.submit(tx).await.unwrap(); } - #[tokio::test] - async fn too_many_transactions_are_split_in_blocks() { - // Given - let max_gas_limit = 50_000_000; - let mut rng = StdRng::seed_from_u64(2322); - - let local_node_config = Config::local_node(); - let txpool = fuel_core_txpool::config::Config { - pool_limits: PoolLimits { - max_txs: 2_000_000, - max_gas: u64::MAX, - max_bytes_size: usize::MAX, - }, - heavy_work: HeavyWorkConfig { - number_threads_to_verify_transactions: 4, - number_threads_p2p_sync: 0, - size_of_verification_queue: u16::MAX as usize, - size_of_p2p_sync_queue: 1, - }, - ..local_node_config.txpool - }; - let chain_config = local_node_config.snapshot_reader.chain_config().clone(); - let mut consensus_parameters = chain_config.consensus_parameters; - consensus_parameters.set_block_gas_limit(u64::MAX); - consensus_parameters - .set_block_transaction_size_limit(u64::MAX) - .expect("should be able to set the limit"); - let snapshot_reader = local_node_config.snapshot_reader.with_chain_config( - 
fuel_core::chain_config::ChainConfig { - consensus_parameters, - ..chain_config - }, - ); - - let patched_node_config = Config { - block_production: Trigger::Never, - txpool, - snapshot_reader, - ..local_node_config - }; - - let srv = FuelService::new_node(patched_node_config).await.unwrap(); - let client = FuelClient::from(srv.bound_address); - - let tx_count: u64 = max_tx_count() as u64 + 100; - let txs = (1..=tx_count) - .map(|i| test_helpers::make_tx(&mut rng, i, max_gas_limit)) - .collect_vec(); - - // When - for tx in txs.iter() { - let _tx_id = client.submit(tx).await.unwrap(); - } + // Then + let _last_block_height: u32 = client.produce_blocks(2, None).await.unwrap().into(); + let second_last_block = client + .block_by_height(BlockHeight::from(1)) + .await + .unwrap() + .expect("Second last block should be defined"); + let last_block = client + .block_by_height(BlockHeight::from(2)) + .await + .unwrap() + .expect("Last Block should be defined"); - // Then - let _last_block_height: u32 = - client.produce_blocks(2, None).await.unwrap().into(); - let second_last_block = client - .block_by_height(BlockHeight::from(1)) - .await - .unwrap() - .expect("Second last block should be defined"); - let last_block = client - .block_by_height(BlockHeight::from(2)) - .await - .unwrap() - .expect("Last Block should be defined"); - - assert_eq!( - second_last_block.transactions.len(), - max_tx_count() as usize + 1 // Mint transaction for one block - ); - assert_eq!( - last_block.transactions.len(), - (tx_count as usize - (max_tx_count() as usize)) + 1 /* Mint transaction for second block */ - ); - } + assert_eq!( + second_last_block.transactions.len(), + max_tx_count() as usize + 1 // Mint transaction for one block + ); + assert_eq!( + last_block.transactions.len(), + (tx_count as usize - (max_tx_count() as usize)) + 1 /* Mint transaction for second block */ + ); } diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index 462742e5073..5e6b7458d9f 100644 --- 
a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -58,6 +58,9 @@ mod regenesis; mod relayer; #[cfg(not(feature = "only-p2p"))] mod required_fuel_block_height_extension; +#[cfg(feature = "rpc")] +mod rpc; + #[cfg(not(feature = "only-p2p"))] mod snapshot; #[cfg(not(feature = "only-p2p"))] diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs new file mode 100644 index 00000000000..aa6c564834b --- /dev/null +++ b/tests/tests/rpc.rs @@ -0,0 +1,156 @@ +#![allow(non_snake_case)] +use fuel_core::{ + database::Database, + service::{ + Config, + FuelService, + }, +}; +use fuel_core_block_aggregator_api::protobuf_types::{ + BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block::VersionedBlock as ProtoVersionedBlock, + block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, + header::VersionedHeader as ProtoVersionedHeader, +}; +use fuel_core_client::client::FuelClient; +use fuel_core_types::fuel_tx::*; +use futures::StreamExt; +use test_helpers::client_ext::ClientExt; + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_serialized_block_from_rpc() { + let config = Config::local_node(); + let rpc_url = config.rpc_config.addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let expected_header = expected_block.header; + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 
}; + let actual_block = if let Some(ProtoPayload::Literal(block)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + block + } else { + panic!("expected literal block payload"); + }; + let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); + + let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { + ProtoVersionedHeader::V1(v1_header) => v1_header.height, + ProtoVersionedHeader::V2(v2_header) => v2_header.height, + }; + // then + assert_eq!(expected_header.height.0, actual_height); +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_height__can_get_value_from_rpc() { + let config = Config::local_node(); + let rpc_url = config.rpc_config.addr; + + // given + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + let request = ProtoBlockHeightRequest {}; + let expected_height = 1; + let actual_height = rpc_client + .get_block_height(request) + .await + .unwrap() + .into_inner() + .height; + + // then + assert_eq!(expected_height, actual_height); +} + +#[tokio::test(flavor = "multi_thread")] +async fn new_block_subscription__can_get_expect_block() { + let config = Config::local_node(); + let rpc_url = config.rpc_config.addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = 
ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let request = ProtoNewBlockSubscriptionRequest {}; + let mut stream = rpc_client + .new_block_subscription(request) + .await + .unwrap() + .into_inner(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + // when + let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) + .await + .unwrap(); + let actual_block = + if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { + block + } else { + panic!("expected literal block payload"); + }; + + let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); + let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { + ProtoVersionedHeader::V1(v1_header) => v1_header.height, + ProtoVersionedHeader::V2(v2_header) => v2_header.height, + }; + + // then + let expected_height = 1; + assert_eq!(expected_height, actual_height); +} From b2b023b4b7eaff0be3d9a713e3e3754da8d89f49 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 12 Nov 2025 08:08:23 -0700 Subject: [PATCH 098/100] Fix cargo lock --- Cargo.lock | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a474974458..1f9d261cecc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6962,16 +6962,6 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" -[[package]] -name = "md-5" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" -dependencies = [ - "cfg-if", - "digest 0.10.7", -] - [[package]] name = "memchr" version = "2.7.6" From ab29ec61a1eeba9fe8e14de8def6583b27041858 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 17 Nov 2025 10:32:11 -0700 Subject: [PATCH 099/100] Ignore rustsec with tracking issue --- 
.cargo/audit.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 138d99b305f..f4cf03a5b5b 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -2,4 +2,5 @@ ignore = [ "RUSTSEC-2024-0421", # https://github.com/FuelLabs/fuel-core/issues/2488 "RUSTSEC-2025-0009", # https://github.com/FuelLabs/fuel-core/issues/2814 + "RUSTSEC-2025-0118", # https://github.com/FuelLabs/fuel-core/issues/3149 ] \ No newline at end of file From 412be8adb3297fdc6d319cb56298dcff2e7d2ab6 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 17 Nov 2025 10:41:07 -0700 Subject: [PATCH 100/100] Fix spellcheck, update changelog --- .changes/fixed/3150.md | 1 + .typos.toml | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 .changes/fixed/3150.md diff --git a/.changes/fixed/3150.md b/.changes/fixed/3150.md new file mode 100644 index 00000000000..1cd816c7d98 --- /dev/null +++ b/.changes/fixed/3150.md @@ -0,0 +1 @@ +Ignore rustsec advisories `RUSTSEC-2025-0118` \ No newline at end of file diff --git a/.typos.toml b/.typos.toml index d6069bd48a2..6f72f270976 100644 --- a/.typos.toml +++ b/.typos.toml @@ -4,4 +4,7 @@ extend-ignore-identifiers-re = [ "tro", "Tro", "typ", + "aloc", + "ALOC", + "Aloc", ] \ No newline at end of file