6 changes: 1 addition & 5 deletions .github/workflows/check.yml
@@ -69,11 +69,7 @@ jobs:
components: clippy

- name: Clippy Check
-uses: giraffate/clippy-action@v1
-with:
-reporter: "github-pr-check"
-github_token: ${{ secrets.GITHUB_TOKEN }}
-clippy_flags: --all-targets --all-features -- -D warnings -D clippy::pedantic
+run: cargo clippy --all-targets --all-features -- -D warnings -D clippy::pedantic

typos-cli:
name: typos
221 changes: 221 additions & 0 deletions src/block_scanner.rs
@@ -0,0 +1,221 @@
#![allow(unused)]

use std::{future, marker::PhantomData, time::Duration};

use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio_stream::wrappers::ReceiverStream;

use alloy::{
eips::BlockNumberOrTag,
network::Network,
providers::{Provider, RootProvider},
rpc::{
client::{ClientBuilder, RpcClient},
types::Header,
},
transports::TransportError,
};

// copied from https://github.com/taikoxyz/taiko-mono/blob/f4b3a0e830e42e2fee54829326389709dd422098/packages/taiko-client/pkg/chain_iterator/block_batch_iterator.go#L19
const DEFAULT_BLOCKS_READ_PER_EPOCH: usize = 1000;
const DEFAULT_RETRY_INTERVAL: Duration = Duration::from_secs(12);
const DEFAULT_BLOCK_CONFIRMATIONS: u64 = 0;
const BACK_OFF_MAX_RETRIES: u64 = 5;

// TODO: determine the exact default value
const DEFAULT_REORG_REWIND_DEPTH: u64 = 0;

// State sync aware retry settings
const STATE_SYNC_RETRY_INTERVAL: Duration = Duration::from_secs(30);
const STATE_SYNC_MAX_RETRIES: u64 = 12;

#[derive(Debug)]
pub enum BlockScannerError {
ErrEOF,
ErrContinue,
TerminalError(u64),
}

impl std::fmt::Display for BlockScannerError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BlockScannerError::ErrEOF => write!(f, "end of block batch iterator"),
BlockScannerError::ErrContinue => write!(f, "continue"),
BlockScannerError::TerminalError(height) => {
write!(f, "terminal error at block height {height}")
}
}
}
}

type EndIterFunc = fn();
type UpdateCurrentFunc = fn(Header);
pub type OnBlocksFunc<N> =
fn(<N as Network>::BlockResponse, UpdateCurrentFunc, EndIterFunc) -> anyhow::Result<()>;

pub struct BlockScannerBuilder<N: Network> {
blocks_read_per_epoch: usize,
start_height: BlockNumberOrTag,
end_height: BlockNumberOrTag,
on_blocks: OnBlocksFunc<N>,
reorg_rewind_depth: u64,
retry_interval: Duration,
block_confirmations: u64,
}

impl<N: Network> Default for BlockScannerBuilder<N> {
fn default() -> Self {
Self::new()
}
}

impl<N: Network> BlockScannerBuilder<N> {
#[must_use]
pub fn new() -> Self {
Self {
blocks_read_per_epoch: DEFAULT_BLOCKS_READ_PER_EPOCH,
start_height: BlockNumberOrTag::Earliest,
end_height: BlockNumberOrTag::Latest,
on_blocks: |_, _, _| Ok(()),
reorg_rewind_depth: DEFAULT_REORG_REWIND_DEPTH,
retry_interval: DEFAULT_RETRY_INTERVAL,
block_confirmations: DEFAULT_BLOCK_CONFIRMATIONS,
}
}

#[must_use]
pub fn with_blocks_read_per_epoch(&mut self, blocks_read_per_epoch: usize) -> &mut Self {
self.blocks_read_per_epoch = blocks_read_per_epoch;
self
}

#[must_use]
pub fn with_start_height(&mut self, start_height: BlockNumberOrTag) -> &mut Self {
self.start_height = start_height;
self
}

#[must_use]
pub fn with_end_height(&mut self, end_height: BlockNumberOrTag) -> &mut Self {
self.end_height = end_height;
self
}

#[must_use]
pub fn with_on_blocks(&mut self, on_blocks: OnBlocksFunc<N>) -> &mut Self {
self.on_blocks = on_blocks;
self
}

#[must_use]
pub fn with_reorg_rewind_depth(&mut self, reorg_rewind_depth: u64) -> &mut Self {
self.reorg_rewind_depth = reorg_rewind_depth;
self
}

#[must_use]
pub fn with_retry_interval(&mut self, retry_interval: Duration) -> &mut Self {
self.retry_interval = retry_interval;
self
}

#[must_use]
pub fn with_block_confirmations(&mut self, block_confirmations: u64) -> &mut Self {
self.block_confirmations = block_confirmations;
self
}

/// Connects to the provider via WebSocket
///
/// # Errors
///
/// Returns an error if the connection fails
pub async fn connect_ws(
self,
connect: alloy::transports::ws::WsConnect,
) -> Result<BlockScanner<RootProvider<N>, N>, TransportError> {
let client = ClientBuilder::default().ws(connect).await?;
Ok(self.connect_client(client))
}

/// Connects to the provider via IPC
///
/// # Errors
///
/// Returns an error if the connection fails
pub async fn connect_ipc<T>(
self,
connect: alloy::transports::ipc::IpcConnect<T>,
) -> Result<BlockScanner<RootProvider<N>, N>, TransportError>
where
alloy::transports::ipc::IpcConnect<T>: alloy::pubsub::PubSubConnect,
{
let client = ClientBuilder::default().ipc(connect).await?;
Ok(self.connect_client(client))
}

#[must_use]
pub fn connect_client(self, client: RpcClient) -> BlockScanner<RootProvider<N>, N> {
let provider = RootProvider::new(client);
self.connect_provider(provider)
}

pub fn connect_provider<P>(self, provider: P) -> BlockScanner<P, N>
where
P: Provider<N>,
{
let (sender, receiver) = mpsc::channel(self.blocks_read_per_epoch);

BlockScanner {
provider,
sender,
receiver,
current: Header::default(),
is_end: false,
blocks_read_per_epoch: self.blocks_read_per_epoch,
start_height: self.start_height,
end_height: self.end_height,
on_blocks: self.on_blocks,
reorg_rewind_depth: self.reorg_rewind_depth,
retry_interval: self.retry_interval,
block_confirmations: self.block_confirmations,
network: PhantomData,
}
}
}

/// `BlockScanner` iterates over blocks in batches between the given start and end heights,
/// with awareness of chain reorganizations.
pub struct BlockScanner<P: Provider<N>, N: Network> {
provider: P,
sender: Sender<Result<N::BlockResponse, BlockScannerError>>,
receiver: Receiver<Result<N::BlockResponse, BlockScannerError>>,
blocks_read_per_epoch: usize,
start_height: BlockNumberOrTag,
end_height: BlockNumberOrTag,
current: Header,
Collaborator: not sure if this makes a difference, but to save memory we could just store the block number rather than the entire header?

Collaborator (Author): we need the hash too for the reorg detection.
on_blocks: OnBlocksFunc<N>,
is_end: bool,
reorg_rewind_depth: u64,
retry_interval: Duration,
block_confirmations: u64,
network: PhantomData<fn() -> N>,
}
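As the review thread above notes, the full `Header` is kept (rather than only the block number) because reorg detection needs the block hash as well. A minimal sketch of that check, assuming a hypothetical `is_reorged` helper that is not part of this PR:

use alloy::rpc::types::Header;

/// Hypothetical helper: given the header the scanner processed last and the
/// header the node currently reports at that same height, decide whether the
/// chain reorganized underneath us.
fn is_reorged(last_processed: &Header, now_at_same_height: &Header) -> bool {
    // Same height but a different hash means the canonical chain changed, so
    // the scanner should rewind by `reorg_rewind_depth` before continuing.
    last_processed.hash != now_at_same_height.hash
}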

impl<P, N> BlockScanner<P, N>
where
P: Provider<N>,
N: Network,
{
pub async fn start(self) -> ReceiverStream<Result<N::BlockResponse, BlockScannerError>> {
let receiver_stream = ReceiverStream::new(self.receiver);

// Placeholder await: keeps this async fn awaiting something (e.g. for
// clippy::unused_async) until the real batch-fetching loop lands here.
future::ready(()).await;

tokio::spawn(async move {
// For now the spawned task only signals end-of-iteration; the send error is
// ignored because it only means the consumer dropped the stream.
let _ = self.sender.send(Err(BlockScannerError::ErrEOF)).await;
});

receiver_stream
}
}
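For orientation, a minimal usage sketch of the builder API above. The `Ethereum` network type, the endpoint URL, and the block numbers are illustrative, and it assumes the batch-fetching loop eventually feeds blocks into the stream (today `start` only emits `ErrEOF`); none of this is part of the PR:

use alloy::{network::Ethereum, transports::ws::WsConnect};
use tokio_stream::StreamExt;

async fn run_scanner() -> anyhow::Result<()> {
    let mut builder = BlockScannerBuilder::<Ethereum>::new();
    let _ = builder
        .with_start_height(BlockNumberOrTag::Number(1_000_000))
        .with_end_height(BlockNumberOrTag::Latest)
        .with_blocks_read_per_epoch(500);

    // Consumes the builder and opens the WebSocket connection.
    let scanner = builder.connect_ws(WsConnect::new("ws://localhost:8546")).await?;

    // `start` hands back a stream of block responses (or scanner errors).
    let mut stream = scanner.start().await;
    while let Some(item) = stream.next().await {
        match item {
            Ok(block) => println!("received block: {block:?}"),
            Err(BlockScannerError::ErrEOF) => break,
            Err(err) => eprintln!("scanner error: {err}"),
        }
    }
    Ok(())
}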
11 changes: 11 additions & 0 deletions src/builder.rs
@@ -24,36 +24,47 @@ impl ScannerBuilder {
}
}

#[must_use]
pub fn start_block(mut self, start_block: u64) -> Self {
self.start_block = Some(start_block);
self
}

#[must_use]
pub fn end_block(mut self, end_block: u64) -> Self {
self.end_block = Some(end_block);
self
}

#[must_use]
pub fn max_blocks_per_filter(mut self, max_blocks: u64) -> Self {
self.max_blocks_per_filter = max_blocks;
self
}

#[must_use]
pub fn add_event_filter(mut self, filter: EventFilter) -> Self {
self.tracked_events.push(filter);
self
}

#[must_use]
pub fn add_event_filters(mut self, filters: Vec<EventFilter>) -> Self {
self.tracked_events.extend(filters);
self
}

#[must_use]
pub fn callback_config(mut self, cfg: CallbackConfig) -> Self {
self.callback_config = cfg;
self
}

/// Builds the scanner
///
/// # Errors
///
/// Returns an error if the scanner fails to build
pub async fn build(self) -> anyhow::Result<Scanner> {
Scanner::new(
self.rpc_url,
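The builder.rs diff is truncated above, but the visible API is enough for a rough usage sketch. The `ScannerBuilder::new(rpc_url)` constructor, the block numbers, and the way an `EventFilter` is obtained are assumptions, since none of them appear in this diff:

// Hypothetical usage of the builder shown above; the constructor signature and
// the `EventFilter` value are assumed rather than taken from this PR.
async fn build_scanner(rpc_url: String, transfer_filter: EventFilter) -> anyhow::Result<Scanner> {
    ScannerBuilder::new(rpc_url) // assumed constructor
        .start_block(21_000_000)
        .end_block(21_100_000)
        .max_blocks_per_filter(2_000)
        .add_event_filter(transfer_filter)
        .build()
        .await
}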