Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
886 changes: 576 additions & 310 deletions Cargo.lock

Large diffs are not rendered by default.

23 changes: 13 additions & 10 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -116,16 +116,17 @@ large_enum_variant = "allow"
[workspace.dependencies]
# alloy
alloy-chains = { version = "0.1.32", default-features = false }
alloy-consensus = { version = "0.12.2", default-features = false }
alloy-eips = { version = "0.12.2", default-features = false }
alloy-json-rpc = { version = "0.12.2", default-features = false }
alloy-network = { version = "0.12.2", default-features = false }
alloy-primitives = { version = "0.8.20", default-features = false }
alloy-provider = { version = "0.12.2", default-features = false }
alloy-rpc-types-engine = { version = "0.12.2", default-features = false }
alloy-rpc-types-eth = { version = "0.12.2", default-features = false }
alloy-sol-types = { version = "0.8.20", default-features = false }
alloy-transport = { version = "0.12.2", default-features = false }
alloy-consensus = { version = "0.13.0", default-features = false }
alloy-eips = { version = "0.13.0", default-features = false }
alloy-json-rpc = { version = "0.13.0", default-features = false }
alloy-network = { version = "0.13.0", default-features = false }
alloy-primitives = { version = "0.8.25", default-features = false }
alloy-provider = { version = "0.13.0", default-features = false }
alloy-rpc-client = { version = "0.13.0", default-features = false }
alloy-rpc-types-engine = { version = "0.13.0", default-features = false }
alloy-rpc-types-eth = { version = "0.13.0", default-features = false }
alloy-sol-types = { version = "0.8.25", default-features = false }
alloy-transport = { version = "0.13.0", default-features = false }

# scroll-alloy
scroll-alloy-consensus = { git = "https://github.com/scroll-tech/reth.git", default-features = false }
Expand Down Expand Up @@ -167,10 +168,12 @@ scroll-migration = { path = "crates/database/migration" }
# misc
arbitrary = { version = "1.4", default-features = false }
async-trait = "0.1"
auto_impl = "1.2"
derive_more = { version = "2.0", default-features = false }
eyre = "0.6"
futures = { version = "0.3", default-features = false }
rand = { version = "0.9" }
reqwest = "0.12"
secp256k1 = { version = "0.29", default-features = false }
thiserror = "2.0"
tokio = { version = "1.39", default-features = false }
Expand Down
16 changes: 11 additions & 5 deletions bin/rollup/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,13 @@ exclude.workspace = true
alloy-chains.workspace = true
alloy-provider.workspace = true
alloy-rpc-types-engine.workspace = true
alloy-rpc-client.workspace = true
alloy-transport.workspace = true

# scroll-alloy
scroll-alloy-consensus.workspace = true
scroll-alloy-rpc-types-engine.workspace = true
scroll-alloy-provider.workspace = true
scroll-alloy-rpc-types-engine.workspace = true

# reth
reth-cli-util = { git = "https://github.com/scroll-tech/reth.git" }
Expand All @@ -39,15 +41,16 @@ reth-scroll-cli = { git = "https://github.com/scroll-tech/reth.git" }
reth-scroll-engine-primitives = { git = "https://github.com/scroll-tech/reth.git" }
reth-scroll-node = { workspace = true, features = ["skip-state-root-validation"] }
reth-scroll-primitives = { workspace = true }
scroll-wire.workspace = true
scroll-network.workspace = true
scroll-engine = { workspace = true, features = ["test-utils"] }
scroll-db = { workspace = true }
scroll-engine = { workspace = true, features = ["test-utils"] }
scroll-migration.workspace = true
scroll-network.workspace = true
scroll-wire.workspace = true

# rollup-node
rollup-node-manager.workspace = true
rollup-node-indexer.workspace = true
rollup-node-manager.workspace = true
rollup-node-providers.workspace = true
rollup-node-watcher.workspace = true

# misc
Expand All @@ -72,6 +75,7 @@ reth-tracing = { git = "https://github.com/scroll-tech/reth.git" }

# misc
futures.workspace = true
reqwest.workspace = true
serde_json = { version = "1.0.94", default-features = false, features = ["alloc"] }
tokio = { workspace = true, features = ["full"] }

Expand All @@ -85,9 +89,11 @@ test-utils = [
"scroll-engine/test-utils",
"reth-payload-builder/test-utils",
"reth-payload-builder/test-utils",
"reth-scroll-node/test-utils",
"rollup-node-watcher/test-utils",
"scroll-db/test-utils",
"scroll-db/test-utils",
"rollup-node-providers/test-utils",
]
serde = [
"alloy-primitives/serde",
Expand Down
29 changes: 27 additions & 2 deletions bin/rollup/src/args.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,42 @@
use crate::constants;
use std::path::PathBuf;

/// A struct that represents the arguments for the rollup node.
#[derive(Debug, clap::Args)]
pub struct ScrollRollupNodeArgs {
    /// A bool to represent if new blocks should be bridged from the eth wire protocol to the
    /// scroll wire protocol.
    #[arg(long, default_value_t = false)]
    pub enable_eth_scroll_wire_bridge: bool,
    /// A bool that represents if the scroll wire protocol should be enabled.
    #[arg(long, default_value_t = false)]
    pub enable_scroll_wire: bool,
    /// Database path. When absent, the node derives a sqlite path from the datadir.
    #[arg(long)]
    pub database_path: Option<PathBuf>,
    /// The EngineAPI URL.
    #[arg(long)]
    pub engine_api_url: Option<reqwest::Url>,
    /// The L1 provider arguments (RPC endpoint, retry and rate-limit configuration).
    // NOTE: the L1 RPC URL lives in `L1ProviderArgs`; a duplicate top-level
    // `l1_rpc_url` field was removed — it had no `#[arg(long)]` (clap would parse
    // it as a positional argument) and was never read by the node builder.
    #[command(flatten)]
    pub l1_provider_args: L1ProviderArgs,
}

/// The CLI arguments used to configure the L1 provider: the RPC endpoint plus the
/// retry / rate-limit parameters applied to it via a `RetryBackoffLayer`.
#[derive(Debug, clap::Args)]
pub struct L1ProviderArgs {
/// The URL of the L1 RPC endpoint. When absent, the L1 watcher is not spawned.
#[arg(long)]
pub l1_rpc_url: Option<reqwest::Url>,
/// The URL of the L1 Beacon chain RPC endpoint, used to build the beacon provider
/// backing the online L1 (blob) provider.
#[arg(long)]
pub beacon_rpc_url: reqwest::Url,
/// The compute units per second budget used by the provider's retry/rate-limit layer.
/// NOTE(review): no CLI default is declared, so this flag is required — confirm intended.
#[arg(long)]
pub compute_units_per_second: u64,
/// The maximum number of retries for provider requests.
#[arg(long, default_value_t = constants::PROVIDER_MAX_RETRIES)]
pub max_retries: u32,
/// The initial backoff between provider retries — presumably milliseconds, per the
/// retry layer's convention; TODO confirm against `RetryBackoffLayer` docs.
#[arg(long, default_value_t = constants::PROVIDER_INITIAL_BACKOFF)]
pub initial_backoff: u64,
}
11 changes: 11 additions & 0 deletions bin/rollup/src/constants.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
/// The L1 block number from which the L1 watcher begins processing.
pub const WATCHER_START_BLOCK_NUMBER: u64 = 18_318_215;

/// Capacity (number of entries) of the blob cache handed to the online L1 provider.
pub const PROVIDER_BLOB_CACHE_SIZE: usize = 100;

/// Maximum number of retry attempts for L1 provider requests.
pub const PROVIDER_MAX_RETRIES: u32 = 10;

/// Initial backoff applied between L1 provider retry attempts.
pub const PROVIDER_INITIAL_BACKOFF: u64 = 100;
5 changes: 4 additions & 1 deletion bin/rollup/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
//! Scroll Network Bridge Components.

mod args;
pub use args::ScrollRollupNodeArgs;
pub use args::{L1ProviderArgs, ScrollRollupNodeArgs};

mod constants;
pub use constants::{PROVIDER_INITIAL_BACKOFF, PROVIDER_MAX_RETRIES, WATCHER_START_BLOCK_NUMBER};

mod import;
pub use import::BridgeBlockImport;
Expand Down
48 changes: 36 additions & 12 deletions bin/rollup/src/network.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
use alloy_provider::ProviderBuilder;
use alloy_rpc_client::RpcClient;
use alloy_transport::layers::RetryBackoffLayer;
use migration::MigratorTrait;
use reth_network::{config::NetworkMode, NetworkManager, PeersInfo};
use reth_node_api::TxTy;
Expand All @@ -8,18 +10,21 @@ use reth_rpc_builder::config::RethRpcServerConfig;
use reth_scroll_chainspec::ScrollChainSpec;
use reth_scroll_primitives::ScrollPrimitives;
use reth_transaction_pool::{PoolTransaction, TransactionPool};
use rollup_node_indexer::Indexer;
use rollup_node_manager::{PoAConsensus, RollupNodeManager};
use rollup_node_providers::{beacon_provider, DatabaseL1MessageProvider, OnlineL1Provider};
use rollup_node_watcher::L1Watcher;
use scroll_alloy_provider::ScrollAuthEngineApiProvider;
use scroll_db::{Database, DatabaseConnectionProvider};
use scroll_engine::{test_utils::NoopExecutionPayloadProvider, EngineDriver, ForkchoiceState};
use scroll_network::NetworkManager as ScrollNetworkManager;
use scroll_wire::{ProtocolHandler, ScrollWireConfig};
use std::{path::PathBuf, sync::Arc};
use std::sync::Arc;
use tracing::info;

use crate::ScrollRollupNodeArgs;
use crate::{
constants::PROVIDER_BLOB_CACHE_SIZE, L1ProviderArgs, ScrollRollupNodeArgs,
WATCHER_START_BLOCK_NUMBER,
};

/// The network builder for the eth-wire to scroll-wire bridge.
#[derive(Debug)]
Expand Down Expand Up @@ -99,34 +104,53 @@ where

// Instantiate the database
let database_path = if let Some(db_path) = self.config.database_path {
db_path
db_path.to_string_lossy().to_string()
} else {
PathBuf::from("sqlite://").join(ctx.config().datadir().db().join("scroll.db"))
// append the path using strings as using `join(...)` overwrites "sqlite://"
// if the path is absolute.
let path = ctx.config().datadir().db().join("scroll.db");
"sqlite://".to_string() + &*path.to_string_lossy()
};
let db = Database::new(database_path.to_str().unwrap()).await?;
let db = Database::new(&database_path).await?;

// Run the database migrations
migration::Migrator::up(db.get_connection(), None).await?;

// Wrap the database in an Arc
let db = Arc::new(db);

// Spawn the indexer
let indexer = Indexer::new(db.clone());

// Spawn the L1Watcher
let l1_notification_rx = if let Some(l1_rpc_url) = self.config.l1_rpc_url {
Some(L1Watcher::spawn(ProviderBuilder::new().on_http(l1_rpc_url), 20035952).await)
let l1_provider_args = self.config.l1_provider_args;
let l1_notification_rx = if let Some(l1_rpc_url) = l1_provider_args.l1_rpc_url {
let L1ProviderArgs { max_retries, initial_backoff, compute_units_per_second, .. } =
l1_provider_args;
let client = RpcClient::builder()
.layer(RetryBackoffLayer::new(
max_retries,
initial_backoff,
compute_units_per_second,
))
.http(l1_rpc_url);
let provider = ProviderBuilder::new().on_client(client);
Some(L1Watcher::spawn(provider, WATCHER_START_BLOCK_NUMBER).await)
} else {
None
};

// Construct the l1 provider.
let beacon_provider = beacon_provider(l1_provider_args.beacon_rpc_url.to_string());
let l1_messages_provider = DatabaseL1MessageProvider::new(db.clone());
let l1_provider =
OnlineL1Provider::new(beacon_provider, PROVIDER_BLOB_CACHE_SIZE, l1_messages_provider)
.await;

// Spawn the rollup node manager
let rollup_node_manager = RollupNodeManager::new(
scroll_network_manager,
engine,
l1_provider,
db,
l1_notification_rx,
indexer,
ForkchoiceState::genesis(
ctx.config().chain.chain.try_into().expect("must be a named chain"),
),
Expand Down
11 changes: 9 additions & 2 deletions bin/rollup/tests/e2e.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ use reth_scroll_chainspec::ScrollChainSpec;
use reth_scroll_engine_primitives::ScrollPayloadBuilderAttributes;
use reth_scroll_node::{ScrollNetworkPrimitives, ScrollNode};
use reth_tasks::TaskManager;
use rollup_node::ScrollRollupNodeArgs;
use rollup_node::{L1ProviderArgs, ScrollRollupNodeArgs};
use scroll_alloy_rpc_types_engine::ScrollPayloadAttributes;
use scroll_network::{NewBlockWithPeer, SCROLL_MAINNET};
use scroll_wire::ScrollWireConfig;
Expand Down Expand Up @@ -136,7 +136,14 @@ pub async fn build_bridge_node(
enable_eth_scroll_wire_bridge: true,
enable_scroll_wire: true,
database_path: Some(PathBuf::from("sqlite::memory:")),
l1_rpc_url: None,
l1_provider_args: L1ProviderArgs {
l1_rpc_url: None,
// <https://docs.arbitrum.io/run-arbitrum-node/l1-ethereum-beacon-chain-rpc-providers>
beacon_rpc_url: reqwest::Url::parse("https://eth-beacon-chain.drpc.org/rest/")?,
compute_units_per_second: 100,
max_retries: 10,
initial_backoff: 100,
},
engine_api_url: None,
};
let node = ScrollNode;
Expand Down
2 changes: 1 addition & 1 deletion crates/codec/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ fn get_codec_version(calldata: &[u8]) -> Result<u8, DecodingError> {
const CODEC_VERSION_LEN: usize = 32;
const CODEC_VERSION_OFFSET_END: usize = CODEC_VERSION_OFFSET_START + CODEC_VERSION_LEN;
const HIGH_BYTES_MASK: U256 =
U256::from_limbs([u64::MAX, u64::MAX, u64::MAX, 0xffffffffffffff00]);
U256::from_limbs([0xffffffffffffff00, u64::MAX, u64::MAX, u64::MAX]);

let version = calldata
.get(CODEC_VERSION_OFFSET_START..CODEC_VERSION_OFFSET_END)
Expand Down
3 changes: 1 addition & 2 deletions crates/database/db/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,9 @@ rollup-node-primitives.workspace = true

# misc
async-trait.workspace = true
auto_impl.workspace = true
futures.workspace = true
sea-orm = { version = "1.1.0", features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror.workspace = true
tokio = { workspace = true, features = ["macros", "sync"] }
tracing.workspace = true
Expand Down
1 change: 1 addition & 0 deletions crates/database/db/src/connection.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
/// The [`DatabaseConnectionProvider`] trait provides a way to get a connection to the database.
/// This is implemented by the [`crate::Database`] and [`crate::DatabaseTransaction`] types.
#[auto_impl::auto_impl(Arc)]
pub trait DatabaseConnectionProvider {
/// The type of the database connection.
type Connection: sea_orm::ConnectionTrait + sea_orm::StreamTrait;
Expand Down
40 changes: 37 additions & 3 deletions crates/database/db/src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,16 @@ impl From<DatabaseConnection> for Database {

#[cfg(test)]
mod test {
use crate::{operations::DatabaseOperations, test_utils::setup_test_db};
use crate::{
models, operations::DatabaseOperations, test_utils::setup_test_db,
DatabaseConnectionProvider,
};

use arbitrary::{Arbitrary, Unstructured};
use futures::StreamExt;
use rand::Rng;
use rollup_node_primitives::{BatchCommitData, L1MessageWithBlockNumber};
use sea_orm::{ColumnTrait, EntityTrait, QueryFilter};

#[tokio::test]
async fn test_database_round_trip_batch_commit() {
Expand All @@ -62,16 +66,46 @@ mod test {
rand::rng().fill(bytes.as_mut_slice());
let mut u = Unstructured::new(&bytes);

// Generate a random BatchInputV1.
// Generate a random BatchCommitData.
let batch_commit = BatchCommitData::arbitrary(&mut u).unwrap();

// Round trip the BatchInput through the database.
// Round trip the BatchCommitData through the database.
db.insert_batch(batch_commit.clone()).await.unwrap();
let batch_commit_from_db =
db.get_batch_by_index(batch_commit.index).await.unwrap().unwrap();
assert_eq!(batch_commit, batch_commit_from_db);
}

// Verifies that `finalize_batch` persists the finalized L1 block number for a
// previously stored batch commit, by reading the row back through the sea-orm
// entity directly rather than through the `DatabaseOperations` API.
#[tokio::test]
async fn test_database_finalize_batch_commit() {
// Set up the test database.
let db = setup_test_db().await;

// Generate unstructured bytes to drive `Arbitrary` value generation.
let mut bytes = [0u8; 1024];
rand::rng().fill(bytes.as_mut_slice());
let mut u = Unstructured::new(&bytes);

// Generate a random BatchCommitData.
let batch_commit = BatchCommitData::arbitrary(&mut u).unwrap();

// Store the batch and finalize it at a random L1 block number.
let finalized_block_number = u64::arbitrary(&mut u).unwrap();
db.insert_batch(batch_commit.clone()).await.unwrap();
db.finalize_batch(batch_commit.hash, finalized_block_number).await.unwrap();

// Verify the finalized_block_number column was updated for the row keyed by the
// batch hash. The column is stored as i64, so the value is cast back to u64 for
// comparison; the u64 -> i64 -> u64 round trip is bit-preserving.
let finalized_block_number_from_db = models::batch_commit::Entity::find()
.filter(models::batch_commit::Column::Hash.eq(batch_commit.hash.to_vec()))
.one(db.get_connection())
.await
.unwrap()
.unwrap()
.finalized_block_number
.unwrap();
assert_eq!(finalized_block_number, finalized_block_number_from_db as u64);
}

#[tokio::test]
async fn test_database_round_trip_l1_message() {
// Set up the test database.
Expand Down
2 changes: 1 addition & 1 deletion crates/database/db/src/models/batch_commit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ pub struct Model {
block_timestamp: i64,
calldata: Vec<u8>,
blob_hash: Option<Vec<u8>>,
finalized_block_number: Option<i64>,
pub(crate) finalized_block_number: Option<i64>,
}

/// The relation for the batch input model.
Expand Down
2 changes: 1 addition & 1 deletion crates/database/db/src/operations.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ pub trait DatabaseOperations: DatabaseConnectionProvider {
.one(self.get_connection())
.await?
{
tracing::trace!(target: "scroll::db", batch_hash = ?batch_hash, block_number, "Finalizing batch input in database.");
tracing::trace!(target: "scroll::db", batch_hash = ?batch_hash, block_number, "Finalizing batch commit in database.");
let mut batch: models::batch_commit::ActiveModel = batch.into();
batch.finalized_block_number = Set(Some(block_number as i64));
batch.update(self.get_connection()).await?;
Expand Down
Loading
Loading