1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 1 addition & 3 deletions crates/chain-orchestrator/src/lib.rs
@@ -456,9 +456,7 @@ impl<
Box::pin(async move {
let head = block_infos.last().expect("block info must not be empty").clone();
let tx = database.tx_mut().await?;
- for block in block_infos {
-     tx.insert_block(block, batch_info).await?;
- }
+ tx.insert_blocks(block_infos, batch_info).await?;
tx.commit().await?;
Result::<_, ChainOrchestratorError>::Ok(Some(
ChainOrchestratorEvent::L2ConsolidatedBlockCommitted(head),
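The change above collapses the per-block insert loop into a single batched call. As a rough sketch of what such a helper can look like (hypothetical free-function form; the trait name and signature are assumptions, and the real `insert_blocks` in the database crate may batch at the SQL level instead):

```rust
// Hypothetical sketch only; the real `insert_blocks` in this repository may differ.
// Assumes `BatchInfo: Copy`, as the original per-block loop reused it on every iteration,
// and a write-operations trait exposing `insert_block` (trait name assumed here).
async fn insert_blocks<Tx: DatabaseWriteOperations>(
    tx: &Tx,
    blocks: Vec<BlockInfo>,
    batch_info: BatchInfo,
) -> Result<(), DatabaseError> {
    for block in blocks {
        // A real implementation would more likely issue one bulk INSERT here
        // rather than one statement per block.
        tx.insert_block(block, batch_info).await?;
    }
    Ok(())
}
```

Either way, the call site now awaits a single database operation per batch instead of one per block.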
4 changes: 3 additions & 1 deletion crates/database/db/src/operations.rs
@@ -573,7 +573,9 @@ pub trait DatabaseReadOperations: ReadConnectionProvider + Sync {
})?)
}

- /// Get the latest safe L2 ([`BlockInfo`], [`BatchInfo`]) from the database.
+ /// Get the latest safe/finalized L2 ([`BlockInfo`], [`BatchInfo`]) from the database. Until we
+ /// update the batch handling logic with issue #273, we don't differentiate between safe and
+ /// finalized l2 blocks.
async fn get_latest_safe_l2_info(
&self,
) -> Result<Option<(BlockInfo, BatchInfo)>, DatabaseError> {
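For context, a sketch of how a caller might consume this accessor on startup (not code from this PR; the read transaction `tx` and the forkchoice state `fcs` are assumed to be in scope):

```rust
// Sketch only: `tx` is assumed to implement `DatabaseReadOperations`.
if let Some((safe_block, batch_info)) = tx.get_latest_safe_l2_info().await? {
    // Until issue #273 reworks batch handling, this value stands in for both the
    // safe and the finalized L2 block.
    fcs.update_safe_block_info(safe_block);
    tracing::debug!(?safe_block, ?batch_info, "recovered safe L2 head from database");
}
```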
2 changes: 2 additions & 0 deletions crates/engine/Cargo.toml
@@ -40,6 +40,7 @@ rollup-node-providers.workspace = true
rollup-node-signer.workspace = true

# scroll
+ scroll-db.workspace = true
scroll-network.workspace = true

# misc
@@ -70,6 +71,7 @@ test-utils = [
"rollup-node-providers/test-utils",
"reth-chainspec/test-utils",
"reth-primitives-traits/test-utils",
"scroll-db/test-utils",
]
serde = [
"alloy-eips/serde",
6 changes: 3 additions & 3 deletions crates/engine/src/fcs.rs
@@ -33,7 +33,7 @@ impl ForkchoiceState {

/// Creates a new [`ForkchoiceState`] instance setting the `head`, `safe` and `finalized` block
/// info to the provided `genesis` hash.
- pub const fn head_from_genesis(genesis: B256) -> Self {
+ pub const fn from_genesis(genesis: B256) -> Self {
Self::new(
BlockInfo { hash: genesis, number: 0 },
BlockInfo { hash: genesis, number: 0 },
@@ -43,7 +43,7 @@ impl ForkchoiceState {

/// Creates a [`ForkchoiceState`] instance setting the `head`, `safe` and `finalized` hash to
/// the appropriate genesis values by reading from the provider.
- pub async fn head_from_provider<P: Provider<Scroll>>(provider: P) -> Option<Self> {
+ pub async fn from_provider<P: Provider<Scroll>>(provider: &P) -> Option<Self> {
let latest_block =
provider.get_block(BlockId::Number(BlockNumberOrTag::Latest)).await.ok()??;
let safe_block =
@@ -65,7 +65,7 @@ impl ForkchoiceState {
pub fn head_from_chain_spec<CS: EthChainSpec<Header: BlockHeader>>(
chain_spec: CS,
) -> Option<Self> {
- Some(Self::head_from_genesis(genesis_hash_from_chain_spec(chain_spec)?))
+ Some(Self::from_genesis(genesis_hash_from_chain_spec(chain_spec)?))
}

/// Updates the `head` block info.
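A brief usage sketch of the renamed constructors (the provider value and genesis hash here are assumed to be in scope; this roughly mirrors what the node startup code in crates/node/src/args.rs does below):

```rust
// Sketch: seed the forkchoice state from an RPC provider, falling back to the
// genesis-derived state when the provider cannot answer (e.g. an empty chain).
let fcs = match ForkchoiceState::from_provider(&l2_provider).await {
    Some(state) => state,
    None => ForkchoiceState::from_genesis(genesis_hash),
};
```

Note that `from_provider` now borrows the provider rather than taking it by value, so the caller no longer needs to clone it.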
44 changes: 25 additions & 19 deletions crates/node/src/args.rs
@@ -16,6 +16,7 @@ use alloy_signer_local::PrivateKeySigner;
use alloy_transport::layers::RetryBackoffLayer;
use aws_sdk_kms::config::BehaviorVersion;
use clap::ArgAction;
+ use futures::StreamExt;
use reth_chainspec::EthChainSpec;
use reth_network::NetworkProtocols;
use reth_network_api::FullNetwork;
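For readers wondering about the new `futures::StreamExt` import: it is only needed so `.next().await` can be called on the stream returned by `get_l2_blocks()` in the next hunk. A tiny self-contained illustration of the pattern, using an in-memory stand-in for the database stream (requires the `futures` and `tokio` crates):

```rust
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    // Stand-in for `get_l2_blocks()`: a stream of database results, newest first
    // (the startup code below treats the first item as the latest block).
    let mut blocks = stream::iter(vec![Ok::<u64, &str>(101), Ok(100)]);

    // `.next()` is provided by `StreamExt`; without the import this would not compile.
    if let Some(latest) = blocks.next().await {
        println!("latest L2 block number: {latest:?}");
    }
}
```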
@@ -243,9 +244,30 @@ impl ScrollRollupNodeConfig {
ForkchoiceState::head_from_chain_spec(chain_spec.clone())
.expect("failed to derive forkchoice state from chain spec")
};
- let mut fcs = ForkchoiceState::head_from_provider(l2_provider.clone())
-     .await
-     .unwrap_or_else(chain_spec_fcs);
+ let mut fcs =
+     ForkchoiceState::from_provider(&l2_provider).await.unwrap_or_else(chain_spec_fcs);

+ // Update the head block info from the database if available.
+ if let Some(latest_block) = db.tx().await?.get_l2_blocks().await?.next().await {
+     let latest_block = latest_block?;
+     fcs.update_head_block_info(latest_block);
+ }
Collaborator:
We only persist safe l2 blocks in the database. Unsafe blocks are not persisted in the database, we only persist a mapping of L1 message -> L2 block number. This may be a shortcoming of the data model that needs to be revised. We may need to revert to persisting unsafe blocks in the database as well.

greged93 (Contributor, Author), Sep 22, 2025:
That's true... one solution would be to take the latest executed L1 message's L2 block number. We could on top of that (as a later improvement) iterate blocks from there to the tip using the L2 provider and take the shallowest block which doesn't include L1 messages (as suggested by @Thegaram).

Collaborator:
This works, but it could lead to relatively deep reorgs if L1 messages are not included in a large number of blocks. An alternative solution would be to track the current chain head in the database metadata table.
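Neither suggestion from the thread above is implemented in this PR. Purely as an illustration of the first one, recovering the unsafe head from the latest executed L1 message could look roughly like the following; the accessor name `get_latest_executed_l1_message_l2_block` is hypothetical, and `tx`, `l2_provider`, and `fcs` are assumed to be in scope:

```rust
// Hypothetical sketch of the idea discussed above, not part of this PR.
// Assumes an accessor returning the L2 block number recorded for the most
// recently executed L1 message.
if let Some(block_number) = tx.get_latest_executed_l1_message_l2_block().await? {
    // Fetch that block from the L2 provider to recover its hash, mirroring the
    // error handling style used in `from_provider` above.
    if let Some(block) =
        l2_provider.get_block(BlockId::Number(block_number.into())).await.ok().flatten()
    {
        fcs.update_head_block_info(BlockInfo { hash: block.header.hash, number: block_number });
    }
}
```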


+ // On startup we replay the latest batch of blocks from the database as such we set the safe
+ // block hash to the latest block hash associated with the previous consolidated
+ // batch in the database.
+ let tx = db.tx_mut().await?;
+ let (startup_safe_block, l1_start_block_number) =
+     tx.prepare_on_startup(chain_spec.genesis_hash()).await?;
+ tx.commit().await?;
+ if let Some(block_info) = startup_safe_block {
+     fcs.update_safe_block_info(block_info);
+ } else {
+     fcs.update_safe_block_info(BlockInfo {
+         hash: genesis_hash_from_chain_spec(chain_spec.clone()).unwrap(),
+         number: 0,
+     });
+ }

let chain_spec = Arc::new(chain_spec.clone());

@@ -267,22 +289,6 @@ impl ScrollRollupNodeConfig {
authorized_signer,
);

- // On startup we replay the latest batch of blocks from the database as such we set the safe
- // block hash to the latest block hash associated with the previous consolidated
- // batch in the database.
- let tx = db.tx_mut().await?;
- let (startup_safe_block, l1_start_block_number) =
-     tx.prepare_on_startup(chain_spec.genesis_hash()).await?;
- tx.commit().await?;
- if let Some(block_info) = startup_safe_block {
-     fcs.update_safe_block_info(block_info);
- } else {
-     fcs.update_safe_block_info(BlockInfo {
-         hash: genesis_hash_from_chain_spec(chain_spec.clone()).unwrap(),
-         number: 0,
-     });
- }

tracing::info!(target: "scroll::node::args", fcs = ?fcs, payload_building_duration = ?self.sequencer_args.payload_building_duration, "Starting engine driver");
let engine = EngineDriver::new(
Arc::new(engine_api),