diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 9e58380a08a..0bbdb325a69 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -19,12 +19,12 @@ use ethrex_common::types::fee_config::FeeConfig; use ethrex_common::types::requests::{EncodedRequests, Requests, compute_requests_hash}; use ethrex_common::types::{ AccountState, AccountUpdate, Block, BlockHash, BlockHeader, BlockNumber, ChainConfig, Code, - EIP4844Transaction, Receipt, Transaction, WrappedEIP4844Transaction, compute_receipts_root, - validate_block_header, validate_cancun_header_fields, validate_prague_header_fields, - validate_pre_cancun_header_fields, + EIP4844Transaction, Fork, Receipt, Transaction, WrappedEIP4844Transaction, + compute_receipts_root, validate_block_header, validate_cancun_header_fields, + validate_prague_header_fields, validate_pre_cancun_header_fields, }; use ethrex_common::types::{ELASTICITY_MULTIPLIER, P2PTransaction}; -use ethrex_common::types::{Fork, MempoolTransaction}; +use ethrex_common::types::{Fork::*, MempoolTransaction}; use ethrex_common::{Address, H256, TrieLogger}; use ethrex_metrics::metrics; use ethrex_rlp::decode::RLPDecode; @@ -1263,7 +1263,7 @@ impl Blockchain { // NOTE: We could add a tx size limit here, but it's not in the actual spec // Check init code size - if config.is_shanghai_activated(header.timestamp) + if config.is_fork_activated(Shanghai, header.timestamp) && tx.is_contract_creation() && tx.data().len() > MAX_INITCODE_SIZE as usize { @@ -1274,7 +1274,8 @@ impl Blockchain { return Err(MempoolError::TxMaxDataSizeError); } - if config.is_osaka_activated(header.timestamp) && tx.gas_limit() > POST_OSAKA_GAS_LIMIT_CAP + if config.is_fork_activated(Osaka, header.timestamp) + && tx.gas_limit() > POST_OSAKA_GAS_LIMIT_CAP { // https://eips.ethereum.org/EIPS/eip-7825 return Err(MempoolError::TxMaxGasLimitExceededError( @@ -1430,7 +1431,7 @@ pub fn validate_requests_hash( chain_config: &ChainConfig, requests: &[Requests], ) -> Result<(), ChainError> { - if !chain_config.is_prague_activated(header.timestamp) { + if !chain_config.is_fork_activated(Prague, header.timestamp) { return Ok(()); } @@ -1517,7 +1518,7 @@ pub fn validate_block( validate_block_header(&block.header, parent_header, elasticity_multiplier) .map_err(InvalidBlockError::from)?; - if chain_config.is_osaka_activated(block.header.timestamp) { + if chain_config.is_fork_activated(Osaka, block.header.timestamp) { let block_rlp_size = block.encode_to_vec().len(); if block_rlp_size > MAX_RLP_BLOCK_SIZE as usize { return Err(error::ChainError::InvalidBlock( @@ -1528,14 +1529,14 @@ pub fn validate_block( )); } } - if chain_config.is_prague_activated(block.header.timestamp) { + if chain_config.is_fork_activated(Prague, block.header.timestamp) { validate_prague_header_fields(&block.header, parent_header, chain_config) .map_err(InvalidBlockError::from)?; verify_blob_gas_usage(block, chain_config)?; - if chain_config.is_osaka_activated(block.header.timestamp) { + if chain_config.is_fork_activated(Osaka, block.header.timestamp) { verify_transaction_max_gas_limit(block)?; } - } else if chain_config.is_cancun_activated(block.header.timestamp) { + } else if chain_config.is_fork_activated(Cancun, block.header.timestamp) { validate_cancun_header_fields(&block.header, parent_header, chain_config) .map_err(InvalidBlockError::from)?; verify_blob_gas_usage(block, chain_config)?; diff --git a/crates/blockchain/constants.rs b/crates/blockchain/constants.rs index 
e0ba1f698d9..4cc8dcdcda2 100644 --- a/crates/blockchain/constants.rs +++ b/crates/blockchain/constants.rs @@ -18,9 +18,6 @@ pub const TX_ACCESS_LIST_ADDRESS_GAS: u64 = 2400; // Gas cost for each storage key specified on access lists pub const TX_ACCESS_LIST_STORAGE_KEY_GAS: u64 = 1900; -// Gas cost for each non zero byte on transaction data -pub const TX_DATA_NON_ZERO_GAS: u64 = 68; - // === EIP-170 constants === // Max bytecode size @@ -37,7 +34,7 @@ pub const MAX_TRANSACTION_DATA_SIZE: u32 = 4 * 32 * 1024; // 128 Kb // === EIP-2028 constants === // Gas cost for each non zero byte on transaction data -pub const TX_DATA_NON_ZERO_GAS_EIP2028: u64 = 16; +pub const TX_DATA_NON_ZERO_GAS: u64 = 16; // === EIP-4844 constants === diff --git a/crates/blockchain/mempool.rs b/crates/blockchain/mempool.rs index db14719c79a..3c2946b091a 100644 --- a/crates/blockchain/mempool.rs +++ b/crates/blockchain/mempool.rs @@ -6,14 +6,15 @@ use std::{ use crate::{ constants::{ TX_ACCESS_LIST_ADDRESS_GAS, TX_ACCESS_LIST_STORAGE_KEY_GAS, TX_CREATE_GAS_COST, - TX_DATA_NON_ZERO_GAS, TX_DATA_NON_ZERO_GAS_EIP2028, TX_DATA_ZERO_GAS_COST, TX_GAS_COST, - TX_INIT_CODE_WORD_GAS_COST, + TX_DATA_NON_ZERO_GAS, TX_DATA_ZERO_GAS_COST, TX_GAS_COST, TX_INIT_CODE_WORD_GAS_COST, }, error::MempoolError, }; use ethrex_common::{ Address, H160, H256, U256, - types::{BlobsBundle, BlockHeader, ChainConfig, MempoolTransaction, Transaction, TxType}, + types::{ + BlobsBundle, BlockHeader, ChainConfig, Fork::*, MempoolTransaction, Transaction, TxType, + }, }; use ethrex_storage::error::StoreError; use std::collections::HashSet; @@ -426,11 +427,7 @@ pub fn transaction_intrinsic_gas( let data_len = tx.data().len() as u64; if data_len > 0 { - let non_zero_gas_cost = if config.is_istanbul_activated(header.number) { - TX_DATA_NON_ZERO_GAS_EIP2028 - } else { - TX_DATA_NON_ZERO_GAS - }; + let non_zero_gas_cost = TX_DATA_NON_ZERO_GAS; let non_zero_count = tx.data().iter().filter(|&&x| x != 0u8).count() as u64; @@ -444,7 +441,7 @@ pub fn transaction_intrinsic_gas( .checked_add(zero_count * TX_DATA_ZERO_GAS_COST) .ok_or(MempoolError::TxGasOverflowError)?; - if is_contract_creation && config.is_shanghai_activated(header.timestamp) { + if is_contract_creation && config.is_fork_activated(Shanghai, header.timestamp) { // Len in 32 bytes sized words let len_in_words = data_len.saturating_add(31) / 32; @@ -477,8 +474,7 @@ mod tests { use crate::error::MempoolError; use crate::mempool::{ Mempool, TX_ACCESS_LIST_ADDRESS_GAS, TX_ACCESS_LIST_STORAGE_KEY_GAS, TX_CREATE_GAS_COST, - TX_DATA_NON_ZERO_GAS, TX_DATA_NON_ZERO_GAS_EIP2028, TX_DATA_ZERO_GAS_COST, TX_GAS_COST, - TX_INIT_CODE_WORD_GAS_COST, + TX_DATA_NON_ZERO_GAS, TX_DATA_ZERO_GAS_COST, TX_GAS_COST, TX_INIT_CODE_WORD_GAS_COST, }; use std::collections::HashMap; @@ -487,7 +483,7 @@ mod tests { BYTES_PER_BLOB, BlobsBundle, BlockHeader, ChainConfig, EIP1559Transaction, EIP4844Transaction, MempoolTransaction, Transaction, TxKind, }; - use ethrex_common::{Address, Bytes, H256, U256}; + use ethrex_common::{Address, Bytes, H256, U256, types::Fork::*}; use ethrex_storage::EngineType; use ethrex_storage::{Store, error::StoreError}; @@ -505,13 +501,12 @@ mod tests { Ok(store) } - fn build_basic_config_and_header( - istanbul_active: bool, - shanghai_active: bool, - ) -> (ChainConfig, BlockHeader) { + fn build_basic_config_and_header(shanghai_active: bool) -> (ChainConfig, BlockHeader) { + use ethrex_common::types::FORKS; + let mut fork_activation_timestamps: [Option; FORKS.len()] = [None; FORKS.len()]; + 
fork_activation_timestamps[Shanghai] = Some(if shanghai_active { 1 } else { 10 }); let config = ChainConfig { - shanghai_time: Some(if shanghai_active { 1 } else { 10 }), - istanbul_block: Some(if istanbul_active { 1 } else { 10 }), + fork_activation_timestamps, ..Default::default() }; @@ -528,7 +523,7 @@ mod tests { #[test] fn normal_transaction_intrinsic_gas() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let tx = EIP1559Transaction { nonce: 3, @@ -551,7 +546,7 @@ mod tests { #[test] fn create_transaction_intrinsic_gas() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let tx = EIP1559Transaction { nonce: 3, @@ -573,8 +568,8 @@ mod tests { } #[test] - fn transaction_intrinsic_data_gas_pre_istanbul() { - let (config, header) = build_basic_config_and_header(false, false); + fn transaction_intrinsic_data_gas() { + let (config, header) = build_basic_config_and_header(false); let tx = EIP1559Transaction { nonce: 3, @@ -595,33 +590,9 @@ mod tests { assert_eq!(intrinsic_gas, expected_gas_cost); } - #[test] - fn transaction_intrinsic_data_gas_post_istanbul() { - let (config, header) = build_basic_config_and_header(true, false); - - let tx = EIP1559Transaction { - nonce: 3, - max_priority_fee_per_gas: 0, - max_fee_per_gas: 0, - gas_limit: 100_000, - to: TxKind::Call(Address::from_low_u64_be(1)), // Normal tx - value: U256::zero(), // Value zero - data: Bytes::from(vec![0x0, 0x1, 0x1, 0x0, 0x1, 0x1]), // 6 bytes of data - access_list: Default::default(), // No access list - ..Default::default() - }; - - let tx = Transaction::EIP1559Transaction(tx); - let expected_gas_cost = - TX_GAS_COST + 2 * TX_DATA_ZERO_GAS_COST + 4 * TX_DATA_NON_ZERO_GAS_EIP2028; - let intrinsic_gas = - transaction_intrinsic_gas(&tx, &header, &config).expect("Intrinsic gas"); - assert_eq!(intrinsic_gas, expected_gas_cost); - } - #[test] fn transaction_create_intrinsic_gas_pre_shanghai() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let n_words: u64 = 10; let n_bytes: u64 = 32 * n_words - 3; // Test word rounding @@ -647,7 +618,7 @@ mod tests { #[test] fn transaction_create_intrinsic_gas_post_shanghai() { - let (config, header) = build_basic_config_and_header(false, true); + let (config, header) = build_basic_config_and_header(true); let n_words: u64 = 10; let n_bytes: u64 = 32 * n_words - 3; // Test word rounding @@ -675,7 +646,7 @@ mod tests { #[test] fn transaction_intrinsic_gas_access_list() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let access_list = vec![ (Address::zero(), vec![H256::default(); 10]), @@ -705,7 +676,7 @@ mod tests { #[tokio::test] async fn transaction_with_big_init_code_in_shanghai_fails() { - let (config, header) = build_basic_config_and_header(false, true); + let (config, header) = build_basic_config_and_header(true); let store = setup_storage(config, header).await.expect("Storage setup"); let blockchain = Blockchain::default_with_store(store); @@ -732,7 +703,7 @@ mod tests { #[tokio::test] async fn transaction_with_gas_limit_higher_than_of_the_block_should_fail() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let store = setup_storage(config, 
header).await.expect("Storage setup"); let blockchain = Blockchain::default_with_store(store); @@ -759,7 +730,7 @@ mod tests { #[tokio::test] async fn transaction_with_priority_fee_higher_than_gas_fee_should_fail() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let store = setup_storage(config, header).await.expect("Storage setup"); let blockchain = Blockchain::default_with_store(store); @@ -786,7 +757,7 @@ mod tests { #[tokio::test] async fn transaction_with_gas_limit_lower_than_intrinsic_gas_should_fail() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let store = setup_storage(config, header).await.expect("Storage setup"); let blockchain = Blockchain::default_with_store(store); let intrinsic_gas_cost = TX_GAS_COST; @@ -813,7 +784,7 @@ mod tests { #[tokio::test] async fn transaction_with_blob_base_fee_below_min_should_fail() { - let (config, header) = build_basic_config_and_header(false, false); + let (config, header) = build_basic_config_and_header(false); let store = setup_storage(config, header).await.expect("Storage setup"); let blockchain = Blockchain::default_with_store(store); diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index 40d0b3f7213..a37cab9421e 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -11,7 +11,9 @@ use ethrex_common::{ constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB, MAX_RLP_BLOCK_SIZE}, types::{ AccountUpdate, BlobsBundle, Block, BlockBody, BlockHash, BlockHeader, BlockNumber, - ChainConfig, MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, bloom_from_logs, + ChainConfig, + Fork::*, + MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, bloom_from_logs, calc_excess_blob_gas, calculate_base_fee_per_blob_gas, calculate_base_fee_per_gas, compute_receipts_root, compute_transactions_root, compute_withdrawals_root, requests::{EncodedRequests, compute_requests_hash}, @@ -159,17 +161,17 @@ pub fn create_payload( args.elasticity_multiplier, ), withdrawals_root: chain_config - .is_shanghai_activated(args.timestamp) + .is_fork_activated(Shanghai, args.timestamp) .then_some(compute_withdrawals_root( args.withdrawals.as_ref().unwrap_or(&Vec::new()), )), blob_gas_used: chain_config - .is_cancun_activated(args.timestamp) + .is_fork_activated(Cancun, args.timestamp) .then_some(0), excess_blob_gas, parent_beacon_block_root: args.beacon_root, requests_hash: chain_config - .is_prague_activated(args.timestamp) + .is_fork_activated(Prague, args.timestamp) .then_some(*DEFAULT_REQUESTS_HASH), ..Default::default() }; @@ -247,7 +249,7 @@ impl PayloadBuildContext { remaining_gas: payload.header.gas_limit, receipts: vec![], requests: config - .is_prague_activated(payload.header.timestamp) + .is_fork_activated(Prague, payload.header.timestamp) .then_some(Vec::new()), block_value: U256::zero(), base_fee_per_blob_gas, @@ -543,7 +545,7 @@ impl Blockchain { context.payload_size + head_tx.encode_canonical_to_vec().len() as u64; if context .chain_config() - .is_osaka_activated(context.payload.header.timestamp) + .is_fork_activated(Osaka, context.payload.header.timestamp) && potential_rlp_block_size > MAX_RLP_BLOCK_SIZE { break; @@ -553,16 +555,6 @@ impl Blockchain { // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = 
head_tx.tx.hash(); - // Check whether the tx is replay-protected - if head_tx.tx.protected() && !chain_config.is_eip155_activated(context.block_number()) { - // Ignore replay protected tx & all txs from the sender - // Pull transaction from the mempool - debug!("Ignoring replay-protected transaction: {}", tx_hash); - txs.pop(); - self.remove_transaction_from_pool(&tx_hash)?; - continue; - } - // Execute tx let receipt = match self.apply_transaction(&head_tx, context) { Ok(receipt) => { @@ -636,7 +628,7 @@ impl Blockchain { pub fn extract_requests(&self, context: &mut PayloadBuildContext) -> Result<(), EvmError> { if !context .chain_config() - .is_prague_activated(context.payload.header.timestamp) + .is_fork_activated(Prague, context.payload.header.timestamp) { return Ok(()); }; diff --git a/crates/common/config/networks.rs b/crates/common/config/networks.rs index 03cd11e0af3..8aaaf33d573 100644 --- a/crates/common/config/networks.rs +++ b/crates/common/config/networks.rs @@ -4,7 +4,7 @@ use std::{ path::PathBuf, }; -use ethrex_common::types::{ChainConfig, Genesis, GenesisError}; +use ethrex_common::types::{ChainConfig, FORKS, Fork::Prague, Genesis, GenesisError}; use serde::{Deserialize, Serialize}; //TODO: Look for a better place to move these files @@ -117,14 +117,19 @@ impl Network { } Network::LocalDevnet => Ok(serde_json::from_str(LOCAL_DEVNET_GENESIS_CONTENTS)?), Network::LocalDevnetL2 => Ok(serde_json::from_str(LOCAL_DEVNETL2_GENESIS_CONTENTS)?), - Network::L2Chain(chain_id) => Ok(Genesis { - config: ChainConfig { - chain_id: *chain_id, - prague_time: Some(0), + Network::L2Chain(chain_id) => { + let mut fork_activation_timestamps: [Option; FORKS.len()] = + [None; FORKS.len()]; + fork_activation_timestamps[Prague] = Some(0); + Ok(Genesis { + config: ChainConfig { + chain_id: *chain_id, + fork_activation_timestamps, + ..Default::default() + }, ..Default::default() - }, - ..Default::default() - }), + }) + } Network::GenesisPath(s) => Genesis::try_from(s.as_path()), } } diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index 0634a3095e7..abc385680df 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -948,13 +948,13 @@ mod test { base_fee_per_gas: Some(30), ..Default::default() }; + let fork = Fork::Osaka; let schedule = ForkBlobSchedule { + fork, target: 9, max: 14, base_fee_update_fraction: 8832827, }; - let fork = Fork::Osaka; - let res = calc_excess_blob_gas(&parent, schedule, fork); assert_eq!(res, 5617366) } @@ -967,7 +967,9 @@ mod test { base_fee_per_gas: Some(50), ..Default::default() }; + let fork = Fork::Osaka; let schedule = ForkBlobSchedule { + fork, target: 21, max: 32, base_fee_update_fraction: 20609697, @@ -985,12 +987,14 @@ mod test { base_fee_per_gas: Some(0x11), ..Default::default() }; + let fork = Fork::Osaka; + let schedule = ForkBlobSchedule { + fork, target: 9, max: 14, base_fee_update_fraction: 0x86c73b, }; - let fork = Fork::Osaka; let res = calc_excess_blob_gas(&parent, schedule, fork); assert_eq!(res, 3538944) diff --git a/crates/common/types/genesis.rs b/crates/common/types/genesis.rs index 1ceeb867144..1f50d25655f 100644 --- a/crates/common/types/genesis.rs +++ b/crates/common/types/genesis.rs @@ -8,13 +8,15 @@ use sha3::{Digest, Keccak256}; use std::{ collections::{BTreeMap, HashMap}, io::{BufReader, Error}, + ops::{Index, IndexMut}, path::Path, }; use tracing::warn; +use self::Fork::*; use super::{ - AccountState, Block, BlockBody, BlockHeader, BlockNumber, INITIAL_BASE_FEE, - compute_receipts_root, 
compute_transactions_root, compute_withdrawals_root, + AccountState, Block, BlockBody, BlockHeader, INITIAL_BASE_FEE, compute_receipts_root, + compute_transactions_root, compute_withdrawals_root, }; use crate::{ constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH}, @@ -71,23 +73,12 @@ impl TryFrom<&Path> for Genesis { let genesis_reader = BufReader::new(genesis_file); let genesis: Genesis = serde_json::from_reader(genesis_reader)?; - // Try to derive if the genesis file is PoS - // Different genesis files have different configurations - // TODO: Remove once we have a way to run PoW chains, i.e Snap Sync - if genesis.config.terminal_total_difficulty != Some(0) - && genesis.config.merge_netsplit_block != Some(0) - && genesis.config.shanghai_time != Some(0) - && genesis.config.cancun_time != Some(0) - && genesis.config.prague_time != Some(0) - { - // Hive has a minimalistic genesis file, which is not supported - // return Err(GenesisError::InvalidFork()); - warn!("Invalid fork, only post-merge networks are supported."); - } - - if genesis.config.bpo3_time.is_some() && genesis.config.blob_schedule.bpo3.is_none() - || genesis.config.bpo4_time.is_some() && genesis.config.blob_schedule.bpo4.is_none() - || genesis.config.bpo5_time.is_some() && genesis.config.blob_schedule.bpo5.is_none() + if genesis.config.fork_activation_timestamps[BPO3].is_some() + && genesis.config.blob_schedule[BPO3].is_none() + || genesis.config.fork_activation_timestamps[BPO4].is_some() + && genesis.config.blob_schedule[BPO4].is_none() + || genesis.config.fork_activation_timestamps[BPO5].is_some() + && genesis.config.blob_schedule[BPO5].is_none() { warn!("BPO time set but no BPO BlobSchedule found in ChainConfig") } @@ -111,89 +102,12 @@ impl TryFrom<&Path> for Genesis { )] #[serde(rename_all = "camelCase")] pub struct ForkBlobSchedule { + pub fork: Fork, pub base_fee_update_fraction: u64, pub max: u32, pub target: u32, } -#[allow(unused)] -#[derive( - Clone, Copy, Debug, Serialize, Deserialize, PartialEq, RSerialize, RDeserialize, Archive, -)] -#[serde(rename_all = "camelCase")] -pub struct BlobSchedule { - #[serde(default = "default_cancun_schedule")] - pub cancun: ForkBlobSchedule, - #[serde(default = "default_prague_schedule")] - pub prague: ForkBlobSchedule, - #[serde(default = "default_osaka_schedule")] - pub osaka: ForkBlobSchedule, - #[serde(default = "default_bpo1_schedule")] - pub bpo1: ForkBlobSchedule, - #[serde(default = "default_bpo2_schedule")] - pub bpo2: ForkBlobSchedule, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bpo3: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bpo4: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bpo5: Option, -} - -impl Default for BlobSchedule { - fn default() -> Self { - BlobSchedule { - cancun: default_cancun_schedule(), - prague: default_prague_schedule(), - osaka: default_osaka_schedule(), - bpo1: default_bpo1_schedule(), - bpo2: default_bpo2_schedule(), - bpo3: None, - bpo4: None, - bpo5: None, - } - } -} - -fn default_cancun_schedule() -> ForkBlobSchedule { - ForkBlobSchedule { - target: 3, - max: 6, - base_fee_update_fraction: 3338477, - } -} - -fn default_prague_schedule() -> ForkBlobSchedule { - ForkBlobSchedule { - target: 6, - max: 9, - base_fee_update_fraction: 5007716, - } -} - -fn default_osaka_schedule() -> ForkBlobSchedule { - ForkBlobSchedule { - target: 6, - max: 9, - base_fee_update_fraction: 5007716, - } -} - -fn default_bpo1_schedule() -> ForkBlobSchedule { - ForkBlobSchedule { - 
target: 10, - max: 15, - base_fee_update_fraction: 8346193, - } -} - -fn default_bpo2_schedule() -> ForkBlobSchedule { - ForkBlobSchedule { - target: 14, - max: 21, - base_fee_update_fraction: 11684671, - } -} /// Blockchain settings defined per block #[allow(unused)] #[derive( @@ -215,41 +129,13 @@ pub struct ChainConfig { /// Block numbers for the block where each fork was activated /// (None = no fork, 0 = fork is already active) - pub homestead_block: Option<u64>, - - pub dao_fork_block: Option<u64>, - /// Whether the node supports or opposes the DAO hard-fork - #[serde(default)] - pub dao_fork_support: bool, - - pub eip150_block: Option<u64>, - pub eip155_block: Option<u64>, - pub eip158_block: Option<u64>, - - pub byzantium_block: Option<u64>, - pub constantinople_block: Option<u64>, - pub petersburg_block: Option<u64>, - pub istanbul_block: Option<u64>, - pub muir_glacier_block: Option<u64>, - pub berlin_block: Option<u64>, - pub london_block: Option<u64>, - pub arrow_glacier_block: Option<u64>, - pub gray_glacier_block: Option<u64>, - pub merge_netsplit_block: Option<u64>, + /// We don't need these to check if certain forks are activated given Ethrex is post-merge, + /// but we need the numbers to calculate the ForkID on some networks (namely mainnet) + pub fork_activation_blocks: [Option<u64>; PRE_MERGE_FORKS], /// Timestamp at which each fork was activated /// (None = no fork, 0 = fork is already active) - pub shanghai_time: Option<u64>, - pub cancun_time: Option<u64>, - pub prague_time: Option<u64>, - pub verkle_time: Option<u64>, - pub osaka_time: Option<u64>, - - pub bpo1_time: Option<u64>, - pub bpo2_time: Option<u64>, - pub bpo3_time: Option<u64>, - pub bpo4_time: Option<u64>, - pub bpo5_time: Option<u64>, + pub fork_activation_timestamps: [Option<u64>; FORKS.len()], /// Amount of total difficulty reached by the network that triggers the consensus upgrade. pub terminal_total_difficulty: Option<u128>, @@ -257,7 +143,7 @@ pub struct ChainConfig { #[serde(default)] pub terminal_total_difficulty_passed: bool, #[serde(default)] - pub blob_schedule: BlobSchedule, + pub blob_schedule: [Option<ForkBlobSchedule>; FORKS.len() - Cancun as usize], #[rkyv(with = rkyv_utils::H160Wrapper)] // Deposits system contract address pub deposit_contract_address: Address, @@ -280,54 +166,57 @@ lazy_static::lazy_static!
{ } #[repr(u8)] -#[derive(Debug, PartialEq, Eq, PartialOrd, Default, Hash, Clone, Copy, Serialize, Deserialize)] +#[derive( + Debug, + PartialEq, + Eq, + PartialOrd, + Default, + Hash, + Clone, + Copy, + Serialize, + Deserialize, + RSerialize, + RDeserialize, + Archive, +)] pub enum Fork { - Frontier = 0, - FrontierThawing = 1, - Homestead = 2, - DaoFork = 3, - Tangerine = 4, - SpuriousDragon = 5, - Byzantium = 6, - Constantinople = 7, - Petersburg = 8, - Istanbul = 9, - MuirGlacier = 10, - Berlin = 11, - London = 12, - ArrowGlacier = 13, - GrayGlacier = 14, - Paris = 15, - Shanghai = 16, + Paris = 0, + Shanghai = 1, #[default] - Cancun = 17, - Prague = 18, - Osaka = 19, - BPO1 = 20, - BPO2 = 21, - BPO3 = 22, - BPO4 = 23, - BPO5 = 24, + Cancun = 2, + Prague = 3, + Osaka = 4, + BPO1 = 5, + BPO2 = 6, + BPO3 = 7, + BPO4 = 8, + BPO5 = 9, +} + +impl<T> Index<Fork> for [T] { + type Output = T; + fn index(&self, fork: Fork) -> &Self::Output { + &self[fork as usize] + } } +impl<T> IndexMut<Fork> for [T] { + fn index_mut(&mut self, fork: Fork) -> &mut Self::Output { + &mut self[fork as usize] + } +} + +pub const FORKS: [Fork; 10] = [ + Paris, Shanghai, Cancun, Prague, Osaka, BPO1, BPO2, BPO3, BPO4, BPO5, +]; + +pub const PRE_MERGE_FORKS: usize = 15; + impl From<Fork> for &str { fn from(fork: Fork) -> Self { match fork { - Fork::Frontier => "Frontier", - Fork::FrontierThawing => "FrontierThawing", - Fork::Homestead => "Homestead", - Fork::DaoFork => "DaoFork", - Fork::Tangerine => "Tangerine", - Fork::SpuriousDragon => "SpuriousDragon", - Fork::Byzantium => "Byzantium", - Fork::Constantinople => "Constantinople", - Fork::Petersburg => "Petersburg", - Fork::Istanbul => "Istanbul", - Fork::MuirGlacier => "MuirGlacier", - Fork::Berlin => "Berlin", - Fork::London => "London", - Fork::ArrowGlacier => "ArrowGlacier", - Fork::GrayGlacier => "GrayGlacier", Fork::Paris => "Paris", Fork::Shanghai => "Shanghai", Fork::Cancun => "Cancun", @@ -343,53 +232,9 @@ impl From<Fork> for &str { } impl ChainConfig { - pub fn is_bpo1_activated(&self, block_timestamp: u64) -> bool { - self.bpo1_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_bpo2_activated(&self, block_timestamp: u64) -> bool { - self.bpo2_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_bpo3_activated(&self, block_timestamp: u64) -> bool { - self.bpo3_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_bpo4_activated(&self, block_timestamp: u64) -> bool { - self.bpo4_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_bpo5_activated(&self, block_timestamp: u64) -> bool { - self.bpo5_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_osaka_activated(&self, block_timestamp: u64) -> bool { - self.osaka_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_prague_activated(&self, block_timestamp: u64) -> bool { - self.prague_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_shanghai_activated(&self, block_timestamp: u64) -> bool { - self.shanghai_time - .is_some_and(|time| time <= block_timestamp) - } - - pub fn is_cancun_activated(&self, block_timestamp: u64) -> bool { - self.cancun_time.is_some_and(|time| time <= block_timestamp) - } - - pub fn is_istanbul_activated(&self, block_number: BlockNumber) -> bool { - self.istanbul_block.is_some_and(|num| num <= block_number) - } - - pub fn is_london_activated(&self, block_number: BlockNumber) -> bool { - self.london_block.is_some_and(|num| num <= block_number) - } - - pub fn is_eip155_activated(&self, block_number: BlockNumber) ->
bool { - self.eip155_block.is_some_and(|num| num <= block_number) + pub fn is_fork_activated(&self, fork: Fork, block_timestamp: u64) -> bool { + self.fork_activation_timestamps[fork] + .is_some_and(|activation_time| block_timestamp >= activation_time) } pub fn display_config(&self) -> String { @@ -397,11 +242,10 @@ impl ChainConfig { let mut output = format!("Chain ID: {} ({})\n\n", self.chain_id, network); let post_merge_forks = [ - ("Shanghai", self.shanghai_time), - ("Cancun", self.cancun_time), - ("Prague", self.prague_time), - ("Verkle", self.verkle_time), - ("Osaka", self.osaka_time), + ("Shanghai", self.fork_activation_timestamps[Shanghai]), + ("Cancun", self.fork_activation_timestamps[Cancun]), + ("Prague", self.fork_activation_timestamps[Prague]), + ("Osaka", self.fork_activation_timestamps[Osaka]), ]; let active_forks: Vec<_> = post_merge_forks @@ -421,39 +265,21 @@ impl ChainConfig { } pub fn get_fork(&self, block_timestamp: u64) -> Fork { - if self.is_osaka_activated(block_timestamp) { - Fork::Osaka - } else if self.is_prague_activated(block_timestamp) { - Fork::Prague - } else if self.is_cancun_activated(block_timestamp) { - Fork::Cancun - } else if self.is_shanghai_activated(block_timestamp) { - Fork::Shanghai - } else { - Fork::Paris - } + let Some(index) = self + .fork_activation_timestamps + .iter() + .rposition(|possible_time| { + possible_time.is_some_and(|activation_time| activation_time <= block_timestamp) + }) + else { + return Paris; + }; + FORKS[index] } pub fn get_fork_blob_schedule(&self, block_timestamp: u64) -> Option<ForkBlobSchedule> { - if self.is_bpo5_activated(block_timestamp) { - Some(self.blob_schedule.bpo5.unwrap_or_default()) - } else if self.is_bpo4_activated(block_timestamp) { - Some(self.blob_schedule.bpo4.unwrap_or_default()) - } else if self.is_bpo3_activated(block_timestamp) { - Some(self.blob_schedule.bpo3.unwrap_or_default()) - } else if self.is_bpo2_activated(block_timestamp) { - Some(self.blob_schedule.bpo2) - } else if self.is_bpo1_activated(block_timestamp) { - Some(self.blob_schedule.bpo1) - } else if self.is_osaka_activated(block_timestamp) { - Some(self.blob_schedule.osaka) - } else if self.is_prague_activated(block_timestamp) { - Some(self.blob_schedule.prague) - } else if self.is_cancun_activated(block_timestamp) { - Some(self.blob_schedule.cancun) - } else { - None - } + let current_fork = self.get_fork(block_timestamp); + self.get_blob_schedule_for_fork(current_fork) } pub fn fork(&self, block_timestamp: u64) -> Fork { @@ -461,141 +287,60 @@ impl ChainConfig { } pub fn next_fork(&self, block_timestamp: u64) -> Option<Fork> { - let next = if self.is_bpo5_activated(block_timestamp) { - None - } else if self.is_bpo4_activated(block_timestamp) && self.bpo5_time.is_some() { - Some(Fork::BPO5) - } else if self.is_bpo3_activated(block_timestamp) && self.bpo4_time.is_some() { - Some(Fork::BPO4) - } else if self.is_bpo2_activated(block_timestamp) && self.bpo3_time.is_some() { - Some(Fork::BPO3) - } else if self.is_bpo1_activated(block_timestamp) && self.bpo2_time.is_some() { - Some(Fork::BPO2) - } else if self.is_osaka_activated(block_timestamp) && self.bpo1_time.is_some() { - Some(Fork::BPO1) - } else if self.is_prague_activated(block_timestamp) && self.osaka_time.is_some() { - Some(Fork::Osaka) - } else if self.is_cancun_activated(block_timestamp) && self.prague_time.is_some() { - Some(Fork::Prague) - } else if self.is_shanghai_activated(block_timestamp) && self.cancun_time.is_some() { - Some(Fork::Cancun) - } else { - None + let Some(index) = self +
.fork_activation_timestamps + .iter() + .position(|possible_time| { + possible_time.is_some_and(|activation_time| activation_time > block_timestamp) + }) + else { + return None; }; - match next { - Some(fork) if fork > self.fork(block_timestamp) => next, - _ => None, - } + Some(FORKS[index]) } pub fn get_last_scheduled_fork(&self) -> Fork { - if self.bpo5_time.is_some() { - Fork::BPO5 - } else if self.bpo4_time.is_some() { - Fork::BPO4 - } else if self.bpo3_time.is_some() { - Fork::BPO3 - } else if self.bpo2_time.is_some() { - Fork::BPO2 - } else if self.bpo1_time.is_some() { - Fork::BPO1 - } else if self.osaka_time.is_some() { - Fork::Osaka - } else if self.prague_time.is_some() { - Fork::Prague - } else if self.cancun_time.is_some() { - Fork::Cancun - } else { - Fork::Paris - } + let Some(index) = self + .fork_activation_timestamps + .iter() + .rposition(|possible_time| possible_time.is_some()) + else { + return Paris; + }; + FORKS[index] } pub fn get_activation_timestamp_for_fork(&self, fork: Fork) -> Option<u64> { - match fork { - Fork::Cancun => self.cancun_time, - Fork::Prague => self.prague_time, - Fork::Osaka => self.osaka_time, - Fork::BPO1 => self.bpo1_time, - Fork::BPO2 => self.bpo2_time, - Fork::BPO3 => self.bpo3_time, - Fork::BPO4 => self.bpo4_time, - Fork::BPO5 => self.bpo5_time, - Fork::Homestead => self.homestead_block, - Fork::DaoFork => self.dao_fork_block, - Fork::Byzantium => self.byzantium_block, - Fork::Constantinople => self.constantinople_block, - Fork::Petersburg => self.petersburg_block, - Fork::Istanbul => self.istanbul_block, - Fork::MuirGlacier => self.muir_glacier_block, - Fork::Berlin => self.berlin_block, - Fork::London => self.london_block, - Fork::ArrowGlacier => self.arrow_glacier_block, - Fork::GrayGlacier => self.gray_glacier_block, - Fork::Paris => self.merge_netsplit_block, - Fork::Shanghai => self.shanghai_time, - _ => None, - } + self.fork_activation_timestamps[fork] } pub fn get_blob_schedule_for_fork(&self, fork: Fork) -> Option<ForkBlobSchedule> { - match fork { - Fork::Cancun => Some(self.blob_schedule.cancun), - Fork::Prague => Some(self.blob_schedule.prague), - Fork::Osaka => Some(self.blob_schedule.osaka), - Fork::BPO1 => Some(self.blob_schedule.bpo1), - Fork::BPO2 => Some(self.blob_schedule.bpo2), - Fork::BPO3 => self.blob_schedule.bpo3, - Fork::BPO4 => self.blob_schedule.bpo4, - Fork::BPO5 => self.blob_schedule.bpo5, - _ => None, - } + *self.blob_schedule[0..fork as usize] + .iter() + .rfind(|option| { + option.is_some_and(|fork_blob_schedule| fork_blob_schedule.fork <= fork) + }) + .unwrap_or(&None) } pub fn gather_forks(&self, genesis_header: BlockHeader) -> (Vec<u64>, Vec<u64>) { - let mut block_number_based_forks: Vec<u64> = vec![ - self.homestead_block, - if self.dao_fork_support { - self.dao_fork_block - } else { - None - }, - self.eip150_block, - self.eip155_block, - self.eip158_block, - self.byzantium_block, - self.constantinople_block, - self.petersburg_block, - self.istanbul_block, - self.muir_glacier_block, - self.berlin_block, - self.london_block, - self.arrow_glacier_block, - self.gray_glacier_block, - self.merge_netsplit_block, - ] - .into_iter() - .flatten() - .collect(); + let mut block_number_based_forks: Vec<u64> = self + .fork_activation_blocks + .to_vec() + .into_iter() + .flatten() + .collect(); // Remove repeated values block_number_based_forks.sort(); block_number_based_forks.dedup(); - let mut timestamp_based_forks: Vec<u64> = vec![ - self.shanghai_time, - self.cancun_time, - self.prague_time, - self.osaka_time, - self.bpo1_time, - self.bpo2_time, -
self.bpo3_time, - self.bpo4_time, - self.bpo5_time, - self.verkle_time, - ] - .into_iter() - .flatten() - .collect(); + let mut timestamp_based_forks: Vec<u64> = self + .fork_activation_timestamps + .to_vec() + .into_iter() + .flatten() + .collect(); // Remove repeated values timestamp_based_forks.sort(); @@ -631,31 +376,27 @@ impl Genesis { let mut blob_gas_used: Option<u64> = None; let mut excess_blob_gas: Option<u64> = None; - if let Some(cancun_time) = self.config.cancun_time + if let Some(cancun_time) = self.config.fork_activation_timestamps[Cancun] && cancun_time <= self.timestamp { blob_gas_used = Some(self.blob_gas_used.unwrap_or(0)); excess_blob_gas = Some(self.excess_blob_gas.unwrap_or(0)); } - let base_fee_per_gas = self.base_fee_per_gas.or_else(|| { - self.config - .is_london_activated(0) - .then_some(INITIAL_BASE_FEE) - }); + let base_fee_per_gas = self.base_fee_per_gas.or(Some(INITIAL_BASE_FEE)); let withdrawals_root = self .config - .is_shanghai_activated(self.timestamp) + .is_fork_activated(Shanghai, self.timestamp) .then_some(compute_withdrawals_root(&[])); let parent_beacon_block_root = self .config - .is_cancun_activated(self.timestamp) + .is_fork_activated(Cancun, self.timestamp) .then_some(H256::zero()); let requests_hash = self .config - .is_prague_activated(self.timestamp) + .is_fork_activated(Prague, self.timestamp) .then_some(self.requests_hash.unwrap_or(*DEFAULT_REQUESTS_HASH)); BlockHeader { @@ -721,42 +462,39 @@ mod tests { let reader = BufReader::new(file); let genesis: Genesis = serde_json::from_reader(reader).expect("Failed to deserialize genesis file"); + let mut blob_schedule: [Option<ForkBlobSchedule>; 8] = [None; 8]; + blob_schedule[0] = Some(ForkBlobSchedule { + fork: Cancun, + target: 2, + max: 3, + base_fee_update_fraction: 6676954, + }); + blob_schedule[1] = Some(ForkBlobSchedule { + fork: Prague, + target: 3, + max: 4, + base_fee_update_fraction: 13353908, + }); + + let mut fork_activation_timestamps: [Option<u64>; FORKS.len()] = [None; FORKS.len()]; + fork_activation_timestamps[Shanghai] = Some(0); + fork_activation_timestamps[Cancun] = Some(0); + fork_activation_timestamps[Prague] = Some(1718232101); + + let fork_activation_blocks: [Option<u64>; 15] = [None; 15]; + + // Check Genesis fields // Chain config let expected_chain_config = ChainConfig { chain_id: 3151908_u64, - homestead_block: Some(0), - eip150_block: Some(0), - eip155_block: Some(0), - eip158_block: Some(0), - byzantium_block: Some(0), - constantinople_block: Some(0), - petersburg_block: Some(0), - istanbul_block: Some(0), - berlin_block: Some(0), - london_block: Some(0), - merge_netsplit_block: Some(0), - shanghai_time: Some(0), - cancun_time: Some(0), - prague_time: Some(1718232101), + fork_activation_blocks, + fork_activation_timestamps, terminal_total_difficulty: Some(0), terminal_total_difficulty_passed: true, deposit_contract_address: H160::from_str("0x4242424242424242424242424242424242424242") .unwrap(), // Note this BlobSchedule config is not the default - blob_schedule: BlobSchedule { - cancun: ForkBlobSchedule { - target: 2, - max: 3, - base_fee_update_fraction: 6676954, - }, - prague: ForkBlobSchedule { - target: 3, - max: 4, - base_fee_update_fraction: 13353908, - }, - ..Default::default() - }, + blob_schedule, ..Default::default() }; assert_eq!(&genesis.config, &expected_chain_config); @@ -918,21 +656,23 @@ mod tests { let config: ChainConfig = serde_json::from_str(json).expect("Failed to deserialize ChainConfig"); + let mut blob_schedule: [Option<ForkBlobSchedule>; 8] = [None; 8]; + blob_schedule[0] = Some(ForkBlobSchedule { + fork:
Cancun, + target: 1, + max: 2, + base_fee_update_fraction: 10000, + }); + blob_schedule[1] = Some(ForkBlobSchedule { + fork: Prague, + target: 3, + max: 4, + base_fee_update_fraction: 20000, + }); + let expected_chain_config = ChainConfig { chain_id: 123, - blob_schedule: BlobSchedule { - cancun: ForkBlobSchedule { - target: 1, - max: 2, - base_fee_update_fraction: 10000, - }, - prague: ForkBlobSchedule { - target: 3, - max: 4, - base_fee_update_fraction: 20000, - }, - ..Default::default() - }, + blob_schedule, deposit_contract_address: H160::from_str("0x4242424242424242424242424242424242424242") .unwrap(), ..Default::default() @@ -951,21 +691,22 @@ mod tests { let config: ChainConfig = serde_json::from_str(json).expect("Failed to deserialize ChainConfig"); + let mut blob_schedule: [Option<ForkBlobSchedule>; 8] = [None; 8]; + blob_schedule[0] = Some(ForkBlobSchedule { + fork: Cancun, + target: 3, + max: 6, + base_fee_update_fraction: 3338477, + }); + blob_schedule[1] = Some(ForkBlobSchedule { + fork: Prague, + target: 6, + max: 9, + base_fee_update_fraction: 5007716, + }); let expected_chain_config = ChainConfig { chain_id: 123, - blob_schedule: BlobSchedule { - cancun: ForkBlobSchedule { - target: 3, - max: 6, - base_fee_update_fraction: 3338477, - }, - prague: ForkBlobSchedule { - target: 6, - max: 9, - base_fee_update_fraction: 5007716, - }, - ..Default::default() - }, + blob_schedule, deposit_contract_address: H160::from_str("0x4242424242424242424242424242424242424242") .unwrap(), ..Default::default() @@ -991,21 +732,22 @@ mod tests { let config: ChainConfig = serde_json::from_str(json).expect("Failed to deserialize ChainConfig"); + let mut blob_schedule: [Option<ForkBlobSchedule>; 8] = [None; 8]; + blob_schedule[0] = Some(ForkBlobSchedule { + fork: Cancun, + target: 3, + max: 6, + base_fee_update_fraction: 3338477, + }); + blob_schedule[1] = Some(ForkBlobSchedule { + fork: Prague, + target: 3, + max: 4, + base_fee_update_fraction: 20000, + }); let expected_chain_config = ChainConfig { chain_id: 123, - blob_schedule: BlobSchedule { - cancun: ForkBlobSchedule { - target: 3, - max: 6, - base_fee_update_fraction: 3338477, - }, - prague: ForkBlobSchedule { - target: 3, - max: 4, - base_fee_update_fraction: 20000, - }, - ..Default::default() - }, + blob_schedule, deposit_contract_address: H160::from_str("0x4242424242424242424242424242424242424242") .unwrap(), ..Default::default() @@ -1031,21 +773,22 @@ mod tests { let config: ChainConfig = serde_json::from_str(json).expect("Failed to deserialize ChainConfig"); + let mut blob_schedule: [Option<ForkBlobSchedule>; 8] = [None; 8]; + blob_schedule[0] = Some(ForkBlobSchedule { + fork: Cancun, + target: 1, + max: 2, + base_fee_update_fraction: 10000, + }); + blob_schedule[1] = Some(ForkBlobSchedule { + fork: Prague, + target: 6, + max: 9, + base_fee_update_fraction: 5007716, + }); let expected_chain_config = ChainConfig { chain_id: 123, - blob_schedule: BlobSchedule { - cancun: ForkBlobSchedule { - target: 1, - max: 2, - base_fee_update_fraction: 10000, - }, - prague: ForkBlobSchedule { - target: 6, - max: 9, - base_fee_update_fraction: 5007716, - }, - ..Default::default() - }, + blob_schedule, deposit_contract_address: H160::from_str("0x4242424242424242424242424242424242424242") .unwrap(), ..Default::default() diff --git a/crates/l2/sequencer/block_producer/payload_builder.rs b/crates/l2/sequencer/block_producer/payload_builder.rs index 32eb17d8591..79854f46b65 100644 --- a/crates/l2/sequencer/block_producer/payload_builder.rs +++ b/crates/l2/sequencer/block_producer/payload_builder.rs @@ -114,8
+114,6 @@ pub async fn fill_transactions( let safe_bytes_per_blob: u64 = SAFE_BYTES_PER_BLOB.try_into()?; let mut privileged_tx_count = 0; - let chain_config = store.get_chain_config(); - debug!("Fetching transactions from mempool"); // Fetch mempool transactions let latest_block_number = store.get_latest_block_number().await?; @@ -167,16 +165,6 @@ pub async fn fill_transactions( // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = head_tx.tx.hash(); - // Check whether the tx is replay-protected - if head_tx.tx.protected() && !chain_config.is_eip155_activated(context.block_number()) { - // Ignore replay protected tx & all txs from the sender - // Pull transaction from the mempool - debug!("Ignoring replay-protected transaction: {}", tx_hash); - txs.pop(); - blockchain.remove_transaction_from_pool(&tx_hash)?; - continue; - } - let maybe_sender_acc_info = store .get_account_info(latest_block_number, head_tx.tx.sender()) .await?; diff --git a/crates/networking/rpc/engine/blobs.rs b/crates/networking/rpc/engine/blobs.rs index 7a1c68408ac..532807f8c3d 100644 --- a/crates/networking/rpc/engine/blobs.rs +++ b/crates/networking/rpc/engine/blobs.rs @@ -1,7 +1,9 @@ use ethrex_common::{ H256, serde_utils::{self}, - types::{Blob, CELLS_PER_EXT_BLOB, Proof, blobs_bundle::kzg_commitment_to_versioned_hash}, + types::{ + Blob, CELLS_PER_EXT_BLOB, Fork::*, Proof, blobs_bundle::kzg_commitment_to_versioned_hash, + }, }; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -120,7 +122,7 @@ impl RpcHandler for BlobsV2Request { && !context .storage .get_chain_config() - .is_osaka_activated(current_block_header.timestamp) + .is_fork_activated(Osaka, current_block_header.timestamp) { // validation requested in https://github.com/ethereum/execution-apis/blob/a1d95fb555cd91efb3e0d6555e4ab556d9f5dd06/src/engine/osaka.md?plain=1#L130 return Err(RpcErr::UnsuportedFork( diff --git a/crates/networking/rpc/engine/fork_choice.rs b/crates/networking/rpc/engine/fork_choice.rs index f1e74bb4fa6..c76832fa3af 100644 --- a/crates/networking/rpc/engine/fork_choice.rs +++ b/crates/networking/rpc/engine/fork_choice.rs @@ -3,7 +3,7 @@ use ethrex_blockchain::{ fork_choice::apply_fork_choice, payload::{BuildPayloadArgs, create_payload}, }; -use ethrex_common::types::{BlockHeader, ELASTICITY_MULTIPLIER}; +use ethrex_common::types::{BlockHeader, ELASTICITY_MULTIPLIER, Fork::*}; use ethrex_p2p::sync::SyncMode; use serde_json::Value; use tracing::{info, warn}; @@ -38,7 +38,7 @@ impl RpcHandler for ForkChoiceUpdatedV1 { handle_forkchoice(&self.fork_choice_state, context.clone(), 1).await?; if let (Some(head_block), Some(attributes)) = (head_block_opt, &self.payload_attributes) { let chain_config = context.storage.get_chain_config(); - if chain_config.is_cancun_activated(attributes.timestamp) { + if chain_config.is_fork_activated(Cancun, attributes.timestamp) { return Err(RpcErr::UnsuportedFork( "forkChoiceV1 used to build Cancun payload".to_string(), )); @@ -71,11 +71,11 @@ impl RpcHandler for ForkChoiceUpdatedV2 { handle_forkchoice(&self.fork_choice_state, context.clone(), 2).await?; if let (Some(head_block), Some(attributes)) = (head_block_opt, &self.payload_attributes) { let chain_config = context.storage.get_chain_config(); - if chain_config.is_cancun_activated(attributes.timestamp) { + if chain_config.is_fork_activated(Cancun, attributes.timestamp) { return Err(RpcErr::UnsuportedFork( "forkChoiceV2 used to build 
Cancun payload".to_string(), )); - } else if chain_config.is_shanghai_activated(attributes.timestamp) { + } else if chain_config.is_fork_activated(Shanghai, attributes.timestamp) { validate_attributes_v2(attributes, &head_block)?; } else { // Behave as a v1 @@ -352,7 +352,7 @@ fn validate_attributes_v3( "Attribute parent_beacon_block_root is null".to_string(), )); } - if !chain_config.is_cancun_activated(attributes.timestamp) { + if !chain_config.is_fork_activated(Cancun, attributes.timestamp) { return Err(RpcErr::UnsuportedFork( "forkChoiceV3 used to build pre-Cancun payload".to_string(), )); diff --git a/crates/networking/rpc/engine/payload.rs b/crates/networking/rpc/engine/payload.rs index a99ef9b9cc4..633698d79c5 100644 --- a/crates/networking/rpc/engine/payload.rs +++ b/crates/networking/rpc/engine/payload.rs @@ -2,7 +2,7 @@ use ethrex_blockchain::error::ChainError; use ethrex_blockchain::payload::PayloadBuildResult; use ethrex_common::types::payload::PayloadBundle; use ethrex_common::types::requests::{EncodedRequests, compute_requests_hash}; -use ethrex_common::types::{Block, BlockBody, BlockHash, BlockNumber, Fork}; +use ethrex_common::types::{Block, BlockBody, BlockHash, BlockNumber, Fork, Fork::*}; use ethrex_common::{H256, U256}; use ethrex_p2p::sync::SyncMode; use ethrex_rlp::error::RLPDecodeError; @@ -62,7 +62,7 @@ impl RpcHandler for NewPayloadV2Request { async fn handle(&self, context: RpcApiContext) -> Result { let chain_config = &context.storage.get_chain_config(); - if chain_config.is_shanghai_activated(self.payload.timestamp) { + if chain_config.is_fork_activated(Shanghai, self.payload.timestamp) { validate_execution_payload_v2(&self.payload)?; } else { // Behave as a v1 @@ -206,7 +206,7 @@ impl RpcHandler for NewPayloadV4Request { let chain_config = context.storage.get_chain_config(); - if !chain_config.is_prague_activated(block.header.timestamp) { + if !chain_config.is_fork_activated(Prague, block.header.timestamp) { return Err(RpcErr::UnsuportedFork(format!( "{:?}", chain_config.get_fork(block.header.timestamp) @@ -334,13 +334,13 @@ impl RpcHandler for GetPayloadV4Request { let payload_bundle = get_payload(self.payload_id, &context).await?; let chain_config = &context.storage.get_chain_config(); - if !chain_config.is_prague_activated(payload_bundle.block.header.timestamp) { + if !chain_config.is_fork_activated(Prague, payload_bundle.block.header.timestamp) { return Err(RpcErr::UnsuportedFork(format!( "{:?}", chain_config.get_fork(payload_bundle.block.header.timestamp) ))); } - if chain_config.is_osaka_activated(payload_bundle.block.header.timestamp) { + if chain_config.is_fork_activated(Osaka, payload_bundle.block.header.timestamp) { return Err(RpcErr::UnsuportedFork(format!("{:?}", Fork::Osaka))); } @@ -386,7 +386,7 @@ impl RpcHandler for GetPayloadV5Request { let payload_bundle = get_payload(self.payload_id, &context).await?; let chain_config = &context.storage.get_chain_config(); - if !chain_config.is_osaka_activated(payload_bundle.block.header.timestamp) { + if !chain_config.is_fork_activated(Osaka, payload_bundle.block.header.timestamp) { return Err(RpcErr::UnsuportedFork(format!( "{:?}", chain_config.get_fork(payload_bundle.block.header.timestamp) @@ -541,7 +541,7 @@ fn validate_execution_payload_v3(payload: &ExecutionPayload) -> Result<(), RpcEr fn validate_payload_v1_v2(block: &Block, context: &RpcApiContext) -> Result<(), RpcErr> { let chain_config = &context.storage.get_chain_config(); - if chain_config.is_cancun_activated(block.header.timestamp) { + if 
chain_config.is_fork_activated(Cancun, block.header.timestamp) { return Err(RpcErr::UnsuportedFork( "Cancun payload received".to_string(), )); diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index 4db2a42df10..c07c61ad8dd 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -610,7 +610,7 @@ mod tests { use crate::utils::test_utils::default_context_with_storage; use ethrex_common::{ H160, - types::{ChainConfig, Genesis}, + types::{ChainConfig, FORKS, Fork::*, Genesis, PRE_MERGE_FORKS}, }; use ethrex_storage::{EngineType, Store}; use sha3::{Digest, Keccak256}; @@ -735,22 +735,17 @@ mod tests { } fn example_chain_config() -> ChainConfig { + let mut fork_activation_timestamps: [Option<u64>; FORKS.len()] = [None; FORKS.len()]; + fork_activation_timestamps[Paris] = Some(0); + fork_activation_timestamps[Shanghai] = Some(0); + fork_activation_timestamps[Cancun] = Some(0); + fork_activation_timestamps[Prague] = Some(1718232101); + + let fork_activation_blocks: [Option<u64>; PRE_MERGE_FORKS] = [Some(0); PRE_MERGE_FORKS]; ChainConfig { chain_id: 3151908_u64, - homestead_block: Some(0), - eip150_block: Some(0), - eip155_block: Some(0), - eip158_block: Some(0), - byzantium_block: Some(0), - constantinople_block: Some(0), - petersburg_block: Some(0), - istanbul_block: Some(0), - berlin_block: Some(0), - london_block: Some(0), - merge_netsplit_block: Some(0), - shanghai_time: Some(0), - cancun_time: Some(0), - prague_time: Some(1718232101), + fork_activation_blocks, + fork_activation_timestamps, terminal_total_difficulty: Some(0), terminal_total_difficulty_passed: true, deposit_contract_address: H160::from_str("0x00000000219ab540356cbb839cbe05303d7705fa") diff --git a/crates/storage/store.rs b/crates/storage/store.rs index 75c97ce1073..503e40a8c76 100644 --- a/crates/storage/store.rs +++ b/crates/storage/store.rs @@ -1462,7 +1462,7 @@ mod tests { use ethrex_common::{ Bloom, H160, constants::EMPTY_KECCACK_HASH, - types::{Transaction, TxType}, + types::{FORKS, Fork::*, PRE_MERGE_FORKS, Transaction, TxType}, }; use ethrex_rlp::decode::RLPDecode; use std::{fs, str::FromStr}; @@ -1816,22 +1816,18 @@ mod tests { } fn example_chain_config() -> ChainConfig { + let mut fork_activation_timestamps: [Option<u64>; FORKS.len()] = [None; FORKS.len()]; + fork_activation_timestamps[Paris] = Some(0); + fork_activation_timestamps[Shanghai] = Some(0); + fork_activation_timestamps[Cancun] = Some(0); + fork_activation_timestamps[Prague] = Some(1718232101); + + let fork_activation_blocks: [Option<u64>; PRE_MERGE_FORKS] = [Some(0); PRE_MERGE_FORKS]; + ChainConfig { chain_id: 3151908_u64, - homestead_block: Some(0), - eip150_block: Some(0), - eip155_block: Some(0), - eip158_block: Some(0), - byzantium_block: Some(0), - constantinople_block: Some(0), - petersburg_block: Some(0), - istanbul_block: Some(0), - berlin_block: Some(0), - london_block: Some(0), - merge_netsplit_block: Some(0), - shanghai_time: Some(0), - cancun_time: Some(0), - prague_time: Some(1718232101), + fork_activation_blocks, + fork_activation_timestamps, terminal_total_difficulty: Some(58750000000000000000000), terminal_total_difficulty_passed: true, deposit_contract_address: H160::from_str("0x4242424242424242424242424242424242424242") diff --git a/crates/vm/levm/src/environment.rs b/crates/vm/levm/src/environment.rs index 0a7d1c81cb2..94a564d0322 100644 --- a/crates/vm/levm/src/environment.rs +++ b/crates/vm/levm/src/environment.rs @@ -84,6 +84,7 @@ impl EVMConfig { let base_fee_update_fraction: u64 =
Self::get_blob_base_fee_update_fraction_value(fork); ForkBlobSchedule { + fork, target, max: max_blobs_per_block, base_fee_update_fraction,
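Usage sketch (illustrative only, not part of the patch): assuming the reworked ChainConfig fields, the FORKS constant, the Index<Fork> impls, and the is_fork_activated/get_fork helpers introduced in crates/common/types/genesis.rs above, fork checks after this change read roughly as follows; the function name below is hypothetical.

use ethrex_common::types::{ChainConfig, FORKS, Fork::*};

fn example_fork_checks() {
    // Activation timestamps now live in a single array indexed directly by `Fork`
    // (via the `Index<Fork>`/`IndexMut<Fork>` impls added in genesis.rs).
    let mut fork_activation_timestamps: [Option<u64>; FORKS.len()] = [None; FORKS.len()];
    fork_activation_timestamps[Shanghai] = Some(0);
    fork_activation_timestamps[Cancun] = Some(0);

    let config = ChainConfig {
        fork_activation_timestamps,
        ..Default::default()
    };

    // One generic helper replaces the per-fork is_*_activated methods.
    assert!(config.is_fork_activated(Cancun, 1_700_000_000));
    assert!(!config.is_fork_activated(Prague, 1_700_000_000));
    assert_eq!(config.get_fork(1_700_000_000), Cancun);
}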