diff --git a/src/ethereum_test_base_types/composite_types.py b/src/ethereum_test_base_types/composite_types.py
index 035bfb2a88e..0a2a68a90ef 100644
--- a/src/ethereum_test_base_types/composite_types.py
+++ b/src/ethereum_test_base_types/composite_types.py
@@ -3,7 +3,7 @@
 from dataclasses import dataclass
 from typing import Any, ClassVar, Dict, List, SupportsBytes, Type, TypeAlias
 
-from pydantic import Field, PrivateAttr, TypeAdapter
+from pydantic import BaseModel, Field, PrivateAttr, TypeAdapter
 
 from .base_types import Address, Bytes, Hash, HashInt, HexNumber, ZeroPaddedHexNumber
 from .conversions import BytesConvertible, NumberConvertible
@@ -487,7 +487,7 @@ class ForkBlobSchedule(CamelModel):
 
 
 class BlobSchedule(EthereumTestRootModel[Dict[str, ForkBlobSchedule]]):
-    """Blob schedule configuration dictionary."""
+    """Blob schedule configuration dictionary, keyed by fork name."""
 
     root: Dict[str, ForkBlobSchedule] = Field(default_factory=dict, validate_default=True)
 
@@ -503,6 +503,51 @@ def last(self) -> ForkBlobSchedule | None:
             return None
         return list(self.root.values())[-1]
 
-    def __getitem__(self, key: str) -> ForkBlobSchedule:
+    def __getitem__(self, key: str) -> ForkBlobSchedule | None:
         """Return the schedule for a given fork."""
-        return self.root[key]
+        return self.root.get(key)
+
+
+class TimestampBlobSchedule(BaseModel):
+    """
+    Contains a list of dictionaries, each representing one scheduled BPO fork.
+
+    Each dictionary maps the fork's activation timestamp to a ForkBlobSchedule
+    object with the fields max, target and base_fee_update_fraction.
+    """
+
+    # never modify root directly, use `add_schedule()` instead
+    root: List[Dict[int, ForkBlobSchedule]] = Field(default_factory=list, validate_default=True)
+
+    def add_schedule(self, activation_timestamp: int, schedule: ForkBlobSchedule):
+        """Add a schedule to the schedule list."""
+        assert activation_timestamp >= 0
+        assert schedule.max_blobs_per_block > 0
+        assert schedule.base_fee_update_fraction > 0
+        assert schedule.target_blobs_per_block > 0
+
+        if self.root is None:
+            self.root = []
+
+        # ensure that the timestamp of each scheduled bpo fork is unique
+        existing_keys: set = set()
+        for d in self.root:
+            existing_keys.update(d.keys())
+        assert activation_timestamp not in existing_keys, (
+            f"No duplicate activation forks allowed: Timestamp {activation_timestamp} already "
+            "exists in the current schedule."
+        )
+
+        # add a scheduled bpo fork
+        self.root.append({activation_timestamp: schedule})
+
+        # sort list ascending by dict keys (timestamp)
+        self.root = sorted(self.root, key=lambda d: list(d.keys())[0])
+
+        # sanity check to ensure that timestamps are at least 3 time units apart
+        # (relevant for bpo test logic)
+        prev_time: int = 0
+        for i in self.root:
+            cur_time = next(iter(i))  # get key of only key-value pair in dict
+            assert cur_time - prev_time >= 3, (
+                "The timestamps of scheduled bpo_forks must be at least 3 apart! "
+                f"But you tried to append the dictionary {({activation_timestamp: schedule})}, "
+                f"which would lead to a timestamp difference of just {cur_time - prev_time} "
+                "compared to the previous element."
+            )
+            prev_time = cur_time
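# Illustrative usage of TimestampBlobSchedule.add_schedule() — a standalone sketch, not
# part of the patch. It mirrors the constructor style used by the tests further below
# (ForkBlobSchedule is assumed to accept both field names and the `max` alias):
from ethereum_test_base_types.composite_types import ForkBlobSchedule, TimestampBlobSchedule

schedule = TimestampBlobSchedule()
schedule.add_schedule(
    2000, ForkBlobSchedule(max=8, target_blobs_per_block=6, base_fee_update_fraction=5007716)
)
schedule.add_schedule(
    1000, ForkBlobSchedule(max=6, target_blobs_per_block=3, base_fee_update_fraction=3338477)
)

# entries are kept sorted ascending by activation timestamp:
assert [next(iter(d)) for d in schedule.root] == [1000, 2000]

# re-adding an existing timestamp (or one closer than 3 to a neighbour) raises AssertionError:
# schedule.add_schedule(1000, ...)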
diff --git a/src/ethereum_test_base_types/conversions.py b/src/ethereum_test_base_types/conversions.py
index e99743ba241..6c7c7008730 100644
--- a/src/ethereum_test_base_types/conversions.py
+++ b/src/ethereum_test_base_types/conversions.py
@@ -100,4 +100,8 @@ def to_number(input_number: NumberConvertible) -> int:
         return int(input_number, 0)
     if isinstance(input_number, bytes) or isinstance(input_number, SupportsBytes):
         return int.from_bytes(input_number, byteorder="big")
-    raise Exception("invalid type for `number`")
+
+    raise Exception(
+        f"Invalid type for `number`. Got {type(input_number)} but expected int, str or bytes!\n"
+        f"Value of `number` that was passed: {input_number}"
+    )
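# Expected to_number() behaviour, shown standalone — a sketch; the import path matches
# the file touched above:
from ethereum_test_base_types.conversions import to_number

assert to_number(7) == 7
assert to_number("0x10") == 16  # strings are parsed with base auto-detection, int(x, 0)
assert to_number(b"\x01\x00") == 256  # bytes are decoded big-endian
# to_number(1.5) now raises: Invalid type for `number`. Got <class 'float'> ...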
diff --git a/src/ethereum_test_specs/blockchain.py b/src/ethereum_test_specs/blockchain.py
index d896a2509ac..927057da729 100644
--- a/src/ethereum_test_specs/blockchain.py
+++ b/src/ethereum_test_specs/blockchain.py
@@ -1,13 +1,13 @@
 """Ethereum blockchain test spec definition and filler."""
 
 import warnings
-from pprint import pprint
 from typing import Any, Callable, ClassVar, Dict, Generator, List, Optional, Sequence, Tuple, Type
 
 import pytest
 from pydantic import ConfigDict, Field, field_validator
 
 from ethereum_clis import BlockExceptionWithMessage, Result, TransitionTool
+from ethereum_clis.types import TransitionToolOutput
 from ethereum_test_base_types import (
     Address,
     Bloom,
@@ -18,6 +18,7 @@
     HexNumber,
     Number,
 )
+from ethereum_test_base_types.composite_types import BlobSchedule, TimestampBlobSchedule
 from ethereum_test_exceptions import (
     BlockException,
     EngineAPIError,
@@ -52,11 +53,14 @@
 from ethereum_test_fixtures.common import FixtureBlobSchedule
 from ethereum_test_forks import Fork
 from ethereum_test_types import Alloc, Environment, Removable, Requests, Transaction, Withdrawal
+from pytest_plugins.logging import get_logger
 
 from .base import BaseTest, verify_result
 from .debugging import print_traces
 from .helpers import verify_block, verify_transactions
 
+logger = get_logger(__name__)
+
 
 def environment_from_parent_header(parent: "FixtureHeader") -> "Environment":
     """Instantiate new environment with the provided header as parent."""
@@ -407,6 +411,7 @@ class BlockchainTest(BaseTest):
     verify_sync: bool = False
     chain_id: int = 1
     exclude_full_post_state_in_output: bool = False
     """
    Exclude the post state from the fixture output.
     In this case, the state verification is only performed based on the state root.
     """
+    bpo_schedule: TimestampBlobSchedule | None = None
@@ -489,21 +494,24 @@ def generate_block_data(
         block: Block,
         previous_env: Environment,
         previous_alloc: Alloc,
+        bpo_schedule: TimestampBlobSchedule | None,
     ) -> BuiltBlock:
         """Generate common block data for both make_fixture and make_hive_fixture."""
         env = block.set_environment(previous_env)
         env = env.set_fork_requirements(fork)
 
         txs: List[Transaction] = []
+
+        # warn when a non-gas-heavy test uses a very high transaction gas_limit
         for tx in block.txs:
             if not self.is_tx_gas_heavy_test() and tx.gas_limit >= Environment().gas_limit:
                 warnings.warn(
                     f"{self.node_id()} uses a high Transaction gas_limit: {tx.gas_limit}",
                     stacklevel=2,
                 )
-
             txs.append(tx.with_signature_and_sender())
 
+        # ensure the exception test comes at the end, if it exists at all
         if failing_tx_count := len([tx for tx in txs if tx.error]) > 0:
             if failing_tx_count > 1:
                 raise Exception(
@@ -515,7 +523,16 @@ def generate_block_data(
                 + "must be the last transaction in the block"
             )
 
-        transition_tool_output = t8n.evaluate(
+        # if a bpo schedule has been passed, let it overwrite the blob schedule
+        fork_blob_schedule: BlobSchedule | None = fork.blob_schedule()
+        assert fork_blob_schedule is not None
+        if bpo_schedule is not None:
+            # TODO: fork_blob_schedule = bpo_schedule
+            pass
+
+        # get transition tool response
+        transition_tool_output: TransitionToolOutput = t8n.evaluate(
             transition_tool_data=TransitionTool.TransitionToolData(
                 alloc=previous_alloc,
                 txs=txs,
@@ -523,7 +540,7 @@
                 fork=fork,
                 chain_id=self.chain_id,
                 reward=fork.get_reward(env.number, env.timestamp),
-                blob_schedule=fork.blob_schedule(),
+                blob_schedule=fork_blob_schedule,
             ),
             debug_output_path=self.get_next_transition_tool_output_path(),
             slow_request=self.is_tx_gas_heavy_test(),
@@ -617,9 +634,13 @@ def generate_block_data(
             verify_result(transition_tool_output.result, env)
         except Exception as e:
             print_traces(t8n.get_traces())
-            pprint(transition_tool_output.result)
-            pprint(previous_alloc)
-            pprint(transition_tool_output.alloc)
+
+            # only print the full t8n output when debug logging is explicitly activated
+            logger.debug(
+                f"T8n output: {transition_tool_output.result}\n"
+                f"Previous alloc: {previous_alloc}\n"
+                f"T8n alloc: {transition_tool_output.alloc}"
+            )
             raise e
 
         if len(rejected_txs) > 0 and block.exception is None:
@@ -646,11 +667,16 @@ def verify_post_state(self, t8n, t8n_state: Alloc, expected_state: Alloc | None
             raise e
 
     def make_fixture(
         self,
         t8n: TransitionTool,
         fork: Fork,
+        bpo_schedule: TimestampBlobSchedule | None,
     ) -> BlockchainFixture:
         """Create a fixture from the blockchain test definition."""
+        fork_blob_schedule: BlobSchedule | None = fork.blob_schedule()
+        assert fork_blob_schedule is not None
+        if bpo_schedule is not None:
+            # TODO: fork_blob_schedule = bpo_schedule
+            pass
+
         fixture_blocks: List[FixtureBlock | InvalidFixtureBlock] = []
 
         pre, genesis = self.make_genesis(fork=fork, apply_pre_allocation_blockchain=True)
@@ -669,6 +695,7 @@
                 block=block,
                 previous_env=env,
                 previous_alloc=alloc,
+                bpo_schedule=bpo_schedule,
             )
             fixture_blocks.append(built_block.get_fixture_block())
             if block.exception is None:
@@ -696,7 +723,7 @@
             post_state_hash=alloc.state_root() if self.exclude_full_post_state_in_output else None,
             config=FixtureConfig(
                 fork=fork,
-                blob_schedule=FixtureBlobSchedule.from_blob_schedule(fork.blob_schedule()),
+                blob_schedule=FixtureBlobSchedule.from_blob_schedule(fork_blob_schedule),
                 chain_id=self.chain_id,
             ),
         )
@@ -705,9 +732,17 @@
     def make_hive_fixture(
         self,
         t8n: TransitionTool,
         fork: Fork,
+        bpo_schedule: TimestampBlobSchedule | None,
         fixture_format: FixtureFormat = BlockchainEngineFixture,
     ) -> BlockchainEngineFixture | BlockchainEngineXFixture:
         """Create a hive fixture from the blocktest definition."""
+        fork_blob_schedule: BlobSchedule | None = fork.blob_schedule()
+        assert fork_blob_schedule is not None
+        if bpo_schedule is not None:
+            # TODO: fork_blob_schedule = bpo_schedule
+            pass
+
         fixture_payloads: List[FixtureEngineNewPayload] = []
 
         pre, genesis = self.make_genesis(
@@ -725,6 +760,7 @@
                 block=block,
                 previous_env=env,
                 previous_alloc=alloc,
+                bpo_schedule=bpo_schedule,
             )
             fixture_payloads.append(built_block.get_fixture_engine_new_payload())
             if block.exception is None:
@@ -765,6 +801,7 @@
                 block=Block(),
                 previous_env=env,
                 previous_alloc=alloc,
+                bpo_schedule=bpo_schedule,
            )
             sync_payload = sync_built_block.get_fixture_engine_new_payload()
 
@@ -779,7 +816,7 @@
             "config": FixtureConfig(
                 fork=fork,
                 chain_id=self.chain_id,
-                blob_schedule=FixtureBlobSchedule.from_blob_schedule(fork.blob_schedule()),
+                blob_schedule=FixtureBlobSchedule.from_blob_schedule(fork_blob_schedule),
             ),
         }
 
@@ -813,13 +850,16 @@
         t8n: TransitionTool,
         fork: Fork,
         fixture_format: FixtureFormat,
+        bpo_schedule: TimestampBlobSchedule | None = None,
     ) -> BaseFixture:
         """Generate the BlockchainTest fixture."""
         t8n.reset_traces()
         if fixture_format in [BlockchainEngineFixture, BlockchainEngineXFixture]:
-            return self.make_hive_fixture(t8n, fork, fixture_format)
+            return self.make_hive_fixture(
+                t8n=t8n, fork=fork, bpo_schedule=bpo_schedule, fixture_format=fixture_format
+            )
         elif fixture_format == BlockchainFixture:
-            return self.make_fixture(t8n, fork)
+            return self.make_fixture(t8n=t8n, fork=fork, bpo_schedule=bpo_schedule)
 
         raise Exception(f"Unknown fixture format: {fixture_format}")
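# Hypothetical helper (not in this patch) showing how the pending TODOs above could
# resolve the active ForkBlobSchedule for a block: pick the entry with the highest
# activation timestamp that is <= the block timestamp. A sketch only; name and
# placement are assumptions.
from typing import Optional

from ethereum_test_base_types.composite_types import ForkBlobSchedule, TimestampBlobSchedule


def active_fork_blob_schedule(
    bpo_schedule: TimestampBlobSchedule, block_timestamp: int
) -> Optional[ForkBlobSchedule]:
    """Return the last scheduled BPO entry activated at or before `block_timestamp`."""
    active: Optional[ForkBlobSchedule] = None
    for entry in bpo_schedule.root:  # root is kept sorted ascending by add_schedule()
        activation_timestamp = next(iter(entry))
        if activation_timestamp <= block_timestamp:
            active = entry[activation_timestamp]
    return active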
diff --git a/src/ethereum_test_types/blob_bpo_config.json b/src/ethereum_test_types/blob_bpo_config.json
new file mode 100644
index 00000000000..6d311a6bf3e
--- /dev/null
+++ b/src/ethereum_test_types/blob_bpo_config.json
@@ -0,0 +1,37 @@
+{
+    "blobSchedule": {
+        "cancun": {
+            "target": 3,
+            "max": 6,
+            "baseFeeUpdateFraction": 3338477
+        },
+        "prague": {
+            "target": 6,
+            "max": 9,
+            "baseFeeUpdateFraction": 5007716
+        },
+        "osaka": {
+            "target": 6,
+            "max": 9,
+            "maxBlobsPerTx": 6,
+            "baseFeeUpdateFraction": 5007716
+        },
+        "bpo1": {
+            "target": 12,
+            "max": 16,
+            "maxBlobsPerTx": 12,
+            "baseFeeUpdateFraction": 5007716
+        },
+        "bpo2": {
+            "target": 16,
+            "max": 24,
+            "maxBlobsPerTx": 12,
+            "baseFeeUpdateFraction": 5007716
+        }
+    },
+    "cancunTime": 0,
+    "pragueTime": 0,
+    "osakaTime": 1747387400,
+    "bpo1Time": 1757387400,
+    "bpo2Time": 1767387784
+}
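# A minimal sketch (not part of the patch) of reading the new blob_bpo_config.json; the
# layout mirrors the client genesis-config convention: per-fork `blobSchedule` entries
# plus `<fork>Time` activation timestamps at the top level. Assumes it is run from the
# repository root.
import json
from pathlib import Path

config = json.loads(Path("src/ethereum_test_types/blob_bpo_config.json").read_text())
for fork_name, entry in config["blobSchedule"].items():
    activation = config.get(f"{fork_name}Time")  # None if no activation time is listed
    print(fork_name, entry["target"], entry["max"], entry["baseFeeUpdateFraction"], activation)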
diff --git a/src/ethereum_test_types/blob_types.py b/src/ethereum_test_types/blob_types.py
index 4fb9f3adc1b..ca7e13afc31 100644
--- a/src/ethereum_test_types/blob_types.py
+++ b/src/ethereum_test_types/blob_types.py
@@ -26,12 +26,19 @@ def clear_blob_cache(cached_blobs_folder_path: Path):
     """Delete all cached blobs."""
     if not cached_blobs_folder_path.is_dir():
         return
-    for f in cached_blobs_folder_path.glob("*.json"):  # only delete .json files
+
+    json_files = list(cached_blobs_folder_path.glob("*.json"))
+
+    for f in json_files:
+        lock_file_path = f.with_suffix(".lock")
+
         try:
-            f.unlink()  # permanently delete this file
+            # acquire the file lock for the file you want to delete
+            with FileLock(lock_file_path):
+                f.unlink()
         except Exception as e:
             print(
-                f"Critical error while trying to delete file {f}:{e}.. "
+                f"Error while trying to delete file {f}: {e}. "
                 "Aborting clearing of blob cache folder."
             )
             return
@@ -186,11 +193,15 @@ def get_cells(fork: Fork, data: Bytes) -> List[Bytes] | None:
         # (blob related constants are needed and only available for normal forks)
         fork = fork.fork_at(timestamp=timestamp)
 
-        # if this blob already exists then load from file
+        # if this blob already exists then load it from file
         blob_location: Path = Blob.get_filepath(fork, seed)
-        if blob_location.exists():
-            logger.debug(f"Blob exists already, reading it from file {blob_location}")
-            return Blob.from_file(Blob.get_filename(fork, seed))
+
+        # use lock to avoid race conditions
+        lock_file_path = blob_location.with_suffix(".lock")
+        with FileLock(lock_file_path):
+            if blob_location.exists():
+                logger.debug(f"Blob exists already, reading it from file {blob_location}")
+                return Blob.from_file(Blob.get_filename(fork, seed))
 
         assert fork.supports_blobs(), f"Provided fork {fork.name()} does not support blobs!"
@@ -237,17 +248,14 @@ def from_file(file_name: str) -> "Blob":
         # determine path where this blob would be stored if it existed
         blob_file_location = CACHED_BLOBS_DIRECTORY / file_name
 
-        # use lock to avoid race conditions
-        lock_file_path = blob_file_location.with_suffix(".lock")
-        with FileLock(lock_file_path):
-            # check whether blob exists
-            assert blob_file_location.exists(), (
-                f"Tried to load blob from file but {blob_file_location} does not exist"
-            )
+        # check whether blob exists
+        assert blob_file_location.exists(), (
+            f"Tried to load blob from file but {blob_file_location} does not exist"
+        )
 
-            # read blob from file
-            with open(blob_file_location, "r", encoding="utf-8") as f:
-                json_str: str = f.read()
+        # read blob from file
+        with open(blob_file_location, "r", encoding="utf-8") as f:
+            json_str: str = f.read()
 
         # reconstruct and return blob object
         return Blob.model_validate_json(json_str)
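# The caller-side locking pattern used above, shown standalone — a sketch using the
# `filelock` package (already imported by this module): the lock file lives next to the
# data file, and every reader/writer takes the lock before touching the data, making the
# check-then-act sequence atomic across processes. The path is hypothetical.
from pathlib import Path

from filelock import FileLock

data_file = Path("/tmp/blob_cache/example.json")
data_file.parent.mkdir(parents=True, exist_ok=True)

with FileLock(data_file.with_suffix(".lock")):
    # no other process holding the same lock can interleave between these steps
    if not data_file.exists():
        data_file.write_text("{}")
    content = data_file.read_text()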
diff --git a/src/ethereum_test_types/tests/test_blob_types.py b/src/ethereum_test_types/tests/test_blob_types.py
index d3f1be5daa5..4aefc61a912 100644
--- a/src/ethereum_test_types/tests/test_blob_types.py
+++ b/src/ethereum_test_types/tests/test_blob_types.py
@@ -1,8 +1,10 @@
 """Test suite for blobs."""
 
 import copy
+import time
 
 import pytest
+from filelock import FileLock
 
 from ethereum_test_forks import (
     Cancun,
@@ -18,6 +20,65 @@
 from ..blob_types import CACHED_BLOBS_DIRECTORY, Blob, clear_blob_cache
 
 
+def increment_counter(timeout: float = 10):
+    """
+    Increment the counter in the file, creating it if it doesn't exist.
+
+    This is needed because we require the unit test 'test_transition_fork_blobs' to run
+    last, without having to include another dependency for ordering tests.
+    That test has to run last because it assumes that no json blobs other than the ones
+    it creates itself appear while it is running.
+
+    The hardcoded counter value in that test has to be updated if any new blob-related
+    unit tests that create json blobs are added in the future.
+    """
+    file_path = CACHED_BLOBS_DIRECTORY / "blob_unit_test_counter.txt"
+    lock_file = file_path.with_suffix(".lock")
+
+    with FileLock(lock_file, timeout=timeout):
+        # read current value or start at 0
+        if file_path.exists():
+            current_value = int(file_path.read_text().strip())
+        else:
+            current_value = 0
+
+        # increment and write back
+        new_value = current_value + 1
+        file_path.write_text(str(new_value))
+
+    return new_value
+
+
+def wait_until_counter_reached(target: int, poll_interval: float = 0.1):
+    """Wait until the blob unit test counter reaches the target value."""
+    file_path = CACHED_BLOBS_DIRECTORY / "blob_unit_test_counter.txt"
+    lock_file = file_path.with_suffix(".lock")
+
+    while True:
+        # take the file lock when reading, too
+        with FileLock(lock_file, timeout=10):
+            if file_path.exists():
+                try:
+                    current_value = int(file_path.read_text().strip())
+                    if current_value == target:
+                        return current_value
+                    elif current_value > target:
+                        pytest.fail(
+                            f"The blob_unit_test lock counter is too high! "
+                            f"Expected {target}, but got {current_value}. "
+                            f"It probably reused an existing file that was not cleared. "
+                            f"Delete {file_path} manually to fix this."
+                        )
+                except Exception:
+                    current_value = 0
+            else:
+                current_value = 0
+
+        time.sleep(poll_interval)
+
+
 @pytest.mark.parametrize("seed", [0, 10, 100])
 @pytest.mark.parametrize("fork", [Cancun, Prague, Osaka])
 def test_blob_creation_and_writing_and_reading(
@@ -42,6 +103,8 @@
     # ensure file you read equals file you wrote
     assert b.model_dump() == restored.model_dump()
 
+    increment_counter()
+
 
 @pytest.mark.parametrize(
     "corruption_mode",
@@ -71,6 +134,8 @@
         "proof is unchanged!"
    )
 
+    increment_counter()
+
 
 @pytest.mark.parametrize("timestamp", [14999, 15000])
 @pytest.mark.parametrize(
@@ -81,6 +146,9 @@ def test_transition_fork_blobs(
     timestamp,
 ):
     """Generates blobs for transition forks (time 14999 is old fork, time 15000 is new fork)."""
+    # the line below guarantees that this test runs only after the other blob unit tests are done
+    wait_until_counter_reached(21)
+
     clear_blob_cache(CACHED_BLOBS_DIRECTORY)
 
     print(f"Original fork: {fork}, Timestamp: {timestamp}")
@@ -109,3 +177,7 @@
         f"Transition fork failure! Fork {fork.name()} at timestamp: {timestamp} should have "
         f"transitioned to {post_transition_fork_at_15k.name()} but is still at {b.fork.name()}"
     )
+
+    # delete counter on the last iteration (otherwise re-running all unit tests would fail)
+    if timestamp == 15_000 and pre_transition_fork == Prague:
+        (CACHED_BLOBS_DIRECTORY / "blob_unit_test_counter.txt").unlink()
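# A toy demonstration (standalone, not part of the test suite) of the ordering protocol
# the helpers above implement: N participants bump a shared file-backed counter, and a
# final step proceeds only once the counter reads N. The hardcoded 21 in the real test
# must equal the total number of increment_counter() calls across the earlier tests.
import tempfile
from pathlib import Path

from filelock import FileLock

counter = Path(tempfile.gettempdir()) / "demo_counter.txt"
lock = FileLock(str(counter.with_suffix(".lock")))

for _ in range(3):  # three "tests" finishing
    with lock:
        value = int(counter.read_text()) if counter.exists() else 0
        counter.write_text(str(value + 1))

with lock:  # the "final test" would poll until this reads 3
    assert int(counter.read_text()) == 3
counter.unlink()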
diff --git a/src/pytest_plugins/eels_resolutions.json b/src/pytest_plugins/eels_resolutions.json
index 81b4f9208c4..53547608817 100644
--- a/src/pytest_plugins/eels_resolutions.json
+++ b/src/pytest_plugins/eels_resolutions.json
@@ -1,8 +1,8 @@
 {
     "EELSMaster": {
         "git_url": "https://github.com/ethereum/execution-specs.git",
-        "branch": "master",
-        "commit": "e13d33ab21ef1fdd2f073c96a3346e23eb7727f6"
+        "branch": "forks/osaka",
+        "commit": "5a49b2f39a909be6a8c84bb70611febdc2b2fd98"
     },
     "Frontier": {
         "same_as": "EELSMaster"
@@ -38,8 +38,6 @@
         "same_as": "EELSMaster"
     },
     "Osaka": {
-        "git_url": "https://github.com/spencer-tb/execution-specs.git",
-        "branch": "forks/osaka",
-        "commit": "07699170182691533023fa5d83086258c3edcfd3"
+        "same_as": "EELSMaster"
     }
 }
diff --git a/tests/osaka/eip7594_peerdas/spec.py b/tests/osaka/eip7594_peerdas/spec.py
index 050da14ac62..65fdf66d8b5 100644
--- a/tests/osaka/eip7594_peerdas/spec.py
+++ b/tests/osaka/eip7594_peerdas/spec.py
@@ -21,4 +21,5 @@ class Spec:
     https://eips.ethereum.org/EIPS/eip-7594.
     """
 
-    pass
+    MAX_BLOBS_PER_TX = 6
+    BLOB_COMMITMENT_VERSION_KZG = 1
diff --git a/tests/osaka/eip7892_bpo/__init__.py b/tests/osaka/eip7892_bpo/__init__.py
new file mode 100644
index 00000000000..a2a1ca022b0
--- /dev/null
+++ b/tests/osaka/eip7892_bpo/__init__.py
@@ -0,0 +1 @@
+"""EIP-7892 Tests."""
diff --git a/tests/osaka/eip7892_bpo/conftest.py b/tests/osaka/eip7892_bpo/conftest.py
new file mode 100644
index 00000000000..e65011432ec
--- /dev/null
+++ b/tests/osaka/eip7892_bpo/conftest.py
@@ -0,0 +1 @@
+"""Pytest (plugin) definitions local to EIP-7892 tests."""
diff --git a/tests/osaka/eip7892_bpo/spec.py b/tests/osaka/eip7892_bpo/spec.py
new file mode 100644
index 00000000000..fb650ba6d8c
--- /dev/null
+++ b/tests/osaka/eip7892_bpo/spec.py
@@ -0,0 +1,24 @@
+"""Defines EIP-7892 specification constants and functions."""
+
+from dataclasses import dataclass
+
+# Base the spec on EIP-4844, which EIP-7892 extends
+from ...cancun.eip4844_blobs.spec import Spec as EIP4844Spec
+
+
+@dataclass(frozen=True)
+class ReferenceSpec:
+    """Defines the reference spec version and git path."""
+
+    git_path: str
+    version: str
+
+
+ref_spec_7892 = ReferenceSpec("EIPS/eip-7892.md", "e42c14f83052bfaa8c38832dcbc46e357dd1a1d9")
+
+
+@dataclass(frozen=True)
+class Spec(EIP4844Spec):
+    """Parameters from the EIP-7892 specification."""
+
+    pass
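# For context on BLOB_COMMITMENT_VERSION_KZG above: per EIP-4844, a blob versioned hash
# is the SHA-256 of the KZG commitment with the first byte replaced by the version byte
# (0x01). The in-repo helper for this is add_kzg_version from ethereum_test_types.helpers;
# the standalone function below is a sketch of the same rule, for illustration only.
from hashlib import sha256


def kzg_to_versioned_hash(commitment: bytes, version: int = 1) -> bytes:
    """Replace the first byte of sha256(commitment) with the version byte."""
    return bytes([version]) + sha256(commitment).digest()[1:]


assert kzg_to_versioned_hash(b"\x00" * 48)[0] == 1  # KZG commitments are 48 bytes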
diff --git a/tests/osaka/eip7892_bpo/test_bpo.py b/tests/osaka/eip7892_bpo/test_bpo.py
new file mode 100644
index 00000000000..ee0641a6618
--- /dev/null
+++ b/tests/osaka/eip7892_bpo/test_bpo.py
@@ -0,0 +1,132 @@
+"""abstract: Test [EIP-7892: Blob Parameter Only Hardforks](https://eips.ethereum.org/EIPS/eip-7892)."""
+
+import pytest
+
+from ethereum_test_base_types.base_types import Address, Hash
+from ethereum_test_base_types.composite_types import ForkBlobSchedule, TimestampBlobSchedule
+from ethereum_test_forks import Fork
+from ethereum_test_tools import (
+    Alloc,
+    Block,
+    BlockchainTestFiller,
+)
+from ethereum_test_types import Environment
+from ethereum_test_types.helpers import add_kzg_version
+from ethereum_test_types.transaction_types import Transaction
+from tests.osaka.eip7594_peerdas.spec import Spec
+
+from .spec import ref_spec_7892  # type: ignore
+
+REFERENCE_SPEC_GIT_PATH = ref_spec_7892.git_path
+REFERENCE_SPEC_VERSION = ref_spec_7892.version
+
+
+# def min_base_fee_per_blob_gas_bpo(block_number: int = 0, timestamp: int = 0) -> int:
+#     """Return the minimum base fee per blob gas for a BPO fork."""
+#     return 1
+
+
+# def blob_gas_price_calculator_bpo(
+#     fork_blob_schedule: ForkBlobSchedule,
+#     block_number: int = 0,
+#     timestamp: int = 0,
+# ) -> BlobGasPriceCalculator:
+#     """Return a callable that calculates the blob gas price for a BPO schedule."""
+#     fake_exponential(
+#         factor=min_base_fee_per_blob_gas_bpo(block_number=block_number, timestamp=timestamp),
+#         numerator=excess_blob_gas,
+#         denominator=fork_blob_schedule.base_fee_update_fraction,
+#     )
+
+
+@pytest.fixture
+def bpo_schedule() -> TimestampBlobSchedule:
+    """Create and return the BPO pseudo-schedule used by the tests in this file."""
+    bpo_schedule = TimestampBlobSchedule()
+    # the timestamps below are at least 3 apart, as required by add_schedule()
+    bpo_schedule.add_schedule(
+        20000, ForkBlobSchedule(max=6, target_blobs_per_block=5, base_fee_update_fraction=5007716)
+    )
+    bpo_schedule.add_schedule(
+        21000, ForkBlobSchedule(max=8, target_blobs_per_block=7, base_fee_update_fraction=5555555)
+    )
+    bpo_schedule.add_schedule(
+        22000, ForkBlobSchedule(max=4, target_blobs_per_block=3, base_fee_update_fraction=4444444)
+    )
+    return bpo_schedule
+
+
+def tx(
+    sender: Address,
+    fork_blob_schedule: ForkBlobSchedule,
+) -> Transaction:
+    """Return a blob transaction that uses the maximum blob count of the given schedule."""
+    # calculator = blob_gas_price_calculator_bpo(fork_blob_schedule=fork_blob_schedule)
+    # max_fee_per_blob_gas = calculator()
+
+    return Transaction(
+        ty=3,
+        sender=sender,
+        value=1,
+        gas_limit=21_000,
+        max_fee_per_gas=10,
+        max_priority_fee_per_gas=1,
+        # TODO: derive this via the excess_blob_gas fixtures; it is unclear how to use
+        # them with BPO, which is not a fork in our modelling, so use a generous constant
+        max_fee_per_blob_gas=999_999_999_999,
+        access_list=[],
+        blob_versioned_hashes=add_kzg_version(
+            [Hash(i) for i in range(fork_blob_schedule.max_blobs_per_block)],
+            Spec.BLOB_COMMITMENT_VERSION_KZG,
+        ),
+    )
+
+
+@pytest.mark.valid_from("Osaka")
+def test_bpo_schedule(
+    blockchain_test: BlockchainTestFiller,
+    pre: Alloc,
+    env: Environment,
+    fork: Fork,
+    sender: Address,
+    bpo_schedule: TimestampBlobSchedule,
+):
+    """Test whether clients correctly follow a provided BPO schedule."""
+    blocks = []
+    # each schedule_dict maps a single activation timestamp to its ForkBlobSchedule
+    for schedule_dict in bpo_schedule.root:
+        timestamp = next(iter(schedule_dict))  # the only key is the timestamp
+        fork_blob_schedule: ForkBlobSchedule | None = schedule_dict.get(timestamp)
+        assert fork_blob_schedule is not None
+
+        # add block before bpo
+        blocks.append(
+            Block(
+                txs=[tx(sender=sender, fork_blob_schedule=fork_blob_schedule)],
+                timestamp=timestamp - 1,
+            )
+        )
+        # add block at bpo
+        blocks.append(
+            Block(
+                txs=[tx(sender=sender, fork_blob_schedule=fork_blob_schedule)],
+                timestamp=timestamp,
+            )
+        )
+        # add block after bpo
+        blocks.append(
+            Block(
+                txs=[tx(sender=sender, fork_blob_schedule=fork_blob_schedule)],
+                timestamp=timestamp + 1,
+            )
+        )
+
+    # number of created blocks = 3 * len(bpo_schedule.root)
+    assert len(blocks) == 3 * len(bpo_schedule.root)
+
+    blockchain_test(
+        genesis_environment=env,
+        pre=pre,
+        post={},
+        blocks=blocks,
+        bpo_schedule=bpo_schedule,
+    )
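# The commented-out blob_gas_price_calculator_bpo above gestures at EIP-4844 blob gas
# pricing. For reference, this is the fake_exponential routine from the EIP-4844 spec:
# blob base fee = fake_exponential(MIN_BASE_FEE_PER_BLOB_GAS, excess_blob_gas,
# base_fee_update_fraction), where base_fee_update_fraction would come from the active
# ForkBlobSchedule. A standalone sketch, not wired into this patch:
def fake_exponential(factor: int, numerator: int, denominator: int) -> int:
    """Integer approximation of factor * e**(numerator / denominator) (EIP-4844)."""
    i = 1
    output = 0
    numerator_accum = factor * denominator
    while numerator_accum > 0:
        output += numerator_accum
        numerator_accum = (numerator_accum * numerator) // (denominator * i)
        i += 1
    return output // denominator


# e.g. with no excess blob gas, the blob base fee is the 1 wei minimum:
assert fake_exponential(1, 0, 5007716) == 1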