-
Notifications
You must be signed in to change notification settings - Fork 24
test: poa should be validated against parent blocks epoch snapshot, not current #822
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
9e2f13d
9390829
1d9305c
321a918
d0a3c68
8a94eae
3eb8a91
fef4df7
9fff24c
f61cb47
c658bdf
01935d4
967d5e3
c3f68c0
efdec49
8cf1bb6
21b5aac
8dee343
302c5fe
fef3c63
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,324 @@ | ||
| use crate::utils::{ | ||
| craft_data_poa_solution_from_tx, get_epoch_snapshot, read_block_from_state, | ||
| submit_solution_to_block_producer, BlockValidationOutcome, IrysNodeTest, | ||
| }; | ||
| use irys_actors::BlockProducerCommand; | ||
| use irys_chain::IrysNodeCtx; | ||
| use irys_domain::EpochSnapshot; | ||
| use irys_types::{irys::IrysSigner, DataLedger, NodeConfig}; | ||
| use std::sync::Arc; | ||
|
|
||
| /// End-to-end: mine to epoch boundary, craft a data PoA solution referencing existing ledger data, | ||
| /// submit it to the block producer, and assert the boundary block is accepted and canonical. | ||
| /// | ||
| /// Rationale: | ||
| /// - This exercises the full validation path, including PoA validation against the parent epoch | ||
| /// snapshot at the boundary. | ||
| /// - We set submit_ledger_epoch_length = 2 to encourage slot changes at the boundary, which makes | ||
| /// correctness dependent on using the PARENT snapshot for PoA validation. | ||
| /// - The test constructs a SolutionContext using a real tx's data (with tx_path/data_path) that | ||
| /// is already in the ledger, producing a data PoA block (as opposed to a capacity PoA block). | ||
| #[test_log::test(actix_web::test)] | ||
| async fn data_poa_boundary_acceptance() -> eyre::Result<()> { | ||
| let max_seconds = 10; | ||
| const CHUNK_SIZE: u64 = 32; | ||
| const DATA_SIZE: usize = 256; | ||
| const BLOCKS_PER_EPOCH: u64 = 3; | ||
|
|
||
| let mut config = NodeConfig::testing(); | ||
| config.consensus.get_mut().block_migration_depth = 1; | ||
| config.consensus.get_mut().chunk_size = CHUNK_SIZE; | ||
| config.consensus.get_mut().num_chunks_in_partition = 8; | ||
| config.consensus.get_mut().num_partitions_per_slot = 1; | ||
| config.consensus.get_mut().epoch.num_blocks_in_epoch = BLOCKS_PER_EPOCH; | ||
| config.consensus.get_mut().epoch.num_capacity_partitions = Some(100); | ||
| config.consensus.get_mut().epoch.submit_ledger_epoch_length = 2; | ||
| config.consensus.get_mut().difficulty_adjustment.block_time = 1; | ||
| config | ||
| .consensus | ||
| .get_mut() | ||
| .difficulty_adjustment | ||
| .difficulty_adjustment_interval = 1_000_000; | ||
| config.consensus.get_mut().num_chunks_in_recall_range = 1; | ||
| config.consensus.get_mut().vdf.sha_1s_difficulty = 10_000; | ||
|
|
||
| let user_signer = IrysSigner::random_signer(&config.consensus_config()); | ||
| config.fund_genesis_accounts(vec![&user_signer]); | ||
|
|
||
| let node = IrysNodeTest::new_genesis(config.clone()) | ||
| .start_and_wait_for_packing("data_poa_boundary_acceptance", 30) | ||
| .await; | ||
|
|
||
| let slot_size = node.node_ctx.config.consensus.num_partitions_per_slot | ||
| * node.node_ctx.config.consensus.num_chunks_in_partition; | ||
|
|
||
| let anchor = node.get_block_by_height(0).await?.block_hash; | ||
|
|
||
| debug_partitions(get_epoch_snapshot(&node).await?); | ||
| debug_asserts_first_slots(&node, 1, 1, 1, 1).await?; | ||
|
|
||
| let _block1 = node.mine_block().await?; | ||
| node.wait_until_height(1, max_seconds).await?; | ||
|
|
||
| debug_asserts_first_slots(&node, 1, 1, 1, 1).await?; | ||
|
|
||
| let _ = node.post_stake_commitment_with_signer(&user_signer).await?; | ||
| let _ = node.post_pledge_commitment_with_signer(&user_signer).await; | ||
| node.mine_blocks(2).await?; | ||
| node.wait_until_height(3, max_seconds).await?; | ||
| debug_asserts_first_slots(&node, 1, 1, 1, 1).await?; | ||
|
|
||
| let _ = node.mine_until_next_epoch().await?; | ||
| let _ = node.mine_until_next_epoch().await?; | ||
| node.wait_for_packing(20).await; | ||
| debug_asserts_first_slots(&node, 1, 1, 1, 1).await?; | ||
|
|
||
| // Post TWO txs so data extends into slot 1 (which should have partitions). | ||
| let _tx1 = node | ||
| .post_data_tx(anchor, vec![7_u8; DATA_SIZE], &user_signer) | ||
| .await; | ||
| let _ = node.mine_block().await?; | ||
| let tx = node | ||
| .post_data_tx(anchor, vec![9_u8; DATA_SIZE], &user_signer) | ||
| .await; | ||
| let _ = node.mine_block().await?; | ||
| // tx2 starts at slot 1 | ||
| let tx_ledger_offset: u64 = slot_size; | ||
|
|
||
| node.wait_for_packing(20).await; | ||
| debug_asserts_first_slots(&node, 1, 1, 1, 1).await?; | ||
|
|
||
| let _ = node.mine_until_next_epoch().await?; | ||
| node.wait_for_packing(20).await; | ||
| let snapshot = get_epoch_snapshot(&node).await?; | ||
| let submit_slots = snapshot.ledgers.get_slots(irys_types::DataLedger::Submit); | ||
| assert!( | ||
| !submit_slots.is_empty(), | ||
| "Should have at least 1 submit slot after epochs" | ||
| ); | ||
| let total_partitions: usize = submit_slots.iter().map(|s| s.partitions.len()).sum(); | ||
| assert!( | ||
| total_partitions > 0, | ||
| "Should have at least 1 partition across submit slots" | ||
| ); | ||
|
|
||
| node.mine_until_next_epoch().await?; | ||
| node.wait_for_packing(20).await; | ||
|
|
||
| node.node_ctx | ||
| .service_senders | ||
| .block_producer | ||
| .send(BlockProducerCommand::SetTestBlocksRemaining(Some(0)))?; | ||
|
|
||
| let parent_snapshot = get_epoch_snapshot(&node).await?; | ||
| debug_partitions(parent_snapshot.clone()); | ||
|
|
||
| // Select partition assigned to the slot containing tx_ledger_offset | ||
| let target_slot_idx = (tx_ledger_offset / slot_size) as usize; | ||
| let partition_hash = { | ||
| let submit_slots = parent_snapshot.ledgers.get_slots(DataLedger::Submit); | ||
| submit_slots | ||
| .iter() | ||
| .flat_map(|slot| slot.partitions.iter().copied()) | ||
| .find(|&partition_hash| { | ||
| parent_snapshot | ||
| .partition_assignments | ||
| .get_assignment(partition_hash) | ||
| .and_then(|pa| pa.slot_index) | ||
| .map(|si| si == target_slot_idx) | ||
| .unwrap_or(false) | ||
| }) | ||
| .expect("Submit ledger should have a partition assigned to the slot containing the tx") | ||
| }; | ||
|
|
||
| let miner_addr = node.node_ctx.config.node_config.reward_address; | ||
|
|
||
| // Verify partition slot alignment | ||
| { | ||
| let pa = parent_snapshot | ||
| .partition_assignments | ||
| .get_assignment(partition_hash) | ||
| .expect("partition assignment must exist"); | ||
| let slot_index = pa | ||
| .slot_index | ||
| .expect("slot_index must exist for data partition") as u64; | ||
| assert_eq!( | ||
| slot_index as usize, target_slot_idx, | ||
| "partition slot_index should match the tx's slot" | ||
| ); | ||
| } | ||
|
Comment on lines
+134
to
+149
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Verify miner address used for PoA entropy matches the block’s miner address The PoA chunk entropy is keyed off the miner address, and let miner_addr = node.node_ctx.config.node_config.reward_address;
...
let sol_result = craft_data_poa_solution_from_tx(
&node,
&tx,
partition_hash,
miner_addr,
Some(tx_ledger_offset),
)Elsewhere (e.g., Please double-check that:
If they differ, switch to |
||
|
|
||
| // Enable mining for VDF advancement while retrying solutions | ||
| node.node_ctx.start_mining()?; | ||
|
|
||
| let solution = { | ||
| let max_attempts = 500_usize; | ||
| let mut attempt = 0_usize; | ||
| let delay_ms = 50_u64; | ||
| let parent_block = node | ||
| .get_block_by_height(node.get_canonical_chain_height().await) | ||
| .await?; | ||
| let parent_diff = parent_block.diff; | ||
|
|
||
| loop { | ||
| let sol_result = craft_data_poa_solution_from_tx( | ||
| &node, | ||
| &tx, | ||
| partition_hash, | ||
| miner_addr, | ||
| Some(tx_ledger_offset), | ||
| ) | ||
| .await; | ||
|
|
||
| match sol_result { | ||
| Ok(sol) => { | ||
| let solution_diff = irys_types::u256_from_le_bytes(&sol.solution_hash.0); | ||
| if solution_diff >= parent_diff { | ||
| tracing::debug!(attempt, "Found valid solution after attempts"); | ||
| break sol; | ||
| } | ||
| } | ||
| Err(e) => { | ||
| let err_msg = e.to_string(); | ||
| if !err_msg.contains("recall range mismatch") { | ||
| return Err(e); | ||
| } | ||
| } | ||
| } | ||
|
|
||
| attempt += 1; | ||
| if attempt >= max_attempts { | ||
| eyre::bail!( | ||
| "Failed to craft solution after {} attempts (last error: recall range or difficulty mismatch)", | ||
| attempt | ||
| ); | ||
| } | ||
|
|
||
| // Short constant delay to allow VDF advancement between attempts | ||
| tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await; | ||
| } | ||
| }; | ||
|
Comment on lines
+151
to
+200
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: #!/bin/bash
# Find the test file and check if it exists
find . -name "data_poa_boundary_acceptance.rs" -type fRepository: Irys-xyz/irys Length of output: 126 🏁 Script executed: #!/bin/bash
# Get repo structure overview
ls -la crates/chain/tests/block_production/ 2>/dev/null | head -20Repository: Irys-xyz/irys Length of output: 1223 🏁 Script executed: #!/bin/bash
# Search for VDF-related code to understand the mechanism
rg "start_vdf|current_step|vdf_advance" --type rust -i | head -30Repository: Irys-xyz/irys Length of output: 2378 🏁 Script executed: #!/bin/bash
# Look for difficulty configuration in tests
rg "max_attempts|500" crates/chain/tests/block_production/ --type rust -B 2 -A 2Repository: Irys-xyz/irys Length of output: 1534 🏁 Script executed: #!/bin/bash
# Read the test file to see full context
wc -l crates/chain/tests/block_production/data_poa_boundary_acceptance.rsRepository: Irys-xyz/irys Length of output: 128 🏁 Script executed: #!/bin/bash
# Read the test file starting from earlier to understand context
cat -n crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | head -200Repository: Irys-xyz/irys Length of output: 9263 🏁 Script executed: #!/bin/bash
# Look for the function containing lines 151-200
cat -n crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | sed -n '1,250p'Repository: Irys-xyz/irys Length of output: 11366 🏁 Script executed: #!/bin/bash
# Check if start_mining calls start_vdf or if they're separate
rg "start_mining|start_vdf" crates/chain/src/chain.rs -A 10 | head -50Repository: Irys-xyz/irys Length of output: 962 🏁 Script executed: #!/bin/bash
# Look at the craft_data_poa_solution_from_tx implementation
rg "fn craft_data_poa_solution_from_tx" crates/chain/tests/ -A 40 | head -100Repository: Irys-xyz/irys Length of output: 2967 🏁 Script executed: #!/bin/bash
# Check VDF advancement and what makes current_step change
rg "global_step" crates/chain/tests/utils.rs -B 5 -A 5 | head -80Repository: Irys-xyz/irys Length of output: 2373 🏁 Script executed: #!/bin/bash
# Look for difficulty settings and how they impact solution finding
rg "sha_1s_difficulty|solution_diff|parent_diff" crates/ --type rust | head -30Repository: Irys-xyz/irys Length of output: 2567 🏁 Script executed: #!/bin/bash
# Check if there's a direct call to start_vdf before the loop
cat -n crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | sed -n '145,165p'Repository: Irys-xyz/irys Length of output: 853 🏁 Script executed: #!/bin/bash
# Look at what start_mining does more carefully
cat -n crates/chain/src/chain.rs | grep -A 15 "pub fn start_mining"Repository: Irys-xyz/irys Length of output: 809 🏁 Script executed: #!/bin/bash
# Check if there are any asserts on VDF progression in similar test patterns
rg "global_step|current_step" crates/chain/tests/ -B 3 -A 3 | head -100Repository: Irys-xyz/irys Length of output: 6192 🏁 Script executed: #!/bin/bash
# Look at how other tests handle the solution loop and difficulty
rg "max_attempts|craft_.*_solution" crates/chain/tests/block_production/ --type rust -B 5 -A 10 | head -150Repository: Irys-xyz/irys Length of output: 5444 🏁 Script executed: #!/bin/bash
# Check if there are any recent test failures or flakiness issues mentioned
git log --oneline --all --grep="flak\|data_poa_boundary\|solution.*fail" | head -20Repository: Irys-xyz/irys Length of output: 917 🏁 Script executed: #!/bin/bash
# Search for examples of VDF progression assertions in test code
rg "while.*global_step|assert.*global_step|greater.*step" crates/chain/tests/ --type rust -B 3 -A 3 | head -60Repository: Irys-xyz/irys Length of output: 2048 🏁 Script executed: #!/bin/bash
# Look for any recent changes to the data_poa_boundary_acceptance test
git log --oneline -p crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | head -100Repository: Irys-xyz/irys Length of output: 4493 🏁 Script executed: #!/bin/bash
# Check what the craft_data_poa_solution_from_tx function returns and how it handles VDF step changes
sed -n '560,650p' crates/chain/tests/utils.rsRepository: Irys-xyz/irys Length of output: 3380 🏁 Script executed: #!/bin/bash
# Look at the full craft_data_poa_solution_from_tx to see if it handles VDF progression
sed -n '440,530p' crates/chain/tests/utils.rsRepository: Irys-xyz/irys Length of output: 3149 🏁 Script executed: #!/bin/bash
# Check if there are assertions about VDF step advancement in the solution finding loop
rg "attempt|step.*advance|steps.*change" crates/chain/tests/block_production/data_poa_boundary_acceptance.rs -B 2 -A 2Repository: Irys-xyz/irys Length of output: 973 🏁 Script executed: #!/bin/bash
# Get the full context of how solution_crafting_loop_capacity_poa handles VDF
rg "solution_crafting_loop_capacity_poa" crates/chain/tests/utils.rs -A 100 | head -120Repository: Irys-xyz/irys Length of output: 39 🏁 Script executed: #!/bin/bash
# Check if craft_data_poa_solution_from_tx actually uses new VDF steps or reuses the same one
cat -n crates/chain/tests/utils.rs | sed -n '295,380p'Repository: Irys-xyz/irys Length of output: 3606 🏁 Script executed: #!/bin/bash
# Look at the actual implementation more carefully to see step advancement
rg "fn craft_data_poa_solution_from_tx" crates/chain/tests/utils.rs -A 80 | head -100Repository: Irys-xyz/irys Length of output: 3264 🏁 Script executed: #!/bin/bash
# See the rest of craft_data_poa_solution_from_tx to understand the full flow
sed -n '100,200p' crates/chain/tests/utils.rs | grep -A 150 "pub async fn craft_data_poa_solution_from_tx" | tail -80Repository: Irys-xyz/irys Length of output: 39 🏁 Script executed: #!/bin/bash
# Get a longer look at the craft_data_poa_solution_from_tx function
cat -n crates/chain/tests/utils.rs | sed -n '100,280p'Repository: Irys-xyz/irys Length of output: 8305 🏁 Script executed: #!/bin/bash
# Check if there are any VDF step advancement checks/assertions we might have missed
rg "assert|expect" crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | grep -i vdfRepository: Irys-xyz/irys Length of output: 39 🏁 Script executed: #!/bin/bash
# Understand the VDF progression rate - how fast does global_step advance with sha_1s_difficulty=10_000?
rg "sha_1s_difficulty.*10.*000|vdf.*step.*time|VDF.*advance" crates/ --type rust -B 2 -A 2 | head -40Repository: Irys-xyz/irys Length of output: 2755 🏁 Script executed: #!/antml:bash
# Check if 50ms is sufficient for VDF to advance by one step
rg "Duration.*millis|sleep.*[0-9]" crates/chain/tests/ --type rust | grep -A 2 -B 2 "200\|500" | head -30Repository: Irys-xyz/irys Length of output: 2866 🏁 Script executed: #!/bin/bash
# Check if there's monitoring of VDF step progression in the test loop itself
cat -n crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | sed -n '154,200p'Repository: Irys-xyz/irys Length of output: 2001 🏁 Script executed: #!/antml:bash
# See how long 500 attempts with 50ms delays would take
python3 <<'EOF'
attempts = 500
delay_ms = 50
total_ms = attempts * delay_ms
total_secs = total_ms / 1000
print(f"Max loop time: {total_ms}ms = {total_secs}s")
print(f"This assumes every attempt takes negligible time")
print(f"Test timeout is typically 10-30 seconds")
EOFRepository: Irys-xyz/irys Length of output: 143 🏁 Script executed: #!/bin/bash
# Check what happens if VDF step doesn't advance - does the loop get stuck?
# Look at error handling in the test
cat -n crates/chain/tests/block_production/data_poa_boundary_acceptance.rs | sed -n '200,210p'Repository: Irys-xyz/irys Length of output: 495 Solution loop lacks VDF progression verification despite existing mitigations The test already calls Each iteration of To stabilize this test, add an explicit check that |
||
|
|
||
| node.node_ctx | ||
| .service_senders | ||
| .block_producer | ||
| .send(BlockProducerCommand::SetTestBlocksRemaining(Some(1)))?; | ||
|
|
||
| let built_block = submit_solution_to_block_producer(&node, solution).await?; | ||
| let validation_outcome = read_block_from_state(&node.node_ctx, &built_block.block_hash).await; | ||
| node.node_ctx.stop_mining()?; | ||
|
|
||
| match validation_outcome { | ||
| BlockValidationOutcome::StoredOnNode(chain_state) => { | ||
| tracing::info!("Block validated with state: {:?}", chain_state); | ||
| } | ||
| BlockValidationOutcome::Discarded(error) => { | ||
| return Err(eyre::eyre!("Block validation failed: {:?}", error)); | ||
| } | ||
| } | ||
|
|
||
| let canonical_hash = node.wait_until_height(built_block.height, 30).await?; | ||
| assert_eq!( | ||
| canonical_hash, built_block.block_hash, | ||
| "Boundary block should become canonical at its height" | ||
| ); | ||
| assert_eq!( | ||
| built_block.height % BLOCKS_PER_EPOCH, | ||
| 1, | ||
| "Boundary block should be the first block of the new epoch" | ||
| ); | ||
| assert_eq!( | ||
| built_block.poa.ledger_id, | ||
| Some(DataLedger::Submit as u32), | ||
| "Built block PoA should target the Submit ledger" | ||
| ); | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| fn debug_partitions(parent_snapshot: Arc<EpochSnapshot>) { | ||
| let submit_slots = parent_snapshot.ledgers.get_slots(DataLedger::Submit); | ||
| let publish_slots = parent_snapshot.ledgers.get_slots(DataLedger::Publish); | ||
| tracing::debug!( | ||
| epoch_height = parent_snapshot.epoch_height, | ||
| submit_slots = submit_slots.len(), | ||
| publish_slots = publish_slots.len(), | ||
| "Epoch snapshot partition info" | ||
| ); | ||
| for (i, slot) in submit_slots.iter().enumerate() { | ||
| tracing::debug!( | ||
| slot_index = i, | ||
| partitions_count = slot.partitions.len(), | ||
| partitions = ?slot.partitions, | ||
| "Submit slot" | ||
| ); | ||
| for hash in &slot.partitions { | ||
| if let Some(ass) = parent_snapshot | ||
| .partition_assignments | ||
| .data_partitions | ||
| .get(hash) | ||
| { | ||
| tracing::debug!( | ||
| partition = ?hash, | ||
| ledger_id = ?ass.ledger_id, | ||
| slot_index = ?ass.slot_index, | ||
| miner = ?ass.miner_address, | ||
| "Partition assignment" | ||
| ); | ||
| } | ||
| } | ||
| } | ||
| for (i, slot) in publish_slots.iter().enumerate() { | ||
| tracing::debug!( | ||
| slot_index = i, | ||
| partitions_count = slot.partitions.len(), | ||
| partitions = ?slot.partitions, | ||
| "Publish slot" | ||
| ); | ||
| } | ||
| } | ||
|
|
||
| // reusable debug asserts helper | ||
| async fn debug_asserts_first_slots( | ||
| node: &IrysNodeTest<IrysNodeCtx>, | ||
| expected_submit_slots: usize, | ||
| expected_submit_partitions: usize, | ||
| expected_publish_slots: usize, | ||
| expected_publish_partitions: usize, | ||
| ) -> eyre::Result<()> { | ||
| let snapshot = get_epoch_snapshot(node).await?; | ||
| let submit_slots = snapshot.ledgers.get_slots(DataLedger::Submit); | ||
| let publish_slots = snapshot.ledgers.get_slots(DataLedger::Publish); | ||
|
|
||
| assert_eq!( | ||
| expected_submit_slots, | ||
| submit_slots.len(), | ||
| "We expected {} Submit slot(s)", | ||
| expected_submit_slots | ||
| ); | ||
| assert_eq!( | ||
| expected_submit_partitions, | ||
| submit_slots | ||
| .first() | ||
| .map(|s| s.partitions.len()) | ||
| .unwrap_or_default(), | ||
| "We expected {} Submit partition(s)", | ||
| expected_submit_partitions | ||
| ); | ||
| assert_eq!( | ||
| expected_publish_slots, | ||
| publish_slots.len(), | ||
| "We expected {} publish slot(s)", | ||
| expected_publish_slots | ||
| ); | ||
| assert_eq!( | ||
| expected_publish_partitions, | ||
| publish_slots | ||
| .first() | ||
| .map(|s| s.partitions.len()) | ||
| .unwrap_or_default(), | ||
| "We expected {} publish partition(s)", | ||
| expected_publish_partitions | ||
| ); | ||
| Ok(()) | ||
| } | ||
|
Comment on lines
+281
to
+324
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Snapshot-shape debug assertions are helpful but tightly coupled to config
assert_eq!(expected_submit_slots, submit_slots.len(), ...);
assert_eq!(expected_submit_partitions, submit_slots.first().map(|s| s.partitions.len()).unwrap_or_default(), ...);
...With Given this is primarily a regression test for snapshot selection, consider:
Not strictly wrong, but it’s an easy source of future breakage unrelated to the PoA regression itself. 🤖 Prompt for AI Agents |
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
`tx_ledger_offset = slot_size` is a brittle assumption about ledger layout. Here you assume the second tx's first chunk starts exactly at
`slot_size`: that's only guaranteed if:
Given the amount of prior epoch activity (stake/pledge, capacity assignments, backfill, etc.), this coupling to implementation details of packing is fragile. If the packing or ledger layout changes (or other txs are present), the actual ledger offset for
txcan differ, and the crafted PoA will be misaligned even when the parent snapshot selection is correct.To make the test robust, consider:
deriving the actual ledger offset of `tx` from `BlockIndex` once its inclusion block is known (e.g., via `get_block_bounds` or a small helper), then feeding that concrete offset into `craft_data_poa_solution_from_tx`; or asserting that the offset really is `slot_size` before crafting the PoA, and failing fast with a clear message if not. This will decouple the regression check from low-level packing assumptions and should reduce false negatives.